| OLD | NEW | 
|---|
| 1 # This file is part of Adblock Plus <https://adblockplus.org/>, | 1 # This file is part of Adblock Plus <https://adblockplus.org/>, | 
| 2 # Copyright (C) 2006-2016 Eyeo GmbH | 2 # Copyright (C) 2006-2016 Eyeo GmbH | 
| 3 # | 3 # | 
| 4 # Adblock Plus is free software: you can redistribute it and/or modify | 4 # Adblock Plus is free software: you can redistribute it and/or modify | 
| 5 # it under the terms of the GNU General Public License version 3 as | 5 # it under the terms of the GNU General Public License version 3 as | 
| 6 # published by the Free Software Foundation. | 6 # published by the Free Software Foundation. | 
| 7 # | 7 # | 
| 8 # Adblock Plus is distributed in the hope that it will be useful, | 8 # Adblock Plus is distributed in the hope that it will be useful, | 
| 9 # but WITHOUT ANY WARRANTY; without even the implied warranty of | 9 # but WITHOUT ANY WARRANTY; without even the implied warranty of | 
| 10 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | 10 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | 
| (...skipping 26 matching lines...) Expand all  Loading... | 
| 37     're.match': 're.search', | 37     're.match': 're.search', | 
| 38     'codecs.open': 'io.open', | 38     'codecs.open': 'io.open', | 
| 39 } | 39 } | 
| 40 | 40 | 
# Builtins that checked code may legitimately use: everything in the
# builtins module except the Python-2-only names that have no
# Python 3 counterpart.
ESSENTIAL_BUILTINS = {name for name in dir(builtins)
                      if name not in {'apply', 'buffer', 'coerce',
                                      'intern', 'file'}}

# AST statement types that unconditionally leave the current block.
LEAVE_BLOCK = (ast.Return, ast.Raise, ast.Continue, ast.Break)
# Sentinel returned by evaluate() for expressions that cannot be
# reduced to a compile-time constant.
VOLATILE = object()

# Tracks whether the file currently being checked is in
# "from __future__ import unicode_literals" mode.
is_unicode_literals = False
|  | 48 | 
| 47 | 49 | 
def evaluate(node):
    """Evaluate *node* in an empty namespace.

    Returns the resulting value, or the VOLATILE sentinel when the
    expression cannot be evaluated (any exception is treated as
    "not a constant").
    """
    try:
        code = compile(ast.Expression(node), '', 'eval')
        return eval(code, {})
    except Exception:
        return VOLATILE
| 53 | 55 | 
| 54 | 56 | 
def is_const(node):
    """Return True if *node* evaluates to a compile-time constant."""
    value = evaluate(node)
    return value is not VOLATILE
| (...skipping 313 matching lines...) Expand 10 before | Expand all | Expand 10 after  Loading... | 
def check_non_default_encoding(physical_line, line_number):
    """Report an explicit source-encoding declaration (A303).

    Only the first two physical lines are considered, mirroring where
    Python itself looks for a coding cookie.
    """
    if line_number > 2:
        return None
    if re.search(r'^\s*#.*coding[:=]', physical_line) is None:
        return None
    return (0, 'A303 non-default file encoding')
| 373 | 375 | 
| 374 check_non_default_encoding.name = 'abp-non-default-encoding' | 376 check_non_default_encoding.name = 'abp-non-default-encoding' | 
| 375 check_non_default_encoding.version = __version__ | 377 check_non_default_encoding.version = __version__ | 
| 376 | 378 | 
| 377 | 379 | 
def check_quotes(logical_line, tokens, previous_logical):
    """Check string literals for quoting and prefix style.

    Yields (position, message) tuples:
    * A109 - docstrings must use triple double quotes and no "r" prefix.
    * A110 - single-line literals must match their ascii() form.
    * A112 - "u" prefixes should be replaced by a unicode_literals
      future import.
    """
    global is_unicode_literals
    first_token = True
    # Single definition of the A112 message, shared by the docstring
    # and ordinary-literal branches below.  Note the trailing space in
    # the first fragment: adjacent literals are concatenated, so
    # without it the message would read "importunicode_literals".
    a112_message = ('A112 use "from __future__ import '
                    'unicode_literals" instead of '
                    'prefixing literals with "u"')

    # Reset the per-file flag when this logical line starts the file.
    if tokens[0][3][0] == 1:
        is_unicode_literals = False

    # Detect "from __future__ import ... unicode_literals ...".
    token_strings = [t[1] for t in tokens]
    if token_strings[:3] == ['from', '__future__', 'import']:
        if 'unicode_literals' in token_strings:
            is_unicode_literals = True

    for kind, token, start, end, _ in tokens:
        if kind == tokenize.INDENT or kind == tokenize.DEDENT:
            continue

        if kind == tokenize.STRING:
            match = re.search(r'^(u)?(b)?(r)?((""")?.*)$',
                              token, re.IGNORECASE | re.DOTALL)
            (is_unicode, is_bytes, is_raw,
             literal, has_doc_quotes) = match.groups()

            if first_token and re.search(r'^(?:(?:def|class)\s|$)',
                                         previous_logical):
                # First token after a def/class header (or at module
                # start): this string is a docstring.
                if not has_doc_quotes:
                    yield (start, 'A109 use triple double '
                                  'quotes for docstrings')
                if is_raw:
                    yield (start, 'A109 do not use "r" prefix for docstrings')
                if is_unicode and not is_unicode_literals:
                    yield (start, a112_message)
            elif start[0] == end[0]:
                # Single-line, non-docstring literal: normalize the
                # prefix and compare with the canonical ascii() form.
                if is_raw:
                    literal = re.sub(r'\\(?!{})'.format(literal[0]),
                                     '\\\\\\\\', literal)
                if is_unicode and not is_unicode_literals:
                    yield (start, a112_message)
                if sys.version_info[0] >= 3:
                    if is_bytes:
                        literal = 'b' + literal
                elif not is_bytes:
                    literal = 'u' + literal

                if ascii(eval(literal)) != literal:
                    yield (start, "A110 string literal doesn't match "
                                  '{}()'.format(ascii.__name__))

        first_token = False
| 415 | 434 | 
| 416 check_quotes.name = 'abp-quotes' | 435 check_quotes.name = 'abp-quotes' | 
| 417 check_quotes.version = __version__ | 436 check_quotes.version = __version__ | 
| (...skipping 44 matching lines...) Expand 10 before | Expand all | Expand 10 after  Loading... | 
| 462                     if tokens[i + 1][:2] != (tokenize.OP, ':'): | 481                     if tokens[i + 1][:2] != (tokenize.OP, ':'): | 
| 463                         break | 482                         break | 
| 464 | 483 | 
| 465                     return [(pos, 'A111 redundant parenthesis for {} ' | 484                     return [(pos, 'A111 redundant parenthesis for {} ' | 
| 466                                   'statement'.format(statement))] | 485                                   'statement'.format(statement))] | 
| 467 | 486 | 
| 468     return [] | 487     return [] | 
| 469 | 488 | 
| 470 check_redundant_parenthesis.name = 'abp-redundant-parenthesis' | 489 check_redundant_parenthesis.name = 'abp-redundant-parenthesis' | 
| 471 check_redundant_parenthesis.version = __version__ | 490 check_redundant_parenthesis.version = __version__ | 
| OLD | NEW | 
|---|