| OLD | NEW |
|---|---|
| 1 # This file is part of Adblock Plus <https://adblockplus.org/>, | 1 # This file is part of Adblock Plus <https://adblockplus.org/>, |
| 2 # Copyright (C) 2006-2016 Eyeo GmbH | 2 # Copyright (C) 2006-2016 Eyeo GmbH |
| 3 # | 3 # |
| 4 # Adblock Plus is free software: you can redistribute it and/or modify | 4 # Adblock Plus is free software: you can redistribute it and/or modify |
| 5 # it under the terms of the GNU General Public License version 3 as | 5 # it under the terms of the GNU General Public License version 3 as |
| 6 # published by the Free Software Foundation. | 6 # published by the Free Software Foundation. |
| 7 # | 7 # |
| 8 # Adblock Plus is distributed in the hope that it will be useful, | 8 # Adblock Plus is distributed in the hope that it will be useful, |
| 9 # but WITHOUT ANY WARRANTY; without even the implied warranty of | 9 # but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 10 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 10 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| (...skipping 26 matching lines...) | |
| 37 're.match': 're.search', | 37 're.match': 're.search', |
| 38 'codecs.open': 'io.open', | 38 'codecs.open': 'io.open', |
| 39 } | 39 } |
| 40 | 40 |
| 41 ESSENTIAL_BUILTINS = set(dir(builtins)) - {'apply', 'buffer', 'coerce', | 41 ESSENTIAL_BUILTINS = set(dir(builtins)) - {'apply', 'buffer', 'coerce', |
| 42 'intern', 'file'} | 42 'intern', 'file'} |
| 43 | 43 |
| 44 LEAVE_BLOCK = (ast.Return, ast.Raise, ast.Continue, ast.Break) | 44 LEAVE_BLOCK = (ast.Return, ast.Raise, ast.Continue, ast.Break) |
| 45 VOLATILE = object() | 45 VOLATILE = object() |
| 46 | 46 |
| 47 IS_UNICODE_LITERALS = False | |
|
Sebastian Noack
2016/05/25 08:31:36
Uppercase notation is only for variables that are constants.
| |
| 48 | |
| 47 | 49 |
| 48 def evaluate(node): | 50 def evaluate(node): |
| 49 try: | 51 try: |
| 50 return eval(compile(ast.Expression(node), '', 'eval'), {}) | 52 return eval(compile(ast.Expression(node), '', 'eval'), {}) |
| 51 except Exception: | 53 except Exception: |
| 52 return VOLATILE | 54 return VOLATILE |
| 53 | 55 |
| 54 | 56 |
| 55 def is_const(node): | 57 def is_const(node): |
| 56 return evaluate(node) is not VOLATILE | 58 return evaluate(node) is not VOLATILE |
| (...skipping 313 matching lines...) | |
| 370 def check_non_default_encoding(physical_line, line_number): | 372 def check_non_default_encoding(physical_line, line_number): |
| 371 if line_number <= 2 and re.search(r'^\s*#.*coding[:=]', physical_line): | 373 if line_number <= 2 and re.search(r'^\s*#.*coding[:=]', physical_line): |
| 372 return (0, 'A303 non-default file encoding') | 374 return (0, 'A303 non-default file encoding') |
| 373 | 375 |
| 374 check_non_default_encoding.name = 'abp-non-default-encoding' | 376 check_non_default_encoding.name = 'abp-non-default-encoding' |
| 375 check_non_default_encoding.version = __version__ | 377 check_non_default_encoding.version = __version__ |
| 376 | 378 |
| 377 | 379 |
| 378 def check_quotes(logical_line, tokens, previous_logical): | 380 def check_quotes(logical_line, tokens, previous_logical): |
| 379 first_token = True | 381 first_token = True |
| 380 global is_unicode_literals | 382 global IS_UNICODE_LITERALS |
| 383 | |
| 384 # --- check if this is beginning of file | |
|
Sebastian Noack
2016/05/25 08:31:36
We generally don't use --- in comments. So please remove them.
Vasily Kuznetsov
2016/05/25 13:55:31
Also pep8 recommends capitalising the first letter of comments.
| |
| 385 if tokens[0][3][0] == 1: | |
| 386 IS_UNICODE_LITERALS = False | |
| 387 | |
| 388 # --- check if in unicode_literals mode | |
| 389 token_strings = [t[1] for t in tokens] | |
|
Sebastian Noack
2016/05/25 08:31:35
I wonder whether we should also check for the token type here.
Vasily Kuznetsov
2016/05/25 13:55:32
Do we really need to? Can you imagine a line that would start with these tokens and not be a __future__ import?
Sebastian Noack
2016/05/25 14:45:29
I feel that checking for the token type is more correct.
Vasily Kuznetsov
2016/05/25 16:25:56
I see, it does feel a bit sloppy to just check the token strings.
| |
| 390 if token_strings[:3] == ['from', '__future__', 'import']: | |
| 391 IS_UNICODE_LITERALS = 'unicode_literals' in token_strings | |
|
Vasily Kuznetsov
2016/05/25 13:55:32
Won't this break if it gets a piece of code like this?
| |
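
The thread above debates matching on token strings versus token types. A minimal sketch of the type-aware variant Sebastian suggests, assuming the usual `(type, string, start, end, line)` tuples that flake8/pep8 hand to a logical-line check; the helper name `is_future_unicode_import` is invented for illustration and is not part of the patch:

```python
import tokenize


def is_future_unicode_import(tokens):
    # Compare token type as well as string, as suggested in the review;
    # tokenize reports keywords such as 'from' and 'import' as NAME
    # tokens, so the expected prefix is three NAME tokens.
    significant = [(kind, string) for kind, string, _, _, _ in tokens
                   if kind not in (tokenize.INDENT, tokenize.DEDENT,
                                   tokenize.COMMENT, tokenize.NL)]
    if significant[:3] != [(tokenize.NAME, 'from'),
                           (tokenize.NAME, '__future__'),
                           (tokenize.NAME, 'import')]:
        return False
    # Works for plain and parenthesised import lists alike.
    return any(kind == tokenize.NAME and string == 'unicode_literals'
               for kind, string in significant[3:])
```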
| 381 | 392 |
| 382 for kind, token, start, end, _ in tokens: | 393 for kind, token, start, end, _ in tokens: |
| 383 if kind == tokenize.INDENT or kind == tokenize.DEDENT: | 394 if kind == tokenize.INDENT or kind == tokenize.DEDENT: |
| 384 continue | 395 continue |
| 385 | 396 |
| 386 if start[0] == 1: | |
| 387 is_unicode_literals = False | |
| 388 | |
| 389 if logical_line == 'from __future__ import unicode_literals': | |
| 390 is_unicode_literals = True | |
| 391 | |
| 392 if kind == tokenize.STRING: | 397 if kind == tokenize.STRING: |
| 393 match = re.search(r'^(u)?(b)?(r)?((""")?.*)$', | 398 match = re.search(r'^(u)?(b)?(r)?((""")?.*)$', |
| 394 token, re.IGNORECASE | re.DOTALL) | 399 token, re.IGNORECASE | re.DOTALL) |
| 395 (is_unicode, is_bytes, is_raw, | 400 (is_unicode, is_bytes, is_raw, |
| 396 literal, has_doc_quotes) = match.groups() | 401 literal, has_doc_quotes) = match.groups() |
| 397 | 402 |
| 398 if first_token and re.search(r'^(?:(?:def|class)\s|$)', | 403 if first_token and re.search(r'^(?:(?:def|class)\s|$)', |
| 399 previous_logical): | 404 previous_logical): |
| 400 if not has_doc_quotes: | 405 if not has_doc_quotes: |
| 401 yield (start, 'A109 use triple double ' | 406 yield (start, 'A109 use triple double ' |
| 402 'quotes for docstrings') | 407 'quotes for docstrings') |
| 403 elif is_unicode or is_bytes or is_raw: | 408 elif is_unicode or is_bytes or is_raw: |
| 404 yield (start, "A109 don't use u'', b'' " | 409 yield (start, "A109 don't use u'', b'' " |
| 405 "or r'' for doc strings") | 410 "or r'' for doc strings") |
| 406 elif start[0] == end[0]: | 411 elif start[0] == end[0]: |
| 407 if is_raw: | 412 if is_raw: |
| 408 literal = re.sub(r'\\(?!{})'.format(literal[0]), | 413 literal = re.sub(r'\\(?!{})'.format(literal[0]), |
| 409 '\\\\\\\\', literal) | 414 '\\\\\\\\', literal) |
| 410 | |
| 411 if sys.version_info[0] >= 3: | 415 if sys.version_info[0] >= 3: |
| 412 if is_bytes: | 416 if is_bytes: |
| 413 literal = 'b' + literal | 417 literal = 'b' + literal |
| 414 elif is_unicode or is_unicode_literals: | 418 elif is_unicode and not IS_UNICODE_LITERALS: |
|
Sebastian Noack
2016/05/25 08:31:35
It seems the check for IS_UNICODE_LITERALS is incorrectly placed.
Vasily Kuznetsov
2016/05/25 13:55:32
I second that it should come before the Python 3 check.
Sebastian Noack
2016/05/25 14:45:29
Just an idea, how about moving it even above the check for docstrings?
Vasily Kuznetsov
2016/05/25 16:25:56
Moving the check to above the check for docstrings
| |
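
The comments above concern where the new A112 check should live: in the patch it sits inside the `sys.version_info[0] >= 3` branch, so it would only ever fire on one Python version. A rough sketch of the ordering being discussed, with the prefix checked before any version-specific normalisation; the helper name and the shortened message are placeholders, not the committed code:

```python
import sys


def normalise_literal(start, is_unicode, is_bytes, literal,
                      unicode_literals_active):
    # Placeholder helper for illustration only: the u'' prefix is
    # flagged first, independent of the running Python version, and the
    # literal is normalised for the ascii()/A110 comparison afterwards.
    errors = []
    if is_unicode and not unicode_literals_active:
        errors.append((start, 'A112 ...'))  # full message as in the patch
    if sys.version_info[0] >= 3:
        if is_bytes:
            literal = 'b' + literal
    elif not is_bytes:
        literal = 'u' + literal
    return errors, literal
```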
| 419 yield(start, 'A112 use "from __future__ import"' | |
|
Sebastian Noack
2016/05/25 08:31:35
There should be a space after "yield". Otherwise it looks like a function call.
Sebastian Noack
2016/05/25 08:31:35
Please document A112 in the README.
| |
| 420 'unicode_literals instead of prefixing' | |
|
Sebastian Noack
2016/05/25 08:31:35
Please indent long error messages like it's done in the other checks.
| |
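
Taking the style comments together (space after `yield`, indentation of the continuation lines, and the spaces that appear to go missing where the message parts above are concatenated), the emitted error could look roughly like the following sketch; the wrapper function is only there to make the fragment self-contained:

```python
def emit_a112(start):
    # Formatting sketch: space after "yield", continuation lines aligned
    # with the opening quote, and a trailing space on the first part so
    # the concatenated message reads correctly.
    yield (start, 'A112 use "from __future__ import unicode_literals" '
                  'instead of prefixing literals with "u"')
```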
| 421 'literals with "u"') | |
| 422 elif not is_bytes: | |
| 415 literal = 'u' + literal | 423 literal = 'u' + literal |
| 416 | 424 |
| 417 if ascii(eval(literal)) != literal: | 425 if ascii(eval(literal)) != literal: |
| 418 yield (start, "A110 string literal doesn't match " | 426 yield (start, "A110 string literal doesn't match " |
| 419 '{}()'.format(ascii.__name__)) | 427 '{}()'.format(ascii.__name__)) |
| 420 | 428 |
| 421 first_token = False | 429 first_token = False |
| 422 | 430 |
| 423 check_quotes.name = 'abp-quotes' | 431 check_quotes.name = 'abp-quotes' |
| 424 check_quotes.version = __version__ | 432 check_quotes.version = __version__ |
| (...skipping 44 matching lines...) | |
| 469 if tokens[i + 1][:2] != (tokenize.OP, ':'): | 477 if tokens[i + 1][:2] != (tokenize.OP, ':'): |
| 470 break | 478 break |
| 471 | 479 |
| 472 return [(pos, 'A111 redundant parenthesis for {} ' | 480 return [(pos, 'A111 redundant parenthesis for {} ' |
| 473 'statement'.format(statement))] | 481 'statement'.format(statement))] |
| 474 | 482 |
| 475 return [] | 483 return [] |
| 476 | 484 |
| 477 check_redundant_parenthesis.name = 'abp-redundant-parenthesis' | 485 check_redundant_parenthesis.name = 'abp-redundant-parenthesis' |
| 478 check_redundant_parenthesis.version = __version__ | 486 check_redundant_parenthesis.version = __version__ |