| OLD | NEW |
|---|---|
| 1 # This file is part of Adblock Plus <https://adblockplus.org/>, | 1 # This file is part of Adblock Plus <https://adblockplus.org/>, |
| 2 # Copyright (C) 2006-2016 Eyeo GmbH | 2 # Copyright (C) 2006-2016 Eyeo GmbH |
| 3 # | 3 # |
| 4 # Adblock Plus is free software: you can redistribute it and/or modify | 4 # Adblock Plus is free software: you can redistribute it and/or modify |
| 5 # it under the terms of the GNU General Public License version 3 as | 5 # it under the terms of the GNU General Public License version 3 as |
| 6 # published by the Free Software Foundation. | 6 # published by the Free Software Foundation. |
| 7 # | 7 # |
| 8 # Adblock Plus is distributed in the hope that it will be useful, | 8 # Adblock Plus is distributed in the hope that it will be useful, |
| 9 # but WITHOUT ANY WARRANTY; without even the implied warranty of | 9 # but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 10 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 10 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| (...skipping 358 matching lines...) | |
| 369 | 369 |
| 370 | 370 |
| 371 def check_non_default_encoding(physical_line, line_number): | 371 def check_non_default_encoding(physical_line, line_number): |
| 372 if line_number <= 2 and re.search(r'^\s*#.*coding[:=]', physical_line): | 372 if line_number <= 2 and re.search(r'^\s*#.*coding[:=]', physical_line): |
| 373 return (0, 'A303 non-default file encoding') | 373 return (0, 'A303 non-default file encoding') |
| 374 | 374 |
| 375 check_non_default_encoding.name = 'abp-non-default-encoding' | 375 check_non_default_encoding.name = 'abp-non-default-encoding' |
| 376 check_non_default_encoding.version = __version__ | 376 check_non_default_encoding.version = __version__ |
| 377 | 377 |
| 378 | 378 |
| 379 def check_quotes(logical_line, tokens, previous_logical): | 379 def check_quotes(logical_line, tokens, previous_logical, checker_state): |
| 380 first_token = True | 380 first_token = True |
| 381 if tokens[0][3] == 1: | |
| 382 checker_state['has_unicode_literals'] = False | |
Sebastian Noack, 2016/06/01 09:37:52: Awesome that we can avoid the global variable. How…
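
A note for readers following the thread above: the pep8/pycodestyle framework passes a per-file dict to any logical-line check that declares a `checker_state` parameter, which is the mechanism the new code relies on to drop its module-level flag. A minimal sketch of that pattern, assuming the standard pep8 check interface (the check name and code below are illustrative, not part of the patch):

```python
import re


def check_future_unicode(logical_line, checker_state):
    """Remember, per file, whether unicode_literals is in effect."""
    # checker_state is supplied by the framework and persists across the
    # logical lines of a single file, so no module-level global is needed.
    if re.match(r'from\s+__future__\s+import\b.*\bunicode_literals\b',
                logical_line):
        checker_state['has_unicode_literals'] = True
    # This sketch only records state; it reports nothing itself.


check_future_unicode.name = 'abp-example-unicode-flag'
check_future_unicode.version = '0.1'
```
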
| 383 | |
| 384 # check if in unicode_literals mode | |
| 385 token_strings = [t[1] for t in tokens] | |
| 386 future_import = token_strings[:3] == ['from', '__future__', 'import'] | |
| 387 | |
| 388 if future_import and 'unicode_literals' in token_strings: | |
| 389 checker_state['has_unicode_literals'] = True | |
| 390 | |
| 391 has_unicode_literals = checker_state.get('has_unicode_literals') | |
| 381 | 392 |
| 382 for kind, token, start, end, _ in tokens: | 393 for kind, token, start, end, _ in tokens: |
| 383 if kind == tokenize.INDENT or kind == tokenize.DEDENT: | 394 if kind == tokenize.INDENT or kind == tokenize.DEDENT: |
| 384 continue | 395 continue |
| 385 | 396 |
| 386 if kind == tokenize.STRING: | 397 if kind == tokenize.STRING: |
| 387 match = re.search(r'^([rub]*)([\'"]{1,3})(.*)\2$', | 398 match = re.search(r'^([rub]*)([\'"]{1,3})(.*)\2$', |
| 388 token, re.IGNORECASE | re.DOTALL) | 399 token, re.IGNORECASE | re.DOTALL) |
| 389 prefixes, quote, text = match.groups() | 400 prefixes, quote, text = match.groups() |
| 390 prefixes = prefixes.lower() | 401 prefixes = prefixes.lower() |
| 391 | 402 |
| 403 if 'u' in prefixes and not has_unicode_literals: | |
Vasily Kuznetsov, 2016/06/01 11:01:32: I wrote it before but maybe it got lost with all t…
Jon Sonesen, 2016/06/01 17:07:00: Thanks for the reminder I thought I had removed it…
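
To make the new A112 message in the rows below concrete, this is the style it pushes towards (illustrative example input, not part of the patch):

```python
# Before: an explicit u'' prefix with no future import would now be reported
# as "A112 use 'from __future__ import unicode_literals' instead of
# prefixing literals with 'u'".
#
#     title = u'Adblock Plus'

# After: the future import makes plain literals unicode on Python 2 as well,
# so no prefix is needed and A112 stays quiet.
from __future__ import unicode_literals

title = 'Adblock Plus'
```
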
| 404 yield (start, 'A112 use "from __future__ import ' | |
| 405 'unicode_literals" instead of ' | |
| 406 'prefixing literals with "u"') | |
| 407 | |
| 392 if first_token and re.search(r'^(?:(?:def|class)\s|$)', | 408 if first_token and re.search(r'^(?:(?:def|class)\s|$)', |
| 393 previous_logical): | 409 previous_logical): |
| 394 if quote != '"""': | 410 if quote != '"""': |
| 395 yield (start, 'A109 use triple double ' | 411 yield (start, 'A109 use triple double ' |
| 396 'quotes for docstrings') | 412 'quotes for docstrings') |
| 397 elif prefixes: | 413 elif start[0] != end[0]: |
Vasily Kuznetsov, 2016/06/01 11:01:32: I have a mild preference for how this looked befor…
Sebastian Noack, 2016/06/01 11:05:01: I disagree. That way it's easier to understand tha…
Vasily Kuznetsov, 2016/06/01 11:34:31: Yeah, you have a point too. Ok, let's leave it as…
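
The `start[0] != end[0]` test debated above uses the (row, column) pairs that tokenize attaches to each token: differing rows mean the string literal spans multiple lines, and the remaining quote checks are skipped for it. A small standalone illustration (Python 3, not part of the patch):

```python
import io
import tokenize

source = 'x = """first\nsecond"""\ny = "one line"\n'

for tok in tokenize.generate_tokens(io.StringIO(source).readline):
    if tok.type == tokenize.STRING:
        # The triple-quoted literal starts on row 1 and ends on row 2, the
        # single-line literal starts and ends on row 3 -- exactly the
        # distinction that start[0] != end[0] makes in check_quotes.
        print(repr(tok.string), tok.start[0], tok.end[0])
```
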
| 398 yield (start, "A109 don't use u'', b'' " | 414 pass |
| 399 "or r'' for doc strings") | 415 elif 'r' in prefixes: |
| 400 elif start[0] == end[0]: | 416 if quote != "'" and not (quote == '"' and "'" in text): |
| 401 if 'r' in prefixes: | 417 yield (start, 'A110 use single quotes for raw string') |
| 402 if quote != "'" and not (quote == '"' and "'" in text): | 418 else: |
| 403 yield (start, 'A110 use single quotes for raw string') | 419 is_unicode = 'u' in prefixes |
| 404 else: | 420 is_bytes = 'b' in prefixes |
| 405 prefix = 'b' if sys.version_info[0] >= 3 else 'u' | 421 prefix = '' |
| 406 if prefix not in prefixes: | 422 if sys.version_info[0] >= 3: |
| 407 prefix = '' | 423 if 'b' in prefixes: |
| 424 prefix = 'b' | |
| 425 elif is_unicode or has_unicode_literals and not is_bytes: | |
Sebastian Noack, 2016/06/01 09:37:52: For better code locality please retrieve checker_s…
Jon Sonesen, 2016/06/01 17:07:00: I agree with you regarding the use of the fetch ha…
Jon Sonesen, 2016/06/01 17:14:03: Ah, prior to reducing the indent level with the ch…
Jon Sonesen, 2016/06/02 01:14:10: Also I just tried to implement the if else logic a…
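
For context on the literal normalisation in the rows below: the check rebuilds what it considers the canonical spelling of the literal and compares it with `ascii(eval(...))`, so escape sequences and quote styles that don't round-trip are reported as A110. A rough standalone illustration of that comparison (Python 3, not part of the patch):

```python
# ascii() of the evaluated string yields the repr-style canonical literal;
# when the source spelling differs, check_quotes emits A110.
examples = ["'\\x41'",    # escape resolves to 'A', so it does not round-trip
            "'A'",        # canonical already
            '"double"',   # double quotes where single quotes would do
            "'single'"]   # canonical already

for literal in examples:
    canonical = ascii(eval(literal))
    print(literal, '->', canonical, 'ok' if canonical == literal else 'A110')
```
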
| 426 prefix = 'u' | |
| 408 | 427 |
| 409 literal = '{0}{1}{2}{1}'.format(prefix, quote, text) | 428 literal = '{0}{1}{2}{1}'.format(prefix, quote, text) |
| 410 if ascii(eval(literal)) != literal: | 429 if ascii(eval(literal)) != literal: |
| 411 yield (start, "A110 string literal doesn't match " | 430 yield (start, "A110 string literal doesn't match " |
| 412 '{}()'.format(ascii.__name__)) | 431 '{}()'.format(ascii.__name__)) |
| 413 | 432 |
| 414 first_token = False | 433 first_token = False |
| 415 | 434 |
| 416 check_quotes.name = 'abp-quotes' | 435 check_quotes.name = 'abp-quotes' |
| 417 check_quotes.version = __version__ | 436 check_quotes.version = __version__ |
| 418 | 437 |
| 419 | 438 |
| 420 def check_redundant_parenthesis(logical_line, tokens): | 439 def check_redundant_parenthesis(logical_line, tokens): |
| 421 start_line = tokens[0][2][0] | 440 start_line = tokens[0][2][0] |
| 422 level = 0 | 441 level = 0 |
| (...skipping 39 matching lines...) | |
| 462 if tokens[i + 1][:2] != (tokenize.OP, ':'): | 481 if tokens[i + 1][:2] != (tokenize.OP, ':'): |
| 463 break | 482 break |
| 464 | 483 |
| 465 return [(pos, 'A111 redundant parenthesis for {} ' | 484 return [(pos, 'A111 redundant parenthesis for {} ' |
| 466 'statement'.format(statement))] | 485 'statement'.format(statement))] |
| 467 | 486 |
| 468 return [] | 487 return [] |
| 469 | 488 |
| 470 check_redundant_parenthesis.name = 'abp-redundant-parenthesis' | 489 check_redundant_parenthesis.name = 'abp-redundant-parenthesis' |
| 471 check_redundant_parenthesis.version = __version__ | 490 check_redundant_parenthesis.version = __version__ |