OLD | NEW |
1 # This file is part of Adblock Plus <https://adblockplus.org/>, | 1 # This file is part of Adblock Plus <https://adblockplus.org/>, |
2 # Copyright (C) 2006-2016 Eyeo GmbH | 2 # Copyright (C) 2006-2016 Eyeo GmbH |
3 # | 3 # |
4 # Adblock Plus is free software: you can redistribute it and/or modify | 4 # Adblock Plus is free software: you can redistribute it and/or modify |
5 # it under the terms of the GNU General Public License version 3 as | 5 # it under the terms of the GNU General Public License version 3 as |
6 # published by the Free Software Foundation. | 6 # published by the Free Software Foundation. |
7 # | 7 # |
8 # Adblock Plus is distributed in the hope that it will be useful, | 8 # Adblock Plus is distributed in the hope that it will be useful, |
9 # but WITHOUT ANY WARRANTY; without even the implied warranty of | 9 # but WITHOUT ANY WARRANTY; without even the implied warranty of |
10 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 10 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
(...skipping 358 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
369 | 369 |
370 | 370 |
def check_non_default_encoding(physical_line, line_number):
    """Report A303 for explicit source-encoding declarations.

    PEP 263 only recognizes a coding cookie on the first two physical
    lines of a file, so later lines are never inspected.  Returns a
    ``(column, message)`` tuple for flake8 when a cookie is found,
    otherwise ``None``.
    """
    may_hold_cookie = line_number <= 2
    has_cookie = re.search(r'^\s*#.*coding[:=]', physical_line)
    if may_hold_cookie and has_cookie:
        return (0, 'A303 non-default file encoding')
374 | 374 |
# flake8 plugin registration metadata for this check.
check_non_default_encoding.name = 'abp-non-default-encoding'
check_non_default_encoding.version = __version__
377 | 377 |
378 | 378 |
def check_quotes(logical_line, tokens, previous_logical, checker_state):
    """Check string literal quoting and prefixes (A109/A110/A112).

    Generator yielding ``(position, message)`` tuples for flake8:

    * A112 -- a literal carries a ``u`` prefix instead of the file using
      ``from __future__ import unicode_literals``.
    * A109 -- a docstring that is not triple-double-quoted.
    * A110 -- a raw string not using single quotes, or a single-line
      literal whose written form differs from its canonical ``ascii()``
      rendering.

    ``checker_state`` is flake8's per-file mutable dict; it is used to
    remember across logical lines whether ``unicode_literals`` has been
    imported, which changes the canonical prefix under Python 2.
    """
    first_token = True

    # Record a "from __future__ import ... unicode_literals ..." line in
    # the shared per-file state so later literals are normalized with
    # the right prefix.
    token_strings = [t[1] for t in tokens]
    future_import = token_strings[:3] == ['from', '__future__', 'import']

    if future_import and 'unicode_literals' in token_strings:
        checker_state['has_unicode_literals'] = True

    for kind, token, start, end, _ in tokens:
        # INDENT/DEDENT tokens would otherwise count as the "first
        # token" of the logical line; skip them.
        if kind == tokenize.INDENT or kind == tokenize.DEDENT:
            continue

        if kind == tokenize.STRING:
            # Split the literal into prefix letters, quote style
            # (', ", ''' or """) and the enclosed text.
            match = re.search(r'^([rub]*)([\'"]{1,3})(.*)\2$',
                              token, re.IGNORECASE | re.DOTALL)
            prefixes, quote, text = match.groups()
            prefixes = prefixes.lower()

            if 'u' in prefixes:
                yield (start, 'A112 use "from __future__ import '
                              'unicode_literals" instead of '
                              'prefixing literals with "u"')

            # A string appearing as the first token right after a
            # def/class header (or at module start, where
            # previous_logical is empty) is a docstring.
            if first_token and re.search(r'^(?:(?:def|class)\s|$)',
                                         previous_logical):
                if quote != '"""':
                    yield (start, 'A109 use triple double '
                                  'quotes for docstrings')
            elif start[0] != end[0]:
                # Multi-line literal: the eval/ascii round-trip below
                # only makes sense for single-line literals, so skip.
                pass
            elif 'r' in prefixes:
                # Raw strings should use single quotes unless the text
                # itself contains one (then double quotes are allowed).
                if quote != "'" and not (quote == '"' and "'" in text):
                    yield (start, 'A110 use single quotes for raw string')
            else:
                # Determine the prefix that ascii() of the value would
                # produce: 'b' on Python 3 bytes literals; 'u' on
                # Python 2 when the literal is unicode (explicit prefix
                # or unicode_literals in effect and not a bytes literal).
                prefix = ''
                if sys.version_info[0] >= 3:
                    if 'b' in prefixes:
                        prefix = 'b'
                else:
                    u_literals = checker_state.get('has_unicode_literals')
                    if 'u' in prefixes or u_literals and 'b' not in prefixes:
                        prefix = 'u'

                # Re-assemble the literal and compare it with the
                # canonical rendering of its value; a mismatch means
                # non-canonical quoting or escaping.  eval() is safe
                # here in context: the operand is a single string
                # literal token produced by tokenize.
                literal = '{0}{1}{2}{1}'.format(prefix, quote, text)
                if ascii(eval(literal)) != literal:
                    yield (start, "A110 string literal doesn't match "
                                  '{}()'.format(ascii.__name__))

        first_token = False
415 | 429 |
# flake8 plugin registration metadata for this check.
check_quotes.name = 'abp-quotes'
check_quotes.version = __version__
418 | 432 |
419 | 433 |
420 def check_redundant_parenthesis(logical_line, tokens): | 434 def check_redundant_parenthesis(logical_line, tokens): |
421 start_line = tokens[0][2][0] | 435 start_line = tokens[0][2][0] |
422 level = 0 | 436 level = 0 |
(...skipping 39 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
462 if tokens[i + 1][:2] != (tokenize.OP, ':'): | 476 if tokens[i + 1][:2] != (tokenize.OP, ':'): |
463 break | 477 break |
464 | 478 |
465 return [(pos, 'A111 redundant parenthesis for {} ' | 479 return [(pos, 'A111 redundant parenthesis for {} ' |
466 'statement'.format(statement))] | 480 'statement'.format(statement))] |
467 | 481 |
468 return [] | 482 return [] |
469 | 483 |
470 check_redundant_parenthesis.name = 'abp-redundant-parenthesis' | 484 check_redundant_parenthesis.name = 'abp-redundant-parenthesis' |
471 check_redundant_parenthesis.version = __version__ | 485 check_redundant_parenthesis.version = __version__ |
OLD | NEW |