OLD | NEW |
1 # This file is part of Adblock Plus <https://adblockplus.org/>, | 1 # This file is part of Adblock Plus <https://adblockplus.org/>, |
2 # Copyright (C) 2006-2016 Eyeo GmbH | 2 # Copyright (C) 2006-2016 Eyeo GmbH |
3 # | 3 # |
4 # Adblock Plus is free software: you can redistribute it and/or modify | 4 # Adblock Plus is free software: you can redistribute it and/or modify |
5 # it under the terms of the GNU General Public License version 3 as | 5 # it under the terms of the GNU General Public License version 3 as |
6 # published by the Free Software Foundation. | 6 # published by the Free Software Foundation. |
7 # | 7 # |
8 # Adblock Plus is distributed in the hope that it will be useful, | 8 # Adblock Plus is distributed in the hope that it will be useful, |
9 # but WITHOUT ANY WARRANTY; without even the implied warranty of | 9 # but WITHOUT ANY WARRANTY; without even the implied warranty of |
10 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 10 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
(...skipping 358 matching lines...)
369 | 369 |
370 | 370 |
371 def check_non_default_encoding(physical_line, line_number): | 371 def check_non_default_encoding(physical_line, line_number): |
372 if line_number <= 2 and re.search(r'^\s*#.*coding[:=]', physical_line): | 372 if line_number <= 2 and re.search(r'^\s*#.*coding[:=]', physical_line): |
373 return (0, 'A303 non-default file encoding') | 373 return (0, 'A303 non-default file encoding') |
374 | 374 |
375 check_non_default_encoding.name = 'abp-non-default-encoding' | 375 check_non_default_encoding.name = 'abp-non-default-encoding' |
376 check_non_default_encoding.version = __version__ | 376 check_non_default_encoding.version = __version__ |
377 | 377 |
378 | 378 |
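Not part of the change, just a quick illustration of what the A303 check above reacts to (it is a physical-line check, so it receives one raw source line together with its line number):

    check_non_default_encoding('# -*- coding: utf-8 -*-', 1)
    # -> (0, 'A303 non-default file encoding')
    check_non_default_encoding('# -*- coding: utf-8 -*-', 3)
    # -> None; coding comments are only looked for on the first two lines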
379 def check_quotes(logical_line, tokens, previous_logical): | 379 def check_quotes(logical_line, tokens, previous_logical, checker_state): |
380 first_token = True | 380 first_token = True |
381 | 381 |
| 382 # check if in unicode_literals mode |
| 383 token_strings = [t[1] for t in tokens] |
| 384 future_import = token_strings[:3] == ['from', '__future__', 'import'] |
| 385 |
| 386 if future_import and 'unicode_literals' in token_strings: |
| 387 checker_state['has_unicode_literals'] = True |
| 388 |
382 for kind, token, start, end, _ in tokens: | 389 for kind, token, start, end, _ in tokens: |
383 if kind == tokenize.INDENT or kind == tokenize.DEDENT: | 390 if kind == tokenize.INDENT or kind == tokenize.DEDENT: |
384 continue | 391 continue |
385 | 392 |
386 if kind == tokenize.STRING: | 393 if kind == tokenize.STRING: |
387 match = re.search(r'^([rub]*)([\'"]{1,3})(.*)\2$', | 394 match = re.search(r'^([rub]*)([\'"]{1,3})(.*)\2$', |
388 token, re.IGNORECASE | re.DOTALL) | 395 token, re.IGNORECASE | re.DOTALL) |
389 prefixes, quote, text = match.groups() | 396 prefixes, quote, text = match.groups() |
390 prefixes = prefixes.lower() | 397 prefixes = prefixes.lower() |
391 | 398 |
| 399 if 'u' in prefixes: |
| 400 yield (start, 'A112 use "from __future__ import ' |
| 401 'unicode_literals" instead of ' |
| 402 'prefixing literals with "u"') |
| 403 |
392 if first_token and re.search(r'^(?:(?:def|class)\s|$)', | 404 if first_token and re.search(r'^(?:(?:def|class)\s|$)', |
393 previous_logical): | 405 previous_logical): |
394 if quote != '"""': | 406 if quote != '"""': |
395 yield (start, 'A109 use triple double ' | 407 yield (start, 'A109 use triple double ' |
396 'quotes for docstrings') | 408 'quotes for docstrings') |
397 elif prefixes: | 409 elif start[0] != end[0]: |
398 yield (start, "A109 don't use u'', b'' " | 410 pass |
399 "or r'' for doc strings") | 411 elif 'r' in prefixes: |
400 elif start[0] == end[0]: | 412 if quote != "'" and not (quote == '"' and "'" in text): |
401 if 'r' in prefixes: | 413 yield (start, 'A110 use single quotes for raw string') |
402 if quote != "'" and not (quote == '"' and "'" in text): | 414 else: |
403 yield (start, 'A110 use single quotes for raw string') | 415 prefix = '' |
| 416 if sys.version_info[0] >= 3: |
| 417 if 'b' in prefixes: |
| 418 prefix = 'b' |
404 else: | 419 else: |
405 prefix = 'b' if sys.version_info[0] >= 3 else 'u' | 420 u_literals = checker_state.get('has_unicode_literals') |
406 if prefix not in prefixes: | 421 if 'u' in prefixes or u_literals and 'b' not in prefixes: |
407 prefix = '' | 422 prefix = 'u' |
408 | 423 |
409 literal = '{0}{1}{2}{1}'.format(prefix, quote, text) | 424 literal = '{0}{1}{2}{1}'.format(prefix, quote, text) |
410 if ascii(eval(literal)) != literal: | 425 if ascii(eval(literal)) != literal: |
411 yield (start, "A110 string literal doesn't match " | 426 yield (start, "A110 string literal doesn't match " |
412 '{}()'.format(ascii.__name__)) | 427 '{}()'.format(ascii.__name__)) |
413 | 428 |
414 first_token = False | 429 first_token = False |
415 | 430 |
416 check_quotes.name = 'abp-quotes' | 431 check_quotes.name = 'abp-quotes' |
417 check_quotes.version = __version__ | 432 check_quotes.version = __version__ |
418 | 433 |
419 | 434 |
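For context, a minimal driver (hypothetical, not part of flake8-abp) showing what the reworked check_quotes reports for a few literals. It assumes Python 3, where the module's ascii() compat shim is the builtin, and tokenizes a single logical line roughly the way pycodestyle would:

    import io
    import tokenize

    def run_check_quotes(source, previous_logical='', state=None):
        # Tokenize one logical line and collect the (position, message) warnings.
        tokens = list(tokenize.generate_tokens(io.StringIO(source + '\n').readline))
        return list(check_quotes(source, tokens, previous_logical, state or {}))

    run_check_quotes("x = u'foo'")   # A112: use unicode_literals, drop the "u" prefix
    run_check_quotes('x = r"raw"')   # A110: use single quotes for raw string
    run_check_quotes("x = 'foo'")    # no warnings: already matches ascii()

On Python 2, once "from __future__ import unicode_literals" has been seen (setting checker_state['has_unicode_literals']), unprefixed literals are compared against their u-prefixed repr, so the same state dict has to be shared across calls for that part to kick in.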
420 def check_redundant_parenthesis(logical_line, tokens): | 435 def check_redundant_parenthesis(logical_line, tokens): |
421 start_line = tokens[0][2][0] | 436 start_line = tokens[0][2][0] |
422 level = 0 | 437 level = 0 |
(...skipping 39 matching lines...)
462 if tokens[i + 1][:2] != (tokenize.OP, ':'): | 477 if tokens[i + 1][:2] != (tokenize.OP, ':'): |
463 break | 478 break |
464 | 479 |
465 return [(pos, 'A111 redundant parenthesis for {} ' | 480 return [(pos, 'A111 redundant parenthesis for {} ' |
466 'statement'.format(statement))] | 481 'statement'.format(statement))] |
467 | 482 |
468 return [] | 483 return [] |
469 | 484 |
470 check_redundant_parenthesis.name = 'abp-redundant-parenthesis' | 485 check_redundant_parenthesis.name = 'abp-redundant-parenthesis' |
471 check_redundant_parenthesis.version = __version__ | 486 check_redundant_parenthesis.version = __version__ |
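One more illustration, hedged because most of this check sits in the collapsed lines above: judging by the A111 message text, the target is conditions that are needlessly wrapped in parentheses in statements such as if or while, e.g.:

    if (foo == bar):      # A111 redundant parenthesis for if statement
        pass
    if foo == bar:        # preferred
        pass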