Index: flake8-abp/flake8_abp.py
===================================================================
--- a/flake8-abp/flake8_abp.py
+++ b/flake8-abp/flake8_abp.py
@@ -371,50 +371,69 @@
 def check_non_default_encoding(physical_line, line_number):
     if line_number <= 2 and re.search(r'^\s*#.*coding[:=]', physical_line):
         return (0, 'A303 non-default file encoding')
 check_non_default_encoding.name = 'abp-non-default-encoding'
 check_non_default_encoding.version = __version__
-def check_quotes(logical_line, tokens, previous_logical):
+def check_quotes(logical_line, tokens, previous_logical, checker_state):
     first_token = True
+    if tokens[0][3] == 1:
+        checker_state['has_unicode_literals'] = False
Sebastian Noack
2016/06/01 09:37:52
Awesome that we can avoid the global variable. How
+
+    # check if in unicode_literals mode
+    token_strings = [t[1] for t in tokens]
+    future_import = token_strings[:3] == ['from', '__future__', 'import']
+
+    if future_import and 'unicode_literals' in token_strings:
+        checker_state['has_unicode_literals'] = True
+
+    has_unicode_literals = checker_state.get('has_unicode_literals')
     for kind, token, start, end, _ in tokens:
         if kind == tokenize.INDENT or kind == tokenize.DEDENT:
             continue
         if kind == tokenize.STRING:
             match = re.search(r'^([rub]*)([\'"]{1,3})(.*)\2$',
                               token, re.IGNORECASE | re.DOTALL)
             prefixes, quote, text = match.groups()
             prefixes = prefixes.lower()
+            if 'u' in prefixes and not has_unicode_literals:
Vasily Kuznetsov
2016/06/01 11:01:32
I wrote it before but maybe it got lost with all t
Jon Sonesen
2016/06/01 17:07:00
Thanks for the reminder I thought I had removed it
+                yield (start, 'A112 use "from __future__ import '
+                              'unicode_literals" instead of '
+                              'prefixing literals with "u"')
+
             if first_token and re.search(r'^(?:(?:def|class)\s|$)',
                                          previous_logical):
                 if quote != '"""':
                     yield (start, 'A109 use triple double '
                                   'quotes for docstrings')
-            elif prefixes:
-                    yield (start, "A109 don't use u'', b'' "
-                                  "or r'' for doc strings")
-            elif start[0] == end[0]:
-                if 'r' in prefixes:
-                    if quote != "'" and not (quote == '"' and "'" in text):
-                        yield (start, 'A110 use single quotes for raw string')
-                else:
-                    prefix = 'b' if sys.version_info[0] >= 3 else 'u'
-                    if prefix not in prefixes:
-                        prefix = ''
+            elif start[0] != end[0]:
Vasily Kuznetsov
2016/06/01 11:01:32
I have a mild preference for how this looked befor
Sebastian Noack
2016/06/01 11:05:01
I disagree. That way it's easier to understand tha
Vasily Kuznetsov
2016/06/01 11:34:31
Yeah, you have a point too. Ok, let's leave it as
+                pass
+            elif 'r' in prefixes:
+                if quote != "'" and not (quote == '"' and "'" in text):
+                    yield (start, 'A110 use single quotes for raw string')
+            else:
+                is_unicode = 'u' in prefixes
+                is_bytes = 'b' in prefixes
+                prefix = ''
+                if sys.version_info[0] >= 3:
+                    if 'b' in prefixes:
+                        prefix = 'b'
+                elif is_unicode or has_unicode_literals and not is_bytes:
Sebastian Noack
2016/06/01 09:37:52
For better code locality please retrieve checker_s
Jon Sonesen
2016/06/01 17:07:00
I agree with you regarding the use of the fetch ha
Jon Sonesen
2016/06/01 17:14:03
Ah, prior to reducing the indent level with the ch
Jon Sonesen
2016/06/02 01:14:10
Also I just tried to implement the if else logic a
+                    prefix = 'u'
-                    literal = '{0}{1}{2}{1}'.format(prefix, quote, text)
-                    if ascii(eval(literal)) != literal:
-                        yield (start, "A110 string literal doesn't match "
                                      '{}()'.format(ascii.__name__))
+                literal = '{0}{1}{2}{1}'.format(prefix, quote, text)
+                if ascii(eval(literal)) != literal:
+                    yield (start, "A110 string literal doesn't match "
+                                  '{}()'.format(ascii.__name__))
         first_token = False
 check_quotes.name = 'abp-quotes'
 check_quotes.version = __version__
 def check_redundant_parenthesis(logical_line, tokens):
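
For readers unfamiliar with the mechanism the thread above discusses: pep8/pycodestyle passes a per-file dict named checker_state to any check function that lists it in its signature, which is what lets this patch drop the module-level flag. The following is a minimal, self-contained sketch of that mechanism, not the patch under review; the check name, message code and exact logic are invented for illustration.

import tokenize


def check_example_state(logical_line, tokens, checker_state):
    # checker_state is a plain dict supplied by the framework; it is shared
    # between calls for the logical lines of the same file, so a flag set
    # while processing one line is still visible on later lines.
    token_strings = [t[1] for t in tokens]
    if (token_strings[:3] == ['from', '__future__', 'import'] and
            'unicode_literals' in token_strings):
        checker_state['has_unicode_literals'] = True

    if checker_state.get('has_unicode_literals'):
        return

    for kind, token, start, _, _ in tokens:
        # X100 is a made-up message code for this sketch.
        if kind == tokenize.STRING and token.lower().startswith('u'):
            yield (start, 'X100 u-prefixed literal without unicode_literals')

The real patch additionally resets the flag when processing starts at the top of a file, which this sketch leaves out.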