| Index: flake8-abp/flake8_abp.py |
| =================================================================== |
| --- a/flake8-abp/flake8_abp.py |
| +++ b/flake8-abp/flake8_abp.py |
| @@ -372,23 +372,26 @@ |
| return (0, 'A303 non-default file encoding') |
| check_non_default_encoding.name = 'abp-non-default-encoding' |
| check_non_default_encoding.version = __version__ |
| def check_quotes(logical_line, tokens, previous_logical): |
| first_token = True |
| - is_unicode_literals = False |
| + global is_unicode_literals |
| for kind, token, start, end, _ in tokens: |
| if kind == tokenize.INDENT or kind == tokenize.DEDENT: |
| continue |
| - if token is 'unicode_literals': |
| + if start[0] == 1: |
|
Vasily Kuznetsov
2016/05/20 08:46:24
This `if` as well as the following one don't need… [comment truncated in export]
|
| + is_unicode_literals = False |
| + |
| + if logical_line == 'from __future__ import unicode_literals': |
|
Vasily Kuznetsov
2016/05/20 08:46:25
Actually this is still not completely right. It co… [comment truncated in export]
|
| is_unicode_literals = True |
| if kind == tokenize.STRING: |
| match = re.search(r'^(u)?(b)?(r)?((""")?.*)$', |
| token, re.IGNORECASE | re.DOTALL) |
| (is_unicode, is_bytes, is_raw, |
| literal, has_doc_quotes) = match.groups() |
| @@ -403,19 +406,17 @@ |
| elif start[0] == end[0]: |
| if is_raw: |
| literal = re.sub(r'\\(?!{})'.format(literal[0]), |
| '\\\\\\\\', literal) |
| if sys.version_info[0] >= 3: |
| if is_bytes: |
| literal = 'b' + literal |
| - elif is_unicode: |
| - literal = 'u' + literal |
| - elif not is_unicode_literals: |
| + elif is_unicode or is_unicode_literals: |
|
Sebastian Noack
2016/05/20 13:48:01
Perhaps we should generate a warning when using st… [comment truncated in export]
Vasily Kuznetsov
2016/05/23 08:51:14
Yes, this sounds like a good idea. Unfortunately i… [comment truncated in export]
Sebastian Noack
2016/05/24 12:41:35
Since compatibility with Python 3 is now mandatory… [comment truncated in export]
Vasily Kuznetsov
2016/05/24 13:02:42
Yep. All exactly as you say. And it will actually… [comment truncated in export]
|
| literal = 'u' + literal |
| if ascii(eval(literal)) != literal: |
| yield (start, "A110 string literal doesn't match " |
| '{}()'.format(ascii.__name__)) |
| first_token = False |