| Index: flake8-abp/flake8_abp.py |
| =================================================================== |
| --- a/flake8-abp/flake8_abp.py |
| +++ b/flake8-abp/flake8_abp.py |
| @@ -39,16 +39,18 @@ |
| } |
| ESSENTIAL_BUILTINS = set(dir(builtins)) - {'apply', 'buffer', 'coerce', |
| 'intern', 'file'} |
| LEAVE_BLOCK = (ast.Return, ast.Raise, ast.Continue, ast.Break) |
| VOLATILE = object() |
| +is_unicode_literals = False |
|
Sebastian Noack
2016/05/27 12:33:48
has_unicode_literals seems to be more correct gram
|
| + |
| def evaluate(node): |
| try: |
| return eval(compile(ast.Expression(node), '', 'eval'), {}) |
| except Exception: |
| return VOLATILE |
| @@ -372,44 +374,55 @@ |
| return (0, 'A303 non-default file encoding') |
| check_non_default_encoding.name = 'abp-non-default-encoding' |
| check_non_default_encoding.version = __version__ |
| def check_quotes(logical_line, tokens, previous_logical): |
| first_token = True |
| + global is_unicode_literals |
| + |
| + # check if this is beginning of file |
| + if tokens[0][3][0] == 1: |
| + is_unicode_literals = False |
| + |
| + # check if in unicode_literals mode |
| + token_strings = [t[1] for t in tokens] |
| + if token_strings[:3] == ['from', '__future__', 'import']: |
| + if 'unicode_literals' in token_strings: |
|
Sebastian Noack
2016/05/27 12:13:55
This is a superfluous level of indentation. I'd r
Jon Sonesen
2016/06/01 17:06:59
I like that and will get it done.
|
| + is_unicode_literals = True |
| for kind, token, start, end, _ in tokens: |
| if kind == tokenize.INDENT or kind == tokenize.DEDENT: |
| continue |
| if kind == tokenize.STRING: |
| match = re.search(r'^(u)?(b)?(r)?((""")?.*)$', |
| token, re.IGNORECASE | re.DOTALL) |
| (is_unicode, is_bytes, is_raw, |
| literal, has_doc_quotes) = match.groups() |
| + if is_unicode: |
| + yield (start, 'A112 use "from __future__ import' |
|
Sebastian Noack
2016/05/27 12:13:55
A space is missing here. This message would be pri
|
| + 'unicode_literals" instead of ' |
| + 'prefixing literals with "u"') |
| if first_token and re.search(r'^(?:(?:def|class)\s|$)', |
|
Sebastian Noack
2016/05/27 12:13:55
Since this is not an else or elif block, I think t
|
| previous_logical): |
| if not has_doc_quotes: |
| yield (start, 'A109 use triple double ' |
| 'quotes for docstrings') |
| - elif is_unicode or is_bytes or is_raw: |
| - yield (start, "A109 don't use u'', b'' " |
| - "or r'' for doc strings") |
| elif start[0] == end[0]: |
| if is_raw: |
| literal = re.sub(r'\\(?!{})'.format(literal[0]), |
| '\\\\\\\\', literal) |
| - |
| if sys.version_info[0] >= 3: |
| if is_bytes: |
| literal = 'b' + literal |
| - elif is_unicode: |
| + elif not is_bytes: |
|
Sebastian Noack
2016/05/27 12:33:48
Currently, the is_unicode_literals variable is nev
|
| literal = 'u' + literal |
| if ascii(eval(literal)) != literal: |
| yield (start, "A110 string literal doesn't match " |
| '{}()'.format(ascii.__name__)) |
| first_token = False |