@@ -156,11 +156,6 @@ def lru_cache(maxsize=128): # noqa as it's a fake implementation.
156
156
)
157
157
DUNDER_REGEX = re .compile (r'^__([^\s]+)__ = ' )
158
158
159
- # Work around Python < 2.6 behaviour, which does not generate NL after
160
- # a comment which is on a line by itself.
161
- COMMENT_WITH_NL = tokenize .generate_tokens (['#\n ' ].pop ).send (None )[1 ] == '#\n '
162
-
163
-
164
159
_checks = {'physical_line' : {}, 'logical_line' : {}, 'tree' : {}}
165
160
166
161
@@ -1118,7 +1113,7 @@ def compound_statements(logical_line):
1118
1113
last_char = len (line ) - 1
1119
1114
found = line .find (':' )
1120
1115
prev_found = 0
1121
- counts = dict (( char , 0 ) for char in '{}[]()' )
1116
+ counts = { char : 0 for char in '{}[]()' }
1122
1117
while - 1 < found < last_char :
1123
1118
update_counts (line [prev_found :found ], counts )
1124
1119
if ((counts ['{' ] <= counts ['}' ] and # {'a': 1} (dict)
@@ -1762,9 +1757,11 @@ def parse_udiff(diff, patterns=None, parent='.'):
1762
1757
if path [:2 ] in ('b/' , 'w/' , 'i/' ):
1763
1758
path = path [2 :]
1764
1759
rv [path ] = set ()
1765
- return dict ([(os .path .join (parent , filepath ), rows )
1766
- for (filepath , rows ) in rv .items ()
1767
- if rows and filename_match (filepath , patterns )])
1760
+ return {
1761
+ os .path .join (parent , filepath ): rows
1762
+ for (filepath , rows ) in rv .items ()
1763
+ if rows and filename_match (filepath , patterns )
1764
+ }
1768
1765
1769
1766
1770
1767
def normalize_paths (value , parent = os .curdir ):
@@ -1807,11 +1804,6 @@ def _is_eol_token(token):
1807
1804
return token [0 ] in NEWLINE or token [4 ][token [3 ][1 ]:].lstrip () == '\\ \n '
1808
1805
1809
1806
1810
- if COMMENT_WITH_NL :
1811
- def _is_eol_token (token , _eol_token = _is_eol_token ):
1812
- return _eol_token (token ) or (token [0 ] == tokenize .COMMENT and
1813
- token [1 ] == token [4 ])
1814
-
1815
1807
########################################################################
1816
1808
# Framework to run all checks
1817
1809
########################################################################
@@ -2079,14 +2071,6 @@ def check_all(self, expected=None, line_offset=0):
2079
2071
del self .tokens [0 ]
2080
2072
else :
2081
2073
self .check_logical ()
2082
- elif COMMENT_WITH_NL and token_type == tokenize .COMMENT :
2083
- if len (self .tokens ) == 1 :
2084
- # The comment also ends a physical line
2085
- token = list (token )
2086
- token [1 ] = text .rstrip ('\r \n ' )
2087
- token [3 ] = (token [2 ][0 ], token [2 ][1 ] + len (token [1 ]))
2088
- self .tokens = [tuple (token )]
2089
- self .check_logical ()
2090
2074
if self .tokens :
2091
2075
self .check_physical (self .lines [- 1 ])
2092
2076
self .check_logical ()
@@ -2154,8 +2138,8 @@ def get_file_results(self):
2154
2138
2155
2139
def get_count(self, prefix=''):
    """Return the total count of errors and warnings.

    Only message codes starting with *prefix* are counted; the default
    empty prefix matches every recorded code.
    """
    total = 0
    for code in self.messages:
        if code.startswith(prefix):
            total += self.counters[code]
    return total
2159
2143
2160
2144
def get_statistics (self , prefix = '' ):
2161
2145
"""Get statistics for message codes that start with the prefix.
@@ -2503,8 +2487,7 @@ def read_config(options, args, arglist, parser):
2503
2487
warnings .warn ('[pep8] section is deprecated. Use [pycodestyle].' )
2504
2488
2505
2489
if pycodestyle_section :
2506
- option_list = dict ([(o .dest , o .type or o .action )
2507
- for o in parser .option_list ])
2490
+ option_list = {o .dest : o .type or o .action for o in parser .option_list }
2508
2491
2509
2492
# First, read the default values
2510
2493
(new_options , __ ) = parser .parse_args ([])
0 commit comments