Commit 1de6922

Merge branch 'master' into unasync-remove
2 parents: 8ee7831 + 1b6fd0c

3 files changed: +30 -51 lines

ci.sh (+1 -1)

@@ -4,7 +4,7 @@ set -ex

 BLACK_VERSION=22.6.0

-pip install -U pip setuptools wheel
+python -m pip install -U pip setuptools wheel

 python setup.py sdist --formats=zip
 pip install dist/*.zip

setup.py (+1 -1)

@@ -17,7 +17,7 @@
     include_package_data=True,
     packages=find_packages("src"),
     package_dir={"": "src"},
-    install_requires=['tokenize_rt; python_version >= "3.8.0"'],
+    install_requires=["tokenize_rt"],
     keywords=["async"],
     python_requires=">=3.7",
     classifiers=[

src/unasync/__init__.py (+28 -49)

@@ -7,6 +7,7 @@
 import sys
 import tokenize as std_tokenize

+import tokenize_rt
 from setuptools.command import build_py as orig

 from ._version import __version__  # NOQA

@@ -66,13 +67,11 @@ def _match(self, filepath):
     def _unasync_file(self, filepath):
         with open(filepath, "rb") as f:
             encoding, _ = std_tokenize.detect_encoding(f.readline)
-            f.seek(0)
-            tokens = _tokenize(f)
+
+        with open(filepath, "rt", encoding=encoding) as f:
+            tokens = tokenize_rt.src_to_tokens(f.read())
         tokens = self._unasync_tokens(tokens)
-        result = _untokenize(tokens)
-        # Limit to Python3.8+ until we know how to support older versions
-        if (sys.version_info[0] == 3 and sys.version_info[1] >= 8) or sys.version_info[0] > 3:
-            result = self._unasync_remove(contents=result, filename=filepath)
+        result = tokenize_rt.tokens_to_src(tokens)
         outfilepath = filepath.replace(self.fromdir, self.todir)
         os.makedirs(os.path.dirname(outfilepath), exist_ok=True)
         with open(outfilepath, "wb") as f:
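
The rewritten _unasync_file detects the file's encoding with the stdlib tokenize module, re-reads the file as text, and round-trips it through tokenize_rt instead of the hand-rolled token helpers. A minimal sketch of that flow, assuming a pass-through in place of the real _unasync_tokens() rewrite and leaving out the output-path handling (the function name is illustrative):

import tokenize as std_tokenize

import tokenize_rt


def roundtrip_source(filepath):
    # Detect the declared encoding the same way the diff above does.
    with open(filepath, "rb") as f:
        encoding, _ = std_tokenize.detect_encoding(f.readline)

    # Re-open as text and convert to tokenize_rt tokens; each token carries
    # its exact source text in token.src, and the whitespace between tokens
    # is kept as separate UNIMPORTANT_WS tokens.
    with open(filepath, "rt", encoding=encoding) as f:
        tokens = tokenize_rt.src_to_tokens(f.read())

    # A real rule would rewrite tokens here; this sketch passes them through,
    # so the returned string equals the original file contents.
    return tokenize_rt.tokens_to_src(tokens)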

@@ -104,25 +103,30 @@ def _unasync_remove(self, contents, filename):
         return new_contents

     def _unasync_tokens(self, tokens):
-        # TODO __await__, ...?
-        used_space = None
-        for space, toknum, tokval in tokens:
-            if tokval in ["async", "await"]:
-                # When removing async or await, we want to use the whitespace that
-                # was before async/await before the next token so that
-                # `print(await stuff)` becomes `print(stuff)` and not
-                # `print( stuff)`
-                used_space = space
+        skip_next = False
+        for i, token in enumerate(tokens):
+            if skip_next:
+                skip_next = False
+                continue
+
+            if token.src in ["async", "await"]:
+                # When removing async or await, we want to skip the following whitespace
+                # so that `print(await stuff)` becomes `print(stuff)` and not `print( stuff)`
+                skip_next = True
             else:
-                if toknum == std_tokenize.NAME:
-                    tokval = self._unasync_name(tokval)
-                elif toknum == std_tokenize.STRING:
-                    left_quote, name, right_quote = tokval[0], tokval[1:-1], tokval[-1]
-                    tokval = left_quote + self._unasync_name(name) + right_quote
-                if used_space is None:
-                    used_space = space
-                yield (used_space, tokval)
-                used_space = None
+                if token.name == "NAME":
+                    token = token._replace(src=self._unasync_name(token.src))
+                elif token.name == "STRING":
+                    left_quote, name, right_quote = (
+                        token.src[0],
+                        token.src[1:-1],
+                        token.src[-1],
+                    )
+                    token = token._replace(
+                        src=left_quote + self._unasync_name(name) + right_quote
+                    )
+
+                yield token

     def _unasync_name(self, name):
         if name in self.token_replacements:
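
In the new _unasync_tokens, dropping an async or await token also sets skip_next, so the whitespace token that tokenize_rt emits for the space after the keyword is discarded on the next iteration. A standalone sketch of that skip-the-next-token pattern, with an illustrative function name and sample input that are not part of the diff:

import tokenize_rt


def drop_async_await(src):
    out = []
    skip_next = False
    for token in tokenize_rt.src_to_tokens(src):
        if skip_next:
            # Drop the token right after "async"/"await" (normally the
            # UNIMPORTANT_WS token holding the space that followed the keyword).
            skip_next = False
            continue
        if token.src in ("async", "await"):
            skip_next = True
            continue
        out.append(token)
    return tokenize_rt.tokens_to_src(out)


# `print(await stuff)` becomes `print(stuff)`, not `print( stuff)`.
print(drop_async_await("print(await stuff)\n"))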

@@ -151,31 +155,6 @@ def unasync_files(fpath_list, rules):
 Token = collections.namedtuple("Token", ["type", "string", "start", "end", "line"])


-def _tokenize(f):
-    last_end = (1, 0)
-    for tok in std_tokenize.tokenize(f.readline):
-        if tok.type == std_tokenize.ENCODING:
-            continue
-
-        if last_end[0] < tok.start[0]:
-            yield ("", std_tokenize.STRING, " \\\n")
-            last_end = (tok.start[0], 0)
-
-        space = ""
-        if tok.start > last_end:
-            assert tok.start[0] == last_end[0]
-            space = " " * (tok.start[1] - last_end[1])
-        yield (space, tok.type, tok.string)
-
-        last_end = tok.end
-        if tok.type in [std_tokenize.NEWLINE, std_tokenize.NL]:
-            last_end = (tok.end[0] + 1, 0)
-
-
-def _untokenize(tokens):
-    return "".join(space + tokval for space, tokval in tokens)
-
-
 _DEFAULT_RULE = Rule(fromdir="/_async/", todir="/_sync/")
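
The deleted _tokenize/_untokenize helpers existed only to track the whitespace between stdlib tokens so the source could be reassembled afterwards. tokenize_rt makes that bookkeeping unnecessary because an unmodified token stream converts back to the original source exactly, which is the property the new _unasync_file relies on. A quick illustrative check, with an arbitrary sample string:

import tokenize_rt

sample = "async def fetch():\n    return await get_page(url)  # comment\n"
tokens = tokenize_rt.src_to_tokens(sample)
assert tokenize_rt.tokens_to_src(tokens) == sample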
