Skip to content

Commit c04d9a4

Browse files
committed
Use a more precise return type for tokenize.untokenize()
1 parent 9f28171 commit c04d9a4

File tree

1 file changed

+2
-2
lines changed

1 file changed

+2
-2
lines changed

stdlib/tokenize.pyi

+2-2
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@ from collections.abc import Callable, Generator, Iterable, Sequence
44
from re import Pattern
55
from token import *
66
from token import EXACT_TOKEN_TYPES as EXACT_TOKEN_TYPES
7-
from typing import Any, NamedTuple, TextIO, type_check_only
7+
from typing import NamedTuple, TextIO, type_check_only
88
from typing_extensions import TypeAlias
99

1010
__all__ = [
@@ -132,7 +132,7 @@ class Untokenizer:
132132

133133
# the docstring says "returns bytes" but is incorrect --
134134
# if the ENCODING token is missing, it skips the encode
135-
def untokenize(iterable: Iterable[_Token]) -> Any: ...
135+
def untokenize(iterable: Iterable[_Token]) -> bytes | str: ...
136136
def detect_encoding(readline: Callable[[], bytes | bytearray]) -> tuple[str, Sequence[bytes]]: ...
137137
def tokenize(readline: Callable[[], bytes | bytearray]) -> Generator[TokenInfo, None, None]: ...
138138
def generate_tokens(readline: Callable[[], str]) -> Generator[TokenInfo, None, None]: ...

0 commit comments

Comments (0)