Commit 60a6847

BowenBao authored and pytorchmergebot committed
Bump black version to 23.1.0 (pytorch#96578)
Pull Request resolved: pytorch#96578
Approved by: https://github.com/ezyang
1 parent a229e78 commit 60a6847
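
Nearly all of the 114 touched files are mechanical reformatting: black 23.1.0's 2023 stable style removes blank lines at the start of a function or class body and drops the redundant parentheses around tuple targets in for loops, which is exactly what the hunks below show. A minimal sketch (illustrative only, not code from this commit; assumes black==23.1.0 is installed) reproducing the new style through black's format_str API:

import black

# Old-style source: a blank line opening the body and a parenthesized
# tuple target -- the two patterns rewritten throughout this diff.
OLD_STYLE = (
    "def check(actual, expected):\n"
    "\n"
    "    for (a, b) in zip(actual, expected):\n"
    "        assert a == b\n"
)

print(black.format_str(OLD_STYLE, mode=black.Mode()))
# Under 23.1.0 this should print:
# def check(actual, expected):
#     for a, b in zip(actual, expected):
#         assert a == b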


114 files changed: +111, -167 lines

.lintrunner.toml

+1 -1

@@ -878,7 +878,7 @@ init_command = [
     'tools/linter/adapters/pip_init.py',
     '--dry-run={{DRYRUN}}',
     '--no-black-binary',
-    'black==22.3.0',
+    'black==23.1.0',
     'ufmt==1.3.3',
     'usort==1.0.2',
 ]
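
Since lintrunner's init_command drives the pip install, re-running lintrunner init after pulling this change should pick up the new pin. A quick sanity check (a sketch, assuming black exposes __version__ as recent releases do):

import black

# Verify the environment now has the formatter this commit pins.
assert black.__version__ == "23.1.0", f"unexpected black {black.__version__}"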

benchmarks/dynamo/benchmarks.py

+1

@@ -4,6 +4,7 @@

 from typing import Set

+
 # Note - hf and timm have their own version of this, torchbench does not
 # TOOD(voz): Someday, consolidate all the files into one runner instead of a shim like this...
 def model_names(filename: str) -> Set[str]:
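
Unlike most hunks here, this one adds a line: under 23.1.0 a comment block sitting directly above a top-level def counts as part of that definition, so the customary two blank lines now go above the comments rather than between them and the def. A tiny snippet (illustrative, not from the commit) that should be rewritten the same way:

import black

# One blank line between the import and the commented function:
# black 23.1.0 widens it to two, mirroring the hunk above.
SRC = "import os\n\n# helper\ndef f():\n    return os.sep\n"
print(black.format_str(SRC, mode=black.Mode()))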

benchmarks/dynamo/check_graph_breaks.py

-2

@@ -11,12 +11,10 @@ def get_field(csv, model_name: str, field: str, typ=float):


 def check_graph_breaks(actual_csv, expected_csv, expected_filename):
-
     failed = []
     improved = []

     for model in actual_csv["name"]:
-
         graph_breaks = get_field(actual_csv, model, "graph_breaks", typ=int)
         expected_graph_breaks = get_field(expected_csv, model, "graph_breaks", typ=int)


benchmarks/dynamo/ci_expected_accuracy/update_expected.py

-2

@@ -31,7 +31,6 @@


 def query_job_sha(repo, sha):
-
     params = {
         "parameters": [
             {"name": "sha", "type": "string", "value": sha},
@@ -108,7 +107,6 @@ def write_filtered_csvs(root_path, dataframes):


 if __name__ == "__main__":
-
     parser = argparse.ArgumentParser(
         description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
     )

benchmarks/dynamo/huggingface.py

-2

@@ -373,7 +373,6 @@ def load_model(
         model_name,
         batch_size=None,
     ):
-
         is_training = self.args.training
         use_eval_mode = self.args.use_eval_mode
         dtype = torch.float32
@@ -513,7 +512,6 @@ def refresh_model_names_and_batch_sizes():
     lm_seen = set()
     family_seen = set()
     for cls_name in hf_fx._SUPPORTED_MODELS:
-
         if "For" not in cls_name:
             continue


benchmarks/dynamo/microbenchmarks/bench_autotune_conv.py

-1

@@ -73,7 +73,6 @@ def bench_op(
     warmup=25,
     rep=75,
 ):
-
     skip = False
     # allocate inputs, nchw
     x = torch.randn((BATCH, IN_C, IN_H, IN_W), dtype=dtype, device="cuda")

benchmarks/dynamo/microbenchmarks/bench_conv.py

-1

@@ -70,7 +70,6 @@ def bench_op(
     warmup=25,
     rep=75,
 ):
-
     # allocate inputs, nchw
     x = torch.randn((BATCH, IN_C, IN_H, IN_W), dtype=dtype, device="cuda")
     w = torch.randn(

benchmarks/dynamo/microbenchmarks/bench_conv1x1.py

-1

@@ -66,7 +66,6 @@ def bench_op(
     warmup=25,
     rep=75,
 ):
-
     # allocate inputs, nchw
     x = torch.randn((BATCH, IN_C, IN_H, IN_W), dtype=dtype, device="cuda")
     w = torch.randn(

benchmarks/dynamo/microbenchmarks/bench_conv_fusion.py

-1

@@ -236,7 +236,6 @@ def bench(layer_params, layer_id, p, fusion_types=[""]):

     row = [layer_id]
     for fusion_type in fusion_types:
-
         if fusion_type == "":
             conv_torchinductor = getattr(Func, "conv_torchinductor")
             conv = getattr(Func, "conv")

benchmarks/dynamo/microbenchmarks/bench_mm_fusion.py

-1

@@ -56,7 +56,6 @@ def tflops(ms):

     row = [layer_id]
     for fusion_type in fusion_types:
-
         if fusion_type == "":
             fn_mm = getattr(Func, "mm")
         else:

benchmarks/dynamo/microbenchmarks/profile_conv.py

-1

@@ -46,7 +46,6 @@ def profile_op(
     warmup=25,
     rep=50,
 ):
-
     # allocate inputs, nchw
     x = torch.randn((BATCH, IN_C, IN_H, IN_W), dtype=dtype, device="cuda")
     w = torch.randn(

benchmarks/dynamo/parse_logs.py

+1

@@ -60,6 +60,7 @@ def chunker(seq, size):
 out.writeheader()
 out.writerow({"explain": gist_url})

+
 # Sometimes backtraces will be in third party code, which results
 # in very long file names. Delete the absolute path in this case.
 def normalize_file(f):

benchmarks/dynamo/timm_models.py

-1

@@ -182,7 +182,6 @@ def load_model(
         model_name,
         batch_size=None,
     ):
-
         is_training = self.args.training
         use_eval_mode = self.args.use_eval_mode


benchmarks/dynamo/torchbench.py

-1

@@ -242,7 +242,6 @@ def load_model(
         batch_size=None,
         part=None,
     ):
-
         is_training = self.args.training
         use_eval_mode = self.args.use_eval_mode
         dynamic_shapes = self.args.dynamic_shapes

test/distributed/_composable/fully_shard/test_fully_shard_init.py

+2 -2

@@ -120,7 +120,7 @@ def _test_fully_shard_construction(
         composable_handles = traversal_utils._get_fsdp_handles(composable_module)
         fsdp_wrapped_handles = traversal_utils._get_fsdp_handles(fsdp_wrapped_model)
         self.assertEqual(len(composable_handles), len(fsdp_wrapped_handles))
-        for (composable_handle, fsdp_wrapped_handle) in zip(
+        for composable_handle, fsdp_wrapped_handle in zip(
             composable_handles, fsdp_wrapped_handles
         ):
             self.assertEqual(
@@ -179,7 +179,7 @@ def test_sync_module_states(self):
             policy=policy,
             sync_module_states=True,
         )
-        for (composable_param, fsdp_wrapped_param) in zip(
+        for composable_param, fsdp_wrapped_param in zip(
             composable_module.parameters(),
             fsdp_wrapped_model.parameters(),
         ):

test/distributed/fsdp/test_fsdp_checkpoint.py

+1 -2

@@ -116,7 +116,7 @@ def _verify_parity(self, losses, outputs, models):
         assert outputs
         assert models

-        for (l, o) in zip(losses[1:], outputs[1:]):
+        for l, o in zip(losses[1:], outputs[1:]):
             self.assertEqual(losses[0], l)
             self.assertEqual(outputs[0], o)

@@ -324,7 +324,6 @@ def forward(self, x):


 class TestFSDPCheckpointSubmodule(FSDPTest):
-
     # TODO: grad value checks occasionally fails when use_reentrant = True
     @skip_if_lt_x_gpu(2)
     @parametrize("use_reentrant", [False])

test/distributed/fsdp/test_fsdp_comm_hooks.py

-5

@@ -70,7 +70,6 @@ def forward(self, x):


 class DummyState:
-
     __slots__ = ["process_group", "noise"]

     def __init__(self, process_group: dist.ProcessGroup, noise: int):
@@ -157,7 +156,6 @@ def test_default_communication_hook_behavior(
         self.assertEqual(entry._communication_hook, default_hook)

         for _ in range(4):
-
             # Clear gradients
             net_default_hook.zero_grad()
             loss = net_default_hook(inpt).sum()
@@ -183,7 +181,6 @@ def _get_submodules(self, fsdp_net):
         ]

     def _init_model(self, core, sharding_strategy, mixed_precision=None):
-
         device = torch.device("cuda")
         return FSDP(
             core,
@@ -424,7 +421,6 @@ def _check_low_precision_hook(
     def test_fp16_hook(
         self, has_wrapping: bool, sharding_strategy: Optional[ShardingStrategy]
     ):
-
         state = default_hooks.LowPrecisionState(process_group=_get_default_group())
         hook = default_hooks.fp16_compress_hook

@@ -452,7 +448,6 @@ def test_fp16_hook(
     def test_bf16_hook(
         self, has_wrapping: bool, sharding_strategy: Optional[ShardingStrategy]
     ):
-
         state = default_hooks.LowPrecisionState(process_group=_get_default_group())
         hook = default_hooks.bf16_compress_hook


test/distributed/fsdp/test_fsdp_grad_acc.py

+1 -1

@@ -160,7 +160,7 @@ def permute_tensor(x: torch.Tensor):
         num_iters_to_acc = sum(config.num_iters for config in configs)
         for _ in range(num_iters_to_acc - 1):
             batches.append(tuple(permute_tensor(t) for t in batch))
-        for (batch1, batch2) in itertools.combinations(batches, r=2):
+        for batch1, batch2 in itertools.combinations(batches, r=2):
             for t1, t2 in zip(batch1, batch2):
                 assert not torch.all(
                     t1 == t2

test/distributed/fsdp/test_fsdp_optim_state.py

-1

@@ -1338,7 +1338,6 @@ def _test_rekey_optim_state_dict_to_names(
         use_multiple_param_groups: bool,
         use_optim_input: bool,
     ):
-
         NUM_ITERS = 3
         # Run a wrapped model for a few iterations
         model1, optim1, optim_input1 = self._init_nested_model(

test/distributed/fsdp/test_fsdp_state_dict.py

+4 -4

@@ -937,14 +937,14 @@ def _create_module(wrap_fsdp=True):
         # Check that it can be loaded into FSDP.
         new_fsdp, _ = _create_module()
         _zero_model(new_fsdp)
-        for (p1, p2) in zip(fsdp.parameters(), new_fsdp.parameters()):
+        for p1, p2 in zip(fsdp.parameters(), new_fsdp.parameters()):
             self.assertNotEqual(p1, p2)
         with FSDP.state_dict_type(new_fsdp, STATE_DICT_MAPPING[state_dict_type]):
             if state_dict_type != "local_state_dict":
                 # FlatParameter has not supported deepcopy yet.
                 state_dict = deepcopy(state_dict)
             new_fsdp.load_state_dict(state_dict, strict=True)
-        for (p1, p2) in zip(fsdp.parameters(), new_fsdp.parameters()):
+        for p1, p2 in zip(fsdp.parameters(), new_fsdp.parameters()):
             self.assertEqual(p1, p2)

         # Test that the checkpoint can be loaded into a local model.
@@ -954,7 +954,7 @@ def _create_module(wrap_fsdp=True):
             param.zero_()

         with fsdp.summon_full_params(fsdp):
-            for (p1, p2) in zip(fsdp.parameters(), local.parameters()):
+            for p1, p2 in zip(fsdp.parameters(), local.parameters()):
                 self.assertNotEqual(p1, p2)

         if state_dict_type == "local_state_dict":
@@ -963,7 +963,7 @@ def _create_module(wrap_fsdp=True):
         with fsdp.summon_full_params(fsdp):
             if self.rank == 0:
                 local.load_state_dict(state_dict, strict=True)
-                for (p1, p2) in zip(fsdp.parameters(), local.parameters()):
+                for p1, p2 in zip(fsdp.parameters(), local.parameters()):
                     self.assertEqual(p1, p2)

     @skip_if_lt_x_gpu(2)

test/distributed/fsdp/test_shard_utils.py

-1

@@ -31,7 +31,6 @@ def _get_and_check_split_sizes(
         out_offsets,
         in_split_sizes,
     ):
-
         for my_rank in range(world_size):
             _in_split_sizes = in_split_sizes[my_rank]
             _out_split_sizes = [

test/dynamo/test_modules.py

+1 -3

@@ -847,7 +847,6 @@ def __torch_function__(cls, func, types, args=(), kwargs=None):
         torch._dynamo.config.traceable_tensor_subclasses.add(TensorProxy)

         try:
-
             x = torch.randn(1).as_subclass(TensorProxy)
             cnt = torch._dynamo.testing.CompileCounter()
             out1 = foo(x)
@@ -862,7 +861,6 @@ def __torch_function__(cls, func, types, args=(), kwargs=None):

     def test_torch_function_with_closure(self):
         def run():
-
             counter = 0

             def foo(x):
@@ -1097,7 +1095,7 @@ def forward(self, x):
         opt_mod = torch._dynamo.optimize("eager")(mod)

         # Check parameteres and buffers
-        for (p1, p2) in zip(mod.parameters(), opt_mod.parameters()):
+        for p1, p2 in zip(mod.parameters(), opt_mod.parameters()):
             self.assertTrue(id(p1) == id(p2))

     def test_recursion(self):

test/dynamo/test_repros.py

-2

@@ -1572,7 +1572,6 @@ def forward(self, x):
         self.assertEqual(y, 10)

     def test_sort_out(self):
-
         dtype = torch.float32
         device = "cpu"

@@ -1607,7 +1606,6 @@ def forward(self, x):
         self.assertTrue(same(ref, res))

     def test_sigmoid_out(self):
-
         dtype = torch.float32
         device = "cpu"


test/inductor/test_config.py

-1

@@ -178,7 +178,6 @@ def d(x):
         a(torch.randn(10))

     def test_api_options(self):
-
         reduce_overhead_opts = torch._inductor.list_mode_options("reduce-overhead")
         self.assertEqual(reduce_overhead_opts["triton.cudagraphs"], True)

test/inductor/test_torchinductor_dynamic_shapes.py

-1

@@ -79,7 +79,6 @@ class DynamicShapesCudaTests(TestCase):


 class TestInductorDynamic(TestCase):
-
     compile_fn = partial(torch.compile, dynamic=True)

     def setUp(self):

test/inductor/test_torchinductor_opinfo.py

-1

@@ -597,7 +597,6 @@ def fn(*args, **kwargs):
            )

        except Exception as e:
-
            if test_expect is ExpectedTestResult.XFAILURE:
                raise e

test/onnx/pytorch_test_common.py

+1
Original file line numberDiff line numberDiff line change
@@ -48,6 +48,7 @@ def wrapper(*args, **kwargs):
4848
lambda: not torch.cuda.is_bf16_supported(), "BFloat16 CUDA is not available"
4949
)
5050

51+
5152
# skips tests for all versions below min_opset_version.
5253
# if exporting the op is only supported after a specific version,
5354
# add this wrapper to prevent running the test for opset_versions

test/onnx/test_onnx_opset.py

-1

@@ -494,7 +494,6 @@ def forward(self, x, grid, mode, padding_mode, align_corers):
            ("zeros", "border", "reflection"),
            (True, False),
        ):
-
            args = (
                torch.randn(n, c, h_in, w_in),  # x
                torch.randn(n, h_out, w_out, 2),  # grid,

test/onnx/test_onnxscript_no_runtime.py

-2

@@ -13,14 +13,12 @@


 class TestONNXScriptExport(common_utils.TestCase):
-
     # opset version is
     # 1. local function is supported after opset 15
     # 2. onnx-script requires users to determine opset in local function
     opset_version = 15

     def test_onnxscript_registration_with_multiple_models(self):
-
         from onnxscript.onnx_opset import opset15 as op

         # 1. Register Selu onnxscript function as custom Op

test/onnx/test_onnxscript_runtime.py

-3

@@ -12,14 +12,12 @@


 class TestONNXScriptRuntime(onnx_test_common._TestONNXRuntime):
-
     # opset version is
     # 1. local function is supported after opset 15
     # 2. onnx-script requires users to determine opset in local function
     opset_version = 15

     def test_selu_from_onnxscript_example(self):
-
         x = torch.randn(1, 2, 3, 4, requires_grad=True)
         model = torch.nn.SELU()

@@ -52,7 +50,6 @@ def custom_selu(g: jit_utils.GraphContext, X):
         self.run_test(model, x)

     def test_layer_norm(self):
-
         x = torch.randn(2, 3)
         y = torch.randn(2, 3)
         z = torch.randn(2, 3)
