
Commit 73d0f48

cyyever authored and pytorchmergebot committed
[structural binding][11/N] Replace std::tie with structural binding (pytorch#130830)
Follows pytorch#130784

Pull Request resolved: pytorch#130830
Approved by: https://github.com/janeyx99
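For context, this series mechanically rewrites the pre-C++17 declare-then-std::tie pattern into C++17 structured bindings. A minimal, self-contained sketch of the transformation (the function and variable names here are illustrative, not taken from the patch):

#include <string>
#include <tuple>

std::tuple<int, std::string> lookup() {
  return {42, "answer"};
}

void old_style() {
  // Before: each variable must be declared (and default-constructed)
  // first, then assigned through std::tie.
  int id;
  std::string name;
  std::tie(id, name) = lookup();
}

void new_style() {
  // After: one declaration, no separate default construction, types deduced.
  auto [id, name] = lookup();
}

Besides brevity, the binding removes the default-construct-then-assign dance and no longer requires the element types to be default-constructible at all.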
1 parent e14d1d1 commit 73d0f48

File tree

6 files changed (+42, -73 lines)

aten/src/ATen/native/cuda/Indexing.cu

+1 -2

@@ -1763,8 +1763,7 @@ Tensor index_select_sparse_cuda(const Tensor& self, int64_t dim, const Tensor& i
     return make_output(empty_idx, empty_idx);
   }

-  Tensor selected_dim_indices, res_dim_indices;
-  std::tie(selected_dim_indices, res_dim_indices) = [&]() -> std::tuple<Tensor, Tensor> {
+  auto [selected_dim_indices, res_dim_indices] = [&]() -> std::tuple<Tensor, Tensor> {
     auto res_dim_indices = at::empty({res_len}, nneg_index.options());
     auto selected_dim_indices = at::empty_like(res_dim_indices);
     auto selected_dim_indices_offsets = intrsc_counts_nneg_index.cumsum(0)
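The Indexing.cu change binds directly to the result of an immediately invoked lambda. A standalone sketch of the same shape, with illustrative names:

#include <tuple>

// Sketch only: a structured binding over an immediately invoked lambda
// lets several related values be computed inside one scoped expression.
void split(int n) {
  auto [lo_half, hi_half] = [&]() -> std::tuple<int, int> {
    const int half = n / 2;
    return {half, n - half};
  }();
  // lo_half and hi_half are initialized in one shot; with std::tie they
  // would have had to be declared (and default-constructed) first.
  (void)lo_half;
  (void)hi_half;
}

As in the real code, the explicit -> std::tuple<...> return type on the lambda makes the bound type unambiguous.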

test/cpp/api/modules.cpp

+13 -27

@@ -314,8 +314,7 @@ TEST_F(ModulesTest, MaxPool1d) {
 TEST_F(ModulesTest, MaxPool1dReturnIndices) {
   MaxPool1d model(MaxPool1dOptions(3).stride(2));
   auto x = torch::ones({1, 1, 5}, torch::requires_grad());
-  torch::Tensor y, indices;
-  std::tie(y, indices) = model->forward_with_indices(x);
+  auto [y, indices] = model->forward_with_indices(x);

   ASSERT_EQ(y.dim(), 3);
   ASSERT_TRUE(torch::allclose(y, torch::ones({1, 1, 2})));
@@ -355,8 +354,7 @@ TEST_F(ModulesTest, MaxPool2dUneven) {
 TEST_F(ModulesTest, MaxPool2dReturnIndices) {
   MaxPool2d model(MaxPool2dOptions(3).stride(2));
   auto x = torch::ones({2, 5, 5}, torch::requires_grad());
-  torch::Tensor y, indices;
-  std::tie(y, indices) = model->forward_with_indices(x);
+  auto [y, indices] = model->forward_with_indices(x);

   ASSERT_EQ(y.dim(), 3);
   ASSERT_TRUE(torch::allclose(y, torch::ones({2, 2, 2})));
@@ -383,8 +381,7 @@ TEST_F(ModulesTest, MaxPool3d) {
 TEST_F(ModulesTest, MaxPool3dReturnIndices) {
   MaxPool3d model(MaxPool3dOptions(3).stride(2));
   auto x = torch::ones({2, 5, 5, 5}, torch::requires_grad());
-  torch::Tensor y, indices;
-  std::tie(y, indices) = model->forward_with_indices(x);
+  auto [y, indices] = model->forward_with_indices(x);

   ASSERT_EQ(y.dim(), 4);
   ASSERT_TRUE(torch::allclose(y, torch::ones({2, 2, 2, 2})));
@@ -467,8 +464,7 @@ TEST_F(ModulesTest, FractionalMaxPool2d) {
 TEST_F(ModulesTest, FractionalMaxPool2dReturnIndices) {
   FractionalMaxPool2d model(FractionalMaxPool2dOptions(3).output_size(2));
   auto x = torch::ones({2, 5, 5}, torch::requires_grad());
-  torch::Tensor y, indices;
-  std::tie(y, indices) = model->forward_with_indices(x);
+  auto [y, indices] = model->forward_with_indices(x);

   ASSERT_EQ(y.dim(), 3);
   ASSERT_TRUE(torch::allclose(y, torch::ones({2, 2, 2})));
@@ -494,8 +490,7 @@ TEST_F(ModulesTest, FractionalMaxPool3d) {
 TEST_F(ModulesTest, FractionalMaxPool3dReturnIndices) {
   FractionalMaxPool3d model(FractionalMaxPool3dOptions(3).output_size(2));
   auto x = torch::ones({2, 5, 5, 5}, torch::requires_grad());
-  torch::Tensor y, indices;
-  std::tie(y, indices) = model->forward_with_indices(x);
+  auto [y, indices] = model->forward_with_indices(x);

   ASSERT_EQ(y.dim(), 4);
   ASSERT_TRUE(torch::allclose(y, torch::ones({2, 2, 2, 2})));
@@ -655,8 +650,7 @@ TEST_F(ModulesTest, AdaptiveMaxPool1dReturnIndices) {
   AdaptiveMaxPool1d model(3);
   auto x = torch::tensor(
       {{{1, 2, 3, 4, 5}}}, torch::dtype(torch::kFloat).requires_grad(true));
-  torch::Tensor y, indices;
-  std::tie(y, indices) = model->forward_with_indices(x);
+  auto [y, indices] = model->forward_with_indices(x);

   ASSERT_EQ(y.dim(), 3);
   ASSERT_TRUE(torch::allclose(y, torch::tensor({{{2, 4, 5}}}, torch::kFloat)));
@@ -712,8 +706,7 @@ TEST_F(ModulesTest, AdaptiveMaxPool2dReturnIndicesEven) {
   AdaptiveMaxPool2d model(3);
   auto x = torch::arange(0., 50);
   x.resize_({2, 5, 5}).set_requires_grad(true);
-  torch::Tensor y, indices;
-  std::tie(y, indices) = model->forward_with_indices(x);
+  auto [y, indices] = model->forward_with_indices(x);
   torch::Tensor s = y.sum();

   s.backward();
@@ -746,8 +739,7 @@ TEST_F(ModulesTest, AdaptiveMaxPool2dReturnIndicesUneven) {
   AdaptiveMaxPool2d model(AdaptiveMaxPool2dOptions({3, 2}));
   auto x = torch::arange(0., 40);
   x.resize_({2, 5, 4}).set_requires_grad(true);
-  torch::Tensor y, indices;
-  std::tie(y, indices) = model->forward_with_indices(x);
+  auto [y, indices] = model->forward_with_indices(x);
   torch::Tensor s = y.sum();

   s.backward();
@@ -803,8 +795,7 @@ TEST_F(ModulesTest, AdaptiveMaxPool3dReturnIndices) {
   AdaptiveMaxPool3d model(3);
   auto x = torch::arange(0., 64);
   x.resize_({1, 4, 4, 4}).set_requires_grad(true);
-  torch::Tensor y, indices;
-  std::tie(y, indices) = model->forward_with_indices(x);
+  auto [y, indices] = model->forward_with_indices(x);
   torch::Tensor s = y.sum();

   s.backward();
@@ -946,8 +937,7 @@ TEST_F(ModulesTest, MaxPool1d_MaxUnpool1d) {
   MaxPool1d pool{MaxPool1dOptions(2).stride(2)};
   MaxUnpool1d unpool{MaxUnpool1dOptions(2).stride(2)};
   auto input = torch::tensor({{{1, 2, 3, 4, 5, 6, 7, 8}}}, torch::kFloat);
-  torch::Tensor output, indices;
-  std::tie(output, indices) = pool->forward_with_indices(input);
+  auto [output, indices] = pool->forward_with_indices(input);
   ASSERT_TRUE(torch::allclose(
       unpool(output, indices),
       torch::tensor({{{0, 2, 0, 4, 0, 6, 0, 8}}}, torch::kFloat)));
@@ -999,8 +989,7 @@ TEST_F(ModulesTest, MaxPool2d_MaxUnpool2d) {
   auto input = torch::tensor(
       {{{{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}, {13, 14, 15, 16}}}},
       torch::kFloat);
-  torch::Tensor output, indices;
-  std::tie(output, indices) = pool->forward_with_indices(input);
+  auto [output, indices] = pool->forward_with_indices(input);
   ASSERT_TRUE(torch::allclose(
       unpool(output, indices),
       torch::tensor(
@@ -1061,8 +1050,7 @@ TEST_F(ModulesTest, MaxPool3d_MaxUnpool3d) {
   MaxPool3d pool{MaxPool3dOptions(3).stride(2)};
   MaxUnpool3d unpool{MaxUnpool3dOptions(3).stride(2)};
   auto input = torch::randn({20, 16, 51, 33, 15});
-  torch::Tensor output, indices;
-  std::tie(output, indices) = pool->forward_with_indices(input);
+  auto [output, indices] = pool->forward_with_indices(input);
   auto unpooled_output = unpool(output, indices);
   ASSERT_EQ(
       unpooled_output.sizes(), std::vector<int64_t>({20, 16, 51, 33, 15}));
@@ -3755,9 +3743,7 @@ void _multihead_attn_test_helper(
           /*dim=*/1);
     }
   }
-  torch::Tensor attn_heads;
-  torch::Tensor ref_attn_weight;
-  std::tie(attn_heads, ref_attn_weight) = _scaled_dot_attn_ref(
+  auto [attn_heads, ref_attn_weight] = _scaled_dot_attn_ref(
       Q_split,
       K_split,
       V_split,
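Every modules.cpp hunk is the same rewrite: forward_with_indices returns a std::tuple<torch::Tensor, torch::Tensor>, and the two-line declare/std::tie pair collapses into one binding. A self-contained version of the post-patch style, assuming libtorch headers are available (this is a sketch, not the actual test body):

#include <torch/torch.h>

void max_pool_with_indices_example() {
  torch::nn::MaxPool1d model(torch::nn::MaxPool1dOptions(3).stride(2));
  auto x = torch::ones({1, 1, 5}, torch::requires_grad());
  // forward_with_indices returns std::tuple<Tensor, Tensor>; the
  // structured binding names both results at their point of creation.
  auto [y, indices] = model->forward_with_indices(x);
  TORCH_CHECK(y.dim() == 3);
  TORCH_CHECK(indices.dim() == 3);
}

The tests themselves write MaxPool1d unqualified, presumably because the file brings the torch::nn names into scope.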

test/cpp/jit/test_autodiff.cpp

+1 -2

@@ -169,8 +169,7 @@ TEST(AutodiffTest, ADFormulas) {
   // Get outputs from the interpreter
   auto tensors_in = fmap(vars_in, cast);
   auto tensor_grads_in = fmap(var_grads_in, cast);
-  tensor_list tensors_out, tensor_grads_out;
-  std::tie(tensors_out, tensor_grads_out) =
+  auto [tensors_out, tensor_grads_out] =
       runGradient(grad_spec, tensors_in, tensor_grads_in);

   // Compare results
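This hunk also shows that the initializer of a structured binding can wrap onto the next line, exactly like any other declaration. A generic sketch (the pair-returning function is an illustrative stand-in for runGradient, whose exact return type is not shown in this diff):

#include <utility>
#include <vector>

using int_list = std::vector<int>;

// Illustrative stand-in for a function returning two result lists.
std::pair<int_list, int_list> run_step() {
  return {{1, 2}, {3, 4}};
}

void sketch() {
  // The binding syntax is identical for std::pair and std::tuple,
  // and the initializer may span lines.
  auto [outputs, grads] =
      run_step();
  (void)outputs;
  (void)grads;
}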

test/cpp/jit/test_misc.cpp

+5 -10

@@ -170,8 +170,7 @@ TEST(THNNConvTest, Basic) {
       torch::randn_like(output, at::MemoryFormat::Preserve);

   // run backward eagerly
-  at::Tensor grad_input, grad_weight, grad_bias;
-  std::tie(grad_input, grad_weight, grad_bias) = at::_slow_conv2d_backward(
+  auto [grad_input, grad_weight, grad_bias] = at::_slow_conv2d_backward(
       grad_output,
       input,
       weight,
@@ -216,8 +215,7 @@ TEST(THNNConvTest, Basic) {
   tensor_grads_in.push_back(grad_output);

   // Get outputs from the interpreter
-  tensor_list tensors_out, tensor_grads_out;
-  std::tie(tensors_out, tensor_grads_out) =
+  auto [tensors_out, tensor_grads_out] =
       runGradient(grad_spec, tensors_in, tensor_grads_in);

   // prepare expected structs
@@ -255,8 +253,7 @@ TEST(ATenNativeBatchNormTest, Basic) {
   at::Tensor running_var_jit = running_var.clone();

   // run forward eagerly
-  at::Tensor output, savemean, saveinvstd;
-  std::tie(output, savemean, saveinvstd) = at::native_batch_norm(
+  auto [output, savemean, saveinvstd] = at::native_batch_norm(
       input,
       weight,
       bias,
@@ -275,12 +272,11 @@ TEST(ATenNativeBatchNormTest, Basic) {
       torch::zeros_like(saveinvstd, at::MemoryFormat::Preserve);

   // run backward eagerly
-  at::Tensor grad_input, grad_weight, grad_bias;
   // aten::native_batch_norm_backward(Tensor grad_out, Tensor input, Tensor
   // weight, Tensor running_mean, Tensor running_var, Tensor save_mean, Tensor
   // save_invstd, bool train, float eps, bool[3] output_mask) -> (Tensor,
   // Tensor, Tensor)
-  std::tie(grad_input, grad_weight, grad_bias) = at::native_batch_norm_backward(
+  auto [grad_input, grad_weight, grad_bias] = at::native_batch_norm_backward(
       grad_output,
       input,
       weight,
@@ -341,8 +337,7 @@ TEST(ATenNativeBatchNormTest, Basic) {
   tensor_grads_in.push_back(grad_saveinvstd);

   // Get outputs from the interpreter
-  tensor_list tensors_out, tensor_grads_out;
-  std::tie(tensors_out, tensor_grads_out) =
+  auto [tensors_out, tensor_grads_out] =
       runGradient(grad_spec, tensors_in, tensor_grads_in);

   // prepare expected structs
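The batch-norm hunks replace three-way std::tie calls. One genuine trade-off worth knowing: std::tie can discard unwanted results with std::ignore, while a structured binding must name every element. A sketch of both (the three-int function is illustrative, standing in for a backward call returning three gradients):

#include <tuple>

std::tuple<int, int, int> backward_like() {
  return {1, 2, 3};
}

void sketch() {
  // std::tie can skip a result with std::ignore:
  int grad_input = 0;
  std::tie(grad_input, std::ignore, std::ignore) = backward_like();

  // A structured binding names all three; unused ones can be silenced
  // with a cast to void (or [[maybe_unused]] on the declaration).
  auto [gi, gw, gb] = backward_like();
  (void)gw;
  (void)gb;
  (void)gi;
  (void)grad_input;
}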

test/cpp/tensorexpr/test_external_calls.cpp

+2 -9

@@ -647,10 +647,7 @@ TEST(ExternalCall, BinaryFloat) {
   tests.push_back(
       Test{{100, 200}, {200, 300}, {100, 300}, at::mm, "nnc_aten_mm"});
   for (auto curTest : tests) {
-    std::vector<int64_t> aShape, bShape, resShape;
-    TensorFunc torchFunc;
-    std::string externCallName;
-    std::tie(aShape, bShape, resShape, torchFunc, externCallName) = curTest;
+    auto [aShape, bShape, resShape, torchFunc, externCallName] = curTest;
     auto toExprHandleVec = [](std::vector<int64_t> v) {
       auto intV = std::vector<int>(v.begin(), v.end());
       return std::vector<ExprHandle>(intV.begin(), intV.end());
@@ -730,11 +727,7 @@ TEST(ExternalCall, UnaryFloat) {
       "nnc_aten_mean",
       toExprHandleVec({1, /*keepdim=*/0})});
   for (auto curTest : tests) {
-    std::vector<int64_t> aShape, resShape;
-    TensorFunc torchFunc;
-    std::string externCallName;
-    std::vector<ExprHandle> externCallArgs;
-    std::tie(aShape, resShape, torchFunc, externCallName, externCallArgs) =
+    auto [aShape, resShape, torchFunc, externCallName, externCallArgs] =
         curTest;
     BufHandle A("A", toExprHandleVec(aShape), kFloat);
     BufHandle ResultBuf("Result", toExprHandleVec(resShape), kFloat);
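Here five heterogeneous loop variables collapse into one binding over curTest. Structured bindings also decompose plain aggregates, which std::tie never could. A sketch with an illustrative struct (not the actual Test type in this file):

#include <cstdint>
#include <string>
#include <vector>

// Illustrative aggregate with one name per data member.
struct Case {
  std::vector<int64_t> a_shape;
  std::vector<int64_t> res_shape;
  std::string extern_call_name;
};

void sketch() {
  Case cur{{100, 200}, {100, 300}, "nnc_aten_mm"};
  // Binds one name per non-static data member, in declaration order;
  // because cur is taken by value here, the members are copied.
  auto [a_shape, res_shape, extern_call_name] = cur;
  (void)a_shape;
  (void)res_shape;
  (void)extern_call_name;
}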

torch/csrc/jit/passes/onnx/shape_type_inference.cpp

+20 -23

@@ -386,29 +386,26 @@ void ConvertGraphToONNXProto(
     SymbolDimMap& symbol_dim_map,
     DimSymbolMap& dim_symbol_map,
     int opset_version) {
-  RawDataExportMap export_map;
-  bool val_use_external_data_format;
-  SymbolDimMap new_symbol_dim_map;
-  NodeNameMap node_names;
-  std::tie(
-      model_proto,
-      export_map,
-      new_symbol_dim_map,
-      val_use_external_data_format,
-      node_names) =
-      export_onnx(
-          graph,
-          {},
-          opset_version,
-          {},
-          false,
-          onnx_torch::OperatorExportTypes::ONNX,
-          true,
-          true,
-          {},
-          true,
-          false,
-          std::string());
+  auto
+      [model_proto_tmp,
+       export_map,
+       new_symbol_dim_map,
+       val_use_external_data_format,
+       node_names] =
+          export_onnx(
+              graph,
+              {},
+              opset_version,
+              {},
+              false,
+              onnx_torch::OperatorExportTypes::ONNX,
+              true,
+              true,
+              {},
+              true,
+              false,
+              std::string());
+  model_proto = std::move(model_proto_tmp);
   symbol_dim_map.insert(new_symbol_dim_map.begin(), new_symbol_dim_map.end());
   for (const auto& pair : new_symbol_dim_map) {
     dim_symbol_map[pair.second] = pair.first;
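This is the one non-mechanical change in the commit. A structured binding always introduces fresh names; unlike std::tie, it cannot assign into an existing variable such as the model_proto out-parameter. Hence the binding targets a temporary model_proto_tmp and moves it into place. A reduced sketch of that pattern (names illustrative):

#include <string>
#include <tuple>
#include <utility>

// Stand-in for a function returning several results at once.
std::tuple<std::string, int> produce() {
  return {"serialized-proto", 7};
}

// Structured bindings declare new names, so filling an existing
// out-parameter needs a temporary plus std::move.
void fill(std::string& model_proto) {
  auto [model_proto_tmp, version] = produce();
  model_proto = std::move(model_proto_tmp);
  (void)version;
}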
