@@ -2,6 +2,7 @@
 
 #include <c10/util/tempfile.h>
 #include <c10/util/flat_hash_map.h>
+#include <c10/util/irange.h>
 
 #include <torch/torch.h>
 
@@ -41,7 +42,7 @@ void is_optimizer_param_group_equal(const OptimizerParamGroup& lhs, const Optimi
   const auto& rhs_params = rhs.params();
 
   ASSERT_TRUE(lhs_params.size() == rhs_params.size());
-  for (size_t j = 0; j < lhs_params.size(); j++) {
+  for (const auto j : c10::irange(lhs_params.size())) {
     ASSERT_TRUE(torch::equal(lhs_params[j], rhs_params[j]));
   }
   ASSERT_TRUE(static_cast<const DerivedOptions&>(lhs.options()) == static_cast<const DerivedOptions&>(rhs.options()));
@@ -136,7 +137,7 @@ void test_serialize_optimizer(DerivedOptimizerOptions options, bool only_has_glo
   ASSERT_TRUE(optim3_2_state.size() == optim3_state.size());
 
   // checking correctness of serialization logic for optimizer.param_groups_ and optimizer.state_
-  for (int i = 0; i < optim3_2_param_groups.size(); i++) {
+  for (const auto i : c10::irange(optim3_2_param_groups.size())) {
     is_optimizer_param_group_equal<DerivedOptimizerOptions>(
         optim3_2_param_groups[i], optim3_param_groups[i]);
     is_optimizer_state_equal<DerivedOptimizerParamState>(optim3_2_state, optim3_state);
@@ -173,7 +174,7 @@ void write_tensors_to_archive(
     const BufferContainer& buffers) {
   archive.write(
       key + "/size", torch::tensor(static_cast<int64_t>(buffers.size())));
-  for (size_t index = 0; index < buffers.size(); ++index) {
+  for (const auto index : c10::irange(buffers.size())) {
     archive.write(
         key + "/" + c10::to_string(index), buffers[index], /*is_buffer=*/true);
   }
@@ -203,23 +204,23 @@ void write_step_buffers(
 TEST(SerializeTest, KeysFunc) {
   auto tempfile = c10::make_tempfile();
   torch::serialize::OutputArchive output_archive;
-  for (size_t i = 0; i < 3; i++) {
+  for (const auto i : c10::irange(3)) {
     output_archive.write("element/" + c10::to_string(i), c10::IValue(static_cast<int64_t>(i)));
   }
   output_archive.save_to(tempfile.name);
   torch::serialize::InputArchive input_archive;
   input_archive.load_from(tempfile.name);
   std::vector<std::string> keys = input_archive.keys();
   ASSERT_EQ(keys.size(), 3);
-  for (size_t i = 0; i < keys.size(); i++) {
+  for (const auto i : c10::irange(keys.size())) {
     ASSERT_EQ(keys[i], "element/" + c10::to_string(i));
   }
 }
 
 TEST(SerializeTest, TryReadFunc) {
   auto tempfile = c10::make_tempfile();
   torch::serialize::OutputArchive output_archive;
-  for (size_t i = 0; i < 3; i++) {
+  for (const auto i : c10::irange(3)) {
     output_archive.write("element/" + c10::to_string(i), c10::IValue(static_cast<int64_t>(i)));
   }
   output_archive.save_to(tempfile.name);
@@ -363,7 +364,7 @@ TEST(SerializeTest, XOR) {
   auto getLoss = [](Sequential model, uint32_t batch_size) {
     auto inputs = torch::empty({batch_size, 2});
     auto labels = torch::empty({batch_size});
-    for (size_t i = 0; i < batch_size; i++) {
+    for (const auto i : c10::irange(batch_size)) {
       inputs[i] = torch::randint(2, {2}, torch::kInt64);
       labels[i] = inputs[i][0].item<int64_t>() ^ inputs[i][1].item<int64_t>();
     }
@@ -533,7 +534,7 @@ TEST(SerializeTest, Optim_SGD) {
   int64_t iteration_{0};
   const auto& params_ = optim1.param_groups()[0].params();
   const auto& optim1_state = optim1.state();
-  for (size_t i = 0; i < params_.size(); i++) {
+  for (const auto i : c10::irange(params_.size())) {
     if (i != (params_.size() - 1)) {
       auto key_ = c10::guts::to_string(params_[i].unsafeGetTensorImpl());
       const SGDParamState& curr_state_ = static_cast<const SGDParamState&>(*(optim1_state.at(key_).get()));
@@ -577,7 +578,7 @@ TEST(SerializeTest, Optim_Adam) {
   std::vector<at::Tensor> max_exp_average_sq_buffers;
   const auto& params_ = optim1.param_groups()[0].params();
   const auto& optim1_state = optim1.state();
-  for (size_t i = 0; i < params_.size(); i++) {
+  for (const auto i : c10::irange(params_.size())) {
     if (i != (params_.size() - 1)) {
       auto key_ = c10::guts::to_string(params_[i].unsafeGetTensorImpl());
       const AdamParamState& curr_state_ = static_cast<const AdamParamState&>(*(optim1_state.at(key_).get()));
@@ -627,7 +628,7 @@ TEST(SerializeTest, Optim_AdamW) {
   std::vector<at::Tensor> max_exp_average_sq_buffers;
   const auto& params_ = optim1.param_groups()[0].params();
   const auto& optim1_state = optim1.state();
-  for (size_t i = 0; i < params_.size(); i++) {
+  for (const auto i : c10::irange(params_.size())) {
     if (i != (params_.size() - 1)) {
       auto key_ = c10::guts::to_string(params_[i].unsafeGetTensorImpl());
       const AdamWParamState& curr_state_ = static_cast<const AdamWParamState&>(*(optim1_state.at(key_).get()));
@@ -678,7 +679,7 @@ TEST(SerializeTest, Optim_RMSprop) {
   std::vector<at::Tensor> grad_average_buffers;
   const auto& params_ = optim1.param_groups()[0].params();
   const auto& optim1_state = optim1.state();
-  for (size_t i = 0; i < params_.size(); i++) {
+  for (const auto i : c10::irange(params_.size())) {
     if (i != (params_.size() - 1)) {
       auto key_ = c10::guts::to_string(params_[i].unsafeGetTensorImpl());
       const RMSpropParamState& curr_state_ = static_cast<const RMSpropParamState&>(*(optim1_state.at(key_).get()));
@@ -703,7 +704,7 @@ TEST(SerializeTest, Optim_RMSprop) {
   const auto& params1_2_ = optim1_2.param_groups()[0].params();
   auto& optim1_2_state = optim1_2.state();
   // old RMSprop didn't track step value
-  for (size_t i = 0; i < params1_2_.size(); i++) {
+  for (const auto i : c10::irange(params1_2_.size())) {
     if (i != (params1_2_.size() - 1)) {
       auto key_ = c10::guts::to_string(params_[i].unsafeGetTensorImpl());
       auto key1_2_ = c10::guts::to_string(params1_2_[i].unsafeGetTensorImpl());
@@ -788,7 +789,7 @@ TEST(SerializeTest, XOR_CUDA) {
       inputs = inputs.cuda();
       labels = labels.cuda();
     }
-    for (size_t i = 0; i < batch_size; i++) {
+    for (const auto i : c10::irange(batch_size)) {
       inputs[i] = torch::randint(2, {2}, torch::kInt64);
       labels[i] = inputs[i][0].item<int64_t>() ^ inputs[i][1].item<int64_t>();
     }
@@ -879,7 +880,7 @@ TEST(SerializeTest, VectorOfTensors) {
   std::vector<torch::Tensor> y_vec;
   torch::load(y_vec, stream);
 
-  for (int64_t i = 0; i < x_vec.size(); i++) {
+  for (const auto i : c10::irange(x_vec.size())) {
     auto& x = x_vec[i];
     auto& y = y_vec[i];
     ASSERT_TRUE(y.defined());
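
For context, every hunk above applies the same change: an explicit index loop is replaced by a range-based loop over c10::irange(n), which yields the half-open integer range [0, n) with the element type deduced from the bound. A minimal standalone sketch of the pattern, assuming a translation unit built against c10 so that <c10/util/irange.h> is on the include path (the values vector and main function are illustrative only, not part of the diff):

// Illustration of the c10::irange pattern applied in the hunks above.
#include <c10/util/irange.h>
#include <cstddef>
#include <iostream>
#include <vector>

int main() {
  std::vector<int> values{10, 20, 30};
  // Old style, as in the removed ("-") lines: explicit index counter.
  for (size_t i = 0; i < values.size(); i++) {
    std::cout << i << ": " << values[i] << "\n";
  }
  // New style, as in the added ("+") lines: same indices [0, n),
  // with i's type deduced from the bound (std::size_t here).
  for (const auto i : c10::irange(values.size())) {
    std::cout << i << ": " << values[i] << "\n";
  }
  return 0;
}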