@@ -168,7 +168,7 @@ def _():
    trainer.run(data)


-@pytest.mark.skipif(Version(torch.__version__) < Version("1.6.0"), reason="Skip if < 1.6.0")
+@pytest.mark.skipif(Version(torch.__version__) < Version("1.12.0"), reason="Skip if < 1.12.0")
def test_create_supervised_training_scalar_assignment():
    with mock.patch("ignite.engine._check_arg") as check_arg_mock:
        check_arg_mock.return_value = None, torch.cuda.amp.GradScaler(enabled=False)
@@ -447,21 +447,21 @@ def test_create_supervised_trainer_apex_error():
def mock_torch_cuda_amp_module():
    with patch.dict(
        "sys.modules",
-        {"torch.cuda.amp": None, "torch.cuda.amp.grad_scaler": None, "torch.cuda.amp.autocast_mode": None},
+        {"torch.amp": None, "torch.cuda.amp": None, "torch.amp.autocast_mode": None},
    ):
        yield torch


def test_create_supervised_trainer_amp_error(mock_torch_cuda_amp_module):
-    with pytest.raises(ImportError, match="Please install torch>=1.6.0 to use amp_mode='amp'."):
+    with pytest.raises(ImportError, match="Please install torch>=1.12.0 to use amp_mode='amp'."):
        _test_create_supervised_trainer_wrong_accumulation(trainer_device="cpu", amp_mode="amp")
-    with pytest.raises(ImportError, match="Please install torch>=1.6.0 to use amp_mode='amp'."):
+    with pytest.raises(ImportError, match="Please install torch>=1.12.0 to use amp_mode='amp'."):
        _test_create_supervised_trainer(amp_mode="amp")
    with pytest.raises(ImportError, match="Please install torch>=1.6.0 to use scaler argument."):
        _test_create_supervised_trainer(amp_mode="amp", scaler=True)


-@pytest.mark.skipif(Version(torch.__version__) < Version("1.5.0"), reason="Skip if < 1.5.0")
+@pytest.mark.skipif(Version(torch.__version__) < Version("1.12.0"), reason="Skip if < 1.12.0")
def test_create_supervised_trainer_scaler_not_amp():
    scaler = torch.cuda.amp.GradScaler(enabled=torch.cuda.is_available())
@@ -501,7 +501,7 @@ def test_create_supervised_trainer_on_mps():
    _test_create_mocked_supervised_trainer(model_device=model_device, trainer_device=trainer_device)


-@pytest.mark.skipif(Version(torch.__version__) < Version("1.6.0"), reason="Skip if < 1.6.0")
+@pytest.mark.skipif(Version(torch.__version__) < Version("1.12.0"), reason="Skip if < 1.12.0")
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
def test_create_supervised_trainer_on_cuda_amp():
    model_device = trainer_device = "cuda"
@@ -517,7 +517,7 @@ def test_create_supervised_trainer_on_cuda_amp():
    _test_create_mocked_supervised_trainer(model_device=model_device, trainer_device=trainer_device, amp_mode="amp")


-@pytest.mark.skipif(Version(torch.__version__) < Version("1.6.0"), reason="Skip if < 1.6.0")
+@pytest.mark.skipif(Version(torch.__version__) < Version("1.12.0"), reason="Skip if < 1.12.0")
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
def test_create_supervised_trainer_on_cuda_amp_scaler():
    model_device = trainer_device = "cuda"
@@ -630,8 +630,8 @@ def test_create_supervised_evaluator():
    _test_mocked_supervised_evaluator()

    # older versions didn't have the autocast method so we skip the test for older builds
-    if Version(torch.__version__) >= Version("1.6.0"):
-        with mock.patch("torch.cuda.amp.autocast") as mock_torch_cuda_amp_module:
+    if Version(torch.__version__) >= Version("1.12.0"):
+        with mock.patch("torch.amp.autocast") as mock_torch_cuda_amp_module:
            _test_create_evaluation_step_amp(mock_torch_cuda_amp_module)
@@ -640,8 +640,8 @@ def test_create_supervised_evaluator_on_cpu():
    _test_mocked_supervised_evaluator(evaluator_device="cpu")

    # older versions didn't have the autocast method so we skip the test for older builds
-    if Version(torch.__version__) >= Version("1.6.0"):
-        with mock.patch("torch.cuda.amp.autocast") as mock_torch_cuda_amp_module:
+    if Version(torch.__version__) >= Version("1.12.0"):
+        with mock.patch("torch.amp.autocast") as mock_torch_cuda_amp_module:
            _test_create_evaluation_step(mock_torch_cuda_amp_module, evaluator_device="cpu")
            _test_create_evaluation_step_amp(mock_torch_cuda_amp_module, evaluator_device="cpu")
@@ -651,8 +651,8 @@ def test_create_supervised_evaluator_traced_on_cpu():
    _test_mocked_supervised_evaluator(evaluator_device="cpu", trace=True)

    # older versions didn't have the autocast method so we skip the test for older builds
-    if Version(torch.__version__) >= Version("1.6.0"):
-        with mock.patch("torch.cuda.amp.autocast") as mock_torch_cuda_amp_module:
+    if Version(torch.__version__) >= Version("1.12.0"):
+        with mock.patch("torch.amp.autocast") as mock_torch_cuda_amp_module:
            _test_create_evaluation_step(mock_torch_cuda_amp_module, evaluator_device="cpu", trace=True)
@@ -682,7 +682,7 @@ def test_create_supervised_evaluator_on_mps_with_model_on_cpu():
    _test_mocked_supervised_evaluator(evaluator_device="mps")


-@pytest.mark.skipif(Version(torch.__version__) < Version("1.6.0"), reason="Skip if < 1.6.0")
+@pytest.mark.skipif(Version(torch.__version__) < Version("1.12.0"), reason="Skip if < 1.12.0")
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
def test_create_supervised_evaluator_on_cuda_amp():
    model_device = evaluator_device = "cuda"
@@ -691,7 +691,7 @@ def test_create_supervised_evaluator_on_cuda_amp():
def test_create_supervised_evaluator_amp_error(mock_torch_cuda_amp_module):
-    with pytest.raises(ImportError, match="Please install torch>=1.6.0 to use amp_mode='amp'."):
+    with pytest.raises(ImportError, match="Please install torch>=1.12.0 to use amp_mode='amp'."):
        _test_create_supervised_evaluator(amp_mode="amp")
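For context, here is a minimal sketch (not part of this commit; the model, optimizer, and tensors are illustrative) of the device-agnostic torch.amp.autocast API that these tests now patch in place of torch.cuda.amp.autocast, assuming torch>=1.12:

# Minimal sketch, not from the diff: device-agnostic autocast with the
# torch.cuda.amp.GradScaler still used by the tests above.
import torch
import torch.nn.functional as F

device = "cuda" if torch.cuda.is_available() else "cpu"
model = torch.nn.Linear(4, 2).to(device)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
# GradScaler is a pass-through when disabled, so this also runs on CPU-only machines.
scaler = torch.cuda.amp.GradScaler(enabled=device == "cuda")

x = torch.randn(8, 4, device=device)
y = torch.randn(8, 2, device=device)

optimizer.zero_grad()
# torch.amp.autocast takes the device type explicitly, unlike torch.cuda.amp.autocast.
with torch.amp.autocast(device, enabled=device == "cuda"):
    loss = F.mse_loss(model(x), y)
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()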