
Commit 4f201bf

Workaround of [SW-208658] (#2162)
Signed-off-by: Xin He <[email protected]>
1 parent ff969bc

1 file changed (+4, -0 lines changed)
Diff for: test/3x/torch/quantization/fp8_quant/test_layer_wise.py

@@ -11,6 +11,9 @@
 
 def test_two_step_layer_wise():
     # layer-wise is based on memory mapping technique and https://github.com/huggingface/transformers/pull/31771
+    # Workaround of [SW-208658]: Memory mapping is blocked unreasonably
+    tmp_deterministic_algorithms_flag = torch.are_deterministic_algorithms_enabled()
+    torch.use_deterministic_algorithms(False)
     model_name = "facebook/opt-350m"
     config = AutoConfig.from_pretrained(model_name)
     # requires transformers >= 4.43.0, torch_dtype=config.torch_dtype
@@ -37,3 +40,4 @@ def test_two_step_layer_wise():
     cpu_mem2 = get_used_cpu_mem_MB()
     model = convert(new_model, qconfig)
     assert (cpu_mem2 - cpu_mem0) < 100, "model with memory mapping should use no more than 100MiB."
+    torch.use_deterministic_algorithms(tmp_deterministic_algorithms_flag)
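
For reuse of the same save-and-restore pattern outside this test, a minimal sketch follows. It relies only on the public PyTorch APIs already used in the diff (torch.are_deterministic_algorithms_enabled and torch.use_deterministic_algorithms); the context-manager wrapper itself is hypothetical and is not part of this commit.

import contextlib

import torch


@contextlib.contextmanager
def non_deterministic_algorithms():
    # Hypothetical helper (not in the commit): temporarily disable
    # deterministic algorithms, then restore the caller's original
    # setting, mirroring the [SW-208658] workaround in the test above.
    previous_flag = torch.are_deterministic_algorithms_enabled()
    torch.use_deterministic_algorithms(False)
    try:
        yield
    finally:
        torch.use_deterministic_algorithms(previous_flag)


# Usage sketch: run the memory-mapping-based layer-wise flow with
# deterministic algorithms turned off, restoring the flag afterward.
with non_deterministic_algorithms():
    pass  # e.g. load the model and run the two-step quantization here

Wrapping the restore in a finally block (or a context manager, as above) keeps the global flag from leaking into later tests even if the body raises.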
