Support torch.export.Dim.AUTO in ONNX conversion pass #1586
Build #20250129.4 had test failures
Details
- Failed: 64 (2.75%)
- Passed: 2,137 (91.76%)
- Other: 128 (5.50%)
- Total: 2,329
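For context on the feature this PR adds: torch.export.Dim.AUTO lets the exporter infer which dimensions are dynamic instead of requiring explicitly named Dim objects. A minimal sketch of how it is passed through dynamic_shapes, assuming torch >= 2.5 (the Scale module and shapes below are illustrative, not taken from the PR):

import torch
from torch.export import Dim

class Scale(torch.nn.Module):
    def forward(self, x):
        return x * 2

# Dim.AUTO asks torch.export to infer dynamism for dim 0 of input "x",
# rather than declaring it explicitly with Dim("batch", min=..., max=...).
dynamic_shapes = {"x": {0: Dim.AUTO}}
exported = torch.export.export(Scale(), (torch.randn(2, 3),), dynamic_shapes=dynamic_shapes)
print(exported)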
Annotations
Check failure on line 10663 in Build log
azure-pipelines / Olive CI
Build log #L10663
Bash exited with code '1'.
Check failure on line 19 in Build log
azure-pipelines / Olive CI
Build log #L19
There are one or more test failures detected in result files. Detailed summary of published test results can be viewed in the Tests tab.
Check failure on line 20 in Build log
azure-pipelines / Olive CI
Build log #L20
There are one or more test failures detected in result files. Detailed summary of published test results can be viewed in the Tests tab.
Check failure on line 1 in test_resnet[resnet_ptq_cpu.json-local_system-pass-by-pass-random]
azure-pipelines / Olive CI
test_resnet[resnet_ptq_cpu.json-local_system-pass-by-pass-random]
AssertionError: footprints is empty. The search must have failed for all accelerator specs.
Raw output
search_algorithm = 'random', execution_order = 'pass-by-pass'
system = 'local_system', olive_json = 'resnet_ptq_cpu.json'
@pytest.mark.parametrize("search_algorithm", ["random"])
@pytest.mark.parametrize("execution_order", ["pass-by-pass"])
@pytest.mark.parametrize("system", ["local_system"])
@pytest.mark.parametrize(
    "olive_json",
    [
        "resnet_ptq_cpu.json",
        # TODO(trajep): remove skip once the bug of azureml-fsspec is fixed
        pytest.param(
            "resnet_ptq_cpu_aml_dataset.json", marks=pytest.mark.skip(reason="credential bug in azureml-fsspec")
        ),
    ],
)
@pytest.mark.skipif(
    version.parse(OrtVersion) == version.parse("1.16.0"),
    reason="resnet is not supported in ORT 1.16.0 caused by https://github.com/microsoft/onnxruntime/issues/17627",
)
def test_resnet(search_algorithm, execution_order, system, olive_json):
    from olive.workflows import run as olive_run
    olive_config = patch_config(olive_json, search_algorithm, execution_order, system)
    footprint = olive_run(olive_config, tempdir=os.environ.get("OLIVE_TEMPDIR", None))
>   check_output(footprint)
D:\a\_work\1\s\examples\test\local\test_resnet_ptq_cpu.py:49:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
D:\a\_work\1\s\examples\test\utils.py:20: in check_output
    assert_nodes(footprints)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
footprints = {}
def assert_nodes(footprints):
>   assert footprints, "footprints is empty. The search must have failed for all accelerator specs."
E   AssertionError: footprints is empty. The search must have failed for all accelerator specs.
D:\a\_work\1\s\examples\test\utils.py:25: AssertionError
Check failure on line 1 in test_resnet[resnet_ptq_cpu.json-local_system-pass-by-pass-random]
azure-pipelines / Olive CI
test_resnet[resnet_ptq_cpu.json-local_system-pass-by-pass-random]
AssertionError: footprints is empty. The search must have failed for all accelerator specs.
Raw output
search_algorithm = 'random', execution_order = 'pass-by-pass'
system = 'local_system', olive_json = 'resnet_ptq_cpu.json'
@pytest.mark.parametrize("search_algorithm", ["random"])
@pytest.mark.parametrize("execution_order", ["pass-by-pass"])
@pytest.mark.parametrize("system", ["local_system"])
@pytest.mark.parametrize(
    "olive_json",
    [
        "resnet_ptq_cpu.json",
        # TODO(trajep): remove skip once the bug of azureml-fsspec is fixed
        pytest.param(
            "resnet_ptq_cpu_aml_dataset.json", marks=pytest.mark.skip(reason="credential bug in azureml-fsspec")
        ),
    ],
)
@pytest.mark.skipif(
    version.parse(OrtVersion) == version.parse("1.16.0"),
    reason="resnet is not supported in ORT 1.16.0 caused by https://github.com/microsoft/onnxruntime/issues/17627",
)
def test_resnet(search_algorithm, execution_order, system, olive_json):
    from olive.workflows import run as olive_run
    olive_config = patch_config(olive_json, search_algorithm, execution_order, system)
    footprint = olive_run(olive_config, tempdir=os.environ.get("OLIVE_TEMPDIR", None))
>   check_output(footprint)
/olive/examples/test/local/test_resnet_ptq_cpu.py:49:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/olive/examples/test/utils.py:20: in check_output
    assert_nodes(footprints)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
footprints = {}
def assert_nodes(footprints):
>   assert footprints, "footprints is empty. The search must have failed for all accelerator specs."
E   AssertionError: footprints is empty. The search must have failed for all accelerator specs.
/olive/examples/test/utils.py:25: AssertionError
Check failure on line 1 in test_get_model_io_config[False]
azure-pipelines / Olive CI
test_get_model_io_config[False]
AssertionError: assert {'dynamic_axe...output_names'} == {'dynamic_axe...output_names'}
Extra items in the left set:
'dynamic_shapes'
Full diff:
{
'dynamic_axes',...
...Full output truncated (4 lines hidden), use '-vv' to show
Raw output
with_past = False
@pytest.mark.parametrize("with_past", [True, False])
def test_get_model_io_config(with_past):
    model_name, task = get_model_name_task(with_past)
    model = load_model_from_task(task, model_name)
    io_config = get_model_io_config(model_name, task, model)
    expected_keys = ["input_names", "output_names", "dynamic_axes"]
>   assert set(io_config.keys()) == set(expected_keys)
E AssertionError: assert {'dynamic_axe...output_names'} == {'dynamic_axe...output_names'}
E
E Extra items in the left set:
E 'dynamic_shapes'
E
E Full diff:
E {
E 'dynamic_axes',...
E
E ...Full output truncated (4 lines hidden), use '-vv' to show
test/unit_test/common/test_hf.py:102: AssertionError
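The extra dynamic_shapes key reflects the two ONNX export paths: the legacy TorchScript exporter declares dynamism through dynamic_axes, while the dynamo-based exporter (where Dim.AUTO applies) declares it through dynamic_shapes, so an io_config that serves both paths carries both keys. A minimal sketch of the distinction, assuming a recent torch (>= 2.6); the module and file names are illustrative:

import torch
from torch.export import Dim

class Scale(torch.nn.Module):
    def forward(self, x):
        return x * 2

x = torch.randn(3, 4)

# Legacy TorchScript exporter: dynamic dims named per input via dynamic_axes.
torch.onnx.export(Scale(), (x,), "scale_legacy.onnx",
                  input_names=["x"], output_names=["y"],
                  dynamic_axes={"x": {0: "batch"}, "y": {0: "batch"}})

# Dynamo-based exporter: dynamic dims declared via dynamic_shapes,
# which is where Dim.AUTO is used.
torch.onnx.export(Scale(), (x,), "scale_dynamo.onnx", dynamo=True,
                  dynamic_shapes={"x": {0: Dim.AUTO}})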
Check failure on line 1 in test_get_model_io_config[True]
azure-pipelines / Olive CI
test_get_model_io_config[True]
AssertionError: assert {'dynamic_axe...output_names'} == {'dynamic_axe...output_names'}
Extra items in the left set:
'dynamic_shapes'
Full diff:
{
'dynamic_axes',...
...Full output truncated (4 lines hidden), use '-vv' to show
Raw output
with_past = True
@pytest.mark.parametrize("with_past", [True, False])
def test_get_model_io_config(with_past):
    model_name, task = get_model_name_task(with_past)
    model = load_model_from_task(task, model_name)
    io_config = get_model_io_config(model_name, task, model)
    expected_keys = ["input_names", "output_names", "dynamic_axes"]
>   assert set(io_config.keys()) == set(expected_keys)
E AssertionError: assert {'dynamic_axe...output_names'} == {'dynamic_axe...output_names'}
E
E Extra items in the left set:
E 'dynamic_shapes'
E
E Full diff:
E {
E 'dynamic_axes',...
E
E ...Full output truncated (4 lines hidden), use '-vv' to show
test/unit_test/common/test_hf.py:102: AssertionError
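Both test_get_model_io_config failures share one root cause: get_model_io_config now returns a dynamic_shapes entry alongside dynamic_axes (presumably the new output of this PR), while the test's hard-coded expected_keys list was not updated. If the new key is intentional, the likely fix is a one-line test change, sketched here as an assumption rather than a confirmed patch:

expected_keys = ["input_names", "output_names", "dynamic_axes", "dynamic_shapes"]
assert set(io_config.keys()) == set(expected_keys)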