Delay pass creation until time to run #1621
Build #20250214.9 had test failures
Details
- Failed: 4 (0.15%)
- Passed: 2,387 (90.38%)
- Other: 250 (9.47%)
- Total: 2,641
- 16,578 of 37,620 lines covered (44.07%)
Annotations
Check failures on lines 19 and 20 in Build log
azure-pipelines / Olive CI
Build log #L19, #L20
There are one or more test failures detected in the result files. A detailed summary of the published test results can be viewed in the Tests tab.
Check failure in test_aml_model_pass_run (Linux agent)
azure-pipelines / Olive CI
azure.ai.ml.exceptions.JobException: Exception :
{
    "error": {
        "code": "UserError",
        "message": "Pipeline has failed child jobs. Failed nodes: /component. For more details and logs, please go to the job detail page and check the child jobs.",
        "message_format": "Pipeline has failed child jobs. {0}",
        "message_parameters": {},
        "reference_code": "PipelineHasStepJobFailed",
        "details": []
    },
    "environment": "eastus",
    "location": "eastus",
    "time": "2025-02-14T22:36:40.971196Z",
    "component_name": ""
}
Raw output
tmp_path = PosixPath('/mnt/vss/_work/1/.pytest_basetemp/test_aml_model_pass_run0')

    def test_aml_model_pass_run(tmp_path):
        # ------------------------------------------------------------------
        # Azure ML System
        aml_compute = "cpu-cluster"
        folder_location = Path(__file__).absolute().parent
        conda_file_location = folder_location / "conda.yaml"
        workspace_config = get_olive_workspace_config()
        azureml_client_config = AzureMLClientConfig(**workspace_config)
        docker_config = AzureMLDockerConfig(
            base_image="mcr.microsoft.com/azureml/openmpi4.1.0-ubuntu22.04",
            conda_file_path=conda_file_location,
        )
        aml_system = AzureMLSystem(
            azureml_client_config=azureml_client_config,
            aml_compute=aml_compute,
            aml_docker_config=docker_config,
            is_dev=True,
        )
        # ------------------------------------------------------------------
        # Input model
        pytorch_model_config = get_pytorch_model()
        # ------------------------------------------------------------------
        # Onnx conversion pass
        # config can be a dictionary
        onnx_model_file = tmp_path / "model.onnx"
        full_pass_config = FullPassConfig.parse_obj({"type": OnnxConversion.__name__, "config": {"target_opset": 13}})
>       onnx_model = aml_system.run_pass(full_pass_config, pytorch_model_config, onnx_model_file)

test/integ_test/aml_model_test/test_aml_model.py:44:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
olive/systems/azureml/aml_system.py:264: in run_pass
    named_outputs_dir = self._run_job(
olive/systems/azureml/aml_system.py:520: in _run_job
    ml_client.jobs.stream(job.name)
/opt/hostedtoolcache/Python/3.10.16/x64/lib/python3.10/site-packages/azure/core/tracing/decorator.py:116: in wrapper_use_tracer
    return func(*args, **kwargs)
/opt/hostedtoolcache/Python/3.10.16/x64/lib/python3.10/site-packages/azure/ai/ml/_telemetry/activity.py:288: in wrapper
    return f(*args, **kwargs)
/opt/hostedtoolcache/Python/3.10.16/x64/lib/python3.10/site-packages/azure/ai/ml/operations/_job_operations.py:838: in stream
    self._stream_logs_until_completion(
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

run_operations = <azure.ai.ml.operations._run_operations.RunOperations object at 0x7fc10c13d180>
job_resource = <azure.ai.ml._restclient.v2024_01_01_preview.models._models_py3.JobBase object at 0x7fc1140d3910>
datastore_operations = <azure.ai.ml.operations._datastore_operations.DatastoreOperations object at 0x7fc11567eda0>
raise_exception_on_failed_job = True

    def stream_logs_until_completion(
        run_operations: RunOperations,
        job_resource: JobBase,
        datastore_operations: Optional[DatastoreOperations] = None,
        raise_exception_on_failed_job: bool = True,
        *,
        requests_pipeline: HttpPipeline
    ) -> None:
        """Stream the experiment run output to the specified file handle. By default the the file handle points to stdout.
        :param run_operations: The run history operations class.
        :type run_operations: RunOperations
        :param job_resource: The job to stream
        :type job_resource: JobBase
        :param datastore_operations: Optional, the datastore operations class, used to get logs from datastore
        :type datastore_operations: Optional[DatastoreOperations]
        :param raise_exception_on_failed_job: Should this method fail if job fails
        :type raise_exception_on_failed_job: Boolean
        :keyword requests_pipeline: The HTTP pipeline to use for requests.
        :type requests_pipeline: ~azure.ai.ml._utils._http_utils.HttpPipeline
        :return:
        :rtype: None
        """
        job_type = job_resource.properties.job_type
        job_name = job_resource.name
        studio_endpoint = job_resource.properties.services.get("S
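
The annotation is cut off before pytest reaches the SDK's exception, and the pipeline-level message only reports that the /component child job failed. To see the child job's own error, one option is to query the parent pipeline's children with the azure-ai-ml client and download the logs of any failed node. The following is a minimal, untested sketch; the workspace identifiers and the parent job name are placeholders to be taken from the failed run:

# Sketch: inspect the failed child jobs of an AzureML pipeline job.
# Assumes azure-ai-ml and azure-identity are installed; all angle-bracket
# values are placeholders to be filled in from the failed run.
from azure.ai.ml import MLClient
from azure.identity import DefaultAzureCredential

ml_client = MLClient(
    credential=DefaultAzureCredential(),
    subscription_id="<subscription-id>",
    resource_group_name="<resource-group>",
    workspace_name="<workspace-name>",
)

parent_job_name = "<parent-pipeline-job-name>"  # visible in the Studio job detail page

# Walk the pipeline's child jobs and pull logs for any failed node.
for child in ml_client.jobs.list(parent_job_name=parent_job_name):
    print(child.name, child.status)
    if child.status == "Failed":
        ml_client.jobs.download(child.name, download_path=f"./logs/{child.name}", all=True)

The downloaded artifacts include the node's user and system logs, which usually contain the real stack trace behind a PipelineHasStepJobFailed error.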
The remaining three annotations report the same failure:

Check failure in test_aml_model_pass_run (Windows agent)
azure-pipelines / Olive CI
Same azure.ai.ml.exceptions.JobException ("UserError" / "PipelineHasStepJobFailed", failed node /component) at 2025-02-14T22:47:20.469237Z; the raw output matches the Linux traceback above except for Windows-style paths (tmp_path = WindowsPath('D:/a/_work/1/.pytest_basetemp/test_aml_model_pass_run0')).

Check failure in test_aml_model_pass_run (Linux agent)
azure-pipelines / Olive CI
Same JobException ("PipelineHasStepJobFailed") at 2025-02-19T21:01:56.910173Z, with an identical traceback.

Check failure in test_aml_model_pass_run (Windows agent)
azure-pipelines / Olive CI
Same JobException ("PipelineHasStepJobFailed") at 2025-02-19T21:07:38.676756Z, with an identical traceback.
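
All four failures stop at the pipeline level, so the logs above do not show whether the OnnxConversion pass itself is at fault. A cheap local sanity check is to confirm that an opset-13 ONNX export works at all in a comparable environment. The toy model below is hypothetical (it stands in for the test's get_pytorch_model(), which is not shown in the log); only the opset value is taken from the failing pass config:

# Hypothetical local sanity check for ONNX export at target_opset 13.
# This is NOT Olive's OnnxConversion pass; it only verifies that a plain
# torch.onnx.export with opset 13 succeeds in the local environment.
import torch

class ToyModel(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(4, 2)

    def forward(self, x):
        return self.linear(x)

model = ToyModel().eval()
dummy_input = torch.randn(1, 4)
torch.onnx.export(
    model,
    dummy_input,
    "model.onnx",
    opset_version=13,  # matches {"target_opset": 13} in the failing test
    input_names=["input"],
    output_names=["output"],
)
print("exported model.onnx with opset 13")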