From 021d9e3a82170dda2c2d9f12c6e652623ea74d73 Mon Sep 17 00:00:00 2001
From: Billy Hu
Date: Fri, 26 Apr 2024 12:49:48 -0700
Subject: [PATCH] [promptflow-evals] Remove skip markers for the tests that
 previously failed due to a circular dependency (#3042)

# Description

This change removes the `@pytest.mark.skip` markers from three unit tests:
`test_wrong_target` and `test_apply_target_to_data` in `test_evaluate.py`, and
`test_load_and_run_evaluators` in `test_save_eval.py`. The tests had been
skipped because a circular dependency caused a
`SpawnedForkProcessManagerStartFailure` in CI; with that dependency issue
resolved, the tests are re-enabled.
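For context, a minimal illustrative sketch of the pytest mechanism involved
(hypothetical test names, not the promptflow-evals test code): a
`@pytest.mark.skip` marker makes pytest report a test as skipped without
executing it, so deleting the marker is all that is needed to re-enable a test.

```python
import pytest

# Illustrative only -- not the actual promptflow-evals tests.
@pytest.mark.skip(reason="TODO: Failed in CI due to SpawnedForkProcessManagerStartFailure")
def test_still_skipped():
    # Collected by pytest but reported as SKIPPED; the body never runs.
    assert False

def test_re_enabled():
    # No skip marker (the state of the three tests after this PR), so it runs normally.
    assert True
```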
# All Promptflow Contribution checklist:
- [ ] **The pull request does not introduce [breaking changes].**
- [ ] **CHANGELOG is updated for new features, bug fixes or other significant changes.**
- [ ] **I have read the [contribution guidelines](../CONTRIBUTING.md).**
- [ ] **Create an issue and link to the pull request to get dedicated review from promptflow team. Learn more: [suggested workflow](../CONTRIBUTING.md#suggested-workflow).**

## General Guidelines and Best Practices
- [ ] Title of the pull request is clear and informative.
- [ ] There are a small number of commits, each of which has an informative message. This means that previously merged commits do not appear in the history of the PR. For more information on cleaning up the commits in your PR, [see this page](https://github.com/Azure/azure-powershell/blob/master/documentation/development-docs/cleaning-up-commits.md).

### Testing Guidelines
- [ ] Pull request includes test coverage for the included changes.
---
 src/promptflow-evals/tests/evals/unittests/test_evaluate.py  | 2 --
 src/promptflow-evals/tests/evals/unittests/test_save_eval.py | 1 -
 2 files changed, 3 deletions(-)

diff --git a/src/promptflow-evals/tests/evals/unittests/test_evaluate.py b/src/promptflow-evals/tests/evals/unittests/test_evaluate.py
index 891e45357f8..7f668aa8b70 100644
--- a/src/promptflow-evals/tests/evals/unittests/test_evaluate.py
+++ b/src/promptflow-evals/tests/evals/unittests/test_evaluate.py
@@ -111,7 +111,6 @@ def test_evaluate_missing_required_inputs_target(self, questions_wrong_file):
             evaluate(data=questions_wrong_file, evaluators={"g": F1ScoreEvaluator()}, target=_target_fn)
         assert "Missing required inputs for target : ['question']." in exc_info.value.args[0]
 
-    @pytest.mark.skip(reason="TODO: Failed in CI due to SpawnedForkProcessManagerStartFailure")
     def test_wrong_target(self, questions_file):
         """Test error, when target function does not generate required column."""
         with pytest.raises(ValueError) as exc_info:
@@ -120,7 +119,6 @@ def test_wrong_target(self, questions_file):
 
         assert "Missing required inputs for evaluator g : ['ground_truth']." in exc_info.value.args[0]
 
-    @pytest.mark.skip(reason="TODO: Failed in CI due to SpawnedForkProcessManagerStartFailure")
     def test_apply_target_to_data(self, pf_client, questions_file, questions_answers_file):
         """Test that target was applied correctly."""
         initial_data = pd.read_json(questions_file, lines=True)
diff --git a/src/promptflow-evals/tests/evals/unittests/test_save_eval.py b/src/promptflow-evals/tests/evals/unittests/test_save_eval.py
index 8488a6b0ebf..4259a3fbd95 100644
--- a/src/promptflow-evals/tests/evals/unittests/test_save_eval.py
+++ b/src/promptflow-evals/tests/evals/unittests/test_save_eval.py
@@ -44,7 +44,6 @@ def test_save_rai_evaluators(self, tmpdir, pf_client, rai_evaluator):
         pf_client.flows.save(rai_evaluator, path=tmpdir)
         assert os.path.isfile(os.path.join(tmpdir, "flow.flex.yaml"))
 
-    @pytest.mark.skip(reason="TODO: Failed in CI due to SpawnedForkProcessManagerStartFailure")
     def test_load_and_run_evaluators(self, tmpdir, pf_client, data_file) -> None:
         """Test regular evaluator saving."""
         from promptflow.evals.evaluators import F1ScoreEvaluator