From ef76687b71e15dbf88bf63ae9d87490ba75d56dc Mon Sep 17 00:00:00 2001
From: chw-microsoft <95913588+chw-microsoft@users.noreply.github.com>
Date: Tue, 22 Aug 2023 12:16:08 +0800
Subject: [PATCH] update WrappedOpenAIError definition and remove to_dict
 override (#135)

Co-authored-by: Peiwen Gao
---
 .../promptflow/tools/exception.py             | 45 ++++++--------
 src/promptflow-tools/tests/test_common.py     | 10 ++-
 src/promptflow-tools/tests/test_embedding.py  |  5 +-
 .../tests/test_handle_openai_error.py         | 61 ++++++++-----------
 4 files changed, 51 insertions(+), 70 deletions(-)

diff --git a/src/promptflow-tools/promptflow/tools/exception.py b/src/promptflow-tools/promptflow/tools/exception.py
index 19fec59dfa4..f2a99391790 100644
--- a/src/promptflow-tools/promptflow/tools/exception.py
+++ b/src/promptflow-tools/promptflow/tools/exception.py
@@ -1,6 +1,6 @@
 from openai.error import OpenAIError
 
-from promptflow.exceptions import ErrorTarget, SystemErrorException, UserErrorException, infer_error_code_from_class
+from promptflow.exceptions import ErrorTarget, SystemErrorException, UserErrorException
 
 openai_error_code_ref_message = "Error reference: https://platform.openai.com/docs/guides/error-codes/api-errors"
 
@@ -44,38 +44,29 @@ def __init__(self, ex: OpenAIError, **kwargs):
     def message(self):
         return str(to_openai_error_message(self._ex))
 
-    def to_dict(self, *, include_debug_info=False):
-        """Return a dict representation of the exception.
+    @property
+    def error_codes(self):
+        """The hierarchy of the error codes.
 
-        This dict specification corresponds to the specification of the Microsoft API Guidelines:
+        We follow the "Microsoft REST API Guidelines" to define error codes in a hierarchy style.
+        See the below link for details:
         https://github.com/microsoft/api-guidelines/blob/vNext/Guidelines.md#7102-error-condition-responses
 
-        Note that this dict representation the "error" field in the response body of the API.
-        The whole error response is then populated in another place outside of this class.
-        """
+        This list will be converted into an error code hierarchy by the prompt flow framework.
+        For this case, it will be converted into a data structure that is equivalent to:
 
-        result = {
-            "code": infer_error_code_from_class(UserErrorException),
-            "message": self.message,
-            "messageFormat": "",
-            "messageParameters": {},
-            "innerError": {
-                "code": "OpenAIError",
+        {
+            "code": "UserError",
             "innerError": {
-                "code": self._ex.__class__.__name__,
-                "innerError": None
+                "code": "OpenAIError",
+                "innerError": {
+                    "code": self._ex.__class__.__name__,
+                    "innerError": None
+                }
             }
-            },
-            "referenceCode": self.reference_code
-        }
-
-        if self.additional_info:
-            result["additionalInfo"] = [{"type": k, "info": v} for k, v in self.additional_info.items()]
-
-        if include_debug_info:
-            result["debugInfo"] = self.debug_info
-
-        return result
+        }
+        """
+        return ["UserError", "OpenAIError", self._ex.__class__.__name__]
 
 
 class ExceedMaxRetryTimes(WrappedOpenAIError):
diff --git a/src/promptflow-tools/tests/test_common.py b/src/promptflow-tools/tests/test_common.py
index 1bc2a5a7e88..545a9c04c00 100644
--- a/src/promptflow-tools/tests/test_common.py
+++ b/src/promptflow-tools/tests/test_common.py
@@ -1,6 +1,5 @@
 import pytest
 
-from promptflow.exceptions import ErrorResponse
 from promptflow.tools.common import parse_function_role_prompt, ChatAPIInvalidFunctions, validate_functions, \
     process_function_call
 
@@ -22,11 +21,11 @@ class TestCommon:
         ],
     )
     def test_chat_api_invalid_functions(self, functions, error_message):
+        error_codes = "UserError/ToolValidationError/ChatAPIInvalidFunctions"
         with pytest.raises(ChatAPIInvalidFunctions) as exc_info:
             validate_functions(functions)
         assert error_message in exc_info.value.message
-        assert "UserError/ToolValidationError/ChatAPIInvalidFunctions" == ErrorResponse.from_exception(
-            exc_info.value).error_code_hierarchy
+        assert exc_info.value.error_codes == error_codes.split("/")
 
     @pytest.mark.parametrize(
         "function_call, error_message",
@@ -40,12 +39,11 @@ def test_chat_api_invalid_functions(self, functions, error_message):
         ],
     )
     def test_chat_api_invalid_function_call(self, function_call, error_message):
+        error_codes = "UserError/ToolValidationError/ChatAPIInvalidFunctions"
         with pytest.raises(ChatAPIInvalidFunctions) as exc_info:
             process_function_call(function_call)
-
         assert error_message in exc_info.value.message
-        assert "UserError/ToolValidationError/ChatAPIInvalidFunctions" == ErrorResponse.from_exception(
-            exc_info.value).error_code_hierarchy
+        assert exc_info.value.error_codes == error_codes.split("/")
 
     def test_parse_function_role_prompt(self):
         function_str = "name:\n get_location \n\ncontent:\nBoston\nabc"
diff --git a/src/promptflow-tools/tests/test_embedding.py b/src/promptflow-tools/tests/test_embedding.py
index bc85bb5c2c7..48b07fe4dc0 100644
--- a/src/promptflow-tools/tests/test_embedding.py
+++ b/src/promptflow-tools/tests/test_embedding.py
@@ -1,6 +1,5 @@
 import pytest
 
-from promptflow.exceptions import ErrorResponse
 from promptflow.tools.embedding import embedding
 from promptflow.tools.exception import InvalidConnectionType
 
@@ -23,7 +22,7 @@ def test_embedding_conn_oai(self, open_ai_connection):
         assert len(result) == 1536
 
     def test_embedding_invalid_connection_type(self, serp_connection):
+        error_codes = "UserError/ToolValidationError/InvalidConnectionType"
         with pytest.raises(InvalidConnectionType) as exc_info:
             embedding(connection=serp_connection, input="hello", deployment_name="text-embedding-ada-002")
-        assert "UserError/ToolValidationError/InvalidConnectionType" == ErrorResponse.from_exception(
-            exc_info.value).error_code_hierarchy
+        assert exc_info.value.error_codes == error_codes.split("/")
diff --git a/src/promptflow-tools/tests/test_handle_openai_error.py b/src/promptflow-tools/tests/test_handle_openai_error.py
index fa7b9f78f35..a7ccfeae130 100644
--- a/src/promptflow-tools/tests/test_handle_openai_error.py
+++ b/src/promptflow-tools/tests/test_handle_openai_error.py
@@ -12,7 +12,7 @@
 )
 from pytest_mock import MockerFixture
 
-from promptflow.exceptions import UserErrorException, ErrorResponse
+from promptflow.exceptions import UserErrorException
 from promptflow.tools.aoai import chat, completion
 from promptflow.tools.common import handle_openai_error
 
@@ -26,11 +26,11 @@ class TestHandleOpenAIError:
     def test_aoai_chat_message_invalid_format(self, aoai_provider):
         # chat api prompt should follow the format of "system:\nmessage1\nuser:\nmessage2".
         prompt = "what is your name"
+        error_codes = "UserError/ToolValidationError/ChatAPIInvalidRole"
         with pytest.raises(ChatAPIInvalidRole,
                            match="The Chat API requires a specific format for prompt") as exc_info:
             aoai_provider.chat(prompt=prompt, deployment_name="gpt-35-turbo")
-        assert "UserError/ToolValidationError/ChatAPIInvalidRole" == ErrorResponse.from_exception(
-            exc_info.value).error_code_hierarchy
+        assert exc_info.value.error_codes == error_codes.split("/")
 
     def test_aoai_authencation_error_with_bad_api_key(self, azure_open_ai_connection):
         azure_open_ai_connection.api_key = "hello"
@@ -41,12 +41,11 @@ def test_aoai_authencation_error_with_bad_api_key(self, azure_open_ai_connection
             "correct regional API endpoint for your resource."
         )
         error_msg = to_openai_error_message(AuthenticationError(message=raw_message))
-        error_code = "UserError/OpenAIError/AuthenticationError"
+        error_codes = "UserError/OpenAIError/AuthenticationError"
         with pytest.raises(WrappedOpenAIError) as exc_info:
             chat(azure_open_ai_connection, prompt=f"user:\n{prompt_template}", deployment_name="gpt-35-turbo")
         assert error_msg == exc_info.value.message
-        assert error_code == ErrorResponse.from_exception(
-            exc_info.value).error_code_hierarchy
+        assert exc_info.value.error_codes == error_codes.split("/")
 
     def test_aoai_connection_error_with_bad_api_base(self, azure_open_ai_connection):
         """
@@ -57,12 +56,11 @@ def test_aoai_connection_error_with_bad_api_base(self, azure_open_ai_connection
         """
         azure_open_ai_connection.api_base = "https://gpt-test-eus11.openai.azure.com/"
         prompt_template = "please complete this sentence: world war II "
-        error_code = "UserError/OpenAIError/APIConnectionError"
+        error_codes = "UserError/OpenAIError/APIConnectionError"
         with pytest.raises(WrappedOpenAIError) as exc_info:
             chat(azure_open_ai_connection, prompt=f"user:\n{prompt_template}", deployment_name="gpt-35-turbo")
         assert openai_error_code_ref_message in exc_info.value.message
-        assert error_code == ErrorResponse.from_exception(
-            exc_info.value).error_code_hierarchy
+        assert exc_info.value.error_codes == error_codes.split("/")
 
     def test_aoai_invalid_request_error_with_bad_api_version(self, azure_open_ai_connection):
         """InvalidRequestError: Resource not found"""
@@ -70,13 +68,12 @@ def test_aoai_invalid_request_error_with_bad_api_version(self, azure_open_ai_con
         prompt_template = "please complete this sentence: world war II "
         raw_message = "Resource not found"
         error_msg = to_openai_error_message(InvalidRequestError(message=raw_message, param=None))
-        error_code = "UserError/OpenAIError/InvalidRequestError"
+        error_codes = "UserError/OpenAIError/InvalidRequestError"
         # Chat will throw: Exception occurs: InvalidRequestError: Resource not found
         with pytest.raises(WrappedOpenAIError) as exc_info:
             chat(azure_open_ai_connection, prompt=f"user:\n{prompt_template}", deployment_name="gpt-35-turbo")
         assert error_msg == exc_info.value.message
-        assert error_code == ErrorResponse.from_exception(
-            exc_info.value).error_code_hierarchy
+        assert exc_info.value.error_codes == error_codes.split("/")
 
     def test_aoai_invalid_request_error_with_bad_api_type(self, azure_open_ai_connection):
         """
@@ -90,12 +87,11 @@ def test_aoai_invalid_request_error_with_bad_api_type(self, azure_open_ai_connec
         """
         raw_message = (
             "The API type provided in invalid. Please select one of the supported API types: "
             "'azure', 'azure_ad', 'open_ai'"
         )
         error_msg = to_openai_error_message(InvalidAPIType(message=raw_message))
-        error_code = "UserError/OpenAIError/InvalidAPIType"
+        error_codes = "UserError/OpenAIError/InvalidAPIType"
         with pytest.raises(WrappedOpenAIError) as exc_info:
             chat(azure_open_ai_connection, prompt=f"user:\n{prompt_template}", deployment_name="gpt-35-turbo")
         assert error_msg == exc_info.value.message
-        assert error_code == ErrorResponse.from_exception(
-            exc_info.value).error_code_hierarchy
+        assert exc_info.value.error_codes == error_codes.split("/")
 
     def test_aoai_invalid_request_error_with_bad_deployment(self, aoai_provider):
         """
@@ -110,34 +106,31 @@ def test_aoai_invalid_request_error_with_bad_deployment(self, aoai_provider):
         """
         prompt_template = "please complete this sentence: world war II "
         deployment = "hello"
         raw_message = (
             "The API deployment for this resource does not exist. If you created the deployment "
             "within the last 5 minutes, please wait a moment and try again."
         )
         error_msg = to_openai_error_message(InvalidRequestError(message=raw_message, param=None))
-        error_code = "UserError/OpenAIError/InvalidRequestError"
+        error_codes = "UserError/OpenAIError/InvalidRequestError"
         with pytest.raises(WrappedOpenAIError) as exc_info:
             aoai_provider.chat(prompt=f"user:\n{prompt_template}", deployment_name=deployment)
         assert error_msg == exc_info.value.message
-        assert error_code == ErrorResponse.from_exception(
-            exc_info.value).error_code_hierarchy
+        assert exc_info.value.error_codes == error_codes.split("/")
 
     def test_rate_limit_error_insufficient_quota(self, azure_open_ai_connection, mocker: MockerFixture):
         dummyEx = RateLimitError("Something went wrong", json_body={"error": {"type": "insufficient_quota"}})
         mock_method = mocker.patch("promptflow.tools.aoai.openai.Completion.create", side_effect=dummyEx)
-        error_code = "UserError/OpenAIError/RateLimitError"
+        error_codes = "UserError/OpenAIError/RateLimitError"
         with pytest.raises(WrappedOpenAIError) as exc_info:
             completion(connection=azure_open_ai_connection, prompt="hello", deployment_name="text-ada-001")
         assert to_openai_error_message(dummyEx) == exc_info.value.message
         assert mock_method.call_count == 1
-        assert error_code == ErrorResponse.from_exception(
-            exc_info.value).error_code_hierarchy
+        assert exc_info.value.error_codes == error_codes.split("/")
 
     def test_non_retriable_connection_error(self, azure_open_ai_connection, mocker: MockerFixture):
         dummyEx = APIConnectionError("Something went wrong")
         mock_method = mocker.patch("promptflow.tools.aoai.openai.Completion.create", side_effect=dummyEx)
-        error_code = "UserError/OpenAIError/APIConnectionError"
+        error_codes = "UserError/OpenAIError/APIConnectionError"
         with pytest.raises(WrappedOpenAIError) as exc_info:
             completion(connection=azure_open_ai_connection, prompt="hello", deployment_name="text-ada-001")
         assert to_openai_error_message(dummyEx) == exc_info.value.message
         assert mock_method.call_count == 1
-        assert error_code == ErrorResponse.from_exception(
-            exc_info.value).error_code_hierarchy
+        assert exc_info.value.error_codes == error_codes.split("/")
 
     @pytest.mark.parametrize(
         "dummyExceptionList",
@@ -169,8 +162,8 @@ def test_retriable_openai_error_handle(self, mocker: MockerFixture, dummyExcepti
 
         assert patched_test_method.call_count == max_retry + 1
         assert "Exceed max retry times. " + to_openai_error_message(dummyEx) == exc_info.value.message
-        assert "UserError/OpenAIError/" + type(dummyEx).__name__ == ErrorResponse.from_exception(
-            exc_info.value).error_code_hierarchy
+        error_codes = "UserError/OpenAIError/" + type(dummyEx).__name__
+        assert exc_info.value.error_codes == error_codes.split("/")
         expected_calls = [
             mocker.call(delay),
             mocker.call(delay * 2),
@@ -211,8 +204,8 @@ def test_retriable_openai_error_handle_with_header(
 
         assert patched_test_method.call_count == max_retry + 1
         assert "Exceed max retry times. " + to_openai_error_message(dummyEx) == exc_info.value.message
-        assert "UserError/OpenAIError/" + type(dummyEx).__name__ == ErrorResponse.from_exception(
-            exc_info.value).error_code_hierarchy
+        error_codes = "UserError/OpenAIError/" + type(dummyEx).__name__
+        assert exc_info.value.error_codes == error_codes.split("/")
         expected_calls = [
             mocker.call(header_delay),
             mocker.call(header_delay * 2),
@@ -240,32 +233,32 @@ def test_non_retriable_openai_error_handle(
         with pytest.raises(UserErrorException) as exc_info:
             completion(connection=azure_open_ai_connection, prompt="hello", deployment_name="text-ada-001")
         assert to_openai_error_message(dummyEx) == exc_info.value.message
-        assert "UserError/OpenAIError/" + type(dummyEx).__name__ == ErrorResponse.from_exception(
-            exc_info.value).error_code_hierarchy
+        error_codes = "UserError/OpenAIError/" + type(dummyEx).__name__
+        assert exc_info.value.error_codes == error_codes.split("/")
         assert mock_method.call_count == 1
 
     def test_unexpected_error_handle(self, azure_open_ai_connection, mocker: MockerFixture):
         dummyEx = Exception("Something went wrong")
         mock_method = mocker.patch("promptflow.tools.aoai.openai.ChatCompletion.create", side_effect=dummyEx)
+        error_codes = "UserError/LLMError"
         with pytest.raises(LLMError) as exc_info:
             chat(connection=azure_open_ai_connection, prompt="user:\nhello", deployment_name="gpt-35-turbo")
         assert to_openai_error_message(dummyEx) != exc_info.value.args[0]
         assert "OpenAI API hits exception: Exception: Something went wrong" == exc_info.value.message
         assert mock_method.call_count == 1
-        assert "UserError/LLMError" == ErrorResponse.from_exception(
-            exc_info.value).error_code_hierarchy
+        assert exc_info.value.error_codes == error_codes.split("/")
 
     def test_template_syntax_error_handle(self, azure_open_ai_connection, mocker: MockerFixture):
         dummyEx = TemplateSyntaxError(message="Something went wrong", lineno=1)
         mock_method = mocker.patch("jinja2.Template.__new__", side_effect=dummyEx)
+        error_codes = "UserError/ToolValidationError/JinjaTemplateError"
         with pytest.raises(JinjaTemplateError) as exc_info:
             chat(connection=azure_open_ai_connection, prompt="user:\nhello", deployment_name="gpt-35-turbo")
         error_message = "Failed to render jinja template: TemplateSyntaxError: Something went wrong\n line 1. " \
                         + "Please modify your prompt to fix the issue."
         assert error_message == exc_info.value.message
         assert mock_method.call_count == 1
-        assert "UserError/ToolValidationError/JinjaTemplateError" == ErrorResponse.from_exception(
-            exc_info.value).error_code_hierarchy
+        assert exc_info.value.error_codes == error_codes.split("/")
 
     @pytest.mark.skip_if_no_key("open_ai_connection")
     def test_model_not_accept_functions_as_param(
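
For reference, a minimal sketch of how a consumer could fold the flat error_codes list exposed by WrappedOpenAIError into the nested "innerError" structure described in the docstring above. The to_error_response helper and its use of functools.reduce are an illustrative assumption for this note, not promptflow's actual framework code:

# Hypothetical helper, not part of this patch or of promptflow itself.
from functools import reduce


def to_error_response(error_codes):
    # Fold the hierarchy list into the nested shape from the
    # Microsoft REST API Guidelines, wrapping from the innermost code out.
    return reduce(
        lambda inner, code: {"code": code, "innerError": inner},
        reversed(error_codes),
        None,
    )


assert to_error_response(["UserError", "OpenAIError", "AuthenticationError"]) == {
    "code": "UserError",
    "innerError": {
        "code": "OpenAIError",
        "innerError": {"code": "AuthenticationError", "innerError": None},
    },
}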