diff --git a/newrelic/hooks/mlmodel_langchain.py b/newrelic/hooks/mlmodel_langchain.py
index f4f338bfc..67454e383 100644
--- a/newrelic/hooks/mlmodel_langchain.py
+++ b/newrelic/hooks/mlmodel_langchain.py
@@ -700,16 +700,19 @@ def _create_successful_chain_run_events(
     trace_id = linking_metadata.get("trace.id")
     input_message_list = [_input]
     output_message_list = []
-    try:
-        output_message_list = [response[0]] if response else []
-    except:
+    if isinstance(response, str):
+        output_message_list = [response]
+    else:
         try:
-            output_message_list = [str(response)]
-        except Exception as e:
-            _logger.warning(
-                "Unable to capture response inside langchain chain instrumentation. No response message event will be captured. Report this issue to New Relic Support.\n%s",
-                traceback.format_exception(*sys.exc_info()),
-            )
+            output_message_list = [response[0]] if response else []
+        except:
+            try:
+                output_message_list = [str(response)]
+            except Exception as e:
+                _logger.warning(
+                    "Unable to capture response inside langchain chain instrumentation. No response message event will be captured. Report this issue to New Relic Support.\n%s",
+                    traceback.format_exception(*sys.exc_info()),
+                )
 
     # Make sure the builtin attributes take precedence over metadata attributes.
     full_chat_completion_summary_dict = {f"metadata.{key}": value for key, value in metadata.items()}
diff --git a/tests/mlmodel_langchain/_mock_external_openai_server.py b/tests/mlmodel_langchain/_mock_external_openai_server.py
index f56269b46..a951d499b 100644
--- a/tests/mlmodel_langchain/_mock_external_openai_server.py
+++ b/tests/mlmodel_langchain/_mock_external_openai_server.py
@@ -381,6 +381,42 @@
             "system_fingerprint": None,
         },
     ],
+    "You are a helpful assistant who generates a random first name. A user will pass in a first letter, and you should generate a name that starts with that first letter.": [
+        {
+            "Content-Type": "application/json",
+            "openai-model": "gpt-3.5-turbo-0613",
+            "openai-organization": "foobar-jtbczk",
+            "openai-processing-ms": "488",
+            "openai-version": "2020-10-01",
+            "x-ratelimit-limit-requests": "200",
+            "x-ratelimit-limit-tokens": "40000",
+            "x-ratelimit-limit-tokens_usage_based": "40000",
+            "x-ratelimit-remaining-requests": "199",
+            "x-ratelimit-remaining-tokens": "39921",
+            "x-ratelimit-remaining-tokens_usage_based": "39921",
+            "x-ratelimit-reset-requests": "7m12s",
+            "x-ratelimit-reset-tokens": "118ms",
+            "x-ratelimit-reset-tokens_usage_based": "118ms",
+            "x-request-id": "f3de99e17ccc360430cffa243b74dcbd",
+        },
+        200,
+        {
+            "id": "chatcmpl-8XEjOPNHth7yS2jt1You3fEwB6w9i",
+            "object": "chat.completion",
+            "created": 1702932142,
+            "model": "gpt-3.5-turbo-0613",
+            "choices": [
+                {
+                    "index": 0,
+                    "message": {"role": "assistant", "content": "Milo"},
+                    "logprobs": None,
+                    "finish_reason": "stop",
+                }
+            ],
+            "usage": {"prompt_tokens": 60, "completion_tokens": 9, "total_tokens": 69},
+            "system_fingerprint": None,
+        },
+    ],
     "9906": [
         {
             "content-type": "application/json",
diff --git a/tests/mlmodel_langchain/test_chain.py b/tests/mlmodel_langchain/test_chain.py
index 4c5811113..1a3cbbfd7 100644
--- a/tests/mlmodel_langchain/test_chain.py
+++ b/tests/mlmodel_langchain/test_chain.py
@@ -532,7 +532,7 @@
             "ingest_source": "Python",
             "is_response": True,
             "virtual_llm": True,
-            "content": "`",
+            "content": "```html\nMath Quiz\nMath Quiz Questions\n1. What is the result of 5 + 3?\n2. \n3. What is the product of 6 x 7?\n4. \n5. What is the square root of 64?\n6. \n7. What is the result of 12 / 4?\n8. \n9. What is the sum of 15 + 9?\n10. \n```",
         },
     ],
     [
@@ -553,6 +553,60 @@
     ],
 ]
 
+chat_completion_recorded_events_str_response = [
+    (
+        {"type": "LlmChatCompletionSummary"},
+        {
+            "id": None,
+            "llm.conversation_id": "my-awesome-id",
+            "llm.foo": "bar",
+            "span_id": None,
+            "trace_id": "trace-id",
+            "vendor": "langchain",
+            "ingest_source": "Python",
+            "virtual_llm": True,
+            "request_id": None,
+            "duration": None,
+            "response.number_of_messages": 2,
+            "metadata.id": "123",
+        },
+    ),
+    (
+        {"type": "LlmChatCompletionMessage"},
+        {
+            "id": None,
+            "llm.conversation_id": "my-awesome-id",
+            "llm.foo": "bar",
+            "request_id": None,
+            "span_id": None,
+            "trace_id": "trace-id",
+            "content": "{'text': 'M'}",
+            "completion_id": None,
+            "sequence": 0,
+            "vendor": "langchain",
+            "ingest_source": "Python",
+            "virtual_llm": True,
+        },
+    ),
+    (
+        {"type": "LlmChatCompletionMessage"},
+        {
+            "id": None,
+            "llm.conversation_id": "my-awesome-id",
+            "llm.foo": "bar",
+            "request_id": None,
+            "span_id": None,
+            "trace_id": "trace-id",
+            "content": "Milo",
+            "completion_id": None,
+            "sequence": 1,
+            "vendor": "langchain",
+            "ingest_source": "Python",
+            "is_response": True,
+            "virtual_llm": True,
+        },
+    ),
+]
 chat_completion_recorded_events_list_response = [
     (
         {"type": "LlmChatCompletionSummary"},
@@ -682,6 +736,35 @@
 ]
 
 
+@reset_core_stats_engine()
+@validate_custom_events(events_with_context_attrs(chat_completion_recorded_events_str_response))
+@validate_custom_event_count(count=7)
+@validate_transaction_metrics(
+    name="test_chain:test_langchain_chain_str_response",
+    scoped_metrics=[("Llm/chain/LangChain/invoke", 1)],
+    rollup_metrics=[("Llm/chain/LangChain/invoke", 1)],
+    custom_metrics=[(f"Supportability/Python/ML/LangChain/{langchain.__version__}", 1)],
+    background_task=True,
+)
+@background_task()
+def test_langchain_chain_str_response(set_trace_info, chat_openai_client):
+    set_trace_info()
+    add_custom_attribute("llm.conversation_id", "my-awesome-id")
+    add_custom_attribute("llm.foo", "bar")
+    add_custom_attribute("non_llm_attr", "python-agent")
+
+    template = """You are a helpful assistant who generates a random first name. A user will pass in a first letter, and you should generate a name that starts with that first letter."""
+    human_template = "{text}"
+
+    chat_prompt = langchain_core.prompts.ChatPromptTemplate.from_messages(
+        [("system", template), ("human", human_template)]
+    )
+    str_output_parser = langchain_core.output_parsers.string.StrOutputParser()
+    chain = chat_prompt | chat_openai_client | str_output_parser
+    with WithLlmCustomAttributes({"context": "attr"}):
+        chain.invoke({"text": "M"}, config={"metadata": {"id": "123"}})
+
+
 @reset_core_stats_engine()
 @validate_custom_events(events_with_context_attrs(chat_completion_recorded_events_list_response))
 @validate_custom_event_count(count=7)