I have a multi-agent group chat that follows the resuming logic in https://microsoft.github.io/autogen/docs/topics/groupchat/resuming_groupchat. When the group chat involves tool calls, resuming fails with this error:

BadRequestError: Error code: 400 - {'error': {'message': "Invalid parameter: messages with role 'tool' must be a response to a preceeding message with 'tool_calls'.", 'type': 'invalid_request_error', 'param': 'messages.[2].role', 'code': None}}
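For context, the 400 refers to a Chat Completions ordering rule: a message with role 'tool' is only valid immediately after an assistant message whose tool_calls entry it answers. The sketch below illustrates the rule with made-up payloads (the call id and contents are illustrative, not taken from the failing run):

# OK: the 'tool' message answers the preceding assistant message's tool_calls.
valid_messages = [
    {"role": "user", "content": "what is the length of the last question?"},
    {
        "role": "assistant",
        "content": None,
        "tool_calls": [
            {
                "id": "call_1",
                "type": "function",
                "function": {"name": "length_calculator", "arguments": '{"question": "how are you"}'},
            }
        ],
    },
    {"role": "tool", "tool_call_id": "call_1", "content": "11"},
]

# 400: the 'tool' message has no preceding assistant message carrying tool_calls,
# which is exactly what 'messages.[2].role' in the error points at.
invalid_messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "what is the length of the last question?"},
    {"role": "tool", "tool_call_id": "call_1", "content": "11"},
]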
Steps to reproduce
from typing import Annotated

import autogen


def length_calculator(question: Annotated[str, "question"]) -> int:
    return len(question)


user_proxy = autogen.UserProxyAgent(
    name="User_proxy",
    system_message="A human admin.",
    code_execution_config={
        "work_dir": "groupchat",
        "use_docker": False,
    },  # Please set use_docker=True if docker is available to run the generated code. Using docker is safer than running the generated code directly.
    human_input_mode="NEVER",
)
coder = autogen.AssistantAgent(
    name="Coder",
    llm_config=gpt4_config,
    system_message="You are responsible for calculating the length of the question. Please use the registered calculator tool to help you.",
)
number_agent = autogen.AssistantAgent(
    name="number_agent",
    system_message="You are responsible for returning the length of the question to the user.",
    llm_config=gpt4_config,
)
autogen.register_function(
    length_calculator,
    caller=coder,  # The assistant agent can suggest calls to the calculator.
    executor=user_proxy,  # The user proxy agent can execute the calculator calls.
    name="length_calculator",  # By default, the function name is used as the tool name.
    description="A simple length calculator.",  # A description of the tool.
)
groupchat = autogen.GroupChat(agents=[user_proxy, coder, number_agent], messages=[], max_round=5)
manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=gpt4_config)
chat_result = user_proxy.initiate_chat(manager, message="how are you")

previous_state = manager.messages_to_string(manager.groupchat.messages)
# Prepare the group chat for resuming using the previous messages. We don't need to
# remove the TERMINATE string as we aren't using the last message for resuming.
last_agent, last_message = manager.resume(messages=previous_state)
# Resume the chat using a different agent and message
result = user_proxy.initiate_chat(
    recipient=manager,
    message="what is the length of the last question?",
    clear_history=False,
)
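One workaround worth trying before manager.resume (a sketch only: flatten_tool_messages is a hypothetical helper, and it assumes messages_to_string returns a JSON list of OpenAI-style message dicts, which I have not verified against autogen 0.2.31) is to rewrite tool-call turns in the saved state into plain text, so the resumed transcript can never begin with an orphaned 'tool' message:

import json

def flatten_tool_messages(previous_state: str) -> str:
    """Rewrite tool-call turns in a saved group chat state into plain text."""
    messages = json.loads(previous_state)
    flattened = []
    for msg in messages:
        if msg.get("role") == "tool":
            # Replace the tool result with an ordinary text message.
            flattened.append({"role": "user", "content": f"Tool result: {msg.get('content', '')}"})
        elif msg.get("tool_calls"):
            # Summarize the assistant's tool request as plain text.
            names = ", ".join(tc["function"]["name"] for tc in msg["tool_calls"])
            flattened.append({"role": "assistant", "content": f"(requested tools: {names})"})
        else:
            flattened.append(msg)
    return json.dumps(flattened)

Usage would be previous_state = flatten_tool_messages(previous_state) right before manager.resume(messages=previous_state).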
Model Used
gpt-4o-mini
Resume chat without tool calls
If I don't register the tool, the same code runs successfully and the resumed chat generates results.
from typing import Annotated

import autogen


def length_calculator(question: Annotated[str, "question"]) -> int:
    return len(question)


user_proxy = autogen.UserProxyAgent(
    name="User_proxy",
    system_message="A human admin.",
    code_execution_config={
        "work_dir": "groupchat",
        "use_docker": False,
    },  # Please set use_docker=True if docker is available to run the generated code. Using docker is safer than running the generated code directly.
    human_input_mode="NEVER",
)
coder = autogen.AssistantAgent(
    name="Coder",
    llm_config=gpt4_config,
    system_message="You are responsible for calculating the length of the question. Please use the registered calculator tool to help you.",
)
number_agent = autogen.AssistantAgent(
    name="number_agent",
    system_message="You are responsible for returning the length of the question to the user.",
    llm_config=gpt4_config,
)
# autogen.register_function(
#     length_calculator,
#     caller=coder,  # The assistant agent can suggest calls to the calculator.
#     executor=user_proxy,  # The user proxy agent can execute the calculator calls.
#     name="length_calculator",  # By default, the function name is used as the tool name.
#     description="A simple length calculator.",  # A description of the tool.
# )
groupchat = autogen.GroupChat(agents=[user_proxy, coder, number_agent], messages=[], max_round=5)
manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=gpt4_config)
chat_result = user_proxy.initiate_chat(manager, message="how are you")

previous_state = manager.messages_to_string(manager.groupchat.messages)
# Prepare the group chat for resuming using the previous messages. We don't need to
# remove the TERMINATE string as we aren't using the last message for resuming.
last_agent, last_message = manager.resume(messages=previous_state)
# Resume the chat using a different agent and message
result = user_proxy.initiate_chat(
    recipient=manager,
    message="what is the length of the last question?",
    clear_history=False,
)
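To pin down where the orphaned message comes from, the saved state can be checked before resuming. A quick diagnostic (again assuming messages_to_string yields a JSON list of message dicts):

import json

# Flag any 'tool' message that does not directly follow an assistant message
# with 'tool_calls' -- the exact condition the 400 error complains about.
msgs = json.loads(previous_state)
for i, m in enumerate(msgs):
    if m.get("role") == "tool":
        prev = msgs[i - 1] if i > 0 else {}
        if not prev.get("tool_calls"):
            print(f"orphaned tool message at index {i}: {m}")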
Screenshots and logs
---------------------------------------------------------------------------
BadRequestError Traceback (most recent call last)
Cell In[38], line 7
5 last_agent, last_message = manager.resume(messages=previous_state)
6 # Resume the chat using a different agent and message
----> 7 result = user_proxy.initiate_chat(
8 recipient=manager,
9 message="what is the length of the last question?",
10 clear_history=False,
11 )
File c:\Users\user_name\AppData\Local\anaconda3\envs\myenv\Lib\site-packages\autogen\agentchat\conversable_agent.py:1018, in ConversableAgent.initiate_chat(self, recipient, clear_history, silent, cache, max_turns, summary_method, summary_args, message, **kwargs)
1016 else:
1017 msg2send = self.generate_init_message(message, **kwargs)
-> 1018 self.send(msg2send, recipient, silent=silent)
1019 summary = self._summarize_chat(
1020 summary_method,
1021 summary_args,
1022 recipient,
1023 cache=cache,
1024 )
1025 for agent in [self, recipient]:
File c:\Users\user_name\AppData\Local\anaconda3\envs\myenv\Lib\site-packages\autogen\agentchat\conversable_agent.py:655, in ConversableAgent.send(self, message, recipient, request_reply, silent)
653 valid = self._append_oai_message(message, "assistant", recipient)
654 if valid:
--> 655 recipient.receive(message, self, request_reply, silent)
656 else:
657 raise ValueError(
658 "Message can't be converted into a valid ChatCompletion message. Either content or function_call must be provided."
659 )
File c:\Users\user_name\AppData\Local\anaconda3\envs\myenv\Lib\site-packages\autogen\agentchat\conversable_agent.py:818, in ConversableAgent.receive(self, message, sender, request_reply, silent)
816 if request_reply is False or request_reply is None and self.reply_at_receive[sender] is False:
817 return
--> 818 reply = self.generate_reply(messages=self.chat_messages[sender], sender=sender)
819 if reply is not None:
820 self.send(reply, sender, silent=silent)
File c:\Users\user_name\AppData\Local\anaconda3\envs\myenv\Lib\site-packages\autogen\agentchat\conversable_agent.py:1972, in ConversableAgent.generate_reply(self, messages, sender, **kwargs)
1970 continue
1971 if self._match_trigger(reply_func_tuple["trigger"], sender):
-> 1972 final, reply = reply_func(self, messages=messages, sender=sender, config=reply_func_tuple["config"])
1973 if logging_enabled():
1974 log_event(
1975 self,
1976 "reply_func_executed",
(...)
1980 reply=reply,
1981 )
File c:\Users\user_name\AppData\Local\anaconda3\envs\myenv\Lib\site-packages\autogen\agentchat\groupchat.py:1052, in GroupChatManager.run_chat(self, messages, sender, config)
1050 iostream.print(colored(f"\nNext speaker: {speaker.name}\n", "green"), flush=True)
1051 # let the speaker speak
-> 1052 reply = speaker.generate_reply(sender=self)
1053 except KeyboardInterrupt:
1054 # let the admin agent speak if interrupted
1055 if groupchat.admin_name in groupchat.agent_names:
1056 # admin agent is one of the participants
File c:\Users\user_name\AppData\Local\anaconda3\envs\myenv\Lib\site-packages\autogen\agentchat\conversable_agent.py:1972, in ConversableAgent.generate_reply(self, messages, sender, **kwargs)
1970 continue
1971 if self._match_trigger(reply_func_tuple["trigger"], sender):
-> 1972 final, reply = reply_func(self, messages=messages, sender=sender, config=reply_func_tuple["config"])
1973 if logging_enabled():
1974 log_event(
1975 self,
1976 "reply_func_executed",
(...)
1980 reply=reply,
1981 )
File c:\Users\user_name\AppData\Local\anaconda3\envs\myenv\Lib\site-packages\autogen\agentchat\conversable_agent.py:1340, in ConversableAgent.generate_oai_reply(self, messages, sender, config)
1338 if messages is None:
1339 messages = self._oai_messages[sender]
-> 1340 extracted_response = self._generate_oai_reply_from_client(
1341 client, self._oai_system_message + messages, self.client_cache
1342 )
1343 return (False, None) if extracted_response is None else (True, extracted_response)
File c:\Users\user_name\AppData\Local\anaconda3\envs\myenv\Lib\site-packages\autogen\agentchat\conversable_agent.py:1359, in ConversableAgent._generate_oai_reply_from_client(self, llm_client, messages, cache)
1356 all_messages.append(message)
1358 # TODO: #1143 handle token limit exceeded error
-> 1359 response = llm_client.create(
1360 context=messages[-1].pop("context", None), messages=all_messages, cache=cache, agent=self
1361 )
1362 extracted_response = llm_client.extract_text_or_completion_object(response)[0]
1364 if extracted_response is None:
File c:\Users\user_name\AppData\Local\anaconda3\envs\myenv\Lib\site-packages\autogen\oai\client.py:697, in OpenAIWrapper.create(self, **config)
695 try:
696 request_ts = get_current_ts()
--> 697 response = client.create(params)
698 except APITimeoutError as err:
699 logger.debug(f"config {i} timed out", exc_info=True)
File c:\Users\user_name\AppData\Local\anaconda3\envs\myenv\Lib\site-packages\autogen\oai\client.py:306, in OpenAIClient.create(self, params)
304 params = params.copy()
305 params["stream"] = False
--> 306 response = completions.create(**params)
308 return response
File c:\Users\user_name\AppData\Local\anaconda3\envs\myenv\Lib\site-packages\openai\_utils\_utils.py:277, in required_args.<locals>.inner.<locals>.wrapper(*args, **kwargs)
275 msg = f"Missing required argument: {quote(missing[0])}"
276 raise TypeError(msg)
--> 277 return func(*args, **kwargs)
File c:\Users\user_name\AppData\Local\anaconda3\envs\myenv\Lib\site-packages\openai\resources\chat\completions.py:606, in Completions.create(self, messages, model, frequency_penalty, function_call, functions, logit_bias, logprobs, max_tokens, n, parallel_tool_calls, presence_penalty, response_format, seed, stop, stream, stream_options, temperature, tool_choice, tools, top_logprobs, top_p, user, extra_headers, extra_query, extra_body, timeout)
573 @required_args(["messages", "model"], ["messages", "model", "stream"])
574 def create(
575 self,
(...)
604 timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
605 ) -> ChatCompletion | Stream[ChatCompletionChunk]:
--> 606 return self._post(
607 "/chat/completions",
608 body=maybe_transform(
609 {
610 "messages": messages,
611 "model": model,
612 "frequency_penalty": frequency_penalty,
613 "function_call": function_call,
614 "functions": functions,
615 "logit_bias": logit_bias,
616 "logprobs": logprobs,
617 "max_tokens": max_tokens,
618 "n": n,
619 "parallel_tool_calls": parallel_tool_calls,
620 "presence_penalty": presence_penalty,
621 "response_format": response_format,
622 "seed": seed,
623 "stop": stop,
624 "stream": stream,
625 "stream_options": stream_options,
626 "temperature": temperature,
627 "tool_choice": tool_choice,
628 "tools": tools,
629 "top_logprobs": top_logprobs,
630 "top_p": top_p,
631 "user": user,
632 },
633 completion_create_params.CompletionCreateParams,
634 ),
635 options=make_request_options(
636 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
637 ),
638 cast_to=ChatCompletion,
639 stream=stream or False,
640 stream_cls=Stream[ChatCompletionChunk],
641 )
File c:\Users\user_name\AppData\Local\anaconda3\envs\myenv\Lib\site-packages\openai\_base_client.py:1240, in SyncAPIClient.post(self, path, cast_to, body, options, files, stream, stream_cls)
1226 def post(
1227 self,
1228 path: str,
(...)
1235 stream_cls: type[_StreamT] | None = None,
1236 ) -> ResponseT | _StreamT:
1237 opts = FinalRequestOptions.construct(
1238 method="post", url=path, json_data=body, files=to_httpx_files(files), **options
1239 )
-> 1240 return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls))
File c:\Users\user_name\AppData\Local\anaconda3\envs\myenv\Lib\site-packages\openai\_base_client.py:921, in SyncAPIClient.request(self, cast_to, options, remaining_retries, stream, stream_cls)
912 def request(
913 self,
914 cast_to: Type[ResponseT],
(...)
919 stream_cls: type[_StreamT] | None = None,
920 ) -> ResponseT | _StreamT:
--> 921 return self._request(
922 cast_to=cast_to,
923 options=options,
924 stream=stream,
925 stream_cls=stream_cls,
926 remaining_retries=remaining_retries,
927 )
File c:\Users\user_name\AppData\Local\anaconda3\envs\myenv\Lib\site-packages\openai\_base_client.py:1020, in SyncAPIClient._request(self, cast_to, options, remaining_retries, stream, stream_cls)
1017 err.response.read()
1019 log.debug("Re-raising status error")
-> 1020 raise self._make_status_error_from_response(err.response) from None
1022 return self._process_response(
1023 cast_to=cast_to,
1024 options=options,
(...)
1027 stream_cls=stream_cls,
1028 )
BadRequestError: Error code: 400 - {'error': {'message': "Invalid parameter: messages with role 'tool' must be a response to a preceeding message with 'tool_calls'.", 'type': 'invalid_request_error', 'param': 'messages.[2].role', 'code': None}}
Additional Information
autogen version: '0.2.31'
Python 3.11