Skip to content

Commit 01f6d9d

Browse files
afourney authored and rysweet committed
Allow users to update the final answer prompt of MagenticOne orc. (#4476)
* Allow users to update the final answer prompt of MagenticOne orchestrator.
1 parent d245bf5 commit 01f6d9d

File tree

2 files changed

+11
-1
lines changed

2 files changed

+11
-1
lines changed

python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_magentic_one/_magentic_one_group_chat.py

+5
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,7 @@
77
from ....base import ChatAgent, TerminationCondition
88
from .._base_group_chat import BaseGroupChat
99
from ._magentic_one_orchestrator import MagenticOneOrchestrator
10+
from ._prompts import ORCHESTRATOR_FINAL_ANSWER_PROMPT
1011

1112
trace_logger = logging.getLogger(TRACE_LOGGER_NAME)
1213
event_logger = logging.getLogger(EVENT_LOGGER_NAME)
@@ -25,6 +26,7 @@ class MagenticOneGroupChat(BaseGroupChat):
2526
Without a termination condition, the group chat will run based on the orchestrator logic or until the maximum number of turns is reached.
2627
max_turns (int, optional): The maximum number of turns in the group chat before stopping. Defaults to 20.
2728
max_stalls (int, optional): The maximum number of stalls allowed before re-planning. Defaults to 3.
29+
final_answer_prompt (str, optional): The LLM prompt used to generate the final answer or response from the team's transcript. A default (sensible for GPT-4o class models) is provided.
2830
2931
Raises:
3032
ValueError: In orchestration logic if progress ledger does not have required keys or if next speaker is not valid.
@@ -64,6 +66,7 @@ def __init__(
6466
termination_condition: TerminationCondition | None = None,
6567
max_turns: int | None = 20,
6668
max_stalls: int = 3,
69+
final_answer_prompt: str = ORCHESTRATOR_FINAL_ANSWER_PROMPT,
6770
):
6871
super().__init__(
6972
participants,
@@ -77,6 +80,7 @@ def __init__(
7780
raise ValueError("At least one participant is required for MagenticOneGroupChat.")
7881
self._model_client = model_client
7982
self._max_stalls = max_stalls
83+
self._final_answer_prompt = final_answer_prompt
8084

8185
def _create_group_chat_manager_factory(
8286
self,
@@ -95,5 +99,6 @@ def _create_group_chat_manager_factory(
9599
max_turns,
96100
self._model_client,
97101
self._max_stalls,
102+
self._final_answer_prompt,
98103
termination_condition,
99104
)

python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_magentic_one/_magentic_one_orchestrator.py

+6-1
Original file line numberDiff line numberDiff line change
@@ -48,6 +48,7 @@ def __init__(
4848
max_turns: int | None,
4949
model_client: ChatCompletionClient,
5050
max_stalls: int,
51+
final_answer_prompt: str,
5152
termination_condition: TerminationCondition | None,
5253
):
5354
super().__init__(
@@ -60,6 +61,7 @@ def __init__(
6061
)
6162
self._model_client = model_client
6263
self._max_stalls = max_stalls
64+
self._final_answer_prompt = final_answer_prompt
6365
self._name = "MagenticOneOrchestrator"
6466
self._max_json_retries = 10
6567
self._task = ""
@@ -95,7 +97,10 @@ def _get_task_ledger_plan_update_prompt(self, team: str) -> str:
9597
return ORCHESTRATOR_TASK_LEDGER_PLAN_UPDATE_PROMPT.format(team=team)
9698

9799
def _get_final_answer_prompt(self, task: str) -> str:
98-
return ORCHESTRATOR_FINAL_ANSWER_PROMPT.format(task=task)
100+
if self._final_answer_prompt == ORCHESTRATOR_FINAL_ANSWER_PROMPT:
101+
return ORCHESTRATOR_FINAL_ANSWER_PROMPT.format(task=task)
102+
else:
103+
return self._final_answer_prompt
99104

100105
async def _log_message(self, log_message: str) -> None:
101106
trace_logger.debug(log_message)

0 commit comments

Comments (0)