Skip to content

Commit f985f7d

Browse files
authored
Merge branch 'main' into kostapetan/hello-distributed
2 parents 2e3b9a6 + 52790a8 commit f985f7d

File tree

91 files changed

+4093
-4674
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

91 files changed

+4093
-4674
lines changed

docs/design/02 - Topics.md

+13
Original file line numberDiff line numberDiff line change
@@ -51,3 +51,16 @@ Agents are able to handle certain types of messages. This is an internal detail
5151

5252
> [!NOTE]
5353
> This might be revisited based on scaling and performance considerations.
54+
55+
## Well known topic types
56+
57+
Agents should subscribe via a prefix subscription to the `{AgentType}:` topic as a direct message channel for the agent type.
58+
59+
For this subscription, the source should map directly to the agent key.
60+
61+
This subscription will therefore receive all events for the following well known topics:
62+
63+
- `{AgentType}:` - General purpose direct messages. These should be routed to the appropriate message handler.
64+
- `{AgentType}:rpc_request` - RPC request messages. These should be routed to the appropriate RPC handler.
65+
- `{AgentType}:rpc_response={RequestId}` - RPC response messages. These should be routed back to the response future of the caller.
66+
- `{AgentType}:error={RequestId}` - Error message that corresponds to the given request.

protos/agent_worker.proto

+1-2
Original file line numberDiff line numberDiff line change
@@ -117,12 +117,11 @@ message Message {
117117
oneof message {
118118
RpcRequest request = 1;
119119
RpcResponse response = 2;
120-
Event event = 3;
120+
cloudevent.CloudEvent cloudEvent = 3;
121121
RegisterAgentTypeRequest registerAgentTypeRequest = 4;
122122
RegisterAgentTypeResponse registerAgentTypeResponse = 5;
123123
AddSubscriptionRequest addSubscriptionRequest = 6;
124124
AddSubscriptionResponse addSubscriptionResponse = 7;
125-
cloudevent.CloudEvent cloudEvent = 8;
126125
}
127126
}
128127

python/packages/autogen-agentchat/pyproject.toml

+1
Original file line numberDiff line numberDiff line change
@@ -29,6 +29,7 @@ include = ["src/**", "tests/*.py"]
2929
[tool.pyright]
3030
extends = "../../pyproject.toml"
3131
include = ["src", "tests"]
32+
reportDeprecated = true
3233

3334
[tool.pytest.ini_options]
3435
minversion = "6.0"

python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py

+54-4
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,7 @@
2323
AgentMessage,
2424
ChatMessage,
2525
HandoffMessage,
26+
MultiModalMessage,
2627
TextMessage,
2728
ToolCallMessage,
2829
ToolCallResultMessage,
@@ -113,7 +114,10 @@ class AssistantAgent(BaseChatAgent):
113114
114115
115116
async def main() -> None:
116-
model_client = OpenAIChatCompletionClient(model="gpt-4o")
117+
model_client = OpenAIChatCompletionClient(
118+
model="gpt-4o",
119+
# api_key = "your_openai_api_key"
120+
)
117121
agent = AssistantAgent(name="assistant", model_client=model_client)
118122
119123
response = await agent.on_messages(
@@ -144,7 +148,10 @@ async def get_current_time() -> str:
144148
145149
146150
async def main() -> None:
147-
model_client = OpenAIChatCompletionClient(model="gpt-4o")
151+
model_client = OpenAIChatCompletionClient(
152+
model="gpt-4o",
153+
# api_key = "your_openai_api_key"
154+
)
148155
agent = AssistantAgent(name="assistant", model_client=model_client, tools=[get_current_time])
149156
150157
await Console(
@@ -156,6 +163,39 @@ async def main() -> None:
156163
157164
asyncio.run(main())
158165
166+
167+
The following example shows how to use `o1-mini` model with the assistant agent.
168+
169+
.. code-block:: python
170+
171+
import asyncio
172+
from autogen_core.base import CancellationToken
173+
from autogen_ext.models import OpenAIChatCompletionClient
174+
from autogen_agentchat.agents import AssistantAgent
175+
from autogen_agentchat.messages import TextMessage
176+
177+
178+
async def main() -> None:
179+
model_client = OpenAIChatCompletionClient(
180+
model="o1-mini",
181+
# api_key = "your_openai_api_key"
182+
)
183+
# The system message is not supported by the o1 series model.
184+
agent = AssistantAgent(name="assistant", model_client=model_client, system_message=None)
185+
186+
response = await agent.on_messages(
187+
[TextMessage(content="What is the capital of France?", source="user")], CancellationToken()
188+
)
189+
print(response)
190+
191+
192+
asyncio.run(main())
193+
194+
.. note::
195+
196+
The `o1-preview` and `o1-mini` models do not support system message and function calling.
197+
So the `system_message` should be set to `None` and the `tools` and `handoffs` should not be set.
198+
See `o1 beta limitations <https://platform.openai.com/docs/guides/reasoning#beta-limitations>`_ for more details.
159199
"""
160200

161201
def __init__(
@@ -166,13 +206,19 @@ def __init__(
166206
tools: List[Tool | Callable[..., Any] | Callable[..., Awaitable[Any]]] | None = None,
167207
handoffs: List[Handoff | str] | None = None,
168208
description: str = "An agent that provides assistance with ability to use tools.",
169-
system_message: str = "You are a helpful AI assistant. Solve tasks using your tools. Reply with TERMINATE when the task has been completed.",
209+
system_message: str
210+
| None = "You are a helpful AI assistant. Solve tasks using your tools. Reply with TERMINATE when the task has been completed.",
170211
):
171212
super().__init__(name=name, description=description)
172213
self._model_client = model_client
173-
self._system_messages = [SystemMessage(content=system_message)]
214+
if system_message is None:
215+
self._system_messages = []
216+
else:
217+
self._system_messages = [SystemMessage(content=system_message)]
174218
self._tools: List[Tool] = []
175219
if tools is not None:
220+
if model_client.capabilities["function_calling"] is False:
221+
raise ValueError("The model does not support function calling.")
176222
for tool in tools:
177223
if isinstance(tool, Tool):
178224
self._tools.append(tool)
@@ -192,6 +238,8 @@ def __init__(
192238
self._handoff_tools: List[Tool] = []
193239
self._handoffs: Dict[str, Handoff] = {}
194240
if handoffs is not None:
241+
if model_client.capabilities["function_calling"] is False:
242+
raise ValueError("The model does not support function calling, which is needed for handoffs.")
195243
for handoff in handoffs:
196244
if isinstance(handoff, str):
197245
handoff = Handoff(target=handoff)
@@ -229,6 +277,8 @@ async def on_messages_stream(
229277
) -> AsyncGenerator[AgentMessage | Response, None]:
230278
# Add messages to the model context.
231279
for msg in messages:
280+
if isinstance(msg, MultiModalMessage) and self._model_client.capabilities["vision"] is False:
281+
raise ValueError("The model does not support vision.")
232282
self._model_context.append(UserMessage(content=msg.content, source=msg.source))
233283

234284
# Inner messages.

python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_base_group_chat.py

+109-4
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,7 @@
1414
MessageContext,
1515
)
1616
from autogen_core.components import ClosureAgent, TypeSubscription
17+
from autogen_core.components._closure_agent import ClosureContext
1718

1819
from ... import EVENT_LOGGER_NAME
1920
from ...base import ChatAgent, TaskResult, Team, TerminationCondition
@@ -139,8 +140,7 @@ async def _init(self, runtime: AgentRuntime) -> None:
139140
)
140141

141142
async def collect_output_messages(
142-
_runtime: AgentRuntime,
143-
id: AgentId,
143+
_runtime: ClosureContext,
144144
message: GroupChatStart | GroupChatMessage | GroupChatTermination,
145145
ctx: MessageContext,
146146
) -> None:
@@ -150,7 +150,7 @@ async def collect_output_messages(
150150
return
151151
await self._output_message_queue.put(message.message)
152152

153-
await ClosureAgent.register(
153+
await ClosureAgent.register_closure(
154154
runtime,
155155
type=self._collector_agent_type,
156156
closure=collect_output_messages,
@@ -170,6 +170,13 @@ async def run(
170170
:meth:`run_stream` to run the team and then returns the final result.
171171
Once the team is stopped, the termination condition is reset.
172172
173+
Args:
174+
task (str | ChatMessage | None): The task to run the team with.
175+
cancellation_token (CancellationToken | None): The cancellation token to kill the task immediately.
176+
Setting the cancellation token may put the team in an inconsistent state,
177+
and it may not reset the termination condition.
178+
To gracefully stop the team, use :class:`~autogen_agentchat.task.ExternalTermination` instead.
179+
173180
Example using the :class:`~autogen_agentchat.teams.RoundRobinGroupChat` team:
174181
175182
@@ -198,6 +205,47 @@ async def main() -> None:
198205
print(result)
199206
200207
208+
asyncio.run(main())
209+
210+
211+
Example using the :class:`~autogen_core.base.CancellationToken` to cancel the task:
212+
213+
.. code-block:: python
214+
215+
import asyncio
216+
from autogen_agentchat.agents import AssistantAgent
217+
from autogen_agentchat.task import MaxMessageTermination
218+
from autogen_agentchat.teams import RoundRobinGroupChat
219+
from autogen_core.base import CancellationToken
220+
from autogen_ext.models import OpenAIChatCompletionClient
221+
222+
223+
async def main() -> None:
224+
model_client = OpenAIChatCompletionClient(model="gpt-4o")
225+
226+
agent1 = AssistantAgent("Assistant1", model_client=model_client)
227+
agent2 = AssistantAgent("Assistant2", model_client=model_client)
228+
termination = MaxMessageTermination(3)
229+
team = RoundRobinGroupChat([agent1, agent2], termination_condition=termination)
230+
231+
cancellation_token = CancellationToken()
232+
233+
# Create a task to run the team in the background.
234+
run_task = asyncio.create_task(
235+
team.run(
236+
task="Count from 1 to 10, respond one at a time.",
237+
cancellation_token=cancellation_token,
238+
)
239+
)
240+
241+
# Wait for 1 second and then cancel the task.
242+
await asyncio.sleep(1)
243+
cancellation_token.cancel()
244+
245+
# This will raise a cancellation error.
246+
await run_task
247+
248+
201249
asyncio.run(main())
202250
"""
203251
result: TaskResult | None = None
@@ -221,6 +269,13 @@ async def run_stream(
221269
of the type :class:`TaskResult` as the last item in the stream. Once the
222270
team is stopped, the termination condition is reset.
223271
272+
Args:
273+
task (str | ChatMessage | None): The task to run the team with.
274+
cancellation_token (CancellationToken | None): The cancellation token to kill the task immediately.
275+
Setting the cancellation token may put the team in an inconsistent state,
276+
and it may not reset the termination condition.
277+
To gracefully stop the team, use :class:`~autogen_agentchat.task.ExternalTermination` instead.
278+
224279
Example using the :class:`~autogen_agentchat.teams.RoundRobinGroupChat` team:
225280
226281
.. code-block:: python
@@ -251,7 +306,52 @@ async def main() -> None:
251306
252307
253308
asyncio.run(main())
309+
310+
311+
Example using the :class:`~autogen_core.base.CancellationToken` to cancel the task:
312+
313+
.. code-block:: python
314+
315+
import asyncio
316+
from autogen_agentchat.agents import AssistantAgent
317+
from autogen_agentchat.task import MaxMessageTermination, Console
318+
from autogen_agentchat.teams import RoundRobinGroupChat
319+
from autogen_core.base import CancellationToken
320+
from autogen_ext.models import OpenAIChatCompletionClient
321+
322+
323+
async def main() -> None:
324+
model_client = OpenAIChatCompletionClient(model="gpt-4o")
325+
326+
agent1 = AssistantAgent("Assistant1", model_client=model_client)
327+
agent2 = AssistantAgent("Assistant2", model_client=model_client)
328+
termination = MaxMessageTermination(3)
329+
team = RoundRobinGroupChat([agent1, agent2], termination_condition=termination)
330+
331+
cancellation_token = CancellationToken()
332+
333+
# Create a task to run the team in the background.
334+
run_task = asyncio.create_task(
335+
Console(
336+
team.run_stream(
337+
task="Count from 1 to 10, respond one at a time.",
338+
cancellation_token=cancellation_token,
339+
)
340+
)
341+
)
342+
343+
# Wait for 1 second and then cancel the task.
344+
await asyncio.sleep(1)
345+
cancellation_token.cancel()
346+
347+
# This will raise a cancellation error.
348+
await run_task
349+
350+
351+
asyncio.run(main())
352+
254353
"""
354+
255355
# Create the first chat message if the task is a string or a chat message.
256356
first_chat_message: ChatMessage | None = None
257357
if task is None:
@@ -288,12 +388,17 @@ async def stop_runtime() -> None:
288388
await self._runtime.send_message(
289389
GroupChatStart(message=first_chat_message),
290390
recipient=AgentId(type=self._group_chat_manager_topic_type, key=self._team_id),
391+
cancellation_token=cancellation_token,
291392
)
292393
# Collect the output messages in order.
293394
output_messages: List[AgentMessage] = []
294395
# Yield the messages until the queue is empty.
295396
while True:
296-
message = await self._output_message_queue.get()
397+
message_future = asyncio.ensure_future(self._output_message_queue.get())
398+
if cancellation_token is not None:
399+
cancellation_token.link_future(message_future)
400+
# Wait for the next message, this will raise an exception if the task is cancelled.
401+
message = await message_future
297402
if message is None:
298403
break
299404
yield message

python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_base_group_chat_manager.py

+23-5
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,4 @@
1+
import asyncio
12
from abc import ABC, abstractmethod
23
from typing import Any, List
34

@@ -78,7 +79,9 @@ async def handle_start(self, message: GroupChatStart, ctx: MessageContext) -> No
7879
await self.publish_message(message, topic_id=DefaultTopicId(type=self._output_topic_type))
7980

8081
# Relay the start message to the participants.
81-
await self.publish_message(message, topic_id=DefaultTopicId(type=self._group_topic_type))
82+
await self.publish_message(
83+
message, topic_id=DefaultTopicId(type=self._group_topic_type), cancellation_token=ctx.cancellation_token
84+
)
8285

8386
# Append the user message to the message thread.
8487
self._message_thread.append(message.message)
@@ -95,8 +98,16 @@ async def handle_start(self, message: GroupChatStart, ctx: MessageContext) -> No
9598
await self._termination_condition.reset()
9699
return
97100

98-
speaker_topic_type = await self.select_speaker(self._message_thread)
99-
await self.publish_message(GroupChatRequestPublish(), topic_id=DefaultTopicId(type=speaker_topic_type))
101+
# Select a speaker to start the conversation.
102+
speaker_topic_type_future = asyncio.ensure_future(self.select_speaker(self._message_thread))
103+
# Link the select speaker future to the cancellation token.
104+
ctx.cancellation_token.link_future(speaker_topic_type_future)
105+
speaker_topic_type = await speaker_topic_type_future
106+
await self.publish_message(
107+
GroupChatRequestPublish(),
108+
topic_id=DefaultTopicId(type=speaker_topic_type),
109+
cancellation_token=ctx.cancellation_token,
110+
)
100111

101112
@event
102113
async def handle_agent_response(self, message: GroupChatAgentResponse, ctx: MessageContext) -> None:
@@ -140,8 +151,15 @@ async def handle_agent_response(self, message: GroupChatAgentResponse, ctx: Mess
140151
return
141152

142153
# Select a speaker to continue the conversation.
143-
speaker_topic_type = await self.select_speaker(self._message_thread)
144-
await self.publish_message(GroupChatRequestPublish(), topic_id=DefaultTopicId(type=speaker_topic_type))
154+
speaker_topic_type_future = asyncio.ensure_future(self.select_speaker(self._message_thread))
155+
# Link the select speaker future to the cancellation token.
156+
ctx.cancellation_token.link_future(speaker_topic_type_future)
157+
speaker_topic_type = await speaker_topic_type_future
158+
await self.publish_message(
159+
GroupChatRequestPublish(),
160+
topic_id=DefaultTopicId(type=speaker_topic_type),
161+
cancellation_token=ctx.cancellation_token,
162+
)
145163

146164
@rpc
147165
async def handle_reset(self, message: GroupChatReset, ctx: MessageContext) -> None:

python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_chat_agent_container.py

+1
Original file line numberDiff line numberDiff line change
@@ -71,6 +71,7 @@ async def handle_request(self, message: GroupChatRequestPublish, ctx: MessageCon
7171
await self.publish_message(
7272
GroupChatAgentResponse(agent_response=response),
7373
topic_id=DefaultTopicId(type=self._parent_topic_type),
74+
cancellation_token=ctx.cancellation_token,
7475
)
7576

7677
async def on_unhandled_message(self, message: Any, ctx: MessageContext) -> None:

0 commit comments

Comments
 (0)