Skip to content

Commit 5b198d2

Browse files
committed
Merge remote-tracking branch 'upstream/main' into dotnet_unit_rt
2 parents 79da8a6 + 9030f75 commit 5b198d2

File tree

52 files changed

+2061
-380
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below for content that may be hidden.

52 files changed

+2061
-380
lines changed

python/README.md

+12
Original file line numberDiff line numberDiff line change
@@ -50,12 +50,24 @@ To create a pull request (PR), ensure the following checks are met. You can run
5050
- Pyright: `poe pyright`
5151
- Build docs: `poe --directory ./packages/autogen-core/ docs-build`
5252
- Auto rebuild+serve docs: `poe --directory ./packages/autogen-core/ docs-serve`
53+
- Check samples in `python/samples`: `poe samples-code-check`
5354
Alternatively, you can run all the checks with:
5455
- `poe check`
5556

5657
> [!NOTE]
5758
> These need to be run in the virtual environment.
5859
60+
### Syncing Dependencies
61+
62+
When you pull new changes, you may need to update the dependencies.
63+
To do so, first make sure you are in the virtual environment, and then in the `python` directory, run:
64+
65+
```sh
66+
uv sync --all-extras
67+
```
68+
69+
This will update the dependencies in the virtual environment.
70+
5971
### Creating a New Package
6072

6173
To create a new package, similar to `autogen-core` or `autogen-chat`, use the following:

python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py

+13-6
Original file line numberDiff line numberDiff line change
@@ -42,13 +42,13 @@
4242
HandoffMessage,
4343
MemoryQueryEvent,
4444
ModelClientStreamingChunkEvent,
45-
MultiModalMessage,
4645
TextMessage,
4746
ToolCallExecutionEvent,
4847
ToolCallRequestEvent,
4948
ToolCallSummaryMessage,
5049
)
5150
from ..state import AssistantAgentState
51+
from ..utils import remove_images
5252
from ._base_chat_agent import BaseChatAgent
5353

5454
event_logger = logging.getLogger(EVENT_LOGGER_NAME)
@@ -64,7 +64,7 @@ class AssistantAgentConfig(BaseModel):
6464
model_context: ComponentModel | None = None
6565
description: str
6666
system_message: str | None = None
67-
model_client_stream: bool
67+
model_client_stream: bool = False
6868
reflect_on_tool_use: bool
6969
tool_call_summary_format: str
7070

@@ -375,8 +375,6 @@ async def on_messages_stream(
375375
) -> AsyncGenerator[AgentEvent | ChatMessage | Response, None]:
376376
# Add messages to the model context.
377377
for msg in messages:
378-
if isinstance(msg, MultiModalMessage) and self._model_client.model_info["vision"] is False:
379-
raise ValueError("The model does not support vision.")
380378
if isinstance(msg, HandoffMessage):
381379
# Add handoff context to the model context.
382380
for context_msg in msg.context:
@@ -398,7 +396,7 @@ async def on_messages_stream(
398396
yield memory_query_event_msg
399397

400398
# Generate an inference result based on the current model context.
401-
llm_messages = self._system_messages + await self._model_context.get_messages()
399+
llm_messages = self._get_compatible_context(self._system_messages + await self._model_context.get_messages())
402400
model_result: CreateResult | None = None
403401
if self._model_client_stream:
404402
# Stream the model client.
@@ -494,7 +492,9 @@ async def on_messages_stream(
494492

495493
if self._reflect_on_tool_use:
496494
# Generate another inference result based on the tool call and result.
497-
llm_messages = self._system_messages + await self._model_context.get_messages()
495+
llm_messages = self._get_compatible_context(
496+
self._system_messages + await self._model_context.get_messages()
497+
)
498498
reflection_model_result: CreateResult | None = None
499499
if self._model_client_stream:
500500
# Stream the model client.
@@ -575,6 +575,13 @@ async def load_state(self, state: Mapping[str, Any]) -> None:
575575
# Load the model context state.
576576
await self._model_context.load_state(assistant_agent_state.llm_context)
577577

578+
def _get_compatible_context(self, messages: List[LLMMessage]) -> Sequence[LLMMessage]:
579+
"""Ensure that the messages are compatible with the underlying client, by removing images if needed."""
580+
if self._model_client.model_info["vision"]:
581+
return messages
582+
else:
583+
return remove_images(messages)
584+
578585
def _to_config(self) -> AssistantAgentConfig:
579586
"""Convert the assistant agent to a declarative config."""
580587

python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_magentic_one/_magentic_one_orchestrator.py

+24-19
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22
import logging
33
from typing import Any, Dict, List, Mapping
44

5-
from autogen_core import AgentId, CancellationToken, DefaultTopicId, Image, MessageContext, event, rpc
5+
from autogen_core import AgentId, CancellationToken, DefaultTopicId, MessageContext, event, rpc
66
from autogen_core.models import (
77
AssistantMessage,
88
ChatCompletionClient,
@@ -24,6 +24,7 @@
2424
ToolCallSummaryMessage,
2525
)
2626
from ....state import MagenticOneOrchestratorState
27+
from ....utils import content_to_str, remove_images
2728
from .._base_group_chat_manager import BaseGroupChatManager
2829
from .._events import (
2930
GroupChatAgentResponse,
@@ -138,15 +139,17 @@ async def handle_start(self, message: GroupChatStart, ctx: MessageContext) -> No
138139
# Create the initial task ledger
139140
#################################
140141
# Combine all message contents for task
141-
self._task = " ".join([self._content_to_str(msg.content) for msg in message.messages])
142+
self._task = " ".join([content_to_str(msg.content) for msg in message.messages])
142143
planning_conversation: List[LLMMessage] = []
143144

144145
# 1. GATHER FACTS
145146
# create a closed book task and generate a response and update the chat history
146147
planning_conversation.append(
147148
UserMessage(content=self._get_task_ledger_facts_prompt(self._task), source=self._name)
148149
)
149-
response = await self._model_client.create(planning_conversation, cancellation_token=ctx.cancellation_token)
150+
response = await self._model_client.create(
151+
self._get_compatible_context(planning_conversation), cancellation_token=ctx.cancellation_token
152+
)
150153

151154
assert isinstance(response.content, str)
152155
self._facts = response.content
@@ -157,7 +160,9 @@ async def handle_start(self, message: GroupChatStart, ctx: MessageContext) -> No
157160
planning_conversation.append(
158161
UserMessage(content=self._get_task_ledger_plan_prompt(self._team_description), source=self._name)
159162
)
160-
response = await self._model_client.create(planning_conversation, cancellation_token=ctx.cancellation_token)
163+
response = await self._model_client.create(
164+
self._get_compatible_context(planning_conversation), cancellation_token=ctx.cancellation_token
165+
)
161166

162167
assert isinstance(response.content, str)
163168
self._plan = response.content
@@ -281,7 +286,7 @@ async def _orchestrate_step(self, cancellation_token: CancellationToken) -> None
281286
assert self._max_json_retries > 0
282287
key_error: bool = False
283288
for _ in range(self._max_json_retries):
284-
response = await self._model_client.create(context, json_output=True)
289+
response = await self._model_client.create(self._get_compatible_context(context), json_output=True)
285290
ledger_str = response.content
286291
try:
287292
assert isinstance(ledger_str, str)
@@ -397,7 +402,9 @@ async def _update_task_ledger(self, cancellation_token: CancellationToken) -> No
397402
update_facts_prompt = self._get_task_ledger_facts_update_prompt(self._task, self._facts)
398403
context.append(UserMessage(content=update_facts_prompt, source=self._name))
399404

400-
response = await self._model_client.create(context, cancellation_token=cancellation_token)
405+
response = await self._model_client.create(
406+
self._get_compatible_context(context), cancellation_token=cancellation_token
407+
)
401408

402409
assert isinstance(response.content, str)
403410
self._facts = response.content
@@ -407,7 +414,9 @@ async def _update_task_ledger(self, cancellation_token: CancellationToken) -> No
407414
update_plan_prompt = self._get_task_ledger_plan_update_prompt(self._team_description)
408415
context.append(UserMessage(content=update_plan_prompt, source=self._name))
409416

410-
response = await self._model_client.create(context, cancellation_token=cancellation_token)
417+
response = await self._model_client.create(
418+
self._get_compatible_context(context), cancellation_token=cancellation_token
419+
)
411420

412421
assert isinstance(response.content, str)
413422
self._plan = response.content
@@ -420,7 +429,9 @@ async def _prepare_final_answer(self, reason: str, cancellation_token: Cancellat
420429
final_answer_prompt = self._get_final_answer_prompt(self._task)
421430
context.append(UserMessage(content=final_answer_prompt, source=self._name))
422431

423-
response = await self._model_client.create(context, cancellation_token=cancellation_token)
432+
response = await self._model_client.create(
433+
self._get_compatible_context(context), cancellation_token=cancellation_token
434+
)
424435
assert isinstance(response.content, str)
425436
message = TextMessage(content=response.content, source=self._name)
426437

@@ -464,15 +475,9 @@ def _thread_to_context(self) -> List[LLMMessage]:
464475
context.append(UserMessage(content=m.content, source=m.source))
465476
return context
466477

467-
def _content_to_str(self, content: str | List[str | Image]) -> str:
468-
"""Convert the content to a string."""
469-
if isinstance(content, str):
470-
return content
478+
def _get_compatible_context(self, messages: List[LLMMessage]) -> List[LLMMessage]:
479+
"""Ensure that the messages are compatible with the underlying client, by removing images if needed."""
480+
if self._model_client.model_info["vision"]:
481+
return messages
471482
else:
472-
result: List[str] = []
473-
for c in content:
474-
if isinstance(c, str):
475-
result.append(c)
476-
else:
477-
result.append("<image>")
478-
return "\n".join(result)
483+
return remove_images(messages)
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,7 @@
1+
"""
2+
This module implements various utilities common to AgentChat agents and teams.
3+
"""
4+
5+
from ._utils import content_to_str, remove_images
6+
7+
__all__ = ["content_to_str", "remove_images"]
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,38 @@
1+
from typing import List, Union
2+
3+
from autogen_core import FunctionCall, Image
4+
from autogen_core.models import FunctionExecutionResult, LLMMessage, UserMessage
5+
6+
# Type aliases for convenience
7+
_UserContent = Union[str, List[Union[str, Image]]]
8+
_AssistantContent = Union[str, List[FunctionCall]]
9+
_FunctionExecutionContent = List[FunctionExecutionResult]
10+
_SystemContent = str
11+
12+
13+
def content_to_str(content: _UserContent | _AssistantContent | _FunctionExecutionContent | _SystemContent) -> str:
14+
"""Convert the content of an LLMMessage to a string."""
15+
if isinstance(content, str):
16+
return content
17+
else:
18+
result: List[str] = []
19+
for c in content:
20+
if isinstance(c, str):
21+
result.append(c)
22+
elif isinstance(c, Image):
23+
result.append("<image>")
24+
else:
25+
result.append(str(c))
26+
27+
return "\n".join(result)
28+
29+
30+
def remove_images(messages: List[LLMMessage]) -> List[LLMMessage]:
31+
"""Remove images from a list of LLMMessages"""
32+
str_messages: List[LLMMessage] = []
33+
for message in messages:
34+
if isinstance(message, UserMessage) and isinstance(message.content, list):
35+
str_messages.append(UserMessage(content=content_to_str(message.content), source=message.source))
36+
else:
37+
str_messages.append(message)
38+
return str_messages

python/packages/autogen-agentchat/tests/test_assistant_agent.py

+43-6
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,15 @@
2121
from autogen_core import FunctionCall, Image
2222
from autogen_core.memory import ListMemory, Memory, MemoryContent, MemoryMimeType, MemoryQueryResult
2323
from autogen_core.model_context import BufferedChatCompletionContext
24-
from autogen_core.models import CreateResult, FunctionExecutionResult, LLMMessage, RequestUsage
24+
from autogen_core.models import (
25+
AssistantMessage,
26+
CreateResult,
27+
FunctionExecutionResult,
28+
LLMMessage,
29+
RequestUsage,
30+
SystemMessage,
31+
UserMessage,
32+
)
2533
from autogen_core.models._model_client import ModelFamily
2634
from autogen_core.tools import FunctionTool
2735
from autogen_ext.models.openai import OpenAIChatCompletionClient
@@ -541,15 +549,44 @@ async def test_invalid_model_capabilities() -> None:
541549
FunctionTool(_echo_function, description="Echo"),
542550
],
543551
)
552+
await agent.run(task=TextMessage(source="user", content="Test"))
544553

545554
with pytest.raises(ValueError):
546555
agent = AssistantAgent(name="assistant", model_client=model_client, handoffs=["agent2"])
556+
await agent.run(task=TextMessage(source="user", content="Test"))
547557

548-
with pytest.raises(ValueError):
549-
agent = AssistantAgent(name="assistant", model_client=model_client)
550-
# Generate a random base64 image.
551-
img_base64 = "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAIAAACQd1PeAAAADElEQVR4nGP4//8/AAX+Av4N70a4AAAAAElFTkSuQmCC"
552-
await agent.run(task=MultiModalMessage(source="user", content=["Test", Image.from_base64(img_base64)]))
558+
559+
@pytest.mark.asyncio
560+
async def test_remove_images(monkeypatch: pytest.MonkeyPatch) -> None:
561+
model = "random-model"
562+
model_client_1 = OpenAIChatCompletionClient(
563+
model=model,
564+
api_key="",
565+
model_info={"vision": False, "function_calling": False, "json_output": False, "family": ModelFamily.UNKNOWN},
566+
)
567+
model_client_2 = OpenAIChatCompletionClient(
568+
model=model,
569+
api_key="",
570+
model_info={"vision": True, "function_calling": False, "json_output": False, "family": ModelFamily.UNKNOWN},
571+
)
572+
573+
img_base64 = "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAIAAACQd1PeAAAADElEQVR4nGP4//8/AAX+Av4N70a4AAAAAElFTkSuQmCC"
574+
messages: List[LLMMessage] = [
575+
SystemMessage(content="System.1"),
576+
UserMessage(content=["User.1", Image.from_base64(img_base64)], source="user.1"),
577+
AssistantMessage(content="Assistant.1", source="assistant.1"),
578+
UserMessage(content="User.2", source="assistant.2"),
579+
]
580+
581+
agent_1 = AssistantAgent(name="assistant_1", model_client=model_client_1)
582+
result = agent_1._get_compatible_context(messages) # type: ignore
583+
assert len(result) == 4
584+
assert isinstance(result[1].content, str)
585+
586+
agent_2 = AssistantAgent(name="assistant_2", model_client=model_client_2)
587+
result = agent_2._get_compatible_context(messages) # type: ignore
588+
assert len(result) == 4
589+
assert isinstance(result[1].content, list)
553590

554591

555592
@pytest.mark.asyncio
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,36 @@
1+
from typing import List
2+
3+
import pytest
4+
from autogen_agentchat.utils import remove_images
5+
from autogen_core import Image
6+
from autogen_core.models import AssistantMessage, LLMMessage, SystemMessage, UserMessage
7+
8+
9+
@pytest.mark.asyncio
10+
async def test_remove_images() -> None:
11+
img_base64 = "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAIAAACQd1PeAAAADElEQVR4nGP4//8/AAX+Av4N70a4AAAAAElFTkSuQmCC"
12+
messages: List[LLMMessage] = [
13+
SystemMessage(content="System.1"),
14+
UserMessage(content=["User.1", Image.from_base64(img_base64)], source="user.1"),
15+
AssistantMessage(content="Assistant.1", source="assistant.1"),
16+
UserMessage(content="User.2", source="assistant.2"),
17+
]
18+
19+
result = remove_images(messages)
20+
21+
# Check all the invariants
22+
assert len(result) == 4
23+
assert isinstance(result[0], SystemMessage)
24+
assert isinstance(result[1], UserMessage)
25+
assert isinstance(result[2], AssistantMessage)
26+
assert isinstance(result[3], UserMessage)
27+
assert result[0].content == messages[0].content
28+
assert result[2].content == messages[2].content
29+
assert result[3].content == messages[3].content
30+
assert isinstance(messages[2], AssistantMessage)
31+
assert isinstance(messages[3], UserMessage)
32+
assert result[2].source == messages[2].source
33+
assert result[3].source == messages[3].source
34+
35+
# Check that the image was removed.
36+
assert result[1].content == "User.1\n<image>"

python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/termination.ipynb

+1-1
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@
1010
"\n",
1111
"AgentChat supports several termination condition by providing a base {py:class}`~autogen_agentchat.base.TerminationCondition` class and several implementations that inherit from it.\n",
1212
"\n",
13-
"A termination condition is a callable that takes a sequece of {py:class}`~autogen_agentchat.messages.AgentEvent` or {py:class}`~autogen_agentchat.messages.ChatMessage` objects **since the last time the condition was called**, and returns a {py:class}`~autogen_agentchat.messages.StopMessage` if the conversation should be terminated, or `None` otherwise.\n",
13+
"A termination condition is a callable that takes a sequence of {py:class}`~autogen_agentchat.messages.AgentEvent` or {py:class}`~autogen_agentchat.messages.ChatMessage` objects **since the last time the condition was called**, and returns a {py:class}`~autogen_agentchat.messages.StopMessage` if the conversation should be terminated, or `None` otherwise.\n",
1414
"Once a termination condition has been reached, it must be reset by calling {py:meth}`~autogen_agentchat.base.TerminationCondition.reset` before it can be used again.\n",
1515
"\n",
1616
"Some important things to note about termination conditions: \n",

python/packages/autogen-core/src/autogen_core/_serialization.py

+7
Original file line numberDiff line numberDiff line change
@@ -192,6 +192,13 @@ class UnknownPayload:
192192

193193

194194
def _type_name(cls: type[Any] | Any) -> str:
195+
# If cls is a protobuf, then we need to determine the descriptor
196+
if isinstance(cls, type):
197+
if issubclass(cls, Message):
198+
return cast(str, cls.DESCRIPTOR.full_name)
199+
elif isinstance(cls, Message):
200+
return cast(str, cls.DESCRIPTOR.full_name)
201+
195202
if isinstance(cls, type):
196203
return cls.__name__
197204
else:

0 commit comments

Comments
 (0)