diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py
index 65e01817f59b..5b0575d7c474 100644
--- a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py
+++ b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py
@@ -13,7 +13,7 @@
     Sequence,
 )
 
-from autogen_core import CancellationToken, Component, ComponentModel, FunctionCall
+from autogen_core import CancellationToken, Component, ComponentModel, FunctionCall, FunctionCalls
 from autogen_core.memory import Memory
 from autogen_core.model_context import (
     ChatCompletionContext,
@@ -401,9 +401,10 @@ async def on_messages_stream(
             return
 
         # Process tool calls.
-        assert isinstance(model_result.content, list) and all(
-            isinstance(item, FunctionCall) for item in model_result.content
+        assert isinstance(model_result.content, FunctionCalls) and all(
+            isinstance(item, FunctionCall) for item in model_result.content.function_calls
         )
+
         tool_call_msg = ToolCallRequestEvent(
             content=model_result.content, source=self.name, models_usage=model_result.usage
         )
@@ -412,9 +413,11 @@
         inner_messages.append(tool_call_msg)
         yield tool_call_msg
 
+        function_calls = model_result.content.function_calls
+
         # Execute the tool calls.
         exec_results = await asyncio.gather(
-            *[self._execute_tool_call(call, cancellation_token) for call in model_result.content]
+            *[self._execute_tool_call(call, cancellation_token) for call in function_calls]
         )
         tool_call_result_msg = ToolCallExecutionEvent(content=exec_results, source=self.name)
         event_logger.debug(tool_call_result_msg)
@@ -423,7 +426,7 @@
         yield tool_call_result_msg
 
         # Correlate tool call results with tool calls.
-        tool_calls = [call for call in model_result.content if call.name not in self._handoffs]
+        tool_calls = [call for call in function_calls if call.name not in self._handoffs]
         tool_call_results: List[FunctionExecutionResult] = []
         for tool_call in tool_calls:
             found = False
@@ -436,7 +439,7 @@
                 raise RuntimeError(f"Tool call result not found for call id: {tool_call.id}")
 
         # Detect handoff requests.
-        handoff_reqs = [call for call in model_result.content if call.name in self._handoffs]
+        handoff_reqs = [call for call in function_calls if call.name in self._handoffs]
         if len(handoff_reqs) > 0:
             handoffs = [self._handoffs[call.name] for call in handoff_reqs]
             if len(handoffs) > 1:
@@ -451,7 +454,12 @@
             # Current context for handoff.
             handoff_context: List[LLMMessage] = []
             if len(tool_calls) > 0:
-                handoff_context.append(AssistantMessage(content=tool_calls, source=self.name))
+                handoff_context.append(
+                    AssistantMessage(
+                        content=FunctionCalls(function_calls=tool_calls, thought=model_result.content.thought),
+                        source=self.name,
+                    )
+                )
                 handoff_context.append(FunctionExecutionResultMessage(content=tool_call_results))
             # Return the output messages to signal the handoff.
             yield Response(
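A consumer-side sketch of what the agent now emits: `ToolCallRequestEvent.content` is a `FunctionCalls` value, so callers iterate `.function_calls` and can surface the optional `.thought`. The `lookup` tool name and its arguments below are made up for illustration.

```python
from autogen_agentchat.messages import ToolCallRequestEvent
from autogen_core import FunctionCall, FunctionCalls

event = ToolCallRequestEvent(
    source="assistant",
    content=FunctionCalls(
        function_calls=[FunctionCall(id="1", name="lookup", arguments='{"query": "autogen"}')],
        thought="I should look this up before answering.",
    ),
)

for call in event.content.function_calls:  # previously: for call in event.content
    print(call.id, call.name, call.arguments)
if event.content.thought is not None:
    print("thought:", event.content.thought)
```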
diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py b/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py
index 25d9e732d335..7f1891076bce 100644
--- a/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py
+++ b/python/packages/autogen-agentchat/src/autogen_agentchat/messages.py
@@ -7,7 +7,7 @@ class and includes specific fields relevant to the type of message being sent.
 from abc import ABC
 from typing import List, Literal
 
-from autogen_core import FunctionCall, Image
+from autogen_core import FunctionCalls, Image
 from autogen_core.memory import MemoryContent
 from autogen_core.models import FunctionExecutionResult, LLMMessage, RequestUsage
 from pydantic import BaseModel, ConfigDict, Field
@@ -83,7 +83,7 @@ class HandoffMessage(BaseChatMessage):
 class ToolCallRequestEvent(BaseAgentEvent):
     """An event signaling a request to use tools."""
 
-    content: List[FunctionCall]
+    content: FunctionCalls
     """The tool calls."""
 
     type: Literal["ToolCallRequestEvent"] = "ToolCallRequestEvent"
diff --git a/python/packages/autogen-agentchat/tests/test_assistant_agent.py b/python/packages/autogen-agentchat/tests/test_assistant_agent.py
index 7b281e39f6f5..fade29b3ee99 100644
--- a/python/packages/autogen-agentchat/tests/test_assistant_agent.py
+++ b/python/packages/autogen-agentchat/tests/test_assistant_agent.py
@@ -2,6 +2,7 @@
 import json
 import logging
 from typing import Any, AsyncGenerator, List
+from unittest.mock import AsyncMock
 
 import pytest
 from autogen_agentchat import EVENT_LOGGER_NAME
@@ -17,10 +18,10 @@
     ToolCallRequestEvent,
     ToolCallSummaryMessage,
 )
-from autogen_core import FunctionCall, Image
+from autogen_core import FunctionCall, FunctionCalls, Image
 from autogen_core.memory import ListMemory, Memory, MemoryContent, MemoryMimeType, MemoryQueryResult
 from autogen_core.model_context import BufferedChatCompletionContext
-from autogen_core.models import FunctionExecutionResult, LLMMessage
+from autogen_core.models import CreateResult, FunctionExecutionResult, LLMMessage, RequestUsage
 from autogen_core.models._model_client import ModelFamily
 from autogen_core.tools import FunctionTool
 from autogen_ext.models.openai import OpenAIChatCompletionClient
@@ -176,6 +177,51 @@ async def test_run_with_tools(monkeypatch: pytest.MonkeyPatch) -> None:
     assert state == state2
 
 
+@pytest.mark.asyncio
+async def test_run_stream_with_thought() -> None:
+    mocked_model = AsyncMock()
+    mocked_model.create.side_effect = [
+        CreateResult(
+            finish_reason="function_calls",
+            content=FunctionCalls(
+                function_calls=[FunctionCall(id="call_foo", name="echo", arguments=json.dumps({"input": "foo"}))],
+                thought="going to say foo!",
+            ),
+            usage=RequestUsage(prompt_tokens=10, completion_tokens=5),
+            cached=False,
+        ),
+        CreateResult(
+            finish_reason="stop",
+            content="ok!",
+            usage=RequestUsage(prompt_tokens=10, completion_tokens=5),
+            cached=False,
+        ),
+        CreateResult(
+            finish_reason="stop",
+            content="TERMINATE",
+            usage=RequestUsage(prompt_tokens=10, completion_tokens=5),
+            cached=False,
+        ),
+    ]
+
+    agent = AssistantAgent(
+        "thoughtful_tool_use_agent",
+        model_client=mocked_model,
+        tools=[
+            FunctionTool(_echo_function, description="Echo", name="echo"),
+        ],
+    )
+
+    streamed_messages = [item async for item in agent.run_stream(task="prompt")]
+    assert len(streamed_messages) == 5
+    assert isinstance(streamed_messages[0], TextMessage)
+    assert isinstance(streamed_messages[1], ToolCallRequestEvent)
+    assert streamed_messages[1].content.thought == "going to say foo!"
+    assert isinstance(streamed_messages[2], ToolCallExecutionEvent)
+    assert isinstance(streamed_messages[3], ToolCallSummaryMessage)
+    assert isinstance(streamed_messages[4], TaskResult)
+
+
 @pytest.mark.asyncio
 async def test_run_with_tools_and_reflection(monkeypatch: pytest.MonkeyPatch) -> None:
     model = "gpt-4o-2024-05-13"
@@ -374,7 +420,7 @@ async def test_run_with_parallel_tools(monkeypatch: pytest.MonkeyPatch) -> None:
     assert isinstance(result.messages[0], TextMessage)
     assert result.messages[0].models_usage is None
     assert isinstance(result.messages[1], ToolCallRequestEvent)
-    assert result.messages[1].content == [
+    assert result.messages[1].content.function_calls == [
         FunctionCall(id="1", arguments=r'{"input": "task1"}', name="_pass_function"),
         FunctionCall(id="2", arguments=r'{"input": "task2"}', name="_pass_function"),
         FunctionCall(id="3", arguments=r'{"input": "task3"}', name="_echo_function"),
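Because this is a breaking change to `ToolCallRequestEvent.content` (and `AssistantMessage.content`), downstream code that iterated the old `List[FunctionCall]` needs a one-line migration. A minimal sketch of the pattern; the `unpack` helper is ours, not part of this PR.

```python
from typing import List, Optional, Tuple

from autogen_core import FunctionCall, FunctionCalls


def unpack(content: FunctionCalls) -> Tuple[List[FunctionCall], Optional[str]]:
    # Before this PR the event content *was* the list; now the list lives
    # under .function_calls, with an optional model thought riding along.
    return content.function_calls, content.thought


calls, thought = unpack(
    FunctionCalls(function_calls=[FunctionCall(id="1", name="echo", arguments='{"input": "hi"}')])
)
assert [c.name for c in calls] == ["echo"] and thought is None
```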
"going to say foo!" + assert isinstance(streamed_messages[2], ToolCallExecutionEvent) + assert isinstance(streamed_messages[3], ToolCallSummaryMessage) + assert isinstance(streamed_messages[4], TaskResult) + + @pytest.mark.asyncio async def test_run_with_tools_and_reflection(monkeypatch: pytest.MonkeyPatch) -> None: model = "gpt-4o-2024-05-13" @@ -374,7 +420,7 @@ async def test_run_with_parallel_tools(monkeypatch: pytest.MonkeyPatch) -> None: assert isinstance(result.messages[0], TextMessage) assert result.messages[0].models_usage is None assert isinstance(result.messages[1], ToolCallRequestEvent) - assert result.messages[1].content == [ + assert result.messages[1].content.function_calls == [ FunctionCall(id="1", arguments=r'{"input": "task1"}', name="_pass_function"), FunctionCall(id="2", arguments=r'{"input": "task2"}', name="_pass_function"), FunctionCall(id="3", arguments=r'{"input": "task3"}', name="_echo_function"), diff --git a/python/packages/autogen-agentchat/tests/test_group_chat.py b/python/packages/autogen-agentchat/tests/test_group_chat.py index c04d7344029f..b8d433f2f3b1 100644 --- a/python/packages/autogen-agentchat/tests/test_group_chat.py +++ b/python/packages/autogen-agentchat/tests/test_group_chat.py @@ -29,7 +29,7 @@ from autogen_agentchat.teams._group_chat._selector_group_chat import SelectorGroupChatManager from autogen_agentchat.teams._group_chat._swarm_group_chat import SwarmGroupChatManager from autogen_agentchat.ui import Console -from autogen_core import AgentId, CancellationToken, FunctionCall +from autogen_core import AgentId, CancellationToken, FunctionCall, FunctionCalls from autogen_core.models import ( AssistantMessage, FunctionExecutionResult, @@ -220,6 +220,7 @@ async def test_round_robin_group_chat(monkeypatch: pytest.MonkeyPatch) -> None: result_2 = await team.run( task=MultiModalMessage(content=["Write a program that prints 'Hello, world!'"], source="user") ) + assert isinstance(result_2.messages[0].content, list) assert result.messages[0].content == result_2.messages[0].content[0] assert result.messages[1:] == result_2.messages[1:] @@ -1065,10 +1066,12 @@ async def test_swarm_with_parallel_tool_calls(monkeypatch: pytest.MonkeyPatch) - expected_handoff_context: List[LLMMessage] = [ AssistantMessage( source="agent1", - content=[ - FunctionCall(id="1", name="tool1", arguments="{}"), - FunctionCall(id="2", name="tool2", arguments="{}"), - ], + content=FunctionCalls( + function_calls=[ + FunctionCall(id="1", name="tool1", arguments="{}"), + FunctionCall(id="2", name="tool2", arguments="{}"), + ] + ), ), FunctionExecutionResultMessage( content=[ diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/migration-guide.md b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/migration-guide.md index a0beff3e2f8c..e796d19c313c 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/migration-guide.md +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/migration-guide.md @@ -627,7 +627,7 @@ from autogen_agentchat.messages import ( ToolCallRequestEvent, ToolCallSummaryMessage, ) -from autogen_core import FunctionCall, Image +from autogen_core import FunctionCall, FunctionCalls, Image from autogen_core.models import FunctionExecutionResult @@ -660,7 +660,7 @@ def convert_to_v02_message( raise ValueError(f"Invalid multimodal message content: {modal}") elif isinstance(message, ToolCallRequestEvent): v02_message = {"tool_calls": [], "role": "assistant", "content": None, 
"name": message.source} - for tool_call in message.content: + for tool_call in message.content.function_calls: v02_message["tool_calls"].append( { "id": tool_call.id, @@ -697,7 +697,7 @@ def convert_to_v04_message(message: Dict[str, Any]) -> AgentEvent | ChatMessage: arguments=tool_call["function"]["args"], ) ) - return ToolCallRequestEvent(source=message["name"], content=tool_calls) + return ToolCallRequestEvent(source=message["name"], content=FunctionCalls(function_calls=tool_calls)) elif "tool_responses" in message: tool_results: List[FunctionExecutionResult] = [] for tool_response in message["tool_responses"]: diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/handoffs.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/handoffs.ipynb index 31885a10e977..7e6c46f69297 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/handoffs.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/handoffs.ipynb @@ -58,6 +58,7 @@ "\n", "from autogen_core import (\n", " FunctionCall,\n", + " FunctionCalls,\n", " MessageContext,\n", " RoutedAgent,\n", " SingleThreadedAgentRuntime,\n", @@ -173,11 +174,13 @@ " )\n", " print(f\"{'-'*80}\\n{self.id.type}:\\n{llm_result.content}\", flush=True)\n", " # Process the LLM result.\n", - " while isinstance(llm_result.content, list) and all(isinstance(m, FunctionCall) for m in llm_result.content):\n", + " while isinstance(llm_result.content, FunctionCalls) and all(\n", + " isinstance(m, FunctionCall) for m in llm_result.content.function_calls\n", + " ):\n", " tool_call_results: List[FunctionExecutionResult] = []\n", " delegate_targets: List[Tuple[str, UserTask]] = []\n", " # Process each function call.\n", - " for call in llm_result.content:\n", + " for call in llm_result.content.function_calls:\n", " arguments = json.loads(call.arguments)\n", " if call.name in self._tools:\n", " # Execute the tool directly.\n", @@ -190,7 +193,7 @@ " topic_type = self._delegate_tools[call.name].return_value_as_string(result)\n", " # Create the context for the delegate agent, including the function call and the result.\n", " delegate_messages = list(message.context) + [\n", - " AssistantMessage(content=[call], source=self.id.type),\n", + " AssistantMessage(content=FunctionCalls(function_calls=[call]), source=self.id.type),\n", " FunctionExecutionResultMessage(\n", " content=[\n", " FunctionExecutionResult(\n", diff --git a/python/packages/autogen-core/src/autogen_core/__init__.py b/python/packages/autogen-core/src/autogen_core/__init__.py index 0198544ca61e..1d3af45e19da 100644 --- a/python/packages/autogen-core/src/autogen_core/__init__.py +++ b/python/packages/autogen-core/src/autogen_core/__init__.py @@ -62,7 +62,7 @@ from ._topic import TopicId from ._type_prefix_subscription import TypePrefixSubscription from ._type_subscription import TypeSubscription -from ._types import FunctionCall +from ._types import FunctionCall, FunctionCalls EVENT_LOGGER_NAME = EVENT_LOGGER_NAME_ALIAS """The name of the logger used for structured events.""" @@ -107,6 +107,7 @@ "event", "rpc", "FunctionCall", + "FunctionCalls", "TypeSubscription", "DefaultSubscription", "DefaultTopicId", diff --git a/python/packages/autogen-core/src/autogen_core/_types.py b/python/packages/autogen-core/src/autogen_core/_types.py index 5e3850ffae8b..ff54a76ce86f 100644 --- a/python/packages/autogen-core/src/autogen_core/_types.py +++ 
diff --git a/python/packages/autogen-core/src/autogen_core/model_context/_head_and_tail_chat_completion_context.py b/python/packages/autogen-core/src/autogen_core/model_context/_head_and_tail_chat_completion_context.py
index a37d5927b19f..982084a43370 100644
--- a/python/packages/autogen-core/src/autogen_core/model_context/_head_and_tail_chat_completion_context.py
+++ b/python/packages/autogen-core/src/autogen_core/model_context/_head_and_tail_chat_completion_context.py
@@ -4,7 +4,7 @@
 from typing_extensions import Self
 
 from .._component_config import Component
-from .._types import FunctionCall
+from .._types import FunctionCall, FunctionCalls
 from ..models import AssistantMessage, FunctionExecutionResultMessage, LLMMessage, UserMessage
 from ._chat_completion_context import ChatCompletionContext
 
@@ -45,8 +45,8 @@ async def get_messages(self) -> List[LLMMessage]:
         if (
             head_messages
             and isinstance(head_messages[-1], AssistantMessage)
-            and isinstance(head_messages[-1].content, list)
-            and all(isinstance(item, FunctionCall) for item in head_messages[-1].content)
+            and isinstance(head_messages[-1].content, FunctionCalls)
+            and all(isinstance(item, FunctionCall) for item in head_messages[-1].content.function_calls)
         ):
             # Remove the last message from the head.
             head_messages = head_messages[:-1]
diff --git a/python/packages/autogen-core/src/autogen_core/models/_types.py b/python/packages/autogen-core/src/autogen_core/models/_types.py
index a3d6af1edde4..5944730645f0 100644
--- a/python/packages/autogen-core/src/autogen_core/models/_types.py
+++ b/python/packages/autogen-core/src/autogen_core/models/_types.py
@@ -4,7 +4,7 @@
 from pydantic import BaseModel, Field
 from typing_extensions import Annotated
 
-from .. import FunctionCall, Image
+from .. import FunctionCalls, Image
 
 
 class SystemMessage(BaseModel):
@@ -22,7 +22,7 @@ class UserMessage(BaseModel):
 
 
 class AssistantMessage(BaseModel):
-    content: Union[str, List[FunctionCall]]
+    content: Union[str, FunctionCalls]
     # Name of the agent that sent this message
     source: str
 
@@ -70,7 +70,7 @@ class ChatCompletionTokenLogprob(BaseModel):
 
 class CreateResult(BaseModel):
     finish_reason: FinishReasons
-    content: Union[str, List[FunctionCall]]
+    content: Union[str, FunctionCalls]
     usage: RequestUsage
     cached: bool
     logprobs: Optional[List[ChatCompletionTokenLogprob] | None] = None
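With `CreateResult.content` now typed `Union[str, FunctionCalls]`, a single `isinstance` check narrows the union for both runtime dispatch and static checkers. A minimal sketch against any model client's result:

```python
from autogen_core import FunctionCalls
from autogen_core.models import CreateResult


def summarize(result: CreateResult) -> str:
    # isinstance narrows Union[str, FunctionCalls] for mypy/pyright too.
    if isinstance(result.content, FunctionCalls):
        names = ", ".join(call.name for call in result.content.function_calls)
        return f"tool calls: [{names}], thought: {result.content.thought!r}"
    return f"text: {result.content}"
```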
diff --git a/python/packages/autogen-core/src/autogen_core/tool_agent/_caller_loop.py b/python/packages/autogen-core/src/autogen_core/tool_agent/_caller_loop.py
index 2353ca184f86..a12634907434 100644
--- a/python/packages/autogen-core/src/autogen_core/tool_agent/_caller_loop.py
+++ b/python/packages/autogen-core/src/autogen_core/tool_agent/_caller_loop.py
@@ -1,7 +1,7 @@
 import asyncio
 from typing import List
 
-from .. import AgentId, AgentRuntime, BaseAgent, CancellationToken, FunctionCall
+from .. import AgentId, AgentRuntime, BaseAgent, CancellationToken, FunctionCall, FunctionCalls
 from ..models import (
     AssistantMessage,
     ChatCompletionClient,
@@ -43,7 +43,9 @@ async def tool_agent_caller_loop(
     generated_messages.append(AssistantMessage(content=response.content, source=caller_source))
 
     # Keep iterating until the model stops generating tool calls.
-    while isinstance(response.content, list) and all(isinstance(item, FunctionCall) for item in response.content):
+    while isinstance(response.content, FunctionCalls) and all(
+        isinstance(item, FunctionCall) for item in response.content.function_calls
+    ):
         # Execute functions called by the model by sending messages to tool agent.
         results: List[FunctionExecutionResult | BaseException] = await asyncio.gather(
             *[
@@ -52,7 +54,7 @@
                     recipient=tool_agent_id,
                     cancellation_token=cancellation_token,
                 )
-                for call in response.content
+                for call in response.content.function_calls
             ],
             return_exceptions=True,
        )
diff --git a/python/packages/autogen-core/tests/test_memory.py b/python/packages/autogen-core/tests/test_memory.py
index 04054e1b2250..bab41fb2aed6 100644
--- a/python/packages/autogen-core/tests/test_memory.py
+++ b/python/packages/autogen-core/tests/test_memory.py
@@ -115,6 +115,7 @@ async def test_list_memory_update_context() -> None:
     context_messages = await context.get_messages()
     assert len(results.memories.results) == 2
     assert len(context_messages) == 1
+    assert isinstance(context_messages[0].content, str)
     assert "test1" in context_messages[0].content
     assert "test2" in context_messages[0].content
 
diff --git a/python/packages/autogen-core/tests/test_tool_agent.py b/python/packages/autogen-core/tests/test_tool_agent.py
index 85fcd3892c97..7023be6654a2 100644
--- a/python/packages/autogen-core/tests/test_tool_agent.py
+++ b/python/packages/autogen-core/tests/test_tool_agent.py
@@ -3,7 +3,7 @@
 from typing import Any, AsyncGenerator, List, Mapping, Optional, Sequence, Union
 
 import pytest
-from autogen_core import AgentId, CancellationToken, FunctionCall, SingleThreadedAgentRuntime
+from autogen_core import AgentId, CancellationToken, FunctionCall, FunctionCalls, SingleThreadedAgentRuntime
 from autogen_core.models import (
     AssistantMessage,
     ChatCompletionClient,
@@ -101,7 +101,9 @@ async def create(
     ) -> CreateResult:
         if len(messages) == 1:
             return CreateResult(
-                content=[FunctionCall(id="1", name="pass", arguments=json.dumps({"input": "test"}))],
+                content=FunctionCalls(
+                    function_calls=[FunctionCall(id="1", name="pass", arguments=json.dumps({"input": "test"}))]
+                ),
                 finish_reason="stop",
                 usage=RequestUsage(prompt_tokens=0, completion_tokens=0),
                 cached=False,
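The `tool_agent_caller_loop` shape above generalizes: loop while the model keeps returning `FunctionCalls`, execute the calls, feed a `FunctionExecutionResultMessage` back, and ask again. A condensed, framework-free sketch under the updated types; `create` and `execute` are hypothetical stand-ins for the model client and the tool-agent round trip:

```python
from typing import Awaitable, Callable, List

from autogen_core import FunctionCall, FunctionCalls
from autogen_core.models import (
    AssistantMessage,
    CreateResult,
    FunctionExecutionResult,
    FunctionExecutionResultMessage,
    LLMMessage,
)


async def tool_loop(
    create: Callable[[List[LLMMessage]], Awaitable[CreateResult]],
    execute: Callable[[FunctionCall], Awaitable[FunctionExecutionResult]],
    messages: List[LLMMessage],
    source: str,
) -> List[LLMMessage]:
    generated: List[LLMMessage] = []
    response = await create(messages)
    generated.append(AssistantMessage(content=response.content, source=source))
    # Keep iterating until the model stops generating tool calls.
    while isinstance(response.content, FunctionCalls):
        results = [await execute(call) for call in response.content.function_calls]
        generated.append(FunctionExecutionResultMessage(content=results))
        response = await create(messages + generated)
        generated.append(AssistantMessage(content=response.content, source=source))
    return generated
```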
diff --git a/python/packages/autogen-ext/src/autogen_ext/agents/file_surfer/_file_surfer.py b/python/packages/autogen-ext/src/autogen_ext/agents/file_surfer/_file_surfer.py
index 0f389313057a..662766eddbb6 100644
--- a/python/packages/autogen-ext/src/autogen_ext/agents/file_surfer/_file_surfer.py
+++ b/python/packages/autogen-ext/src/autogen_ext/agents/file_surfer/_file_surfer.py
@@ -9,7 +9,7 @@
     MultiModalMessage,
     TextMessage,
 )
-from autogen_core import CancellationToken, FunctionCall
+from autogen_core import CancellationToken, FunctionCall, FunctionCalls
 from autogen_core.models import (
     AssistantMessage,
     ChatCompletionClient,
@@ -143,8 +143,10 @@ async def _generate_reply(self, cancellation_token: CancellationToken) -> Tuple[
 
             # Answer directly.
             return False, response
-        elif isinstance(response, list) and all(isinstance(item, FunctionCall) for item in response):
-            function_calls = response
+        elif isinstance(response, FunctionCalls) and all(
+            isinstance(item, FunctionCall) for item in response.function_calls
+        ):
+            function_calls = response.function_calls
             for function_call in function_calls:
                 tool_name = function_call.name
 
diff --git a/python/packages/autogen-ext/src/autogen_ext/agents/openai/_openai_assistant_agent.py b/python/packages/autogen-ext/src/autogen_ext/agents/openai/_openai_assistant_agent.py
index d738a5fae894..30a04e7cbd07 100644
--- a/python/packages/autogen-ext/src/autogen_ext/agents/openai/_openai_assistant_agent.py
+++ b/python/packages/autogen-ext/src/autogen_ext/agents/openai/_openai_assistant_agent.py
@@ -32,7 +32,7 @@
     ToolCallExecutionEvent,
     ToolCallRequestEvent,
 )
-from autogen_core import CancellationToken, FunctionCall
+from autogen_core import CancellationToken, FunctionCall, FunctionCalls
 from autogen_core.models._model_client import ChatCompletionClient
 from autogen_core.models._types import FunctionExecutionResult
 from autogen_core.tools import FunctionTool, Tool
@@ -395,7 +395,7 @@ async def on_messages_stream(
         )
 
         # Add tool call message to inner messages
-        tool_call_msg = ToolCallRequestEvent(source=self.name, content=tool_calls)
+        tool_call_msg = ToolCallRequestEvent(source=self.name, content=FunctionCalls(function_calls=tool_calls))
         inner_messages.append(tool_call_msg)
         event_logger.debug(tool_call_msg)
         yield tool_call_msg
diff --git a/python/packages/autogen-ext/src/autogen_ext/agents/web_surfer/_multimodal_web_surfer.py b/python/packages/autogen-ext/src/autogen_ext/agents/web_surfer/_multimodal_web_surfer.py
index f90dc01cdda2..db4467fe3510 100644
--- a/python/packages/autogen-ext/src/autogen_ext/agents/web_surfer/_multimodal_web_surfer.py
+++ b/python/packages/autogen-ext/src/autogen_ext/agents/web_surfer/_multimodal_web_surfer.py
@@ -24,7 +24,7 @@
 from autogen_agentchat.agents import BaseChatAgent
 from autogen_agentchat.base import Response
 from autogen_agentchat.messages import AgentEvent, ChatMessage, MultiModalMessage, TextMessage
-from autogen_core import EVENT_LOGGER_NAME, CancellationToken, Component, ComponentModel, FunctionCall
+from autogen_core import EVENT_LOGGER_NAME, CancellationToken, Component, ComponentModel, FunctionCall, FunctionCalls
 from autogen_core import Image as AGImage
 from autogen_core.models import (
     AssistantMessage,
@@ -541,9 +541,11 @@ async def _generate_reply(self, cancellation_token: CancellationToken) -> UserCo
             # Answer directly
             self.inner_messages.append(TextMessage(content=message, source=self.name))
             return message
-        elif isinstance(message, list):
+        elif isinstance(message, FunctionCalls):
             # Take an action
-            return await self._execute_tool(message, rects, tool_names, cancellation_token=cancellation_token)
+            return await self._execute_tool(
+                message.function_calls, rects, tool_names, cancellation_token=cancellation_token
+            )
         else:
             # Not sure what happened here
             raise AssertionError(f"Unknown response format '{message}'")
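FileSurfer, the multimodal web surfer, and the Magentic-One port further down all branch the same way on the reply union. A reduced sketch of that dispatch; the helper and its `(is_tool_request, payload)` shape are ours, not an API of these agents:

```python
from typing import List, Tuple, Union

from autogen_core import FunctionCall, FunctionCalls


def route_reply(response: Union[str, FunctionCalls]) -> Tuple[bool, Union[str, List[FunctionCall]]]:
    """Return (is_tool_request, payload) for an assistant reply."""
    if isinstance(response, str):
        return False, response  # final answer text
    if isinstance(response, FunctionCalls):
        return True, response.function_calls  # calls still to execute
    raise AssertionError(f"Unknown response format '{response}'")
```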
diff --git a/python/packages/autogen-ext/src/autogen_ext/agents/web_surfer/_types.py b/python/packages/autogen-ext/src/autogen_ext/agents/web_surfer/_types.py
index d626b086961d..29b77acc86e5 100644
--- a/python/packages/autogen-ext/src/autogen_ext/agents/web_surfer/_types.py
+++ b/python/packages/autogen-ext/src/autogen_ext/agents/web_surfer/_types.py
@@ -1,10 +1,10 @@
 from typing import Any, Dict, List, TypedDict, Union
 
-from autogen_core import FunctionCall, Image
+from autogen_core import FunctionCalls, Image
 from autogen_core.models import FunctionExecutionResult
 
 UserContent = Union[str, List[Union[str, Image]]]
-AssistantContent = Union[str, List[FunctionCall]]
+AssistantContent = Union[str, FunctionCalls]
 FunctionExecutionContent = List[FunctionExecutionResult]
 SystemContent = str
diff --git a/python/packages/autogen-ext/src/autogen_ext/models/openai/_openai_client.py b/python/packages/autogen-ext/src/autogen_ext/models/openai/_openai_client.py
index 79c13442c7dc..09b3390c491e 100644
--- a/python/packages/autogen-ext/src/autogen_ext/models/openai/_openai_client.py
+++ b/python/packages/autogen-ext/src/autogen_ext/models/openai/_openai_client.py
@@ -27,6 +27,7 @@
     CancellationToken,
     Component,
     FunctionCall,
+    FunctionCalls,
     Image,
     MessageHandlerContext,
 )
@@ -196,9 +197,9 @@ def assistant_message_to_oai(
     message: AssistantMessage,
 ) -> ChatCompletionAssistantMessageParam:
     assert_valid_name(message.source)
-    if isinstance(message.content, list):
+    if isinstance(message.content, FunctionCalls):
         return ChatCompletionAssistantMessageParam(
-            tool_calls=[func_call_to_oai(x) for x in message.content],
+            tool_calls=[func_call_to_oai(x) for x in message.content.function_calls],
             role="assistant",
             name=message.source,
         )
@@ -549,7 +550,7 @@ async def create(
 
         # Detect whether it is a function call or not.
         # We don't rely on choice.finish_reason as it is not always accurate, depending on the API used.
-        content: Union[str, List[FunctionCall]]
+        content: Union[str, FunctionCalls]
         if choice.message.function_call is not None:
             raise ValueError("function_call is deprecated and is not supported by this model client.")
         elif choice.message.tool_calls is not None:
@@ -567,14 +568,16 @@ async def create(
                     stacklevel=2,
                 )
             # NOTE: If OAI response type changes, this will need to be updated
-            content = [
-                FunctionCall(
-                    id=x.id,
-                    arguments=x.function.arguments,
-                    name=normalize_name(x.function.name),
-                )
-                for x in choice.message.tool_calls
-            ]
+            content = FunctionCalls(
+                function_calls=[
+                    FunctionCall(
+                        id=x.id,
+                        arguments=x.function.arguments,
+                        name=normalize_name(x.function.name),
+                    )
+                    for x in choice.message.tool_calls
+                ]
+            )
             finish_reason = "tool_calls"
         else:
             finish_reason = choice.finish_reason
@@ -783,7 +786,7 @@ async def create_stream(
         if stop_reason == "function_call":
             raise ValueError("Function calls are not supported in this context")
 
-        content: Union[str, List[FunctionCall]]
+        content: Union[str, FunctionCalls]
         if len(content_deltas) > 1:
             content = "".join(content_deltas)
             if chunk and chunk.usage:
@@ -797,7 +800,7 @@ async def create_stream(
             #     # value = json.dumps(tool_call)
             #     # completion_tokens += count_token(value, model=model)
             #     completion_tokens += 0
-            content = list(full_tool_calls.values())
+            content = FunctionCalls(function_calls=list(full_tool_calls.values()))
 
         usage = RequestUsage(
             prompt_tokens=prompt_tokens,
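One observation on the updated `assistant_message_to_oai`: it does not forward `FunctionCalls.thought`, so a thought attached to a tool-call message is dropped on the wire. If preserving it is desired, a hedged plain-dict sketch of the conversion; mapping the thought onto the assistant `content` field is our suggestion, not part of this PR:

```python
from typing import Any, Dict

from autogen_core import FunctionCalls
from autogen_core.models import AssistantMessage


def assistant_message_to_wire(message: AssistantMessage) -> Dict[str, Any]:
    """Plain-dict sketch of the OAI-style conversion, extended to keep the thought."""
    if isinstance(message.content, FunctionCalls):
        wire: Dict[str, Any] = {
            "role": "assistant",
            "name": message.source,
            "tool_calls": [
                {
                    "id": call.id,
                    "type": "function",
                    "function": {"name": call.name, "arguments": call.arguments},
                }
                for call in message.content.function_calls
            ],
        }
        # Not in the PR: carry the thought as assistant text so it round-trips.
        if message.content.thought is not None:
            wire["content"] = message.content.thought
        return wire
    return {"role": "assistant", "name": message.source, "content": message.content}
```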
diff --git a/python/packages/autogen-ext/src/autogen_ext/models/semantic_kernel/_sk_chat_completion_adapter.py b/python/packages/autogen-ext/src/autogen_ext/models/semantic_kernel/_sk_chat_completion_adapter.py
index 9f501fbbffcf..e6b910fe425f 100644
--- a/python/packages/autogen-ext/src/autogen_ext/models/semantic_kernel/_sk_chat_completion_adapter.py
+++ b/python/packages/autogen-ext/src/autogen_ext/models/semantic_kernel/_sk_chat_completion_adapter.py
@@ -1,7 +1,7 @@
 import json
 from typing import Any, Literal, Mapping, Optional, Sequence
 
-from autogen_core import FunctionCall
+from autogen_core import FunctionCall, FunctionCalls
 from autogen_core._cancellation_token import CancellationToken
 from autogen_core.models import (
     ChatCompletionClient,
@@ -361,9 +361,9 @@ async def create(
         self._total_completion_tokens += completion_tokens
 
         # Process content based on whether there are tool calls
-        content: Union[str, list[FunctionCall]]
+        content: Union[str, FunctionCalls]
         if any(isinstance(item, FunctionCallContent) for item in result[0].items):
-            content = self._process_tool_calls(result[0])
+            content = FunctionCalls(function_calls=self._process_tool_calls(result[0]))
             finish_reason: Literal["function_calls", "stop"] = "function_calls"
         else:
             content = result[0].content
@@ -436,7 +436,7 @@ async def create_stream(
             if any(isinstance(item, FunctionCallContent) for item in msg.items):
                 function_calls = self._process_tool_calls(msg)
                 yield CreateResult(
-                    content=function_calls,
+                    content=FunctionCalls(function_calls=function_calls),
                     finish_reason="function_calls",
                     usage=RequestUsage(prompt_tokens=prompt_tokens, completion_tokens=completion_tokens),
                     cached=False,
diff --git a/python/packages/autogen-ext/tests/models/test_openai_model_client.py b/python/packages/autogen-ext/tests/models/test_openai_model_client.py
index d629cdc428a5..7957c7f2bfde 100644
--- a/python/packages/autogen-ext/tests/models/test_openai_model_client.py
+++ b/python/packages/autogen-ext/tests/models/test_openai_model_client.py
@@ -5,7 +5,7 @@
 from unittest.mock import MagicMock
 
 import pytest
-from autogen_core import CancellationToken, FunctionCall, Image
+from autogen_core import CancellationToken, FunctionCall, FunctionCalls, Image
 from autogen_core.models import (
     AssistantMessage,
     CreateResult,
@@ -610,7 +610,10 @@ async def test_tool_calling(monkeypatch: pytest.MonkeyPatch) -> None:
 
     # Single tool call
     create_result = await model_client.create(messages=[UserMessage(content="Hello", source="user")], tools=[pass_tool])
-    assert create_result.content == [FunctionCall(id="1", arguments=r'{"input": "task"}', name="_pass_function")]
+    assert isinstance(create_result.content, FunctionCalls)
+    assert create_result.content.function_calls == [
+        FunctionCall(id="1", arguments=r'{"input": "task"}', name="_pass_function")
+    ]
     # Verify that the tool schema was passed to the model client.
     kwargs = mock.calls[0]
     assert kwargs["tools"] == [{"function": pass_tool.schema, "type": "function"}]
@@ -621,7 +624,8 @@ async def test_tool_calling(monkeypatch: pytest.MonkeyPatch) -> None:
     create_result = await model_client.create(
         messages=[UserMessage(content="Hello", source="user")], tools=[pass_tool, fail_tool, echo_tool]
     )
-    assert create_result.content == [
+    assert isinstance(create_result.content, FunctionCalls)
+    assert create_result.content.function_calls == [
         FunctionCall(id="1", arguments=r'{"input": "task"}', name="_pass_function"),
         FunctionCall(id="2", arguments=r'{"input": "task"}', name="_fail_function"),
         FunctionCall(id="3", arguments=r'{"input": "task"}', name="_echo_function"),
@@ -641,7 +645,10 @@ async def test_tool_calling(monkeypatch: pytest.MonkeyPatch) -> None:
     create_result = await model_client.create(
         messages=[UserMessage(content="Hello", source="user")], tools=[pass_tool]
     )
-    assert create_result.content == [FunctionCall(id="1", arguments=r'{"input": "task"}', name="_pass_function")]
+    assert isinstance(create_result.content, FunctionCalls)
+    assert create_result.content.function_calls == [
+        FunctionCall(id="1", arguments=r'{"input": "task"}', name="_pass_function")
+    ]
     assert create_result.finish_reason == "function_calls"
 
     # Warning completion when content is not None.
@@ -649,7 +656,10 @@ async def test_tool_calling(monkeypatch: pytest.MonkeyPatch) -> None:
     create_result = await model_client.create(
         messages=[UserMessage(content="Hello", source="user")], tools=[pass_tool]
     )
-    assert create_result.content == [FunctionCall(id="1", arguments=r'{"input": "task"}', name="_pass_function")]
+    assert isinstance(create_result.content, FunctionCalls)
+    assert create_result.content.function_calls == [
+        FunctionCall(id="1", arguments=r'{"input": "task"}', name="_pass_function")
+    ]
     assert create_result.finish_reason == "function_calls"
@@ -671,9 +681,9 @@ async def _test_model_client(model_client: OpenAIChatCompletionClient) -> None:
     create_result = await model_client.create(messages=messages, tools=[pass_tool, fail_tool])
-    assert isinstance(create_result.content, list)
-    assert len(create_result.content) == 1
-    assert isinstance(create_result.content[0], FunctionCall)
-    assert create_result.content[0].name == "pass_tool"
-    assert json.loads(create_result.content[0].arguments) == {"input": "task"}
+    assert isinstance(create_result.content, FunctionCalls)
+    assert len(create_result.content.function_calls) == 1
+    assert isinstance(create_result.content.function_calls[0], FunctionCall)
+    assert create_result.content.function_calls[0].name == "pass_tool"
+    assert json.loads(create_result.content.function_calls[0].arguments) == {"input": "task"}
     assert create_result.finish_reason == "function_calls"
     assert create_result.usage is not None
 
@@ -681,7 +691,7 @@ async def _test_model_client(model_client: OpenAIChatCompletionClient) -> None:
     messages.append(AssistantMessage(content=create_result.content, source="assistant"))
     messages.append(
         FunctionExecutionResultMessage(
-            content=[FunctionExecutionResult(content="passed", call_id=create_result.content[0].id)]
+            content=[FunctionExecutionResult(content="passed", call_id=create_result.content.function_calls[0].id)]
         )
     )
     create_result = await model_client.create(messages=messages)
@@ -697,12 +707,12 @@ async def _test_model_client(model_client: OpenAIChatCompletionClient) -> None:
     create_result = await model_client.create(messages=messages, tools=[pass_tool, fail_tool])
-    assert isinstance(create_result.content, list)
-    assert len(create_result.content) == 2
-    assert isinstance(create_result.content[0], FunctionCall)
-    assert create_result.content[0].name == "pass_tool"
-    assert json.loads(create_result.content[0].arguments) == {"input": "task"}
-    assert isinstance(create_result.content[1], FunctionCall)
-    assert create_result.content[1].name == "fail_tool"
-    assert json.loads(create_result.content[1].arguments) == {"input": "task"}
+    assert isinstance(create_result.content, FunctionCalls)
+    assert len(create_result.content.function_calls) == 2
+    assert isinstance(create_result.content.function_calls[0], FunctionCall)
+    assert create_result.content.function_calls[0].name == "pass_tool"
+    assert json.loads(create_result.content.function_calls[0].arguments) == {"input": "task"}
+    assert isinstance(create_result.content.function_calls[1], FunctionCall)
+    assert create_result.content.function_calls[1].name == "fail_tool"
+    assert json.loads(create_result.content.function_calls[1].arguments) == {"input": "task"}
     assert create_result.finish_reason == "function_calls"
     assert create_result.usage is not None
 
@@ -711,8 +721,8 @@ async def _test_model_client(model_client: OpenAIChatCompletionClient) -> None:
     messages.append(
         FunctionExecutionResultMessage(
             content=[
-                FunctionExecutionResult(content="passed", call_id=create_result.content[0].id),
-                FunctionExecutionResult(content="failed", call_id=create_result.content[1].id),
+                FunctionExecutionResult(content="passed", call_id=create_result.content.function_calls[0].id),
+                FunctionExecutionResult(content="failed", call_id=create_result.content.function_calls[1].id),
             ]
         )
     )
diff --git a/python/packages/autogen-ext/tests/models/test_sk_chat_completion_adapter.py b/python/packages/autogen-ext/tests/models/test_sk_chat_completion_adapter.py
index 69be349c38c9..9edb25e185ec 100644
--- a/python/packages/autogen-ext/tests/models/test_sk_chat_completion_adapter.py
+++ b/python/packages/autogen-ext/tests/models/test_sk_chat_completion_adapter.py
@@ -3,7 +3,7 @@
 from unittest.mock import AsyncMock
 
 import pytest
-from autogen_core import CancellationToken
+from autogen_core import CancellationToken, FunctionCalls
 from autogen_core.models import CreateResult, LLMMessage, ModelFamily, ModelInfo, SystemMessage, UserMessage
 from autogen_core.tools import BaseTool
 from autogen_ext.models.semantic_kernel import SKChatCompletionAdapter
@@ -252,7 +252,7 @@ async def test_sk_chat_completion_with_tools(sk_client: AzureChatCompletion) -> None:
     result = await adapter.create(messages=messages, tools=[tool], extra_create_args={"kernel": kernel})
 
     # Verify response
-    assert isinstance(result.content, list)
+    assert isinstance(result.content, FunctionCalls)
     assert result.finish_reason == "function_calls"
     assert result.usage.prompt_tokens >= 0
     assert result.usage.completion_tokens >= 0
@@ -306,7 +306,7 @@ async def test_sk_chat_completion_stream_with_tools(sk_client: AzureChatCompletion) -> None:
     assert len(response_chunks) > 0
     final_chunk = response_chunks[-1]
     assert isinstance(final_chunk, CreateResult)
-    assert isinstance(final_chunk.content, list)  # Function calls
+    assert isinstance(final_chunk.content, FunctionCalls)  # Function calls
     assert final_chunk.finish_reason == "function_calls"
     assert final_chunk.usage.prompt_tokens >= 0
     assert final_chunk.usage.completion_tokens >= 0
diff --git a/python/packages/autogen-ext/tests/test_filesurfer_agent.py b/python/packages/autogen-ext/tests/test_filesurfer_agent.py
index 513a1fe8444c..a48e03c5a594 100644
--- a/python/packages/autogen-ext/tests/test_filesurfer_agent.py
+++ b/python/packages/autogen-ext/tests/test_filesurfer_agent.py
@@ -140,8 +140,10 @@ async def test_run_filesurfer(monkeypatch: pytest.MonkeyPatch) -> None:
     # Get the FileSurfer to read the file, and the directory
     assert agent._name == "FileSurfer"  # pyright: ignore[reportPrivateUsage]
     result = await agent.run(task="Please read the test file")
+    assert isinstance(result.messages[1].content, str)
     assert "# FileSurfer test H1" in result.messages[1].content
 
     result = await agent.run(task="Please read the test directory")
+    assert isinstance(result.messages[1].content, str)
     assert "# Index of " in result.messages[1].content
     assert "test_filesurfer_agent.html" in result.messages[1].content
diff --git a/python/packages/autogen-magentic-one/src/autogen_magentic_one/agents/file_surfer/file_surfer.py b/python/packages/autogen-magentic-one/src/autogen_magentic_one/agents/file_surfer/file_surfer.py
index 7e17acdeb0f7..cc3dc6110d49 100644
--- a/python/packages/autogen-magentic-one/src/autogen_magentic_one/agents/file_surfer/file_surfer.py
+++ b/python/packages/autogen-magentic-one/src/autogen_magentic_one/agents/file_surfer/file_surfer.py
@@ -2,7 +2,7 @@
 import time
 from typing import List, Optional, Tuple
 
-from autogen_core import CancellationToken, FunctionCall, default_subscription
+from autogen_core import CancellationToken, FunctionCall, FunctionCalls, default_subscription
 from autogen_core.models import (
     ChatCompletionClient,
     SystemMessage,
@@ -99,8 +99,10 @@ async def _generate_reply(self, cancellation_token: CancellationToken) -> Tuple[
 
         if isinstance(response, str):
             return False, response
-        elif isinstance(response, list) and all(isinstance(item, FunctionCall) for item in response):
-            function_calls = response
+        elif isinstance(response, FunctionCalls) and all(
+            isinstance(item, FunctionCall) for item in response.function_calls
+        ):
+            function_calls = response.function_calls
             for function_call in function_calls:
                 tool_name = function_call.name
 
diff --git a/python/packages/autogen-magentic-one/src/autogen_magentic_one/agents/orchestrator.py b/python/packages/autogen-magentic-one/src/autogen_magentic_one/agents/orchestrator.py
index 2bcd3020fb64..0a89b6c8a2a2 100644
--- a/python/packages/autogen-magentic-one/src/autogen_magentic_one/agents/orchestrator.py
+++ b/python/packages/autogen-magentic-one/src/autogen_magentic_one/agents/orchestrator.py
@@ -1,7 +1,7 @@
 import json
 from typing import Any, Dict, List, Optional
 
-from autogen_core import AgentProxy, CancellationToken, MessageContext, TopicId, default_subscription
+from autogen_core import AgentProxy, CancellationToken, FunctionCalls, MessageContext, TopicId, default_subscription
 from autogen_core.models import (
     AssistantMessage,
     ChatCompletionClient,
@@ -130,9 +130,12 @@ def _get_message_str(self, message: LLMMessage) -> str:
             return message.content
         else:
             result = ""
-            for content in message.content:
-                if isinstance(content, str):
-                    result += content + "\n"
+            if isinstance(message.content, FunctionCalls) and message.content.thought:
+                return message.content.thought
+            elif isinstance(message.content, list):
+                for content in message.content:
+                    if isinstance(content, str):
+                        result += content + "\n"
             assert len(result) > 0
             return result
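The orchestrator change above gives `_get_message_str` sensible text for tool-call-only messages: prefer the model's `thought`. The same selection logic isolated as a helper; the empty-string fallback is ours, where the orchestrator instead asserts non-empty output:

```python
from autogen_core import FunctionCalls
from autogen_core.models import AssistantMessage


def message_text(message: AssistantMessage) -> str:
    if isinstance(message.content, str):
        return message.content
    # Tool-call payloads carry no natural text; use the thought when present.
    if isinstance(message.content, FunctionCalls) and message.content.thought:
        return message.content.thought
    return ""
```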
diff --git a/python/packages/autogen-magentic-one/src/autogen_magentic_one/messages.py b/python/packages/autogen-magentic-one/src/autogen_magentic_one/messages.py
index b4ffdd66b1ad..44d4ebb70dc4 100644
--- a/python/packages/autogen-magentic-one/src/autogen_magentic_one/messages.py
+++ b/python/packages/autogen-magentic-one/src/autogen_magentic_one/messages.py
@@ -1,13 +1,13 @@
 from dataclasses import dataclass
 from typing import Any, Dict, List, Union
 
-from autogen_core import FunctionCall, Image
+from autogen_core import FunctionCalls, Image
 from autogen_core.models import FunctionExecutionResult, LLMMessage
 from pydantic import BaseModel
 
 # Convenience type
 UserContent = Union[str, List[Union[str, Image]]]
-AssistantContent = Union[str, List[FunctionCall]]
+AssistantContent = Union[str, FunctionCalls]
 FunctionExecutionContent = List[FunctionExecutionResult]
 SystemContent = str
 
diff --git a/python/samples/core_async_human_in_the_loop/main.py b/python/samples/core_async_human_in_the_loop/main.py
index ccae231b2118..ce483f3d0cbf 100644
--- a/python/samples/core_async_human_in_the_loop/main.py
+++ b/python/samples/core_async_human_in_the_loop/main.py
@@ -35,6 +35,7 @@
     DefaultInterventionHandler,
     DefaultTopicId,
     FunctionCall,
+    FunctionCalls,
     MessageContext,
     RoutedAgent,
     SingleThreadedAgentRuntime,
@@ -182,8 +183,8 @@ async def handle_message(self, message: UserTextMessage, ctx: MessageContext) -> None:
             self._system_messages + (await self._model_context.get_messages()), tools=tools
         )
 
-        if isinstance(response.content, list) and all(isinstance(item, FunctionCall) for item in response.content):
-            for call in response.content:
+        if isinstance(response.content, FunctionCalls) and all(isinstance(item, FunctionCall) for item in response.content.function_calls):
+            for call in response.content.function_calls:
                 tool = next((tool for tool in tools if tool.name == call.name), None)
                 if tool is None:
                     raise ValueError(f"Tool not found: {call.name}")
diff --git a/python/uv.lock b/python/uv.lock
index 21adda9f946b..1803fd701f01 100644
--- a/python/uv.lock
+++ b/python/uv.lock
@@ -89,7 +89,6 @@ wheels = [
 
 [[package]]
 name = "agbench"
-version = "0.0.1a1"
 source = { editable = "packages/agbench" }
 dependencies = [
     { name = "azure-identity" },
@@ -796,7 +795,6 @@ requires-dist = [
 
 [[package]]
 name = "autogenstudio"
-version = "0.4.0"
 source = { editable = "packages/autogen-studio" }
 dependencies = [
     { name = "aiofiles" },
@@ -4217,7 +4215,6 @@
 name = "nvidia-cublas-cu12"
 version = "12.4.5.8"
 source = { registry = "https://pypi.org/simple" }
 wheels = [
-    { url = "https://files.pythonhosted.org/packages/7f/7f/7fbae15a3982dc9595e49ce0f19332423b260045d0a6afe93cdbe2f1f624/nvidia_cublas_cu12-12.4.5.8-py3-none-manylinux2014_aarch64.whl", hash = "sha256:0f8aa1706812e00b9f19dfe0cdb3999b092ccb8ca168c0db5b8ea712456fd9b3", size = 363333771 },
     { url = "https://files.pythonhosted.org/packages/ae/71/1c91302526c45ab494c23f61c7a84aa568b8c1f9d196efa5993957faf906/nvidia_cublas_cu12-12.4.5.8-py3-none-manylinux2014_x86_64.whl", hash = "sha256:2fc8da60df463fdefa81e323eef2e36489e1c94335b5358bcb38360adf75ac9b", size = 363438805 },
 ]
@@ -4226,7 +4223,6 @@
 name = "nvidia-cuda-cupti-cu12"
 version = "12.4.127"
 source = { registry = "https://pypi.org/simple" }
 wheels = [
-    { url = "https://files.pythonhosted.org/packages/93/b5/9fb3d00386d3361b03874246190dfec7b206fd74e6e287b26a8fcb359d95/nvidia_cuda_cupti_cu12-12.4.127-py3-none-manylinux2014_aarch64.whl", hash = "sha256:79279b35cf6f91da114182a5ce1864997fd52294a87a16179ce275773799458a", size = 12354556 },
     { url = "https://files.pythonhosted.org/packages/67/42/f4f60238e8194a3106d06a058d494b18e006c10bb2b915655bd9f6ea4cb1/nvidia_cuda_cupti_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl", hash = "sha256:9dec60f5ac126f7bb551c055072b69d85392b13311fcc1bcda2202d172df30fb", size = 13813957 },
 ]
"https://files.pythonhosted.org/packages/77/aa/083b01c427e963ad0b314040565ea396f914349914c298556484f799e61b/nvidia_cuda_nvrtc_cu12-12.4.127-py3-none-manylinux2014_aarch64.whl", hash = "sha256:0eedf14185e04b76aa05b1fea04133e59f465b6f960c0cbf4e37c3cb6b0ea198", size = 24133372 }, { url = "https://files.pythonhosted.org/packages/2c/14/91ae57cd4db3f9ef7aa99f4019cfa8d54cb4caa7e00975df6467e9725a9f/nvidia_cuda_nvrtc_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl", hash = "sha256:a178759ebb095827bd30ef56598ec182b85547f1508941a3d560eb7ea1fbf338", size = 24640306 }, ] @@ -4244,7 +4239,6 @@ name = "nvidia-cuda-runtime-cu12" version = "12.4.127" source = { registry = "https://pypi.org/simple" } wheels = [ - { url = "https://files.pythonhosted.org/packages/a1/aa/b656d755f474e2084971e9a297def515938d56b466ab39624012070cb773/nvidia_cuda_runtime_cu12-12.4.127-py3-none-manylinux2014_aarch64.whl", hash = "sha256:961fe0e2e716a2a1d967aab7caee97512f71767f852f67432d572e36cb3a11f3", size = 894177 }, { url = "https://files.pythonhosted.org/packages/ea/27/1795d86fe88ef397885f2e580ac37628ed058a92ed2c39dc8eac3adf0619/nvidia_cuda_runtime_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl", hash = "sha256:64403288fa2136ee8e467cdc9c9427e0434110899d07c779f25b5c068934faa5", size = 883737 }, ] @@ -4267,7 +4261,6 @@ dependencies = [ { name = "nvidia-nvjitlink-cu12", marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" }, ] wheels = [ - { url = "https://files.pythonhosted.org/packages/7a/8a/0e728f749baca3fbeffad762738276e5df60851958be7783af121a7221e7/nvidia_cufft_cu12-11.2.1.3-py3-none-manylinux2014_aarch64.whl", hash = "sha256:5dad8008fc7f92f5ddfa2101430917ce2ffacd86824914c82e28990ad7f00399", size = 211422548 }, { url = "https://files.pythonhosted.org/packages/27/94/3266821f65b92b3138631e9c8e7fe1fb513804ac934485a8d05776e1dd43/nvidia_cufft_cu12-11.2.1.3-py3-none-manylinux2014_x86_64.whl", hash = "sha256:f083fc24912aa410be21fa16d157fed2055dab1cc4b6934a0e03cba69eb242b9", size = 211459117 }, ] @@ -4276,7 +4269,6 @@ name = "nvidia-curand-cu12" version = "10.3.5.147" source = { registry = "https://pypi.org/simple" } wheels = [ - { url = "https://files.pythonhosted.org/packages/80/9c/a79180e4d70995fdf030c6946991d0171555c6edf95c265c6b2bf7011112/nvidia_curand_cu12-10.3.5.147-py3-none-manylinux2014_aarch64.whl", hash = "sha256:1f173f09e3e3c76ab084aba0de819c49e56614feae5c12f69883f4ae9bb5fad9", size = 56314811 }, { url = "https://files.pythonhosted.org/packages/8a/6d/44ad094874c6f1b9c654f8ed939590bdc408349f137f9b98a3a23ccec411/nvidia_curand_cu12-10.3.5.147-py3-none-manylinux2014_x86_64.whl", hash = "sha256:a88f583d4e0bb643c49743469964103aa59f7f708d862c3ddb0fc07f851e3b8b", size = 56305206 }, ] @@ -4290,7 +4282,6 @@ dependencies = [ { name = "nvidia-nvjitlink-cu12", marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" }, ] wheels = [ - { url = "https://files.pythonhosted.org/packages/46/6b/a5c33cf16af09166845345275c34ad2190944bcc6026797a39f8e0a282e0/nvidia_cusolver_cu12-11.6.1.9-py3-none-manylinux2014_aarch64.whl", hash = "sha256:d338f155f174f90724bbde3758b7ac375a70ce8e706d70b018dd3375545fc84e", size = 127634111 }, { url = "https://files.pythonhosted.org/packages/3a/e1/5b9089a4b2a4790dfdea8b3a006052cfecff58139d5a4e34cb1a51df8d6f/nvidia_cusolver_cu12-11.6.1.9-py3-none-manylinux2014_x86_64.whl", hash = "sha256:19e33fa442bcfd085b3086c4ebf7e8debc07cfe01e11513cc6d332fd918ac260", size = 127936057 
@@ -4302,7 +4293,6 @@
 dependencies = [
     { name = "nvidia-nvjitlink-cu12", marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" },
 ]
 wheels = [
-    { url = "https://files.pythonhosted.org/packages/96/a9/c0d2f83a53d40a4a41be14cea6a0bf9e668ffcf8b004bd65633f433050c0/nvidia_cusparse_cu12-12.3.1.170-py3-none-manylinux2014_aarch64.whl", hash = "sha256:9d32f62896231ebe0480efd8a7f702e143c98cfaa0e8a76df3386c1ba2b54df3", size = 207381987 },
     { url = "https://files.pythonhosted.org/packages/db/f7/97a9ea26ed4bbbfc2d470994b8b4f338ef663be97b8f677519ac195e113d/nvidia_cusparse_cu12-12.3.1.170-py3-none-manylinux2014_x86_64.whl", hash = "sha256:ea4f11a2904e2a8dc4b1833cc1b5181cde564edd0d5cd33e3c168eff2d1863f1", size = 207454763 },
 ]
@@ -4319,7 +4309,6 @@
 name = "nvidia-nvjitlink-cu12"
 version = "12.4.127"
 source = { registry = "https://pypi.org/simple" }
 wheels = [
-    { url = "https://files.pythonhosted.org/packages/02/45/239d52c05074898a80a900f49b1615d81c07fceadd5ad6c4f86a987c0bc4/nvidia_nvjitlink_cu12-12.4.127-py3-none-manylinux2014_aarch64.whl", hash = "sha256:4abe7fef64914ccfa909bc2ba39739670ecc9e820c83ccc7a6ed414122599b83", size = 20552510 },
     { url = "https://files.pythonhosted.org/packages/ff/ff/847841bacfbefc97a00036e0fce5a0f086b640756dc38caea5e1bb002655/nvidia_nvjitlink_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl", hash = "sha256:06b3b9b25bf3f8af351d664978ca26a16d2c5127dbd53c0497e28d1fb9611d57", size = 21066810 },
 ]
@@ -4328,7 +4317,6 @@
 name = "nvidia-nvtx-cu12"
 version = "12.4.127"
 source = { registry = "https://pypi.org/simple" }
 wheels = [
-    { url = "https://files.pythonhosted.org/packages/06/39/471f581edbb7804b39e8063d92fc8305bdc7a80ae5c07dbe6ea5c50d14a5/nvidia_nvtx_cu12-12.4.127-py3-none-manylinux2014_aarch64.whl", hash = "sha256:7959ad635db13edf4fc65c06a6e9f9e55fc2f92596db928d169c0bb031e88ef3", size = 100417 },
     { url = "https://files.pythonhosted.org/packages/87/20/199b8713428322a2f22b722c62b8cc278cc53dffa9705d744484b5035ee9/nvidia_nvtx_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl", hash = "sha256:781e950d9b9f60d8241ccea575b32f5105a5baf4c2351cab5256a24869f12a1a", size = 99144 },
 ]