Skip to content

Commit

Permalink
Replace create_completion_client_from_env with component config (#4928)
Browse files Browse the repository at this point in the history
* Replace create_completion_client_from_env with component config

* json load
  • Loading branch information
jackgerrits authored Jan 8, 2025
1 parent b850dcd commit 538f394
Show file tree
Hide file tree
Showing 13 changed files with 99 additions and 127 deletions.
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import asyncio
import json
import logging
import os
import re
Expand All @@ -10,7 +11,7 @@

from autogen_core import AgentId, AgentProxy, TopicId
from autogen_core import SingleThreadedAgentRuntime
from autogen_core.logging import EVENT_LOGGER_NAME
from autogen_core import EVENT_LOGGER_NAME
from autogen_core.models import (
ChatCompletionClient,
UserMessage,
Expand All @@ -26,7 +27,7 @@
from autogen_magentic_one.messages import BroadcastMessage
from autogen_magentic_one.agents.multimodal_web_surfer import MultimodalWebSurfer
from autogen_magentic_one.agents.file_surfer import FileSurfer
from autogen_magentic_one.utils import LogHandler, message_content_to_str, create_completion_client_from_env
from autogen_magentic_one.utils import LogHandler, message_content_to_str

encoding = None
def count_token(value: str) -> int:
Expand Down Expand Up @@ -123,10 +124,8 @@ async def main() -> None:
runtime = SingleThreadedAgentRuntime()

# Create the AzureOpenAI client from the environment file
client = create_completion_client_from_env()


mlm_client = create_completion_client_from_env()
client = ChatCompletionClient.load_component(json.loads(os.environ["CHAT_COMPLETION_CLIENT_CONFIG"]))
mlm_client = ChatCompletionClient.load_component(json.loads(os.environ["MLM_CHAT_COMPLETION_CLIENT_CONFIG"]))


# Register agents.
Expand Down
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import asyncio
import json
import logging
import os
import re
Expand Down Expand Up @@ -27,7 +28,7 @@
from autogen_magentic_one.messages import BroadcastMessage
from autogen_magentic_one.agents.multimodal_web_surfer import MultimodalWebSurfer
from autogen_magentic_one.agents.file_surfer import FileSurfer
from autogen_magentic_one.utils import LogHandler, message_content_to_str, create_completion_client_from_env
from autogen_magentic_one.utils import LogHandler, message_content_to_str

encoding = None
def count_token(value: str) -> int:
Expand Down Expand Up @@ -124,11 +125,10 @@ async def main() -> None:
runtime = SingleThreadedAgentRuntime()

# Create the AzureOpenAI client, with AAD auth, from environment
client = create_completion_client_from_env()
client = ChatCompletionClient.load_component(json.loads(os.environ["CHAT_COMPLETION_CLIENT_CONFIG"]))
mlm_client = ChatCompletionClient.load_component(json.loads(os.environ["MLM_CHAT_COMPLETION_CLIENT_CONFIG"]))


mlm_client = create_completion_client_from_env()

# Register agents.
await runtime.register(
"Assistant",
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -13,15 +13,15 @@
from autogen_magentic_one.agents.coder import Coder, Executor
from autogen_magentic_one.agents.orchestrator import RoundRobinOrchestrator
from autogen_magentic_one.messages import BroadcastMessage, OrchestrationEvent
from autogen_magentic_one.utils import create_completion_client_from_env


async def main() -> None:
# Create the runtime.
runtime = SingleThreadedAgentRuntime()

# Create the AzureOpenAI client
client = create_completion_client_from_env()
client = ChatCompletionClient.load_component(json.loads(os.environ["CHAT_COMPLETION_CLIENT_CONFIG"]))


# Register agents.
await runtime.register(
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@
from autogen_magentic_one.messages import BroadcastMessage, OrchestrationEvent, RequestReplyMessage, ResetMessage, DeactivateMessage
from autogen_magentic_one.agents.multimodal_web_surfer import MultimodalWebSurfer
from autogen_magentic_one.agents.file_surfer import FileSurfer
from autogen_magentic_one.utils import LogHandler, message_content_to_str, create_completion_client_from_env
from autogen_magentic_one.utils import LogHandler, message_content_to_str


import evaluation_harness
Expand Down Expand Up @@ -120,7 +120,8 @@ async def main() -> None:
runtime = SingleThreadedAgentRuntime()

# Create the AzureOpenAI client, with AAD auth
client = create_completion_client_from_env()
client = ChatCompletionClient.load_component(json.loads(os.environ["CHAT_COMPLETION_CLIENT_CONFIG"]))

# Login assistant
await runtime.register(
"LoginAssistant",
Expand Down
66 changes: 46 additions & 20 deletions python/packages/autogen-magentic-one/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -119,28 +119,39 @@ playwright install --with-deps chromium

## Environment Configuration for Chat Completion Client

This guide outlines how to configure your environment to use the `create_completion_client_from_env` function, which reads environment variables to return an appropriate `ChatCompletionClient`.
This guide outlines how to structure the config to load a ChatCompletionClient for Magentic-One.

```python
from autogen_core.models import ChatCompletionClient
config = {}
client = ChatCompletionClient.load_component(config)
```

Currently, Magentic-One only supports OpenAI's GPT-4o as the underlying LLM.
### Azure OpenAI service
To configure for Azure OpenAI service, set the following environment variables:
- `CHAT_COMPLETION_PROVIDER='azure'`
- `CHAT_COMPLETION_KWARGS_JSON` with the following JSON structure:
To configure for Azure OpenAI service, use the following config:
```json
{
"api_version": "2024-02-15-preview",
"azure_endpoint": "REPLACE_WITH_YOUR_ENDPOINT",
"model_capabilities": {
"function_calling": true,
"json_output": true,
"vision": true
},
"azure_ad_token_provider": "DEFAULT",
"model": "gpt-4o-2024-05-13"
"provider": "AzureOpenAIChatCompletionClient",
"config": {
"model": "gpt-4o-2024-05-13",
"azure_endpoint": "https://{your-custom-endpoint}.openai.azure.com/",
"azure_deployment": "{your-azure-deployment}",
"api_version": "2024-06-01",
"azure_ad_token_provider": {
"provider": "autogen_ext.models.openai.AzureTokenProvider",
"config": {
"provider_kind": "DefaultAzureCredential",
"scopes": [
"https://cognitiveservices.azure.com/.default"
]
}
}
}
}
```
Expand All @@ -150,19 +161,34 @@ Log in to Azure using `az login`, and then run the examples. The account used mu
Note that even if you are the owner of the subscription, you still need to grant the necessary Azure Cognitive Services OpenAI permissions to call the API.
### With OpenAI
Or, to use an API key:
```json
{
"provider": "AzureOpenAIChatCompletionClient",
"config": {
"model": "gpt-4o-2024-05-13",
"azure_endpoint": "https://{your-custom-endpoint}.openai.azure.com/",
"azure_deployment": "{your-azure-deployment}",
"api_version": "2024-06-01",
"api_key": "REPLACE_WITH_YOUR_API_KEY"
}
}
```
To configure for OpenAI, set the following environment variables:
### With OpenAI
- `CHAT_COMPLETION_PROVIDER='openai'`
- `CHAT_COMPLETION_KWARGS_JSON` with the following JSON structure:
To configure for OpenAI, use the following config:
```json
{
"api_key": "REPLACE_WITH_YOUR_API_KEY",
"model": "gpt-4o-2024-05-13"
"provider": "OpenAIChatCompletionClient",
"config": {
"model": "gpt-4o-2024-05-13",
"api_key": "REPLACE_WITH_YOUR_API_KEY"
}
}
```
Feel free to replace the model with newer versions of gpt-4o if needed.
### Other Keys (Optional)
Expand Down
7 changes: 5 additions & 2 deletions python/packages/autogen-magentic-one/examples/example.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,19 +2,21 @@

import argparse
import asyncio
import json
import logging
import os

from autogen_core import EVENT_LOGGER_NAME, AgentId, AgentProxy, SingleThreadedAgentRuntime
from autogen_core.code_executor import CodeBlock
from autogen_core.models._model_client import ChatCompletionClient
from autogen_ext.code_executors.docker import DockerCommandLineCodeExecutor
from autogen_magentic_one.agents.coder import Coder, Executor
from autogen_magentic_one.agents.file_surfer import FileSurfer
from autogen_magentic_one.agents.multimodal_web_surfer import MultimodalWebSurfer
from autogen_magentic_one.agents.orchestrator import LedgerOrchestrator
from autogen_magentic_one.agents.user_proxy import UserProxy
from autogen_magentic_one.messages import RequestReplyMessage
from autogen_magentic_one.utils import LogHandler, create_completion_client_from_env
from autogen_magentic_one.utils import LogHandler

# NOTE: Don't forget to 'playwright install --with-deps chromium'

Expand All @@ -32,7 +34,8 @@ async def main(logs_dir: str, hil_mode: bool, save_screenshots: bool) -> None:
runtime = SingleThreadedAgentRuntime()

# Create an appropriate client
client = create_completion_client_from_env(model="gpt-4o")
client = ChatCompletionClient.load_component(json.loads(os.environ["CHAT_COMPLETION_CLIENT_CONFIG"]))
assert client.model_info["family"] == "gpt-4o", "This example requires the gpt-4o model"

async with DockerCommandLineCodeExecutor(work_dir=logs_dir) as code_executor:
# Register agents.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -5,16 +5,19 @@
"""

import asyncio
import json
import logging
import os

from autogen_core import EVENT_LOGGER_NAME, AgentId, AgentProxy, SingleThreadedAgentRuntime
from autogen_core.code_executor import CodeBlock
from autogen_core.models._model_client import ChatCompletionClient
from autogen_ext.code_executors.docker import DockerCommandLineCodeExecutor
from autogen_magentic_one.agents.coder import Coder, Executor
from autogen_magentic_one.agents.orchestrator import RoundRobinOrchestrator
from autogen_magentic_one.agents.user_proxy import UserProxy
from autogen_magentic_one.messages import RequestReplyMessage
from autogen_magentic_one.utils import LogHandler, create_completion_client_from_env
from autogen_magentic_one.utils import LogHandler


async def confirm_code(code: CodeBlock) -> bool:
Expand All @@ -29,9 +32,10 @@ async def main() -> None:
# Create the runtime.
runtime = SingleThreadedAgentRuntime()

model_client = ChatCompletionClient.load_component(json.loads(os.environ["CHAT_COMPLETION_CLIENT_CONFIG"]))
async with DockerCommandLineCodeExecutor() as code_executor:
# Register agents.
await Coder.register(runtime, "Coder", lambda: Coder(model_client=create_completion_client_from_env()))
await Coder.register(runtime, "Coder", lambda: Coder(model_client=model_client))
coder = AgentProxy(AgentId("Coder", "default"), runtime)

await Executor.register(
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -3,22 +3,25 @@
to write input or perform actions, orchestrated by an round-robin orchestrator agent."""

import asyncio
import json
import logging
import os

from autogen_core import EVENT_LOGGER_NAME, AgentId, AgentProxy, SingleThreadedAgentRuntime
from autogen_core.models._model_client import ChatCompletionClient
from autogen_magentic_one.agents.file_surfer import FileSurfer
from autogen_magentic_one.agents.orchestrator import RoundRobinOrchestrator
from autogen_magentic_one.agents.user_proxy import UserProxy
from autogen_magentic_one.messages import RequestReplyMessage
from autogen_magentic_one.utils import LogHandler, create_completion_client_from_env
from autogen_magentic_one.utils import LogHandler


async def main() -> None:
# Create the runtime.
runtime = SingleThreadedAgentRuntime()

# Get an appropriate client
client = create_completion_client_from_env()
client = ChatCompletionClient.load_component(json.loads(os.environ["CHAT_COMPLETION_CLIENT_CONFIG"]))

# Register agents.
await FileSurfer.register(runtime, "file_surfer", lambda: FileSurfer(model_client=client))
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -5,24 +5,27 @@
The code snippets are not executed in this example."""

import asyncio
import json
import logging
import os

from autogen_core import EVENT_LOGGER_NAME, AgentId, AgentProxy, SingleThreadedAgentRuntime

# from typing import Any, Dict, List, Tuple, Union
from autogen_core.models._model_client import ChatCompletionClient
from autogen_magentic_one.agents.coder import Coder
from autogen_magentic_one.agents.orchestrator import RoundRobinOrchestrator
from autogen_magentic_one.agents.user_proxy import UserProxy
from autogen_magentic_one.messages import RequestReplyMessage
from autogen_magentic_one.utils import LogHandler, create_completion_client_from_env
from autogen_magentic_one.utils import LogHandler


async def main() -> None:
# Create the runtime.
runtime = SingleThreadedAgentRuntime()

# Get an appropriate client
client = create_completion_client_from_env()
client = ChatCompletionClient.load_component(json.loads(os.environ["CHAT_COMPLETION_CLIENT_CONFIG"]))

# Register agents.
await Coder.register(runtime, "Coder", lambda: Coder(model_client=client))
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -4,15 +4,17 @@
orchestrated by an round-robin orchestrator agent."""

import asyncio
import json
import logging
import os

from autogen_core import EVENT_LOGGER_NAME, AgentId, AgentProxy, SingleThreadedAgentRuntime
from autogen_core.models import ChatCompletionClient
from autogen_magentic_one.agents.multimodal_web_surfer import MultimodalWebSurfer
from autogen_magentic_one.agents.orchestrator import RoundRobinOrchestrator
from autogen_magentic_one.agents.user_proxy import UserProxy
from autogen_magentic_one.messages import RequestReplyMessage
from autogen_magentic_one.utils import LogHandler, create_completion_client_from_env
from autogen_magentic_one.utils import LogHandler

# NOTE: Don't forget to 'playwright install --with-deps chromium'

Expand All @@ -22,7 +24,8 @@ async def main() -> None:
runtime = SingleThreadedAgentRuntime()

# Create an appropriate client
client = create_completion_client_from_env(model="gpt-4o")
client = ChatCompletionClient.load_component(json.loads(os.environ["CHAT_COMPLETION_CLIENT_CONFIG"]))
assert client.model_info["family"] == "gpt-4o", "This example requires the gpt-4o model"

# Register agents.
await MultimodalWebSurfer.register(runtime, "WebSurfer", MultimodalWebSurfer)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@
from autogen_core import EVENT_LOGGER_NAME
from autogen_core import AgentId, AgentProxy
from autogen_core import DefaultTopicId
from autogen_core.models._model_client import ChatCompletionClient
from autogen_ext.code_executors.local import LocalCommandLineCodeExecutor
from autogen_ext.code_executors.docker import DockerCommandLineCodeExecutor
from autogen_core.code_executor import CodeBlock
Expand All @@ -19,7 +20,7 @@
from autogen_magentic_one.agents.orchestrator import LedgerOrchestrator
from autogen_magentic_one.agents.user_proxy import UserProxy
from autogen_magentic_one.messages import BroadcastMessage
from autogen_magentic_one.utils import LogHandler, create_completion_client_from_env
from autogen_magentic_one.utils import LogHandler
from autogen_core.models import UserMessage
from threading import Lock

Expand Down Expand Up @@ -60,7 +61,9 @@ async def initialize(self) -> None:
logger.handlers = [self.log_handler]

# Create client
client = create_completion_client_from_env(model="gpt-4o")
client = ChatCompletionClient.load_component(json.loads(os.environ["CHAT_COMPLETION_CLIENT_CONFIG"]))
assert client.model_info["family"] == "gpt-4o", "This example requires the gpt-4o model"


# Set up code executor
self.code_executor = DockerCommandLineCodeExecutor(work_dir=self.logs_dir)
Expand Down
Loading

0 comments on commit 538f394

Please sign in to comment.