
Commit 6e7f1ec

Merge branch 'main' into rysweet-subscription-type-apis-4154

2 parents 08f760f + 1bf5fbb

7 files changed: +126 −351 lines

python/packages/autogen-ext/src/autogen_ext/models/_openai/_openai_client.py (+110)
@@ -851,6 +851,65 @@ def capabilities(self) -> ModelCapabilities:


 class OpenAIChatCompletionClient(BaseOpenAIChatCompletionClient):
+    """Chat completion client for OpenAI hosted models.
+
+    You can also use this client for OpenAI-compatible ChatCompletion endpoints.
+    **Using this client for non-OpenAI models is not tested or guaranteed.**
+
+    For non-OpenAI models, please first take a look at our `community extensions <https://microsoft.github.io/autogen/dev/user-guide/extensions-user-guide/index.html>`_
+    for additional model clients.
+
+    Args:
+        model (str): The model to use. **Required.**
+        api_key (str): The API key to use. **Required if 'OPENAI_API_KEY' is not found in the environment variables.**
+        timeout (optional, int): The timeout for the request in seconds.
+        max_retries (optional, int): The maximum number of retries to attempt.
+        organization_id (optional, str): The organization ID to use.
+        base_url (optional, str): The base URL to use. **Required if the model is not hosted on OpenAI.**
+        model_capabilities (optional, ModelCapabilities): The capabilities of the model. **Required if the model name is not a valid OpenAI model.**
+
+    To use this client, you must install the `openai` extension:
+
+    .. code-block:: bash
+
+        pip install 'autogen-ext[openai]==0.4.0.dev6'
+
+    The following code snippet shows how to use the client with an OpenAI model:
+
+    .. code-block:: python
+
+        from autogen_ext.models import OpenAIChatCompletionClient
+        from autogen_core.components.models import UserMessage
+
+        openai_model_client = OpenAIChatCompletionClient(
+            model="gpt-4o-2024-08-06",
+            # api_key="sk-...", # Optional if you have an OPENAI_API_KEY environment variable set.
+        )
+
+        result = await openai_model_client.create([UserMessage(content="What is the capital of France?", source="user")])
+        print(result)
+
+    To use the client with a non-OpenAI model, you need to provide the base URL of the model and the model capabilities:
+
+    .. code-block:: python
+
+        from autogen_ext.models import OpenAIChatCompletionClient
+        from autogen_core.components.models import UserMessage
+
+        custom_model_client = OpenAIChatCompletionClient(
+            model="custom-model-name",
+            base_url="https://custom-model.com/rest/of/the/path",
+            api_key="placeholder",
+            model_capabilities={
+                "vision": True,
+                "function_calling": True,
+                "json_output": True,
+            },
+        )
+    """
+
     def __init__(self, **kwargs: Unpack[OpenAIClientConfiguration]):
         if "model" not in kwargs:
             raise ValueError("model is required for OpenAIChatCompletionClient")
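
The snippets in the docstring above call `create` with `await` at the top level, so they need an async context to run. Below is a minimal sketch of running the OpenAI example as a plain script, using only the imports shown in the diff; the `asyncio.run` wrapper is an assumption about how a caller would drive it, not part of this commit.

.. code-block:: python

    import asyncio

    from autogen_core.components.models import UserMessage
    from autogen_ext.models import OpenAIChatCompletionClient


    async def main() -> None:
        # Relies on the OPENAI_API_KEY environment variable, as in the docstring example.
        client = OpenAIChatCompletionClient(model="gpt-4o-2024-08-06")
        result = await client.create([UserMessage(content="What is the capital of France?", source="user")])
        print(result)


    asyncio.run(main())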
@@ -877,6 +877,57 @@ def __setstate__(self, state: Dict[str, Any]) -> None:


 class AzureOpenAIChatCompletionClient(BaseOpenAIChatCompletionClient):
+    """Chat completion client for Azure OpenAI hosted models.
+
+    Args:
+        azure_endpoint (str): The endpoint for the Azure model. **Required for Azure models.**
+        model (str): The deployment ID for the Azure model. **Required for Azure models.**
+        api_version (str): The API version to use. **Required for Azure models.**
+        azure_ad_token (str): The Azure AD token to use. Provide this or `azure_ad_token_provider` for token-based authentication.
+        azure_ad_token_provider (Callable[[], Awaitable[str]]): The Azure AD token provider to use. Provide this or `azure_ad_token` for token-based authentication.
+        model_capabilities (ModelCapabilities): The capabilities of the model. **Required for Azure models.**
+        api_key (optional, str): The API key to use; use this if you are using key-based authentication. It is optional if you are using Azure AD token-based authentication or the `AZURE_OPENAI_API_KEY` environment variable.
+        timeout (optional, int): The timeout for the request in seconds.
+        max_retries (optional, int): The maximum number of retries to attempt.
+
+    To use this client, you must install the `azure` and `openai` extensions:
+
+    .. code-block:: bash
+
+        pip install 'autogen-ext[openai,azure]==0.4.0.dev6'
+
+    To use the client, you need to provide your deployment ID, Azure Cognitive Services endpoint,
+    API version, and model capabilities.
+    For authentication, you can either provide an API key or an Azure Active Directory (AAD) token credential.
+
+    The following code snippet shows how to use AAD authentication.
+    The identity used must be assigned the `Cognitive Services OpenAI User <https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/role-based-access-control#cognitive-services-openai-user>`_ role.
+
+    .. code-block:: python
+
+        from autogen_ext.models import AzureOpenAIChatCompletionClient
+        from azure.identity import DefaultAzureCredential, get_bearer_token_provider
+
+        # Create the token provider
+        token_provider = get_bearer_token_provider(DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default")
+
+        az_model_client = AzureOpenAIChatCompletionClient(
+            model="{your-azure-deployment}",
+            api_version="2024-06-01",
+            azure_endpoint="https://{your-custom-endpoint}.openai.azure.com/",
+            azure_ad_token_provider=token_provider,  # Optional if you choose key-based authentication.
+            # api_key="sk-...", # For key-based authentication. `AZURE_OPENAI_API_KEY` environment variable can also be used instead.
+            model_capabilities={
+                "vision": True,
+                "function_calling": True,
+                "json_output": True,
+            },
+        )
+
+    See `here <https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/managed-identity#chat-completions>`_ for how to use the Azure client directly or for more info.
+    """
+
     def __init__(self, **kwargs: Unpack[AzureOpenAIClientConfiguration]):
         if "model" not in kwargs:
             raise ValueError("model is required for OpenAIChatCompletionClient")
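
The Azure docstring stops after constructing `az_model_client`. Assuming `create` behaves the same as in the OpenAI example above (an assumption about usage, not something this commit states), a request against the Azure deployment would look roughly like the sketch below; the deployment name and endpoint remain placeholders from the docstring.

.. code-block:: python

    import asyncio

    from autogen_core.components.models import UserMessage
    from autogen_ext.models import AzureOpenAIChatCompletionClient
    from azure.identity import DefaultAzureCredential, get_bearer_token_provider


    async def main() -> None:
        token_provider = get_bearer_token_provider(DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default")
        client = AzureOpenAIChatCompletionClient(
            model="{your-azure-deployment}",  # placeholder deployment ID, as in the docstring
            api_version="2024-06-01",
            azure_endpoint="https://{your-custom-endpoint}.openai.azure.com/",
            azure_ad_token_provider=token_provider,
            model_capabilities={"vision": True, "function_calling": True, "json_output": True},
        )
        result = await client.create([UserMessage(content="What is the capital of France?", source="user")])
        print(result)


    asyncio.run(main())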

python/packages/autogen-studio/README.md (+3 −3)
@@ -85,6 +85,7 @@ AutoGen Studio also takes several parameters to customize the application:
 - `--port <port>` argument to specify the port number. By default, it is set to `8080`.
 - `--reload` argument to enable auto-reloading of the server when changes are made to the code. By default, it is set to `False`.
 - `--database-uri` argument to specify the database URI. Example values include `sqlite:///database.sqlite` for SQLite and `postgresql+psycopg://user:password@localhost/dbname` for PostgreSQL. If this is not specified, the database URI defaults to a `database.sqlite` file in the `--appdir` directory.
+- `--upgrade-database` argument to upgrade the database schema to the latest version. By default, it is set to `False`.

 Now that you have AutoGen Studio installed and running, you are ready to explore its capabilities, including defining and modifying agent workflows, interacting with agents and sessions, and expanding agent skills.

@@ -115,12 +116,11 @@ npm run start
 We welcome contributions to AutoGen Studio. We recommend the following general steps to contribute to the project:

 - Review the overall AutoGen project [contribution guide](https://github.com/microsoft/autogen?tab=readme-ov-file#contributing)
-- Please review the AutoGen Studio [roadmap](https://github.com/microsoft/autogen/issues/737) to get a sense of the current priorities for the project. Help is appreciated especially with Studio issues tagged with `help-wanted`
+- Please review the AutoGen Studio [roadmap](https://github.com/microsoft/autogen/issues/4006) to get a sense of the current priorities for the project. Help is appreciated especially with Studio issues tagged with `help-wanted`
 - Please initiate a discussion on the roadmap issue or a new issue to discuss your proposed contribution.
-- Please review the autogenstudio dev branch here [dev branch](https://github.com/microsoft/autogen/tree/autogenstudio) and use as a base for your contribution. This way, your contribution will be aligned with the latest changes in the AutoGen Studio project.
 - Submit a pull request with your contribution!
 - If you are modifying AutoGen Studio, it has its own devcontainer. See instructions in `.devcontainer/README.md` to use it
-- Please use the tag `studio` for any issues, questions, and PRs related to Studio
+- Please use the tag `proj-studio` for any issues, questions, and PRs related to Studio

 ## FAQ
python/packages/autogen-studio/autogenstudio/teammanager.py (+2 −2)
@@ -2,7 +2,7 @@
 import time
 from .database import ComponentFactory, Component
 from .datamodel import TeamResult, TaskResult, ComponentConfigInput
-from autogen_agentchat.messages import InnerMessage, ChatMessage
+from autogen_agentchat.messages import ChatMessage, AgentMessage
 from autogen_core.base import CancellationToken


@@ -35,7 +35,7 @@ async def run_stream(
         team_config: ComponentConfigInput,
         input_func: Optional[Callable] = None,
         cancellation_token: Optional[CancellationToken] = None
-    ) -> AsyncGenerator[Union[InnerMessage, ChatMessage, TaskResult], None]:
+    ) -> AsyncGenerator[Union[AgentMessage, ChatMessage, TaskResult], None]:
         """Stream the team's execution results"""
         start_time = time.time()
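
Because `run_stream` now yields `AgentMessage` instead of `InnerMessage`, callers that branch on message type need the same rename. A hypothetical consumer sketch follows; the `TeamManager()` construction, import path, and `team_config` value are illustrative assumptions, not taken from this diff.

.. code-block:: python

    from autogen_agentchat.messages import AgentMessage, ChatMessage

    from autogenstudio.teammanager import TeamManager


    async def print_run(team_config) -> None:
        manager = TeamManager()
        async for item in manager.run_stream(team_config):
            if isinstance(item, (AgentMessage, ChatMessage)):
                print(item)  # intermediate agent or chat message
            else:
                print("final result:", item)  # presumably the closing TaskResult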

@@ -1,3 +1,3 @@
-VERSION = "0.4.0.dev35"
+VERSION = "0.4.0.dev37"
 __version__ = VERSION
 APP_NAME = "autogenstudio"

python/packages/autogen-studio/autogenstudio/web/managers/connection.py (+2 −2)
@@ -9,7 +9,7 @@
 from ...datamodel import Run, RunStatus, TeamResult
 from ...database import DatabaseManager
 from ...teammanager import TeamManager
-from autogen_agentchat.messages import InnerMessage, ChatMessage, TextMessage
+from autogen_agentchat.messages import AgentMessage, ChatMessage, TextMessage
 from autogen_core.base import CancellationToken

 logger = logging.getLogger(__name__)

@@ -235,7 +235,7 @@ def _format_message(self, message: Any) -> Optional[dict]:
             Optional[dict]: Formatted message or None if formatting fails
         """
         try:
-            if isinstance(message, (InnerMessage, ChatMessage)):
+            if isinstance(message, (AgentMessage, ChatMessage)):
                 return {
                     "type": "message",
                     "data": message.model_dump()

python/packages/autogen-studio/frontend/src/components/views/playground/chat/agentflow/edge.tsx (+1)
@@ -126,6 +126,7 @@ export const CustomEdge: React.FC<CustomEdgeProps> = ({
         style={{
           ...style,
           strokeWidth: finalStrokeWidth,
+          stroke: data.routingType === "secondary" ? "#0891b2" : style.stroke,
         }}
         markerEnd={markerEnd}
       />

0 commit comments
