diff --git a/python/samples/agentchat_chess_game/.gitignore b/python/samples/agentchat_chess_game/.gitignore
new file mode 100644
index 000000000000..189b1a838595
--- /dev/null
+++ b/python/samples/agentchat_chess_game/.gitignore
@@ -0,0 +1 @@
+model_config.yml
diff --git a/python/samples/agentchat_chess_game/README.md b/python/samples/agentchat_chess_game/README.md
new file mode 100644
index 000000000000..00251bcfd7c1
--- /dev/null
+++ b/python/samples/agentchat_chess_game/README.md
@@ -0,0 +1,87 @@
+# AgentChat Chess Game
+
+This is a simple chess game that you can play with an AI agent.
+
+## Setup
+
+Install the `chess` package with the following command:
+
+```bash
+pip install "chess"
+```
+
+To use OpenAI models or models hosted on OpenAI-compatible API endpoints,
+you also need the `autogen-ext[openai]` package, which you can install with:
+
+```bash
+pip install "autogen-ext[openai]"
+# pip install "autogen-ext[openai,azure]" for Azure OpenAI models
+```
+
+Create a new file named `model_config.yml` in the same directory as the script
+to configure the model you want to use.
+
+For example, to use the `gpt-4o` model from OpenAI, you can use the following configuration:
+
+```yaml
+provider: autogen_ext.models.openai.OpenAIChatCompletionClient
+config:
+ model: gpt-4o
+ api_key: REPLACE_WITH_YOUR_API_KEY
+```
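+
+To use Azure OpenAI instead, a configuration like the following should work
+(fill in the endpoint, deployment, and API version placeholders with your own values):
+
+```yaml
+provider: autogen_ext.models.openai.AzureOpenAIChatCompletionClient
+config:
+  model: gpt-4o
+  azure_endpoint: https://{your-custom-endpoint}.openai.azure.com/
+  azure_deployment: {your-azure-deployment}
+  api_version: {your-api-version}
+  api_key: REPLACE_WITH_YOUR_API_KEY
+```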
+
+To use a locally hosted DeepSeek-R1:8b model via Ollama through its OpenAI-compatible endpoint,
+you can use the following configuration:
+
+```yaml
+provider: autogen_ext.models.openai.OpenAIChatCompletionClient
+config:
+ model: deepseek-r1:8b
+ base_url: http://localhost:11434/v1
+ api_key: ollama
+ model_info:
+ function_calling: false
+ json_output: false
+ vision: false
+ family: r1
+```
+
+For more information on how to configure the model and use other providers,
+please refer to the [Models documentation](https://microsoft.github.io/autogen/stable/user-guide/agentchat-user-guide/tutorial/models.html).
+
+## Run
+
+Run the following command to start the game:
+
+```bash
+python main.py
+```
+
+By default, the game pits the AI agent against a random player.
+You can enable human vs. AI mode by passing the `--human` flag;
+enter your moves in UCI format (e.g., `e2e4`) when prompted:
+
+```bash
+python main.py --human
+```
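+
+If the AI fails to provide a valid move within the allowed number of attempts,
+a random move is played on its behalf. You can adjust the limit with the
+`--max-tries` flag (default: 10):
+
+```bash
+python main.py --max-tries 5
+```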
diff --git a/python/samples/agentchat_chess_game/main.py b/python/samples/agentchat_chess_game/main.py
new file mode 100644
index 000000000000..51870a9135cb
--- /dev/null
+++ b/python/samples/agentchat_chess_game/main.py
@@ -0,0 +1,139 @@
+import argparse
+import asyncio
+import random
+
+import chess
+import yaml
+from autogen_agentchat.agents import AssistantAgent
+from autogen_agentchat.ui import Console
+from autogen_core.model_context import BufferedChatCompletionContext
+from autogen_core.models import ChatCompletionClient
+
+
+def create_ai_player() -> AssistantAgent:
+ # Load the model client from config.
+ with open("model_config.yml", "r") as f:
+ model_config = yaml.safe_load(f)
+ model_client = ChatCompletionClient.load_component(model_config)
+ # Create an agent that can use the model client.
+ player = AssistantAgent(
+ name="ai_player",
+ model_client=model_client,
+ system_message=None,
+ model_client_stream=True, # Enable streaming for the model client.
+ model_context=BufferedChatCompletionContext(buffer_size=10), # Model context limited to the last 10 messages.
+ )
+ return player
+
+
+def get_random_move(board: chess.Board) -> str:
+ legal_moves = list(board.legal_moves)
+ move = random.choice(legal_moves)
+ return move.uci()
+
+
+def get_ai_prompt(board: chess.Board) -> str:
+ try:
+ last_move = board.peek().uci()
+ except IndexError:
+ last_move = None
+ # Current player color.
+ player_color = "white" if board.turn == chess.WHITE else "black"
+ user_color = "black" if player_color == "white" else "white"
+ legal_moves = ", ".join([move.uci() for move in board.legal_moves])
+ if last_move is None:
+ prompt = f"New Game!\nBoard: {board.fen()}\nYou play {player_color}\nYour legal moves: {legal_moves}\n"
+ else:
+ prompt = f"Board: {board.fen()}\nYou play {player_color}\nUser ({user_color})'s last move: {last_move}\nYour legal moves: {legal_moves}\n"
+ example_move = get_random_move(board)
+ return (
+ prompt
+ + "Respond with this format: {your move in UCI format}. "
+ + f"For example, {example_move}."
+ )
+
+
+def get_user_prompt(board: chess.Board) -> str:
+ try:
+ last_move = board.peek().uci()
+ except IndexError:
+ last_move = None
+ # Current player color.
+ player_color = "white" if board.turn == chess.WHITE else "black"
+ legal_moves = ", ".join([move.uci() for move in board.legal_moves])
+ board_display = board.unicode(borders=True)
+    if last_move is None:
+        prompt = f"New Game!\nBoard:\n{board_display}\nYou play {player_color}\nYour legal moves: {legal_moves}\n"
+    else:
+        prompt = f"Board:\n{board_display}\nYou play {player_color}\nAI's last move: {last_move}\nYour legal moves: {legal_moves}\n"
+    return prompt + "Enter your move in UCI format: "
+
+
+def extract_move(response: str) -> str:
+    # Extract the move string enclosed in <move> and </move> tags.
+    start = response.find("<move>")
+    end = response.find("</move>")
+    if start == -1 or end == -1:
+        raise ValueError("Invalid response format.")
+    return response[start + len("<move>") : end]
+
+
+async def get_ai_move(board: chess.Board, player: AssistantAgent, max_tries: int) -> str:
+ task = get_ai_prompt(board)
+ count = 0
+ while count < max_tries:
+ result = await Console(player.run_stream(task=task))
+ count += 1
+ response = result.messages[-1].content
+ assert isinstance(response, str)
+        # Check if the response contains a valid UCI move.
+ try:
+ move = chess.Move.from_uci(extract_move(response))
+ except (ValueError, IndexError):
+ task = "Invalid format. Please read instruction.\n" + get_ai_prompt(board)
+ continue
+ # Check if the move is legal.
+ if move not in board.legal_moves:
+ task = "Invalid move. Please enter a move from the list of legal moves.\n" + get_ai_prompt(board)
+ continue
+ return move.uci()
+ # If the player does not provide a valid move, return a random move.
+ return get_random_move(board)
+
+
+async def main(human_player: bool, max_tries: int) -> None:
+ board = chess.Board()
+ player = create_ai_player()
+ while not board.is_game_over():
+ # Get the AI's move.
+ ai_move = await get_ai_move(board, player, max_tries)
+ # Make the AI's move.
+ board.push(chess.Move.from_uci(ai_move))
+ # Check if the game is over.
+ if board.is_game_over():
+ break
+ # Get the user's move.
+ if human_player:
+ user_move = input(get_user_prompt(board))
+ else:
+ user_move = get_random_move(board)
+ # Make the user's move.
+ board.push(chess.Move.from_uci(user_move))
+ print("--------- User --------")
+ print(user_move)
+ print("-------- Board --------")
+ print(board.unicode(borders=True))
+
+ result = "AI wins!" if board.result() == "1-0" else "User wins!" if board.result() == "0-1" else "Draw!"
+ print("----------------")
+ print(f"Game over! Result: {result}")
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--human", action="store_true", help="Enable human vs. AI mode.")
+ parser.add_argument(
+ "--max-tries", type=int, default=10, help="Maximum number of tries for AI input before a random move take over."
+ )
+ args = parser.parse_args()
+ asyncio.run(main(args.human, args.max_tries))
diff --git a/python/samples/agentchat_graphrag/.gitignore b/python/samples/agentchat_graphrag/.gitignore
index 7eb281128444..f425f8f478f1 100644
--- a/python/samples/agentchat_graphrag/.gitignore
+++ b/python/samples/agentchat_graphrag/.gitignore
@@ -1,3 +1,3 @@
-model_config.json
+model_config.yaml
data
cache
\ No newline at end of file
diff --git a/python/samples/agentchat_graphrag/README.md b/python/samples/agentchat_graphrag/README.md
index 7c0f85b038ca..6a606192ece2 100644
--- a/python/samples/agentchat_graphrag/README.md
+++ b/python/samples/agentchat_graphrag/README.md
@@ -36,7 +36,7 @@ pip install -r requirements.txt
3. Adjust the `settings.yaml` file with your LLM and embedding configuration. Ensure that the API keys and other necessary details are correctly set.
-4. Create a `model_config.json` file with the Assistant model configuration. Use the `model_config_template.json` file as a reference. Make sure to remove the comments in the template file.
+4. Create a `model_config.yaml` file with the Assistant model configuration. Use the `model_config_template.yaml` file as a reference; uncomment the configuration variant you want to use.
5. Run the `graphrag prompt-tune` command to tune the prompts. This step adjusts the prompts to better fit the context of the downloaded text.
diff --git a/python/samples/agentchat_graphrag/app.py b/python/samples/agentchat_graphrag/app.py
index fd3b2aedf884..6966b2f20637 100644
--- a/python/samples/agentchat_graphrag/app.py
+++ b/python/samples/agentchat_graphrag/app.py
@@ -1,15 +1,16 @@
import argparse
import asyncio
-import json
import logging
from typing import Any, Dict
+
+import yaml
+from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.ui import Console
+from autogen_core.models import ChatCompletionClient
from autogen_ext.tools.graphrag import (
GlobalSearchTool,
LocalSearchTool,
)
-from autogen_agentchat.agents import AssistantAgent
-from autogen_core.models import ChatCompletionClient
async def main(model_config: Dict[str, Any]) -> None:
@@ -17,13 +18,9 @@ async def main(model_config: Dict[str, Any]) -> None:
model_client = ChatCompletionClient.load_component(model_config)
# Set up global search tool
- global_tool = GlobalSearchTool.from_settings(
- settings_path="./settings.yaml"
- )
+ global_tool = GlobalSearchTool.from_settings(settings_path="./settings.yaml")
- local_tool = LocalSearchTool.from_settings(
- settings_path="./settings.yaml"
- )
+ local_tool = LocalSearchTool.from_settings(settings_path="./settings.yaml")
# Create assistant agent with both search tools
assistant_agent = AssistantAgent(
@@ -36,23 +33,21 @@ async def main(model_config: Dict[str, Any]) -> None:
"For specific, detailed information about particular entities or relationships, call the 'local_search' function. "
"For broader, abstract questions requiring a comprehensive understanding of the dataset, call the 'global_search' function. "
"Do not attempt to answer the query directly; focus solely on selecting and calling the correct function."
- )
+ ),
)
-
# Run a sample query
query = "What does the station-master says about Dr. Becher?"
print(f"\nQuery: {query}")
-
- await Console(assistant_agent.run_stream(task=query))
+ await Console(assistant_agent.run_stream(task=query))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run a GraphRAG search with an agent.")
parser.add_argument("--verbose", action="store_true", help="Enable verbose logging.")
parser.add_argument(
- "--model-config", type=str, help="Path to the model configuration file.", default="model_config.json"
+ "--model-config", type=str, help="Path to the model configuration file.", default="model_config.yaml"
)
args = parser.parse_args()
if args.verbose:
@@ -62,5 +57,5 @@ async def main(model_config: Dict[str, Any]) -> None:
logging.getLogger("autogen_core").addHandler(handler)
with open(args.model_config, "r") as f:
- model_config = json.load(f)
+ model_config = yaml.safe_load(f)
asyncio.run(main(model_config))
diff --git a/python/samples/agentchat_graphrag/model_config_template.json b/python/samples/agentchat_graphrag/model_config_template.json
deleted file mode 100644
index a66457f3b8e3..000000000000
--- a/python/samples/agentchat_graphrag/model_config_template.json
+++ /dev/null
@@ -1,38 +0,0 @@
-// Use Azure OpenAI with AD token provider.
-// {
-// "provider": "AzureOpenAIChatCompletionClient",
-// "config": {
-// "model": "gpt-4o-2024-05-13",
-// "azure_endpoint": "https://{your-custom-endpoint}.openai.azure.com/",
-// "azure_deployment": "{your-azure-deployment}",
-// "api_version": "2024-06-01",
-// "azure_ad_token_provider": {
-// "provider": "autogen_ext.auth.azure.AzureTokenProvider",
-// "config": {
-// "provider_kind": "DefaultAzureCredential",
-// "scopes": [
-// "https://cognitiveservices.azure.com/.default"
-// ]
-// }
-// }
-// }
-// }
-// Use Azure Open AI with key
-// {
-// "provider": "AzureOpenAIChatCompletionClient",
-// "config": {
-// "model": "gpt-4o-2024-05-13",
-// "azure_endpoint": "https://{your-custom-endpoint}.openai.azure.com/",
-// "azure_deployment": "{your-azure-deployment}",
-// "api_version": "2024-06-01",
-// "api_key": "REPLACE_WITH_YOUR_API_KEY"
-// }
-// }
-// Use Open AI with key
-{
- "provider": "OpenAIChatCompletionClient",
- "config": {
- "model": "gpt-4o-2024-05-13",
- "api_key": "REPLACE_WITH_YOUR_API_KEY"
- }
-}
\ No newline at end of file
diff --git a/python/samples/agentchat_graphrag/model_config_template.yaml b/python/samples/agentchat_graphrag/model_config_template.yaml
new file mode 100644
index 000000000000..9768f5df0fe1
--- /dev/null
+++ b/python/samples/agentchat_graphrag/model_config_template.yaml
@@ -0,0 +1,26 @@
+# Use OpenAI with API key
+provider: autogen_ext.models.openai.OpenAIChatCompletionClient
+config:
+ model: gpt-4o
+ api_key: REPLACE_WITH_YOUR_API_KEY
+# Use Azure OpenAI with API key
+# provider: autogen_ext.models.openai.AzureOpenAIChatCompletionClient
+# config:
+# model: gpt-4o
+# azure_endpoint: https://{your-custom-endpoint}.openai.azure.com/
+# azure_deployment: {your-azure-deployment}
+# api_version: {your-api-version}
+# api_key: REPLACE_WITH_YOUR_API_KEY
+# Use Azure OpenAI with AD token provider.
+# provider: autogen_ext.models.openai.AzureOpenAIChatCompletionClient
+# config:
+# model: gpt-4o
+# azure_endpoint: https://{your-custom-endpoint}.openai.azure.com/
+# azure_deployment: {your-azure-deployment}
+# api_version: {your-api-version}
+# azure_ad_token_provider:
+# provider: autogen_ext.auth.azure.AzureTokenProvider
+# config:
+# provider_kind: DefaultAzureCredential
+# scopes:
+# - https://cognitiveservices.azure.com/.default
diff --git a/python/samples/core_async_human_in_the_loop/.gitignore b/python/samples/core_async_human_in_the_loop/.gitignore
index f228262f0a35..189b1a838595 100644
--- a/python/samples/core_async_human_in_the_loop/.gitignore
+++ b/python/samples/core_async_human_in_the_loop/.gitignore
@@ -1 +1 @@
-model_config.json
\ No newline at end of file
+model_config.yml
diff --git a/python/samples/core_async_human_in_the_loop/README.md b/python/samples/core_async_human_in_the_loop/README.md
index 7ed54cc92416..f97942a74409 100644
--- a/python/samples/core_async_human_in_the_loop/README.md
+++ b/python/samples/core_async_human_in_the_loop/README.md
@@ -2,23 +2,30 @@
An example showing human-in-the-loop which waits for human input before making the tool call.
-## Running the examples
-
-### Prerequisites
+## Prerequisites
First, you need a shell with AutoGen core and required dependencies installed.
```bash
-pip install "autogen-core" "autogen-ext[openai,azure]"
+pip install "autogen-ext[openai,azure]"
```
-### Model Configuration
+## Model Configuration
-The model configuration should defined in a `model_config.json` file.
-Use `model_config_template.json` as a template.
+The model configuration should be defined in a `model_config.yml` file.
+Use `model_config_template.yml` as a template.
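+
+For example, a minimal OpenAI configuration (mirroring the template) looks like:
+
+```yaml
+provider: autogen_ext.models.openai.OpenAIChatCompletionClient
+config:
+  model: gpt-4o
+  api_key: REPLACE_WITH_YOUR_API_KEY
+```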
-### Running the example
+## Running the example
```bash
python main.py
-```
\ No newline at end of file
+```
diff --git a/python/samples/core_async_human_in_the_loop/main.py b/python/samples/core_async_human_in_the_loop/main.py
index ccae231b2118..2b19179de6a0 100644
--- a/python/samples/core_async_human_in_the_loop/main.py
+++ b/python/samples/core_async_human_in_the_loop/main.py
@@ -50,6 +50,7 @@
)
from autogen_core.tools import BaseTool
from pydantic import BaseModel, Field
+import yaml
@dataclass
@@ -332,8 +333,8 @@ async def ainput(prompt: str = "") -> str:
# if os.path.exists("state.json"):
# os.remove("state.json")
- with open("model_config.json") as f:
- model_config = json.load(f)
+ with open("model_config.yml") as f:
+ model_config = yaml.safe_load(f)
def get_user_input(question_for_user: str):
print("--------------------------QUESTION_FOR_USER--------------------------")
diff --git a/python/samples/core_async_human_in_the_loop/model_config_template.json b/python/samples/core_async_human_in_the_loop/model_config_template.json
deleted file mode 100644
index a66457f3b8e3..000000000000
--- a/python/samples/core_async_human_in_the_loop/model_config_template.json
+++ /dev/null
@@ -1,38 +0,0 @@
-// Use Azure OpenAI with AD token provider.
-// {
-// "provider": "AzureOpenAIChatCompletionClient",
-// "config": {
-// "model": "gpt-4o-2024-05-13",
-// "azure_endpoint": "https://{your-custom-endpoint}.openai.azure.com/",
-// "azure_deployment": "{your-azure-deployment}",
-// "api_version": "2024-06-01",
-// "azure_ad_token_provider": {
-// "provider": "autogen_ext.auth.azure.AzureTokenProvider",
-// "config": {
-// "provider_kind": "DefaultAzureCredential",
-// "scopes": [
-// "https://cognitiveservices.azure.com/.default"
-// ]
-// }
-// }
-// }
-// }
-// Use Azure Open AI with key
-// {
-// "provider": "AzureOpenAIChatCompletionClient",
-// "config": {
-// "model": "gpt-4o-2024-05-13",
-// "azure_endpoint": "https://{your-custom-endpoint}.openai.azure.com/",
-// "azure_deployment": "{your-azure-deployment}",
-// "api_version": "2024-06-01",
-// "api_key": "REPLACE_WITH_YOUR_API_KEY"
-// }
-// }
-// Use Open AI with key
-{
- "provider": "OpenAIChatCompletionClient",
- "config": {
- "model": "gpt-4o-2024-05-13",
- "api_key": "REPLACE_WITH_YOUR_API_KEY"
- }
-}
\ No newline at end of file
diff --git a/python/samples/core_async_human_in_the_loop/model_config_template.yml b/python/samples/core_async_human_in_the_loop/model_config_template.yml
new file mode 100644
index 000000000000..9768f5df0fe1
--- /dev/null
+++ b/python/samples/core_async_human_in_the_loop/model_config_template.yml
@@ -0,0 +1,26 @@
+# Use OpenAI with API key
+provider: autogen_ext.models.openai.OpenAIChatCompletionClient
+config:
+ model: gpt-4o
+ api_key: REPLACE_WITH_YOUR_API_KEY
+# Use Azure OpenAI with API key
+# provider: autogen_ext.models.openai.AzureOpenAIChatCompletionClient
+# config:
+# model: gpt-4o
+# azure_endpoint: https://{your-custom-endpoint}.openai.azure.com/
+# azure_deployment: {your-azure-deployment}
+# api_version: {your-api-version}
+# api_key: REPLACE_WITH_YOUR_API_KEY
+# Use Azure OpenAI with AD token provider.
+# provider: autogen_ext.models.openai.AzureOpenAIChatCompletionClient
+# config:
+# model: gpt-4o
+# azure_endpoint: https://{your-custom-endpoint}.openai.azure.com/
+# azure_deployment: {your-azure-deployment}
+# api_version: {your-api-version}
+# azure_ad_token_provider:
+# provider: autogen_ext.auth.azure.AzureTokenProvider
+# config:
+# provider_kind: DefaultAzureCredential
+# scopes:
+# - https://cognitiveservices.azure.com/.default
diff --git a/python/samples/core_async_human_in_the_loop/utils.py b/python/samples/core_async_human_in_the_loop/utils.py
deleted file mode 100644
index ee412c5eefe8..000000000000
--- a/python/samples/core_async_human_in_the_loop/utils.py
+++ /dev/null
@@ -1,47 +0,0 @@
-import os
-from typing import Any
-
-from autogen_core.models import (
- ChatCompletionClient,
-)
-from autogen_ext.models.openai import AzureOpenAIChatCompletionClient, OpenAIChatCompletionClient
-from azure.identity import DefaultAzureCredential, get_bearer_token_provider
-
-
-def get_chat_completion_client_from_envs(**kwargs: Any) -> ChatCompletionClient:
- # Check API type.
- api_type = os.getenv("OPENAI_API_TYPE", "openai")
- if api_type == "openai":
- # Check API key.
- api_key = os.getenv("OPENAI_API_KEY")
- if api_key is None:
- raise ValueError("OPENAI_API_KEY is not set")
- kwargs["api_key"] = api_key
- return OpenAIChatCompletionClient(**kwargs)
- elif api_type == "azure":
- # Check Azure API key.
- azure_api_key = os.getenv("AZURE_OPENAI_API_KEY")
- if azure_api_key is not None:
- kwargs["api_key"] = azure_api_key
- else:
- # Try to use token from Azure CLI.
- token_provider = get_bearer_token_provider(
- DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default"
- )
- kwargs["azure_ad_token_provider"] = token_provider
- # Check Azure API endpoint.
- azure_api_endpoint = os.getenv("AZURE_OPENAI_API_ENDPOINT")
- if azure_api_endpoint is None:
- raise ValueError("AZURE_OPENAI_API_ENDPOINT is not set")
- kwargs["azure_endpoint"] = azure_api_endpoint
- # Get Azure API version.
- kwargs["api_version"] = os.getenv("AZURE_OPENAI_API_VERSION", "2024-06-01")
- # Set model capabilities.
- if "model_capabilities" not in kwargs or kwargs["model_capabilities"] is None:
- kwargs["model_capabilities"] = {
- "vision": True,
- "function_calling": True,
- "json_output": True,
- }
- return AzureOpenAIChatCompletionClient(**kwargs) # type: ignore
- raise ValueError(f"Unknown API type: {api_type}")
diff --git a/python/samples/core_chess_game/.gitignore b/python/samples/core_chess_game/.gitignore
index f228262f0a35..189b1a838595 100644
--- a/python/samples/core_chess_game/.gitignore
+++ b/python/samples/core_chess_game/.gitignore
@@ -1 +1 @@
-model_config.json
\ No newline at end of file
+model_config.yml
diff --git a/python/samples/core_chess_game/README.md b/python/samples/core_chess_game/README.md
index a27c5cb99025..487c205a4e5d 100644
--- a/python/samples/core_chess_game/README.md
+++ b/python/samples/core_chess_game/README.md
@@ -2,22 +2,34 @@
An example with two chess player agents that executes its own tools to demonstrate tool use and reflection on tool use.
-## Running the example
-
-### Prerequisites
+## Prerequisites
First, you need a shell with AutoGen core and required dependencies installed.
```bash
-pip install "autogen-core" "autogen-ext[openai,azure]" "chess"
+pip install "autogen-ext[openai,azure]" "chess"
```
-### Model Configuration
-The model configuration should defined in a `model_config.json` file.
-Use `model_config_template.json` as a template.
+## Model Configuration
-### Running the example
+The model configuration should be defined in a `model_config.yml` file.
+Use `model_config_template.yml` as a template.
+
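+For example, to use Azure OpenAI with an API key (one of the commented variants
+in the template; fill in your endpoint, deployment, and API version):
+
+```yaml
+provider: autogen_ext.models.openai.AzureOpenAIChatCompletionClient
+config:
+  model: gpt-4o
+  azure_endpoint: https://{your-custom-endpoint}.openai.azure.com/
+  azure_deployment: {your-azure-deployment}
+  api_version: {your-api-version}
+  api_key: REPLACE_WITH_YOUR_API_KEY
+```
+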
+## Running the example
```bash
python main.py
-```
\ No newline at end of file
+```
diff --git a/python/samples/core_chess_game/main.py b/python/samples/core_chess_game/main.py
index ccc77deba0f2..7a97eaa00b18 100644
--- a/python/samples/core_chess_game/main.py
+++ b/python/samples/core_chess_game/main.py
@@ -5,8 +5,8 @@
import argparse
import asyncio
-import json
import logging
+import yaml
from typing import Annotated, Any, Dict, List, Literal
from autogen_core import (
@@ -266,7 +266,7 @@ async def main(model_config: Dict[str, Any]) -> None:
parser = argparse.ArgumentParser(description="Run a chess game between two agents.")
parser.add_argument("--verbose", action="store_true", help="Enable verbose logging.")
parser.add_argument(
- "--model-config", type=str, help="Path to the model configuration file.", default="model_config.json"
+ "--model-config", type=str, help="Path to the model configuration file.", default="model_config.yml"
)
args = parser.parse_args()
if args.verbose:
@@ -276,5 +276,5 @@ async def main(model_config: Dict[str, Any]) -> None:
logging.getLogger("autogen_core").addHandler(handler)
with open(args.model_config, "r") as f:
- model_config = json.load(f)
+ model_config = yaml.safe_load(f)
asyncio.run(main(model_config))
diff --git a/python/samples/core_chess_game/model_config_template.json b/python/samples/core_chess_game/model_config_template.json
deleted file mode 100644
index a66457f3b8e3..000000000000
--- a/python/samples/core_chess_game/model_config_template.json
+++ /dev/null
@@ -1,38 +0,0 @@
-// Use Azure OpenAI with AD token provider.
-// {
-// "provider": "AzureOpenAIChatCompletionClient",
-// "config": {
-// "model": "gpt-4o-2024-05-13",
-// "azure_endpoint": "https://{your-custom-endpoint}.openai.azure.com/",
-// "azure_deployment": "{your-azure-deployment}",
-// "api_version": "2024-06-01",
-// "azure_ad_token_provider": {
-// "provider": "autogen_ext.auth.azure.AzureTokenProvider",
-// "config": {
-// "provider_kind": "DefaultAzureCredential",
-// "scopes": [
-// "https://cognitiveservices.azure.com/.default"
-// ]
-// }
-// }
-// }
-// }
-// Use Azure Open AI with key
-// {
-// "provider": "AzureOpenAIChatCompletionClient",
-// "config": {
-// "model": "gpt-4o-2024-05-13",
-// "azure_endpoint": "https://{your-custom-endpoint}.openai.azure.com/",
-// "azure_deployment": "{your-azure-deployment}",
-// "api_version": "2024-06-01",
-// "api_key": "REPLACE_WITH_YOUR_API_KEY"
-// }
-// }
-// Use Open AI with key
-{
- "provider": "OpenAIChatCompletionClient",
- "config": {
- "model": "gpt-4o-2024-05-13",
- "api_key": "REPLACE_WITH_YOUR_API_KEY"
- }
-}
\ No newline at end of file
diff --git a/python/samples/core_chess_game/model_config_template.yml b/python/samples/core_chess_game/model_config_template.yml
new file mode 100644
index 000000000000..9768f5df0fe1
--- /dev/null
+++ b/python/samples/core_chess_game/model_config_template.yml
@@ -0,0 +1,26 @@
+# Use OpenAI with API key
+provider: autogen_ext.models.openai.OpenAIChatCompletionClient
+config:
+ model: gpt-4o
+ api_key: REPLACE_WITH_YOUR_API_KEY
+# Use Azure OpenAI with API key
+# provider: autogen_ext.models.openai.AzureOpenAIChatCompletionClient
+# config:
+# model: gpt-4o
+# azure_endpoint: https://{your-custom-endpoint}.openai.azure.com/
+# azure_deployment: {your-azure-deployment}
+# api_version: {your-api-version}
+# api_key: REPLACE_WITH_YOUR_API_KEY
+# Use Azure OpenAI with AD token provider.
+# provider: autogen_ext.models.openai.AzureOpenAIChatCompletionClient
+# config:
+# model: gpt-4o
+# azure_endpoint: https://{your-custom-endpoint}.openai.azure.com/
+# azure_deployment: {your-azure-deployment}
+# api_version: {your-api-version}
+# azure_ad_token_provider:
+# provider: autogen_ext.auth.azure.AzureTokenProvider
+# config:
+# provider_kind: DefaultAzureCredential
+# scopes:
+# - https://cognitiveservices.azure.com/.default