
Commit 4964493: Use script-friendly example in README and quickstart

Committed Oct 9, 2024, 1 parent: f674f2c

5 files changed, +69 -27 lines changed
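
The point of the commit is to make the documented example runnable as a plain script. The previous snippet used `async with` and `await` at module top level, which only works in environments that support top-level await (such as Jupyter notebooks), so copying it into a `.py` file and running it with the interpreter fails. The updated examples wrap the asynchronous work in an `async def main()` coroutine driven by `asyncio.run()`, and they attach the console log handler explicitly now that the logging module no longer configures it as an import side effect. A minimal sketch of the pattern, independent of AutoGen (`run_task` is a hypothetical stand-in for the real group-chat call):

```python
import asyncio


async def run_task() -> str:
    # Hypothetical stand-in for the real async work (e.g. a group-chat run).
    await asyncio.sleep(0)
    return "done"


async def main() -> None:
    # `await` is only legal inside an async function (or a REPL/notebook
    # with top-level await support), so a script needs this wrapper.
    result = await run_task()
    print(result)


if __name__ == "__main__":
    # Explicit event-loop entry point; this is what makes the example
    # runnable with a plain `python example.py`.
    asyncio.run(main())
```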

README.md (+21 -10)

````diff
@@ -106,21 +106,32 @@ The following code uses code execution, you need to have [Docker installed](http
 and running on your machine.
 
 ```python
+import asyncio
+import logging
+from autogen_agentchat import EVENT_LOGGER_NAME
 from autogen_agentchat.agents import CodeExecutorAgent, CodingAssistantAgent
+from autogen_agentchat.logging import ConsoleLogHandler
 from autogen_agentchat.teams import RoundRobinGroupChat, StopMessageTermination
 from autogen_core.components.code_executor import DockerCommandLineCodeExecutor
 from autogen_core.components.models import OpenAIChatCompletionClient
 
-async with DockerCommandLineCodeExecutor(work_dir="coding") as code_executor:
-    code_executor_agent = CodeExecutorAgent("code_executor", code_executor=code_executor)
-    coding_assistant_agent = CodingAssistantAgent(
-        "coding_assistant", model_client=OpenAIChatCompletionClient(model="gpt-4o")
-    )
-    group_chat = RoundRobinGroupChat([coding_assistant_agent, code_executor_agent])
-    result = await group_chat.run(
-        task="Create a plot of NVDIA and TSLA stock returns YTD from 2024-01-01 and save it to 'nvidia_tesla_2024_ytd.png'.",
-        termination_condition=StopMessageTermination(),
-    )
+logger = logging.getLogger(EVENT_LOGGER_NAME)
+logger.addHandler(ConsoleLogHandler())
+logger.setLevel(logging.INFO)
+
+async def main() -> None:
+    async with DockerCommandLineCodeExecutor(work_dir="coding") as code_executor:
+        code_executor_agent = CodeExecutorAgent("code_executor", code_executor=code_executor)
+        coding_assistant_agent = CodingAssistantAgent(
+            "coding_assistant", model_client=OpenAIChatCompletionClient(model="gpt-4o")
+        )
+        group_chat = RoundRobinGroupChat([coding_assistant_agent, code_executor_agent])
+        result = await group_chat.run(
+            task="Create a plot of NVDIA and TSLA stock returns YTD from 2024-01-01 and save it to 'nvidia_tesla_2024_ytd.png'.",
+            termination_condition=StopMessageTermination(),
+        )
+
+asyncio.run(main())
 ```
 
 ### C#
````
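
In the updated README snippet the team itself is unchanged; what changes is the setup around it: the event logger is configured explicitly (so AgentChat events are echoed to the console while the team runs), and the asynchronous code moves into `async def main()` driven by `asyncio.run(main())`, so the example can be saved to a file and run with a plain Python interpreter rather than only in a notebook that supports top-level `await`.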

python/packages/autogen-agentchat/src/autogen_agentchat/agents/_coding_assistant_agent.py (+1 -1)

```diff
@@ -34,7 +34,7 @@ def __init__(
 If you want the user to save the code in a file before executing it, put # filename: <filename> inside the code block as the first line. Don't include multiple code blocks in one response. Do not ask users to copy and paste the result. Instead, use 'print' function for the output when relevant. Check the execution result returned by the user.
 If the result indicates there is an error, fix the error and output the code again. Suggest the full code instead of partial code or code changes. If the error can't be fixed or if the task is not solved even after the code is executed successfully, analyze the problem, revisit your assumption, collect additional info you need, and think of a different approach to try.
 When you find an answer, verify the answer carefully. Include verifiable evidence in your response if possible.
-Reply "TERMINATE" in the end when everything is done.""",
+Reply "TERMINATE" in the end when code has been executed and task is complete.""",
     ):
         super().__init__(name=name, description=description)
         self._model_client = model_client
```

python/packages/autogen-agentchat/src/autogen_agentchat/logging/_console_log_handler.py (-6)

```diff
@@ -3,7 +3,6 @@
 import sys
 from datetime import datetime
 
-from .. import EVENT_LOGGER_NAME
 from ..agents import ChatMessage, StopMessage, TextMessage
 from ..teams._events import (
     ContentPublishEvent,
@@ -68,8 +67,3 @@ def emit(self, record: logging.LogRecord) -> None:
             sys.stdout.flush()
         else:
             raise ValueError(f"Unexpected log record: {record.msg}")
-
-
-logger = logging.getLogger(EVENT_LOGGER_NAME)
-logger.setLevel(logging.INFO)
-logger.addHandler(ConsoleLogHandler())
```
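
With the module-level setup removed, importing `autogen_agentchat.logging` no longer attaches `ConsoleLogHandler` or changes log levels as an import side effect; scripts that want console output now opt in explicitly, as the updated examples do. A minimal sketch of that opt-in setup (the same calls used in the new README example):

```python
import logging

from autogen_agentchat import EVENT_LOGGER_NAME
from autogen_agentchat.logging import ConsoleLogHandler

# Opt-in console logging: attach the handler to the AgentChat event logger
# and raise its level, instead of relying on an import-time side effect.
logger = logging.getLogger(EVENT_LOGGER_NAME)
logger.addHandler(ConsoleLogHandler())
logger.setLevel(logging.INFO)
```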

New file (+26)

```diff
@@ -0,0 +1,26 @@
+import asyncio
+import logging
+from autogen_agentchat.agents import CodeExecutorAgent, CodingAssistantAgent
+from autogen_agentchat.teams import RoundRobinGroupChat, StopMessageTermination
+from autogen_agentchat import EVENT_LOGGER_NAME
+from autogen_agentchat.logging import ConsoleLogHandler
+from autogen_core.components.code_executor import DockerCommandLineCodeExecutor
+from autogen_core.components.models import OpenAIChatCompletionClient
+
+logger = logging.getLogger(EVENT_LOGGER_NAME)
+logger.addHandler(ConsoleLogHandler())
+logger.setLevel(logging.INFO)
+
+async def main() -> None:
+    async with DockerCommandLineCodeExecutor(work_dir="coding") as code_executor:
+        code_executor_agent = CodeExecutorAgent("code_executor", code_executor=code_executor)
+        coding_assistant_agent = CodingAssistantAgent(
+            "coding_assistant", model_client=OpenAIChatCompletionClient(model="gpt-4o")
+        )
+        group_chat = RoundRobinGroupChat([coding_assistant_agent, code_executor_agent])
+        result = await group_chat.run(
+            task="Create a plot of NVDIA and TSLA stock returns YTD from 2024-01-01 and save it to 'nvidia_tesla_2024_ytd.png'.",
+            termination_condition=StopMessageTermination(),
+        )
+
+asyncio.run(main())
```

python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/stocksnippet.md (+21 -10)

``````diff
@@ -2,21 +2,32 @@
 
 `````{tab-item} AgentChat (v0.4x)
 ```python
+import asyncio
+import logging
+from autogen_agentchat import EVENT_LOGGER_NAME
 from autogen_agentchat.agents import CodeExecutorAgent, CodingAssistantAgent
+from autogen_agentchat.logging import ConsoleLogHandler
 from autogen_agentchat.teams import RoundRobinGroupChat, StopMessageTermination
 from autogen_core.components.code_executor import DockerCommandLineCodeExecutor
 from autogen_core.components.models import OpenAIChatCompletionClient
 
-async with DockerCommandLineCodeExecutor(work_dir="coding") as code_executor:
-    code_executor_agent = CodeExecutorAgent("code_executor", code_executor=code_executor)
-    coding_assistant_agent = CodingAssistantAgent(
-        "coding_assistant", model_client=OpenAIChatCompletionClient(model="gpt-4")
-    )
-    group_chat = RoundRobinGroupChat([coding_assistant_agent, code_executor_agent])
-    result = await group_chat.run(
-        task="Create a plot of NVIDIA and TESLA stock returns YTD from 2024-01-01 and save it to 'nvidia_tesla_2024_ytd.png'.",
-        termination_condition=StopMessageTermination(),
-    )
+logger = logging.getLogger(EVENT_LOGGER_NAME)
+logger.addHandler(ConsoleLogHandler())
+logger.setLevel(logging.INFO)
+
+async def main() -> None:
+    async with DockerCommandLineCodeExecutor(work_dir="coding") as code_executor:
+        code_executor_agent = CodeExecutorAgent("code_executor", code_executor=code_executor)
+        coding_assistant_agent = CodingAssistantAgent(
+            "coding_assistant", model_client=OpenAIChatCompletionClient(model="gpt-4o")
+        )
+        group_chat = RoundRobinGroupChat([coding_assistant_agent, code_executor_agent])
+        result = await group_chat.run(
+            task="Create a plot of NVDIA and TSLA stock returns YTD from 2024-01-01 and save it to 'nvidia_tesla_2024_ytd.png'.",
+            termination_condition=StopMessageTermination(),
+        )
+
+asyncio.run(main())
 ```
 `````
 
``````