|
29 | 29 | "source": [
|
30 | 30 | "## Requirements\n",
|
31 | 31 | "\n",
|
32 |
| - "AutoGen requires `Python>=3.8`. To run this notebook example, please install pyautogen:\n", |
| 32 | + "AutoGen requires `Python>=3.8`. To run this notebook example, please install autogen-agentchat:\n", |
33 | 33 | "\n",
|
34 | 34 | "````{=mdx}\n",
|
35 | 35 | ":::info Requirements\n",
|
36 | 36 | "Install `pyautogen`:\n",
|
37 | 37 | "```bash\n",
|
38 |
| - "pip install pyautogen\n", |
| 38 | + "pip install autogen-agentchat~=0.2\n", |
39 | 39 | "```\n",
|
40 | 40 | "\n",
|
41 | 41 | "For more information, please refer to the [installation guide](/docs/installation/).\n",
|
|
49 | 49 | "metadata": {},
|
50 | 50 | "outputs": [],
|
51 | 51 | "source": [
|
52 |
| - "from typing import Dict, Union, Optional\n", |
53 |
| - "import os\n", |
54 | 52 | "import copy\n",
|
| 53 | + "import os\n", |
| 54 | + "from typing import Dict, Optional, Union\n", |
55 | 55 | "\n",
|
56 | 56 | "import autogen\n",
|
57 |
| - "from autogen import ConversableAgent, Agent\n", |
58 |
| - "from autogen.coding import LocalCommandLineCodeExecutor\n", |
59 |
| - "from autogen.code_utils import create_virtual_env" |
| 57 | + "from autogen import Agent, ConversableAgent\n", |
| 58 | + "from autogen.code_utils import create_virtual_env\n", |
| 59 | + "from autogen.coding import LocalCommandLineCodeExecutor" |
60 | 60 | ]
|
61 | 61 | },
|
62 | 62 | {
|
|
72 | 72 | "metadata": {},
|
73 | 73 | "outputs": [],
|
74 | 74 | "source": [
|
75 |
| - "os.environ['VENV_DIR'] = \"<Your venv directory that is used for code executor agent>\"\n", |
76 |
| - "os.environ['TEMP_FILE_DIR'] = \"<Your directory for temporary code files>\"\n", |
77 |
| - "os.environ['LLM_API_KEY'] = \"<Your API key for llm config>\"\n", |
78 |
| - "os.environ['LLM_API_TYPE'] = \"<Your API type for llm config>\"\n", |
79 |
| - "os.environ['LLM_API_VERSION'] = \"<Your API version for llm config>\"\n", |
80 |
| - "os.environ['LLM_API_URL'] = \"<Your API Url for llm config>\"\n", |
| 75 | + "os.environ[\"VENV_DIR\"] = \"<Your venv directory that is used for code executor agent>\"\n", |
| 76 | + "os.environ[\"TEMP_FILE_DIR\"] = \"<Your directory for temporary code files>\"\n", |
| 77 | + "os.environ[\"LLM_API_KEY\"] = \"<Your API key for llm config>\"\n", |
| 78 | + "os.environ[\"LLM_API_TYPE\"] = \"<Your API type for llm config>\"\n", |
| 79 | + "os.environ[\"LLM_API_VERSION\"] = \"<Your API version for llm config>\"\n", |
| 80 | + "os.environ[\"LLM_API_URL\"] = \"<Your API Url for llm config>\"\n", |
81 | 81 | "\n",
|
82 | 82 | "# Get the env variables\n",
|
83 |
| - "venv_dir = os.environ['VENV_DIR']\n", |
84 |
| - "temp_file_fir = os.environ['TEMP_FILE_DIR'] \n", |
| 83 | + "venv_dir = os.environ[\"VENV_DIR\"]\n", |
| 84 | + "temp_file_fir = os.environ[\"TEMP_FILE_DIR\"]\n", |
85 | 85 | "\n",
|
86 | 86 | "llm_config = {\n",
|
87 |
| - " \"config_list\": [{\n", |
88 |
| - " \"model\": \"gpt-4\", \n", |
89 |
| - " \"api_key\": os.environ[\"LLM_API_KEY\"], \n", |
90 |
| - " \"api_type\": os.environ[\"LLM_API_TYPE\"], \n", |
91 |
| - " \"api_version\": os.environ[\"LLM_API_VERSION\"], \n", |
92 |
| - " \"base_url\": os.environ[\"LLM_API_URL\"]}],\n", |
| 87 | + " \"config_list\": [\n", |
| 88 | + " {\n", |
| 89 | + " \"model\": \"gpt-4\",\n", |
| 90 | + " \"api_key\": os.environ[\"LLM_API_KEY\"],\n", |
| 91 | + " \"api_type\": os.environ[\"LLM_API_TYPE\"],\n", |
| 92 | + " \"api_version\": os.environ[\"LLM_API_VERSION\"],\n", |
| 93 | + " \"base_url\": os.environ[\"LLM_API_URL\"],\n", |
| 94 | + " }\n", |
| 95 | + " ],\n", |
93 | 96 | "}\n",
|
94 | 97 | "\n",
|
95 | 98 | "# The string that sensitive data will be redated to\n",
|
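The executor cell below passes a `venv_context` that is built in a cell outside this hunk. Given the `create_virtual_env` import above, that step presumably looks like the following sketch (the exact cell is not shown in this diff):

```python
# Create an isolated virtual environment for generated code to run in;
# the returned context is handed to LocalCommandLineCodeExecutor below.
venv_context = create_virtual_env(venv_dir)
```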
|
116 | 119 | "metadata": {},
|
117 | 120 | "outputs": [],
|
118 | 121 | "source": [
|
119 |
| - "executor = LocalCommandLineCodeExecutor(\n", |
120 |
| - " timeout=10, \n", |
121 |
| - " work_dir=temp_file_fir, \n", |
122 |
| - " virtual_env_context=venv_context\n", |
123 |
| - ")\n", |
| 122 | + "executor = LocalCommandLineCodeExecutor(timeout=10, work_dir=temp_file_fir, virtual_env_context=venv_context)\n", |
124 | 123 | "\n",
|
125 | 124 | "code_executor_agent = ConversableAgent(\n",
|
126 | 125 | " \"code_executor_agent\",\n",
|
127 | 126 | " llm_config=False,\n",
|
128 |
| - " code_execution_config={\"executor\": executor}, \n", |
129 |
| - " human_input_mode=\"NEVER\", \n", |
| 127 | + " code_execution_config={\"executor\": executor},\n", |
| 128 | + " human_input_mode=\"NEVER\",\n", |
130 | 129 | " max_consecutive_auto_reply=1,\n",
|
131 | 130 | ")\n",
|
132 | 131 | "\n",
|
|
136 | 135 | " llm_config=llm_config,\n",
|
137 | 136 | " system_message=\"DO NOT display any thing that is sensitive\",\n",
|
138 | 137 | " max_consecutive_auto_reply=1,\n",
|
139 |
| - " code_execution_config={\"use_docker\": False}\n", |
| 138 | + " code_execution_config={\"use_docker\": False},\n", |
140 | 139 | ")"
|
141 | 140 | ]
|
142 | 141 | },
|
|
155 | 154 | "metadata": {},
|
156 | 155 | "outputs": [],
|
157 | 156 | "source": [
|
158 |
| - "def transform_generated_response(message: Union[Dict, str], sender: Optional[Agent] = None, recipient: Agent = None, silent: bool = None ) -> Union[Dict, str]:\n", |
159 |
| - " temp_message = copy.deepcopy(message)\n", |
160 |
| - " all_secrets = sorted(env_secrets.values(), key=len, reverse=True)\n", |
161 |
| - " if isinstance(temp_message, Dict):\n", |
162 |
| - " for secret in all_secrets:\n", |
163 |
| - " if isinstance(temp_message[\"content\"], str):\n", |
164 |
| - " if secret != '' and secret in temp_message[\"content\"]:\n", |
165 |
| - " temp_message[\"content\"] = temp_message[\"content\"].replace(secret, replacementString)\n", |
166 |
| - " elif isinstance(temp_message[\"content\"], list):\n", |
167 |
| - " for item in temp_message[\"content\"]:\n", |
168 |
| - " if item[\"type\"] == \"text\":\n", |
169 |
| - " if secret != '' and secret in item[\"text\"]:\n", |
170 |
| - " item[\"text\"] = item[\"text\"].replace(secret, replacementString)\n", |
171 |
| - " if isinstance(temp_message, str):\n", |
172 |
| - " for secret in all_secrets:\n", |
173 |
| - " if secret != '' and secret in temp_message:\n", |
174 |
| - " temp_message = temp_message.replace(secret, replacementString)\n", |
| 157 | + "def transform_generated_response(\n", |
| 158 | + " message: Union[Dict, str], sender: Optional[Agent] = None, recipient: Agent = None, silent: bool = None\n", |
| 159 | + ") -> Union[Dict, str]:\n", |
| 160 | + " temp_message = copy.deepcopy(message)\n", |
| 161 | + " all_secrets = sorted(env_secrets.values(), key=len, reverse=True)\n", |
| 162 | + " if isinstance(temp_message, Dict):\n", |
| 163 | + " for secret in all_secrets:\n", |
| 164 | + " if isinstance(temp_message[\"content\"], str):\n", |
| 165 | + " if secret != \"\" and secret in temp_message[\"content\"]:\n", |
| 166 | + " temp_message[\"content\"] = temp_message[\"content\"].replace(secret, replacementString)\n", |
| 167 | + " elif isinstance(temp_message[\"content\"], list):\n", |
| 168 | + " for item in temp_message[\"content\"]:\n", |
| 169 | + " if item[\"type\"] == \"text\":\n", |
| 170 | + " if secret != \"\" and secret in item[\"text\"]:\n", |
| 171 | + " item[\"text\"] = item[\"text\"].replace(secret, replacementString)\n", |
| 172 | + " if isinstance(temp_message, str):\n", |
| 173 | + " for secret in all_secrets:\n", |
| 174 | + " if secret != \"\" and secret in temp_message:\n", |
| 175 | + " temp_message = temp_message.replace(secret, replacementString)\n", |
175 | 176 | "\n",
|
176 |
| - " return temp_message" |
| 177 | + " return temp_message" |
177 | 178 | ]
|
178 | 179 | },
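The redaction function above only takes effect once it is registered as a hook on the agent whose outgoing messages should be scrubbed. In AutoGen 0.2 that is done via `ConversableAgent.register_hook`; a sketch, assuming the notebook attaches it to `code_executor_agent` in a cell outside this diff:

```python
# Scrub secrets from every message the executor agent sends back to the proxy.
code_executor_agent.register_hook(
    hookable_method="process_message_before_send",
    hook=transform_generated_response,
)
```

The hook is invoked with keyword arguments (`sender`, `message`, `recipient`, `silent`), which is why the function signature above accepts all four.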
|
179 | 180 | {
|
|
209 | 210 | "metadata": {},
|
210 | 211 | "outputs": [],
|
211 | 212 | "source": [
|
212 |
| - "agent_message =\"\"\"Run the code and show me the printed variable.\n", |
| 213 | + "agent_message = \"\"\"Run the code and show me the printed variable.\n", |
213 | 214 | "The code block is below:\n",
|
214 | 215 | "```python\n",
|
215 | 216 | "import os\n",
|
|
258 | 259 | }
|
259 | 260 | ],
|
260 | 261 | "source": [
|
261 |
| - "result = proxy_agent.initiate_chat(\n", |
262 |
| - " code_executor_agent, message=agent_message, clear_history=False\n", |
263 |
| - ")" |
| 262 | + "result = proxy_agent.initiate_chat(code_executor_agent, message=agent_message, clear_history=False)" |
264 | 263 | ]
|
265 | 264 | },
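`initiate_chat` returns a `ChatResult`, so the redaction can be verified directly against the recorded history (a minimal sketch; `env_secrets` is the secrets mapping defined earlier in the notebook):

```python
# No raw secret should survive in the recorded conversation; each should have
# been replaced by the redaction marker before the message was sent.
for msg in result.chat_history:
    content = str(msg.get("content", ""))
    for secret in env_secrets.values():
        assert not secret or secret not in content
```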
|
266 | 265 | {
|
|