Skip to content

Commit 139d28b

Browse files
author
DONG, JERRY
committed
added changes per github comments and run precommit
1 parent 49a392a commit 139d28b

File tree

1 file changed

+50
-51
lines changed

1 file changed

+50
-51
lines changed

notebook/agentchat_postresponse_secret_redaction.ipynb

+50-51
Original file line numberDiff line numberDiff line change
@@ -29,13 +29,13 @@
2929
"source": [
3030
"## Requirements\n",
3131
"\n",
32-
"AutoGen requires `Python>=3.8`. To run this notebook example, please install pyautogen:\n",
32+
"AutoGen requires `Python>=3.8`. To run this notebook example, please install autogen-agentchat:\n",
3333
"\n",
3434
"````{=mdx}\n",
3535
":::info Requirements\n",
3636
"Install `pyautogen`:\n",
3737
"```bash\n",
38-
"pip install pyautogen\n",
38+
"pip install autogen-agentchat~=0.2\n",
3939
"```\n",
4040
"\n",
4141
"For more information, please refer to the [installation guide](/docs/installation/).\n",
@@ -49,14 +49,14 @@
4949
"metadata": {},
5050
"outputs": [],
5151
"source": [
52-
"from typing import Dict, Union, Optional\n",
53-
"import os\n",
5452
"import copy\n",
53+
"import os\n",
54+
"from typing import Dict, Optional, Union\n",
5555
"\n",
5656
"import autogen\n",
57-
"from autogen import ConversableAgent, Agent\n",
58-
"from autogen.coding import LocalCommandLineCodeExecutor\n",
59-
"from autogen.code_utils import create_virtual_env"
57+
"from autogen import Agent, ConversableAgent\n",
58+
"from autogen.code_utils import create_virtual_env\n",
59+
"from autogen.coding import LocalCommandLineCodeExecutor"
6060
]
6161
},
6262
{
@@ -72,24 +72,27 @@
7272
"metadata": {},
7373
"outputs": [],
7474
"source": [
75-
"os.environ['VENV_DIR'] = \"<Your venv directory that is used for code executor agent>\"\n",
76-
"os.environ['TEMP_FILE_DIR'] = \"<Your directory for temporary code files>\"\n",
77-
"os.environ['LLM_API_KEY'] = \"<Your API key for llm config>\"\n",
78-
"os.environ['LLM_API_TYPE'] = \"<Your API type for llm config>\"\n",
79-
"os.environ['LLM_API_VERSION'] = \"<Your API version for llm config>\"\n",
80-
"os.environ['LLM_API_URL'] = \"<Your API Url for llm config>\"\n",
75+
"os.environ[\"VENV_DIR\"] = \"<Your venv directory that is used for code executor agent>\"\n",
76+
"os.environ[\"TEMP_FILE_DIR\"] = \"<Your directory for temporary code files>\"\n",
77+
"os.environ[\"LLM_API_KEY\"] = \"<Your API key for llm config>\"\n",
78+
"os.environ[\"LLM_API_TYPE\"] = \"<Your API type for llm config>\"\n",
79+
"os.environ[\"LLM_API_VERSION\"] = \"<Your API version for llm config>\"\n",
80+
"os.environ[\"LLM_API_URL\"] = \"<Your API Url for llm config>\"\n",
8181
"\n",
8282
"# Get the env variables\n",
83-
"venv_dir = os.environ['VENV_DIR']\n",
84-
"temp_file_fir = os.environ['TEMP_FILE_DIR'] \n",
83+
"venv_dir = os.environ[\"VENV_DIR\"]\n",
84+
"temp_file_fir = os.environ[\"TEMP_FILE_DIR\"]\n",
8585
"\n",
8686
"llm_config = {\n",
87-
" \"config_list\": [{\n",
88-
" \"model\": \"gpt-4\", \n",
89-
" \"api_key\": os.environ[\"LLM_API_KEY\"], \n",
90-
" \"api_type\": os.environ[\"LLM_API_TYPE\"], \n",
91-
" \"api_version\": os.environ[\"LLM_API_VERSION\"], \n",
92-
" \"base_url\": os.environ[\"LLM_API_URL\"]}],\n",
87+
" \"config_list\": [\n",
88+
" {\n",
89+
" \"model\": \"gpt-4\",\n",
90+
" \"api_key\": os.environ[\"LLM_API_KEY\"],\n",
91+
" \"api_type\": os.environ[\"LLM_API_TYPE\"],\n",
92+
" \"api_version\": os.environ[\"LLM_API_VERSION\"],\n",
93+
" \"base_url\": os.environ[\"LLM_API_URL\"],\n",
94+
" }\n",
95+
" ],\n",
9396
"}\n",
9497
"\n",
9598
"# The string that sensitive data will be redacted to\n",
@@ -116,17 +119,13 @@
116119
"metadata": {},
117120
"outputs": [],
118121
"source": [
119-
"executor = LocalCommandLineCodeExecutor(\n",
120-
" timeout=10, \n",
121-
" work_dir=temp_file_fir, \n",
122-
" virtual_env_context=venv_context\n",
123-
")\n",
122+
"executor = LocalCommandLineCodeExecutor(timeout=10, work_dir=temp_file_fir, virtual_env_context=venv_context)\n",
124123
"\n",
125124
"code_executor_agent = ConversableAgent(\n",
126125
" \"code_executor_agent\",\n",
127126
" llm_config=False,\n",
128-
" code_execution_config={\"executor\": executor}, \n",
129-
" human_input_mode=\"NEVER\", \n",
127+
" code_execution_config={\"executor\": executor},\n",
128+
" human_input_mode=\"NEVER\",\n",
130129
" max_consecutive_auto_reply=1,\n",
131130
")\n",
132131
"\n",
@@ -136,7 +135,7 @@
136135
" llm_config=llm_config,\n",
137136
" system_message=\"DO NOT display any thing that is sensitive\",\n",
138137
" max_consecutive_auto_reply=1,\n",
139-
" code_execution_config={\"use_docker\": False}\n",
138+
" code_execution_config={\"use_docker\": False},\n",
140139
")"
141140
]
142141
},
@@ -155,25 +154,27 @@
155154
"metadata": {},
156155
"outputs": [],
157156
"source": [
158-
"def transform_generated_response(message: Union[Dict, str], sender: Optional[Agent] = None, recipient: Agent = None, silent: bool = None ) -> Union[Dict, str]:\n",
159-
" temp_message = copy.deepcopy(message)\n",
160-
" all_secrets = sorted(env_secrets.values(), key=len, reverse=True)\n",
161-
" if isinstance(temp_message, Dict):\n",
162-
" for secret in all_secrets:\n",
163-
" if isinstance(temp_message[\"content\"], str):\n",
164-
" if secret != '' and secret in temp_message[\"content\"]:\n",
165-
" temp_message[\"content\"] = temp_message[\"content\"].replace(secret, replacementString)\n",
166-
" elif isinstance(temp_message[\"content\"], list):\n",
167-
" for item in temp_message[\"content\"]:\n",
168-
" if item[\"type\"] == \"text\":\n",
169-
" if secret != '' and secret in item[\"text\"]:\n",
170-
" item[\"text\"] = item[\"text\"].replace(secret, replacementString)\n",
171-
" if isinstance(temp_message, str):\n",
172-
" for secret in all_secrets:\n",
173-
" if secret != '' and secret in temp_message:\n",
174-
" temp_message = temp_message.replace(secret, replacementString)\n",
157+
"def transform_generated_response(\n",
158+
" message: Union[Dict, str], sender: Optional[Agent] = None, recipient: Agent = None, silent: bool = None\n",
159+
") -> Union[Dict, str]:\n",
160+
" temp_message = copy.deepcopy(message)\n",
161+
" all_secrets = sorted(env_secrets.values(), key=len, reverse=True)\n",
162+
" if isinstance(temp_message, Dict):\n",
163+
" for secret in all_secrets:\n",
164+
" if isinstance(temp_message[\"content\"], str):\n",
165+
" if secret != \"\" and secret in temp_message[\"content\"]:\n",
166+
" temp_message[\"content\"] = temp_message[\"content\"].replace(secret, replacementString)\n",
167+
" elif isinstance(temp_message[\"content\"], list):\n",
168+
" for item in temp_message[\"content\"]:\n",
169+
" if item[\"type\"] == \"text\":\n",
170+
" if secret != \"\" and secret in item[\"text\"]:\n",
171+
" item[\"text\"] = item[\"text\"].replace(secret, replacementString)\n",
172+
" if isinstance(temp_message, str):\n",
173+
" for secret in all_secrets:\n",
174+
" if secret != \"\" and secret in temp_message:\n",
175+
" temp_message = temp_message.replace(secret, replacementString)\n",
175176
"\n",
176-
" return temp_message"
177+
" return temp_message"
177178
]
178179
},
179180
{
@@ -209,7 +210,7 @@
209210
"metadata": {},
210211
"outputs": [],
211212
"source": [
212-
"agent_message =\"\"\"Run the code and show me the printed variable.\n",
213+
"agent_message = \"\"\"Run the code and show me the printed variable.\n",
213214
"The code block is below:\n",
214215
"```python\n",
215216
"import os\n",
@@ -258,9 +259,7 @@
258259
}
259260
],
260261
"source": [
261-
"result = proxy_agent.initiate_chat(\n",
262-
" code_executor_agent, message=agent_message, clear_history=False\n",
263-
")"
262+
"result = proxy_agent.initiate_chat(code_executor_agent, message=agent_message, clear_history=False)"
264263
]
265264
},
266265
{

0 commit comments

Comments
 (0)