
Commit 7402cb6

feat: Update anthropic config and integrate Claude 3.7 (#1688)
Co-authored-by: Wendong-Fan <[email protected]>
Co-authored-by: Wendong <[email protected]>
1 parent 4582aa9 commit 7402cb6

4 files changed: +191 -12 lines changed

camel/configs/anthropic_config.py

+45-11
@@ -23,23 +23,24 @@ class AnthropicConfig(BaseConfig):
     r"""Defines the parameters for generating chat completions using the
     Anthropic API.
 
-    See: https://docs.anthropic.com/claude/reference/complete_post
+    See: https://docs.anthropic.com/en/api/messages
     Args:
         max_tokens (int, optional): The maximum number of tokens to
             generate before stopping. Note that Anthropic models may stop
            before reaching this maximum. This parameter only specifies the
            absolute maximum number of tokens to generate.
            (default: :obj:`8192`)
-        stop_sequences (List[str], optional): Sequences that will cause the
-            model to stop generating completion text. Anthropic models stop
-            on "\n\nHuman:", and may include additional built-in stop sequences
-            in the future. By providing the stop_sequences parameter, you may
-            include additional strings that will cause the model to stop
-            generating. (default: :obj:`[]`)
+        stop_sequences (List[str], optional): Custom text sequences that will
+            cause the model to stop generating. The models will normally stop
+            when they have naturally completed their turn. If the model
+            encounters one of these custom sequences, the response will be
+            terminated and the stop_reason will be "stop_sequence".
+            (default: :obj:`[]`)
         temperature (float, optional): Amount of randomness injected into the
            response. Defaults to 1. Ranges from 0 to 1. Use temp closer to 0
            for analytical / multiple choice, and closer to 1 for creative
-            and generative tasks. (default: :obj:`1`)
+            and generative tasks. Note that even with temperature of 0.0, the
+            results will not be fully deterministic. (default: :obj:`1`)
         top_p (float, optional): Use nucleus sampling. In nucleus sampling, we
            compute the cumulative distribution over all the options for each
            subsequent token in decreasing probability order and cut it off
@@ -49,9 +50,20 @@ class AnthropicConfig(BaseConfig):
         top_k (int, optional): Only sample from the top K options for each
            subsequent token. Used to remove "long tail" low probability
            responses. (default: :obj:`5`)
-        metadata: An object describing metadata about the request.
         stream (bool, optional): Whether to incrementally stream the response
            using server-sent events. (default: :obj:`False`)
+        metadata (Union[dict, NotGiven], optional): An object describing
+            metadata about the request. Can include user_id as an external
+            identifier for the user associated with the request.
+            (default: :obj:`NotGiven()`)
+        thinking (Union[dict, NotGiven], optional): Configuration for enabling
+            Claude's extended thinking. When enabled, responses include
+            thinking content blocks showing Claude's thinking process.
+            (default: :obj:`NotGiven()`)
+        tool_choice (Union[dict, NotGiven], optional): How the model should
+            use the provided tools. The model can use a specific tool, any
+            available tool, decide by itself, or not use tools at all.
+            (default: :obj:`NotGiven()`)
     """
 
     max_tokens: int = 8192
@@ -60,11 +72,33 @@ class AnthropicConfig(BaseConfig):
     top_p: Union[float, NotGiven] = 0.7
     top_k: Union[int, NotGiven] = 5
     stream: bool = False
+    metadata: Union[dict, NotGiven] = NotGiven()
+    thinking: Union[dict, NotGiven] = NotGiven()
+    tool_choice: Union[dict, NotGiven] = NotGiven()
 
     def as_dict(self) -> dict[str, Any]:
         config_dict = super().as_dict()
-        if "tools" in config_dict:
-            del config_dict["tools"]  # TODO: Support tool calling.
+        # Create a list of keys to remove to avoid modifying dict
+        keys_to_remove = [
+            key
+            for key, value in config_dict.items()
+            if isinstance(value, NotGiven)
+        ]
+
+        for key in keys_to_remove:
+            del config_dict[key]
+
+        # remove some keys if thinking is enabled
+        thinking_enabled = (
+            not isinstance(self.thinking, NotGiven)
+            and self.thinking["type"] == "enabled"
+        )
+        if thinking_enabled:
+            # `top_p`, `top_k`, `temperature` must be unset when thinking is
+            # enabled.
+            config_dict.pop("top_k", None)
+            config_dict.pop("top_p", None)
+            config_dict.pop("temperature", None)
         return config_dict
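For orientation, here is a minimal usage sketch (not part of the commit) of how the new fields interact with the updated `as_dict()`. The `metadata` and `thinking` dict shapes below follow the Anthropic Messages API referenced in the docstring, and the assertions mirror the filtering logic shown in the diff above.

```python
from camel.configs import AnthropicConfig

# Hypothetical values; {"user_id": ...} and {"type": "enabled",
# "budget_tokens": ...} are Anthropic Messages API shapes, not something
# defined in this commit.
config = AnthropicConfig(
    temperature=0.2,
    metadata={"user_id": "user-123"},
    thinking={"type": "enabled", "budget_tokens": 2048},
).as_dict()

# Fields left as NotGiven (here, tool_choice) are stripped, and because
# thinking is enabled, temperature/top_p/top_k are removed as well.
assert "tool_choice" not in config
assert "temperature" not in config and "top_p" not in config
assert config["thinking"]["budget_tokens"] == 2048
```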

camel/models/anthropic_model.py

+5-1
@@ -84,7 +84,11 @@ def _convert_response_from_anthropic_to_openai(self, response):
            index=0,
            message={
                "role": "assistant",
-                "content": response.content[0].text,
+                "content": next(
+                    content.text
+                    for content in response.content
+                    if content.type == "text"
+                ),
            },
            finish_reason=response.stop_reason,
        )
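A short illustration (a sketch with mocked blocks, not code from the commit) of why indexing `response.content[0]` is no longer safe: with extended thinking enabled, Anthropic responses can begin with a thinking block, so the conversion now picks the first block whose `type` is `"text"`.

```python
from types import SimpleNamespace

# Mocked content blocks approximating an Anthropic response when extended
# thinking is enabled: a thinking block precedes the text block.
content = [
    SimpleNamespace(type="thinking", thinking="Working through the steps..."),
    SimpleNamespace(type="text", text="Here is the final answer."),
]

# Same selection logic as the updated conversion above.
text = next(block.text for block in content if block.type == "text")
print(text)  # -> "Here is the final answer."
```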

camel/types/enums.py

+3
@@ -101,6 +101,7 @@ class ModelType(UnifiedModelType, Enum):
     CLAUDE_3_HAIKU = "claude-3-haiku-20240307"
     CLAUDE_3_5_SONNET = "claude-3-5-sonnet-latest"
     CLAUDE_3_5_HAIKU = "claude-3-5-haiku-latest"
+    CLAUDE_3_7_SONNET = "claude-3-7-sonnet-latest"
 
     # Nvidia models
     NVIDIA_NEMOTRON_340B_INSTRUCT = "nvidia/nemotron-4-340b-instruct"
@@ -315,6 +316,7 @@ def is_anthropic(self) -> bool:
            ModelType.CLAUDE_3_HAIKU,
            ModelType.CLAUDE_3_5_SONNET,
            ModelType.CLAUDE_3_5_HAIKU,
+            ModelType.CLAUDE_3_7_SONNET,
        }
 
    @property
@@ -683,6 +685,7 @@ def token_limit(self) -> int:
            ModelType.CLAUDE_3_HAIKU,
            ModelType.CLAUDE_3_5_SONNET,
            ModelType.CLAUDE_3_5_HAIKU,
+            ModelType.CLAUDE_3_7_SONNET,
            ModelType.YI_MEDIUM_200K,
        }:
            return 200_000
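As a quick sanity check, a sketch (assuming `is_anthropic` and `token_limit` are properties, as the surrounding hunks suggest) of how the new enum member behaves:

```python
from camel.types import ModelType

model_type = ModelType.CLAUDE_3_7_SONNET
print(model_type.value)         # "claude-3-7-sonnet-latest"
print(model_type.is_anthropic)  # True, via the updated is_anthropic set
print(model_type.token_limit)   # 200000, via the updated token_limit branch
```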
+138 (new file)
@@ -0,0 +1,138 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========

from camel.agents import ChatAgent
from camel.configs import AnthropicConfig
from camel.models import ModelFactory
from camel.types import ModelPlatformType, ModelType

"""
please set the below os environment:
export ANTHROPIC_API_KEY=""
"""

model = ModelFactory.create(
    model_platform=ModelPlatformType.ANTHROPIC,
    model_type=ModelType.CLAUDE_3_5_SONNET,
    model_config_dict=AnthropicConfig(temperature=0.2).as_dict(),
)

# Define system message
sys_msg = "You are a helpful assistant."

# Set agent
camel_agent = ChatAgent(system_message=sys_msg, model=model)

user_msg = """Say hi to CAMEL AI, one open-source community dedicated to the
study of autonomous and communicative agents."""

# Get response information
response = camel_agent.step(user_msg)
print(response.msgs[0].content)
'''
===============================================================================
Hi CAMEL AI! It's great to meet an open-source community focused on advancing research in autonomous and communicative agents. Your work on developing and studying AI systems that can effectively communicate and operate autonomously is fascinating and important for the field. I appreciate communities like yours that contribute to open research and development in AI. Wishing you continued success in your mission!
===============================================================================
'''  # noqa: E501

# Use the extended thinking model with Claude 3.7 Sonnet
config = AnthropicConfig(
    thinking={"type": "enabled", "budget_tokens": 2048}
).as_dict()

model = ModelFactory.create(
    model_platform=ModelPlatformType.ANTHROPIC,
    model_type=ModelType.CLAUDE_3_7_SONNET,
    model_config_dict=config,
)

camel_agent = ChatAgent(model=model)

user_msg = """Write a bash script that takes a matrix represented as a string with
format '[1,2],[3,4],[5,6]' and prints the transpose in the same format.
"""  # noqa: E501

response = camel_agent.step(user_msg)
print(response.msgs[0].content)
'''
===============================================================================
# Matrix Transpose Bash Script

Here's a bash script that transposes a matrix from the format `[1,2],[3,4],[5,6]` to `[1,3,5],[2,4,6]`:

```bash
#!/bin/bash

# Check if input argument is provided
if [ $# -lt 1 ]; then
    echo "Usage: $0 '[row1],[row2],...'"
    exit 1
fi

# Input matrix as string
input="$1"

# Remove outer brackets and split into rows
input="${input//\]\,\[/]|[}"  # Replace "],[" with "]|["
input="${input#\[}"           # Remove leading "["
input="${input%\]}"           # Remove trailing "]"
IFS='|' read -ra rows <<< "$input"

# Determine dimensions of the matrix
row_count="${#rows[@]}"
IFS=',' read -ra first_row <<< "${rows[0]//[\[\]]}"  # Remove brackets from first row
col_count="${#first_row[@]}"

# Create transpose
result=""
for (( col=0; col<col_count; col++ )); do
    result+="["
    for (( row=0; row<row_count; row++ )); do
        # Extract current row without brackets
        current="${rows[row]//[\[\]]}"
        # Split by commas
        IFS=',' read -ra elements <<< "$current"
        # Add element to transpose
        result+="${elements[col]}"
        # Add comma if not the last element
        if (( row < row_count-1 )); then
            result+=","
        fi
    done
    result+="]"
    # Add comma if not the last row
    if (( col < col_count-1 )); then
        result+=","
    fi
done

echo "$result"
```

## How to Use:

1. Save the script to a file (e.g., `transpose.sh`)
2. Make it executable: `chmod +x transpose.sh`
3. Run it with your matrix: `./transpose.sh "[1,2],[3,4],[5,6]"`

## Example:
- Input: `[1,2],[3,4],[5,6]`
- Output: `[1,3,5],[2,4,6]`

The script works by:
1. Parsing the input string to extract rows and elements
2. Finding the dimensions of the original matrix
3. Creating the transpose by iterating through columns first, then rows
4. Formatting the result with proper brackets and commas
'''  # noqa: E501
