-
Notifications
You must be signed in to change notification settings - Fork 20
/
Copy pathfunction_calling.py
53 lines (40 loc) · 1.54 KB
/
function_calling.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
import json
import os
from langchain_core.messages import HumanMessage, ToolMessage
from langchain_core.tools import tool
from langchain_google_vertexai import ChatVertexAI
# Reference: https://python.langchain.com/v0.2/docs/how_to/function_calling/
@tool
def get_weather_forecast(location: str) -> str:
    """Get the weather forecast for a given location or city"""
    # Stub implementation: always reports the same sunny, 20-degree forecast
    # for whatever location was requested. Returned as a JSON string so the
    # model receives a structured tool result it can read back.
    forecast_payload = {
        "location": location,
        "forecast": "sunny",
        "temperature": 20,
    }
    return json.dumps(forecast_payload)
if __name__ == "__main__":
    # Demo of the LangChain tool-calling loop against a Vertex AI Gemini model:
    # 1) send the user query, 2) execute the tool calls the model requests,
    # 3) send the tool results back so the model can compose a final answer.
    tools = [get_weather_forecast]
    llm = ChatVertexAI(
        project=os.environ["PROJECT_ID"],  # fails fast with KeyError if unset
        location="us-central1",
        model="gemini-1.5-flash-002"
    )
    llm_with_tools = llm.bind_tools(tools)

    # Ask about the weather
    query = "How's the weather in Paris?"
    print(f"User: {query}")
    messages = [HumanMessage(query)]
    response = llm_with_tools.invoke(messages)
    messages.append(response)
    # The model replies with a function call request
    print(f"Response: {response.tool_calls}")

    # Build the name -> tool dispatch table once, outside the loop (it is
    # loop-invariant), and derive it from `tools` so it cannot drift out of
    # sync with the list bound to the model.
    tools_by_name = {t.name: t for t in tools}
    for tool_call in response.tool_calls:
        selected_tool = tools_by_name[tool_call["name"].lower()]
        tool_output = selected_tool.invoke(tool_call["args"])
        # Send back the result of the function call, linked to the originating
        # request via tool_call_id so the model can match result to call.
        messages.append(ToolMessage(tool_output, tool_call_id=tool_call["id"]))

    # Invoke the model again with function call response
    response = llm_with_tools.invoke(messages)
    print(f"Response: {response.content}")