Commit c1010e0

feat: Add support for get AI agent default (#883)
1 parent cc6b0cc commit c1010e0

File tree

4 files changed, +182 -11 lines changed


boxsdk/client/client.py (+49 -1)
@@ -1784,7 +1784,8 @@ def send_ai_question(
             self,
             items: Iterable,
             prompt: str,
-            mode: Optional[str] = None
+            mode: Optional[str] = None,
+            ai_agent: Optional[dict] = None
     ) -> Any:
         """
         Sends an AI request to supported LLMs and returns an answer specifically focused on the user's
@@ -1801,6 +1802,8 @@ def send_ai_question(
             Selecting multiple_item_qa allows you to provide up to 25 items.
 
             Value is one of `multiple_item_qa`, `single_item_qa`
+        :param ai_agent:
+            The AI agent used to handle queries.
         :returns:
             A response including the answer from the LLM.
         """
@@ -1813,6 +1816,9 @@ def send_ai_question(
             'mode': mode
         }
 
+        if ai_agent is not None:
+            body['ai_agent'] = ai_agent
+
         box_response = self._session.post(url, data=json.dumps(body))
         response = box_response.json()
         return self.translator.translate(
@@ -1826,6 +1832,7 @@ def send_ai_text_gen(
             dialogue_history: Iterable,
             items: Iterable,
             prompt: str,
+            ai_agent: Optional[dict] = None
     ):
         """
         Sends an AI request to supported LLMs and returns an answer specifically focused on the creation of new text.
@@ -1838,6 +1845,8 @@ def send_ai_text_gen(
         :param prompt:
             The prompt provided by the client to be answered by the LLM.
             The prompt's length is limited to 10000 characters.
+        :param ai_agent:
+            The AI agent used for generating text.
         :returns:
             A response including the generated text from the LLM.
         """
@@ -1848,9 +1857,48 @@ def send_ai_text_gen(
             'prompt': prompt
         }
 
+        if ai_agent is not None:
+            body['ai_agent'] = ai_agent
+
         box_response = self._session.post(url, data=json.dumps(body))
         response = box_response.json()
         return self.translator.translate(
             session=self._session,
             response_object=response,
         )
+
+    @api_call
+    def get_ai_agent_default_config(
+            self,
+            mode: str,
+            language: Optional[str] = None,
+            model: Optional[str] = None,
+    ):
+        """
+        Get the AI agent default configuration.
+
+        :param mode:
+            The mode to filter the agent config to return.
+        :param language:
+            The ISO language code to return the agent config for.
+            If the language is not supported the default agent configuration is returned.
+        :param model:
+            The model to return the default agent config for.
+        :returns:
+            A default agent configuration.
+            This can be one of the following two objects:
+            AI agent for questions and AI agent for text generation.
+            The response depends on the agent configuration requested in this endpoint.
+        """
+        url = self._session.get_url('ai_agent_default')
+        params = {'mode': mode}
+        if language is not None:
+            params['language'] = language
+        if model is not None:
+            params['model'] = model
+
+        box_response = self._session.get(url, params=params)
+        return self.translator.translate(
+            session=self._session,
+            response_object=box_response.json(),
+        )
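Taken together, these changes let a caller inspect the server-side default agent and then pass an explicit `ai_agent` override when asking a question. The sketch below is illustrative only, not part of this commit: it assumes an already authenticated `client` (a `boxsdk.Client` instance) and a placeholder file ID, and the override dict simply mirrors the shape used in the docs and tests in this diff.

```python
# Illustrative sketch, not part of this commit.
# Assumes `client` is an authenticated boxsdk.Client; the file ID is a placeholder.
default_ask_agent = client.get_ai_agent_default_config(mode='ask')
print(default_ask_agent)  # inspect the current server-side defaults

# Plain-dict override, same shape as in docs/usage/ai.md and the tests.
ai_agent = {
    'type': 'ai_agent_ask',
    'basic_text_multi': {
        'model': 'openai__gpt_3_5_turbo'
    }
}
answer = client.send_ai_question(
    items=[{'id': '1234567890', 'type': 'file'}],  # placeholder file ID
    prompt='Summarize this file in one sentence.',
    mode='single_item_qa',
    ai_agent=ai_agent,
)
print(answer['answer'])
```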

docs/usage/ai.md (+36 -4)
@@ -8,13 +8,14 @@ AI allows to send an intelligence request to supported large language models and
 
 - [Send AI request](#send-ai-request)
 - [Send AI text generation request](#send-ai-text-generation-request)
+- [Get AI agent default configuration](#get-ai-agent-default-configuration)
 
 <!-- END doctoc generated TOC please keep comment here to allow auto update -->
 
 Send AI request
 ------------------------
 
-Calling the [`client.send_ai_question(items, prompt, mode)`][send-ai-question] method will send an AI request to the supported large language models. The `items` parameter is a list of items to be processed by the LLM, often files. The `prompt` provided by the client to be answered by the LLM. The prompt's length is limited to 10000 characters. The `mode` specifies if this request is for a single or multiple items. If you select `single_item_qa` the items array can have one element only. Selecting `multiple_item_qa` allows you to provide up to 25 items.
+Calling the [`client.send_ai_question(items, prompt, mode, ai_agent)`][send-ai-question] method will send an AI request to the supported large language models. The `items` parameter is a list of items to be processed by the LLM, often files. The `prompt` provided by the client to be answered by the LLM. The prompt's length is limited to 10000 characters. The `mode` specifies if this request is for a single or multiple items. If you select `single_item_qa` the items array can have one element only. Selecting `multiple_item_qa` allows you to provide up to 25 items. The `ai_agent` specifies the AI agent which will be used to handle queries.
 
 
 
@@ -25,10 +26,17 @@ items = [{
     "type": "file",
     "content": "More information about public APIs"
 }]
+ai_agent = {
+    'type': 'ai_agent_ask',
+    'basic_text_multi': {
+        'model': 'openai__gpt_3_5_turbo'
+    }
+}
 answer = client.send_ai_question(
     items=items,
     prompt="What is this file?",
-    mode="single_item_qa"
+    mode="single_item_qa",
+    ai_agent=ai_agent
 )
 print(answer)
 ```
@@ -41,7 +49,7 @@ It usually takes a few seconds for the file to be indexed and available for the
 Send AI text generation request
 ------------------------
 
-Calling the [`client.send_ai_text_gen(dialogue_history, items, prompt)`][send-ai-text-gen] method will send an AI text generation request to the supported large language models. The `dialogue_history` parameter is history of prompts and answers previously passed to the LLM. This provides additional context to the LLM in generating the response. The `items` parameter is a list of items to be processed by the LLM, often files. The `prompt` provided by the client to be answered by the LLM. The prompt's length is limited to 10000 characters.
+Calling the [`client.send_ai_text_gen(dialogue_history, items, prompt, ai_agent)`][send-ai-text-gen] method will send an AI text generation request to the supported large language models. The `dialogue_history` parameter is history of prompts and answers previously passed to the LLM. This provides additional context to the LLM in generating the response. The `items` parameter is a list of items to be processed by the LLM, often files. The `prompt` provided by the client to be answered by the LLM. The prompt's length is limited to 10000 characters. The `ai_agent` specifies the AI agent which will be used for generating text.
 
 <!-- sample post_ai_text_gen -->
 ```python
@@ -60,12 +68,36 @@ dialogue_history = [{
     "answer": "Public API schemas provide necessary information to integrate with APIs...",
     "created_at": "2013-12-12T11:20:43-08:00"
 }]
+ai_agent = {
+    'type': 'ai_agent_text_gen',
+    'basic_gen': {
+        'model': 'openai__gpt_3_5_turbo_16k'
+    }
+}
 answer = client.send_ai_text_gen(
     dialogue_history=dialogue_history,
     items=items,
-    prompt="Write an email to a client about the importance of public APIs."
+    prompt="Write an email to a client about the importance of public APIs.",
+    ai_agent=ai_agent
 )
 print(answer)
 ```
 
 [send-ai-text-gen]: https://box-python-sdk.readthedocs.io/en/latest/boxsdk.client.html#boxsdk.client.client.Client.send_ai_text_gen
+
+Get AI agent default configuration
+------------------------
+
+To get an AI agent default configuration call the [`client.get_ai_agent_default_config(mode, language, model)`][get-ai-agent-default] method. The `mode` parameter filters the agent configuration to be returned. It can be either `ask` or `text_gen`. The `language` parameter specifies the ISO language code to return the agent config for. If the language is not supported, the default agent configuration is returned. The `model` parameter specifies the model for which the default agent configuration should be returned.
+
+<!-- sample get_ai_agent_default -->
+```python
+config = client.get_ai_agent_default_config(
+    mode='text_gen',
+    language='en',
+    model='openai__gpt_3_5_turbo'
+)
+print(config)
+```
+
+[get-ai-agent-default]: https://box-python-sdk.readthedocs.io/en/latest/boxsdk.client.html#boxsdk.client.client.Client.get_ai_agent_default_config
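One point worth noting alongside the new docs section: only `mode` is required by `get_ai_agent_default_config`; `language` and `model` are optional, since the implementation above only adds them to the query when they are provided. A minimal sketch, assuming an authenticated `client`:

```python
# Minimal calls: only `mode` is required; `language` and `model` may be omitted.
ask_defaults = client.get_ai_agent_default_config(mode='ask')
text_gen_defaults = client.get_ai_agent_default_config(mode='text_gen')

# Per the docstring, the response is either the questions agent or the
# text generation agent configuration, depending on the requested mode.
print(ask_defaults)
print(text_gen_defaults)
```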

test/integration_new/object/ai_itest.py (+26 -2)
@@ -22,10 +22,17 @@ def test_send_ai_question(parent_folder, small_file_path):
         'type': 'file',
         'content': 'The sun raises in the east.'
     }]
+    ai_agent = {
+        'type': 'ai_agent_ask',
+        'basic_text_multi': {
+            'model': 'openai__gpt_3_5_turbo'
+        }
+    }
     answer = CLIENT.send_ai_question(
         items=items,
         prompt='Which direction does the sun raise?',
-        mode='single_item_qa'
+        mode='single_item_qa',
+        ai_agent=ai_agent
     )
     assert 'east' in answer['answer'].lower()
     assert answer['completion_reason'] == 'done'
@@ -47,10 +54,27 @@ def test_send_ai_text_gen(parent_folder, small_file_path):
         'answer': 'It takes 24 hours for the sun to rise.',
         'created_at': '2013-12-12T11:20:43-08:00'
     }]
+    ai_agent = {
+        'type': 'ai_agent_text_gen',
+        'basic_gen': {
+            'model': 'openai__gpt_3_5_turbo_16k'
+        }
+    }
     answer = CLIENT.send_ai_text_gen(
         dialogue_history=dialogue_history,
         items=items,
-        prompt='Which direction does the sun raise?'
+        prompt='Which direction does the sun raise?',
+        ai_agent=ai_agent
     )
     assert 'east' in answer['answer'].lower()
     assert answer['completion_reason'] == 'done'
+
+
+def test_get_ai_agent_default_config():
+    config = CLIENT.get_ai_agent_default_config(
+        mode='text_gen',
+        language='en',
+        model='openai__gpt_3_5_turbo'
+    )
+    assert config['type'] == 'ai_agent_text_gen'
+    assert config['basic_gen']['model'] == 'openai__gpt_3_5_turbo'
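The new integration test only exercises the `text_gen` default. A companion check for the `ask` mode, not included in this commit, could follow the same pattern; the expected `ai_agent_ask` type is an assumption based on the agent dictionaries used elsewhere in this diff.

```python
def test_get_ai_agent_default_config_ask_mode():
    # Hypothetical companion test, not part of this commit.
    config = CLIENT.get_ai_agent_default_config(mode='ask', language='en')
    # Assumed response type for the questions agent, mirroring the
    # 'ai_agent_ask' dictionaries used in the other tests.
    assert config['type'] == 'ai_agent_ask'
```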

test/unit/client/test_client.py (+71 -4)
@@ -1776,6 +1776,37 @@ def mock_ai_question_response():
     return mock_ai_question_response
 
 
+@pytest.fixture(scope='module')
+def mock_ai_agent_default_config_response():
+    mock_ai_agent_default_config_response = {
+        'type': 'ai_agent_text_gen',
+        'basic_gen': {
+            'content_template': '---{content}---',
+            'embeddings': {
+                'model': 'openai__text_embedding_ada_002',
+                'strategy': {
+                    'id': 'basic',
+                    'num_tokens_per_chunk': 64
+                }
+            },
+            'llm_endpoint_params': {
+                'type': 'openai_params',
+                'frequency_penalty': 1.5,
+                'presence_penalty': 1.5,
+                'stop': '<|im_end|>',
+                'temperature': 0,
+                'top_p': 1
+            },
+            'model': 'openai__gpt_3_5_turbo',
+            'num_tokens_for_completion': 8400,
+            'prompt_template': 'It is `{current_date}`, and I have $8000 and want to spend a week in the Azores. What '
+                               'should I see?',
+            'system_message': 'You are a helpful travel assistant specialized in budget travel'
+        }
+    }
+    return mock_ai_agent_default_config_response
+
+
 def test_get_sign_requests(mock_client, mock_box_session, mock_sign_request_response):
     expected_url = f'{API.BASE_API_URL}/sign_requests'
 
@@ -1963,13 +1994,25 @@ def test_send_ai_question(mock_client, mock_box_session, mock_ai_question_respon
     }]
     question = 'Why are public APIs important?'
     mode = 'single_item_qa'
+    ai_agent = {
+        'type': 'ai_agent_ask',
+        'basic_text_multi': {
+            'model': 'openai__gpt_3_5_turbo'
+        }
+    }
 
-    answer = mock_client.send_ai_question(items, question, mode)
+    answer = mock_client.send_ai_question(items, question, mode, ai_agent)
 
     mock_box_session.post.assert_called_once_with(expected_url, data=json.dumps({
         'items': items,
         'prompt': question,
-        'mode': mode
+        'mode': mode,
+        'ai_agent': {
+            'type': 'ai_agent_ask',
+            'basic_text_multi': {
+                'model': 'openai__gpt_3_5_turbo'
+            }
+        }
     }))
     assert answer['answer'] == 'Public APIs are important because of key and important reasons.'
     assert answer['completion_reason'] == 'done'
@@ -1993,17 +2036,41 @@ def test_send_ai_text_gen(mock_client, mock_box_session, mock_ai_question_respon
         "answer": "Public API schemas provide necessary information to integrate with APIs...",
         "created_at": "2013-12-12T11:20:43-08:00"
     }]
+    ai_agent = {
+        'type': 'ai_agent_text_gen',
+        'basic_gen': {
+            'model': 'openai__gpt_3_5_turbo_16k'
+        }
+    }
     answer = mock_client.send_ai_text_gen(
         dialogue_history=dialogue_history,
         items=items,
-        prompt="Write an email to a client about the importance of public APIs."
+        prompt="Write an email to a client about the importance of public APIs.",
+        ai_agent=ai_agent
     )
 
     mock_box_session.post.assert_called_once_with(expected_url, data=json.dumps({
         'dialogue_history': dialogue_history,
         'items': items,
-        'prompt': "Write an email to a client about the importance of public APIs."
+        'prompt': "Write an email to a client about the importance of public APIs.",
+        'ai_agent': ai_agent
     }))
     assert answer['answer'] == 'Public APIs are important because of key and important reasons.'
     assert answer['completion_reason'] == 'done'
     assert answer['created_at'] == '2021-04-26T08:12:13.982Z'
+
+
+def test_get_ai_agent_default_config(mock_client, mock_box_session, mock_ai_agent_default_config_response):
+    expected_url = f'{API.BASE_API_URL}/ai_agent_default'
+    mock_box_session.get.return_value.json.return_value = mock_ai_agent_default_config_response
+
+    config = mock_client.get_ai_agent_default_config(
+        mode='text_gen',
+        language='en',
+        model='openai__gpt_3_5_turbo'
+    )
+
+    mock_box_session.get.assert_called_once_with(expected_url, params={'mode': 'text_gen', 'language': 'en', 'model': 'openai__gpt_3_5_turbo'})
+    assert config['type'] == 'ai_agent_text_gen'
+    assert config['basic_gen']['model'] == 'openai__gpt_3_5_turbo'
+    assert config['basic_gen']['embeddings']['model'] == 'openai__text_embedding_ada_002'
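Because the client only adds `language` and `model` to the query parameters when they are given, a follow-up unit test along these lines (hypothetical, not part of this commit) would pin that behaviour down using the same fixtures:

```python
def test_get_ai_agent_default_config_with_mode_only(mock_client, mock_box_session, mock_ai_agent_default_config_response):
    # Hypothetical test, not part of this commit: omitted optional arguments
    # should not appear in the request's query parameters.
    expected_url = f'{API.BASE_API_URL}/ai_agent_default'
    mock_box_session.get.return_value.json.return_value = mock_ai_agent_default_config_response

    config = mock_client.get_ai_agent_default_config(mode='text_gen')

    mock_box_session.get.assert_called_once_with(expected_url, params={'mode': 'text_gen'})
    assert config['type'] == 'ai_agent_text_gen'
```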
