Switch from GPT-3.5 to GPT-4o Mini
ariya committed Jul 19, 2024
1 parent b6aca64 · commit 4ec4084
Showing 4 changed files with 5 additions and 5 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/test-openai.yml
@@ -21,7 +21,7 @@ jobs:
         env:
           LLM_API_BASE_URL: 'https://api.openai.com/v1'
           LLM_API_KEY: ${{ secrets.OPENAI_API_KEY }}
-          LLM_CHAT_MODEL: 'gpt-3.5-turbo-0125'
+          LLM_CHAT_MODEL: 'gpt-4o-mini'
           LLM_STREAMING: 'no'
 
       - run: cat output.txt
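Note: the scripts read this configuration from the environment at run time. A minimal sketch in JavaScript of how the workflow's env block maps onto the variables used by ask-llm.js (the LLM_STREAMING interpretation is an assumption based on the 'no' value above):

const LLM_API_BASE_URL = process.env.LLM_API_BASE_URL;    // e.g. 'https://api.openai.com/v1'
const LLM_API_KEY = process.env.LLM_API_KEY;              // sent as a Bearer token when present
const LLM_CHAT_MODEL = process.env.LLM_CHAT_MODEL;        // falls back to 'gpt-4o-mini' when unset
const LLM_STREAMING = process.env.LLM_STREAMING !== 'no'; // assumption: 'no' disables streaming in CI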
2 changes: 1 addition & 1 deletion ask-llm.clj
@@ -32,7 +32,7 @@
 
 (defn chat [messages]
   (let [body {:messages messages
-              :model (or LLM-CHAT-MODEL "gpt-3.5-turbo")
+              :model (or LLM-CHAT-MODEL "gpt-4o-mini")
               :stop ["<|im_end|>" "<|end|>" "<|eot_id|>"]
               :max_tokens 200
               :temperature 0}
4 changes: 2 additions & 2 deletions ask-llm.js
@@ -35,7 +35,7 @@ const LLM_DEBUG = process.env.LLM_DEBUG;
 const chat = async (messages, handler) => {
     const url = `${LLM_API_BASE_URL}/chat/completions`;
     const auth = LLM_API_KEY ? { 'Authorization': `Bearer ${LLM_API_KEY}` } : {};
-    const model = LLM_CHAT_MODEL || 'gpt-3.5-turbo';
+    const model = LLM_CHAT_MODEL || 'gpt-4o-mini';
     const stop = ['<|im_end|>', '<|end|>', '<|eot_id|>'];
     const max_tokens = 200;
     const temperature = 0;
@@ -149,4 +149,4 @@ const SYSTEM_PROMPT = 'Answer the question politely and concisely.';
     }
 
     qa();
-})();
+})();
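For context, the constants changed in the first hunk feed an OpenAI-compatible chat-completions request. A minimal sketch of how they fit together; the diff elides the body of chat(), so the fetch call and response handling below are assumptions rather than the file's exact code:

const chat = async (messages) => {
    const url = `${LLM_API_BASE_URL}/chat/completions`;
    const auth = LLM_API_KEY ? { 'Authorization': `Bearer ${LLM_API_KEY}` } : {};
    const model = LLM_CHAT_MODEL || 'gpt-4o-mini';
    // POST the conversation so far; the stop sequences, max_tokens, and
    // temperature match the values shown in the hunk above.
    const response = await fetch(url, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json', ...auth },
        body: JSON.stringify({
            messages,
            model,
            stop: ['<|im_end|>', '<|end|>', '<|eot_id|>'],
            max_tokens: 200,
            temperature: 0
        })
    });
    const data = await response.json();
    return data.choices[0].message.content.trim();
};

Because only the fallback string changes, callers that set LLM_CHAT_MODEL explicitly are unaffected; the new default applies only when the variable is unset.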
2 changes: 1 addition & 1 deletion ask-llm.py
@@ -24,7 +24,7 @@ async def chat(messages, handler=None):
     if auth_header:
         headers["Authorization"] = auth_header
 
-    model = LLM_CHAT_MODEL or "gpt-3.5-turbo"
+    model = LLM_CHAT_MODEL or "gpt-4o-mini"
     stop = ["<|im_end|>", "<|end|>", "<|eot_id|>"]
     max_tokens = 200
     temperature = 0
