Commit 71cc32b

🐛 fix: Multiple deepseek-reasoner request errors (#5601)
* Update index.ts
* Update index.ts
* Update index.test.ts
1 parent 4032658 commit 71cc32b

File tree: 2 files changed, +163 −3 lines changed

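What the fix does: deepseek-reasoner returns request errors when, after the optional leading system message, the conversation does not open with a user turn or contains two consecutive messages with the same role. This commit normalizes the payload by inserting empty placeholder turns so every request satisfies both constraints. A minimal before/after illustration, ours rather than part of the commit, with shapes mirroring the test fixtures below:

```ts
// Hypothetical before/after pair, for illustration only.
const rejected = [
  { content: 'user1', role: 'user' },
  { content: 'user2', role: 'user' }, // two user turns in a row: rejected by deepseek-reasoner
];

const accepted = [
  { content: 'user1', role: 'user' },
  { content: '', role: 'assistant' }, // empty placeholder inserted by the fix
  { content: 'user2', role: 'user' },
];
```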

src/libs/agent-runtime/deepseek/index.test.ts (+135 lines)
```diff
@@ -4,12 +4,15 @@ import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
 
 import {
   ChatStreamCallbacks,
+  ChatStreamPayload,
+  LLMRoleType,
   LobeOpenAICompatibleRuntime,
   ModelProvider,
 } from '@/libs/agent-runtime';
 
 import * as debugStreamModule from '../utils/debugStream';
 import { LobeDeepSeekAI } from './index';
+import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
 
 const provider = ModelProvider.DeepSeek;
 const defaultBaseURL = 'https://api.deepseek.com/v1';
```
```diff
@@ -22,6 +25,17 @@ vi.spyOn(console, 'error').mockImplementation(() => {});
 
 let instance: LobeOpenAICompatibleRuntime;
 
+const createDeepSeekAIInstance = () => new LobeDeepSeekAI({ apiKey: 'test' });
+
+const mockSuccessfulChatCompletion = () => {
+  vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue({
+    id: 'cmpl-mock',
+    object: 'chat.completion',
+    created: Date.now(),
+    choices: [{ index: 0, message: { role: 'assistant', content: 'Mock response' }, finish_reason: 'stop' }],
+  } as any);
+};
+
 beforeEach(() => {
   instance = new LobeDeepSeekAI({ apiKey: 'test' });
 
```
```diff
@@ -251,5 +265,126 @@ describe('LobeDeepSeekAI', () => {
         process.env.DEBUG_DEEPSEEK_CHAT_COMPLETION = originalDebugValue;
       });
     });
+
+    describe('deepseek-reasoner', () => {
+      beforeEach(() => {
+        instance = createDeepSeekAIInstance();
+        mockSuccessfulChatCompletion();
+      });
+
+      it('should insert a user message if the first message is from assistant', async () => {
+        const payloadMessages = [{ content: 'Hello', role: 'assistant' as LLMRoleType }];
+        const expectedMessages = [
+          { content: '', role: 'user' },
+          ...payloadMessages,
+        ];
+
+        const payload: ChatStreamPayload = {
+          messages: payloadMessages,
+          model: 'deepseek-reasoner',
+          temperature: 0,
+        };
+
+        await instance.chat(payload);
+
+        expect(instance['client'].chat.completions.create).toHaveBeenCalled();
+        const actualArgs = (instance['client'].chat.completions.create as Mock).mock.calls[0];
+        const actualMessages = actualArgs[0].messages;
+        expect(actualMessages).toEqual(expectedMessages);
+      });
+
+      it('should insert a user message if the first message is from assistant (with system summary)', async () => {
+        const payloadMessages = [
+          { content: 'System summary', role: 'system' as LLMRoleType },
+          { content: 'Hello', role: 'assistant' as LLMRoleType },
+        ];
+        const expectedMessages = [
+          { content: 'System summary', role: 'system' },
+          { content: '', role: 'user' },
+          { content: 'Hello', role: 'assistant' },
+        ];
+
+        const payload: ChatStreamPayload = {
+          messages: payloadMessages,
+          model: 'deepseek-reasoner',
+          temperature: 0,
+        };
+
+        await instance.chat(payload);
+
+        expect(instance['client'].chat.completions.create).toHaveBeenCalled();
+        const actualArgs = (instance['client'].chat.completions.create as Mock).mock.calls[0];
+        const actualMessages = actualArgs[0].messages;
+        expect(actualMessages).toEqual(expectedMessages);
+      });
+
+      it('should insert alternating roles if messages do not alternate', async () => {
+        const payloadMessages = [
+          { content: 'user1', role: 'user' as LLMRoleType },
+          { content: 'user2', role: 'user' as LLMRoleType },
+          { content: 'assistant1', role: 'assistant' as LLMRoleType },
+          { content: 'assistant2', role: 'assistant' as LLMRoleType },
+        ];
+        const expectedMessages = [
+          { content: 'user1', role: 'user' },
+          { content: '', role: 'assistant' },
+          { content: 'user2', role: 'user' },
+          { content: 'assistant1', role: 'assistant' },
+          { content: '', role: 'user' },
+          { content: 'assistant2', role: 'assistant' },
+        ];
+
+        const payload: ChatStreamPayload = {
+          messages: payloadMessages,
+          model: 'deepseek-reasoner',
+          temperature: 0,
+        };
+
+        await instance.chat(payload);
+
+        expect(instance['client'].chat.completions.create).toHaveBeenCalled();
+        const actualArgs = (instance['client'].chat.completions.create as Mock).mock.calls[0];
+        const actualMessages = actualArgs[0].messages;
+        expect(actualMessages).toEqual(expectedMessages);
+      });
+
+      it('complex condition', async () => {
+        const payloadMessages = [
+          { content: 'system', role: 'system' as LLMRoleType },
+          { content: 'assistant', role: 'assistant' as LLMRoleType },
+          { content: 'user1', role: 'user' as LLMRoleType },
+          { content: 'user2', role: 'user' as LLMRoleType },
+          { content: 'user3', role: 'user' as LLMRoleType },
+          { content: 'assistant1', role: 'assistant' as LLMRoleType },
+          { content: 'assistant2', role: 'assistant' as LLMRoleType },
+        ];
+        const expectedMessages = [
+          { content: 'system', role: 'system' },
+          { content: '', role: 'user' },
+          { content: 'assistant', role: 'assistant' },
+          { content: 'user1', role: 'user' },
+          { content: '', role: 'assistant' },
+          { content: 'user2', role: 'user' },
+          { content: '', role: 'assistant' },
+          { content: 'user3', role: 'user' },
+          { content: 'assistant1', role: 'assistant' },
+          { content: '', role: 'user' },
+          { content: 'assistant2', role: 'assistant' },
+        ];
+
+        const payload: ChatStreamPayload = {
+          messages: payloadMessages,
+          model: 'deepseek-reasoner',
+          temperature: 0,
+        };
+
+        await instance.chat(payload);
+
+        expect(instance['client'].chat.completions.create).toHaveBeenCalled();
+        const actualArgs = (instance['client'].chat.completions.create as Mock).mock.calls[0];
+        const actualMessages = actualArgs[0].messages;
+        expect(actualMessages).toEqual(expectedMessages);
+      });
+    });
   });
 });
```
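All four cases assert the same invariant: after an optional leading system message, the first turn comes from the user and roles strictly alternate. A hedged sketch of that invariant as a standalone check (the helper `assertAlternatingRoles` is ours, not the repo's):

```ts
type SimpleMessage = { content: string; role: string };

// Throws if the message list would still be rejected by deepseek-reasoner,
// per the expectations encoded in the tests above.
function assertAlternatingRoles(messages: SimpleMessage[]): void {
  const turns = messages[0]?.role === 'system' ? messages.slice(1) : messages;
  if (turns.length > 0 && turns[0].role !== 'user') {
    throw new Error('first non-system message must be from the user');
  }
  for (let i = 1; i < turns.length; i++) {
    if (turns[i].role === turns[i - 1].role) {
      throw new Error(`messages ${i - 1} and ${i} share the role '${turns[i].role}'`);
    }
  }
}
```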

src/libs/agent-runtime/deepseek/index.ts (+28 −3 lines)
```diff
@@ -12,24 +12,49 @@ export interface DeepSeekModelCard {
 export const LobeDeepSeekAI = LobeOpenAICompatibleFactory({
   baseURL: 'https://api.deepseek.com/v1',
   chatCompletion: {
-    handlePayload: ({ frequency_penalty, model, presence_penalty, temperature, top_p, ...payload }: ChatStreamPayload) =>
-      ({
+    handlePayload: ({ frequency_penalty, messages, model, presence_penalty, temperature, top_p, ...payload }: ChatStreamPayload) => {
+      // github.com/lobehub/lobe-chat/pull/5548
+      let filteredMessages = messages.filter(message => message.role !== 'system');
+
+      if (filteredMessages.length > 0 && filteredMessages[0].role === 'assistant') {
+        filteredMessages.unshift({ content: "", role: "user" });
+      }
+
+      let lastRole = '';
+      for (let i = 0; i < filteredMessages.length; i++) {
+        const message = filteredMessages[i];
+        if (message.role === lastRole) {
+          const newRole = lastRole === 'assistant' ? 'user' : 'assistant';
+          filteredMessages.splice(i, 0, { content: "", role: newRole });
+          i++;
+        }
+        lastRole = message.role;
+      }
+
+      if (messages.length > 0 && messages[0].role === 'system') {
+        filteredMessages.unshift(messages[0]);
+      }
+
+      return {
         ...payload,
         model,
         ...(model === 'deepseek-reasoner'
           ? {
               frequency_penalty: undefined,
+              messages: filteredMessages,
               presence_penalty: undefined,
               temperature: undefined,
               top_p: undefined,
             }
           : {
               frequency_penalty,
+              messages,
               presence_penalty,
               temperature,
               top_p,
             }),
-      }) as OpenAI.ChatCompletionCreateParamsStreaming,
+      } as OpenAI.ChatCompletionCreateParamsStreaming;
+    },
   },
   debug: {
     chatCompletion: () => process.env.DEBUG_DEEPSEEK_CHAT_COMPLETION === '1',
```
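The normalization can be read as a pure function over the message list. A minimal sketch under that reading (the name `interleaveForReasoner` is hypothetical; the logic mirrors the `handlePayload` body above):

```ts
type Msg = { content: string; role: string };

function interleaveForReasoner(messages: Msg[]): Msg[] {
  // Set the system message aside; it is re-attached at the head afterwards.
  const result = messages.filter((m) => m.role !== 'system');

  // The first non-system turn must come from the user.
  if (result.length > 0 && result[0].role === 'assistant') {
    result.unshift({ content: '', role: 'user' });
  }

  // Break up any run of same-role turns with an empty opposite-role turn.
  let lastRole = '';
  for (let i = 0; i < result.length; i++) {
    const message = result[i];
    if (message.role === lastRole) {
      result.splice(i, 0, { content: '', role: lastRole === 'assistant' ? 'user' : 'assistant' });
      i++; // skip past the inserted placeholder
    }
    lastRole = message.role;
  }

  if (messages.length > 0 && messages[0].role === 'system') {
    result.unshift(messages[0]);
  }
  return result;
}

// Reproduces the first test case:
// [assistant 'Hello'] -> [user '', assistant 'Hello']
console.log(interleaveForReasoner([{ content: 'Hello', role: 'assistant' }]));
```

Note the design choice in the branch above: for deepseek-reasoner the sampling parameters (`frequency_penalty`, `presence_penalty`, `temperature`, `top_p`) are forced to `undefined`, presumably because the reasoner endpoint does not support them, while all other DeepSeek models receive the original messages and parameters unchanged.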
