@@ -4,12 +4,15 @@ import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
4
4
5
5
import {
6
6
ChatStreamCallbacks ,
7
+ ChatStreamPayload ,
8
+ LLMRoleType ,
7
9
LobeOpenAICompatibleRuntime ,
8
10
ModelProvider ,
9
11
} from '@/libs/agent-runtime' ;
10
12
11
13
import * as debugStreamModule from '../utils/debugStream' ;
12
14
import { LobeDeepSeekAI } from './index' ;
15
+ import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory' ;
13
16
14
17
// Provider under test and the endpoint LobeDeepSeekAI should default to.
const provider = ModelProvider.DeepSeek;
const defaultBaseURL = 'https://api.deepseek.com/v1';
@@ -22,6 +25,17 @@ vi.spyOn(console, 'error').mockImplementation(() => {});
22
25
23
26
let instance : LobeOpenAICompatibleRuntime ;
24
27
28
+ const createDeepSeekAIInstance = ( ) => new LobeDeepSeekAI ( { apiKey : 'test' } ) ;
29
+
30
+ const mockSuccessfulChatCompletion = ( ) => {
31
+ vi . spyOn ( instance [ 'client' ] . chat . completions , 'create' ) . mockResolvedValue ( {
32
+ id : 'cmpl-mock' ,
33
+ object : 'chat.completion' ,
34
+ created : Date . now ( ) ,
35
+ choices : [ { index : 0 , message : { role : 'assistant' , content : 'Mock response' } , finish_reason : 'stop' } ] ,
36
+ } as any ) ;
37
+ } ;
38
+
25
39
beforeEach ( ( ) => {
26
40
instance = new LobeDeepSeekAI ( { apiKey : 'test' } ) ;
27
41
@@ -251,5 +265,126 @@ describe('LobeDeepSeekAI', () => {
251
265
process . env . DEBUG_DEEPSEEK_CHAT_COMPLETION = originalDebugValue ;
252
266
} ) ;
253
267
} ) ;
268
+
269
+ describe ( 'deepseek-reasoner' , ( ) => {
270
+ beforeEach ( ( ) => {
271
+ instance = createDeepSeekAIInstance ( ) ;
272
+ mockSuccessfulChatCompletion ( ) ;
273
+ } ) ;
274
+
275
+ it ( 'should insert a user message if the first message is from assistant' , async ( ) => {
276
+ const payloadMessages = [ { content : 'Hello' , role : 'assistant' as LLMRoleType } ] ;
277
+ const expectedMessages = [
278
+ { content : '' , role : 'user' } ,
279
+ ...payloadMessages ,
280
+ ] ;
281
+
282
+ const payload : ChatStreamPayload = {
283
+ messages : payloadMessages ,
284
+ model : 'deepseek-reasoner' ,
285
+ temperature : 0 ,
286
+ } ;
287
+
288
+ await instance . chat ( payload ) ;
289
+
290
+ expect ( instance [ 'client' ] . chat . completions . create ) . toHaveBeenCalled ( ) ;
291
+ const actualArgs = ( instance [ 'client' ] . chat . completions . create as Mock ) . mock . calls [ 0 ] ;
292
+ const actualMessages = actualArgs [ 0 ] . messages ;
293
+ expect ( actualMessages ) . toEqual ( expectedMessages ) ;
294
+ } ) ;
295
+
296
+ it ( 'should insert a user message if the first message is from assistant (with system summary)' , async ( ) => {
297
+ const payloadMessages = [
298
+ { content : 'System summary' , role : 'system' as LLMRoleType } ,
299
+ { content : 'Hello' , role : 'assistant' as LLMRoleType } ,
300
+ ] ;
301
+ const expectedMessages = [
302
+ { content : 'System summary' , role : 'system' } ,
303
+ { content : '' , role : 'user' } ,
304
+ { content : 'Hello' , role : 'assistant' } ,
305
+ ] ;
306
+
307
+ const payload : ChatStreamPayload = {
308
+ messages : payloadMessages ,
309
+ model : 'deepseek-reasoner' ,
310
+ temperature : 0 ,
311
+ } ;
312
+
313
+ await instance . chat ( payload ) ;
314
+
315
+ expect ( instance [ 'client' ] . chat . completions . create ) . toHaveBeenCalled ( ) ;
316
+ const actualArgs = ( instance [ 'client' ] . chat . completions . create as Mock ) . mock . calls [ 0 ] ;
317
+ const actualMessages = actualArgs [ 0 ] . messages ;
318
+ expect ( actualMessages ) . toEqual ( expectedMessages ) ;
319
+ } ) ;
320
+
321
+ it ( 'should insert alternating roles if messages do not alternate' , async ( ) => {
322
+ const payloadMessages = [
323
+ { content : 'user1' , role : 'user' as LLMRoleType } ,
324
+ { content : 'user2' , role : 'user' as LLMRoleType } ,
325
+ { content : 'assistant1' , role : 'assistant' as LLMRoleType } ,
326
+ { content : 'assistant2' , role : 'assistant' as LLMRoleType } ,
327
+ ] ;
328
+ const expectedMessages = [
329
+ { content : 'user1' , role : 'user' } ,
330
+ { content : '' , role : 'assistant' } ,
331
+ { content : 'user2' , role : 'user' } ,
332
+ { content : 'assistant1' , role : 'assistant' } ,
333
+ { content : '' , role : 'user' } ,
334
+ { content : 'assistant2' , role : 'assistant' } ,
335
+ ] ;
336
+
337
+ const payload : ChatStreamPayload = {
338
+ messages : payloadMessages ,
339
+ model : 'deepseek-reasoner' ,
340
+ temperature : 0 ,
341
+ } ;
342
+
343
+ await instance . chat ( payload ) ;
344
+
345
+ expect ( instance [ 'client' ] . chat . completions . create ) . toHaveBeenCalled ( ) ;
346
+ const actualArgs = ( instance [ 'client' ] . chat . completions . create as Mock ) . mock . calls [ 0 ] ;
347
+ const actualMessages = actualArgs [ 0 ] . messages ;
348
+ expect ( actualMessages ) . toEqual ( expectedMessages ) ;
349
+ } ) ;
350
+
351
+ it ( 'complex condition' , async ( ) => {
352
+ const payloadMessages = [
353
+ { content : 'system' , role : 'system' as LLMRoleType } ,
354
+ { content : 'assistant' , role : 'assistant' as LLMRoleType } ,
355
+ { content : 'user1' , role : 'user' as LLMRoleType } ,
356
+ { content : 'user2' , role : 'user' as LLMRoleType } ,
357
+ { content : 'user3' , role : 'user' as LLMRoleType } ,
358
+ { content : 'assistant1' , role : 'assistant' as LLMRoleType } ,
359
+ { content : 'assistant2' , role : 'assistant' as LLMRoleType } ,
360
+ ] ;
361
+ const expectedMessages = [
362
+ { content : 'system' , role : 'system' } ,
363
+ { content : '' , role : 'user' } ,
364
+ { content : 'assistant' , role : 'assistant' } ,
365
+ { content : 'user1' , role : 'user' } ,
366
+ { content : '' , role : 'assistant' } ,
367
+ { content : 'user2' , role : 'user' } ,
368
+ { content : '' , role : 'assistant' } ,
369
+ { content : 'user3' , role : 'user' } ,
370
+ { content : 'assistant1' , role : 'assistant' } ,
371
+ { content : '' , role : 'user' } ,
372
+ { content : 'assistant2' , role : 'assistant' } ,
373
+ ] ;
374
+
375
+ const payload : ChatStreamPayload = {
376
+ messages : payloadMessages ,
377
+ model : 'deepseek-reasoner' ,
378
+ temperature : 0 ,
379
+ } ;
380
+
381
+ await instance . chat ( payload ) ;
382
+
383
+ expect ( instance [ 'client' ] . chat . completions . create ) . toHaveBeenCalled ( ) ;
384
+ const actualArgs = ( instance [ 'client' ] . chat . completions . create as Mock ) . mock . calls [ 0 ] ;
385
+ const actualMessages = actualArgs [ 0 ] . messages ;
386
+ expect ( actualMessages ) . toEqual ( expectedMessages ) ;
387
+ } ) ;
388
+ } ) ;
254
389
} ) ;
255
390
} ) ;
0 commit comments