@lobehub/chat 1.47.22 → 1.48.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (98)
  1. package/CHANGELOG.md +50 -0
  2. package/changelog/v1.json +18 -0
  3. package/locales/ar/chat.json +4 -0
  4. package/locales/ar/components.json +1 -0
  5. package/locales/ar/models.json +6 -0
  6. package/locales/bg-BG/chat.json +4 -0
  7. package/locales/bg-BG/components.json +1 -0
  8. package/locales/bg-BG/models.json +6 -0
  9. package/locales/de-DE/chat.json +4 -0
  10. package/locales/de-DE/components.json +1 -0
  11. package/locales/de-DE/models.json +6 -0
  12. package/locales/en-US/chat.json +4 -0
  13. package/locales/en-US/components.json +1 -0
  14. package/locales/en-US/models.json +6 -0
  15. package/locales/es-ES/chat.json +4 -0
  16. package/locales/es-ES/components.json +1 -0
  17. package/locales/es-ES/models.json +6 -0
  18. package/locales/fa-IR/chat.json +4 -0
  19. package/locales/fa-IR/components.json +1 -0
  20. package/locales/fa-IR/models.json +6 -0
  21. package/locales/fr-FR/chat.json +4 -0
  22. package/locales/fr-FR/components.json +1 -0
  23. package/locales/fr-FR/models.json +6 -0
  24. package/locales/it-IT/chat.json +4 -0
  25. package/locales/it-IT/components.json +1 -0
  26. package/locales/it-IT/models.json +6 -0
  27. package/locales/ja-JP/chat.json +4 -0
  28. package/locales/ja-JP/components.json +1 -0
  29. package/locales/ja-JP/models.json +6 -0
  30. package/locales/ko-KR/chat.json +4 -0
  31. package/locales/ko-KR/components.json +1 -0
  32. package/locales/ko-KR/models.json +6 -0
  33. package/locales/nl-NL/chat.json +4 -0
  34. package/locales/nl-NL/components.json +1 -0
  35. package/locales/nl-NL/models.json +6 -0
  36. package/locales/pl-PL/chat.json +4 -0
  37. package/locales/pl-PL/components.json +1 -0
  38. package/locales/pl-PL/models.json +6 -0
  39. package/locales/pt-BR/chat.json +4 -0
  40. package/locales/pt-BR/components.json +1 -0
  41. package/locales/pt-BR/models.json +6 -0
  42. package/locales/ru-RU/chat.json +4 -0
  43. package/locales/ru-RU/components.json +1 -0
  44. package/locales/ru-RU/models.json +6 -0
  45. package/locales/tr-TR/chat.json +4 -0
  46. package/locales/tr-TR/components.json +1 -0
  47. package/locales/tr-TR/models.json +6 -0
  48. package/locales/vi-VN/chat.json +4 -0
  49. package/locales/vi-VN/components.json +1 -0
  50. package/locales/vi-VN/models.json +6 -0
  51. package/locales/zh-CN/chat.json +4 -0
  52. package/locales/zh-CN/components.json +1 -0
  53. package/locales/zh-CN/modelProvider.json +2 -2
  54. package/locales/zh-CN/models.json +7 -1
  55. package/locales/zh-TW/chat.json +4 -0
  56. package/locales/zh-TW/components.json +1 -0
  57. package/locales/zh-TW/models.json +6 -0
  58. package/package.json +1 -1
  59. package/src/components/ModelSelect/index.tsx +16 -1
  60. package/src/config/aiModels/deepseek.ts +3 -0
  61. package/src/config/aiModels/hunyuan.ts +132 -12
  62. package/src/config/aiModels/qwen.ts +19 -2
  63. package/src/config/modelProviders/hunyuan.ts +2 -0
  64. package/src/database/client/migrations.json +13 -2
  65. package/src/database/migrations/0014_add_message_reasoning.sql +1 -0
  66. package/src/database/migrations/meta/0014_snapshot.json +3961 -0
  67. package/src/database/migrations/meta/_journal.json +7 -0
  68. package/src/database/schemas/message.ts +2 -3
  69. package/src/database/server/models/__tests__/message.test.ts +5 -4
  70. package/src/database/server/models/message.ts +35 -13
  71. package/src/database/server/models/topic.ts +3 -2
  72. package/src/features/Conversation/Messages/Assistant/Reasoning/index.tsx +123 -0
  73. package/src/features/Conversation/Messages/Assistant/index.tsx +8 -1
  74. package/src/features/Conversation/components/MarkdownElements/LobeThinking/index.ts +2 -2
  75. package/src/libs/agent-runtime/deepseek/index.ts +1 -1
  76. package/src/libs/agent-runtime/google/index.ts +7 -5
  77. package/src/libs/agent-runtime/hunyuan/index.ts +24 -0
  78. package/src/libs/agent-runtime/qwen/index.ts +8 -3
  79. package/src/libs/agent-runtime/stepfun/index.ts +7 -1
  80. package/src/libs/agent-runtime/utils/streams/openai.test.ts +203 -0
  81. package/src/libs/agent-runtime/utils/streams/openai.ts +8 -1
  82. package/src/libs/agent-runtime/utils/streams/protocol.ts +1 -1
  83. package/src/locales/default/chat.ts +4 -0
  84. package/src/locales/default/components.ts +1 -0
  85. package/src/server/routers/lambda/message.ts +4 -2
  86. package/src/services/message/client.test.ts +1 -1
  87. package/src/services/message/type.ts +1 -1
  88. package/src/store/chat/selectors.ts +1 -0
  89. package/src/store/chat/slices/aiChat/actions/generateAIChat.ts +60 -14
  90. package/src/store/chat/slices/aiChat/initialState.ts +5 -0
  91. package/src/store/chat/slices/aiChat/selectors.ts +9 -0
  92. package/src/store/chat/slices/message/action.ts +4 -1
  93. package/src/types/aiModel.ts +5 -14
  94. package/src/types/message/base.ts +59 -0
  95. package/src/types/message/chat.ts +136 -0
  96. package/src/types/message/index.ts +2 -135
  97. package/src/utils/fetch/__tests__/fetchSSE.test.ts +34 -0
  98. package/src/utils/fetch/fetchSSE.ts +38 -3
@@ -552,4 +552,207 @@ describe('OpenAIStream', () => {
      expect(onToolCallMock).toHaveBeenCalledTimes(2);
    });
  });
+
+  describe('Reasoning', () => {
+    it('should handle reasoning event', async () => {
+      const data = [
+        {
+          id: '1',
+          object: 'chat.completion.chunk',
+          created: 1737563070,
+          model: 'deepseek-reasoner',
+          system_fingerprint: 'fp_1c5d8833bc',
+          choices: [
+            {
+              index: 0,
+              delta: { role: 'assistant', content: null, reasoning_content: '' },
+              logprobs: null,
+              finish_reason: null,
+            },
+          ],
+        },
+        {
+          id: '1',
+          object: 'chat.completion.chunk',
+          created: 1737563070,
+          model: 'deepseek-reasoner',
+          system_fingerprint: 'fp_1c5d8833bc',
+          choices: [
+            {
+              index: 0,
+              delta: { content: null, reasoning_content: '您好' },
+              logprobs: null,
+              finish_reason: null,
+            },
+          ],
+        },
+        {
+          id: '1',
+          object: 'chat.completion.chunk',
+          created: 1737563070,
+          model: 'deepseek-reasoner',
+          system_fingerprint: 'fp_1c5d8833bc',
+          choices: [
+            {
+              index: 0,
+              delta: { content: null, reasoning_content: '!' },
+              logprobs: null,
+              finish_reason: null,
+            },
+          ],
+        },
+        {
+          id: '1',
+          object: 'chat.completion.chunk',
+          created: 1737563070,
+          model: 'deepseek-reasoner',
+          system_fingerprint: 'fp_1c5d8833bc',
+          choices: [
+            {
+              index: 0,
+              delta: { content: '你好', reasoning_content: null },
+              logprobs: null,
+              finish_reason: null,
+            },
+          ],
+        },
+        {
+          id: '1',
+          object: 'chat.completion.chunk',
+          created: 1737563070,
+          model: 'deepseek-reasoner',
+          system_fingerprint: 'fp_1c5d8833bc',
+          choices: [
+            {
+              index: 0,
+              delta: { content: '很高兴', reasoning_content: null },
+              logprobs: null,
+              finish_reason: null,
+            },
+          ],
+        },
+        {
+          id: '1',
+          object: 'chat.completion.chunk',
+          created: 1737563070,
+          model: 'deepseek-reasoner',
+          system_fingerprint: 'fp_1c5d8833bc',
+          choices: [
+            {
+              index: 0,
+              delta: { content: '为您', reasoning_content: null },
+              logprobs: null,
+              finish_reason: null,
+            },
+          ],
+        },
+        {
+          id: '1',
+          object: 'chat.completion.chunk',
+          created: 1737563070,
+          model: 'deepseek-reasoner',
+          system_fingerprint: 'fp_1c5d8833bc',
+          choices: [
+            {
+              index: 0,
+              delta: { content: '提供', reasoning_content: null },
+              logprobs: null,
+              finish_reason: null,
+            },
+          ],
+        },
+        {
+          id: '1',
+          object: 'chat.completion.chunk',
+          created: 1737563070,
+          model: 'deepseek-reasoner',
+          system_fingerprint: 'fp_1c5d8833bc',
+          choices: [
+            {
+              index: 0,
+              delta: { content: '帮助。', reasoning_content: null },
+              logprobs: null,
+              finish_reason: null,
+            },
+          ],
+        },
+        {
+          id: '1',
+          object: 'chat.completion.chunk',
+          created: 1737563070,
+          model: 'deepseek-reasoner',
+          system_fingerprint: 'fp_1c5d8833bc',
+          choices: [
+            {
+              index: 0,
+              delta: { content: '', reasoning_content: null },
+              logprobs: null,
+              finish_reason: 'stop',
+            },
+          ],
+          usage: {
+            prompt_tokens: 6,
+            completion_tokens: 104,
+            total_tokens: 110,
+            prompt_tokens_details: { cached_tokens: 0 },
+            completion_tokens_details: { reasoning_tokens: 70 },
+            prompt_cache_hit_tokens: 0,
+            prompt_cache_miss_tokens: 6,
+          },
+        },
+      ];
+
+      const mockOpenAIStream = new ReadableStream({
+        start(controller) {
+          data.forEach((chunk) => {
+            controller.enqueue(chunk);
+          });
+
+          controller.close();
+        },
+      });
+
+      const protocolStream = OpenAIStream(mockOpenAIStream);
+
+      const decoder = new TextDecoder();
+      const chunks = [];
+
+      // @ts-ignore
+      for await (const chunk of protocolStream) {
+        chunks.push(decoder.decode(chunk, { stream: true }));
+      }
+
+      expect(chunks).toEqual(
+        [
+          'id: 1',
+          'event: reasoning',
+          `data: ""\n`,
+          'id: 1',
+          'event: reasoning',
+          `data: "您好"\n`,
+          'id: 1',
+          'event: reasoning',
+          `data: "!"\n`,
+          'id: 1',
+          'event: text',
+          `data: "你好"\n`,
+          'id: 1',
+          'event: text',
+          `data: "很高兴"\n`,
+          'id: 1',
+          'event: text',
+          `data: "为您"\n`,
+          'id: 1',
+          'event: text',
+          `data: "提供"\n`,
+          'id: 1',
+          'event: text',
+          `data: "帮助。"\n`,
+          'id: 1',
+          'event: stop',
+          `data: "stop"\n`,
+        ].map((i) => `${i}\n`),
+      );
+    });
+  });
  });
@@ -45,6 +45,7 @@ export const transformOpenAIStream = (
    return { data: chunk, id: chunk.id, type: 'data' };
  }

+  // tools calling
  if (typeof item.delta?.tool_calls === 'object' && item.delta.tool_calls?.length > 0) {
    return {
      data: item.delta.tool_calls.map((value, index): StreamToolCallChunkData => {
@@ -91,7 +92,13 @@
    return { data: item.delta.content, id: chunk.id, type: 'text' };
  }

-  if (item.delta?.content === null) {
+  // the no-content case
+  if (item.delta && item.delta.content === null) {
+    // deepseek reasoner puts its thinking in the reasoning_content field
+    if ('reasoning_content' in item.delta && typeof item.delta.reasoning_content === 'string') {
+      return { data: item.delta.reasoning_content, id: chunk.id, type: 'reasoning' };
+    }
+
    return { data: item.delta, id: chunk.id, type: 'data' };
  }

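Net effect of the two hunks above: a delta whose content is null but whose reasoning_content is a string now surfaces as a dedicated 'reasoning' chunk instead of falling through as opaque 'data'. A minimal self-contained sketch of that mapping follows; DeltaLike, SketchChunk, and mapDelta are illustrative names, not identifiers from the package:

// Sketch: how a DeepSeek-style delta maps onto the widened chunk union.
// `DeltaLike` is a hypothetical narrowing of an OpenAI chunk delta.
interface DeltaLike {
  content: string | null;
  reasoning_content?: string | null;
}

type SketchChunk =
  | { data: string; id: string; type: 'text' | 'reasoning' }
  | { data: DeltaLike; id: string; type: 'data' };

const mapDelta = (delta: DeltaLike, id: string): SketchChunk => {
  // normal assistant tokens keep flowing as 'text'
  if (typeof delta.content === 'string') return { data: delta.content, id, type: 'text' };

  // deepseek-reasoner: thinking tokens arrive in reasoning_content while content is null
  if (delta.content === null && typeof delta.reasoning_content === 'string')
    return { data: delta.reasoning_content, id, type: 'reasoning' };

  // anything else falls through as raw data
  return { data: delta, id, type: 'data' };
};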
@@ -15,7 +15,7 @@ export interface StreamStack {
 export interface StreamProtocolChunk {
   data: any;
   id?: string;
-  type: 'text' | 'tool_calls' | 'data' | 'stop' | 'error';
+  type: 'text' | 'tool_calls' | 'data' | 'stop' | 'error' | 'reasoning';
 }

 export interface StreamToolCallChunkData {
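With 'reasoning' added to the StreamProtocolChunk union, stream consumers can branch on the new type alongside the existing ones. A hedged sketch of such a consumer; handleChunk and the logging behavior are illustrative, not the package's renderer:

// Sketch: branching on the widened chunk type on the consumer side.
type ChunkType = 'text' | 'tool_calls' | 'data' | 'stop' | 'error' | 'reasoning';

const handleChunk = (type: ChunkType, data: unknown) => {
  switch (type) {
    case 'reasoning':
      // thinking tokens: accumulated into a separate "deep thinking" panel
      console.log('thinking:', data);
      break;
    case 'text':
      // answer tokens: appended to the message body as before
      console.log('answer:', data);
      break;
    default:
      // tool_calls / data / stop / error keep their existing handling
      console.log(type, data);
  }
};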
@@ -84,6 +84,10 @@ export default {
      },
    },
  },
+  reasoning: {
+    thinking: '深度思考中',
+    thought: '已深度思考(用时 {{duration}} 秒)',
+  },
  regenerate: '重新生成',
  roleAndArchive: '角色与记录',
  searchAgentPlaceholder: '搜索助手...',
@@ -78,6 +78,7 @@ export default {
  custom: '自定义模型,默认设定同时支持函数调用与视觉识别,请根据实际情况验证上述能力的可用性',
  file: '该模型支持上传文件读取与识别',
  functionCall: '该模型支持函数调用(Function Call)',
+  reasoning: '该模型支持深度思考',
  tokens: '该模型单个会话最多支持 {{tokens}} Tokens',
  vision: '该模型支持视觉识别',
},
@@ -63,10 +63,12 @@ export const messageRouter = router({
    return data.id;
  }),

+  // TODO: it will be removed in V2
  getAllMessages: messageProcedure.query(async ({ ctx }): Promise<ChatMessageList> => {
-    return ctx.messageModel.queryAll();
+    return ctx.messageModel.queryAll() as any;
  }),

+  // TODO: it will be removed in V2
  getAllMessagesInSession: messageProcedure
    .input(
      z.object({
@@ -74,7 +76,7 @@
      }),
    )
    .query(async ({ ctx, input }): Promise<ChatMessageList> => {
-      return ctx.messageModel.queryBySessionId(input.sessionId);
+      return ctx.messageModel.queryBySessionId(input.sessionId) as any;
    }),

  getHeatmaps: messageProcedure.query(async ({ ctx }) => {
@@ -5,7 +5,6 @@ import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
 import { MessageModel } from '@/database/_deprecated/models/message';
 import { clientDB, initializeDB } from '@/database/client/db';
 import {
-  MessageItem,
   files,
   messagePlugins,
   messageTTS,
@@ -21,6 +20,7 @@ import {
   ChatTTS,
   ChatTranslate,
   CreateMessageParams,
+  MessageItem,
 } from '@/types/message';

 import { ClientService } from './client';
@@ -1,12 +1,12 @@
 import type { HeatmapsProps } from '@lobehub/charts';

-import { MessageItem } from '@/database/schemas';
 import {
   ChatMessage,
   ChatMessageError,
   ChatTTS,
   ChatTranslate,
   CreateMessageParams,
+  MessageItem,
   ModelRankItem,
 } from '@/types/message';

@@ -1,3 +1,4 @@
+ export { aiChatSelectors } from './slices/aiChat/selectors';
 export { chatToolSelectors } from './slices/builtinTool/selectors';
 export { chatSelectors } from './slices/message/selectors';
 export * from './slices/portal/selectors';
@@ -72,7 +72,7 @@ export interface AIGenerateAction {
   */
  internal_fetchAIChatMessage: (
    messages: ChatMessage[],
-    assistantMessageId: string,
+    messageId: string,
    params?: ProcessMessageParams,
  ) => Promise<{
    isFunctionCall: boolean;
@@ -102,6 +102,14 @@
   * Controls the streaming state of tool calling processes, updating the UI accordingly
   */
  internal_toggleToolCallingStreaming: (id: string, streaming: boolean[] | undefined) => void;
+  /**
+   * Toggles the loading state for AI message reasoning, managing the UI feedback
+   */
+  internal_toggleChatReasoning: (
+    loading: boolean,
+    id?: string,
+    action?: string,
+  ) => AbortController | undefined;
}

export const generateAIChat: StateCreator<
@@ -357,19 +365,20 @@
      await get().internal_summaryHistory(historyMessages);
    }
  },
-  internal_fetchAIChatMessage: async (messages, assistantId, params) => {
+  internal_fetchAIChatMessage: async (messages, messageId, params) => {
    const {
      internal_toggleChatLoading,
      refreshMessages,
      internal_updateMessageContent,
      internal_dispatchMessage,
      internal_toggleToolCallingStreaming,
+      internal_toggleChatReasoning,
    } = get();

    const abortController = internal_toggleChatLoading(
      true,
-      assistantId,
-      n('generateMessage(start)', { assistantId, messages }) as string,
+      messageId,
+      n('generateMessage(start)', { messageId, messages }) as string,
    );

    const agentConfig = getAgentConfig();
@@ -414,6 +423,9 @@
    let isFunctionCall = false;
    let msgTraceId: string | undefined;
    let output = '';
+    let thinking = '';
+    let thinkingStartAt: number;
+    let duration: number;

    const historySummary = topicSelectors.currentActiveTopicSummary(get());
    await chatService.createAssistantMessageStream({
@@ -434,43 +446,74 @@
      },
      isWelcomeQuestion: params?.isWelcomeQuestion,
      onErrorHandle: async (error) => {
-        await messageService.updateMessageError(assistantId, error);
+        await messageService.updateMessageError(messageId, error);
        await refreshMessages();
      },
-      onFinish: async (content, { traceId, observationId, toolCalls }) => {
+      onFinish: async (content, { traceId, observationId, toolCalls, reasoning }) => {
        // if there is traceId, update it
        if (traceId) {
          msgTraceId = traceId;
-          await messageService.updateMessage(assistantId, {
+          await messageService.updateMessage(messageId, {
            traceId,
            observationId: observationId ?? undefined,
          });
        }

        if (toolCalls && toolCalls.length > 0) {
-          internal_toggleToolCallingStreaming(assistantId, undefined);
+          internal_toggleToolCallingStreaming(messageId, undefined);
        }

        // update the content after fetch result
-        await internal_updateMessageContent(assistantId, content, toolCalls);
+        await internal_updateMessageContent(
+          messageId,
+          content,
+          toolCalls,
+          !!reasoning ? { content: reasoning, duration } : undefined,
+        );
      },
      onMessageHandle: async (chunk) => {
        switch (chunk.type) {
          case 'text': {
            output += chunk.text;
+
+            // if there is no duration, it means the end of reasoning
+            if (!duration) {
+              duration = Date.now() - thinkingStartAt;
+              internal_toggleChatReasoning(false, messageId, n('generateMessage(end)') as string);
+            }
+
            internal_dispatchMessage({
-              id: assistantId,
+              id: messageId,
              type: 'updateMessage',
-              value: { content: output },
+              value: {
+                content: output,
+                reasoning: !!thinking ? { content: thinking, duration } : undefined,
+              },
+            });
+            break;
+          }
+          case 'reasoning': {
+            // if there is no thinkingStartAt, it means the start of reasoning
+            if (!thinkingStartAt) {
+              thinkingStartAt = Date.now();
+              internal_toggleChatReasoning(true, messageId, n('generateMessage(start)') as string);
+            }
+
+            thinking += chunk.text;
+
+            internal_dispatchMessage({
+              id: messageId,
+              type: 'updateMessage',
+              value: { reasoning: { content: thinking } },
            });
            break;
          }

          // is this message is just a tool call
          case 'tool_calls': {
-            internal_toggleToolCallingStreaming(assistantId, chunk.isAnimationActives);
+            internal_toggleToolCallingStreaming(messageId, chunk.isAnimationActives);
            internal_dispatchMessage({
-              id: assistantId,
+              id: messageId,
              type: 'updateMessage',
              value: { tools: get().internal_transformToolCalls(chunk.tool_calls) },
            });
@@ -480,7 +523,7 @@ export const generateAIChat: StateCreator<
      },
    });

-    internal_toggleChatLoading(false, assistantId, n('generateMessage(end)') as string);
+    internal_toggleChatLoading(false, messageId, n('generateMessage(end)') as string);

    return {
      isFunctionCall,
@@ -540,6 +583,9 @@
  internal_toggleChatLoading: (loading, id, action) => {
    return get().internal_toggleLoadingArrays('chatLoadingIds', loading, id, action);
  },
+  internal_toggleChatReasoning: (loading, id, action) => {
+    return get().internal_toggleLoadingArrays('reasoningLoadingIds', loading, id, action);
+  },
  internal_toggleToolCallingStreaming: (id, streaming) => {
    set(
      {
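The store changes above derive the reasoning duration from the stream shape itself: thinkingStartAt is stamped on the first 'reasoning' chunk, and duration is frozen once when the first 'text' chunk follows, which later feeds the reasoning duration shown in the UI. A condensed sketch of that accounting outside the zustand store; ThinkingClock and onChunk are illustrative names:

// Sketch: duration accounting for a reasoning -> text stream transition.
interface ThinkingClock {
  startAt?: number;
  duration?: number;
}

const onChunk = (clock: ThinkingClock, type: 'reasoning' | 'text', now = Date.now()) => {
  if (type === 'reasoning' && clock.startAt === undefined) {
    clock.startAt = now; // first thinking token: start the clock
  }
  if (type === 'text' && clock.startAt !== undefined && clock.duration === undefined) {
    clock.duration = now - clock.startAt; // first answer token: freeze the duration once
  }
  return clock;
};

// e.g. two reasoning chunks, then text: duration ends up ~ the thinking time
const clock: ThinkingClock = {};
onChunk(clock, 'reasoning', 1000);
onChunk(clock, 'reasoning', 1400);
onChunk(clock, 'text', 1800); // clock.duration === 800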
@@ -11,6 +11,10 @@ export interface ChatAIChatState {
   */
  messageRAGLoadingIds: string[];
  pluginApiLoadingIds: string[];
+  /**
+   * whether the AI message is reasoning
+   */
+  reasoningLoadingIds: string[];
  /**
   * the tool calling stream ids
   */
@@ -23,5 +27,6 @@ export const initialAiChatState: ChatAIChatState = {
  inputMessage: '',
  messageRAGLoadingIds: [],
  pluginApiLoadingIds: [],
+  reasoningLoadingIds: [],
  toolCallingStreamIds: {},
};
@@ -0,0 +1,9 @@
+
+ import type { ChatStoreState } from '../../initialState';
+
+ const isMessageInReasoning = (id: string) => (s: ChatStoreState) =>
+   s.reasoningLoadingIds.includes(id);
+
+ export const aiChatSelectors = {
+   isMessageInReasoning,
+ };
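Because the store barrel re-exports aiChatSelectors (see the selectors.ts hunk earlier), components can subscribe to the per-message reasoning flag. A usage sketch, assuming lobe-chat's useChatStore hook from @/store/chat (the hook itself is not part of this diff):

// Sketch: reading the reasoning flag for a single message in a component.
import { useChatStore } from '@/store/chat'; // assumed store hook, not shown in this diff
import { aiChatSelectors } from '@/store/chat/selectors';

export const useIsThinking = (messageId: string) =>
  useChatStore(aiChatSelectors.isMessageInReasoning(messageId));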
@@ -17,6 +17,7 @@ import {
  ChatMessageError,
  CreateMessageParams,
  MessageToolCall,
+  ModelReasoning,
 } from '@/types/message';
 import { TraceEventPayloads } from '@/types/trace';
 import { setNamespace } from '@/utils/storeDebug';
@@ -73,6 +74,7 @@
    id: string,
    content: string,
    toolCalls?: MessageToolCall[],
+    reasoning?: ModelReasoning,
  ) => Promise<void>;
  /**
   * update the message error with optimistic update
@@ -270,7 +272,7 @@ export const chatMessage: StateCreator<
    await messageService.updateMessage(id, { error });
    await get().refreshMessages();
  },
-  internal_updateMessageContent: async (id, content, toolCalls) => {
+  internal_updateMessageContent: async (id, content, toolCalls, reasoning) => {
    const { internal_dispatchMessage, refreshMessages, internal_transformToolCalls } = get();

    // Due to the async update method and refresh need about 100ms
@@ -289,6 +291,7 @@
    await messageService.updateMessage(id, {
      content,
      tools: toolCalls ? internal_transformToolCalls(toolCalls) : undefined,
+      reasoning,
    });
    await refreshMessages();
  },
@@ -30,6 +30,10 @@ export interface ModelAbilities {
   * whether model supports function call
   */
  functionCall?: boolean;
+  /**
+   * whether model supports reasoning
+   */
+  reasoning?: boolean;
  /**
   * whether model supports vision
   */
@@ -126,20 +130,7 @@
}

export interface AIChatModelCard extends AIBaseModelCard {
-  abilities?: {
-    /**
-     * whether model supports file upload
-     */
-    files?: boolean;
-    /**
-     * whether model supports function call
-     */
-    functionCall?: boolean;
-    /**
-     * whether model supports vision
-     */
-    vision?: boolean;
-  };
+  abilities?: ModelAbilities;
  config?: AiModelConfig;
  maxOutput?: number;
  pricing?: ChatModelPricing;
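With abilities collapsed onto the shared ModelAbilities interface, reasoning becomes a first-class flag next to functionCall and vision, which the updated config files (deepseek.ts, hunyuan.ts, qwen.ts) presumably set per model. A hypothetical card shape for illustration; every value below is made up, not taken from the package:

// Sketch: a chat model card using the new reasoning ability flag.
// All values are hypothetical; see src/config/aiModels/*.ts for the real entries.
const exampleReasonerCard = {
  abilities: {
    functionCall: false,
    reasoning: true, // drives the "deep thinking" tag in ModelSelect
  },
  contextWindowTokens: 65_536,
  displayName: 'Example Reasoner',
  enabled: true,
  id: 'example-reasoner',
  type: 'chat' as const,
};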
@@ -0,0 +1,59 @@
+ export interface ModelReasoning {
+   content?: string;
+   duration?: number;
+ }
+
+ export type MessageRoleType = 'user' | 'system' | 'assistant' | 'tool';
+
+ export interface MessageItem {
+   agentId: string | null;
+   clientId: string | null;
+   content: string | null;
+   createdAt: Date;
+   error: any | null;
+   favorite: boolean | null;
+   id: string;
+   model: string | null;
+   observationId: string | null;
+   parentId: string | null;
+   provider: string | null;
+   quotaId: string | null;
+   reasoning: ModelReasoning | null;
+   role: string;
+   sessionId: string | null;
+   threadId: string | null;
+   // jsonb type
+   tools: any | null;
+   topicId: string | null;
+   // jsonb type
+   traceId: string | null;
+   updatedAt: Date;
+   userId: string;
+ }
+
+ export interface NewMessage {
+   agentId?: string | null;
+   clientId?: string | null;
+   content?: string | null;
+   createdAt?: Date;
+   // optional because it has a default value
+   error?: any | null;
+   favorite?: boolean;
+   id?: string;
+   model?: string | null;
+   observationId?: string | null;
+   parentId?: string | null;
+   provider?: string | null;
+   quotaId?: string | null;
+   // optional because it has a default function
+   role: 'user' | 'system' | 'assistant' | 'tool';
+   // required because it's notNull
+   sessionId?: string | null;
+   threadId?: string | null;
+   tools?: any | null;
+   topicId?: string | null;
+   traceId?: string | null;
+   // optional because it's generated
+   updatedAt?: Date;
+   userId: string; // optional because it's generated
+ }