@lobehub/chat 1.64.3 → 1.65.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (72)
  1. package/CHANGELOG.md +58 -0
  2. package/README.md +1 -1
  3. package/changelog/v1.json +21 -0
  4. package/locales/ar/chat.json +7 -1
  5. package/locales/ar/models.json +6 -9
  6. package/locales/bg-BG/chat.json +7 -1
  7. package/locales/bg-BG/models.json +6 -9
  8. package/locales/de-DE/chat.json +7 -1
  9. package/locales/de-DE/models.json +6 -9
  10. package/locales/en-US/chat.json +7 -1
  11. package/locales/en-US/models.json +6 -9
  12. package/locales/es-ES/chat.json +8 -2
  13. package/locales/es-ES/models.json +6 -9
  14. package/locales/fa-IR/chat.json +7 -1
  15. package/locales/fa-IR/models.json +6 -3
  16. package/locales/fr-FR/chat.json +7 -1
  17. package/locales/fr-FR/models.json +6 -9
  18. package/locales/it-IT/chat.json +7 -1
  19. package/locales/it-IT/models.json +6 -9
  20. package/locales/ja-JP/chat.json +7 -1
  21. package/locales/ja-JP/models.json +6 -9
  22. package/locales/ko-KR/chat.json +7 -1
  23. package/locales/ko-KR/models.json +6 -9
  24. package/locales/nl-NL/chat.json +8 -2
  25. package/locales/nl-NL/models.json +6 -9
  26. package/locales/pl-PL/chat.json +7 -1
  27. package/locales/pl-PL/models.json +6 -9
  28. package/locales/pt-BR/chat.json +7 -1
  29. package/locales/pt-BR/models.json +6 -9
  30. package/locales/ru-RU/chat.json +8 -2
  31. package/locales/ru-RU/models.json +6 -9
  32. package/locales/tr-TR/chat.json +7 -1
  33. package/locales/tr-TR/models.json +6 -9
  34. package/locales/vi-VN/chat.json +7 -1
  35. package/locales/vi-VN/models.json +6 -9
  36. package/locales/zh-CN/chat.json +7 -1
  37. package/locales/zh-CN/models.json +6 -9
  38. package/locales/zh-TW/chat.json +7 -1
  39. package/locales/zh-TW/models.json +6 -9
  40. package/package.json +2 -2
  41. package/src/app/(backend)/middleware/auth/index.ts +6 -0
  42. package/src/config/aiModels/anthropic.ts +5 -2
  43. package/src/config/aiModels/google.ts +7 -0
  44. package/src/const/message.ts +3 -0
  45. package/src/const/settings/agent.ts +2 -0
  46. package/src/features/ChatInput/ActionBar/Model/ControlsForm.tsx +38 -13
  47. package/src/features/ChatInput/ActionBar/Model/ReasoningTokenSlider.tsx +92 -0
  48. package/src/features/ChatInput/ActionBar/Model/index.tsx +13 -18
  49. package/src/libs/agent-runtime/anthropic/index.ts +32 -14
  50. package/src/libs/agent-runtime/google/index.test.ts +8 -0
  51. package/src/libs/agent-runtime/google/index.ts +18 -5
  52. package/src/libs/agent-runtime/types/chat.ts +16 -2
  53. package/src/libs/agent-runtime/utils/anthropicHelpers.test.ts +113 -0
  54. package/src/libs/agent-runtime/utils/anthropicHelpers.ts +7 -4
  55. package/src/libs/agent-runtime/utils/streams/anthropic.test.ts +371 -0
  56. package/src/libs/agent-runtime/utils/streams/anthropic.ts +80 -30
  57. package/src/libs/agent-runtime/utils/streams/openai.test.ts +181 -0
  58. package/src/libs/agent-runtime/utils/streams/openai.ts +40 -30
  59. package/src/libs/agent-runtime/utils/streams/protocol.ts +8 -0
  60. package/src/locales/default/chat.ts +7 -1
  61. package/src/services/__tests__/chat.test.ts +89 -50
  62. package/src/services/chat.ts +39 -1
  63. package/src/store/agent/slices/chat/__snapshots__/selectors.test.ts.snap +2 -0
  64. package/src/store/aiInfra/slices/aiModel/selectors.ts +6 -6
  65. package/src/store/chat/slices/aiChat/actions/generateAIChat.ts +1 -1
  66. package/src/store/user/slices/settings/selectors/__snapshots__/settings.test.ts.snap +2 -0
  67. package/src/types/agent/index.ts +23 -9
  68. package/src/types/aiModel.ts +3 -8
  69. package/src/types/message/base.ts +1 -0
  70. package/src/utils/fetch/__tests__/fetchSSE.test.ts +113 -10
  71. package/src/utils/fetch/fetchSSE.ts +12 -3
  72. package/src/features/ChatInput/ActionBar/Model/ExtendControls.tsx +0 -40
@@ -1576,5 +1576,186 @@ describe('OpenAIStream', () => {
       ].map((i) => `${i}\n`),
     );
   });
+
+  it('should handle claude reasoning in litellm openai mode', async () => {
+    const data = [
+      {
+        id: '1',
+        created: 1740505568,
+        model: 'claude-3-7-sonnet-latest',
+        object: 'chat.completion.chunk',
+        choices: [
+          {
+            index: 0,
+            delta: {
+              thinking_blocks: [
+                { type: 'thinking', thinking: '我需要找94的所有质', signature_delta: null },
+              ],
+              reasoning_content: '我需要找出394的所有质',
+              content: '',
+              role: 'assistant',
+            },
+          },
+        ],
+        thinking_blocks: [
+          { type: 'thinking', thinking: '我需要找94的所有质', signature_delta: null },
+        ],
+      },
+      {
+        id: '1',
+        created: 1740505569,
+        model: 'claude-3-7-sonnet-latest',
+        object: 'chat.completion.chunk',
+        choices: [
+          {
+            index: 0,
+            delta: {
+              thinking_blocks: [
+                { type: 'thinking', thinking: '因数。\n质因数是', signature_delta: null },
+              ],
+              reasoning_content: '因数。\n\n质因数是',
+              content: '',
+            },
+          },
+        ],
+        thinking_blocks: [
+          { type: 'thinking', thinking: '因数。\n\n质因数是', signature_delta: null },
+        ],
+      },
+      {
+        id: '1',
+        created: 1740505569,
+        model: 'claude-3-7-sonnet-latest',
+        object: 'chat.completion.chunk',
+        choices: [
+          {
+            index: 0,
+            delta: {
+              thinking_blocks: [
+                { type: 'thinking', thinking: '÷ 2 = 197', signature_delta: null },
+              ],
+              reasoning_content: '÷ 2 = 197',
+              content: '',
+            },
+          },
+        ],
+        thinking_blocks: [{ type: 'thinking', thinking: '÷ 2 = 197', signature_delta: null }],
+      },
+      {
+        id: '1',
+        created: 1740505571,
+        model: 'claude-3-7-sonnet-latest',
+        object: 'chat.completion.chunk',
+        choices: [
+          {
+            index: 0,
+            delta: {
+              thinking_blocks: [
+                { type: 'thinking', thinking: '197。\n394 = 2 ', signature_delta: null },
+              ],
+              reasoning_content: '197。\n394 = 2 ',
+              content: '',
+            },
+          },
+        ],
+        thinking_blocks: [{ type: 'thinking', thinking: '\n394 = 2 ', signature_delta: null }],
+      },
+      {
+        id: '1',
+        created: 1740505571,
+        model: 'claude-3-7-sonnet-latest',
+        object: 'chat.completion.chunk',
+        choices: [
+          {
+            index: 0,
+            delta: {
+              content: '',
+              tool_calls: [{ function: { arguments: '{}' }, type: 'function', index: -1 }],
+            },
+          },
+        ],
+      },
+      {
+        id: '1',
+        created: 1740505571,
+        model: 'claude-3-7-sonnet-latest',
+        object: 'chat.completion.chunk',
+        choices: [{ index: 0, delta: { content: '要找出394的质因数,我需要将' } }],
+      },
+      {
+        id: '1',
+        created: 1740505571,
+        model: 'claude-3-7-sonnet-latest',
+        object: 'chat.completion.chunk',
+        choices: [{ index: 0, delta: { content: '394分解为质数的乘积' } }],
+      },
+      {
+        id: '1',
+        created: 1740505573,
+        model: 'claude-3-7-sonnet-latest',
+        object: 'chat.completion.chunk',
+        choices: [{ index: 0, delta: { content: '2和197。' } }],
+      },
+      {
+        id: '1',
+        created: 1740505573,
+        model: 'claude-3-7-sonnet-latest',
+        object: 'chat.completion.chunk',
+        choices: [{ finish_reason: 'stop', index: 0, delta: {} }],
+      },
+    ];
+
+    const mockOpenAIStream = new ReadableStream({
+      start(controller) {
+        data.forEach((chunk) => {
+          controller.enqueue(chunk);
+        });
+
+        controller.close();
+      },
+    });
+
+    const protocolStream = OpenAIStream(mockOpenAIStream);
+
+    const decoder = new TextDecoder();
+    const chunks = [];
+
+    // @ts-ignore
+    for await (const chunk of protocolStream) {
+      chunks.push(decoder.decode(chunk, { stream: true }));
+    }
+
+    expect(chunks).toEqual(
+      [
+        'id: 1',
+        'event: reasoning',
+        `data: "我需要找出394的所有质"\n`,
+        'id: 1',
+        'event: reasoning',
+        `data: "因数。\\n\\n质因数是"\n`,
+        'id: 1',
+        'event: reasoning',
+        `data: "÷ 2 = 197"\n`,
+        'id: 1',
+        'event: reasoning',
+        `data: "197。\\n394 = 2 "\n`,
+        'id: 1',
+        'event: text',
+        `data: ""\n`,
+        'id: 1',
+        'event: text',
+        `data: "要找出394的质因数,我需要将"\n`,
+        'id: 1',
+        'event: text',
+        `data: "394分解为质数的乘积"\n`,
+        'id: 1',
+        'event: text',
+        `data: "2和197。"\n`,
+        'id: 1',
+        'event: stop',
+        `data: "stop"\n`,
+      ].map((i) => `${i}\n`),
+    );
+  });
 });
 });
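The expected chunks above follow the package's internal SSE-style stream protocol: each protocol chunk is serialized as an `id:` line, an `event:` line (`reasoning`, `text`, `stop`, and so on), and a JSON-encoded `data:` line, each newline-terminated. A minimal sketch of that framing (the helper name `encodeProtocolChunk` is illustrative, not an actual export of the package):

// Illustrative only: reproduces the id/event/data framing asserted in the test above.
const encodeProtocolChunk = (chunk: { id: string; type: string; data: unknown }): string =>
  [`id: ${chunk.id}`, `event: ${chunk.type}`, `data: ${JSON.stringify(chunk.data)}\n`]
    .map((line) => `${line}\n`)
    .join('');

// encodeProtocolChunk({ id: '1', type: 'reasoning', data: '÷ 2 = 197' })
// => 'id: 1\nevent: reasoning\ndata: "÷ 2 = 197"\n\n'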
@@ -46,36 +46,46 @@ export const transformOpenAIStream = (
 
   // tools calling
   if (typeof item.delta?.tool_calls === 'object' && item.delta.tool_calls?.length > 0) {
-    return {
-      data: item.delta.tool_calls.map((value, index): StreamToolCallChunkData => {
-        if (streamContext && !streamContext.tool) {
-          streamContext.tool = { id: value.id!, index: value.index, name: value.function!.name! };
-        }
-
-        return {
-          function: {
-            arguments: value.function?.arguments ?? '{}',
-            name: value.function?.name ?? null,
-          },
-          id:
-            value.id ||
-            streamContext?.tool?.id ||
-            generateToolCallId(index, value.function?.name),
-
-          // mistral's tool calling don't have index and function field, it's data like:
-          // [{"id":"xbhnmTtY7","function":{"name":"lobe-image-designer____text2image____builtin","arguments":"{\"prompts\": [\"A photo of a small, fluffy dog with a playful expression and wagging tail.\", \"A watercolor painting of a small, energetic dog with a glossy coat and bright eyes.\", \"A vector illustration of a small, adorable dog with a short snout and perky ears.\", \"A drawing of a small, scruffy dog with a mischievous grin and a wagging tail.\"], \"quality\": \"standard\", \"seeds\": [123456, 654321, 111222, 333444], \"size\": \"1024x1024\", \"style\": \"vivid\"}"}}]
-
-          // minimax's tool calling don't have index field, it's data like:
-          // [{"id":"call_function_4752059746","type":"function","function":{"name":"lobe-image-designer____text2image____builtin","arguments":"{\"prompts\": [\"一个流浪的地球,背景是浩瀚"}}]
-
-          // so we need to add these default values
-          index: typeof value.index !== 'undefined' ? value.index : index,
-          type: value.type || 'function',
-        };
-      }),
-      id: chunk.id,
-      type: 'tool_calls',
-    } as StreamProtocolToolCallChunk;
+    const tool_calls = item.delta.tool_calls.filter(
+      (value) => value.index >= 0 || typeof value.index === 'undefined',
+    );
+
+    if (tool_calls.length > 0) {
+      return {
+        data: item.delta.tool_calls.map((value, index): StreamToolCallChunkData => {
+          if (streamContext && !streamContext.tool) {
+            streamContext.tool = {
+              id: value.id!,
+              index: value.index,
+              name: value.function!.name!,
+            };
+          }
+
+          return {
+            function: {
+              arguments: value.function?.arguments ?? '{}',
+              name: value.function?.name ?? null,
+            },
+            id:
+              value.id ||
+              streamContext?.tool?.id ||
+              generateToolCallId(index, value.function?.name),
+
+            // mistral's tool calling don't have index and function field, it's data like:
+            // [{"id":"xbhnmTtY7","function":{"name":"lobe-image-designer____text2image____builtin","arguments":"{\"prompts\": [\"A photo of a small, fluffy dog with a playful expression and wagging tail.\", \"A watercolor painting of a small, energetic dog with a glossy coat and bright eyes.\", \"A vector illustration of a small, adorable dog with a short snout and perky ears.\", \"A drawing of a small, scruffy dog with a mischievous grin and a wagging tail.\"], \"quality\": \"standard\", \"seeds\": [123456, 654321, 111222, 333444], \"size\": \"1024x1024\", \"style\": \"vivid\"}"}}]
+
+            // minimax's tool calling don't have index field, it's data like:
+            // [{"id":"call_function_4752059746","type":"function","function":{"name":"lobe-image-designer____text2image____builtin","arguments":"{\"prompts\": [\"一个流浪的地球,背景是浩瀚"}}]
+
+            // so we need to add these default values
+            index: typeof value.index !== 'undefined' ? value.index : index,
+            type: value.type || 'function',
+          };
+        }),
+        id: chunk.id,
+        type: 'tool_calls',
+      } as StreamProtocolToolCallChunk;
+    }
   }
 
   // 给定结束原因
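The new guard exists because litellm's OpenAI-compatible mode can emit Claude thinking deltas as pseudo tool calls with `index: -1` (see the test fixture above, where `tool_calls: [{ function: { arguments: '{}' }, type: 'function', index: -1 }]`). Filtering on `value.index >= 0 || typeof value.index === 'undefined'` keeps real tool calls, including providers such as mistral and minimax that omit `index` entirely, while dropping the placeholder, so the chunk falls through and is emitted as an empty text event instead. Note the inner map still iterates `item.delta.tool_calls`; the filter result only gates whether the branch emits at all. A condensed sketch of the predicate (illustrative, standalone):

// Keep tool calls with a real index, or with no index at all (mistral/minimax style);
// drop litellm's index: -1 thinking placeholders.
const isRealToolCall = (value: { index?: number }) =>
  typeof value.index === 'undefined' || value.index >= 0;

isRealToolCall({ index: 0 });  // true  — normal OpenAI tool call
isRealToolCall({});            // true  — provider omitted index
isRealToolCall({ index: -1 }); // false — litellm thinking placeholder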
@@ -12,6 +12,10 @@ export interface StreamContext {
    * this flag is used to check if the pplx citation is returned,and then not return it again
    */
   returnedPplxCitation?: boolean;
+  thinking?: {
+    id: string;
+    name: string;
+  };
   tool?: {
     id: string;
     index: number;
@@ -29,6 +33,10 @@ export interface StreamProtocolChunk {
     | 'tool_calls'
     // Model Thinking
     | 'reasoning'
+    // use for reasoning signature, maybe only anthropic
+    | 'reasoning_signature'
+    // flagged reasoning signature
+    | 'flagged_reasoning_signature'
     // Search or Grounding
     | 'grounding'
     // stop signal
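Downstream consumers switch on `type` to route protocol chunks; a minimal sketch of how the two new signature variants might be consumed (hedged — only `type` and `data` come from the StreamProtocolChunk shape above, the handler itself is illustrative):

// Illustrative consumer: keep the Anthropic thinking signature alongside the
// accumulated reasoning text so it can be replayed in follow-up requests
// (see the ChatService assistant-message change later in this diff).
type ProtocolEvent = { type: string; data: unknown };

const handleChunk = (event: ProtocolEvent, state: { reasoning: string; signature?: string }) => {
  switch (event.type) {
    case 'reasoning':
      state.reasoning += event.data as string;
      break;
    case 'reasoning_signature':
    case 'flagged_reasoning_signature':
      state.signature = event.data as string;
      break;
  }
};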
@@ -32,7 +32,13 @@ export default {
   },
   duplicateTitle: '{{title}} 副本',
   emptyAgent: '暂无助手',
-  extendControls: {
+  extendParams: {
+    enableReasoning: {
+      title: '开启深度思考',
+    },
+    reasoningBudgetToken: {
+      title: '思考消耗 Token',
+    },
     title: '模型扩展功能',
   },
   historyRange: '历史范围',
@@ -635,7 +635,7 @@ describe('ChatService', () => {
     });
   });
 
-  describe('processMessage', () => {
+  describe('reorderToolMessages', () => {
     it('should reorderToolMessages', () => {
       const input: OpenAIChatMessage[] = [
         {
@@ -746,7 +746,9 @@ describe('ChatService', () => {
       },
     ]);
   });
+  });
 
+  describe('processMessage', () => {
   describe('handle with files content in server mode', () => {
     it('should includes files', async () => {
       // 重新模拟模块,设置 isServerMode 为 true
@@ -833,46 +835,45 @@ describe('ChatService', () => {
         },
       ]);
     });
-  });
 
-  it('should include image files in server mode', async () => {
-    // 重新模拟模块,设置 isServerMode 为 true
-    vi.doMock('@/const/version', () => ({
-      isServerMode: true,
-      isDeprecatedEdition: true,
-    }));
+    it('should include image files in server mode', async () => {
+      // 重新模拟模块,设置 isServerMode 为 true
+      vi.doMock('@/const/version', () => ({
+        isServerMode: true,
+        isDeprecatedEdition: true,
+      }));
 
-    // 需要在修改模拟后重新导入相关模块
-    const { chatService } = await import('../chat');
-    const messages = [
-      {
-        content: 'Hello',
-        role: 'user',
-        imageList: [
-          {
-            id: 'file1',
-            url: 'http://example.com/image.jpg',
-            alt: 'abc.png',
-          },
-        ],
-      }, // Message with files
-      { content: 'Hey', role: 'assistant' }, // Regular user message
-    ] as ChatMessage[];
+      // 需要在修改模拟后重新导入相关模块
+      const { chatService } = await import('../chat');
+      const messages = [
+        {
+          content: 'Hello',
+          role: 'user',
+          imageList: [
+            {
+              id: 'file1',
+              url: 'http://example.com/image.jpg',
+              alt: 'abc.png',
+            },
+          ],
+        }, // Message with files
+        { content: 'Hey', role: 'assistant' }, // Regular user message
+      ] as ChatMessage[];
 
-    const getChatCompletionSpy = vi.spyOn(chatService, 'getChatCompletion');
-    await chatService.createAssistantMessage({
-      messages,
-      plugins: [],
-      model: 'gpt-4-vision-preview',
-    });
+      const getChatCompletionSpy = vi.spyOn(chatService, 'getChatCompletion');
+      await chatService.createAssistantMessage({
+        messages,
+        plugins: [],
+        model: 'gpt-4-vision-preview',
+      });
 
-    expect(getChatCompletionSpy).toHaveBeenCalledWith(
-      {
-        messages: [
-          {
-            content: [
-              {
-                text: `Hello
+      expect(getChatCompletionSpy).toHaveBeenCalledWith(
+        {
+          messages: [
+            {
+              content: [
+                {
+                  text: `Hello
 
 <!-- SYSTEM CONTEXT (NOT PART OF USER QUERY) -->
 <context.instruction>following part contains context information injected by the system. Please follow these instructions:
@@ -888,24 +889,61 @@ describe('ChatService', () => {
 
 </files_info>
 <!-- END SYSTEM CONTEXT -->`,
-                type: 'text',
-              },
-              {
-                image_url: { detail: 'auto', url: 'http://example.com/image.jpg' },
-                type: 'image_url',
-              },
-            ],
-            role: 'user',
+                  type: 'text',
+                },
+                {
+                  image_url: { detail: 'auto', url: 'http://example.com/image.jpg' },
+                  type: 'image_url',
+                },
+              ],
+              role: 'user',
+            },
+            {
+              content: 'Hey',
+              role: 'assistant',
+            },
+          ],
+          model: 'gpt-4-vision-preview',
+        },
+        undefined,
+      );
+    });
+  });
+
+  it('should handle assistant messages with reasoning correctly', () => {
+    const messages = [
+      {
+        role: 'assistant',
+        content: 'The answer is 42.',
+        reasoning: {
+          content: 'I need to calculate the answer to life, universe, and everything.',
+          signature: 'thinking_process',
+        },
+      },
+    ] as ChatMessage[];
+
+    const result = chatService['processMessages']({
+      messages,
+      model: 'gpt-4',
+      provider: 'openai',
+    });
+
+    expect(result).toEqual([
+      {
+        content: [
+          {
+            signature: 'thinking_process',
+            thinking: 'I need to calculate the answer to life, universe, and everything.',
+            type: 'thinking',
           },
           {
-            content: 'Hey',
-            role: 'assistant',
+            text: 'The answer is 42.',
+            type: 'text',
           },
         ],
-        model: 'gpt-4-vision-preview',
+        role: 'assistant',
       },
-      undefined,
-    );
+    ]);
   });
   });
   });
@@ -917,6 +955,7 @@ describe('ChatService', () => {
 vi.mock('../_auth', async (importOriginal) => {
   return importOriginal();
 });
+
 describe('AgentRuntimeOnClient', () => {
   describe('initializeWithClientStore', () => {
     describe('should initialize with options correctly', () => {
@@ -214,9 +214,35 @@ class ChatService {
 
     const tools = shouldUseTools ? filterTools : undefined;
 
+    // ============ 3. process extend params ============ //
+
+    let extendParams: Record<string, any> = {};
+
+    const isModelHasExtendParams = aiModelSelectors.isModelHasExtendParams(
+      payload.model,
+      payload.provider!,
+    )(useAiInfraStore.getState());
+
+    // model
+    if (isModelHasExtendParams) {
+      const modelExtendParams = aiModelSelectors.modelExtendParams(
+        payload.model,
+        payload.provider!,
+      )(useAiInfraStore.getState());
+      // if model has extended params, then we need to check if the model can use reasoning
+
+      if (modelExtendParams!.includes('enableReasoning') && chatConfig.enableReasoning) {
+        extendParams.thinking = {
+          budget_tokens: chatConfig.reasoningBudgetToken || 1024,
+          type: 'enabled',
+        };
+      }
+    }
+
     return this.getChatCompletion(
       {
         ...params,
+        ...extendParams,
         enabledSearch: enabledSearch && isModelHasBuiltinSearch ? true : undefined,
         messages: oaiMessages,
         tools,
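With this in place, a request for a model that declares `enableReasoning` in its `extendParams` (and whose agent chat config has reasoning turned on) carries Anthropic's extended-thinking parameter. A sketch of the resulting payload shape (values illustrative; `budget_tokens` falls back to 1024 per the code above):

// Illustrative payload produced by the extendParams branch above.
const payload = {
  model: 'claude-3-7-sonnet-latest',
  messages: [{ role: 'user', content: 'What are the prime factors of 394?' }],
  thinking: {
    budget_tokens: 1024, // chatConfig.reasoningBudgetToken || 1024
    type: 'enabled',
  },
};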
@@ -459,8 +485,20 @@ class ChatService {
       }
 
       case 'assistant': {
+        // signature is a signal of anthropic thinking mode
+        const shouldIncludeThinking = m.reasoning && !!m.reasoning?.signature;
+
         return {
-          content: m.content,
+          content: shouldIncludeThinking
+            ? [
+                {
+                  signature: m.reasoning!.signature,
+                  thinking: m.reasoning!.content,
+                  type: 'thinking',
+                } as any,
+                { text: m.content, type: 'text' },
+              ]
+            : m.content,
           role: m.role,
           tool_calls: m.tools?.map(
             (tool): MessageToolCall => ({
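This mirrors the new processMessages test earlier in the diff: an assistant message whose reasoning carries a signature is converted from plain string content into an array with a `thinking` block followed by a `text` block, the shape Anthropic expects when prior thinking turns are replayed in extended-thinking mode. Illustrative before/after:

// Stored message (lobe-chat's internal shape):
const stored = {
  role: 'assistant',
  content: 'The answer is 42.',
  reasoning: {
    content: 'I need to calculate the answer to life, universe, and everything.',
    signature: 'thinking_process',
  },
};

// Wire message sent to the provider:
const wire = {
  role: 'assistant',
  content: [
    {
      signature: 'thinking_process',
      thinking: 'I need to calculate the answer to life, universe, and everything.',
      type: 'thinking',
    },
    { text: 'The answer is 42.', type: 'text' },
  ],
};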
@@ -8,7 +8,9 @@ exports[`agentSelectors > defaultAgentConfig > should merge DEFAULT_AGENT_CONFIG
   "enableAutoCreateTopic": true,
   "enableCompressHistory": true,
   "enableHistoryCount": true,
+  "enableReasoning": true,
   "historyCount": 8,
+  "reasoningBudgetToken": 1024,
   "searchMode": "off",
 },
 "model": "gpt-3.5-turbo",
@@ -70,14 +70,14 @@ const modelContextWindowTokens = (id: string, provider: string) => (s: AIProviderStoreState) => {
   return model?.contextWindowTokens;
 };
 
-const modelExtendControls = (id: string, provider: string) => (s: AIProviderStoreState) => {
+const modelExtendParams = (id: string, provider: string) => (s: AIProviderStoreState) => {
   const model = getEnabledModelById(id, provider)(s);
 
-  return model?.settings?.extendControls;
+  return model?.settings?.extendParams;
 };
 
-const isModelHasExtendControls = (id: string, provider: string) => (s: AIProviderStoreState) => {
-  const controls = modelExtendControls(id, provider)(s);
+const isModelHasExtendParams = (id: string, provider: string) => (s: AIProviderStoreState) => {
+  const controls = modelExtendParams(id, provider)(s);
 
   return !!controls && controls.length > 0;
 };
@@ -119,13 +119,13 @@ export const aiModelSelectors = {
   isModelHasBuiltinSearch,
   isModelHasBuiltinSearchConfig,
   isModelHasContextWindowToken,
-  isModelHasExtendControls,
+  isModelHasExtendParams,
   isModelLoading,
   isModelSupportReasoning,
   isModelSupportToolUse,
   isModelSupportVision,
   modelBuiltinSearchImpl,
   modelContextWindowTokens,
-  modelExtendControls,
+  modelExtendParams,
   totalAiProviderModelList,
 };
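Call sites consume the renamed selectors exactly as before, only under the new names. A usage sketch following the curried-selector pattern in the chat.ts hunk above (the import path is assumed from the file layout, and the example return value is illustrative):

// Import path assumed from package/src/store/aiInfra; adjust to the real barrel export.
import { aiModelSelectors, useAiInfraStore } from '@/store/aiInfra';

const state = useAiInfraStore.getState();
const hasExtendParams = aiModelSelectors.isModelHasExtendParams(
  'claude-3-7-sonnet-latest',
  'anthropic',
)(state);
const params = hasExtendParams
  ? aiModelSelectors.modelExtendParams('claude-3-7-sonnet-latest', 'anthropic')(state)
  : undefined; // e.g. ['enableReasoning', 'reasoningBudgetToken'] per model config (illustrative)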
@@ -472,7 +472,7 @@ export const generateAIChat: StateCreator<
     // update the content after fetch result
     await internal_updateMessageContent(messageId, content, {
       toolCalls,
-      reasoning: !!reasoning ? { content: reasoning, duration } : undefined,
+      reasoning: !!reasoning ? { ...reasoning, duration } : undefined,
       search: !!grounding?.citations ? grounding : undefined,
     });
   },
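Previously `reasoning` was a plain string and had to be wrapped as `{ content: reasoning }`; after the fetchSSE changes in this release it arrives as an object (content plus, per the types/message/base.ts change, an optional signature), so spreading it preserves both fields next to `duration`. Illustrative shape:

// Before: reasoning was a string          → saved as { content: reasoning, duration }
// After:  reasoning is { content, signature? } → saved as { ...reasoning, duration }
const reasoning = { content: 'I need to calculate the factors.', signature: 'thinking_process' };
const saved = { ...reasoning, duration: 1234 }; // { content, signature, duration }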
@@ -75,7 +75,9 @@ exports[`settingsSelectors > defaultAgent > should merge DEFAULT_AGENT and s.set
   "enableAutoCreateTopic": true,
   "enableCompressHistory": true,
   "enableHistoryCount": true,
+  "enableReasoning": true,
   "historyCount": 8,
+  "reasoningBudgetToken": 1024,
   "searchMode": "off",
 },
 "model": "gpt-3.5-turbo",
@@ -55,33 +55,45 @@ export interface LobeAgentConfig {
   tts: LobeAgentTTSConfig;
 }
 
+/* eslint-disable sort-keys-fix/sort-keys-fix, typescript-sort-keys/interface */
+
 export interface LobeAgentChatConfig {
-  autoCreateTopicThreshold: number;
   displayMode?: 'chat' | 'docs';
+
   enableAutoCreateTopic?: boolean;
-  /**
-   * 历史消息长度压缩阈值
-   */
-  enableCompressHistory?: boolean;
-  /**
-   * 开启历史记录条数
-   */
-  enableHistoryCount?: boolean;
+  autoCreateTopicThreshold: number;
+
   enableMaxTokens?: boolean;
 
+  /**
+   * 是否开启推理
+   */
+  enableReasoning?: boolean;
   /**
    * 自定义推理强度
    */
   enableReasoningEffort?: boolean;
+  reasoningBudgetToken?: number;
 
   /**
    * 历史消息条数
   */
   historyCount?: number;
+  /**
+   * 开启历史记录条数
+   */
+  enableHistoryCount?: boolean;
+  /**
+   * 历史消息长度压缩阈值
+   */
+  enableCompressHistory?: boolean;
+
   inputTemplate?: string;
+
   searchMode?: SearchMode;
   useModelBuiltinSearch?: boolean;
 }
+/* eslint-enable */
 
 export const AgentChatConfigSchema = z.object({
   autoCreateTopicThreshold: z.number().default(2),
@@ -90,8 +102,10 @@ export const AgentChatConfigSchema = z.object({
   enableCompressHistory: z.boolean().optional(),
   enableHistoryCount: z.boolean().optional(),
   enableMaxTokens: z.boolean().optional(),
+  enableReasoning: z.boolean().optional(),
   enableReasoningEffort: z.boolean().optional(),
   historyCount: z.number().optional(),
+  reasoningBudgetToken: z.number().optional(),
   searchMode: z.enum(['off', 'on', 'auto']).optional(),
 });
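Both new keys are optional in the schema, so previously stored agent configs parse unchanged while new ones can opt in; the defaults come from the agent settings constants (the snapshots above show `enableReasoning: true` and `reasoningBudgetToken: 1024`). A quick parse sketch:

// Both fields are optional; omitting them leaves the keys undefined rather than failing.
const parsed = AgentChatConfigSchema.parse({
  autoCreateTopicThreshold: 2,
  enableReasoning: true,
  reasoningBudgetToken: 2048,
});
// parsed.reasoningBudgetToken === 2048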