@lobehub/chat 1.65.0 → 1.65.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33)
  1. package/CHANGELOG.md +51 -0
  2. package/changelog/v1.json +18 -0
  3. package/docker-compose/local/docker-compose.yml +14 -0
  4. package/docker-compose/local/searxng-settings.yml +2582 -0
  5. package/docker-compose/setup.sh +3 -1
  6. package/docs/self-hosting/advanced/model-list.mdx +4 -2
  7. package/docs/self-hosting/advanced/model-list.zh-CN.mdx +4 -2
  8. package/package.json +7 -7
  9. package/src/app/(backend)/middleware/auth/index.ts +6 -0
  10. package/src/config/aiModels/google.ts +3 -3
  11. package/src/config/aiModels/groq.ts +10 -0
  12. package/src/config/aiModels/qwen.ts +43 -26
  13. package/src/const/message.ts +3 -0
  14. package/src/features/Conversation/Messages/Assistant/Tool/Render/CustomRender.tsx +7 -7
  15. package/src/features/MobileSwitchLoading/index.tsx +0 -1
  16. package/src/libs/agent-runtime/google/index.test.ts +8 -0
  17. package/src/libs/agent-runtime/google/index.ts +18 -5
  18. package/src/libs/agent-runtime/types/chat.ts +9 -1
  19. package/src/libs/agent-runtime/utils/anthropicHelpers.test.ts +113 -0
  20. package/src/libs/agent-runtime/utils/anthropicHelpers.ts +7 -4
  21. package/src/libs/agent-runtime/utils/streams/anthropic.test.ts +339 -94
  22. package/src/libs/agent-runtime/utils/streams/anthropic.ts +54 -34
  23. package/src/libs/agent-runtime/utils/streams/openai.test.ts +181 -0
  24. package/src/libs/agent-runtime/utils/streams/openai.ts +40 -30
  25. package/src/libs/agent-runtime/utils/streams/protocol.ts +4 -0
  26. package/src/services/__tests__/chat.test.ts +89 -50
  27. package/src/services/chat.ts +13 -1
  28. package/src/store/chat/slices/aiChat/actions/generateAIChat.ts +1 -1
  29. package/src/types/message/base.ts +1 -0
  30. package/src/utils/fetch/__tests__/fetchSSE.test.ts +113 -10
  31. package/src/utils/fetch/fetchSSE.ts +12 -3
  32. package/src/utils/parseModels.test.ts +14 -0
  33. package/src/utils/parseModels.ts +4 -0
package/src/libs/agent-runtime/utils/streams/openai.test.ts
@@ -1576,5 +1576,186 @@ describe('OpenAIStream', () => {
         ].map((i) => `${i}\n`),
       );
     });
+
+    it('should handle claude reasoning in litellm openai mode', async () => {
+      const data = [
+        {
+          id: '1',
+          created: 1740505568,
+          model: 'claude-3-7-sonnet-latest',
+          object: 'chat.completion.chunk',
+          choices: [
+            {
+              index: 0,
+              delta: {
+                thinking_blocks: [
+                  { type: 'thinking', thinking: '我需要找94的所有质', signature_delta: null },
+                ],
+                reasoning_content: '我需要找出394的所有质',
+                content: '',
+                role: 'assistant',
+              },
+            },
+          ],
+          thinking_blocks: [
+            { type: 'thinking', thinking: '我需要找94的所有质', signature_delta: null },
+          ],
+        },
+        {
+          id: '1',
+          created: 1740505569,
+          model: 'claude-3-7-sonnet-latest',
+          object: 'chat.completion.chunk',
+          choices: [
+            {
+              index: 0,
+              delta: {
+                thinking_blocks: [
+                  { type: 'thinking', thinking: '因数。\n质因数是', signature_delta: null },
+                ],
+                reasoning_content: '因数。\n\n质因数是',
+                content: '',
+              },
+            },
+          ],
+          thinking_blocks: [
+            { type: 'thinking', thinking: '因数。\n\n质因数是', signature_delta: null },
+          ],
+        },
+        {
+          id: '1',
+          created: 1740505569,
+          model: 'claude-3-7-sonnet-latest',
+          object: 'chat.completion.chunk',
+          choices: [
+            {
+              index: 0,
+              delta: {
+                thinking_blocks: [
+                  { type: 'thinking', thinking: '÷ 2 = 197', signature_delta: null },
+                ],
+                reasoning_content: '÷ 2 = 197',
+                content: '',
+              },
+            },
+          ],
+          thinking_blocks: [{ type: 'thinking', thinking: '÷ 2 = 197', signature_delta: null }],
+        },
+        {
+          id: '1',
+          created: 1740505571,
+          model: 'claude-3-7-sonnet-latest',
+          object: 'chat.completion.chunk',
+          choices: [
+            {
+              index: 0,
+              delta: {
+                thinking_blocks: [
+                  { type: 'thinking', thinking: '197。\n394 = 2 ', signature_delta: null },
+                ],
+                reasoning_content: '197。\n394 = 2 ',
+                content: '',
+              },
+            },
+          ],
+          thinking_blocks: [{ type: 'thinking', thinking: '\n394 = 2 ', signature_delta: null }],
+        },
+        {
+          id: '1',
+          created: 1740505571,
+          model: 'claude-3-7-sonnet-latest',
+          object: 'chat.completion.chunk',
+          choices: [
+            {
+              index: 0,
+              delta: {
+                content: '',
+                tool_calls: [{ function: { arguments: '{}' }, type: 'function', index: -1 }],
+              },
+            },
+          ],
+        },
+        {
+          id: '1',
+          created: 1740505571,
+          model: 'claude-3-7-sonnet-latest',
+          object: 'chat.completion.chunk',
+          choices: [{ index: 0, delta: { content: '要找出394的质因数,我需要将' } }],
+        },
+        {
+          id: '1',
+          created: 1740505571,
+          model: 'claude-3-7-sonnet-latest',
+          object: 'chat.completion.chunk',
+          choices: [{ index: 0, delta: { content: '394分解为质数的乘积' } }],
+        },
+        {
+          id: '1',
+          created: 1740505573,
+          model: 'claude-3-7-sonnet-latest',
+          object: 'chat.completion.chunk',
+          choices: [{ index: 0, delta: { content: '2和197。' } }],
+        },
+        {
+          id: '1',
+          created: 1740505573,
+          model: 'claude-3-7-sonnet-latest',
+          object: 'chat.completion.chunk',
+          choices: [{ finish_reason: 'stop', index: 0, delta: {} }],
+        },
+      ];
+
+      const mockOpenAIStream = new ReadableStream({
+        start(controller) {
+          data.forEach((chunk) => {
+            controller.enqueue(chunk);
+          });
+
+          controller.close();
+        },
+      });
+
+      const protocolStream = OpenAIStream(mockOpenAIStream);
+
+      const decoder = new TextDecoder();
+      const chunks = [];
+
+      // @ts-ignore
+      for await (const chunk of protocolStream) {
+        chunks.push(decoder.decode(chunk, { stream: true }));
+      }
+
+      expect(chunks).toEqual(
+        [
+          'id: 1',
+          'event: reasoning',
+          `data: "我需要找出394的所有质"\n`,
+          'id: 1',
+          'event: reasoning',
+          `data: "因数。\\n\\n质因数是"\n`,
+          'id: 1',
+          'event: reasoning',
+          `data: "÷ 2 = 197"\n`,
+          'id: 1',
+          'event: reasoning',
+          `data: "197。\\n394 = 2 "\n`,
+          'id: 1',
+          'event: text',
+          `data: ""\n`,
+          'id: 1',
+          'event: text',
+          `data: "要找出394的质因数,我需要将"\n`,
+          'id: 1',
+          'event: text',
+          `data: "394分解为质数的乘积"\n`,
+          'id: 1',
+          'event: text',
+          `data: "2和197。"\n`,
+          'id: 1',
+          'event: stop',
+          `data: "stop"\n`,
+        ].map((i) => `${i}\n`),
+      );
+    });
   });
 });
package/src/libs/agent-runtime/utils/streams/openai.ts
@@ -46,36 +46,46 @@ export const transformOpenAIStream = (
 
   // tools calling
   if (typeof item.delta?.tool_calls === 'object' && item.delta.tool_calls?.length > 0) {
-    return {
-      data: item.delta.tool_calls.map((value, index): StreamToolCallChunkData => {
-        if (streamContext && !streamContext.tool) {
-          streamContext.tool = { id: value.id!, index: value.index, name: value.function!.name! };
-        }
-
-        return {
-          function: {
-            arguments: value.function?.arguments ?? '{}',
-            name: value.function?.name ?? null,
-          },
-          id:
-            value.id ||
-            streamContext?.tool?.id ||
-            generateToolCallId(index, value.function?.name),
-
-          // mistral's tool calling don't have index and function field, it's data like:
-          // [{"id":"xbhnmTtY7","function":{"name":"lobe-image-designer____text2image____builtin","arguments":"{\"prompts\": [\"A photo of a small, fluffy dog with a playful expression and wagging tail.\", \"A watercolor painting of a small, energetic dog with a glossy coat and bright eyes.\", \"A vector illustration of a small, adorable dog with a short snout and perky ears.\", \"A drawing of a small, scruffy dog with a mischievous grin and a wagging tail.\"], \"quality\": \"standard\", \"seeds\": [123456, 654321, 111222, 333444], \"size\": \"1024x1024\", \"style\": \"vivid\"}"}}]
-
-          // minimax's tool calling don't have index field, it's data like:
-          // [{"id":"call_function_4752059746","type":"function","function":{"name":"lobe-image-designer____text2image____builtin","arguments":"{\"prompts\": [\"一个流浪的地球,背景是浩瀚"}}]
-
-          // so we need to add these default values
-          index: typeof value.index !== 'undefined' ? value.index : index,
-          type: value.type || 'function',
-        };
-      }),
-      id: chunk.id,
-      type: 'tool_calls',
-    } as StreamProtocolToolCallChunk;
+    const tool_calls = item.delta.tool_calls.filter(
+      (value) => value.index >= 0 || typeof value.index === 'undefined',
+    );
+
+    if (tool_calls.length > 0) {
+      return {
+        data: item.delta.tool_calls.map((value, index): StreamToolCallChunkData => {
+          if (streamContext && !streamContext.tool) {
+            streamContext.tool = {
+              id: value.id!,
+              index: value.index,
+              name: value.function!.name!,
+            };
+          }
+
+          return {
+            function: {
+              arguments: value.function?.arguments ?? '{}',
+              name: value.function?.name ?? null,
+            },
+            id:
+              value.id ||
+              streamContext?.tool?.id ||
+              generateToolCallId(index, value.function?.name),
+
+            // mistral's tool calling don't have index and function field, it's data like:
+            // [{"id":"xbhnmTtY7","function":{"name":"lobe-image-designer____text2image____builtin","arguments":"{\"prompts\": [\"A photo of a small, fluffy dog with a playful expression and wagging tail.\", \"A watercolor painting of a small, energetic dog with a glossy coat and bright eyes.\", \"A vector illustration of a small, adorable dog with a short snout and perky ears.\", \"A drawing of a small, scruffy dog with a mischievous grin and a wagging tail.\"], \"quality\": \"standard\", \"seeds\": [123456, 654321, 111222, 333444], \"size\": \"1024x1024\", \"style\": \"vivid\"}"}}]
+
+            // minimax's tool calling don't have index field, it's data like:
+            // [{"id":"call_function_4752059746","type":"function","function":{"name":"lobe-image-designer____text2image____builtin","arguments":"{\"prompts\": [\"一个流浪的地球,背景是浩瀚"}}]
+
+            // so we need to add these default values
+            index: typeof value.index !== 'undefined' ? value.index : index,
+            type: value.type || 'function',
+          };
+        }),
+        id: chunk.id,
+        type: 'tool_calls',
+      } as StreamProtocolToolCallChunk;
+    }
   }
 
   // 给定结束原因
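
The new guard targets the LiteLLM placeholder visible in the test data earlier in this diff: while proxying Claude's thinking stream, LiteLLM emits a tool_calls entry with index: -1 and empty arguments, which previously surfaced as a phantom tool call. A minimal sketch of the predicate follows; DeltaToolCall and isUsableToolCall are illustrative stand-ins, not the package's actual types or exports.

interface DeltaToolCall {
  index?: number;
  id?: string;
  type?: string;
  function?: { name?: string; arguments?: string };
}

// Keep entries without an index (mistral/minimax style) as well as real
// indexed entries; drop LiteLLM's index: -1 placeholder.
const isUsableToolCall = (value: DeltaToolCall) =>
  typeof value.index === 'undefined' || value.index >= 0;

isUsableToolCall({ function: { arguments: '{}' }, type: 'function', index: -1 }); // false
isUsableToolCall({ id: 'call_1', function: { name: 'search', arguments: '{}' } }); // true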
package/src/libs/agent-runtime/utils/streams/protocol.ts
@@ -33,6 +33,10 @@ export interface StreamProtocolChunk {
     | 'tool_calls'
     // Model Thinking
     | 'reasoning'
+    // use for reasoning signature, maybe only anthropic
+    | 'reasoning_signature'
+    // flagged reasoning signature
+    | 'flagged_reasoning_signature'
     // Search or Grounding
     | 'grounding'
     // stop signal
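
Each member of this union becomes the event name of an SSE frame, as the stream tests above and the fetchSSE tests below show. Assuming the interface's other fields are id and data (which is what those frames suggest; the diff only shows the type union), a signature chunk would look roughly like this:

// Illustrative only: a reasoning-signature chunk as it would flow through the
// protocol. 'abcbcd' is the sample signature used by the fetchSSE tests below,
// and the serialized SSE frame would read:
//   id: chat_1
//   event: reasoning_signature
//   data: "abcbcd"
const chunk = {
  data: 'abcbcd',
  id: 'chat_1',
  type: 'reasoning_signature' as const,
};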
package/src/services/__tests__/chat.test.ts
@@ -635,7 +635,7 @@ describe('ChatService', () => {
     });
   });
 
-  describe('processMessage', () => {
+  describe('reorderToolMessages', () => {
     it('should reorderToolMessages', () => {
       const input: OpenAIChatMessage[] = [
         {
@@ -746,7 +746,9 @@ describe('ChatService', () => {
         },
       ]);
     });
+  });
 
+  describe('processMessage', () => {
     describe('handle with files content in server mode', () => {
       it('should includes files', async () => {
        // 重新模拟模块,设置 isServerMode 为 true
@@ -833,46 +835,45 @@ describe('ChatService', () => {
           },
         ]);
       });
-    });
 
-    it('should include image files in server mode', async () => {
-      // 重新模拟模块,设置 isServerMode 为 true
-      vi.doMock('@/const/version', () => ({
-        isServerMode: true,
-        isDeprecatedEdition: true,
-      }));
+      it('should include image files in server mode', async () => {
+        // 重新模拟模块,设置 isServerMode 为 true
+        vi.doMock('@/const/version', () => ({
+          isServerMode: true,
+          isDeprecatedEdition: true,
+        }));
 
-      // 需要在修改模拟后重新导入相关模块
-      const { chatService } = await import('../chat');
-      const messages = [
-        {
-          content: 'Hello',
-          role: 'user',
-          imageList: [
-            {
-              id: 'file1',
-              url: 'http://example.com/image.jpg',
-              alt: 'abc.png',
-            },
-          ],
-        }, // Message with files
-        { content: 'Hey', role: 'assistant' }, // Regular user message
-      ] as ChatMessage[];
+        // 需要在修改模拟后重新导入相关模块
+        const { chatService } = await import('../chat');
+        const messages = [
+          {
+            content: 'Hello',
+            role: 'user',
+            imageList: [
+              {
+                id: 'file1',
+                url: 'http://example.com/image.jpg',
+                alt: 'abc.png',
+              },
+            ],
+          }, // Message with files
+          { content: 'Hey', role: 'assistant' }, // Regular user message
+        ] as ChatMessage[];
 
-      const getChatCompletionSpy = vi.spyOn(chatService, 'getChatCompletion');
-      await chatService.createAssistantMessage({
-        messages,
-        plugins: [],
-        model: 'gpt-4-vision-preview',
-      });
+        const getChatCompletionSpy = vi.spyOn(chatService, 'getChatCompletion');
+        await chatService.createAssistantMessage({
+          messages,
+          plugins: [],
+          model: 'gpt-4-vision-preview',
+        });
 
-      expect(getChatCompletionSpy).toHaveBeenCalledWith(
-        {
-          messages: [
-            {
-              content: [
-                {
-                  text: `Hello
+        expect(getChatCompletionSpy).toHaveBeenCalledWith(
+          {
+            messages: [
+              {
+                content: [
+                  {
+                    text: `Hello
 
 <!-- SYSTEM CONTEXT (NOT PART OF USER QUERY) -->
 <context.instruction>following part contains context information injected by the system. Please follow these instructions:
@@ -888,24 +889,61 @@ describe('ChatService', () => {
 
 </files_info>
 <!-- END SYSTEM CONTEXT -->`,
-            type: 'text',
-          },
-          {
-            image_url: { detail: 'auto', url: 'http://example.com/image.jpg' },
-            type: 'image_url',
-          },
-        ],
-        role: 'user',
+                    type: 'text',
+                  },
+                  {
+                    image_url: { detail: 'auto', url: 'http://example.com/image.jpg' },
+                    type: 'image_url',
+                  },
+                ],
+                role: 'user',
+              },
+              {
+                content: 'Hey',
+                role: 'assistant',
+              },
+            ],
+            model: 'gpt-4-vision-preview',
+          },
+          undefined,
+        );
+      });
+    });
+
+    it('should handle assistant messages with reasoning correctly', () => {
+      const messages = [
+        {
+          role: 'assistant',
+          content: 'The answer is 42.',
+          reasoning: {
+            content: 'I need to calculate the answer to life, universe, and everything.',
+            signature: 'thinking_process',
+          },
+        },
+      ] as ChatMessage[];
+
+      const result = chatService['processMessages']({
+        messages,
+        model: 'gpt-4',
+        provider: 'openai',
+      });
+
+      expect(result).toEqual([
+        {
+          content: [
+            {
+              signature: 'thinking_process',
+              thinking: 'I need to calculate the answer to life, universe, and everything.',
+              type: 'thinking',
             },
             {
-            content: 'Hey',
-            role: 'assistant',
+              text: 'The answer is 42.',
+              type: 'text',
             },
           ],
-        model: 'gpt-4-vision-preview',
+          role: 'assistant',
         },
-      undefined,
-    );
+      ]);
     });
   });
 });
@@ -917,6 +955,7 @@ describe('ChatService', () => {
 vi.mock('../_auth', async (importOriginal) => {
   return importOriginal();
 });
+
 describe('AgentRuntimeOnClient', () => {
   describe('initializeWithClientStore', () => {
     describe('should initialize with options correctly', () => {
package/src/services/chat.ts
@@ -485,8 +485,20 @@ class ChatService {
       }
 
       case 'assistant': {
+        // signature is a signal of anthropic thinking mode
+        const shouldIncludeThinking = m.reasoning && !!m.reasoning?.signature;
+
         return {
-          content: m.content,
+          content: shouldIncludeThinking
+            ? [
+                {
+                  signature: m.reasoning!.signature,
+                  thinking: m.reasoning!.content,
+                  type: 'thinking',
+                } as any,
+                { text: m.content, type: 'text' },
+              ]
+            : m.content,
           role: m.role,
           tool_calls: m.tools?.map(
             (tool): MessageToolCall => ({
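
The effect of this branch is pinned down by the new processMessages test above: an assistant message whose stored reasoning carries a signature is re-expanded into Anthropic-style content blocks, thinking first, answer second. Schematically, using the test's own values:

// Stored client-side message (ModelReasoning now carries the signature):
const stored = {
  role: 'assistant',
  content: 'The answer is 42.',
  reasoning: {
    content: 'I need to calculate the answer to life, universe, and everything.',
    signature: 'thinking_process',
  },
};

// What processMessages now sends to the provider for that message:
const sent = {
  role: 'assistant',
  content: [
    {
      signature: 'thinking_process',
      thinking: 'I need to calculate the answer to life, universe, and everything.',
      type: 'thinking',
    },
    { text: 'The answer is 42.', type: 'text' },
  ],
};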
package/src/store/chat/slices/aiChat/actions/generateAIChat.ts
@@ -472,7 +472,7 @@ export const generateAIChat: StateCreator<
       // update the content after fetch result
       await internal_updateMessageContent(messageId, content, {
         toolCalls,
-        reasoning: !!reasoning ? { content: reasoning, duration } : undefined,
+        reasoning: !!reasoning ? { ...reasoning, duration } : undefined,
        search: !!grounding?.citations ? grounding : undefined,
       });
     },
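
This one-liner follows from the ModelReasoning change below: the reasoning value handed back by fetchSSE is no longer the accumulated string itself but an object that may already carry content and signature, so it is spread rather than wrapped. A small sketch (the duration value is illustrative):

const reasoning = { content: 'Hello World', signature: 'abcbcd' }; // from fetchSSE onFinish
const duration = 1234; // illustrative

// previously, reasoning was a string:       { content: reasoning, duration }
// now every field is preserved by spreading: { ...reasoning, duration }
const stored = { ...reasoning, duration };
// → { content: 'Hello World', signature: 'abcbcd', duration: 1234 }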
package/src/types/message/base.ts
@@ -10,6 +10,7 @@ export interface CitationItem {
 export interface ModelReasoning {
   content?: string;
   duration?: number;
+  signature?: string;
 }
 
 export type MessageRoleType = 'user' | 'system' | 'assistant' | 'tool';
package/src/utils/fetch/__tests__/fetchSSE.test.ts
@@ -154,16 +154,119 @@ describe('fetchSSE', () => {
     });
   });
 
-  it('should handle reasoning event with smoothing correctly', async () => {
+  describe('reasoning', () => {
+    it('should handle reasoning event without smoothing', async () => {
+      const mockOnMessageHandle = vi.fn();
+      const mockOnFinish = vi.fn();
+
+      (fetchEventSource as any).mockImplementationOnce(
+        async (url: string, options: FetchEventSourceInit) => {
+          options.onopen!({ clone: () => ({ ok: true, headers: new Headers() }) } as any);
+          options.onmessage!({ event: 'reasoning', data: JSON.stringify('Hello') } as any);
+          await sleep(100);
+          options.onmessage!({ event: 'reasoning', data: JSON.stringify(' World') } as any);
+          await sleep(100);
+          options.onmessage!({ event: 'text', data: JSON.stringify('hi') } as any);
+        },
+      );
+
+      await fetchSSE('/', {
+        onMessageHandle: mockOnMessageHandle,
+        onFinish: mockOnFinish,
+      });
+
+      expect(mockOnMessageHandle).toHaveBeenNthCalledWith(1, { text: 'Hello', type: 'reasoning' });
+      expect(mockOnMessageHandle).toHaveBeenNthCalledWith(2, { text: ' World', type: 'reasoning' });
+
+      expect(mockOnFinish).toHaveBeenCalledWith('hi', {
+        observationId: null,
+        toolCalls: undefined,
+        reasoning: { content: 'Hello World' },
+        traceId: null,
+        type: 'done',
+      });
+    });
+
+    it('should handle reasoning event with smoothing correctly', async () => {
+      const mockOnMessageHandle = vi.fn();
+      const mockOnFinish = vi.fn();
+
+      (fetchEventSource as any).mockImplementationOnce(
+        async (url: string, options: FetchEventSourceInit) => {
+          options.onopen!({ clone: () => ({ ok: true, headers: new Headers() }) } as any);
+          options.onmessage!({ event: 'reasoning', data: JSON.stringify('Hello') } as any);
+          await sleep(100);
+          options.onmessage!({ event: 'reasoning', data: JSON.stringify(' World') } as any);
+          await sleep(100);
+          options.onmessage!({ event: 'text', data: JSON.stringify('hi') } as any);
+        },
+      );
+
+      await fetchSSE('/', {
+        onMessageHandle: mockOnMessageHandle,
+        onFinish: mockOnFinish,
+        smoothing: true,
+      });
+
+      expect(mockOnMessageHandle).toHaveBeenNthCalledWith(1, { text: 'Hell', type: 'reasoning' });
+      expect(mockOnMessageHandle).toHaveBeenNthCalledWith(2, { text: 'o', type: 'reasoning' });
+      expect(mockOnMessageHandle).toHaveBeenNthCalledWith(3, { text: ' Wor', type: 'reasoning' });
+      // more assertions for each character...
+      expect(mockOnFinish).toHaveBeenCalledWith('hi', {
+        observationId: null,
+        toolCalls: undefined,
+        reasoning: { content: 'Hello World' },
+        traceId: null,
+        type: 'done',
+      });
+    });
+    it('should handle reasoning with signature', async () => {
+      const mockOnMessageHandle = vi.fn();
+      const mockOnFinish = vi.fn();
+
+      (fetchEventSource as any).mockImplementationOnce(
+        async (url: string, options: FetchEventSourceInit) => {
+          options.onopen!({ clone: () => ({ ok: true, headers: new Headers() }) } as any);
+          options.onmessage!({ event: 'reasoning', data: JSON.stringify('Hello') } as any);
+          await sleep(100);
+          options.onmessage!({ event: 'reasoning', data: JSON.stringify(' World') } as any);
+          options.onmessage!({
+            event: 'reasoning_signature',
+            data: JSON.stringify('abcbcd'),
+          } as any);
+          await sleep(100);
+          options.onmessage!({ event: 'text', data: JSON.stringify('hi') } as any);
+        },
+      );
+
+      await fetchSSE('/', {
+        onMessageHandle: mockOnMessageHandle,
+        onFinish: mockOnFinish,
+        smoothing: true,
+      });
+
+      expect(mockOnMessageHandle).toHaveBeenNthCalledWith(1, { text: 'Hell', type: 'reasoning' });
+      expect(mockOnMessageHandle).toHaveBeenNthCalledWith(2, { text: 'o', type: 'reasoning' });
+      expect(mockOnMessageHandle).toHaveBeenNthCalledWith(3, { text: ' Wor', type: 'reasoning' });
+      // more assertions for each character...
+      expect(mockOnFinish).toHaveBeenCalledWith('hi', {
+        observationId: null,
+        toolCalls: undefined,
+        reasoning: { content: 'Hello World', signature: 'abcbcd' },
+        traceId: null,
+        type: 'done',
+      });
+    });
+  });
+
+  it('should handle grounding event', async () => {
     const mockOnMessageHandle = vi.fn();
     const mockOnFinish = vi.fn();
 
     (fetchEventSource as any).mockImplementationOnce(
       async (url: string, options: FetchEventSourceInit) => {
         options.onopen!({ clone: () => ({ ok: true, headers: new Headers() }) } as any);
-        options.onmessage!({ event: 'reasoning', data: JSON.stringify('Hello') } as any);
-        await sleep(100);
-        options.onmessage!({ event: 'reasoning', data: JSON.stringify(' World') } as any);
+        options.onmessage!({ event: 'grounding', data: JSON.stringify('Hello') } as any);
         await sleep(100);
         options.onmessage!({ event: 'text', data: JSON.stringify('hi') } as any);
       },
@@ -172,17 +275,17 @@ describe('fetchSSE', () => {
     await fetchSSE('/', {
       onMessageHandle: mockOnMessageHandle,
       onFinish: mockOnFinish,
-      smoothing: true,
     });
 
-    expect(mockOnMessageHandle).toHaveBeenNthCalledWith(1, { text: 'Hell', type: 'reasoning' });
-    expect(mockOnMessageHandle).toHaveBeenNthCalledWith(2, { text: 'o', type: 'reasoning' });
-    expect(mockOnMessageHandle).toHaveBeenNthCalledWith(3, { text: ' Wor', type: 'reasoning' });
-    // more assertions for each character...
+    expect(mockOnMessageHandle).toHaveBeenNthCalledWith(1, {
+      grounding: 'Hello',
+      type: 'grounding',
+    });
+
     expect(mockOnFinish).toHaveBeenCalledWith('hi', {
       observationId: null,
       toolCalls: undefined,
-      reasoning: 'Hello World',
+      grounding: 'Hello',
       traceId: null,
       type: 'done',
     });
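
Read together, these fetchSSE tests pin down the client-side contract for the new event: reasoning chunks are concatenated (with optional smoothing) and a single reasoning_signature chunk stamps the signature onto the ModelReasoning object handed to onFinish. A self-contained sketch of that accumulation, written as a hypothetical reducer rather than the actual fetchSSE internals:

// Hypothetical reducer reproducing the behavior the tests assert; not the
// real fetchSSE implementation, just its observable contract.
interface ModelReasoning {
  content?: string;
  duration?: number;
  signature?: string;
}

type SSEEvent = { data: string; event: string };

const collectReasoning = (events: SSEEvent[]): ModelReasoning => {
  const reasoning: ModelReasoning = {};
  for (const { event, data } of events) {
    const payload = JSON.parse(data); // the tests JSON.stringify every payload
    if (event === 'reasoning') reasoning.content = (reasoning.content ?? '') + payload;
    if (event === 'reasoning_signature') reasoning.signature = payload;
  }
  return reasoning;
};

// Mirrors 'should handle reasoning with signature':
collectReasoning([
  { event: 'reasoning', data: JSON.stringify('Hello') },
  { event: 'reasoning', data: JSON.stringify(' World') },
  { event: 'reasoning_signature', data: JSON.stringify('abcbcd') },
]);
// → { content: 'Hello World', signature: 'abcbcd' }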