@lobehub/lobehub 2.0.0-next.87 → 2.0.0-next.89

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. package/CHANGELOG.md +50 -0
  2. package/changelog/v1.json +18 -0
  3. package/next.config.ts +0 -1
  4. package/package.json +2 -2
  5. package/packages/context-engine/src/processors/ToolCall.ts +1 -0
  6. package/packages/context-engine/src/processors/__tests__/ToolCall.test.ts +59 -0
  7. package/packages/context-engine/src/tools/ToolNameResolver.ts +1 -0
  8. package/packages/context-engine/src/tools/__tests__/ToolNameResolver.test.ts +57 -0
  9. package/packages/context-engine/src/types.ts +1 -0
  10. package/packages/fetch-sse/src/fetchSSE.ts +12 -2
  11. package/packages/model-bank/src/aiModels/aihubmix.ts +60 -1
  12. package/packages/model-bank/src/aiModels/google.ts +21 -86
  13. package/packages/model-bank/src/aiModels/ollamacloud.ts +7 -6
  14. package/packages/model-bank/src/types/aiModel.ts +1 -0
  15. package/packages/model-runtime/src/core/contextBuilders/google.test.ts +479 -0
  16. package/packages/model-runtime/src/core/contextBuilders/google.ts +44 -1
  17. package/packages/model-runtime/src/core/streams/google/google-ai.test.ts +1115 -814
  18. package/packages/model-runtime/src/core/streams/google/index.ts +19 -5
  19. package/packages/model-runtime/src/core/streams/protocol.ts +1 -0
  20. package/packages/model-runtime/src/providers/google/index.test.ts +1 -1
  21. package/packages/model-runtime/src/providers/google/index.ts +17 -10
  22. package/packages/model-runtime/src/types/chat.ts +4 -0
  23. package/packages/model-runtime/src/types/toolsCalling.ts +3 -1
  24. package/packages/types/src/agent/chatConfig.ts +2 -0
  25. package/packages/types/src/message/common/tools.ts +3 -0
  26. package/src/features/ChatInput/ActionBar/Model/ControlsForm.tsx +11 -0
  27. package/src/features/ChatInput/ActionBar/Model/ThinkingLevelSlider.tsx +56 -0
  28. package/src/features/Conversation/Messages/Group/Error/index.tsx +3 -2
  29. package/src/features/Conversation/Messages/Group/GroupItem.tsx +2 -2
  30. package/src/locales/default/chat.ts +3 -0
  31. package/src/services/chat/index.ts +4 -0
  32. package/src/store/chat/slices/aiChat/actions/streamingExecutor.ts +6 -5
  33. package/src/store/chat/slices/message/actions/optimisticUpdate.ts +6 -11
  34. package/src/store/chat/slices/plugin/actions/internals.ts +2 -2
package/packages/model-runtime/src/core/streams/google/google-ai.test.ts
@@ -4,938 +4,1239 @@ import { describe, expect, it, vi } from 'vitest';
4
4
  import * as uuidModule from '../../../utils/uuid';
5
5
  import { GoogleGenerativeAIStream, LOBE_ERROR_KEY } from './index';
6
6
 
7
+ /**
8
+ * Helper function to decode stream chunks into string array
9
+ */
10
+ async function decodeStreamChunks(stream: ReadableStream): Promise<string[]> {
11
+ const decoder = new TextDecoder();
12
+ const chunks: string[] = [];
13
+
14
+ // @ts-ignore
15
+ for await (const chunk of stream) {
16
+ chunks.push(decoder.decode(chunk, { stream: true }));
17
+ }
18
+
19
+ return chunks;
20
+ }
21
+
7
22
  describe('GoogleGenerativeAIStream', () => {
8
- it('should transform Google Generative AI stream to protocol stream', async () => {
9
- vi.spyOn(uuidModule, 'nanoid').mockReturnValueOnce('1').mockReturnValueOnce('abcd1234');
10
-
11
- const mockGenerateContentResponse = (text: string, functionCalls?: any[]) =>
12
- ({
13
- text: text,
14
- functionCalls: functionCalls,
15
- }) as unknown as GenerateContentResponse;
16
-
17
- const mockGoogleStream = new ReadableStream({
18
- start(controller) {
19
- controller.enqueue(mockGenerateContentResponse('Hello'));
20
-
21
- controller.enqueue(
22
- mockGenerateContentResponse('', [{ name: 'testFunction', args: { arg1: 'value1' } }]),
23
- );
24
-
25
- // final chunk should include finishReason and usageMetadata to mark terminal event
26
- controller.enqueue({
27
- text: ' world!',
28
- candidates: [{ content: { role: 'model' }, finishReason: 'STOP', index: 0 }],
29
- usageMetadata: {
30
- promptTokenCount: 1,
31
- totalTokenCount: 1,
32
- promptTokensDetails: [{ modality: 'TEXT', tokenCount: 1 }],
33
- },
34
- modelVersion: 'gemini-test',
35
- } as unknown as GenerateContentResponse);
23
+ describe('Basic functionality', () => {
24
+ it('should transform Google Generative AI stream to protocol stream', async () => {
25
+ vi.spyOn(uuidModule, 'nanoid').mockReturnValueOnce('1').mockReturnValueOnce('abcd1234');
26
+
27
+ const mockGoogleStream = new ReadableStream({
28
+ start(controller) {
29
+ // Text chunk
30
+ controller.enqueue({
31
+ text: 'Hello',
32
+ candidates: [{ content: { parts: [{ text: 'Hello' }], role: 'model' }, index: 0 }],
33
+ } as unknown as GenerateContentResponse);
34
+
35
+ // Function call chunk
36
+ controller.enqueue({
37
+ text: '',
38
+ candidates: [
39
+ {
40
+ content: {
41
+ parts: [
42
+ {
43
+ functionCall: {
44
+ name: 'testFunction',
45
+ args: { arg1: 'value1' },
46
+ },
47
+ },
48
+ ],
49
+ role: 'model',
50
+ },
51
+ index: 0,
52
+ },
53
+ ],
54
+ } as unknown as GenerateContentResponse);
55
+
56
+ // Final chunk with finishReason and usageMetadata to mark terminal event
57
+ controller.enqueue({
58
+ text: ' world!',
59
+ candidates: [
60
+ {
61
+ content: { parts: [{ text: ' world!' }], role: 'model' },
62
+ finishReason: 'STOP',
63
+ index: 0,
64
+ },
65
+ ],
66
+ usageMetadata: {
67
+ promptTokenCount: 1,
68
+ totalTokenCount: 1,
69
+ promptTokensDetails: [{ modality: 'TEXT', tokenCount: 1 }],
70
+ },
71
+ modelVersion: 'gemini-test',
72
+ } as unknown as GenerateContentResponse);
36
73
 
37
- controller.close();
38
- },
39
- });
74
+ controller.close();
75
+ },
76
+ });
77
+
78
+ const onStartMock = vi.fn();
79
+ const onTextMock = vi.fn();
80
+ const onToolCallMock = vi.fn();
81
+ const onCompletionMock = vi.fn();
82
+
83
+ const protocolStream = GoogleGenerativeAIStream(mockGoogleStream, {
84
+ callbacks: {
85
+ onStart: onStartMock,
86
+ onText: onTextMock,
87
+ onToolsCalling: onToolCallMock,
88
+ onCompletion: onCompletionMock,
89
+ },
90
+ });
40
91
 
41
- const onStartMock = vi.fn();
42
- const onTextMock = vi.fn();
43
- const onToolCallMock = vi.fn();
44
- const onCompletionMock = vi.fn();
45
-
46
- const protocolStream = GoogleGenerativeAIStream(mockGoogleStream, {
47
- callbacks: {
48
- onStart: onStartMock,
49
- onText: onTextMock,
50
- onToolsCalling: onToolCallMock,
51
- onCompletion: onCompletionMock,
52
- },
53
- });
92
+ const chunks = await decodeStreamChunks(protocolStream);
54
93
 
55
- const decoder = new TextDecoder();
56
- const chunks = [];
57
-
58
- // @ts-ignore
59
- for await (const chunk of protocolStream) {
60
- chunks.push(decoder.decode(chunk, { stream: true }));
61
- }
62
-
63
- expect(chunks).toEqual([
64
- // text
65
- 'id: chat_1\n',
66
- 'event: text\n',
67
- `data: "Hello"\n\n`,
68
-
69
- // tool call
70
- 'id: chat_1\n',
71
- 'event: tool_calls\n',
72
- `data: [{"function":{"arguments":"{\\"arg1\\":\\"value1\\"}","name":"testFunction"},"id":"testFunction_0_abcd1234","index":0,"type":"function"}]\n\n`,
73
-
74
- // text
75
- 'id: chat_1\n',
76
- 'event: text\n',
77
- `data: " world!"\n\n`,
78
- // stop
79
- 'id: chat_1\n',
80
- 'event: stop\n',
81
- `data: "STOP"\n\n`,
82
- // usage
83
- 'id: chat_1\n',
84
- 'event: usage\n',
85
- `data: {"inputTextTokens":1,"outputImageTokens":0,"outputTextTokens":0,"totalInputTokens":1,"totalOutputTokens":0,"totalTokens":1}\n\n`,
86
- ]);
87
-
88
- expect(onStartMock).toHaveBeenCalledTimes(1);
89
- expect(onTextMock).toHaveBeenNthCalledWith(1, 'Hello');
90
- expect(onTextMock).toHaveBeenNthCalledWith(2, ' world!');
91
- expect(onToolCallMock).toHaveBeenCalledTimes(1);
92
- expect(onCompletionMock).toHaveBeenCalledTimes(1);
93
- });
94
+ expect(chunks).toEqual([
95
+ // text
96
+ 'id: chat_1\n',
97
+ 'event: text\n',
98
+ `data: "Hello"\n\n`,
94
99
 
95
- it('should handle empty stream', async () => {
96
- vi.spyOn(uuidModule, 'nanoid').mockReturnValueOnce('E5M9dFKw');
97
- const mockGoogleStream = new ReadableStream({
98
- start(controller) {
99
- controller.enqueue({
100
- candidates: [{ content: { role: 'model' }, finishReason: 'STOP', index: 0 }],
101
- usageMetadata: {
102
- promptTokenCount: 0,
103
- cachedContentTokenCount: 0,
104
- totalTokenCount: 0,
105
- promptTokensDetails: [
106
- { modality: 'TEXT', tokenCount: 0 },
107
- { modality: 'IMAGE', tokenCount: 0 },
108
- ],
109
- },
110
- modelVersion: 'gemini-test',
111
- } as unknown as GenerateContentResponse);
100
+ // tool call
101
+ 'id: chat_1\n',
102
+ 'event: tool_calls\n',
103
+ `data: [{"function":{"arguments":"{\\"arg1\\":\\"value1\\"}","name":"testFunction"},"id":"testFunction_0_abcd1234","index":0,"type":"function"}]\n\n`,
112
104
 
113
- controller.close();
114
- },
105
+ // text
106
+ 'id: chat_1\n',
107
+ 'event: text\n',
108
+ `data: " world!"\n\n`,
109
+ // stop
110
+ 'id: chat_1\n',
111
+ 'event: stop\n',
112
+ `data: "STOP"\n\n`,
113
+ // usage
114
+ 'id: chat_1\n',
115
+ 'event: usage\n',
116
+ `data: {"inputTextTokens":1,"outputImageTokens":0,"outputTextTokens":0,"totalInputTokens":1,"totalOutputTokens":0,"totalTokens":1}\n\n`,
117
+ ]);
118
+
119
+ expect(onStartMock).toHaveBeenCalledTimes(1);
120
+ expect(onTextMock).toHaveBeenNthCalledWith(1, 'Hello');
121
+ expect(onTextMock).toHaveBeenNthCalledWith(2, ' world!');
122
+ expect(onToolCallMock).toHaveBeenCalledTimes(1);
123
+ expect(onCompletionMock).toHaveBeenCalledTimes(1);
115
124
  });
116
125
 
117
- const protocolStream = GoogleGenerativeAIStream(mockGoogleStream);
126
+ it('should handle empty stream', async () => {
127
+ vi.spyOn(uuidModule, 'nanoid').mockReturnValueOnce('E5M9dFKw');
128
+ const mockGoogleStream = new ReadableStream({
129
+ start(controller) {
130
+ controller.enqueue({
131
+ candidates: [{ content: { role: 'model' }, finishReason: 'STOP', index: 0 }],
132
+ usageMetadata: {
133
+ promptTokenCount: 0,
134
+ cachedContentTokenCount: 0,
135
+ totalTokenCount: 0,
136
+ promptTokensDetails: [
137
+ { modality: 'TEXT', tokenCount: 0 },
138
+ { modality: 'IMAGE', tokenCount: 0 },
139
+ ],
140
+ },
141
+ modelVersion: 'gemini-test',
142
+ } as unknown as GenerateContentResponse);
143
+
144
+ controller.close();
145
+ },
146
+ });
118
147
 
119
- const decoder = new TextDecoder();
120
- const chunks = [];
148
+ const protocolStream = GoogleGenerativeAIStream(mockGoogleStream);
121
149
 
122
- // @ts-ignore
123
- for await (const chunk of protocolStream) {
124
- chunks.push(decoder.decode(chunk, { stream: true }));
125
- }
150
+ const chunks = await decodeStreamChunks(protocolStream);
126
151
 
127
- expect(chunks).toEqual([
128
- 'id: chat_E5M9dFKw\n',
129
- 'event: stop\n',
130
- `data: "STOP"\n\n`,
131
- 'id: chat_E5M9dFKw\n',
132
- 'event: usage\n',
133
- `data: {"inputCachedTokens":0,"inputImageTokens":0,"inputTextTokens":0,"outputImageTokens":0,"outputTextTokens":0,"totalInputTokens":0,"totalOutputTokens":0,"totalTokens":0}\n\n`,
134
- ]);
135
- });
152
+ expect(chunks).toEqual([
153
+ 'id: chat_E5M9dFKw\n',
154
+ 'event: stop\n',
155
+ `data: "STOP"\n\n`,
156
+ 'id: chat_E5M9dFKw\n',
157
+ 'event: usage\n',
158
+ `data: {"inputCachedTokens":0,"inputImageTokens":0,"inputTextTokens":0,"outputImageTokens":0,"outputTextTokens":0,"totalInputTokens":0,"totalOutputTokens":0,"totalTokens":0}\n\n`,
159
+ ]);
160
+ });
136
161
 
137
- it('should handle image', async () => {
138
- vi.spyOn(uuidModule, 'nanoid').mockReturnValueOnce('1');
162
+ it('should return undefined data without text', async () => {
163
+ vi.spyOn(uuidModule, 'nanoid').mockReturnValueOnce('1');
139
164
 
140
- const data = {
141
- candidates: [
165
+ const data = [
142
166
  {
143
- content: {
144
- parts: [{ inlineData: { mimeType: 'image/png', data: 'iVBORw0KGgoAA' } }],
145
- role: 'model',
167
+ candidates: [
168
+ {
169
+ content: { parts: [{ text: '234' }], role: 'model' },
170
+ safetyRatings: [
171
+ { category: 'HARM_CATEGORY_HATE_SPEECH', probability: 'NEGLIGIBLE' },
172
+ { category: 'HARM_CATEGORY_DANGEROUS_CONTENT', probability: 'NEGLIGIBLE' },
173
+ { category: 'HARM_CATEGORY_HARASSMENT', probability: 'NEGLIGIBLE' },
174
+ { category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT', probability: 'NEGLIGIBLE' },
175
+ ],
176
+ },
177
+ ],
178
+ text: '234',
179
+ usageMetadata: {
180
+ promptTokenCount: 19,
181
+ candidatesTokenCount: 3,
182
+ totalTokenCount: 122,
183
+ promptTokensDetails: [{ modality: 'TEXT', tokenCount: 19 }],
184
+ thoughtsTokenCount: 100,
146
185
  },
147
- finishReason: 'STOP',
148
- index: 0,
149
- },
150
- ],
151
- usageMetadata: {
152
- promptTokenCount: 6,
153
- totalTokenCount: 6,
154
- promptTokensDetails: [
155
- { modality: 'TEXT', tokenCount: 6 },
156
- { modality: 'IMAGE', tokenCount: 0 },
157
- ],
158
- },
159
- modelVersion: 'gemini-2.0-flash-exp',
160
- };
186
+ modelVersion: 'gemini-2.5-flash-preview-04-17',
187
+ },
188
+ {
189
+ text: '',
190
+ candidates: [
191
+ {
192
+ content: { parts: [{ text: '' }], role: 'model' },
193
+ safetyRatings: [
194
+ { category: 'HARM_CATEGORY_HATE_SPEECH', probability: 'NEGLIGIBLE' },
195
+ { category: 'HARM_CATEGORY_DANGEROUS_CONTENT', probability: 'NEGLIGIBLE' },
196
+ { category: 'HARM_CATEGORY_HARASSMENT', probability: 'NEGLIGIBLE' },
197
+ { category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT', probability: 'NEGLIGIBLE' },
198
+ ],
199
+ },
200
+ ],
201
+ usageMetadata: {
202
+ promptTokenCount: 19,
203
+ candidatesTokenCount: 3,
204
+ totalTokenCount: 122,
205
+ promptTokensDetails: [{ modality: 'TEXT', tokenCount: 19 }],
206
+ candidatesTokensDetails: [{ modality: 'TEXT', tokenCount: 3 }],
207
+ thoughtsTokenCount: 100,
208
+ },
209
+ modelVersion: 'gemini-2.5-flash-preview-04-17',
210
+ },
211
+ {
212
+ text: '567890\n',
213
+ candidates: [
214
+ {
215
+ content: { parts: [{ text: '567890\n' }], role: 'model' },
216
+ finishReason: 'STOP',
217
+ safetyRatings: [
218
+ { category: 'HARM_CATEGORY_HATE_SPEECH', probability: 'NEGLIGIBLE' },
219
+ { category: 'HARM_CATEGORY_DANGEROUS_CONTENT', probability: 'NEGLIGIBLE' },
220
+ { category: 'HARM_CATEGORY_HARASSMENT', probability: 'NEGLIGIBLE' },
221
+ { category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT', probability: 'NEGLIGIBLE' },
222
+ ],
223
+ },
224
+ ],
225
+ usageMetadata: {
226
+ promptTokenCount: 19,
227
+ candidatesTokenCount: 11,
228
+ totalTokenCount: 131,
229
+ promptTokensDetails: [{ modality: 'TEXT', tokenCount: 19 }],
230
+ candidatesTokensDetails: [{ modality: 'TEXT', tokenCount: 11 }],
231
+ thoughtsTokenCount: 100,
232
+ },
233
+ modelVersion: 'gemini-2.5-flash-preview-04-17',
234
+ },
235
+ ];
161
236
 
162
- const mockGoogleStream = new ReadableStream({
163
- start(controller) {
164
- controller.enqueue(data);
237
+ const mockGoogleStream = new ReadableStream({
238
+ start(controller) {
239
+ data.forEach((item) => {
240
+ controller.enqueue(item);
241
+ });
165
242
 
166
- controller.close();
167
- },
243
+ controller.close();
244
+ },
245
+ });
246
+
247
+ const protocolStream = GoogleGenerativeAIStream(mockGoogleStream);
248
+
249
+ const chunks = await decodeStreamChunks(protocolStream);
250
+
251
+ expect(chunks).toEqual(
252
+ [
253
+ 'id: chat_1',
254
+ 'event: text',
255
+ 'data: "234"\n',
256
+
257
+ 'id: chat_1',
258
+ 'event: text',
259
+ 'data: ""\n',
260
+
261
+ 'id: chat_1',
262
+ 'event: text',
263
+ `data: "567890\\n"\n`,
264
+ // stop
265
+ 'id: chat_1',
266
+ 'event: stop',
267
+ `data: "STOP"\n`,
268
+ // usage
269
+ 'id: chat_1',
270
+ 'event: usage',
271
+ `data: {"inputTextTokens":19,"outputImageTokens":0,"outputReasoningTokens":100,"outputTextTokens":11,"totalInputTokens":19,"totalOutputTokens":111,"totalTokens":131}\n`,
272
+ ].map((i) => i + '\n'),
273
+ );
168
274
  });
169
-
170
- const protocolStream = GoogleGenerativeAIStream(mockGoogleStream);
171
-
172
- const decoder = new TextDecoder();
173
- const chunks = [];
174
-
175
- // @ts-ignore
176
- for await (const chunk of protocolStream) {
177
- chunks.push(decoder.decode(chunk, { stream: true }));
178
- }
179
-
180
- expect(chunks).toEqual([
181
- // image
182
- 'id: chat_1\n',
183
- 'event: base64_image\n',
184
- `data: "data:image/png;base64,iVBORw0KGgoAA"\n\n`,
185
- // stop
186
- 'id: chat_1\n',
187
- 'event: stop\n',
188
- `data: "STOP"\n\n`,
189
- // usage
190
- 'id: chat_1\n',
191
- 'event: usage\n',
192
- `data: {"inputImageTokens":0,"inputTextTokens":6,"outputImageTokens":0,"outputTextTokens":0,"totalInputTokens":6,"totalOutputTokens":0,"totalTokens":6}\n\n`,
193
- ]);
194
275
  });
195
276
 
196
- it('should handle token count', async () => {
197
- vi.spyOn(uuidModule, 'nanoid').mockReturnValueOnce('1');
198
-
199
- const data = {
200
- candidates: [{ content: { role: 'model' }, finishReason: 'STOP', index: 0 }],
201
- usageMetadata: {
202
- promptTokenCount: 266,
203
- totalTokenCount: 266,
204
- promptTokensDetails: [
205
- { modality: 'TEXT', tokenCount: 8 },
206
- { modality: 'IMAGE', tokenCount: 258 },
207
- ],
208
- },
209
- modelVersion: 'gemini-2.0-flash-exp',
210
- };
277
+ describe('Reasoning and Thought', () => {
278
+ it('should handle thought candidate part', async () => {
279
+ vi.spyOn(uuidModule, 'nanoid').mockReturnValueOnce('1');
211
280
 
212
- const mockGoogleStream = new ReadableStream({
213
- start(controller) {
214
- controller.enqueue(data);
281
+ const data = [
282
+ {
283
+ candidates: [
284
+ {
285
+ content: {
286
+ parts: [{ text: '**Understanding the Conditional Logic**\n\n', thought: true }],
287
+ role: 'model',
288
+ },
289
+ index: 0,
290
+ },
291
+ ],
292
+ text: '**Understanding the Conditional Logic**\n\n',
293
+ usageMetadata: {
294
+ promptTokenCount: 38,
295
+ candidatesTokenCount: 7,
296
+ totalTokenCount: 301,
297
+ promptTokensDetails: [{ modality: 'TEXT', tokenCount: 38 }],
298
+ thoughtsTokenCount: 256,
299
+ },
300
+ modelVersion: 'models/gemini-2.5-flash-preview-04-17',
301
+ },
302
+ {
303
+ candidates: [
304
+ {
305
+ content: {
306
+ parts: [{ text: '**Finalizing Interpretation**\n\n', thought: true }],
307
+ role: 'model',
308
+ },
309
+ index: 0,
310
+ },
311
+ ],
312
+ text: '**Finalizing Interpretation**\n\n',
313
+ usageMetadata: {
314
+ promptTokenCount: 38,
315
+ candidatesTokenCount: 13,
316
+ totalTokenCount: 355,
317
+ promptTokensDetails: [{ modality: 'TEXT', tokenCount: 38 }],
318
+ thoughtsTokenCount: 304,
319
+ },
320
+ modelVersion: 'models/gemini-2.5-flash-preview-04-17',
321
+ },
322
+ {
323
+ candidates: [
324
+ {
325
+ content: {
326
+ parts: [{ text: '简单来说,' }],
327
+ role: 'model',
328
+ },
329
+ index: 0,
330
+ },
331
+ ],
332
+ text: '简单来说,',
333
+ usageMetadata: {
334
+ promptTokenCount: 38,
335
+ candidatesTokenCount: 16,
336
+ totalTokenCount: 358,
337
+ promptTokensDetails: [{ modality: 'TEXT', tokenCount: 38 }],
338
+ thoughtsTokenCount: 304,
339
+ },
340
+ modelVersion: 'models/gemini-2.5-flash-preview-04-17',
341
+ },
342
+ {
343
+ candidates: [
344
+ {
345
+ content: { parts: [{ text: '文本内容。' }], role: 'model' },
346
+ finishReason: 'STOP',
347
+ index: 0,
348
+ },
349
+ ],
350
+ text: '文本内容。',
351
+ usageMetadata: {
352
+ promptTokenCount: 38,
353
+ candidatesTokenCount: 19,
354
+ totalTokenCount: 361,
355
+ promptTokensDetails: [{ modality: 'TEXT', tokenCount: 38 }],
356
+ thoughtsTokenCount: 304,
357
+ },
358
+ modelVersion: 'models/gemini-2.5-flash-preview-04-17',
359
+ },
360
+ ];
215
361
 
216
- controller.close();
217
- },
362
+ const mockGoogleStream = new ReadableStream({
363
+ start(controller) {
364
+ data.forEach((item) => {
365
+ controller.enqueue(item);
366
+ });
367
+
368
+ controller.close();
369
+ },
370
+ });
371
+
372
+ const protocolStream = GoogleGenerativeAIStream(mockGoogleStream);
373
+
374
+ const chunks = await decodeStreamChunks(protocolStream);
375
+
376
+ expect(chunks).toEqual(
377
+ [
378
+ 'id: chat_1',
379
+ 'event: reasoning',
380
+ 'data: "**Understanding the Conditional Logic**\\n\\n"\n',
381
+
382
+ 'id: chat_1',
383
+ 'event: reasoning',
384
+ `data: "**Finalizing Interpretation**\\n\\n"\n`,
385
+
386
+ 'id: chat_1',
387
+ 'event: text',
388
+ `data: "简单来说,"\n`,
389
+
390
+ 'id: chat_1',
391
+ 'event: text',
392
+ `data: "文本内容。"\n`,
393
+ // stop
394
+ 'id: chat_1',
395
+ 'event: stop',
396
+ `data: "STOP"\n`,
397
+ // usage
398
+ 'id: chat_1',
399
+ 'event: usage',
400
+ `data: {"inputTextTokens":38,"outputImageTokens":0,"outputReasoningTokens":304,"outputTextTokens":19,"totalInputTokens":38,"totalOutputTokens":323,"totalTokens":361}\n`,
401
+ ].map((i) => i + '\n'),
402
+ );
218
403
  });
219
404
 
220
- const protocolStream = GoogleGenerativeAIStream(mockGoogleStream);
221
-
222
- const decoder = new TextDecoder();
223
- const chunks = [];
224
-
225
- // @ts-ignore
226
- for await (const chunk of protocolStream) {
227
- chunks.push(decoder.decode(chunk, { stream: true }));
228
- }
229
-
230
- expect(chunks).toEqual([
231
- // stop
232
- 'id: chat_1\n',
233
- 'event: stop\n',
234
- `data: "STOP"\n\n`,
235
- // usage
236
- 'id: chat_1\n',
237
- 'event: usage\n',
238
- `data: {"inputImageTokens":258,"inputTextTokens":8,"outputImageTokens":0,"outputTextTokens":0,"totalInputTokens":266,"totalOutputTokens":0,"totalTokens":266}\n\n`,
239
- ]);
240
- });
405
+ it('should handle stop with content and thought', async () => {
406
+ vi.spyOn(uuidModule, 'nanoid').mockReturnValueOnce('1');
241
407
 
242
- it('should handle token count with cached token count', async () => {
243
- vi.spyOn(uuidModule, 'nanoid').mockReturnValueOnce('1');
244
-
245
- const data = {
246
- candidates: [{ content: { role: 'model' }, finishReason: 'STOP', index: 0 }],
247
- usageMetadata: {
248
- promptTokenCount: 15725,
249
- candidatesTokenCount: 1053,
250
- totalTokenCount: 16778,
251
- cachedContentTokenCount: 14286,
252
- promptTokensDetails: [{ modality: 'TEXT', tokenCount: 15725 }],
253
- cacheTokensDetails: [{ modality: 'TEXT', tokenCount: 14286 }],
254
- },
255
- modelVersion: 'gemini-2.0-flash-exp',
256
- };
257
-
258
- const mockGoogleStream = new ReadableStream({
259
- start(controller) {
260
- controller.enqueue(data);
261
-
262
- controller.close();
263
- },
264
- });
408
+ const data = [
409
+ {
410
+ candidates: [
411
+ {
412
+ content: { parts: [{ text: '234' }], role: 'model' },
413
+ safetyRatings: [
414
+ { category: 'HARM_CATEGORY_HATE_SPEECH', probability: 'NEGLIGIBLE' },
415
+ { category: 'HARM_CATEGORY_DANGEROUS_CONTENT', probability: 'NEGLIGIBLE' },
416
+ { category: 'HARM_CATEGORY_HARASSMENT', probability: 'NEGLIGIBLE' },
417
+ { category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT', probability: 'NEGLIGIBLE' },
418
+ ],
419
+ },
420
+ ],
421
+ text: '234',
422
+ usageMetadata: {
423
+ promptTokenCount: 19,
424
+ candidatesTokenCount: 3,
425
+ totalTokenCount: 122,
426
+ promptTokensDetails: [{ modality: 'TEXT', tokenCount: 19 }],
427
+ thoughtsTokenCount: 100,
428
+ },
429
+ modelVersion: 'gemini-2.5-flash-preview-04-17',
430
+ },
431
+ {
432
+ text: '567890\n',
433
+ candidates: [
434
+ {
435
+ content: { parts: [{ text: '567890\n' }], role: 'model' },
436
+ finishReason: 'STOP',
437
+ safetyRatings: [
438
+ { category: 'HARM_CATEGORY_HATE_SPEECH', probability: 'NEGLIGIBLE' },
439
+ { category: 'HARM_CATEGORY_DANGEROUS_CONTENT', probability: 'NEGLIGIBLE' },
440
+ { category: 'HARM_CATEGORY_HARASSMENT', probability: 'NEGLIGIBLE' },
441
+ { category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT', probability: 'NEGLIGIBLE' },
442
+ ],
443
+ },
444
+ ],
445
+ usageMetadata: {
446
+ promptTokenCount: 19,
447
+ candidatesTokenCount: 11,
448
+ totalTokenCount: 131,
449
+ promptTokensDetails: [{ modality: 'TEXT', tokenCount: 19 }],
450
+ candidatesTokensDetails: [{ modality: 'TEXT', tokenCount: 11 }],
451
+ thoughtsTokenCount: 100,
452
+ },
453
+ modelVersion: 'gemini-2.5-flash-preview-04-17',
454
+ },
455
+ ];
265
456
 
266
- const protocolStream = GoogleGenerativeAIStream(mockGoogleStream);
267
-
268
- const decoder = new TextDecoder();
269
- const chunks = [];
270
-
271
- // @ts-ignore
272
- for await (const chunk of protocolStream) {
273
- chunks.push(decoder.decode(chunk, { stream: true }));
274
- }
275
-
276
- expect(chunks).toEqual([
277
- // stop
278
- 'id: chat_1\n',
279
- 'event: stop\n',
280
- `data: "STOP"\n\n`,
281
- // usage
282
- 'id: chat_1\n',
283
- 'event: usage\n',
284
- `data: {"inputCacheMissTokens":1439,"inputCachedTokens":14286,"inputTextTokens":15725,"outputImageTokens":0,"outputTextTokens":1053,"totalInputTokens":15725,"totalOutputTokens":1053,"totalTokens":16778}\n\n`,
285
- ]);
457
+ const mockGoogleStream = new ReadableStream({
458
+ start(controller) {
459
+ data.forEach((item) => {
460
+ controller.enqueue(item);
461
+ });
462
+
463
+ controller.close();
464
+ },
465
+ });
466
+
467
+ const protocolStream = GoogleGenerativeAIStream(mockGoogleStream);
468
+
469
+ const chunks = await decodeStreamChunks(protocolStream);
470
+
471
+ expect(chunks).toEqual(
472
+ [
473
+ 'id: chat_1',
474
+ 'event: text',
475
+ 'data: "234"\n',
476
+
477
+ 'id: chat_1',
478
+ 'event: text',
479
+ `data: "567890\\n"\n`,
480
+ // stop
481
+ 'id: chat_1',
482
+ 'event: stop',
483
+ `data: "STOP"\n`,
484
+ // usage
485
+ 'id: chat_1',
486
+ 'event: usage',
487
+ `data: {"inputTextTokens":19,"outputImageTokens":0,"outputReasoningTokens":100,"outputTextTokens":11,"totalInputTokens":19,"totalOutputTokens":111,"totalTokens":131}\n`,
488
+ ].map((i) => i + '\n'),
489
+ );
490
+ });
286
491
  });
287
492
 
288
- it('should handle stop with content', async () => {
289
- vi.spyOn(uuidModule, 'nanoid').mockReturnValueOnce('1');
493
+ describe('Usage and Token counting', () => {
494
+ it('should handle token count', async () => {
495
+ vi.spyOn(uuidModule, 'nanoid').mockReturnValueOnce('1');
290
496
 
291
- const data = [
292
- {
293
- candidates: [
294
- {
295
- content: { parts: [{ text: '234' }], role: 'model' },
296
- safetyRatings: [
297
- { category: 'HARM_CATEGORY_HATE_SPEECH', probability: 'NEGLIGIBLE' },
298
- { category: 'HARM_CATEGORY_DANGEROUS_CONTENT', probability: 'NEGLIGIBLE' },
299
- { category: 'HARM_CATEGORY_HARASSMENT', probability: 'NEGLIGIBLE' },
300
- { category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT', probability: 'NEGLIGIBLE' },
301
- ],
302
- },
303
- ],
304
- text: '234',
497
+ const data = {
498
+ candidates: [{ content: { role: 'model' }, finishReason: 'STOP', index: 0 }],
305
499
  usageMetadata: {
306
- promptTokenCount: 20,
307
- totalTokenCount: 20,
308
- promptTokensDetails: [{ modality: 'TEXT', tokenCount: 20 }],
309
- },
310
- modelVersion: 'gemini-2.0-flash-exp-image-generation',
311
- },
312
- {
313
- text: '567890\n',
314
- candidates: [
315
- {
316
- content: { parts: [{ text: '567890\n' }], role: 'model' },
317
- finishReason: 'STOP',
318
- safetyRatings: [
319
- { category: 'HARM_CATEGORY_HATE_SPEECH', probability: 'NEGLIGIBLE' },
320
- { category: 'HARM_CATEGORY_DANGEROUS_CONTENT', probability: 'NEGLIGIBLE' },
321
- { category: 'HARM_CATEGORY_HARASSMENT', probability: 'NEGLIGIBLE' },
322
- { category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT', probability: 'NEGLIGIBLE' },
323
- ],
324
- },
325
- ],
326
- usageMetadata: {
327
- promptTokenCount: 19,
328
- candidatesTokenCount: 11,
329
- totalTokenCount: 30,
330
- promptTokensDetails: [{ modality: 'TEXT', tokenCount: 19 }],
331
- candidatesTokensDetails: [{ modality: 'TEXT', tokenCount: 11 }],
332
- },
333
- modelVersion: 'gemini-2.0-flash-exp-image-generation',
334
- },
335
- ];
336
-
337
- const mockGoogleStream = new ReadableStream({
338
- start(controller) {
339
- data.forEach((item) => {
340
- controller.enqueue(item);
341
- });
342
-
343
- controller.close();
344
- },
345
- });
500
+ promptTokenCount: 266,
501
+ totalTokenCount: 266,
502
+ promptTokensDetails: [
503
+ { modality: 'TEXT', tokenCount: 8 },
504
+ { modality: 'IMAGE', tokenCount: 258 },
505
+ ],
506
+ },
507
+ modelVersion: 'gemini-2.0-flash-exp',
508
+ };
346
509
 
347
- const protocolStream = GoogleGenerativeAIStream(mockGoogleStream);
510
+ const mockGoogleStream = new ReadableStream({
511
+ start(controller) {
512
+ controller.enqueue(data);
348
513
 
349
- const decoder = new TextDecoder();
350
- const chunks = [];
514
+ controller.close();
515
+ },
516
+ });
351
517
 
352
- // @ts-ignore
353
- for await (const chunk of protocolStream) {
354
- chunks.push(decoder.decode(chunk, { stream: true }));
355
- }
518
+ const protocolStream = GoogleGenerativeAIStream(mockGoogleStream);
356
519
 
357
- expect(chunks).toEqual(
358
- [
359
- 'id: chat_1',
360
- 'event: text',
361
- 'data: "234"\n',
520
+ const chunks = await decodeStreamChunks(protocolStream);
362
521
 
363
- 'id: chat_1',
364
- 'event: text',
365
- `data: "567890\\n"\n`,
522
+ expect(chunks).toEqual([
366
523
  // stop
367
- 'id: chat_1',
368
- 'event: stop',
369
- `data: "STOP"\n`,
524
+ 'id: chat_1\n',
525
+ 'event: stop\n',
526
+ `data: "STOP"\n\n`,
370
527
  // usage
371
- 'id: chat_1',
372
- 'event: usage',
373
- `data: {"inputTextTokens":19,"outputImageTokens":0,"outputTextTokens":11,"totalInputTokens":19,"totalOutputTokens":11,"totalTokens":30}\n`,
374
- ].map((i) => i + '\n'),
375
- );
376
- });
528
+ 'id: chat_1\n',
529
+ 'event: usage\n',
530
+ `data: {"inputImageTokens":258,"inputTextTokens":8,"outputImageTokens":0,"outputTextTokens":0,"totalInputTokens":266,"totalOutputTokens":0,"totalTokens":266}\n\n`,
531
+ ]);
532
+ });
377
533
 
378
- it('should handle stop with content and thought', async () => {
379
- vi.spyOn(uuidModule, 'nanoid').mockReturnValueOnce('1');
534
+ it('should handle token count with cached token count', async () => {
535
+ vi.spyOn(uuidModule, 'nanoid').mockReturnValueOnce('1');
380
536
 
381
- const data = [
382
- {
383
- candidates: [
384
- {
385
- content: { parts: [{ text: '234' }], role: 'model' },
386
- safetyRatings: [
387
- { category: 'HARM_CATEGORY_HATE_SPEECH', probability: 'NEGLIGIBLE' },
388
- { category: 'HARM_CATEGORY_DANGEROUS_CONTENT', probability: 'NEGLIGIBLE' },
389
- { category: 'HARM_CATEGORY_HARASSMENT', probability: 'NEGLIGIBLE' },
390
- { category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT', probability: 'NEGLIGIBLE' },
391
- ],
392
- },
393
- ],
394
- text: '234',
537
+ const data = {
538
+ candidates: [{ content: { role: 'model' }, finishReason: 'STOP', index: 0 }],
395
539
  usageMetadata: {
396
- promptTokenCount: 19,
397
- candidatesTokenCount: 3,
398
- totalTokenCount: 122,
399
- promptTokensDetails: [{ modality: 'TEXT', tokenCount: 19 }],
400
- thoughtsTokenCount: 100,
401
- },
402
- modelVersion: 'gemini-2.5-flash-preview-04-17',
403
- },
404
- {
405
- text: '567890\n',
406
- candidates: [
407
- {
408
- content: { parts: [{ text: '567890\n' }], role: 'model' },
409
- finishReason: 'STOP',
410
- safetyRatings: [
411
- { category: 'HARM_CATEGORY_HATE_SPEECH', probability: 'NEGLIGIBLE' },
412
- { category: 'HARM_CATEGORY_DANGEROUS_CONTENT', probability: 'NEGLIGIBLE' },
413
- { category: 'HARM_CATEGORY_HARASSMENT', probability: 'NEGLIGIBLE' },
414
- { category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT', probability: 'NEGLIGIBLE' },
415
- ],
416
- },
417
- ],
418
- usageMetadata: {
419
- promptTokenCount: 19,
420
- candidatesTokenCount: 11,
421
- totalTokenCount: 131,
422
- promptTokensDetails: [{ modality: 'TEXT', tokenCount: 19 }],
423
- candidatesTokensDetails: [{ modality: 'TEXT', tokenCount: 11 }],
424
- thoughtsTokenCount: 100,
425
- },
426
- modelVersion: 'gemini-2.5-flash-preview-04-17',
427
- },
428
- ];
429
-
430
- const mockGoogleStream = new ReadableStream({
431
- start(controller) {
432
- data.forEach((item) => {
433
- controller.enqueue(item);
434
- });
435
-
436
- controller.close();
437
- },
438
- });
540
+ promptTokenCount: 15725,
541
+ candidatesTokenCount: 1053,
542
+ totalTokenCount: 16778,
543
+ cachedContentTokenCount: 14286,
544
+ promptTokensDetails: [{ modality: 'TEXT', tokenCount: 15725 }],
545
+ cacheTokensDetails: [{ modality: 'TEXT', tokenCount: 14286 }],
546
+ },
547
+ modelVersion: 'gemini-2.0-flash-exp',
548
+ };
439
549
 
440
- const protocolStream = GoogleGenerativeAIStream(mockGoogleStream);
550
+ const mockGoogleStream = new ReadableStream({
551
+ start(controller) {
552
+ controller.enqueue(data);
441
553
 
442
- const decoder = new TextDecoder();
443
- const chunks = [];
554
+ controller.close();
555
+ },
556
+ });
444
557
 
445
- // @ts-ignore
446
- for await (const chunk of protocolStream) {
447
- chunks.push(decoder.decode(chunk, { stream: true }));
448
- }
558
+ const protocolStream = GoogleGenerativeAIStream(mockGoogleStream);
449
559
 
450
- expect(chunks).toEqual(
451
- [
452
- 'id: chat_1',
453
- 'event: text',
454
- 'data: "234"\n',
560
+ const chunks = await decodeStreamChunks(protocolStream);
455
561
 
456
- 'id: chat_1',
457
- 'event: text',
458
- `data: "567890\\n"\n`,
562
+ expect(chunks).toEqual([
459
563
  // stop
460
- 'id: chat_1',
461
- 'event: stop',
462
- `data: "STOP"\n`,
564
+ 'id: chat_1\n',
565
+ 'event: stop\n',
566
+ `data: "STOP"\n\n`,
463
567
  // usage
464
- 'id: chat_1',
465
- 'event: usage',
466
- `data: {"inputTextTokens":19,"outputImageTokens":0,"outputReasoningTokens":100,"outputTextTokens":11,"totalInputTokens":19,"totalOutputTokens":111,"totalTokens":131}\n`,
467
- ].map((i) => i + '\n'),
468
- );
469
- });
568
+ 'id: chat_1\n',
569
+ 'event: usage\n',
570
+ `data: {"inputCacheMissTokens":1439,"inputCachedTokens":14286,"inputTextTokens":15725,"outputImageTokens":0,"outputTextTokens":1053,"totalInputTokens":15725,"totalOutputTokens":1053,"totalTokens":16778}\n\n`,
571
+ ]);
572
+ });
470
573
 
471
- it('should handle thought candidate part', async () => {
472
- vi.spyOn(uuidModule, 'nanoid').mockReturnValueOnce('1');
574
+ it('should handle stop with content', async () => {
575
+ vi.spyOn(uuidModule, 'nanoid').mockReturnValueOnce('1');
473
576
 
474
- const data = [
475
- {
476
- candidates: [
477
- {
478
- content: {
479
- parts: [{ text: '**Understanding the Conditional Logic**\n\n', thought: true }],
480
- role: 'model',
577
+ const data = [
578
+ {
579
+ candidates: [
580
+ {
581
+ content: { parts: [{ text: '234' }], role: 'model' },
582
+ safetyRatings: [
583
+ { category: 'HARM_CATEGORY_HATE_SPEECH', probability: 'NEGLIGIBLE' },
584
+ { category: 'HARM_CATEGORY_DANGEROUS_CONTENT', probability: 'NEGLIGIBLE' },
585
+ { category: 'HARM_CATEGORY_HARASSMENT', probability: 'NEGLIGIBLE' },
586
+ { category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT', probability: 'NEGLIGIBLE' },
587
+ ],
481
588
  },
482
- index: 0,
589
+ ],
590
+ text: '234',
591
+ usageMetadata: {
592
+ promptTokenCount: 20,
593
+ totalTokenCount: 20,
594
+ promptTokensDetails: [{ modality: 'TEXT', tokenCount: 20 }],
483
595
  },
484
- ],
485
- text: '**Understanding the Conditional Logic**\n\n',
486
- usageMetadata: {
487
- promptTokenCount: 38,
488
- candidatesTokenCount: 7,
489
- totalTokenCount: 301,
490
- promptTokensDetails: [{ modality: 'TEXT', tokenCount: 38 }],
491
- thoughtsTokenCount: 256,
492
- },
493
- modelVersion: 'models/gemini-2.5-flash-preview-04-17',
494
- },
495
- {
496
- candidates: [
497
- {
498
- content: {
499
- parts: [{ text: '**Finalizing Interpretation**\n\n', thought: true }],
500
- role: 'model',
596
+ modelVersion: 'gemini-2.0-flash-exp-image-generation',
597
+ },
598
+ {
599
+ text: '567890\n',
600
+ candidates: [
601
+ {
602
+ content: { parts: [{ text: '567890\n' }], role: 'model' },
603
+ finishReason: 'STOP',
604
+ safetyRatings: [
605
+ { category: 'HARM_CATEGORY_HATE_SPEECH', probability: 'NEGLIGIBLE' },
606
+ { category: 'HARM_CATEGORY_DANGEROUS_CONTENT', probability: 'NEGLIGIBLE' },
607
+ { category: 'HARM_CATEGORY_HARASSMENT', probability: 'NEGLIGIBLE' },
608
+ { category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT', probability: 'NEGLIGIBLE' },
609
+ ],
501
610
  },
502
- index: 0,
611
+ ],
612
+ usageMetadata: {
613
+ promptTokenCount: 19,
614
+ candidatesTokenCount: 11,
615
+ totalTokenCount: 30,
616
+ promptTokensDetails: [{ modality: 'TEXT', tokenCount: 19 }],
617
+ candidatesTokensDetails: [{ modality: 'TEXT', tokenCount: 11 }],
503
618
  },
504
- ],
505
- text: '**Finalizing Interpretation**\n\n',
506
- usageMetadata: {
507
- promptTokenCount: 38,
508
- candidatesTokenCount: 13,
509
- totalTokenCount: 355,
510
- promptTokensDetails: [{ modality: 'TEXT', tokenCount: 38 }],
511
- thoughtsTokenCount: 304,
512
- },
513
- modelVersion: 'models/gemini-2.5-flash-preview-04-17',
514
- },
515
- {
619
+ modelVersion: 'gemini-2.0-flash-exp-image-generation',
620
+ },
621
+ ];
622
+
623
+ const mockGoogleStream = new ReadableStream({
624
+ start(controller) {
625
+ data.forEach((item) => {
626
+ controller.enqueue(item);
627
+ });
628
+
629
+ controller.close();
630
+ },
631
+ });
632
+
633
+ const protocolStream = GoogleGenerativeAIStream(mockGoogleStream);
634
+
635
+ const chunks = await decodeStreamChunks(protocolStream);
636
+
637
+ expect(chunks).toEqual(
638
+ [
639
+ 'id: chat_1',
640
+ 'event: text',
641
+ 'data: "234"\n',
642
+
643
+ 'id: chat_1',
644
+ 'event: text',
645
+ `data: "567890\\n"\n`,
646
+ // stop
647
+ 'id: chat_1',
648
+ 'event: stop',
649
+ `data: "STOP"\n`,
650
+ // usage
651
+ 'id: chat_1',
652
+ 'event: usage',
653
+ `data: {"inputTextTokens":19,"outputImageTokens":0,"outputTextTokens":11,"totalInputTokens":19,"totalOutputTokens":11,"totalTokens":30}\n`,
654
+ ].map((i) => i + '\n'),
655
+ );
656
+ });
657
+ });
658
+
659
+ describe('Special content types', () => {
660
+ it('should handle image', async () => {
661
+ vi.spyOn(uuidModule, 'nanoid').mockReturnValueOnce('1');
662
+
663
+ const data = {
516
664
  candidates: [
517
665
  {
518
666
  content: {
519
- parts: [{ text: '简单来说,' }],
667
+ parts: [{ inlineData: { mimeType: 'image/png', data: 'iVBORw0KGgoAA' } }],
520
668
  role: 'model',
521
669
  },
522
- index: 0,
523
- },
524
- ],
525
- text: '简单来说,',
526
- usageMetadata: {
527
- promptTokenCount: 38,
528
- candidatesTokenCount: 16,
529
- totalTokenCount: 358,
530
- promptTokensDetails: [{ modality: 'TEXT', tokenCount: 38 }],
531
- thoughtsTokenCount: 304,
532
- },
533
- modelVersion: 'models/gemini-2.5-flash-preview-04-17',
534
- },
535
- {
536
- candidates: [
537
- {
538
- content: { parts: [{ text: '文本内容。' }], role: 'model' },
539
670
  finishReason: 'STOP',
540
671
  index: 0,
541
672
  },
542
673
  ],
543
- text: '文本内容。',
544
674
  usageMetadata: {
545
- promptTokenCount: 38,
546
- candidatesTokenCount: 19,
547
- totalTokenCount: 361,
548
- promptTokensDetails: [{ modality: 'TEXT', tokenCount: 38 }],
549
- thoughtsTokenCount: 304,
550
- },
551
- modelVersion: 'models/gemini-2.5-flash-preview-04-17',
552
- },
553
- ];
554
-
555
- const mockGoogleStream = new ReadableStream({
556
- start(controller) {
557
- data.forEach((item) => {
558
- controller.enqueue(item);
559
- });
560
-
561
- controller.close();
562
- },
563
- });
564
-
565
- const protocolStream = GoogleGenerativeAIStream(mockGoogleStream);
566
-
567
- const decoder = new TextDecoder();
568
- const chunks = [];
675
+ promptTokenCount: 6,
676
+ totalTokenCount: 6,
677
+ promptTokensDetails: [
678
+ { modality: 'TEXT', tokenCount: 6 },
679
+ { modality: 'IMAGE', tokenCount: 0 },
680
+ ],
681
+ },
682
+ modelVersion: 'gemini-2.0-flash-exp',
683
+ };
569
684
 
570
- // @ts-ignore
571
- for await (const chunk of protocolStream) {
572
- chunks.push(decoder.decode(chunk, { stream: true }));
573
- }
685
+ const mockGoogleStream = new ReadableStream({
686
+ start(controller) {
687
+ controller.enqueue(data);
574
688
 
575
- expect(chunks).toEqual(
576
- [
577
- 'id: chat_1',
578
- 'event: reasoning',
579
- 'data: "**Understanding the Conditional Logic**\\n\\n"\n',
689
+ controller.close();
690
+ },
691
+ });
580
692
 
581
- 'id: chat_1',
582
- 'event: reasoning',
583
- `data: "**Finalizing Interpretation**\\n\\n"\n`,
693
+ const protocolStream = GoogleGenerativeAIStream(mockGoogleStream);
584
694
 
585
- 'id: chat_1',
586
- 'event: text',
587
- `data: "简单来说,"\n`,
695
+ const chunks = await decodeStreamChunks(protocolStream);
588
696
 
589
- 'id: chat_1',
590
- 'event: text',
591
- `data: "文本内容。"\n`,
697
+ expect(chunks).toEqual([
698
+ // image
699
+ 'id: chat_1\n',
700
+ 'event: base64_image\n',
701
+ `data: "data:image/png;base64,iVBORw0KGgoAA"\n\n`,
592
702
  // stop
593
- 'id: chat_1',
594
- 'event: stop',
595
- `data: "STOP"\n`,
703
+ 'id: chat_1\n',
704
+ 'event: stop\n',
705
+ `data: "STOP"\n\n`,
596
706
  // usage
597
- 'id: chat_1',
598
- 'event: usage',
599
- `data: {"inputTextTokens":38,"outputImageTokens":0,"outputReasoningTokens":304,"outputTextTokens":19,"totalInputTokens":38,"totalOutputTokens":323,"totalTokens":361}\n`,
600
- ].map((i) => i + '\n'),
601
- );
602
- });
707
+ 'id: chat_1\n',
708
+ 'event: usage\n',
709
+ `data: {"inputImageTokens":0,"inputTextTokens":6,"outputImageTokens":0,"outputTextTokens":0,"totalInputTokens":6,"totalOutputTokens":0,"totalTokens":6}\n\n`,
710
+ ]);
711
+ });
603
712
 
604
- it('should return undefined data without text', async () => {
605
- vi.spyOn(uuidModule, 'nanoid').mockReturnValueOnce('1');
713
+ it('should handle groundingMetadata', async () => {
714
+ vi.spyOn(uuidModule, 'nanoid').mockReturnValueOnce('1');
606
715
 
607
- const data = [
608
- {
609
- candidates: [
610
- {
611
- content: { parts: [{ text: '234' }], role: 'model' },
612
- safetyRatings: [
613
- { category: 'HARM_CATEGORY_HATE_SPEECH', probability: 'NEGLIGIBLE' },
614
- { category: 'HARM_CATEGORY_DANGEROUS_CONTENT', probability: 'NEGLIGIBLE' },
615
- { category: 'HARM_CATEGORY_HARASSMENT', probability: 'NEGLIGIBLE' },
616
- { category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT', probability: 'NEGLIGIBLE' },
617
- ],
618
- },
619
- ],
620
- text: '234',
621
- usageMetadata: {
622
- promptTokenCount: 19,
623
- candidatesTokenCount: 3,
624
- totalTokenCount: 122,
625
- promptTokensDetails: [{ modality: 'TEXT', tokenCount: 19 }],
626
- thoughtsTokenCount: 100,
627
- },
628
- modelVersion: 'gemini-2.5-flash-preview-04-17',
629
- },
630
- {
631
- text: '',
632
- candidates: [
633
- {
634
- content: { parts: [{ text: '' }], role: 'model' },
635
- safetyRatings: [
636
- { category: 'HARM_CATEGORY_HATE_SPEECH', probability: 'NEGLIGIBLE' },
637
- { category: 'HARM_CATEGORY_DANGEROUS_CONTENT', probability: 'NEGLIGIBLE' },
638
- { category: 'HARM_CATEGORY_HARASSMENT', probability: 'NEGLIGIBLE' },
639
- { category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT', probability: 'NEGLIGIBLE' },
716
+ const data = [
717
+ {
718
+ text: '123',
719
+ candidates: [
720
+ {
721
+ content: {
722
+ parts: [
723
+ {
724
+ text: '123',
725
+ },
726
+ ],
727
+ role: 'model',
728
+ },
729
+ index: 0,
730
+ groundingMetadata: {},
731
+ },
732
+ ],
733
+ usageMetadata: {
734
+ promptTokenCount: 9,
735
+ candidatesTokenCount: 18,
736
+ totalTokenCount: 27,
737
+ promptTokensDetails: [
738
+ {
739
+ modality: 'TEXT',
740
+ tokenCount: 9,
741
+ },
640
742
  ],
641
743
  },
642
- ],
643
- usageMetadata: {
644
- promptTokenCount: 19,
645
- candidatesTokenCount: 3,
646
- totalTokenCount: 122,
647
- promptTokensDetails: [{ modality: 'TEXT', tokenCount: 19 }],
648
- candidatesTokensDetails: [{ modality: 'TEXT', tokenCount: 3 }],
649
- thoughtsTokenCount: 100,
650
- },
651
- modelVersion: 'gemini-2.5-flash-preview-04-17',
652
- },
653
- {
654
- text: '567890\n',
655
- candidates: [
656
- {
657
- content: { parts: [{ text: '567890\n' }], role: 'model' },
658
- finishReason: 'STOP',
659
- safetyRatings: [
660
- { category: 'HARM_CATEGORY_HATE_SPEECH', probability: 'NEGLIGIBLE' },
661
- { category: 'HARM_CATEGORY_DANGEROUS_CONTENT', probability: 'NEGLIGIBLE' },
662
- { category: 'HARM_CATEGORY_HARASSMENT', probability: 'NEGLIGIBLE' },
663
- { category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT', probability: 'NEGLIGIBLE' },
744
+ modelVersion: 'models/gemini-2.5-flash-preview-04-17',
745
+ },
746
+ {
747
+ text: '45678',
748
+ candidates: [
749
+ {
750
+ content: {
751
+ parts: [
752
+ {
753
+ text: '45678',
754
+ },
755
+ ],
756
+ role: 'model',
757
+ },
758
+ finishReason: 'STOP',
759
+ index: 0,
760
+ groundingMetadata: {
761
+ searchEntryPoint: {
762
+ renderedContent: 'content\n',
763
+ },
764
+ groundingChunks: [
765
+ {
766
+ web: {
767
+ uri: 'https://vertexaisearch.cloud.google.com/grounding-api-redirect/AbF9wXG1234545',
768
+ title: 'npmjs.com',
769
+ },
770
+ },
771
+ {
772
+ web: {
773
+ uri: 'https://vertexaisearch.cloud.google.com/grounding-api-redirect/AbF9wXE9288334',
774
+ title: 'google.dev',
775
+ },
776
+ },
777
+ ],
778
+ groundingSupports: [
779
+ {
780
+ segment: {
781
+ startIndex: 63,
782
+ endIndex: 67,
783
+ text: '1。',
784
+ },
785
+ groundingChunkIndices: [0],
786
+ confidenceScores: [1],
787
+ },
788
+ {
789
+ segment: {
790
+ startIndex: 69,
791
+ endIndex: 187,
792
+ text: 'SDK。',
793
+ },
794
+ groundingChunkIndices: [1],
795
+ confidenceScores: [1],
796
+ },
797
+ ],
798
+ webSearchQueries: ['sdk latest version'],
799
+ },
800
+ },
801
+ ],
802
+ usageMetadata: {
803
+ promptTokenCount: 9,
804
+ candidatesTokenCount: 122,
805
+ totalTokenCount: 131,
806
+ promptTokensDetails: [
807
+ {
808
+ modality: 'TEXT',
809
+ tokenCount: 9,
810
+ },
664
811
  ],
665
812
  },
666
- ],
667
- usageMetadata: {
668
- promptTokenCount: 19,
669
- candidatesTokenCount: 11,
670
- totalTokenCount: 131,
671
- promptTokensDetails: [{ modality: 'TEXT', tokenCount: 19 }],
672
- candidatesTokensDetails: [{ modality: 'TEXT', tokenCount: 11 }],
673
- thoughtsTokenCount: 100,
674
- },
675
- modelVersion: 'gemini-2.5-flash-preview-04-17',
676
- },
677
- ];
678
-
679
- const mockGoogleStream = new ReadableStream({
680
- start(controller) {
681
- data.forEach((item) => {
682
- controller.enqueue(item);
683
- });
684
-
685
- controller.close();
686
- },
813
+ modelVersion: 'models/gemini-2.5-flash-preview-04-17',
814
+ },
815
+ ];
816
+
817
+ const mockGoogleStream = new ReadableStream({
818
+ start(controller) {
819
+ data.forEach((item) => {
820
+ controller.enqueue(item);
821
+ });
822
+
823
+ controller.close();
824
+ },
825
+ });
826
+
827
+ const protocolStream = GoogleGenerativeAIStream(mockGoogleStream);
828
+
829
+ const chunks = await decodeStreamChunks(protocolStream);
830
+
831
+ expect(chunks).toEqual(
832
+ [
833
+ 'id: chat_1',
834
+ 'event: text',
835
+ 'data: "123"\n',
836
+
837
+ 'id: chat_1',
838
+ 'event: text',
839
+ 'data: "45678"\n',
840
+
841
+ 'id: chat_1',
842
+ 'event: grounding',
843
+ `data: {\"citations\":[{\"favicon\":\"npmjs.com\",\"title\":\"npmjs.com\",\"url\":\"https://vertexaisearch.cloud.google.com/grounding-api-redirect/AbF9wXG1234545\"},{\"favicon\":\"google.dev\",\"title\":\"google.dev\",\"url\":\"https://vertexaisearch.cloud.google.com/grounding-api-redirect/AbF9wXE9288334\"}],\"searchQueries\":[\"sdk latest version\"]}\n`,
844
+ // stop
845
+ 'id: chat_1',
846
+ 'event: stop',
847
+ `data: "STOP"\n`,
848
+ // usage
849
+ 'id: chat_1',
850
+ 'event: usage',
851
+ `data: {"inputTextTokens":9,"outputImageTokens":0,"outputTextTokens":122,"totalInputTokens":9,"totalOutputTokens":122,"totalTokens":131}\n`,
852
+ ].map((i) => i + '\n'),
853
+ );
687
854
  });
855
+ });
688
856
 
689
- const protocolStream = GoogleGenerativeAIStream(mockGoogleStream);
857
+ describe('Tool calls', () => {
858
+ it('should handle tool calls with thoughtSignature', async () => {
859
+ vi.spyOn(uuidModule, 'nanoid').mockReturnValueOnce('1').mockReturnValueOnce('abcd1234');
690
860
 
691
- const decoder = new TextDecoder();
692
- const chunks = [];
861
+ const data = [
862
+ {
863
+ candidates: [
864
+ {
865
+ content: {
866
+ parts: [
867
+ {
868
+ functionCall: {
869
+ name: 'grep____searchGitHub____mcp',
870
+ args: {
871
+ query: '"version":',
872
+ repo: 'lobehub/lobe-chat',
873
+ path: 'package.json',
874
+ },
875
+ },
876
+ thoughtSignature: '123',
877
+ },
878
+ ],
879
+ role: 'model',
880
+ },
881
+ index: 0,
882
+ },
883
+ ],
884
+ modelVersion: 'gemini-3-pro-preview',
885
+ responseId: 'UVcdaZ26ILac_uMP9ZOeiQ0',
886
+ usageMetadata: {
887
+ promptTokenCount: 1171,
888
+ candidatesTokenCount: 41,
889
+ totalTokenCount: 1408,
890
+ promptTokensDetails: [{ modality: 'TEXT', tokenCount: 1171 }],
891
+ thoughtsTokenCount: 196,
892
+ },
893
+ },
894
+ {
895
+ candidates: [
896
+ {
897
+ content: { parts: [{ text: '' }], role: 'model' },
898
+ finishReason: 'STOP',
899
+ index: 0,
900
+ },
901
+ ],
902
+ modelVersion: 'gemini-3-pro-preview',
903
+ responseId: 'UVcdaZ26ILac_uMP9ZOeiQ0',
904
+ usageMetadata: {
905
+ promptTokenCount: 1171,
906
+ candidatesTokenCount: 41,
907
+ totalTokenCount: 1408,
908
+ promptTokensDetails: [{ modality: 'TEXT', tokenCount: 1171 }],
909
+ thoughtsTokenCount: 196,
910
+ },
911
+ },
912
+ ];
913
+
914
+ const mockGoogleStream = new ReadableStream({
915
+ start(controller) {
916
+ data.forEach((item) => {
917
+ controller.enqueue(item);
918
+ });
919
+ controller.close();
920
+ },
921
+ });
693
922
 
694
- // @ts-ignore
695
- for await (const chunk of protocolStream) {
696
- chunks.push(decoder.decode(chunk, { stream: true }));
697
- }
923
+ const protocolStream = GoogleGenerativeAIStream(mockGoogleStream);
698
924
 
699
- expect(chunks).toEqual(
700
- [
701
- 'id: chat_1',
702
- 'event: text',
703
- 'data: "234"\n',
925
+ const chunks = await decodeStreamChunks(protocolStream);
704
926
 
705
- 'id: chat_1',
706
- 'event: text',
707
- 'data: ""\n',
927
+ expect(chunks).toEqual(
928
+ [
929
+ 'id: chat_1',
930
+ 'event: tool_calls',
931
+ 'data: [{"function":{"arguments":"{\\"query\\":\\"\\\\\\\"version\\\\\\":\\",\\"repo\\":\\"lobehub/lobe-chat\\",\\"path\\":\\"package.json\\"}","name":"grep____searchGitHub____mcp"},"id":"grep____searchGitHub____mcp_0_abcd1234","index":0,"thoughtSignature":"123","type":"function"}]\n',
708
932
 
709
- 'id: chat_1',
710
- 'event: text',
711
- `data: "567890\\n"\n`,
712
- // stop
713
- 'id: chat_1',
714
- 'event: stop',
715
- `data: "STOP"\n`,
716
- // usage
717
- 'id: chat_1',
718
- 'event: usage',
719
- `data: {"inputTextTokens":19,"outputImageTokens":0,"outputReasoningTokens":100,"outputTextTokens":11,"totalInputTokens":19,"totalOutputTokens":111,"totalTokens":131}\n`,
720
- ].map((i) => i + '\n'),
721
- );
722
- });
933
+ 'id: chat_1',
934
+ 'event: stop',
935
+ 'data: "STOP"\n',
723
936
 
724
- it('should handle groundingMetadata', async () => {
725
- vi.spyOn(uuidModule, 'nanoid').mockReturnValueOnce('1');
937
+ 'id: chat_1',
938
+ 'event: usage',
939
+ 'data: {"inputTextTokens":1171,"outputImageTokens":0,"outputReasoningTokens":196,"outputTextTokens":41,"totalInputTokens":1171,"totalOutputTokens":237,"totalTokens":1408}\n',
940
+ ].map((i) => i + '\n'),
941
+ );
942
+ });
726
943
 
727
- const data = [
728
- {
729
- text: '123',
730
- candidates: [
731
- {
732
- content: {
733
- parts: [
734
- {
735
- text: '123',
736
- },
737
- ],
738
- role: 'model',
944
+ it('should handle parallel tool calls', async () => {
945
+ vi.spyOn(uuidModule, 'nanoid')
946
+ .mockReturnValueOnce('1')
947
+ .mockReturnValueOnce('abcd1234')
948
+ .mockReturnValueOnce('efgh5678');
949
+
950
+ const data = [
951
+ {
952
+ candidates: [
953
+ {
954
+ content: {
955
+ parts: [
956
+ {
957
+ functionCall: {
958
+ name: 'get_current_temperature',
959
+ args: {
960
+ location: 'Paris',
961
+ },
962
+ },
963
+ thoughtSignature: 'ErEDCq4DAdHtim...',
964
+ },
965
+ ],
966
+ role: 'model',
967
+ },
968
+ index: 0,
739
969
  },
740
- index: 0,
741
- groundingMetadata: {},
970
+ ],
971
+ usageMetadata: {
972
+ promptTokenCount: 72,
973
+ candidatesTokenCount: 18,
974
+ totalTokenCount: 167,
975
+ promptTokensDetails: [
976
+ {
977
+ modality: 'TEXT',
978
+ tokenCount: 72,
979
+ },
980
+ ],
981
+ thoughtsTokenCount: 77,
742
982
  },
743
- ],
744
- usageMetadata: {
745
- promptTokenCount: 9,
746
- candidatesTokenCount: 18,
747
- totalTokenCount: 27,
748
- promptTokensDetails: [
983
+ modelVersion: 'gemini-3-pro-preview',
984
+ responseId: 'UDcdaZviO4jojMcPycPDkQY',
985
+ },
986
+ {
987
+ candidates: [
749
988
  {
750
- modality: 'TEXT',
751
- tokenCount: 9,
989
+ content: {
990
+ parts: [
991
+ {
992
+ functionCall: {
993
+ name: 'get_current_temperature',
994
+ args: {
995
+ location: 'London',
996
+ },
997
+ },
998
+ },
999
+ ],
1000
+ role: 'model',
1001
+ },
1002
+ index: 0,
752
1003
  },
753
1004
  ],
1005
+ usageMetadata: {
1006
+ promptTokenCount: 72,
1007
+ candidatesTokenCount: 36,
1008
+ totalTokenCount: 185,
1009
+ promptTokensDetails: [
1010
+ {
1011
+ modality: 'TEXT',
1012
+ tokenCount: 72,
1013
+ },
1014
+ ],
1015
+ thoughtsTokenCount: 77,
1016
+ },
1017
+ modelVersion: 'gemini-3-pro-preview',
1018
+ responseId: 'UDcdaZviO4jojMcPycPDkQY',
754
1019
  },
755
- modelVersion: 'models/gemini-2.5-flash-preview-04-17',
756
- },
757
- {
758
- text: '45678',
759
- candidates: [
760
- {
761
- content: {
762
- parts: [
763
- {
764
- text: '45678',
765
- },
766
- ],
767
- role: 'model',
1020
+ {
1021
+ candidates: [
1022
+ {
1023
+ content: {
1024
+ parts: [
1025
+ {
1026
+ text: '',
1027
+ },
1028
+ ],
1029
+ role: 'model',
1030
+ },
1031
+ finishReason: 'STOP',
1032
+ index: 0,
768
1033
  },
769
- finishReason: 'STOP',
770
- index: 0,
771
- groundingMetadata: {
772
- searchEntryPoint: {
773
- renderedContent: 'content\n',
1034
+ ],
1035
+ usageMetadata: {
1036
+ promptTokenCount: 72,
1037
+ candidatesTokenCount: 36,
1038
+ totalTokenCount: 185,
1039
+ promptTokensDetails: [
1040
+ {
1041
+ modality: 'TEXT',
1042
+ tokenCount: 72,
774
1043
  },
775
- groundingChunks: [
776
- {
777
- web: {
778
- uri: 'https://vertexaisearch.cloud.google.com/grounding-api-redirect/AbF9wXG1234545',
779
- title: 'npmjs.com',
780
- },
781
- },
782
- {
783
- web: {
784
- uri: 'https://vertexaisearch.cloud.google.com/grounding-api-redirect/AbF9wXE9288334',
785
- title: 'google.dev',
786
- },
787
- },
788
- ],
789
- groundingSupports: [
790
- {
791
- segment: {
792
- startIndex: 63,
793
- endIndex: 67,
794
- text: '1。',
795
- },
796
- groundingChunkIndices: [0],
797
- confidenceScores: [1],
798
- },
799
- {
800
- segment: {
801
- startIndex: 69,
802
- endIndex: 187,
803
- text: 'SDK。',
1044
+ ],
1045
+ thoughtsTokenCount: 77,
1046
+ },
1047
+ modelVersion: 'gemini-3-pro-preview',
1048
+ responseId: 'UDcdaZviO4jojMcPycPDkQY',
1049
+ },
1050
+ ];
1051
+
1052
+ const mockGoogleStream = new ReadableStream({
1053
+ start(controller) {
1054
+ data.forEach((item) => {
1055
+ controller.enqueue(item);
1056
+ });
1057
+ controller.close();
1058
+ },
1059
+ });
1060
+
1061
+ const protocolStream = GoogleGenerativeAIStream(mockGoogleStream);
1062
+
1063
+ const chunks = await decodeStreamChunks(protocolStream);
1064
+
1065
+ expect(chunks).toEqual(
1066
+ [
1067
+ 'id: chat_1',
1068
+ 'event: tool_calls',
1069
+ 'data: [{"function":{"arguments":"{\\"location\\":\\"Paris\\"}","name":"get_current_temperature"},"id":"get_current_temperature_0_abcd1234","index":0,"thoughtSignature":"ErEDCq4DAdHtim...","type":"function"}]\n',
1070
+
1071
+ 'id: chat_1',
1072
+ 'event: tool_calls',
1073
+ 'data: [{"function":{"arguments":"{\\"location\\":\\"London\\"}","name":"get_current_temperature"},"id":"get_current_temperature_0_efgh5678","index":0,"type":"function"}]\n',
1074
+
1075
+ 'id: chat_1',
1076
+ 'event: stop',
1077
+ 'data: "STOP"\n',
1078
+
1079
+ 'id: chat_1',
1080
+ 'event: usage',
1081
+ 'data: {"inputTextTokens":72,"outputImageTokens":0,"outputReasoningTokens":77,"outputTextTokens":36,"totalInputTokens":72,"totalOutputTokens":113,"totalTokens":185}\n',
1082
+ ].map((i) => i + '\n'),
1083
+ );
1084
+ });
1085
+
1086
+ it('should handle thoughtSignature with empty text', async () => {
1087
+ vi.spyOn(uuidModule, 'nanoid').mockReturnValueOnce('1');
1088
+
1089
+ const data = [
1090
+ {
1091
+ candidates: [
1092
+ {
1093
+ content: {
1094
+ parts: [
1095
+ {
1096
+ text: '你好!很高兴为你服务。请问有什么我可以帮你的吗?\n\n无论是回答问题、协助写作、翻译,还是随便聊聊,我都随时待命!',
804
1097
  },
805
- groundingChunkIndices: [1],
806
- confidenceScores: [1],
807
- },
808
- ],
809
- webSearchQueries: ['sdk latest version'],
1098
+ ],
1099
+ role: 'model',
1100
+ },
1101
+ index: 0,
810
1102
  },
1103
+ ],
1104
+ usageMetadata: {
1105
+ promptTokenCount: 1,
1106
+ candidatesTokenCount: 35,
1107
+ totalTokenCount: 712,
1108
+ promptTokensDetails: [
1109
+ {
1110
+ modality: 'TEXT',
1111
+ tokenCount: 1,
1112
+ },
1113
+ ],
1114
+ thoughtsTokenCount: 676,
811
1115
  },
812
- ],
813
- usageMetadata: {
814
- promptTokenCount: 9,
815
- candidatesTokenCount: 122,
816
- totalTokenCount: 131,
817
- promptTokensDetails: [
1116
+ modelVersion: 'gemini-3-pro-preview',
1117
+ responseId: 'lTcdaf_1FrONjMcP24Sz6QQ',
1118
+ },
1119
+ {
1120
+ candidates: [
818
1121
  {
819
- modality: 'TEXT',
820
- tokenCount: 9,
1122
+ content: {
1123
+ parts: [
1124
+ {
1125
+ text: '',
1126
+ thoughtSignature: 'Ep8YCpwYAdHtim...',
1127
+ },
1128
+ ],
1129
+ role: 'model',
1130
+ },
1131
+ finishReason: 'STOP',
1132
+ index: 0,
821
1133
  },
822
1134
  ],
1135
+ usageMetadata: {
1136
+ promptTokenCount: 1,
1137
+ candidatesTokenCount: 35,
1138
+ totalTokenCount: 712,
1139
+ promptTokensDetails: [
1140
+ {
1141
+ modality: 'TEXT',
1142
+ tokenCount: 1,
1143
+ },
1144
+ ],
1145
+ thoughtsTokenCount: 676,
1146
+ },
1147
+ modelVersion: 'gemini-3-pro-preview',
1148
+ responseId: 'lTcdaf_1FrONjMcP24Sz6QQ',
823
1149
  },
824
- modelVersion: 'models/gemini-2.5-flash-preview-04-17',
825
- },
826
- ];
1150
+ ];
827
1151
 
828
- const mockGoogleStream = new ReadableStream({
829
- start(controller) {
830
- data.forEach((item) => {
831
- controller.enqueue(item);
832
- });
1152
+ const mockGoogleStream = new ReadableStream({
1153
+ start(controller) {
1154
+ data.forEach((item) => {
1155
+ controller.enqueue(item);
1156
+ });
833
1157
 
834
- controller.close();
835
- },
836
- });
837
-
838
- const protocolStream = GoogleGenerativeAIStream(mockGoogleStream);
839
-
840
- const decoder = new TextDecoder();
841
- const chunks = [];
1158
+ controller.close();
1159
+ },
1160
+ });
842
1161
 
843
- // @ts-ignore
844
- for await (const chunk of protocolStream) {
845
- chunks.push(decoder.decode(chunk, { stream: true }));
846
- }
1162
+ const protocolStream = GoogleGenerativeAIStream(mockGoogleStream);
847
1163
 
848
- expect(chunks).toEqual(
849
- [
850
- 'id: chat_1',
851
- 'event: text',
852
- 'data: "123"\n',
1164
+ const chunks = await decodeStreamChunks(protocolStream);
853
1165
 
854
- 'id: chat_1',
855
- 'event: text',
856
- 'data: "45678"\n',
1166
+ expect(chunks).toEqual(
1167
+ [
1168
+ 'id: chat_1',
1169
+ 'event: text',
1170
+ 'data: "你好!很高兴为你服务。请问有什么我可以帮你的吗?\\n\\n无论是回答问题、协助写作、翻译,还是随便聊聊,我都随时待命!"\n',
857
1171
 
858
- 'id: chat_1',
859
- 'event: grounding',
860
- `data: {\"citations\":[{\"favicon\":\"npmjs.com\",\"title\":\"npmjs.com\",\"url\":\"https://vertexaisearch.cloud.google.com/grounding-api-redirect/AbF9wXG1234545\"},{\"favicon\":\"google.dev\",\"title\":\"google.dev\",\"url\":\"https://vertexaisearch.cloud.google.com/grounding-api-redirect/AbF9wXE9288334\"}],\"searchQueries\":[\"sdk latest version\"]}\n`,
861
- // stop
862
- 'id: chat_1',
863
- 'event: stop',
864
- `data: "STOP"\n`,
865
- // usage
866
- 'id: chat_1',
867
- 'event: usage',
868
- `data: {"inputTextTokens":9,"outputImageTokens":0,"outputTextTokens":122,"totalInputTokens":9,"totalOutputTokens":122,"totalTokens":131}\n`,
869
- ].map((i) => i + '\n'),
870
- );
871
- });
1172
+ 'id: chat_1',
1173
+ 'event: stop',
1174
+ 'data: "STOP"\n',
872
1175
 
873
- it('should handle promptFeedback with blockReason (PROHIBITED_CONTENT)', async () => {
874
- vi.spyOn(uuidModule, 'nanoid').mockReturnValueOnce('1');
875
-
876
- const data = {
877
- promptFeedback: {
878
- blockReason: 'PROHIBITED_CONTENT',
879
- },
880
- usageMetadata: {
881
- promptTokenCount: 4438,
882
- totalTokenCount: 4438,
883
- promptTokensDetails: [{ modality: 'TEXT', tokenCount: 4438 }],
884
- },
885
- modelVersion: 'gemini-2.5-pro',
886
- responseId: 'THOUaKaNOeiGz7IPjL_VgQc',
887
- };
888
-
889
- const mockGoogleStream = new ReadableStream({
890
- start(controller) {
891
- controller.enqueue(data);
892
- controller.close();
893
- },
1176
+ 'id: chat_1',
1177
+ 'event: usage',
1178
+ 'data: {"inputTextTokens":1,"outputImageTokens":0,"outputReasoningTokens":676,"outputTextTokens":35,"totalInputTokens":1,"totalOutputTokens":711,"totalTokens":712}\n',
1179
+ ].map((i) => i + '\n'),
1180
+ );
894
1181
  });
1182
+ });
895
1183
 
896
- const protocolStream = GoogleGenerativeAIStream(mockGoogleStream);
1184
+ describe('Error handling', () => {
1185
+ it('should handle promptFeedback with blockReason (PROHIBITED_CONTENT)', async () => {
1186
+ vi.spyOn(uuidModule, 'nanoid').mockReturnValueOnce('1');
897
1187
 
898
- const decoder = new TextDecoder();
899
- const chunks = [];
1188
+ const data = {
1189
+ promptFeedback: {
1190
+ blockReason: 'PROHIBITED_CONTENT',
1191
+ },
1192
+ usageMetadata: {
1193
+ promptTokenCount: 4438,
1194
+ totalTokenCount: 4438,
1195
+ promptTokensDetails: [{ modality: 'TEXT', tokenCount: 4438 }],
1196
+ },
1197
+ modelVersion: 'gemini-2.5-pro',
1198
+ responseId: 'THOUaKaNOeiGz7IPjL_VgQc',
1199
+ };
1200
+
1201
+ const mockGoogleStream = new ReadableStream({
1202
+ start(controller) {
1203
+ controller.enqueue(data);
1204
+ controller.close();
1205
+ },
1206
+ });
900
1207
 
901
- // @ts-ignore
902
- for await (const chunk of protocolStream) {
903
- chunks.push(decoder.decode(chunk, { stream: true }));
904
- }
1208
+ const protocolStream = GoogleGenerativeAIStream(mockGoogleStream);
905
1209
 
906
- expect(chunks).toEqual([
907
- 'id: chat_1\n',
908
- 'event: error\n',
909
- `data: {"body":{"context":{"promptFeedback":{"blockReason":"PROHIBITED_CONTENT"}},"message":"Your request may contain prohibited content. Please adjust your request to comply with the usage guidelines.","provider":"google"},"type":"ProviderBizError"}\n\n`,
910
- ]);
911
- });
1210
+ const chunks = await decodeStreamChunks(protocolStream);
912
1211
 
913
- it('should pass through injected lobe error marker', async () => {
914
- vi.spyOn(uuidModule, 'nanoid').mockReturnValueOnce('1');
1212
+ expect(chunks).toEqual([
1213
+ 'id: chat_1\n',
1214
+ 'event: error\n',
1215
+ `data: {"body":{"context":{"promptFeedback":{"blockReason":"PROHIBITED_CONTENT"}},"message":"Your request may contain prohibited content. Please adjust your request to comply with the usage guidelines.","provider":"google"},"type":"ProviderBizError"}\n\n`,
1216
+ ]);
1217
+ });
915
1218
 
916
- const errorPayload = { message: 'internal error', code: 123 };
1219
+ it('should pass through injected lobe error marker', async () => {
1220
+ vi.spyOn(uuidModule, 'nanoid').mockReturnValueOnce('1');
917
1221
 
918
- const mockGoogleStream = new ReadableStream({
919
- start(controller) {
920
- controller.enqueue({ [LOBE_ERROR_KEY]: errorPayload });
921
- controller.close();
922
- },
923
- });
1222
+ const errorPayload = { message: 'internal error', code: 123 };
924
1223
 
925
- const protocolStream = GoogleGenerativeAIStream(mockGoogleStream);
1224
+ const mockGoogleStream = new ReadableStream({
1225
+ start(controller) {
1226
+ controller.enqueue({ [LOBE_ERROR_KEY]: errorPayload });
1227
+ controller.close();
1228
+ },
1229
+ });
926
1230
 
927
- const decoder = new TextDecoder();
928
- const chunks = [];
1231
+ const protocolStream = GoogleGenerativeAIStream(mockGoogleStream);
929
1232
 
930
- // @ts-ignore
931
- for await (const chunk of protocolStream) {
932
- chunks.push(decoder.decode(chunk, { stream: true }));
933
- }
1233
+ const chunks = await decodeStreamChunks(protocolStream);
934
1234
 
935
- expect(chunks).toEqual([
936
- 'id: chat_1\n',
937
- 'event: error\n',
938
- `data: ${JSON.stringify(errorPayload)}\n\n`,
939
- ]);
1235
+ expect(chunks).toEqual([
1236
+ 'id: chat_1\n',
1237
+ 'event: error\n',
1238
+ `data: ${JSON.stringify(errorPayload)}\n\n`,
1239
+ ]);
1240
+ });
940
1241
  });
941
1242
  });