@ai-sdk/cohere 3.0.8 → 3.0.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,1591 @@
1
+ import { LanguageModelV3Prompt } from '@ai-sdk/provider';
2
+ import { createTestServer } from '@ai-sdk/test-server/with-vitest';
3
+ import {
4
+ convertReadableStreamToArray,
5
+ isNodeVersion,
6
+ } from '@ai-sdk/provider-utils/test';
7
+ import { createCohere } from './cohere-provider';
8
+ import { describe, it, expect, vi } from 'vitest';
9
+
10
+ const TEST_PROMPT: LanguageModelV3Prompt = [
11
+ {
12
+ role: 'system',
13
+ content: 'you are a friendly bot!',
14
+ },
15
+ { role: 'user', content: [{ type: 'text', text: 'Hello' }] },
16
+ ];
17
+
18
+ const provider = createCohere({
19
+ apiKey: 'test-api-key',
20
+ });
21
+ const model = provider('command-r-plus');
22
+
23
+ const server = createTestServer({
24
+ 'https://api.cohere.com/v2/chat': {},
25
+ });
26
+
27
+ describe('doGenerate', () => {
28
+ function prepareJsonResponse({
29
+ content = [],
30
+ tool_calls,
31
+ finish_reason = 'COMPLETE',
32
+ tokens = {
33
+ input_tokens: 4,
34
+ output_tokens: 30,
35
+ },
36
+ generation_id = 'dad0c7cd-7982-42a7-acfb-706ccf598291',
37
+ headers,
38
+ }: {
39
+ content?: Array<
40
+ { type: 'text'; text: string } | { type: 'thinking'; thinking: string }
41
+ >;
42
+ tool_calls?: any;
43
+ finish_reason?: string;
44
+ tokens?: {
45
+ input_tokens: number;
46
+ output_tokens: number;
47
+ };
48
+ generation_id?: string;
49
+ headers?: Record<string, string>;
50
+ } = {}) {
51
+ server.urls['https://api.cohere.com/v2/chat'].response = {
52
+ type: 'json-value',
53
+ headers,
54
+ body: {
55
+ response_id: '0cf61ae0-1f60-4c18-9802-be7be809e712',
56
+ generation_id,
57
+ message: {
58
+ role: 'assistant',
59
+ content,
60
+ ...(tool_calls ? { tool_calls } : {}),
61
+ },
62
+ finish_reason,
63
+ usage: {
64
+ billed_units: { input_tokens: 9, output_tokens: 415 },
65
+ tokens,
66
+ },
67
+ },
68
+ };
69
+ }
70
+
71
+ it('should extract text response', async () => {
72
+ prepareJsonResponse({ content: [{ type: 'text', text: 'Hello, World!' }] });
73
+
74
+ const { content } = await model.doGenerate({
75
+ prompt: TEST_PROMPT,
76
+ });
77
+
78
+ expect(content).toMatchInlineSnapshot(`
79
+ [
80
+ {
81
+ "text": "Hello, World!",
82
+ "type": "text",
83
+ },
84
+ ]
85
+ `);
86
+ });
87
+
88
+ it('should extract tool calls', async () => {
89
+ prepareJsonResponse({
90
+ content: [{ type: 'text', text: 'Hello, World!' }],
91
+ tool_calls: [
92
+ {
93
+ id: 'test-id-1',
94
+ type: 'function',
95
+ function: {
96
+ name: 'test-tool',
97
+ arguments: '{"value":"example value"}',
98
+ },
99
+ },
100
+ ],
101
+ });
102
+
103
+ const { content, finishReason } = await model.doGenerate({
104
+ tools: [
105
+ {
106
+ type: 'function',
107
+ name: 'test-tool',
108
+ inputSchema: {
109
+ type: 'object',
110
+ properties: { value: { type: 'string' } },
111
+ required: ['value'],
112
+ additionalProperties: false,
113
+ $schema: 'http://json-schema.org/draft-07/schema#',
114
+ },
115
+ },
116
+ ],
117
+ prompt: TEST_PROMPT,
118
+ });
119
+
120
+ expect(content).toMatchInlineSnapshot(`
121
+ [
122
+ {
123
+ "text": "Hello, World!",
124
+ "type": "text",
125
+ },
126
+ {
127
+ "input": "{"value":"example value"}",
128
+ "toolCallId": "test-id-1",
129
+ "toolName": "test-tool",
130
+ "type": "tool-call",
131
+ },
132
+ ]
133
+ `);
134
+ expect(finishReason).toMatchInlineSnapshot(`
135
+ {
136
+ "raw": "COMPLETE",
137
+ "unified": "stop",
138
+ }
139
+ `);
140
+ });
141
+
142
+ it('should extract usage', async () => {
143
+ prepareJsonResponse({
144
+ tokens: { input_tokens: 20, output_tokens: 5 },
145
+ });
146
+
147
+ const { usage } = await model.doGenerate({
148
+ prompt: TEST_PROMPT,
149
+ });
150
+
151
+ expect(usage).toMatchInlineSnapshot(`
152
+ {
153
+ "inputTokens": {
154
+ "cacheRead": undefined,
155
+ "cacheWrite": undefined,
156
+ "noCache": 20,
157
+ "total": 20,
158
+ },
159
+ "outputTokens": {
160
+ "reasoning": undefined,
161
+ "text": 5,
162
+ "total": 5,
163
+ },
164
+ "raw": {
165
+ "input_tokens": 20,
166
+ "output_tokens": 5,
167
+ },
168
+ }
169
+ `);
170
+ });
171
+
172
+ it('should send additional response information', async () => {
173
+ prepareJsonResponse({
174
+ content: [{ type: 'text', text: '' }],
175
+ generation_id: 'test-id',
176
+ });
177
+
178
+ const { response } = await model.doGenerate({
179
+ prompt: TEST_PROMPT,
180
+ });
181
+
182
+ expect(response).toMatchInlineSnapshot(`
183
+ {
184
+ "body": {
185
+ "finish_reason": "COMPLETE",
186
+ "generation_id": "test-id",
187
+ "message": {
188
+ "content": [
189
+ {
190
+ "text": "",
191
+ "type": "text",
192
+ },
193
+ ],
194
+ "role": "assistant",
195
+ },
196
+ "response_id": "0cf61ae0-1f60-4c18-9802-be7be809e712",
197
+ "usage": {
198
+ "billed_units": {
199
+ "input_tokens": 9,
200
+ "output_tokens": 415,
201
+ },
202
+ "tokens": {
203
+ "input_tokens": 4,
204
+ "output_tokens": 30,
205
+ },
206
+ },
207
+ },
208
+ "headers": {
209
+ "content-length": "287",
210
+ "content-type": "application/json",
211
+ },
212
+ "id": "test-id",
213
+ }
214
+ `);
215
+ });
216
+
217
+ it('should extract finish reason', async () => {
218
+ prepareJsonResponse({
219
+ finish_reason: 'MAX_TOKENS',
220
+ });
221
+
222
+ const { finishReason } = await model.doGenerate({
223
+ prompt: TEST_PROMPT,
224
+ });
225
+
226
+ expect(finishReason).toMatchInlineSnapshot(`
227
+ {
228
+ "raw": "MAX_TOKENS",
229
+ "unified": "length",
230
+ }
231
+ `);
232
+ });
233
+
234
+ it('should expose the raw response headers', async () => {
235
+ prepareJsonResponse({
236
+ content: [{ type: 'text', text: '' }],
237
+ headers: { 'test-header': 'test-value' },
238
+ });
239
+
240
+ const { response } = await model.doGenerate({
241
+ prompt: TEST_PROMPT,
242
+ });
243
+
244
+ expect(response?.headers).toStrictEqual({
245
+ // default headers:
246
+ 'content-length': '316',
247
+ 'content-type': 'application/json',
248
+
249
+ // custom header
250
+ 'test-header': 'test-value',
251
+ });
252
+ });
253
+
254
+ it('should pass model and messages', async () => {
255
+ prepareJsonResponse();
256
+
257
+ await model.doGenerate({
258
+ prompt: TEST_PROMPT,
259
+ });
260
+
261
+ expect(await server.calls[0].requestBodyJson).toStrictEqual({
262
+ model: 'command-r-plus',
263
+ messages: [
264
+ { role: 'system', content: 'you are a friendly bot!' },
265
+ { role: 'user', content: 'Hello' },
266
+ ],
267
+ });
268
+ });
269
+
270
+ describe('should pass tools', async () => {
271
+ it('should support "none" tool choice', async () => {
272
+ prepareJsonResponse();
273
+
274
+ await model.doGenerate({
275
+ toolChoice: { type: 'none' },
276
+ tools: [
277
+ {
278
+ type: 'function',
279
+ name: 'test-tool',
280
+ inputSchema: {
281
+ type: 'object',
282
+ properties: {
283
+ value: { type: 'string' },
284
+ },
285
+ required: ['value'],
286
+ additionalProperties: false,
287
+ $schema: 'http://json-schema.org/draft-07/schema#',
288
+ },
289
+ },
290
+ ],
291
+ prompt: TEST_PROMPT,
292
+ });
293
+
294
+ expect(await server.calls[0].requestBodyJson).toStrictEqual({
295
+ model: 'command-r-plus',
296
+ messages: [
297
+ {
298
+ role: 'system',
299
+ content: 'you are a friendly bot!',
300
+ },
301
+ { role: 'user', content: 'Hello' },
302
+ ],
303
+ tool_choice: 'NONE',
304
+ tools: [
305
+ {
306
+ type: 'function',
307
+ function: {
308
+ name: 'test-tool',
309
+ parameters: {
310
+ type: 'object',
311
+ properties: {
312
+ value: { type: 'string' },
313
+ },
314
+ required: ['value'],
315
+ additionalProperties: false,
316
+ $schema: 'http://json-schema.org/draft-07/schema#',
317
+ },
318
+ },
319
+ },
320
+ ],
321
+ });
322
+ });
323
+ });
324
+
325
+ it('should pass headers', async () => {
326
+ prepareJsonResponse();
327
+
328
+ const provider = createCohere({
329
+ apiKey: 'test-api-key',
330
+ headers: {
331
+ 'Custom-Provider-Header': 'provider-header-value',
332
+ },
333
+ });
334
+
335
+ await provider('command-r-plus').doGenerate({
336
+ prompt: TEST_PROMPT,
337
+ headers: {
338
+ 'Custom-Request-Header': 'request-header-value',
339
+ },
340
+ });
341
+
342
+ expect(server.calls[0].requestHeaders).toStrictEqual({
343
+ authorization: 'Bearer test-api-key',
344
+ 'content-type': 'application/json',
345
+ 'custom-provider-header': 'provider-header-value',
346
+ 'custom-request-header': 'request-header-value',
347
+ });
348
+ });
349
+
350
+ it('should pass response format', async () => {
351
+ prepareJsonResponse();
352
+
353
+ await model.doGenerate({
354
+ prompt: TEST_PROMPT,
355
+ responseFormat: {
356
+ type: 'json',
357
+ schema: {
358
+ type: 'object',
359
+ properties: {
360
+ text: { type: 'string' },
361
+ },
362
+ required: ['text'],
363
+ },
364
+ },
365
+ });
366
+
367
+ expect(await server.calls[0].requestBodyJson).toStrictEqual({
368
+ model: 'command-r-plus',
369
+ messages: [
370
+ { role: 'system', content: 'you are a friendly bot!' },
371
+ { role: 'user', content: 'Hello' },
372
+ ],
373
+ response_format: {
374
+ type: 'json_object',
375
+ json_schema: {
376
+ type: 'object',
377
+ properties: {
378
+ text: { type: 'string' },
379
+ },
380
+ required: ['text'],
381
+ },
382
+ },
383
+ });
384
+ });
385
+
386
+ it('should send request body', async () => {
387
+ prepareJsonResponse({ content: [{ type: 'text', text: '' }] });
388
+
389
+ const { request } = await model.doGenerate({
390
+ prompt: TEST_PROMPT,
391
+ });
392
+
393
+ expect(request).toMatchInlineSnapshot(`
394
+ {
395
+ "body": {
396
+ "frequency_penalty": undefined,
397
+ "k": undefined,
398
+ "max_tokens": undefined,
399
+ "messages": [
400
+ {
401
+ "content": "you are a friendly bot!",
402
+ "role": "system",
403
+ },
404
+ {
405
+ "content": "Hello",
406
+ "role": "user",
407
+ },
408
+ ],
409
+ "model": "command-r-plus",
410
+ "p": undefined,
411
+ "presence_penalty": undefined,
412
+ "response_format": undefined,
413
+ "seed": undefined,
414
+ "stop_sequences": undefined,
415
+ "temperature": undefined,
416
+ "tool_choice": undefined,
417
+ "tools": undefined,
418
+ },
419
+ }
420
+ `);
421
+ });
422
+
423
+ it('should handle string "null" tool call arguments', async () => {
424
+ prepareJsonResponse({
425
+ content: [],
426
+ tool_calls: [
427
+ {
428
+ id: 'test-id-1',
429
+ type: 'function',
430
+ function: {
431
+ name: 'currentTime',
432
+ arguments: 'null',
433
+ },
434
+ },
435
+ ],
436
+ });
437
+
438
+ const { content } = await model.doGenerate({
439
+ tools: [
440
+ {
441
+ type: 'function',
442
+ name: 'currentTime',
443
+ inputSchema: {
444
+ type: 'object',
445
+ properties: {},
446
+ required: [],
447
+ additionalProperties: false,
448
+ $schema: 'http://json-schema.org/draft-07/schema#',
449
+ },
450
+ },
451
+ ],
452
+ prompt: [
453
+ {
454
+ role: 'user',
455
+ content: [{ type: 'text', text: 'What is the current time?' }],
456
+ },
457
+ ],
458
+ });
459
+
460
+ expect(content).toMatchInlineSnapshot(`
461
+ [
462
+ {
463
+ "input": "{}",
464
+ "toolCallId": "test-id-1",
465
+ "toolName": "currentTime",
466
+ "type": "tool-call",
467
+ },
468
+ ]
469
+ `);
470
+ });
471
+
472
+ describe('citations', () => {
473
+ it('should extract text documents and send to API', async () => {
474
+ prepareJsonResponse({
475
+ content: [{ type: 'text', text: 'Hello, World!' }],
476
+ });
477
+
478
+ await model.doGenerate({
479
+ prompt: [
480
+ {
481
+ role: 'user',
482
+ content: [
483
+ { type: 'text', text: 'What does this say?' },
484
+ {
485
+ type: 'file',
486
+ data: 'This is a test document.',
487
+ mediaType: 'text/plain',
488
+ filename: 'test.txt',
489
+ },
490
+ ],
491
+ },
492
+ ],
493
+ });
494
+
495
+ expect(await server.calls[0].requestBodyJson).toMatchInlineSnapshot(`
496
+ {
497
+ "documents": [
498
+ {
499
+ "data": {
500
+ "text": "This is a test document.",
501
+ "title": "test.txt",
502
+ },
503
+ },
504
+ ],
505
+ "messages": [
506
+ {
507
+ "content": "What does this say?",
508
+ "role": "user",
509
+ },
510
+ ],
511
+ "model": "command-r-plus",
512
+ }
513
+ `);
514
+ });
515
+
516
+ it('should extract multiple text documents', async () => {
517
+ prepareJsonResponse({
518
+ content: [{ type: 'text', text: 'Hello, World!' }],
519
+ });
520
+
521
+ await model.doGenerate({
522
+ prompt: [
523
+ {
524
+ role: 'user',
525
+ content: [
526
+ { type: 'text', text: 'What do these documents say?' },
527
+ {
528
+ type: 'file',
529
+ data: Buffer.from('First document content'),
530
+ mediaType: 'text/plain',
531
+ filename: 'doc1.txt',
532
+ },
533
+ {
534
+ type: 'file',
535
+ data: Buffer.from('Second document content'),
536
+ mediaType: 'text/plain',
537
+ filename: 'doc2.txt',
538
+ },
539
+ ],
540
+ },
541
+ ],
542
+ });
543
+
544
+ expect(await server.calls[0].requestBodyJson).toMatchInlineSnapshot(`
545
+ {
546
+ "documents": [
547
+ {
548
+ "data": {
549
+ "text": "First document content",
550
+ "title": "doc1.txt",
551
+ },
552
+ },
553
+ {
554
+ "data": {
555
+ "text": "Second document content",
556
+ "title": "doc2.txt",
557
+ },
558
+ },
559
+ ],
560
+ "messages": [
561
+ {
562
+ "content": "What do these documents say?",
563
+ "role": "user",
564
+ },
565
+ ],
566
+ "model": "command-r-plus",
567
+ }
568
+ `);
569
+ });
570
+
571
+ it('should support JSON files', async () => {
572
+ prepareJsonResponse({
573
+ content: [{ type: 'text', text: 'Hello, World!' }],
574
+ });
575
+
576
+ await model.doGenerate({
577
+ prompt: [
578
+ {
579
+ role: 'user',
580
+ content: [
581
+ { type: 'text', text: 'What is in this JSON?' },
582
+ {
583
+ type: 'file',
584
+ data: Buffer.from('{"key": "value"}'),
585
+ mediaType: 'application/json',
586
+ filename: 'data.json',
587
+ },
588
+ ],
589
+ },
590
+ ],
591
+ });
592
+
593
+ expect(await server.calls[0].requestBodyJson).toMatchInlineSnapshot(`
594
+ {
595
+ "documents": [
596
+ {
597
+ "data": {
598
+ "text": "{"key": "value"}",
599
+ "title": "data.json",
600
+ },
601
+ },
602
+ ],
603
+ "messages": [
604
+ {
605
+ "content": "What is in this JSON?",
606
+ "role": "user",
607
+ },
608
+ ],
609
+ "model": "command-r-plus",
610
+ }
611
+ `);
612
+ });
613
+
614
+ it('should throw error for unsupported file types', async () => {
615
+ prepareJsonResponse({
616
+ content: [{ type: 'text', text: 'Hello, World!' }],
617
+ });
618
+
619
+ await expect(
620
+ model.doGenerate({
621
+ prompt: [
622
+ {
623
+ role: 'user',
624
+ content: [
625
+ { type: 'text', text: 'What is this?' },
626
+ {
627
+ type: 'file',
628
+ data: Buffer.from('PDF binary data'),
629
+ mediaType: 'application/pdf',
630
+ filename: 'document.pdf',
631
+ },
632
+ ],
633
+ },
634
+ ],
635
+ }),
636
+ ).rejects.toThrow(
637
+ "Media type 'application/pdf' is not supported. Supported media types are: text/* and application/json.",
638
+ );
639
+ });
640
+
641
+ it('should successfully process supported text media types', async () => {
642
+ prepareJsonResponse({
643
+ content: [{ type: 'text', text: 'Hello, World!' }],
644
+ });
645
+
646
+ await model.doGenerate({
647
+ prompt: [
648
+ {
649
+ role: 'user',
650
+ content: [
651
+ { type: 'text', text: 'What is this?' },
652
+ {
653
+ type: 'file',
654
+ data: Buffer.from('This is plain text content'),
655
+ mediaType: 'text/plain',
656
+ filename: 'text.txt',
657
+ },
658
+ {
659
+ type: 'file',
660
+ data: Buffer.from('# Markdown Header\nContent'),
661
+ mediaType: 'text/markdown',
662
+ filename: 'doc.md',
663
+ },
664
+ ],
665
+ },
666
+ ],
667
+ });
668
+
669
+ expect(await server.calls[0].requestBodyJson).toMatchObject({
670
+ documents: [
671
+ {
672
+ data: {
673
+ text: 'This is plain text content',
674
+ title: 'text.txt',
675
+ },
676
+ },
677
+ {
678
+ data: {
679
+ text: '# Markdown Header\nContent',
680
+ title: 'doc.md',
681
+ },
682
+ },
683
+ ],
684
+ });
685
+ });
686
+
687
+ it('should extract citations from response', async () => {
688
+ const mockGenerateId = vi.fn().mockReturnValue('test-citation-id');
689
+ const testProvider = createCohere({
690
+ apiKey: 'test-api-key',
691
+ generateId: mockGenerateId,
692
+ });
693
+ const testModel = testProvider('command-r-plus');
694
+
695
+ server.urls['https://api.cohere.com/v2/chat'].response = {
696
+ type: 'json-value',
697
+ body: {
698
+ response_id: '0cf61ae0-1f60-4c18-9802-be7be809e712',
699
+ generation_id: 'dad0c7cd-7982-42a7-acfb-706ccf598291',
700
+ message: {
701
+ role: 'assistant',
702
+ content: [
703
+ {
704
+ type: 'text',
705
+ text: 'AI has many benefits including automation.',
706
+ },
707
+ ],
708
+ citations: [
709
+ {
710
+ start: 31,
711
+ end: 41,
712
+ text: 'automation',
713
+ type: 'TEXT_CONTENT',
714
+ sources: [
715
+ {
716
+ type: 'document',
717
+ id: 'doc:0',
718
+ document: {
719
+ id: 'doc:0',
720
+ text: 'AI provides automation and efficiency.',
721
+ title: 'ai-benefits.txt',
722
+ },
723
+ },
724
+ ],
725
+ },
726
+ ],
727
+ },
728
+ finish_reason: 'COMPLETE',
729
+ usage: {
730
+ billed_units: { input_tokens: 9, output_tokens: 415 },
731
+ tokens: { input_tokens: 4, output_tokens: 30 },
732
+ },
733
+ },
734
+ };
735
+
736
+ const { content } = await testModel.doGenerate({
737
+ prompt: [
738
+ {
739
+ role: 'user',
740
+ content: [
741
+ { type: 'text', text: 'What are AI benefits?' },
742
+ {
743
+ type: 'file',
744
+ data: 'AI provides automation and efficiency.',
745
+ mediaType: 'text/plain',
746
+ filename: 'ai-benefits.txt',
747
+ },
748
+ ],
749
+ },
750
+ ],
751
+ });
752
+
753
+ expect(content).toMatchInlineSnapshot(`
754
+ [
755
+ {
756
+ "text": "AI has many benefits including automation.",
757
+ "type": "text",
758
+ },
759
+ {
760
+ "id": "test-citation-id",
761
+ "mediaType": "text/plain",
762
+ "providerMetadata": {
763
+ "cohere": {
764
+ "citationType": "TEXT_CONTENT",
765
+ "end": 41,
766
+ "sources": [
767
+ {
768
+ "document": {
769
+ "id": "doc:0",
770
+ "text": "AI provides automation and efficiency.",
771
+ "title": "ai-benefits.txt",
772
+ },
773
+ "id": "doc:0",
774
+ "type": "document",
775
+ },
776
+ ],
777
+ "start": 31,
778
+ "text": "automation",
779
+ },
780
+ },
781
+ "sourceType": "document",
782
+ "title": "ai-benefits.txt",
783
+ "type": "source",
784
+ },
785
+ ]
786
+ `);
787
+ });
788
+
789
+ it('should not include documents parameter when no files present', async () => {
790
+ prepareJsonResponse({
791
+ content: [{ type: 'text', text: 'Hello, World!' }],
792
+ });
793
+
794
+ await model.doGenerate({
795
+ prompt: TEST_PROMPT,
796
+ });
797
+
798
+ const requestBody = await server.calls[0].requestBodyJson;
799
+ expect(requestBody.documents).toBeUndefined();
800
+ });
801
+ });
802
+
803
+ it('should extract reasoning from response', async () => {
804
+ prepareJsonResponse({
805
+ content: [
806
+ { type: 'text', text: '42' },
807
+ { type: 'thinking', thinking: 'So I was thinking ...' },
808
+ ],
809
+ });
810
+
811
+ const { content } = await model.doGenerate({
812
+ prompt: TEST_PROMPT,
813
+ });
814
+
815
+ expect(content).toMatchInlineSnapshot(`
816
+ [
817
+ {
818
+ "text": "42",
819
+ "type": "text",
820
+ },
821
+ {
822
+ "text": "So I was thinking ...",
823
+ "type": "reasoning",
824
+ },
825
+ ]
826
+ `);
827
+ });
828
+ });
829
+
830
+ describe('doStream', () => {
831
+ function prepareStreamResponse({
832
+ content = [],
833
+ usage = {
834
+ input_tokens: 17,
835
+ output_tokens: 244,
836
+ },
837
+ finish_reason = 'COMPLETE',
838
+ headers,
839
+ }: {
840
+ content?: Array<
841
+ | { type: 'text'; deltas: string[] }
842
+ | { type: 'thinking'; deltas: string[] }
843
+ >;
844
+ usage?: {
845
+ input_tokens: number;
846
+ output_tokens: number;
847
+ };
848
+ finish_reason?: string;
849
+ headers?: Record<string, string>;
850
+ } = {}) {
851
+ const allEvents: string[] = [];
852
+
853
+ content.forEach(contentItem => {
854
+ if (contentItem.type === 'thinking') {
855
+ allEvents.push(
856
+ `event: content-start\ndata: {"type":"content-start","index":0,"delta":{"message":{"content":{"type":"thinking","thinking":""}}}}\n\n`,
857
+ ...contentItem.deltas.map(
858
+ text =>
859
+ `event: content-delta\ndata: {"type":"content-delta","index":0,"delta":{"message":{"content":{"thinking":"${text}"}}}}\n\n`,
860
+ ),
861
+ `event: content-end\ndata: {"type":"content-end","index":0}\n\n`,
862
+ );
863
+ } else if (contentItem.type === 'text') {
864
+ allEvents.push(
865
+ `event: content-start\ndata: {"type":"content-start","index":0,"delta":{"message":{"content":{"type":"text","text":""}}}}\n\n`,
866
+ ...contentItem.deltas.map(
867
+ text =>
868
+ `event: content-delta\ndata: {"type":"content-delta","index":0,"delta":{"message":{"content":{"text":"${text}"}}}}\n\n`,
869
+ ),
870
+ `event: content-end\ndata: {"type":"content-end","index":0}\n\n`,
871
+ );
872
+ }
873
+ });
874
+
875
+ server.urls['https://api.cohere.com/v2/chat'].response = {
876
+ type: 'stream-chunks',
877
+ headers,
878
+ chunks: [
879
+ `event: message-start\ndata: {"type":"message-start","id":"586ac33f-9c64-452c-8f8d-e5890e73b6fb","delta":{"message":{"role":"assistant","content":[],"tool_plan":"","tool_calls":[],"citations":[]}}}\n\n`,
880
+ ...allEvents,
881
+ `event: message-end\ndata: {"type":"message-end","delta":` +
882
+ `{"finish_reason":"${finish_reason}",` +
883
+ `"usage":{"tokens":{"input_tokens":${usage.input_tokens},"output_tokens":${usage.output_tokens}}}}}\n\n`,
884
+ `data: [DONE]\n\n`,
885
+ ],
886
+ };
887
+ }
888
+
889
+ it('should stream text deltas', async () => {
890
+ prepareStreamResponse({
891
+ content: [{ type: 'text', deltas: ['Hello', ', ', 'World!'] }],
892
+ finish_reason: 'COMPLETE',
893
+ usage: {
894
+ input_tokens: 34,
895
+ output_tokens: 12,
896
+ },
897
+ });
898
+
899
+ const { stream } = await model.doStream({
900
+ prompt: TEST_PROMPT,
901
+ includeRawChunks: false,
902
+ });
903
+
904
+ expect(await convertReadableStreamToArray(stream)).toMatchInlineSnapshot(`
905
+ [
906
+ {
907
+ "type": "stream-start",
908
+ "warnings": [],
909
+ },
910
+ {
911
+ "id": "586ac33f-9c64-452c-8f8d-e5890e73b6fb",
912
+ "type": "response-metadata",
913
+ },
914
+ {
915
+ "id": "0",
916
+ "type": "text-start",
917
+ },
918
+ {
919
+ "delta": "Hello",
920
+ "id": "0",
921
+ "type": "text-delta",
922
+ },
923
+ {
924
+ "delta": ", ",
925
+ "id": "0",
926
+ "type": "text-delta",
927
+ },
928
+ {
929
+ "delta": "World!",
930
+ "id": "0",
931
+ "type": "text-delta",
932
+ },
933
+ {
934
+ "id": "0",
935
+ "type": "text-end",
936
+ },
937
+ {
938
+ "finishReason": {
939
+ "raw": "COMPLETE",
940
+ "unified": "stop",
941
+ },
942
+ "type": "finish",
943
+ "usage": {
944
+ "inputTokens": {
945
+ "cacheRead": undefined,
946
+ "cacheWrite": undefined,
947
+ "noCache": 34,
948
+ "total": 34,
949
+ },
950
+ "outputTokens": {
951
+ "reasoning": undefined,
952
+ "text": 12,
953
+ "total": 12,
954
+ },
955
+ "raw": {
956
+ "input_tokens": 34,
957
+ "output_tokens": 12,
958
+ },
959
+ },
960
+ },
961
+ ]
962
+ `);
963
+ });
964
+
965
+ it('should stream reasoning deltas', async () => {
966
+ prepareStreamResponse({
967
+ content: [
968
+ { type: 'thinking', deltas: ['So', 'I ', 'was ', 'thinking ', '...'] },
969
+ { type: 'text', deltas: ['Hello', ', ', 'World!'] },
970
+ ],
971
+ finish_reason: 'COMPLETE',
972
+ usage: {
973
+ input_tokens: 34,
974
+ output_tokens: 12,
975
+ },
976
+ });
977
+
978
+ const { stream } = await model.doStream({
979
+ prompt: TEST_PROMPT,
980
+ includeRawChunks: false,
981
+ });
982
+
983
+ expect(await convertReadableStreamToArray(stream)).toMatchInlineSnapshot(`
984
+ [
985
+ {
986
+ "type": "stream-start",
987
+ "warnings": [],
988
+ },
989
+ {
990
+ "id": "586ac33f-9c64-452c-8f8d-e5890e73b6fb",
991
+ "type": "response-metadata",
992
+ },
993
+ {
994
+ "id": "0",
995
+ "type": "reasoning-start",
996
+ },
997
+ {
998
+ "delta": "So",
999
+ "id": "0",
1000
+ "type": "reasoning-delta",
1001
+ },
1002
+ {
1003
+ "delta": "I ",
1004
+ "id": "0",
1005
+ "type": "reasoning-delta",
1006
+ },
1007
+ {
1008
+ "delta": "was ",
1009
+ "id": "0",
1010
+ "type": "reasoning-delta",
1011
+ },
1012
+ {
1013
+ "delta": "thinking ",
1014
+ "id": "0",
1015
+ "type": "reasoning-delta",
1016
+ },
1017
+ {
1018
+ "delta": "...",
1019
+ "id": "0",
1020
+ "type": "reasoning-delta",
1021
+ },
1022
+ {
1023
+ "id": "0",
1024
+ "type": "reasoning-end",
1025
+ },
1026
+ {
1027
+ "id": "0",
1028
+ "type": "text-start",
1029
+ },
1030
+ {
1031
+ "delta": "Hello",
1032
+ "id": "0",
1033
+ "type": "text-delta",
1034
+ },
1035
+ {
1036
+ "delta": ", ",
1037
+ "id": "0",
1038
+ "type": "text-delta",
1039
+ },
1040
+ {
1041
+ "delta": "World!",
1042
+ "id": "0",
1043
+ "type": "text-delta",
1044
+ },
1045
+ {
1046
+ "id": "0",
1047
+ "type": "text-end",
1048
+ },
1049
+ {
1050
+ "finishReason": {
1051
+ "raw": "COMPLETE",
1052
+ "unified": "stop",
1053
+ },
1054
+ "type": "finish",
1055
+ "usage": {
1056
+ "inputTokens": {
1057
+ "cacheRead": undefined,
1058
+ "cacheWrite": undefined,
1059
+ "noCache": 34,
1060
+ "total": 34,
1061
+ },
1062
+ "outputTokens": {
1063
+ "reasoning": undefined,
1064
+ "text": 12,
1065
+ "total": 12,
1066
+ },
1067
+ "raw": {
1068
+ "input_tokens": 34,
1069
+ "output_tokens": 12,
1070
+ },
1071
+ },
1072
+ },
1073
+ ]
1074
+ `);
1075
+ });
1076
+
1077
+ it('should stream tool deltas', async () => {
1078
+ server.urls['https://api.cohere.com/v2/chat'].response = {
1079
+ type: 'stream-chunks',
1080
+ chunks: [
1081
+ `event: message-start\ndata: {"type":"message-start","id":"29f14a5a-11de-4cae-9800-25e4747408ea","delta":{"message":{"role":"assistant","content":[],"tool_plan":"","tool_calls":[],"citations":[]}}}\n\n`,
1082
+ `event: tool-call-start\ndata: {"type":"tool-call-start","delta":{"message":{"tool_calls":{"id":"test-id-1","type":"function","function":{"name":"test-tool","arguments":""}}}}}\n\n`,
1083
+ `event: tool-call-delta\ndata: {"type":"tool-call-delta","delta":{"message":{"tool_calls":{"function":{"arguments":"{\\n \\""}}}}}\n\n`,
1084
+ `event: tool-call-delta\ndata: {"type":"tool-call-delta","delta":{"message":{"tool_calls":{"function":{"arguments":"ticker"}}}}}\n\n`,
1085
+ `event: tool-call-delta\ndata: {"type":"tool-call-delta","delta":{"message":{"tool_calls":{"function":{"arguments":"_"}}}}}\n\n`,
1086
+ `event: tool-call-delta\ndata: {"type":"tool-call-delta","delta":{"message":{"tool_calls":{"function":{"arguments":"symbol"}}}}}\n\n`,
1087
+ `event: tool-call-delta\ndata: {"type":"tool-call-delta","delta":{"message":{"tool_calls":{"function":{"arguments":"\\":"}}}}}\n\n`,
1088
+ `event: tool-call-delta\ndata: {"type":"tool-call-delta","delta":{"message":{"tool_calls":{"function":{"arguments":" \\""}}}}}\n\n`,
1089
+ `event: tool-call-delta\ndata: {"type":"tool-call-delta","delta":{"message":{"tool_calls":{"function":{"arguments":"AAPL"}}}}}\n\n`,
1090
+ `event: tool-call-delta\ndata: {"type":"tool-call-delta","delta":{"message":{"tool_calls":{"function":{"arguments":"\\""}}}}}\n\n`,
1091
+ `event: tool-call-delta\ndata: {"type":"tool-call-delta","delta":{"message":{"tool_calls":{"function":{"arguments":"\\n"}}}}}\n\n`,
1092
+ `event: tool-call-delta\ndata: {"type":"tool-call-delta","delta":{"message":{"tool_calls":{"function":{"arguments":"}"}}}}}\n\n`,
1093
+ `event: tool-call-end\ndata: {"type":"tool-call-end"}\n\n`,
1094
+ `event: message-end\ndata: {"type":"message-end","delta":{"finish_reason":"COMPLETE","usage":{"tokens":{"input_tokens":893,"output_tokens":62}}}}\n\n`,
1095
+ `data: [DONE]\n\n`,
1096
+ ],
1097
+ };
1098
+
1099
+ const { stream } = await model.doStream({
1100
+ prompt: TEST_PROMPT,
1101
+ tools: [
1102
+ {
1103
+ type: 'function',
1104
+ name: 'test-tool',
1105
+ inputSchema: {
1106
+ type: 'object',
1107
+ properties: { value: { type: 'string' } },
1108
+ required: ['value'],
1109
+ additionalProperties: false,
1110
+ $schema: 'http://json-schema.org/draft-07/schema#',
1111
+ },
1112
+ },
1113
+ ],
1114
+ includeRawChunks: false,
1115
+ });
1116
+
1117
+ const responseArray = await convertReadableStreamToArray(stream);
1118
+
1119
+ expect(responseArray).toMatchInlineSnapshot(`
1120
+ [
1121
+ {
1122
+ "type": "stream-start",
1123
+ "warnings": [],
1124
+ },
1125
+ {
1126
+ "id": "29f14a5a-11de-4cae-9800-25e4747408ea",
1127
+ "type": "response-metadata",
1128
+ },
1129
+ {
1130
+ "id": "test-id-1",
1131
+ "toolName": "test-tool",
1132
+ "type": "tool-input-start",
1133
+ },
1134
+ {
1135
+ "delta": "{
1136
+ "",
1137
+ "id": "test-id-1",
1138
+ "type": "tool-input-delta",
1139
+ },
1140
+ {
1141
+ "delta": "ticker",
1142
+ "id": "test-id-1",
1143
+ "type": "tool-input-delta",
1144
+ },
1145
+ {
1146
+ "delta": "_",
1147
+ "id": "test-id-1",
1148
+ "type": "tool-input-delta",
1149
+ },
1150
+ {
1151
+ "delta": "symbol",
1152
+ "id": "test-id-1",
1153
+ "type": "tool-input-delta",
1154
+ },
1155
+ {
1156
+ "delta": "":",
1157
+ "id": "test-id-1",
1158
+ "type": "tool-input-delta",
1159
+ },
1160
+ {
1161
+ "delta": " "",
1162
+ "id": "test-id-1",
1163
+ "type": "tool-input-delta",
1164
+ },
1165
+ {
1166
+ "delta": "AAPL",
1167
+ "id": "test-id-1",
1168
+ "type": "tool-input-delta",
1169
+ },
1170
+ {
1171
+ "delta": """,
1172
+ "id": "test-id-1",
1173
+ "type": "tool-input-delta",
1174
+ },
1175
+ {
1176
+ "delta": "
1177
+ ",
1178
+ "id": "test-id-1",
1179
+ "type": "tool-input-delta",
1180
+ },
1181
+ {
1182
+ "delta": "}",
1183
+ "id": "test-id-1",
1184
+ "type": "tool-input-delta",
1185
+ },
1186
+ {
1187
+ "id": "test-id-1",
1188
+ "type": "tool-input-end",
1189
+ },
1190
+ {
1191
+ "input": "{"ticker_symbol":"AAPL"}",
1192
+ "toolCallId": "test-id-1",
1193
+ "toolName": "test-tool",
1194
+ "type": "tool-call",
1195
+ },
1196
+ {
1197
+ "finishReason": {
1198
+ "raw": "COMPLETE",
1199
+ "unified": "stop",
1200
+ },
1201
+ "type": "finish",
1202
+ "usage": {
1203
+ "inputTokens": {
1204
+ "cacheRead": undefined,
1205
+ "cacheWrite": undefined,
1206
+ "noCache": 893,
1207
+ "total": 893,
1208
+ },
1209
+ "outputTokens": {
1210
+ "reasoning": undefined,
1211
+ "text": 62,
1212
+ "total": 62,
1213
+ },
1214
+ "raw": {
1215
+ "input_tokens": 893,
1216
+ "output_tokens": 62,
1217
+ },
1218
+ },
1219
+ },
1220
+ ]
1221
+ `);
1222
+
1223
+ // Check if the tool call ID is the same in the tool call delta and the tool call
1224
+ const toolCallIds = responseArray
1225
+ .filter(
1226
+ chunk =>
1227
+ chunk.type === 'tool-input-delta' || chunk.type === 'tool-call',
1228
+ )
1229
+ .map(chunk => (chunk.type === 'tool-call' ? chunk.toolCallId : chunk.id));
1230
+
1231
+ expect(new Set(toolCallIds)).toStrictEqual(new Set(['test-id-1']));
1232
+ });
1233
+
1234
  // A malformed SSE chunk must not crash the stream: it surfaces as an
  // 'error' part carrying the AI_JSONParseError, followed by a 'finish'
  // part with unified finish reason 'error' and fully-undefined usage.
  // NOTE(review): skipped on Node 20 — presumably because V8's JSON parse
  // error message text differs there; confirm against CI history.
  it.skipIf(isNodeVersion(20))(
    'should handle unparsable stream parts',
    async () => {
      // `{unparsable}` is intentionally invalid JSON.
      server.urls['https://api.cohere.com/v2/chat'].response = {
        type: 'stream-chunks',
        chunks: [`event: foo-message\ndata: {unparsable}\n\n`],
      };

      const { stream } = await model.doStream({
        prompt: TEST_PROMPT,
        includeRawChunks: false,
      });

      expect(await convertReadableStreamToArray(stream)).toMatchInlineSnapshot(`
        [
          {
            "type": "stream-start",
            "warnings": [],
          },
          {
            "error": [AI_JSONParseError: JSON parsing failed: Text: {unparsable}.
        Error message: Expected property name or '}' in JSON at position 1 (line 1 column 2)],
            "type": "error",
          },
          {
            "finishReason": {
              "raw": undefined,
              "unified": "error",
            },
            "type": "finish",
            "usage": {
              "inputTokens": {
                "cacheRead": undefined,
                "cacheWrite": undefined,
                "noCache": undefined,
                "total": undefined,
              },
              "outputTokens": {
                "reasoning": undefined,
                "text": undefined,
                "total": undefined,
              },
              "raw": undefined,
            },
          },
        ]
      `);
    },
  );
1283
+
1284
+ it('should expose the raw response headers', async () => {
1285
+ prepareStreamResponse({
1286
+ content: [],
1287
+ headers: { 'test-header': 'test-value' },
1288
+ });
1289
+
1290
+ const { response } = await model.doStream({
1291
+ prompt: TEST_PROMPT,
1292
+ includeRawChunks: false,
1293
+ });
1294
+
1295
+ expect(response?.headers).toStrictEqual({
1296
+ // default headers:
1297
+ 'content-type': 'text/event-stream',
1298
+ 'cache-control': 'no-cache',
1299
+ connection: 'keep-alive',
1300
+
1301
+ // custom header
1302
+ 'test-header': 'test-value',
1303
+ });
1304
+ });
1305
+
1306
+ it('should pass the messages and the model', async () => {
1307
+ prepareStreamResponse();
1308
+
1309
+ await model.doStream({
1310
+ prompt: TEST_PROMPT,
1311
+ includeRawChunks: false,
1312
+ });
1313
+
1314
+ expect(await server.calls[0].requestBodyJson).toStrictEqual({
1315
+ stream: true,
1316
+ model: 'command-r-plus',
1317
+ messages: [
1318
+ {
1319
+ role: 'system',
1320
+ content: 'you are a friendly bot!',
1321
+ },
1322
+ {
1323
+ role: 'user',
1324
+ content: 'Hello',
1325
+ },
1326
+ ],
1327
+ });
1328
+ });
1329
+
1330
+ it('should pass headers', async () => {
1331
+ prepareStreamResponse();
1332
+
1333
+ const provider = createCohere({
1334
+ apiKey: 'test-api-key',
1335
+ headers: {
1336
+ 'Custom-Provider-Header': 'provider-header-value',
1337
+ },
1338
+ });
1339
+
1340
+ await provider('command-r-plus').doStream({
1341
+ prompt: TEST_PROMPT,
1342
+ headers: {
1343
+ 'Custom-Request-Header': 'request-header-value',
1344
+ },
1345
+ includeRawChunks: false,
1346
+ });
1347
+
1348
+ expect(server.calls[0].requestHeaders).toStrictEqual({
1349
+ authorization: 'Bearer test-api-key',
1350
+ 'content-type': 'application/json',
1351
+ 'custom-provider-header': 'provider-header-value',
1352
+ 'custom-request-header': 'request-header-value',
1353
+ });
1354
+ });
1355
+
1356
  // Snapshots the exact request body for a minimal streaming call: only
  // model, messages, and stream are set; every optional Cohere setting is
  // present but undefined.
  it('should send request body', async () => {
    prepareStreamResponse();

    const { request } = await model.doStream({
      prompt: TEST_PROMPT,
      includeRawChunks: false,
    });

    expect(request).toMatchInlineSnapshot(`
      {
        "body": {
          "frequency_penalty": undefined,
          "k": undefined,
          "max_tokens": undefined,
          "messages": [
            {
              "content": "you are a friendly bot!",
              "role": "system",
            },
            {
              "content": "Hello",
              "role": "user",
            },
          ],
          "model": "command-r-plus",
          "p": undefined,
          "presence_penalty": undefined,
          "response_format": undefined,
          "seed": undefined,
          "stop_sequences": undefined,
          "stream": true,
          "temperature": undefined,
          "tool_choice": undefined,
          "tools": undefined,
        },
      }
    `);
  });
1394
+
1395
  // A tool-call-start event whose "arguments" string is empty must still
  // yield a complete tool-call part, with the input normalized to "{}".
  it('should handle empty tool call arguments', async () => {
    server.urls['https://api.cohere.com/v2/chat'].response = {
      type: 'stream-chunks',
      chunks: [
        `event: message-start\ndata: {"type":"message-start","id":"test-id","delta":{"message":{"role":"assistant","content":[],"tool_plan":"","tool_calls":[],"citations":[]}}}\n\n`,
        `event: tool-call-start\ndata: {"type":"tool-call-start","delta":{"message":{"tool_calls":{"id":"test-id-1","type":"function","function":{"name":"test-tool","arguments":""}}}}}\n\n`,
        `event: tool-call-end\ndata: {"type":"tool-call-end"}\n\n`,
        `event: message-end\ndata: {"type":"message-end","delta":{"finish_reason":"COMPLETE","usage":{"tokens":{"input_tokens":10,"output_tokens":5}}}}\n\n`,
        `data: [DONE]\n\n`,
      ],
    };

    const { stream } = await model.doStream({
      prompt: TEST_PROMPT,
      tools: [
        {
          type: 'function',
          name: 'test-tool',
          inputSchema: {
            type: 'object',
            properties: {},
            required: [],
            additionalProperties: false,
            $schema: 'http://json-schema.org/draft-07/schema#',
          },
        },
      ],
      includeRawChunks: false,
    });

    expect(await convertReadableStreamToArray(stream)).toMatchInlineSnapshot(`
      [
        {
          "type": "stream-start",
          "warnings": [],
        },
        {
          "id": "test-id",
          "type": "response-metadata",
        },
        {
          "id": "test-id-1",
          "toolName": "test-tool",
          "type": "tool-input-start",
        },
        {
          "id": "test-id-1",
          "type": "tool-input-end",
        },
        {
          "input": "{}",
          "toolCallId": "test-id-1",
          "toolName": "test-tool",
          "type": "tool-call",
        },
        {
          "finishReason": {
            "raw": "COMPLETE",
            "unified": "stop",
          },
          "type": "finish",
          "usage": {
            "inputTokens": {
              "cacheRead": undefined,
              "cacheWrite": undefined,
              "noCache": 10,
              "total": 10,
            },
            "outputTokens": {
              "reasoning": undefined,
              "text": 5,
              "total": 5,
            },
            "raw": {
              "input_tokens": 10,
              "output_tokens": 5,
            },
          },
        },
      ]
    `);
  });
1477
+
1478
  // With includeRawChunks enabled, each provider SSE event appears as a
  // 'raw' stream part whose rawValue is the parsed event (message-start,
  // content-start/delta/end, message-end).
  it('should include raw chunks when includeRawChunks is enabled', async () => {
    prepareStreamResponse({
      content: [{ type: 'text', deltas: ['Hello', ' World!'] }],
    });

    const { stream } = await model.doStream({
      prompt: TEST_PROMPT,
      includeRawChunks: true,
    });

    const chunks = await convertReadableStreamToArray(stream);

    expect(chunks.filter(chunk => chunk.type === 'raw')).toMatchInlineSnapshot(`
      [
        {
          "rawValue": {
            "delta": {
              "message": {
                "citations": [],
                "content": [],
                "role": "assistant",
                "tool_calls": [],
                "tool_plan": "",
              },
            },
            "id": "586ac33f-9c64-452c-8f8d-e5890e73b6fb",
            "type": "message-start",
          },
          "type": "raw",
        },
        {
          "rawValue": {
            "delta": {
              "message": {
                "content": {
                  "text": "",
                  "type": "text",
                },
              },
            },
            "index": 0,
            "type": "content-start",
          },
          "type": "raw",
        },
        {
          "rawValue": {
            "delta": {
              "message": {
                "content": {
                  "text": "Hello",
                },
              },
            },
            "index": 0,
            "type": "content-delta",
          },
          "type": "raw",
        },
        {
          "rawValue": {
            "delta": {
              "message": {
                "content": {
                  "text": " World!",
                },
              },
            },
            "index": 0,
            "type": "content-delta",
          },
          "type": "raw",
        },
        {
          "rawValue": {
            "index": 0,
            "type": "content-end",
          },
          "type": "raw",
        },
        {
          "rawValue": {
            "delta": {
              "finish_reason": "COMPLETE",
              "usage": {
                "tokens": {
                  "input_tokens": 17,
                  "output_tokens": 244,
                },
              },
            },
            "type": "message-end",
          },
          "type": "raw",
        },
      ]
    `);
  });
1576
+
1577
+ it('should not include raw chunks when includeRawChunks is false', async () => {
1578
+ prepareStreamResponse({
1579
+ content: [{ type: 'text', deltas: ['Hello', ' World!'] }],
1580
+ });
1581
+
1582
+ const { stream } = await model.doStream({
1583
+ prompt: TEST_PROMPT,
1584
+ includeRawChunks: false,
1585
+ });
1586
+
1587
+ const chunks = await convertReadableStreamToArray(stream);
1588
+
1589
+ expect(chunks.filter(chunk => chunk.type === 'raw')).toHaveLength(0);
1590
+ });
1591
+ });