@ai-sdk/mistral 3.0.8 → 3.0.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,1755 @@
1
+ import { LanguageModelV3Prompt } from '@ai-sdk/provider';
2
+ import { createTestServer } from '@ai-sdk/test-server/with-vitest';
3
+ import {
4
+ convertReadableStreamToArray,
5
+ mockId,
6
+ } from '@ai-sdk/provider-utils/test';
7
+ import fs from 'node:fs';
8
+ import { createMistral } from './mistral-provider';
9
+ import { describe, it, expect, vi } from 'vitest';
10
+
11
+ vi.mock('./version', () => ({
12
+ VERSION: '0.0.0-test',
13
+ }));
14
+
15
+ const TEST_PROMPT: LanguageModelV3Prompt = [
16
+ { role: 'user', content: [{ type: 'text', text: 'Hello' }] },
17
+ ];
18
+
19
+ const provider = createMistral({
20
+ apiKey: 'test-api-key',
21
+ generateId: mockId(),
22
+ });
23
+ const model = provider.chat('mistral-small-latest');
24
+
25
+ const server = createTestServer({
26
+ 'https://api.mistral.ai/v1/chat/completions': {},
27
+ });
28
+
29
+ describe('doGenerate', () => {
30
+ function prepareJsonResponse({
31
+ content = '',
32
+ usage = {
33
+ prompt_tokens: 4,
34
+ total_tokens: 34,
35
+ completion_tokens: 30,
36
+ },
37
+ id = '16362f24e60340d0994dd205c267a43a',
38
+ created = 1711113008,
39
+ model = 'mistral-small-latest',
40
+ headers,
41
+ }: {
42
+ content?: string;
43
+ usage?: {
44
+ prompt_tokens: number;
45
+ total_tokens: number;
46
+ completion_tokens: number;
47
+ };
48
+ id?: string;
49
+ created?: number;
50
+ model?: string;
51
+ headers?: Record<string, string>;
52
+ }) {
53
+ server.urls['https://api.mistral.ai/v1/chat/completions'].response = {
54
+ type: 'json-value',
55
+ headers,
56
+ body: {
57
+ object: 'chat.completion',
58
+ id,
59
+ created,
60
+ model,
61
+ choices: [
62
+ {
63
+ index: 0,
64
+ message: {
65
+ role: 'assistant',
66
+ content,
67
+ tool_calls: null,
68
+ },
69
+ finish_reason: 'stop',
70
+ logprobs: null,
71
+ },
72
+ ],
73
+ usage,
74
+ },
75
+ };
76
+ }
77
+
78
+ function prepareJsonFixtureResponse(filename: string) {
79
+ server.urls['https://api.mistral.ai/v1/chat/completions'].response = {
80
+ type: 'json-value',
81
+ body: JSON.parse(
82
+ fs.readFileSync(`src/__fixtures__/${filename}.json`, 'utf8'),
83
+ ),
84
+ };
85
+ }
86
+
87
+ it('should extract text content', async () => {
88
+ prepareJsonFixtureResponse('mistral-generate-text.1');
89
+
90
+ const { content } = await model.doGenerate({
91
+ prompt: TEST_PROMPT,
92
+ });
93
+
94
+ expect(content).toMatchInlineSnapshot(`
95
+ [
96
+ {
97
+ "text": "**Holiday Name: "World Kindness Day of Sharing"**
98
+
99
+ **Date:** The third Saturday in October
100
+
101
+ **Purpose:** To celebrate and promote acts of kindness, generosity, and connection by sharing something meaningful with others—whether it's time, skills, stories, or physical items.
102
+
103
+ ### **Traditions & Customs:**
104
+
105
+ 1. **"Share a Skill" Exchanges:**
106
+ - People teach others something they're good at—cooking, crafting, gardening, or even life advice—free of charge. Community centers, schools, and online platforms host skill-sharing sessions.
107
+
108
+ 2. **"Story Swap" Gatherings:**
109
+ - Friends and strangers meet in parks, cafes, or virtual spaces to share personal stories, jokes, or wisdom. The goal is to foster empathy and connection through storytelling.
110
+
111
+ 3. **"Kindness Kits" for Strangers:**
112
+ - People assemble small care packages (handwritten notes, snacks, seeds, or handmade crafts) and leave them in public places (libraries, bus stops, parks) for others to find.
113
+
114
+ 4. **"Pay It Forward" Chains:**
115
+ - Individuals perform random acts of kindness (buying coffee for the next person, donating to a cause, or helping a neighbor) and encourage others to do the same.
116
+
117
+ 5. **"Memory Lane" Sharing:**
118
+ - Families and friends gather to share old photos, heirlooms, or family recipes, passing down traditions and creating new ones.
119
+
120
+ 6. **"Global Kindness Map":**
121
+ - An interactive online map where people pin acts of kindness they've done or received, inspiring others to contribute.
122
+
123
+ **Symbol:** A **hand holding a heart** (representing giving and compassion).
124
+
125
+ **Food & Drink:** "Kindness Cookies" (homemade treats shared with neighbors) and "Unity Tea" (a blend of herbs from different cultures, symbolizing harmony).
126
+
127
+ **Why It's Special:** Unlike commercial holidays, this day focuses on **meaningful human connection**—reminding everyone that kindness is a universal language.
128
+
129
+ Would you celebrate it? What would you share? 😊",
130
+ "type": "text",
131
+ },
132
+ ]
133
+ `);
134
+ });
135
+
136
+ it('should avoid duplication when there is a trailing assistant message', async () => {
137
+ prepareJsonResponse({ content: 'prefix and more content' });
138
+
139
+ const { content } = await model.doGenerate({
140
+ prompt: [
141
+ { role: 'user', content: [{ type: 'text', text: 'Hello' }] },
142
+ {
143
+ role: 'assistant',
144
+ content: [{ type: 'text', text: 'prefix ' }],
145
+ },
146
+ ],
147
+ });
148
+
149
+ expect(content).toMatchInlineSnapshot(`
150
+ [
151
+ {
152
+ "text": "prefix and more content",
153
+ "type": "text",
154
+ },
155
+ ]
156
+ `);
157
+ });
158
+
159
+ it('should extract tool call content', async () => {
160
+ server.urls['https://api.mistral.ai/v1/chat/completions'].response = {
161
+ type: 'json-value',
162
+ body: {
163
+ id: 'b3999b8c93e04e11bcbff7bcab829667',
164
+ object: 'chat.completion',
165
+ created: 1722349660,
166
+ model: 'mistral-large-latest',
167
+ choices: [
168
+ {
169
+ index: 0,
170
+ message: {
171
+ role: 'assistant',
172
+ tool_calls: [
173
+ {
174
+ id: 'gSIMJiOkT',
175
+ function: {
176
+ name: 'weatherTool',
177
+ arguments: '{"location": "paris"}',
178
+ },
179
+ },
180
+ ],
181
+ },
182
+ finish_reason: 'tool_calls',
183
+ logprobs: null,
184
+ },
185
+ ],
186
+ usage: { prompt_tokens: 124, total_tokens: 146, completion_tokens: 22 },
187
+ },
188
+ };
189
+
190
+ const { content } = await model.doGenerate({
191
+ prompt: TEST_PROMPT,
192
+ });
193
+
194
+ expect(content).toMatchInlineSnapshot(`
195
+ [
196
+ {
197
+ "input": "{"location": "paris"}",
198
+ "toolCallId": "gSIMJiOkT",
199
+ "toolName": "weatherTool",
200
+ "type": "tool-call",
201
+ },
202
+ ]
203
+ `);
204
+ });
205
+
206
+ it('should extract thinking content as reasoning', async () => {
207
+ server.urls['https://api.mistral.ai/v1/chat/completions'].response = {
208
+ type: 'json-value',
209
+ body: {
210
+ id: 'test-thinking-id',
211
+ object: 'chat.completion',
212
+ created: 1722349660,
213
+ model: 'magistral-medium-2507',
214
+ choices: [
215
+ {
216
+ index: 0,
217
+ message: {
218
+ role: 'assistant',
219
+ content: [
220
+ {
221
+ type: 'thinking',
222
+ thinking: [
223
+ {
224
+ type: 'text',
225
+ text: 'Let me think about this problem step by step.',
226
+ },
227
+ ],
228
+ },
229
+ {
230
+ type: 'text',
231
+ text: 'Here is my answer.',
232
+ },
233
+ ],
234
+ },
235
+ finish_reason: 'stop',
236
+ },
237
+ ],
238
+ usage: { prompt_tokens: 10, total_tokens: 30, completion_tokens: 20 },
239
+ },
240
+ };
241
+
242
+ const { content } = await model.doGenerate({
243
+ prompt: TEST_PROMPT,
244
+ });
245
+
246
+ expect(content).toMatchInlineSnapshot(`
247
+ [
248
+ {
249
+ "text": "Let me think about this problem step by step.",
250
+ "type": "reasoning",
251
+ },
252
+ {
253
+ "text": "Here is my answer.",
254
+ "type": "text",
255
+ },
256
+ ]
257
+ `);
258
+ });
259
+
260
+ it('should preserve ordering of mixed thinking and text content', async () => {
261
+ server.urls['https://api.mistral.ai/v1/chat/completions'].response = {
262
+ type: 'json-value',
263
+ body: {
264
+ id: 'mixed-content-test',
265
+ object: 'chat.completion',
266
+ created: 1722349660,
267
+ model: 'magistral-medium-2507',
268
+ choices: [
269
+ {
270
+ index: 0,
271
+ message: {
272
+ role: 'assistant',
273
+ content: [
274
+ {
275
+ type: 'thinking',
276
+ thinking: [{ type: 'text', text: 'First thought.' }],
277
+ },
278
+ {
279
+ type: 'text',
280
+ text: 'Partial answer.',
281
+ },
282
+ {
283
+ type: 'thinking',
284
+ thinking: [{ type: 'text', text: 'Second thought.' }],
285
+ },
286
+ {
287
+ type: 'text',
288
+ text: 'Final answer.',
289
+ },
290
+ ],
291
+ },
292
+ finish_reason: 'stop',
293
+ },
294
+ ],
295
+ usage: { prompt_tokens: 10, total_tokens: 30, completion_tokens: 20 },
296
+ },
297
+ };
298
+
299
+ const { content } = await model.doGenerate({
300
+ prompt: TEST_PROMPT,
301
+ });
302
+
303
+ expect(content).toMatchInlineSnapshot(`
304
+ [
305
+ {
306
+ "text": "First thought.",
307
+ "type": "reasoning",
308
+ },
309
+ {
310
+ "text": "Partial answer.",
311
+ "type": "text",
312
+ },
313
+ {
314
+ "text": "Second thought.",
315
+ "type": "reasoning",
316
+ },
317
+ {
318
+ "text": "Final answer.",
319
+ "type": "text",
320
+ },
321
+ ]
322
+ `);
323
+ });
324
+
325
+ it('should handle empty thinking content', async () => {
326
+ server.urls['https://api.mistral.ai/v1/chat/completions'].response = {
327
+ type: 'json-value',
328
+ body: {
329
+ id: 'empty-thinking-test',
330
+ object: 'chat.completion',
331
+ created: 1722349660,
332
+ model: 'magistral-medium-2507',
333
+ choices: [
334
+ {
335
+ index: 0,
336
+ message: {
337
+ role: 'assistant',
338
+ content: [
339
+ {
340
+ type: 'thinking',
341
+ thinking: [],
342
+ },
343
+ {
344
+ type: 'text',
345
+ text: 'Just the answer.',
346
+ },
347
+ ],
348
+ },
349
+ finish_reason: 'stop',
350
+ },
351
+ ],
352
+ usage: { prompt_tokens: 10, total_tokens: 30, completion_tokens: 20 },
353
+ },
354
+ };
355
+
356
+ const { content } = await model.doGenerate({
357
+ prompt: TEST_PROMPT,
358
+ });
359
+
360
+ expect(content).toMatchInlineSnapshot(`
361
+ [
362
+ {
363
+ "text": "Just the answer.",
364
+ "type": "text",
365
+ },
366
+ ]
367
+ `);
368
+ });
369
+
370
+ it('should extract usage', async () => {
371
+ prepareJsonResponse({
372
+ usage: { prompt_tokens: 20, total_tokens: 25, completion_tokens: 5 },
373
+ });
374
+
375
+ const { usage } = await model.doGenerate({
376
+ prompt: TEST_PROMPT,
377
+ });
378
+
379
+ expect(usage).toMatchInlineSnapshot(`
380
+ {
381
+ "inputTokens": {
382
+ "cacheRead": undefined,
383
+ "cacheWrite": undefined,
384
+ "noCache": 20,
385
+ "total": 20,
386
+ },
387
+ "outputTokens": {
388
+ "reasoning": undefined,
389
+ "text": 5,
390
+ "total": 5,
391
+ },
392
+ "raw": {
393
+ "completion_tokens": 5,
394
+ "prompt_tokens": 20,
395
+ "total_tokens": 25,
396
+ },
397
+ }
398
+ `);
399
+ });
400
+
401
+ it('should send additional response information', async () => {
402
+ prepareJsonResponse({
403
+ id: 'test-id',
404
+ created: 123,
405
+ model: 'test-model',
406
+ });
407
+
408
+ const { response } = await model.doGenerate({
409
+ prompt: TEST_PROMPT,
410
+ });
411
+
412
+ expect({
413
+ id: response?.id,
414
+ timestamp: response?.timestamp,
415
+ modelId: response?.modelId,
416
+ }).toStrictEqual({
417
+ id: 'test-id',
418
+ timestamp: new Date(123 * 1000),
419
+ modelId: 'test-model',
420
+ });
421
+ });
422
+
423
+ it('should expose the raw response headers', async () => {
424
+ prepareJsonResponse({
425
+ headers: { 'test-header': 'test-value' },
426
+ });
427
+
428
+ const { response } = await model.doGenerate({
429
+ prompt: TEST_PROMPT,
430
+ });
431
+
432
+ expect(response?.headers).toStrictEqual({
433
+ // default headers:
434
+ 'content-length': '314',
435
+ 'content-type': 'application/json',
436
+
437
+ // custom header
438
+ 'test-header': 'test-value',
439
+ });
440
+ });
441
+
442
+ it('should pass the model and the messages', async () => {
443
+ prepareJsonResponse({ content: '' });
444
+
445
+ await model.doGenerate({
446
+ prompt: TEST_PROMPT,
447
+ });
448
+
449
+ expect(await server.calls[0].requestBodyJson).toStrictEqual({
450
+ model: 'mistral-small-latest',
451
+ messages: [{ role: 'user', content: [{ type: 'text', text: 'Hello' }] }],
452
+ });
453
+ });
454
+
455
+ it('should pass tools and toolChoice', async () => {
456
+ prepareJsonResponse({ content: '' });
457
+
458
+ await model.doGenerate({
459
+ tools: [
460
+ {
461
+ type: 'function',
462
+ name: 'test-tool',
463
+ inputSchema: {
464
+ type: 'object',
465
+ properties: { value: { type: 'string' } },
466
+ required: ['value'],
467
+ additionalProperties: false,
468
+ $schema: 'http://json-schema.org/draft-07/schema#',
469
+ },
470
+ },
471
+ ],
472
+ toolChoice: {
473
+ type: 'tool',
474
+ toolName: 'test-tool',
475
+ },
476
+ prompt: TEST_PROMPT,
477
+ });
478
+
479
+ expect(await server.calls[0].requestBodyJson).toStrictEqual({
480
+ model: 'mistral-small-latest',
481
+ messages: [{ role: 'user', content: [{ type: 'text', text: 'Hello' }] }],
482
+ tools: [
483
+ {
484
+ type: 'function',
485
+ function: {
486
+ name: 'test-tool',
487
+ parameters: {
488
+ type: 'object',
489
+ properties: { value: { type: 'string' } },
490
+ required: ['value'],
491
+ additionalProperties: false,
492
+ $schema: 'http://json-schema.org/draft-07/schema#',
493
+ },
494
+ },
495
+ },
496
+ ],
497
+ tool_choice: 'any',
498
+ });
499
+ });
500
+
501
+ it('should pass headers', async () => {
502
+ prepareJsonResponse({ content: '' });
503
+
504
+ const provider = createMistral({
505
+ apiKey: 'test-api-key',
506
+ headers: {
507
+ 'Custom-Provider-Header': 'provider-header-value',
508
+ },
509
+ });
510
+
511
+ await provider.chat('mistral-small-latest').doGenerate({
512
+ prompt: TEST_PROMPT,
513
+ headers: {
514
+ 'Custom-Request-Header': 'request-header-value',
515
+ },
516
+ });
517
+
518
+ const requestHeaders = server.calls[0].requestHeaders;
519
+
520
+ expect(requestHeaders).toStrictEqual({
521
+ authorization: 'Bearer test-api-key',
522
+ 'content-type': 'application/json',
523
+ 'custom-provider-header': 'provider-header-value',
524
+ 'custom-request-header': 'request-header-value',
525
+ });
526
+ expect(server.calls[0].requestUserAgent).toContain(
527
+ `ai-sdk/mistral/0.0.0-test`,
528
+ );
529
+ });
530
+
531
+ it('should send request body', async () => {
532
+ prepareJsonResponse({ content: '' });
533
+
534
+ const { request } = await model.doGenerate({
535
+ prompt: TEST_PROMPT,
536
+ });
537
+
538
+ expect(request).toMatchInlineSnapshot(`
539
+ {
540
+ "body": {
541
+ "document_image_limit": undefined,
542
+ "document_page_limit": undefined,
543
+ "max_tokens": undefined,
544
+ "messages": [
545
+ {
546
+ "content": [
547
+ {
548
+ "text": "Hello",
549
+ "type": "text",
550
+ },
551
+ ],
552
+ "role": "user",
553
+ },
554
+ ],
555
+ "model": "mistral-small-latest",
556
+ "random_seed": undefined,
557
+ "response_format": undefined,
558
+ "safe_prompt": undefined,
559
+ "temperature": undefined,
560
+ "tool_choice": undefined,
561
+ "tools": undefined,
562
+ "top_p": undefined,
563
+ },
564
+ }
565
+ `);
566
+ });
567
+
568
+ it('should inject JSON instruction for JSON response format', async () => {
569
+ prepareJsonResponse({ content: '' });
570
+
571
+ const { request } = await model.doGenerate({
572
+ prompt: TEST_PROMPT,
573
+ responseFormat: {
574
+ type: 'json',
575
+ },
576
+ });
577
+
578
+ expect(request).toMatchInlineSnapshot(`
579
+ {
580
+ "body": {
581
+ "document_image_limit": undefined,
582
+ "document_page_limit": undefined,
583
+ "max_tokens": undefined,
584
+ "messages": [
585
+ {
586
+ "content": "You MUST answer with JSON.",
587
+ "role": "system",
588
+ },
589
+ {
590
+ "content": [
591
+ {
592
+ "text": "Hello",
593
+ "type": "text",
594
+ },
595
+ ],
596
+ "role": "user",
597
+ },
598
+ ],
599
+ "model": "mistral-small-latest",
600
+ "random_seed": undefined,
601
+ "response_format": {
602
+ "type": "json_object",
603
+ },
604
+ "safe_prompt": undefined,
605
+ "temperature": undefined,
606
+ "tool_choice": undefined,
607
+ "tools": undefined,
608
+ "top_p": undefined,
609
+ },
610
+ }
611
+ `);
612
+ });
613
+
614
+ it('should inject JSON instruction for JSON response format with schema', async () => {
615
+ prepareJsonResponse({ content: '' });
616
+
617
+ const { request } = await model.doGenerate({
618
+ prompt: TEST_PROMPT,
619
+ responseFormat: {
620
+ type: 'json',
621
+ schema: {
622
+ type: 'object',
623
+ properties: {
624
+ name: { type: 'string' },
625
+ },
626
+ },
627
+ },
628
+ });
629
+
630
+ expect(request).toMatchInlineSnapshot(`
631
+ {
632
+ "body": {
633
+ "document_image_limit": undefined,
634
+ "document_page_limit": undefined,
635
+ "max_tokens": undefined,
636
+ "messages": [
637
+ {
638
+ "content": [
639
+ {
640
+ "text": "Hello",
641
+ "type": "text",
642
+ },
643
+ ],
644
+ "role": "user",
645
+ },
646
+ ],
647
+ "model": "mistral-small-latest",
648
+ "random_seed": undefined,
649
+ "response_format": {
650
+ "json_schema": {
651
+ "description": undefined,
652
+ "name": "response",
653
+ "schema": {
654
+ "properties": {
655
+ "name": {
656
+ "type": "string",
657
+ },
658
+ },
659
+ "type": "object",
660
+ },
661
+ "strict": false,
662
+ },
663
+ "type": "json_schema",
664
+ },
665
+ "safe_prompt": undefined,
666
+ "temperature": undefined,
667
+ "tool_choice": undefined,
668
+ "tools": undefined,
669
+ "top_p": undefined,
670
+ },
671
+ }
672
+ `);
673
+ });
674
+
675
+ it('should extract content when message content is a content object', async () => {
676
+ server.urls['https://api.mistral.ai/v1/chat/completions'].response = {
677
+ type: 'json-value',
678
+ body: {
679
+ object: 'chat.completion',
680
+ id: 'object-id',
681
+ created: 1711113008,
682
+ model: 'mistral-small-latest',
683
+ choices: [
684
+ {
685
+ index: 0,
686
+ message: {
687
+ role: 'assistant',
688
+ content: [
689
+ {
690
+ type: 'text',
691
+ text: 'Hello from object',
692
+ },
693
+ ],
694
+ tool_calls: null,
695
+ },
696
+ finish_reason: 'stop',
697
+ logprobs: null,
698
+ },
699
+ ],
700
+ usage: { prompt_tokens: 4, total_tokens: 34, completion_tokens: 30 },
701
+ },
702
+ };
703
+
704
+ const { content } = await model.doGenerate({
705
+ prompt: TEST_PROMPT,
706
+ });
707
+
708
+ expect(content).toMatchInlineSnapshot(`
709
+ [
710
+ {
711
+ "text": "Hello from object",
712
+ "type": "text",
713
+ },
714
+ ]
715
+ `);
716
+ });
717
+
718
+ it('should return raw text with think tags for reasoning models', async () => {
719
+ const reasoningModel = provider.chat('magistral-small-2506');
720
+
721
+ prepareJsonResponse({
722
+ content:
723
+ "<think>\nLet me think about this problem step by step.\nFirst, I need to understand what the user is asking.\nThen I can provide a helpful response.\n</think>\n\nHello! I'm ready to help you with your question.",
724
+ });
725
+
726
+ const { content } = await reasoningModel.doGenerate({
727
+ prompt: TEST_PROMPT,
728
+ });
729
+
730
+ expect(content).toMatchInlineSnapshot(`
731
+ [
732
+ {
733
+ "text": "<think>
734
+ Let me think about this problem step by step.
735
+ First, I need to understand what the user is asking.
736
+ Then I can provide a helpful response.
737
+ </think>
738
+
739
+ Hello! I'm ready to help you with your question.",
740
+ "type": "text",
741
+ },
742
+ ]
743
+ `);
744
+ });
745
+
746
+ it('should pass parallelToolCalls option', async () => {
747
+ prepareJsonResponse({ content: '' });
748
+
749
+ await model.doGenerate({
750
+ tools: [
751
+ {
752
+ type: 'function',
753
+ name: 'test-tool',
754
+ inputSchema: {
755
+ type: 'object',
756
+ properties: { value: { type: 'string' } },
757
+ required: ['value'],
758
+ additionalProperties: false,
759
+ $schema: 'http://json-schema.org/draft-07/schema#',
760
+ },
761
+ },
762
+ ],
763
+ prompt: TEST_PROMPT,
764
+ providerOptions: {
765
+ mistral: {
766
+ parallelToolCalls: false,
767
+ },
768
+ },
769
+ });
770
+
771
+ expect(await server.calls[0].requestBodyJson).toMatchObject({
772
+ model: 'mistral-small-latest',
773
+ messages: [{ role: 'user', content: [{ type: 'text', text: 'Hello' }] }],
774
+ tools: [
775
+ {
776
+ type: 'function',
777
+ function: {
778
+ name: 'test-tool',
779
+ parameters: {
780
+ type: 'object',
781
+ properties: { value: { type: 'string' } },
782
+ required: ['value'],
783
+ additionalProperties: false,
784
+ $schema: 'http://json-schema.org/draft-07/schema#',
785
+ },
786
+ },
787
+ },
788
+ ],
789
+ parallel_tool_calls: false,
790
+ });
791
+ });
792
+ });
793
+
794
+ describe('doStream', () => {
795
+ function prepareStreamResponse({
796
+ content,
797
+ headers,
798
+ }: {
799
+ content: string[];
800
+ headers?: Record<string, string>;
801
+ }) {
802
+ server.urls['https://api.mistral.ai/v1/chat/completions'].response = {
803
+ type: 'stream-chunks',
804
+ headers,
805
+ chunks: [
806
+ `data: {"id":"53ff663126294946a6b7a4747b70597e","object":"chat.completion.chunk",` +
807
+ `"created":1750537996,"model":"mistral-small-latest","choices":[{"index":0,` +
808
+ `"delta":{"role":"assistant","content":""},"finish_reason":null,"logprobs":null}]}\n\n`,
809
+ ...content.map(text => {
810
+ return (
811
+ `data: {"id":"53ff663126294946a6b7a4747b70597e","object":"chat.completion.chunk",` +
812
+ `"created":1750537996,"model":"mistral-small-latest","choices":[{"index":0,` +
813
+ `"delta":{"role":"assistant","content":"${text}"},"finish_reason":null,"logprobs":null}]}\n\n`
814
+ );
815
+ }),
816
+ `data: {"id":"53ff663126294946a6b7a4747b70597e","object":"chat.completion.chunk",` +
817
+ `"created":1750537996,"model":"mistral-small-latest","choices":[{"index":0,` +
818
+ `"delta":{"content":""},"finish_reason":"stop","logprobs":null}],` +
819
+ `"usage":{"prompt_tokens":4,"total_tokens":36,"completion_tokens":32}}\n\n`,
820
+ `data: [DONE]\n\n`,
821
+ ],
822
+ };
823
+ }
824
+
825
+ it('should stream text deltas', async () => {
826
+ prepareStreamResponse({ content: ['Hello', ', ', 'world!'] });
827
+
828
+ const { stream } = await model.doStream({
829
+ prompt: TEST_PROMPT,
830
+ includeRawChunks: false,
831
+ });
832
+
833
+ expect(await convertReadableStreamToArray(stream)).toMatchInlineSnapshot(`
834
+ [
835
+ {
836
+ "type": "stream-start",
837
+ "warnings": [],
838
+ },
839
+ {
840
+ "id": "53ff663126294946a6b7a4747b70597e",
841
+ "modelId": "mistral-small-latest",
842
+ "timestamp": 2025-06-21T20:33:16.000Z,
843
+ "type": "response-metadata",
844
+ },
845
+ {
846
+ "id": "0",
847
+ "type": "text-start",
848
+ },
849
+ {
850
+ "delta": "Hello",
851
+ "id": "0",
852
+ "type": "text-delta",
853
+ },
854
+ {
855
+ "delta": ", ",
856
+ "id": "0",
857
+ "type": "text-delta",
858
+ },
859
+ {
860
+ "delta": "world!",
861
+ "id": "0",
862
+ "type": "text-delta",
863
+ },
864
+ {
865
+ "id": "0",
866
+ "type": "text-end",
867
+ },
868
+ {
869
+ "finishReason": {
870
+ "raw": "stop",
871
+ "unified": "stop",
872
+ },
873
+ "type": "finish",
874
+ "usage": {
875
+ "inputTokens": {
876
+ "cacheRead": undefined,
877
+ "cacheWrite": undefined,
878
+ "noCache": 4,
879
+ "total": 4,
880
+ },
881
+ "outputTokens": {
882
+ "reasoning": undefined,
883
+ "text": 32,
884
+ "total": 32,
885
+ },
886
+ "raw": {
887
+ "completion_tokens": 32,
888
+ "prompt_tokens": 4,
889
+ "total_tokens": 36,
890
+ },
891
+ },
892
+ },
893
+ ]
894
+ `);
895
+ });
896
+
897
+ it('should avoid duplication when there is a trailing assistant message', async () => {
898
+ prepareStreamResponse({ content: ['prefix', ' and', ' more content'] });
899
+
900
+ const { stream } = await model.doStream({
901
+ prompt: [
902
+ { role: 'user', content: [{ type: 'text', text: 'Hello' }] },
903
+ {
904
+ role: 'assistant',
905
+ content: [{ type: 'text', text: 'prefix ' }],
906
+ },
907
+ ],
908
+ includeRawChunks: false,
909
+ });
910
+
911
+ expect(await convertReadableStreamToArray(stream)).toMatchInlineSnapshot(`
912
+ [
913
+ {
914
+ "type": "stream-start",
915
+ "warnings": [],
916
+ },
917
+ {
918
+ "id": "53ff663126294946a6b7a4747b70597e",
919
+ "modelId": "mistral-small-latest",
920
+ "timestamp": 2025-06-21T20:33:16.000Z,
921
+ "type": "response-metadata",
922
+ },
923
+ {
924
+ "id": "0",
925
+ "type": "text-start",
926
+ },
927
+ {
928
+ "delta": "prefix",
929
+ "id": "0",
930
+ "type": "text-delta",
931
+ },
932
+ {
933
+ "delta": " and",
934
+ "id": "0",
935
+ "type": "text-delta",
936
+ },
937
+ {
938
+ "delta": " more content",
939
+ "id": "0",
940
+ "type": "text-delta",
941
+ },
942
+ {
943
+ "id": "0",
944
+ "type": "text-end",
945
+ },
946
+ {
947
+ "finishReason": {
948
+ "raw": "stop",
949
+ "unified": "stop",
950
+ },
951
+ "type": "finish",
952
+ "usage": {
953
+ "inputTokens": {
954
+ "cacheRead": undefined,
955
+ "cacheWrite": undefined,
956
+ "noCache": 4,
957
+ "total": 4,
958
+ },
959
+ "outputTokens": {
960
+ "reasoning": undefined,
961
+ "text": 32,
962
+ "total": 32,
963
+ },
964
+ "raw": {
965
+ "completion_tokens": 32,
966
+ "prompt_tokens": 4,
967
+ "total_tokens": 36,
968
+ },
969
+ },
970
+ },
971
+ ]
972
+ `);
973
+ });
974
+
975
+ it('should stream tool deltas', async () => {
976
+ server.urls['https://api.mistral.ai/v1/chat/completions'].response = {
977
+ type: 'stream-chunks',
978
+ chunks: [
979
+ `data: {"id":"a8f32d91e5b64c2f9e7b3a8d4f6c1e5a","object":"chat.completion.chunk","created":1750538400,"model":"mistral-large-latest",` +
980
+ `"choices":[{"index":0,"delta":{"role":"assistant","content":""},"finish_reason":null,"logprobs":null}]}\n\n`,
981
+ `data: {"id":"a8f32d91e5b64c2f9e7b3a8d4f6c1e5a","object":"chat.completion.chunk","created":1750538400,"model":"mistral-large-latest",` +
982
+ `"choices":[{"index":0,"delta":{"content":null,"tool_calls":[{"id":"call_9K8xFjN2mP3qR7sT","function":{"name":"test-tool","arguments":` +
983
+ `"{\\"value\\":\\"Sparkle Day\\"}"` +
984
+ `}}]},"finish_reason":"tool_calls","logprobs":null}],"usage":{"prompt_tokens":183,"total_tokens":316,"completion_tokens":133}}\n\n`,
985
+ 'data: [DONE]\n\n',
986
+ ],
987
+ };
988
+
989
+ const { stream } = await createMistral({
990
+ apiKey: 'test-api-key',
991
+ })
992
+ .chat('mistral-large-latest')
993
+ .doStream({
994
+ tools: [
995
+ {
996
+ type: 'function',
997
+ name: 'test-tool',
998
+ inputSchema: {
999
+ type: 'object',
1000
+ properties: { value: { type: 'string' } },
1001
+ required: ['value'],
1002
+ additionalProperties: false,
1003
+ $schema: 'http://json-schema.org/draft-07/schema#',
1004
+ },
1005
+ },
1006
+ ],
1007
+ prompt: TEST_PROMPT,
1008
+ includeRawChunks: false,
1009
+ });
1010
+
1011
+ expect(await convertReadableStreamToArray(stream)).toMatchInlineSnapshot(`
1012
+ [
1013
+ {
1014
+ "type": "stream-start",
1015
+ "warnings": [],
1016
+ },
1017
+ {
1018
+ "id": "a8f32d91e5b64c2f9e7b3a8d4f6c1e5a",
1019
+ "modelId": "mistral-large-latest",
1020
+ "timestamp": 2025-06-21T20:40:00.000Z,
1021
+ "type": "response-metadata",
1022
+ },
1023
+ {
1024
+ "id": "call_9K8xFjN2mP3qR7sT",
1025
+ "toolName": "test-tool",
1026
+ "type": "tool-input-start",
1027
+ },
1028
+ {
1029
+ "delta": "{"value":"Sparkle Day"}",
1030
+ "id": "call_9K8xFjN2mP3qR7sT",
1031
+ "type": "tool-input-delta",
1032
+ },
1033
+ {
1034
+ "id": "call_9K8xFjN2mP3qR7sT",
1035
+ "type": "tool-input-end",
1036
+ },
1037
+ {
1038
+ "input": "{"value":"Sparkle Day"}",
1039
+ "toolCallId": "call_9K8xFjN2mP3qR7sT",
1040
+ "toolName": "test-tool",
1041
+ "type": "tool-call",
1042
+ },
1043
+ {
1044
+ "finishReason": {
1045
+ "raw": "tool_calls",
1046
+ "unified": "tool-calls",
1047
+ },
1048
+ "type": "finish",
1049
+ "usage": {
1050
+ "inputTokens": {
1051
+ "cacheRead": undefined,
1052
+ "cacheWrite": undefined,
1053
+ "noCache": 183,
1054
+ "total": 183,
1055
+ },
1056
+ "outputTokens": {
1057
+ "reasoning": undefined,
1058
+ "text": 133,
1059
+ "total": 133,
1060
+ },
1061
+ "raw": {
1062
+ "completion_tokens": 133,
1063
+ "prompt_tokens": 183,
1064
+ "total_tokens": 316,
1065
+ },
1066
+ },
1067
+ },
1068
+ ]
1069
+ `);
1070
+ });
1071
+
1072
+ it('should expose the raw response headers', async () => {
1073
+ prepareStreamResponse({
1074
+ content: [],
1075
+ headers: { 'test-header': 'test-value' },
1076
+ });
1077
+
1078
+ const { response } = await model.doStream({
1079
+ prompt: TEST_PROMPT,
1080
+ includeRawChunks: false,
1081
+ });
1082
+
1083
+ expect(response?.headers).toStrictEqual({
1084
+ // default headers:
1085
+ 'content-type': 'text/event-stream',
1086
+ 'cache-control': 'no-cache',
1087
+ connection: 'keep-alive',
1088
+
1089
+ // custom header
1090
+ 'test-header': 'test-value',
1091
+ });
1092
+ });
1093
+
1094
+ it('should pass the messages', async () => {
1095
+ prepareStreamResponse({ content: [''] });
1096
+
1097
+ await model.doStream({
1098
+ prompt: TEST_PROMPT,
1099
+ includeRawChunks: false,
1100
+ });
1101
+
1102
+ expect(await server.calls[0].requestBodyJson).toStrictEqual({
1103
+ stream: true,
1104
+ model: 'mistral-small-latest',
1105
+ messages: [{ role: 'user', content: [{ type: 'text', text: 'Hello' }] }],
1106
+ });
1107
+ });
1108
+
1109
+ it('should pass headers', async () => {
1110
+ prepareStreamResponse({ content: [] });
1111
+
1112
+ const provider = createMistral({
1113
+ apiKey: 'test-api-key',
1114
+ headers: {
1115
+ 'Custom-Provider-Header': 'provider-header-value',
1116
+ },
1117
+ });
1118
+
1119
+ await provider.chat('mistral-small-latest').doStream({
1120
+ prompt: TEST_PROMPT,
1121
+ includeRawChunks: false,
1122
+ headers: {
1123
+ 'Custom-Request-Header': 'request-header-value',
1124
+ },
1125
+ });
1126
+
1127
+ expect(server.calls[0].requestHeaders).toStrictEqual({
1128
+ authorization: 'Bearer test-api-key',
1129
+ 'content-type': 'application/json',
1130
+ 'custom-provider-header': 'provider-header-value',
1131
+ 'custom-request-header': 'request-header-value',
1132
+ });
1133
+ expect(server.calls[0].requestUserAgent).toContain(
1134
+ `ai-sdk/mistral/0.0.0-test`,
1135
+ );
1136
+ });
1137
+
1138
+ it('should send request body', async () => {
1139
+ prepareStreamResponse({ content: [] });
1140
+
1141
+ const { request } = await model.doStream({
1142
+ prompt: TEST_PROMPT,
1143
+ includeRawChunks: false,
1144
+ });
1145
+
1146
+ expect(request).toMatchInlineSnapshot(`
1147
+ {
1148
+ "body": {
1149
+ "document_image_limit": undefined,
1150
+ "document_page_limit": undefined,
1151
+ "max_tokens": undefined,
1152
+ "messages": [
1153
+ {
1154
+ "content": [
1155
+ {
1156
+ "text": "Hello",
1157
+ "type": "text",
1158
+ },
1159
+ ],
1160
+ "role": "user",
1161
+ },
1162
+ ],
1163
+ "model": "mistral-small-latest",
1164
+ "random_seed": undefined,
1165
+ "response_format": undefined,
1166
+ "safe_prompt": undefined,
1167
+ "stream": true,
1168
+ "temperature": undefined,
1169
+ "tool_choice": undefined,
1170
+ "tools": undefined,
1171
+ "top_p": undefined,
1172
+ },
1173
+ }
1174
+ `);
1175
+ });
1176
+
1177
it('should stream text with content objects', async () => {
  // Instead of using prepareStreamResponse (which sends strings),
  // we set the chunks manually so that each delta's content is an object.
  // This exercises Mistral's array-form `content` deltas
  // (`[{ "type": "text", "text": ... }]`) in streaming responses.
  server.urls['https://api.mistral.ai/v1/chat/completions'].response = {
    type: 'stream-chunks',
    chunks: [
      // First chunk carries the assistant role and an empty text part.
      `data: {"id":"b9e43f82d6c74a1e9f5b2c8e7a9d4f6b","object":"chat.completion.chunk","created":1750538500,"model":"mistral-small-latest","choices":[{"index":0,"delta":{"role":"assistant","content":[{"type":"text","text":""}]},"finish_reason":null,"logprobs":null}]}\n\n`,
      `data: {"id":"b9e43f82d6c74a1e9f5b2c8e7a9d4f6b","object":"chat.completion.chunk","created":1750538500,"model":"mistral-small-latest","choices":[{"index":0,"delta":{"content":[{"type":"text","text":"Hello"}]},"finish_reason":null,"logprobs":null}]}\n\n`,
      // Final content chunk also carries finish_reason and usage.
      `data: {"id":"b9e43f82d6c74a1e9f5b2c8e7a9d4f6b","object":"chat.completion.chunk","created":1750538500,"model":"mistral-small-latest","choices":[{"index":0,"delta":{"content":[{"type":"text","text":", world!"}]},"finish_reason":"stop","logprobs":null}],"usage":{"prompt_tokens":4,"total_tokens":36,"completion_tokens":32}}\n\n`,
      `data: [DONE]\n\n`,
    ],
  };

  const { stream } = await model.doStream({
    prompt: TEST_PROMPT,
    includeRawChunks: false,
  });

  // Expected unified stream-part sequence: stream-start, response metadata
  // (timestamp derived from the chunk's `created` seconds), the text
  // lifecycle (start / deltas / end), then finish with normalized usage
  // taken from the last chunk's `usage` field.
  expect(await convertReadableStreamToArray(stream)).toMatchInlineSnapshot(`
    [
      {
        "type": "stream-start",
        "warnings": [],
      },
      {
        "id": "b9e43f82d6c74a1e9f5b2c8e7a9d4f6b",
        "modelId": "mistral-small-latest",
        "timestamp": 2025-06-21T20:41:40.000Z,
        "type": "response-metadata",
      },
      {
        "id": "0",
        "type": "text-start",
      },
      {
        "delta": "Hello",
        "id": "0",
        "type": "text-delta",
      },
      {
        "delta": ", world!",
        "id": "0",
        "type": "text-delta",
      },
      {
        "id": "0",
        "type": "text-end",
      },
      {
        "finishReason": {
          "raw": "stop",
          "unified": "stop",
        },
        "type": "finish",
        "usage": {
          "inputTokens": {
            "cacheRead": undefined,
            "cacheWrite": undefined,
            "noCache": 4,
            "total": 4,
          },
          "outputTokens": {
            "reasoning": undefined,
            "text": 32,
            "total": 32,
          },
          "raw": {
            "completion_tokens": 32,
            "prompt_tokens": 4,
            "total_tokens": 36,
          },
        },
      },
    ]
  `);
});
1253
+
1254
it('should stream thinking content as reasoning deltas', async () => {
  // Magistral models emit `thinking` content parts; the provider must
  // surface them as reasoning-* stream parts, distinct from text parts.
  server.urls['https://api.mistral.ai/v1/chat/completions'].response = {
    type: 'stream-chunks',
    chunks: [
      // A thinking part (nested text inside `thinking`), then a plain text
      // part, then a terminal chunk with finish_reason and usage.
      `data: {"id":"thinking-test","object":"chat.completion.chunk","created":1750538000,"model":"magistral-small-2507","choices":[{"index":0,"delta":{"role":"assistant","content":[{"type":"thinking","thinking":[{"type":"text","text":"Let me think..."}]}]},"finish_reason":null}]}\n\n`,
      `data: {"id":"thinking-test","object":"chat.completion.chunk","created":1750538000,"model":"magistral-small-2507","choices":[{"index":0,"delta":{"role":"assistant","content":[{"type":"text","text":"The answer is 4."}]},"finish_reason":null}]}\n\n`,
      `data: {"id":"thinking-test","object":"chat.completion.chunk","created":1750538000,"model":"magistral-small-2507","choices":[{"index":0,"delta":{"content":""},"finish_reason":"stop"}],"usage":{"prompt_tokens":5,"total_tokens":25,"completion_tokens":20}}\n\n`,
      'data: [DONE]\n\n',
    ],
  };

  const { stream } = await model.doStream({
    prompt: TEST_PROMPT,
    includeRawChunks: false,
  });

  // Reasoning parts get ids from the provider's injected `generateId`
  // (mockId, hence "id-0"), while the text part uses id "0". The reasoning
  // segment is closed before the text segment starts.
  expect(await convertReadableStreamToArray(stream)).toMatchInlineSnapshot(`
    [
      {
        "type": "stream-start",
        "warnings": [],
      },
      {
        "id": "thinking-test",
        "modelId": "magistral-small-2507",
        "timestamp": 2025-06-21T20:33:20.000Z,
        "type": "response-metadata",
      },
      {
        "id": "id-0",
        "type": "reasoning-start",
      },
      {
        "delta": "Let me think...",
        "id": "id-0",
        "type": "reasoning-delta",
      },
      {
        "id": "id-0",
        "type": "reasoning-end",
      },
      {
        "id": "0",
        "type": "text-start",
      },
      {
        "delta": "The answer is 4.",
        "id": "0",
        "type": "text-delta",
      },
      {
        "id": "0",
        "type": "text-end",
      },
      {
        "finishReason": {
          "raw": "stop",
          "unified": "stop",
        },
        "type": "finish",
        "usage": {
          "inputTokens": {
            "cacheRead": undefined,
            "cacheWrite": undefined,
            "noCache": 5,
            "total": 5,
          },
          "outputTokens": {
            "reasoning": undefined,
            "text": 20,
            "total": 20,
          },
          "raw": {
            "completion_tokens": 20,
            "prompt_tokens": 5,
            "total_tokens": 25,
          },
        },
      },
    ]
  `);
});
1336
+
1337
it('should handle interleaved thinking and text content in streaming', async () => {
  // Alternating thinking / text deltas: each thinking segment must open and
  // close its own reasoning part before the following text part is emitted.
  server.urls['https://api.mistral.ai/v1/chat/completions'].response = {
    type: 'stream-chunks',
    chunks: [
      `data: {"id":"interleaved-test","object":"chat.completion.chunk","created":1750538000,"model":"magistral-small-2507","choices":[{"index":0,"delta":{"role":"assistant","content":[{"type":"thinking","thinking":[{"type":"text","text":"First thought."}]}]},"finish_reason":null}]}\n\n`,
      `data: {"id":"interleaved-test","object":"chat.completion.chunk","created":1750538000,"model":"magistral-small-2507","choices":[{"index":0,"delta":{"role":"assistant","content":[{"type":"text","text":"Partial answer."}]},"finish_reason":null}]}\n\n`,
      `data: {"id":"interleaved-test","object":"chat.completion.chunk","created":1750538000,"model":"magistral-small-2507","choices":[{"index":0,"delta":{"role":"assistant","content":[{"type":"thinking","thinking":[{"type":"text","text":"Second thought."}]}]},"finish_reason":null}]}\n\n`,
      `data: {"id":"interleaved-test","object":"chat.completion.chunk","created":1750538000,"model":"magistral-small-2507","choices":[{"index":0,"delta":{"role":"assistant","content":[{"type":"text","text":"Final answer."}]},"finish_reason":null}]}\n\n`,
      `data: {"id":"interleaved-test","object":"chat.completion.chunk","created":1750538000,"model":"magistral-small-2507","choices":[{"index":0,"delta":{"content":""},"finish_reason":"stop"}],"usage":{"prompt_tokens":10,"total_tokens":40,"completion_tokens":30}}\n\n`,
      'data: [DONE]\n\n',
    ],
  };

  const { stream } = await model.doStream({
    prompt: TEST_PROMPT,
    includeRawChunks: false,
  });

  // Each reasoning segment gets a fresh generated id ("id-1", "id-2" —
  // the shared mockId generator keeps counting across tests), while the
  // text segments reuse id "0" with repeated start/end pairs.
  expect(await convertReadableStreamToArray(stream)).toMatchInlineSnapshot(`
    [
      {
        "type": "stream-start",
        "warnings": [],
      },
      {
        "id": "interleaved-test",
        "modelId": "magistral-small-2507",
        "timestamp": 2025-06-21T20:33:20.000Z,
        "type": "response-metadata",
      },
      {
        "id": "id-1",
        "type": "reasoning-start",
      },
      {
        "delta": "First thought.",
        "id": "id-1",
        "type": "reasoning-delta",
      },
      {
        "id": "id-1",
        "type": "reasoning-end",
      },
      {
        "id": "0",
        "type": "text-start",
      },
      {
        "delta": "Partial answer.",
        "id": "0",
        "type": "text-delta",
      },
      {
        "id": "0",
        "type": "text-end",
      },
      {
        "id": "id-2",
        "type": "reasoning-start",
      },
      {
        "delta": "Second thought.",
        "id": "id-2",
        "type": "reasoning-delta",
      },
      {
        "id": "id-2",
        "type": "reasoning-end",
      },
      {
        "id": "0",
        "type": "text-start",
      },
      {
        "delta": "Final answer.",
        "id": "0",
        "type": "text-delta",
      },
      {
        "id": "0",
        "type": "text-end",
      },
      {
        "finishReason": {
          "raw": "stop",
          "unified": "stop",
        },
        "type": "finish",
        "usage": {
          "inputTokens": {
            "cacheRead": undefined,
            "cacheWrite": undefined,
            "noCache": 10,
            "total": 10,
          },
          "outputTokens": {
            "reasoning": undefined,
            "text": 30,
            "total": 30,
          },
          "raw": {
            "completion_tokens": 30,
            "prompt_tokens": 10,
            "total_tokens": 40,
          },
        },
      },
    ]
  `);
});
1447
+ });
1448
+
1449
describe('doStream with raw chunks', () => {
  it('should stream raw chunks when includeRawChunks is true', async () => {
    // With includeRawChunks enabled, every parsed SSE chunk must also be
    // emitted verbatim as a `raw` stream part, interleaved ahead of the
    // unified parts derived from it (see the snapshot ordering below).
    server.urls['https://api.mistral.ai/v1/chat/completions'].response = {
      type: 'stream-chunks',
      chunks: [
        `data: {"id":"c7d54e93f8a64b2e9c1f5a8b7d9e2f4c","object":"chat.completion.chunk","created":1750538600,"model":"mistral-large-latest","choices":[{"index":0,"delta":{"role":"assistant","content":"Hello"},"finish_reason":null,"logprobs":null}]}\n\n`,
        `data: {"id":"d8e65fa4g9b75c3f0d2g6b9c8e0f3g5d","object":"chat.completion.chunk","created":1750538601,"model":"mistral-large-latest","choices":[{"index":0,"delta":{"content":" world"},"finish_reason":null,"logprobs":null}]}\n\n`,
        `data: {"id":"e9f76gb5h0c86d4g1e3h7c0d9f1g4h6e","object":"chat.completion.chunk","created":1750538602,"model":"mistral-large-latest","choices":[{"index":0,"delta":{},"finish_reason":"stop","logprobs":null}],"usage":{"prompt_tokens":10,"completion_tokens":5,"total_tokens":15}}\n\n`,
        'data: [DONE]\n\n',
      ],
    };

    const { stream } = await model.doStream({
      prompt: TEST_PROMPT,
      includeRawChunks: true,
    });

    // Note: response metadata is taken from the first chunk only, even
    // though later chunks carry different ids/timestamps.
    expect(await convertReadableStreamToArray(stream)).toMatchInlineSnapshot(`
      [
        {
          "type": "stream-start",
          "warnings": [],
        },
        {
          "rawValue": {
            "choices": [
              {
                "delta": {
                  "content": "Hello",
                  "role": "assistant",
                },
                "finish_reason": null,
                "index": 0,
                "logprobs": null,
              },
            ],
            "created": 1750538600,
            "id": "c7d54e93f8a64b2e9c1f5a8b7d9e2f4c",
            "model": "mistral-large-latest",
            "object": "chat.completion.chunk",
          },
          "type": "raw",
        },
        {
          "id": "c7d54e93f8a64b2e9c1f5a8b7d9e2f4c",
          "modelId": "mistral-large-latest",
          "timestamp": 2025-06-21T20:43:20.000Z,
          "type": "response-metadata",
        },
        {
          "id": "0",
          "type": "text-start",
        },
        {
          "delta": "Hello",
          "id": "0",
          "type": "text-delta",
        },
        {
          "rawValue": {
            "choices": [
              {
                "delta": {
                  "content": " world",
                },
                "finish_reason": null,
                "index": 0,
                "logprobs": null,
              },
            ],
            "created": 1750538601,
            "id": "d8e65fa4g9b75c3f0d2g6b9c8e0f3g5d",
            "model": "mistral-large-latest",
            "object": "chat.completion.chunk",
          },
          "type": "raw",
        },
        {
          "delta": " world",
          "id": "0",
          "type": "text-delta",
        },
        {
          "rawValue": {
            "choices": [
              {
                "delta": {},
                "finish_reason": "stop",
                "index": 0,
                "logprobs": null,
              },
            ],
            "created": 1750538602,
            "id": "e9f76gb5h0c86d4g1e3h7c0d9f1g4h6e",
            "model": "mistral-large-latest",
            "object": "chat.completion.chunk",
            "usage": {
              "completion_tokens": 5,
              "prompt_tokens": 10,
              "total_tokens": 15,
            },
          },
          "type": "raw",
        },
        {
          "id": "0",
          "type": "text-end",
        },
        {
          "finishReason": {
            "raw": "stop",
            "unified": "stop",
          },
          "type": "finish",
          "usage": {
            "inputTokens": {
              "cacheRead": undefined,
              "cacheWrite": undefined,
              "noCache": 10,
              "total": 10,
            },
            "outputTokens": {
              "reasoning": undefined,
              "text": 5,
              "total": 5,
            },
            "raw": {
              "completion_tokens": 5,
              "prompt_tokens": 10,
              "total_tokens": 15,
            },
          },
        },
      ]
    `);
  });
});
1586
+
1587
+ describe('tool result format support', () => {
1588
+ it('should handle new LanguageModelV3ToolResultOutput format', async () => {
1589
+ server.urls['https://api.mistral.ai/v1/chat/completions'].response = {
1590
+ type: 'json-value',
1591
+ body: {
1592
+ id: 'test-id',
1593
+ object: 'chat.completion',
1594
+ created: 1234567890,
1595
+ model: 'mistral-small',
1596
+ choices: [
1597
+ {
1598
+ index: 0,
1599
+ message: {
1600
+ role: 'assistant',
1601
+ content: 'Here is the result',
1602
+ tool_calls: null,
1603
+ },
1604
+ finish_reason: 'stop',
1605
+ },
1606
+ ],
1607
+ usage: {
1608
+ prompt_tokens: 10,
1609
+ completion_tokens: 5,
1610
+ total_tokens: 15,
1611
+ },
1612
+ },
1613
+ };
1614
+
1615
+ const result = await model.doGenerate({
1616
+ prompt: [
1617
+ {
1618
+ role: 'user',
1619
+ content: [{ type: 'text', text: 'Hello' }],
1620
+ },
1621
+ {
1622
+ role: 'assistant',
1623
+ content: [
1624
+ {
1625
+ type: 'tool-call',
1626
+ toolCallId: 'call-1',
1627
+ toolName: 'test-tool',
1628
+ input: { query: 'test' },
1629
+ },
1630
+ ],
1631
+ },
1632
+ {
1633
+ role: 'tool',
1634
+ content: [
1635
+ {
1636
+ type: 'tool-result',
1637
+ toolCallId: 'call-1',
1638
+ toolName: 'test-tool',
1639
+ output: { type: 'json', value: { result: 'success' } },
1640
+ },
1641
+ ],
1642
+ },
1643
+ ],
1644
+ });
1645
+
1646
+ expect(result.content).toEqual([
1647
+ { type: 'text', text: 'Here is the result' },
1648
+ ]);
1649
+
1650
+ expect(result.finishReason).toMatchInlineSnapshot(`
1651
+ {
1652
+ "raw": "stop",
1653
+ "unified": "stop",
1654
+ }
1655
+ `);
1656
+ });
1657
+ });
1658
+
1659
+ describe('reference content parsing', () => {
1660
+ it('should handle reference_ids as numbers', async () => {
1661
+ server.urls['https://api.mistral.ai/v1/chat/completions'].response = {
1662
+ type: 'json-value',
1663
+ body: {
1664
+ object: 'chat.completion',
1665
+ id: 'test-id',
1666
+ created: 1711113008,
1667
+ model: 'mistral-small-latest',
1668
+ choices: [
1669
+ {
1670
+ index: 0,
1671
+ message: {
1672
+ role: 'assistant',
1673
+ content: [
1674
+ { type: 'text', text: 'Here is the info' },
1675
+ { type: 'reference', reference_ids: [1, 2, 3] },
1676
+ ],
1677
+ tool_calls: null,
1678
+ },
1679
+ finish_reason: 'stop',
1680
+ },
1681
+ ],
1682
+ usage: { prompt_tokens: 4, total_tokens: 34, completion_tokens: 30 },
1683
+ },
1684
+ };
1685
+
1686
+ const { content } = await model.doGenerate({ prompt: TEST_PROMPT });
1687
+
1688
+ expect(content).toStrictEqual([{ type: 'text', text: 'Here is the info' }]);
1689
+ });
1690
+
1691
+ it('should handle reference_ids as strings', async () => {
1692
+ server.urls['https://api.mistral.ai/v1/chat/completions'].response = {
1693
+ type: 'json-value',
1694
+ body: {
1695
+ object: 'chat.completion',
1696
+ id: 'test-id',
1697
+ created: 1711113008,
1698
+ model: 'mistral-small-latest',
1699
+ choices: [
1700
+ {
1701
+ index: 0,
1702
+ message: {
1703
+ role: 'assistant',
1704
+ content: [
1705
+ { type: 'text', text: 'Here is the info' },
1706
+ {
1707
+ type: 'reference',
1708
+ reference_ids: ['ref-1', 'ref-2', 'ref-3'],
1709
+ },
1710
+ ],
1711
+ tool_calls: null,
1712
+ },
1713
+ finish_reason: 'stop',
1714
+ },
1715
+ ],
1716
+ usage: { prompt_tokens: 4, total_tokens: 34, completion_tokens: 30 },
1717
+ },
1718
+ };
1719
+
1720
+ const { content } = await model.doGenerate({ prompt: TEST_PROMPT });
1721
+
1722
+ expect(content).toStrictEqual([{ type: 'text', text: 'Here is the info' }]);
1723
+ });
1724
+
1725
+ it('should handle mixed reference_ids (numbers and strings)', async () => {
1726
+ server.urls['https://api.mistral.ai/v1/chat/completions'].response = {
1727
+ type: 'json-value',
1728
+ body: {
1729
+ object: 'chat.completion',
1730
+ id: 'test-id',
1731
+ created: 1711113008,
1732
+ model: 'mistral-small-latest',
1733
+ choices: [
1734
+ {
1735
+ index: 0,
1736
+ message: {
1737
+ role: 'assistant',
1738
+ content: [
1739
+ { type: 'text', text: 'Here is the info' },
1740
+ { type: 'reference', reference_ids: [1, 'ref-2', 3] },
1741
+ ],
1742
+ tool_calls: null,
1743
+ },
1744
+ finish_reason: 'stop',
1745
+ },
1746
+ ],
1747
+ usage: { prompt_tokens: 4, total_tokens: 34, completion_tokens: 30 },
1748
+ },
1749
+ };
1750
+
1751
+ const { content } = await model.doGenerate({ prompt: TEST_PROMPT });
1752
+
1753
+ expect(content).toStrictEqual([{ type: 'text', text: 'Here is the info' }]);
1754
+ });
1755
+ });