@ai-sdk/xai 0.0.0-64aae7dd-20260114144918 → 0.0.0-98261322-20260122142521

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47) hide show
  1. package/CHANGELOG.md +64 -5
  2. package/dist/index.js +1 -1
  3. package/dist/index.mjs +1 -1
  4. package/docs/01-xai.mdx +697 -0
  5. package/package.json +11 -6
  6. package/src/convert-to-xai-chat-messages.test.ts +243 -0
  7. package/src/convert-to-xai-chat-messages.ts +142 -0
  8. package/src/convert-xai-chat-usage.test.ts +240 -0
  9. package/src/convert-xai-chat-usage.ts +23 -0
  10. package/src/get-response-metadata.ts +19 -0
  11. package/src/index.ts +14 -0
  12. package/src/map-xai-finish-reason.ts +19 -0
  13. package/src/responses/__fixtures__/xai-code-execution-tool.1.json +68 -0
  14. package/src/responses/__fixtures__/xai-text-streaming.1.chunks.txt +698 -0
  15. package/src/responses/__fixtures__/xai-text-with-reasoning-streaming-store-false.1.chunks.txt +655 -0
  16. package/src/responses/__fixtures__/xai-text-with-reasoning-streaming.1.chunks.txt +679 -0
  17. package/src/responses/__fixtures__/xai-web-search-tool.1.chunks.txt +274 -0
  18. package/src/responses/__fixtures__/xai-web-search-tool.1.json +90 -0
  19. package/src/responses/__fixtures__/xai-x-search-tool.1.json +149 -0
  20. package/src/responses/__fixtures__/xai-x-search-tool.chunks.txt +1757 -0
  21. package/src/responses/__snapshots__/xai-responses-language-model.test.ts.snap +21929 -0
  22. package/src/responses/convert-to-xai-responses-input.test.ts +463 -0
  23. package/src/responses/convert-to-xai-responses-input.ts +206 -0
  24. package/src/responses/convert-xai-responses-usage.ts +24 -0
  25. package/src/responses/map-xai-responses-finish-reason.ts +20 -0
  26. package/src/responses/xai-responses-api.ts +393 -0
  27. package/src/responses/xai-responses-language-model.test.ts +1803 -0
  28. package/src/responses/xai-responses-language-model.ts +732 -0
  29. package/src/responses/xai-responses-options.ts +34 -0
  30. package/src/responses/xai-responses-prepare-tools.test.ts +497 -0
  31. package/src/responses/xai-responses-prepare-tools.ts +226 -0
  32. package/src/tool/code-execution.ts +17 -0
  33. package/src/tool/index.ts +15 -0
  34. package/src/tool/view-image.ts +20 -0
  35. package/src/tool/view-x-video.ts +18 -0
  36. package/src/tool/web-search.ts +56 -0
  37. package/src/tool/x-search.ts +63 -0
  38. package/src/version.ts +6 -0
  39. package/src/xai-chat-language-model.test.ts +1805 -0
  40. package/src/xai-chat-language-model.ts +681 -0
  41. package/src/xai-chat-options.ts +131 -0
  42. package/src/xai-chat-prompt.ts +44 -0
  43. package/src/xai-error.ts +19 -0
  44. package/src/xai-image-settings.ts +1 -0
  45. package/src/xai-prepare-tools.ts +95 -0
  46. package/src/xai-provider.test.ts +167 -0
  47. package/src/xai-provider.ts +162 -0
@@ -0,0 +1,1803 @@
1
+ import { LanguageModelV3Prompt } from '@ai-sdk/provider';
2
+ import {
3
+ convertReadableStreamToArray,
4
+ mockId,
5
+ } from '@ai-sdk/provider-utils/test';
6
+ import { createTestServer } from '@ai-sdk/test-server/with-vitest';
7
+ import { beforeEach, describe, expect, it } from 'vitest';
8
+ import fs from 'node:fs';
9
+ import { XaiResponsesLanguageModel } from './xai-responses-language-model';
10
+ import { XaiResponsesProviderOptions } from './xai-responses-options';
11
+
12
+ const TEST_PROMPT: LanguageModelV3Prompt = [
13
+ { role: 'user', content: [{ type: 'text', text: 'hello' }] },
14
+ ];
15
+
16
+ function createModel(modelId = 'grok-4-fast') {
17
+ return new XaiResponsesLanguageModel(modelId, {
18
+ provider: 'xai.responses',
19
+ baseURL: 'https://api.x.ai/v1',
20
+ headers: () => ({ Authorization: 'Bearer test-key' }),
21
+ generateId: mockId(),
22
+ });
23
+ }
24
+
25
+ describe('XaiResponsesLanguageModel', () => {
26
+ const server = createTestServer({
27
+ 'https://api.x.ai/v1/responses': {},
28
+ });
29
+
30
+ function prepareJsonFixtureResponse(filename: string) {
31
+ server.urls['https://api.x.ai/v1/responses'].response = {
32
+ type: 'json-value',
33
+ body: JSON.parse(
34
+ fs.readFileSync(`src/responses/__fixtures__/${filename}.json`, 'utf8'),
35
+ ),
36
+ };
37
+ }
38
+
39
+ function prepareChunksFixtureResponse(filename: string) {
40
+ const chunks = fs
41
+ .readFileSync(`src/responses/__fixtures__/${filename}.chunks.txt`, 'utf8'),
42
+ .split('\n')
43
+ .map(line => `data: ${line}\n\n`);
44
+ chunks.push('data: [DONE]\n\n');
45
+
46
+ server.urls['https://api.x.ai/v1/responses'].response = {
47
+ type: 'stream-chunks',
48
+ chunks,
49
+ };
50
+ }
51
+
52
+ function prepareJsonResponse(body: Record<string, unknown>) {
53
+ server.urls['https://api.x.ai/v1/responses'].response = {
54
+ type: 'json-value',
55
+ body,
56
+ };
57
+ }
58
+
59
+ function prepareStreamChunks(chunks: string[]) {
60
+ server.urls['https://api.x.ai/v1/responses'].response = {
61
+ type: 'stream-chunks',
62
+ chunks: chunks
63
+ .map(chunk => `data: ${chunk}\n\n`)
64
+ .concat('data: [DONE]\n\n'),
65
+ };
66
+ }
67
+
68
+ describe('doGenerate', () => {
69
+ describe('basic text generation', () => {
70
+ it('should generate text content', async () => {
71
+ prepareJsonResponse({
72
+ id: 'resp_123',
73
+ object: 'response',
74
+ created_at: 1700000000,
75
+ status: 'completed',
76
+ model: 'grok-4-fast',
77
+ output: [
78
+ {
79
+ type: 'message',
80
+ id: 'msg_123',
81
+ status: 'completed',
82
+ role: 'assistant',
83
+ content: [
84
+ {
85
+ type: 'output_text',
86
+ text: 'hello world',
87
+ annotations: [],
88
+ },
89
+ ],
90
+ },
91
+ ],
92
+ usage: {
93
+ input_tokens: 10,
94
+ output_tokens: 5,
95
+ total_tokens: 15,
96
+ },
97
+ });
98
+
99
+ const result = await createModel().doGenerate({
100
+ prompt: TEST_PROMPT,
101
+ });
102
+
103
+ expect(result.content).toMatchInlineSnapshot(`
104
+ [
105
+ {
106
+ "text": "hello world",
107
+ "type": "text",
108
+ },
109
+ ]
110
+ `);
111
+ });
112
+
113
+ it('should extract usage correctly', async () => {
114
+ prepareJsonResponse({
115
+ id: 'resp_123',
116
+ object: 'response',
117
+ status: 'completed',
118
+ model: 'grok-4-fast',
119
+ output: [],
120
+ usage: {
121
+ input_tokens: 345,
122
+ output_tokens: 538,
123
+ total_tokens: 883,
124
+ output_tokens_details: {
125
+ reasoning_tokens: 123,
126
+ },
127
+ },
128
+ });
129
+
130
+ const result = await createModel().doGenerate({
131
+ prompt: TEST_PROMPT,
132
+ });
133
+
134
+ expect(result.usage).toMatchInlineSnapshot(`
135
+ {
136
+ "inputTokens": {
137
+ "cacheRead": 0,
138
+ "cacheWrite": undefined,
139
+ "noCache": 345,
140
+ "total": 345,
141
+ },
142
+ "outputTokens": {
143
+ "reasoning": 123,
144
+ "text": 415,
145
+ "total": 538,
146
+ },
147
+ "raw": {
148
+ "input_tokens": 345,
149
+ "output_tokens": 538,
150
+ "output_tokens_details": {
151
+ "reasoning_tokens": 123,
152
+ },
153
+ "total_tokens": 883,
154
+ },
155
+ }
156
+ `);
157
+ });
158
+
159
+ it('should extract finish reason from status', async () => {
160
+ prepareJsonResponse({
161
+ id: 'resp_123',
162
+ object: 'response',
163
+ status: 'completed',
164
+ model: 'grok-4-fast',
165
+ output: [],
166
+ usage: { input_tokens: 10, output_tokens: 5 },
167
+ });
168
+
169
+ const result = await createModel().doGenerate({
170
+ prompt: TEST_PROMPT,
171
+ });
172
+
173
+ expect(result.finishReason).toMatchInlineSnapshot(`
174
+ {
175
+ "raw": "completed",
176
+ "unified": "stop",
177
+ }
178
+ `);
179
+ });
180
+ });
181
+
182
+ describe('reasoning content', () => {
183
+ it('should extract reasoning with encrypted content when store=false', async () => {
184
+ prepareJsonResponse({
185
+ id: 'resp_123',
186
+ object: 'response',
187
+ status: 'completed',
188
+ model: 'grok-4-fast',
189
+ output: [
190
+ {
191
+ type: 'reasoning',
192
+ id: 'rs_456',
193
+ status: 'completed',
194
+ summary: [
195
+ {
196
+ type: 'summary_text',
197
+ text: 'First, analyze the question carefully.',
198
+ },
199
+ ],
200
+ encrypted_content: 'abc123encryptedcontent',
201
+ },
202
+ {
203
+ type: 'message',
204
+ id: 'msg_123',
205
+ status: 'completed',
206
+ role: 'assistant',
207
+ content: [
208
+ {
209
+ type: 'output_text',
210
+ text: 'The answer is 42.',
211
+ annotations: [],
212
+ },
213
+ ],
214
+ },
215
+ ],
216
+ usage: {
217
+ input_tokens: 10,
218
+ output_tokens: 20,
219
+ output_tokens_details: {
220
+ reasoning_tokens: 15,
221
+ },
222
+ },
223
+ });
224
+
225
+ const result = await createModel().doGenerate({
226
+ prompt: TEST_PROMPT,
227
+ });
228
+
229
+ expect(result.content).toMatchInlineSnapshot(`
230
+ [
231
+ {
232
+ "providerMetadata": {
233
+ "xai": {
234
+ "itemId": "rs_456",
235
+ "reasoningEncryptedContent": "abc123encryptedcontent",
236
+ },
237
+ },
238
+ "text": "First, analyze the question carefully.",
239
+ "type": "reasoning",
240
+ },
241
+ {
242
+ "text": "The answer is 42.",
243
+ "type": "text",
244
+ },
245
+ ]
246
+ `);
247
+ });
248
+
249
+ it('should handle reasoning without encrypted content', async () => {
250
+ prepareJsonResponse({
251
+ id: 'resp_123',
252
+ object: 'response',
253
+ status: 'completed',
254
+ model: 'grok-4-fast',
255
+ output: [
256
+ {
257
+ type: 'reasoning',
258
+ id: 'rs_456',
259
+ status: 'completed',
260
+ summary: [
261
+ {
262
+ type: 'summary_text',
263
+ text: 'Thinking through the problem.',
264
+ },
265
+ ],
266
+ },
267
+ {
268
+ type: 'message',
269
+ id: 'msg_123',
270
+ status: 'completed',
271
+ role: 'assistant',
272
+ content: [
273
+ {
274
+ type: 'output_text',
275
+ text: 'Solution found.',
276
+ annotations: [],
277
+ },
278
+ ],
279
+ },
280
+ ],
281
+ usage: {
282
+ input_tokens: 10,
283
+ output_tokens: 15,
284
+ },
285
+ });
286
+
287
+ const result = await createModel().doGenerate({
288
+ prompt: TEST_PROMPT,
289
+ });
290
+
291
+ expect(result.content).toMatchInlineSnapshot(`
292
+ [
293
+ {
294
+ "providerMetadata": {
295
+ "xai": {
296
+ "itemId": "rs_456",
297
+ },
298
+ },
299
+ "text": "Thinking through the problem.",
300
+ "type": "reasoning",
301
+ },
302
+ {
303
+ "text": "Solution found.",
304
+ "type": "text",
305
+ },
306
+ ]
307
+ `);
308
+ });
309
+ });
310
+
311
+ describe('settings and options', () => {
312
+ it('should send model id and settings', async () => {
313
+ prepareJsonResponse({
314
+ id: 'resp_123',
315
+ object: 'response',
316
+ status: 'completed',
317
+ model: 'grok-4-fast',
318
+ output: [],
319
+ usage: { input_tokens: 10, output_tokens: 5 },
320
+ });
321
+
322
+ await createModel('grok-4-fast').doGenerate({
323
+ prompt: [
324
+ { role: 'system', content: 'you are helpful' },
325
+ { role: 'user', content: [{ type: 'text', text: 'hello' }] },
326
+ ],
327
+ temperature: 0.5,
328
+ topP: 0.9,
329
+ maxOutputTokens: 100,
330
+ });
331
+
332
+ expect(await server.calls[0].requestBodyJson).toMatchInlineSnapshot(`
333
+ {
334
+ "input": [
335
+ {
336
+ "content": "you are helpful",
337
+ "role": "system",
338
+ },
339
+ {
340
+ "content": [
341
+ {
342
+ "text": "hello",
343
+ "type": "input_text",
344
+ },
345
+ ],
346
+ "role": "user",
347
+ },
348
+ ],
349
+ "max_output_tokens": 100,
350
+ "model": "grok-4-fast",
351
+ "temperature": 0.5,
352
+ "top_p": 0.9,
353
+ }
354
+ `);
355
+ });
356
+
357
+ describe('provider options', () => {
358
+ it('reasoningEffort', async () => {
359
+ prepareJsonResponse({
360
+ id: 'resp_123',
361
+ object: 'response',
362
+ status: 'completed',
363
+ model: 'grok-4-fast',
364
+ output: [],
365
+ usage: { input_tokens: 10, output_tokens: 5 },
366
+ });
367
+
368
+ await createModel().doGenerate({
369
+ prompt: TEST_PROMPT,
370
+ providerOptions: {
371
+ xai: {
372
+ reasoningEffort: 'high',
373
+ } satisfies XaiResponsesProviderOptions,
374
+ },
375
+ });
376
+
377
+ const requestBody = await server.calls[0].requestBodyJson;
378
+ expect(requestBody.reasoning.effort).toBe('high');
379
+ });
380
+
381
+ it('store:true', async () => {
382
+ prepareJsonResponse({
383
+ id: 'resp_123',
384
+ object: 'response',
385
+ status: 'completed',
386
+ model: 'grok-4-fast',
387
+ output: [],
388
+ usage: { input_tokens: 10, output_tokens: 5 },
389
+ });
390
+
391
+ await createModel().doGenerate({
392
+ prompt: TEST_PROMPT,
393
+ providerOptions: {
394
+ xai: {
395
+ store: true,
396
+ } satisfies XaiResponsesProviderOptions,
397
+ },
398
+ });
399
+
400
+ const requestBody = await server.calls[0].requestBodyJson;
401
+ expect(requestBody.store).toBe(undefined);
402
+ expect(requestBody.include).toBe(undefined);
403
+ });
404
+
405
+ it('store:false', async () => {
406
+ prepareJsonResponse({
407
+ id: 'resp_123',
408
+ object: 'response',
409
+ status: 'completed',
410
+ model: 'grok-4-fast',
411
+ output: [],
412
+ usage: { input_tokens: 10, output_tokens: 5 },
413
+ });
414
+
415
+ await createModel().doGenerate({
416
+ prompt: TEST_PROMPT,
417
+ providerOptions: {
418
+ xai: {
419
+ store: false,
420
+ } satisfies XaiResponsesProviderOptions,
421
+ },
422
+ });
423
+
424
+ const requestBody = await server.calls[0].requestBodyJson;
425
+ expect(requestBody.store).toBe(false);
426
+ expect(requestBody.include).toStrictEqual([
427
+ 'reasoning.encrypted_content',
428
+ ]);
429
+ });
430
+
431
+ it('previousResponseId', async () => {
432
+ prepareJsonResponse({
433
+ id: 'resp_123',
434
+ object: 'response',
435
+ status: 'completed',
436
+ model: 'grok-4-fast',
437
+ output: [],
438
+ usage: { input_tokens: 10, output_tokens: 5 },
439
+ });
440
+
441
+ await createModel().doGenerate({
442
+ prompt: TEST_PROMPT,
443
+ providerOptions: {
444
+ xai: {
445
+ previousResponseId: 'resp_456',
446
+ } satisfies XaiResponsesProviderOptions,
447
+ },
448
+ });
449
+
450
+ const requestBody = await server.calls[0].requestBodyJson;
451
+ expect(requestBody.previous_response_id).toBe('resp_456');
452
+ });
453
+ });
454
+
455
+ it('should warn about unsupported stopSequences', async () => {
456
+ prepareJsonResponse({
457
+ id: 'resp_123',
458
+ object: 'response',
459
+ status: 'completed',
460
+ model: 'grok-4-fast',
461
+ output: [],
462
+ usage: { input_tokens: 10, output_tokens: 5 },
463
+ });
464
+
465
+ const result = await createModel().doGenerate({
466
+ prompt: TEST_PROMPT,
467
+ stopSequences: ['\n\n', 'STOP'],
468
+ });
469
+
470
+ expect(result.warnings).toMatchInlineSnapshot(`
471
+ [
472
+ {
473
+ "feature": "stopSequences",
474
+ "type": "unsupported",
475
+ },
476
+ ]
477
+ `);
478
+ });
479
+
480
+ describe('responseFormat', () => {
481
+ it('should send response format json schema', async () => {
482
+ prepareJsonResponse({
483
+ id: 'resp_123',
484
+ object: 'response',
485
+ status: 'completed',
486
+ model: 'grok-4-fast',
487
+ output: [],
488
+ usage: { input_tokens: 10, output_tokens: 5 },
489
+ });
490
+
491
+ await createModel().doGenerate({
492
+ prompt: TEST_PROMPT,
493
+ responseFormat: {
494
+ type: 'json',
495
+ name: 'recipe',
496
+ description: 'A recipe object',
497
+ schema: {
498
+ type: 'object',
499
+ properties: {
500
+ name: { type: 'string' },
501
+ ingredients: { type: 'array', items: { type: 'string' } },
502
+ },
503
+ required: ['name', 'ingredients'],
504
+ additionalProperties: false,
505
+ },
506
+ },
507
+ });
508
+
509
+ expect(await server.calls[0].requestBodyJson).toMatchInlineSnapshot(`
510
+ {
511
+ "input": [
512
+ {
513
+ "content": [
514
+ {
515
+ "text": "hello",
516
+ "type": "input_text",
517
+ },
518
+ ],
519
+ "role": "user",
520
+ },
521
+ ],
522
+ "model": "grok-4-fast",
523
+ "text": {
524
+ "format": {
525
+ "description": "A recipe object",
526
+ "name": "recipe",
527
+ "schema": {
528
+ "additionalProperties": false,
529
+ "properties": {
530
+ "ingredients": {
531
+ "items": {
532
+ "type": "string",
533
+ },
534
+ "type": "array",
535
+ },
536
+ "name": {
537
+ "type": "string",
538
+ },
539
+ },
540
+ "required": [
541
+ "name",
542
+ "ingredients",
543
+ ],
544
+ "type": "object",
545
+ },
546
+ "strict": true,
547
+ "type": "json_schema",
548
+ },
549
+ },
550
+ }
551
+ `);
552
+ });
553
+
554
+ it('should send response format json object when no schema provided', async () => {
555
+ prepareJsonResponse({
556
+ id: 'resp_123',
557
+ object: 'response',
558
+ status: 'completed',
559
+ model: 'grok-4-fast',
560
+ output: [],
561
+ usage: { input_tokens: 10, output_tokens: 5 },
562
+ });
563
+
564
+ await createModel().doGenerate({
565
+ prompt: TEST_PROMPT,
566
+ responseFormat: {
567
+ type: 'json',
568
+ },
569
+ });
570
+
571
+ expect(await server.calls[0].requestBodyJson).toMatchInlineSnapshot(`
572
+ {
573
+ "input": [
574
+ {
575
+ "content": [
576
+ {
577
+ "text": "hello",
578
+ "type": "input_text",
579
+ },
580
+ ],
581
+ "role": "user",
582
+ },
583
+ ],
584
+ "model": "grok-4-fast",
585
+ "text": {
586
+ "format": {
587
+ "type": "json_object",
588
+ },
589
+ },
590
+ }
591
+ `);
592
+ });
593
+
594
+ it('should use default name when responseFormat.name is not provided', async () => {
595
+ prepareJsonResponse({
596
+ id: 'resp_123',
597
+ object: 'response',
598
+ status: 'completed',
599
+ model: 'grok-4-fast',
600
+ output: [],
601
+ usage: { input_tokens: 10, output_tokens: 5 },
602
+ });
603
+
604
+ await createModel().doGenerate({
605
+ prompt: TEST_PROMPT,
606
+ responseFormat: {
607
+ type: 'json',
608
+ schema: {
609
+ type: 'object',
610
+ properties: { value: { type: 'string' } },
611
+ },
612
+ },
613
+ });
614
+
615
+ const requestBody = await server.calls[0].requestBodyJson;
616
+ expect(requestBody.text.format.name).toBe('response');
617
+ });
618
+ });
619
+ });
620
+
621
+ describe('web_search tool', () => {
622
+ let result: Awaited<
623
+ ReturnType<(typeof createModel)['prototype']['doGenerate']>
624
+ >;
625
+
626
+ beforeEach(async () => {
627
+ prepareJsonFixtureResponse('xai-web-search-tool.1');
628
+
629
+ result = await createModel().doGenerate({
630
+ prompt: TEST_PROMPT,
631
+ tools: [
632
+ {
633
+ type: 'provider',
634
+ id: 'xai.web_search',
635
+ name: 'web_search',
636
+ args: {},
637
+ },
638
+ ],
639
+ });
640
+ });
641
+
642
+ it('should include web_search tool call with providerExecuted true', async () => {
643
+ expect(result.content).toMatchSnapshot();
644
+ });
645
+
646
+ it('should send web_search tool with args in request', async () => {
647
+ prepareJsonResponse({
648
+ id: 'resp_123',
649
+ object: 'response',
650
+ status: 'completed',
651
+ model: 'grok-4-fast',
652
+ output: [],
653
+ usage: { input_tokens: 10, output_tokens: 5 },
654
+ });
655
+
656
+ await createModel().doGenerate({
657
+ prompt: TEST_PROMPT,
658
+ tools: [
659
+ {
660
+ type: 'provider',
661
+ id: 'xai.web_search',
662
+ name: 'web_search',
663
+ args: {
664
+ allowedDomains: ['wikipedia.org'],
665
+ enableImageUnderstanding: true,
666
+ },
667
+ },
668
+ ],
669
+ });
670
+
671
+ expect(await server.calls[0].requestBodyJson).toMatchInlineSnapshot(`
672
+ {
673
+ "input": [
674
+ {
675
+ "content": [
676
+ {
677
+ "text": "hello",
678
+ "type": "input_text",
679
+ },
680
+ ],
681
+ "role": "user",
682
+ },
683
+ ],
684
+ "model": "grok-4-fast",
685
+ "tools": [
686
+ {
687
+ "type": "web_search",
688
+ },
689
+ ],
690
+ }
691
+ `);
692
+ });
693
+ });
694
+
695
+ describe('x_search tool', () => {
696
+ it('should include x_search tool call with real response', async () => {
697
+ prepareJsonFixtureResponse('xai-x-search-tool.1');
698
+
699
+ const result = await createModel().doGenerate({
700
+ prompt: TEST_PROMPT,
701
+ tools: [
702
+ {
703
+ type: 'provider',
704
+ id: 'xai.x_search',
705
+ name: 'x_search',
706
+ args: {},
707
+ },
708
+ ],
709
+ });
710
+
711
+ expect(result.content).toMatchSnapshot();
712
+ });
713
+ });
714
+
715
+ describe('code_interpreter tool', () => {
716
+ it('should include code_interpreter tool call with real response', async () => {
717
+ prepareJsonFixtureResponse('xai-code-execution-tool.1');
718
+
719
+ const result = await createModel().doGenerate({
720
+ prompt: TEST_PROMPT,
721
+ tools: [
722
+ {
723
+ type: 'provider',
724
+ id: 'xai.code_execution',
725
+ name: 'code_execution',
726
+ args: {},
727
+ },
728
+ ],
729
+ });
730
+
731
+ expect(result.content).toMatchSnapshot();
732
+ });
733
+ });
734
+
735
+ describe('function tools', () => {
736
+ it('should include function tool calls without providerExecuted', async () => {
737
+ prepareJsonResponse({
738
+ id: 'resp_123',
739
+ object: 'response',
740
+ status: 'completed',
741
+ model: 'grok-4-fast',
742
+ output: [
743
+ {
744
+ type: 'function_call',
745
+ id: 'fc_123',
746
+ name: 'weather',
747
+ arguments: '{"location":"sf"}',
748
+ call_id: 'call_123',
749
+ },
750
+ ],
751
+ usage: { input_tokens: 10, output_tokens: 5 },
752
+ });
753
+
754
+ const result = await createModel().doGenerate({
755
+ prompt: TEST_PROMPT,
756
+ tools: [
757
+ {
758
+ type: 'function',
759
+ name: 'weather',
760
+ description: 'get weather',
761
+ inputSchema: {
762
+ type: 'object',
763
+ properties: { location: { type: 'string' } },
764
+ },
765
+ },
766
+ ],
767
+ });
768
+
769
+ expect(result.content).toMatchInlineSnapshot(`
770
+ [
771
+ {
772
+ "input": "{"location":"sf"}",
773
+ "toolCallId": "call_123",
774
+ "toolName": "weather",
775
+ "type": "tool-call",
776
+ },
777
+ ]
778
+ `);
779
+ });
780
+ });
781
+
782
+ describe('citations', () => {
783
+ it('should extract citations from annotations', async () => {
784
+ prepareJsonResponse({
785
+ id: 'resp_123',
786
+ object: 'response',
787
+ status: 'completed',
788
+ model: 'grok-4-fast',
789
+ output: [
790
+ {
791
+ type: 'message',
792
+ id: 'msg_123',
793
+ status: 'completed',
794
+ role: 'assistant',
795
+ content: [
796
+ {
797
+ type: 'output_text',
798
+ text: 'based on research',
799
+ annotations: [
800
+ {
801
+ type: 'url_citation',
802
+ url: 'https://example.com',
803
+ title: 'example title',
804
+ },
805
+ {
806
+ type: 'url_citation',
807
+ url: 'https://test.com',
808
+ },
809
+ ],
810
+ },
811
+ ],
812
+ },
813
+ ],
814
+ usage: { input_tokens: 10, output_tokens: 5 },
815
+ });
816
+
817
+ const result = await createModel().doGenerate({
818
+ prompt: TEST_PROMPT,
819
+ });
820
+
821
+ expect(result.content).toMatchInlineSnapshot(`
822
+ [
823
+ {
824
+ "text": "based on research",
825
+ "type": "text",
826
+ },
827
+ {
828
+ "id": "id-0",
829
+ "sourceType": "url",
830
+ "title": "example title",
831
+ "type": "source",
832
+ "url": "https://example.com",
833
+ },
834
+ {
835
+ "id": "id-1",
836
+ "sourceType": "url",
837
+ "title": "https://test.com",
838
+ "type": "source",
839
+ "url": "https://test.com",
840
+ },
841
+ ]
842
+ `);
843
+ });
844
+ });
845
+
846
+ describe('multiple tools', () => {
847
+ it('should handle multiple server-side tools', async () => {
848
+ prepareJsonResponse({
849
+ id: 'resp_123',
850
+ object: 'response',
851
+ status: 'completed',
852
+ model: 'grok-4-fast',
853
+ output: [
854
+ {
855
+ type: 'web_search_call',
856
+ id: 'ws_123',
857
+ name: 'web_search',
858
+ arguments: '{}',
859
+ call_id: '',
860
+ status: 'completed',
861
+ },
862
+ {
863
+ type: 'code_interpreter_call',
864
+ id: 'code_123',
865
+ name: 'code_execution',
866
+ arguments: '{}',
867
+ call_id: '',
868
+ status: 'completed',
869
+ },
870
+ ],
871
+ usage: { input_tokens: 10, output_tokens: 5 },
872
+ });
873
+
874
+ const result = await createModel().doGenerate({
875
+ prompt: TEST_PROMPT,
876
+ tools: [
877
+ {
878
+ type: 'provider',
879
+ id: 'xai.web_search',
880
+ name: 'web_search',
881
+ args: {},
882
+ },
883
+ {
884
+ type: 'provider',
885
+ id: 'xai.code_execution',
886
+ name: 'code_execution',
887
+ args: {},
888
+ },
889
+ ],
890
+ });
891
+
892
+ expect(result.content).toHaveLength(2);
893
+ expect(result.content[0].type).toBe('tool-call');
894
+ expect(result.content[1].type).toBe('tool-call');
895
+ });
896
+ });
897
+
898
+ describe('tool name mapping by type', () => {
899
+ it('should map web_search_call type to web_search tool name when name is empty', async () => {
900
+ prepareJsonResponse({
901
+ id: 'resp_123',
902
+ object: 'response',
903
+ status: 'completed',
904
+ model: 'grok-4-fast',
905
+ output: [
906
+ {
907
+ type: 'web_search_call',
908
+ id: 'ws_123',
909
+ name: '',
910
+ arguments: '{"query":"test"}',
911
+ call_id: '',
912
+ status: 'completed',
913
+ },
914
+ ],
915
+ usage: { input_tokens: 10, output_tokens: 5 },
916
+ });
917
+
918
+ const result = await createModel().doGenerate({
919
+ prompt: TEST_PROMPT,
920
+ tools: [
921
+ {
922
+ type: 'provider',
923
+ id: 'xai.web_search',
924
+ name: 'web_search',
925
+ args: {},
926
+ },
927
+ ],
928
+ });
929
+
930
+ expect(result.content).toEqual([
931
+ {
932
+ type: 'tool-call',
933
+ toolCallId: 'ws_123',
934
+ toolName: 'web_search',
935
+ input: '{"query":"test"}',
936
+ providerExecuted: true,
937
+ },
938
+ ]);
939
+ });
940
+
941
+ it('should map x_search_call type to x_search tool name when name is empty', async () => {
942
+ prepareJsonResponse({
943
+ id: 'resp_123',
944
+ object: 'response',
945
+ status: 'completed',
946
+ model: 'grok-4-fast',
947
+ output: [
948
+ {
949
+ type: 'x_search_call',
950
+ id: 'xs_123',
951
+ name: '',
952
+ arguments: '{"query":"test"}',
953
+ call_id: '',
954
+ status: 'completed',
955
+ },
956
+ ],
957
+ usage: { input_tokens: 10, output_tokens: 5 },
958
+ });
959
+
960
+ const result = await createModel().doGenerate({
961
+ prompt: TEST_PROMPT,
962
+ tools: [
963
+ {
964
+ type: 'provider',
965
+ id: 'xai.x_search',
966
+ name: 'x_search',
967
+ args: {},
968
+ },
969
+ ],
970
+ });
971
+
972
+ expect(result.content).toEqual([
973
+ {
974
+ type: 'tool-call',
975
+ toolCallId: 'xs_123',
976
+ toolName: 'x_search',
977
+ input: '{"query":"test"}',
978
+ providerExecuted: true,
979
+ },
980
+ ]);
981
+ });
982
+
983
+ it('should map code_interpreter_call type to code_execution tool name when name is empty', async () => {
984
+ prepareJsonResponse({
985
+ id: 'resp_123',
986
+ object: 'response',
987
+ status: 'completed',
988
+ model: 'grok-4-fast',
989
+ output: [
990
+ {
991
+ type: 'code_interpreter_call',
992
+ id: 'ci_123',
993
+ name: '',
994
+ arguments: '{}',
995
+ call_id: '',
996
+ status: 'completed',
997
+ },
998
+ ],
999
+ usage: { input_tokens: 10, output_tokens: 5 },
1000
+ });
1001
+
1002
+ const result = await createModel().doGenerate({
1003
+ prompt: TEST_PROMPT,
1004
+ tools: [
1005
+ {
1006
+ type: 'provider',
1007
+ id: 'xai.code_execution',
1008
+ name: 'code_execution',
1009
+ args: {},
1010
+ },
1011
+ ],
1012
+ });
1013
+
1014
+ expect(result.content).toEqual([
1015
+ {
1016
+ type: 'tool-call',
1017
+ toolCallId: 'ci_123',
1018
+ toolName: 'code_execution',
1019
+ input: '{}',
1020
+ providerExecuted: true,
1021
+ },
1022
+ ]);
1023
+ });
1024
+
1025
+ it('should map code_execution_call type to code_execution tool name when name is empty', async () => {
1026
+ prepareJsonResponse({
1027
+ id: 'resp_123',
1028
+ object: 'response',
1029
+ status: 'completed',
1030
+ model: 'grok-4-fast',
1031
+ output: [
1032
+ {
1033
+ type: 'code_execution_call',
1034
+ id: 'ce_123',
1035
+ name: '',
1036
+ arguments: '{}',
1037
+ call_id: '',
1038
+ status: 'completed',
1039
+ },
1040
+ ],
1041
+ usage: { input_tokens: 10, output_tokens: 5 },
1042
+ });
1043
+
1044
+ const result = await createModel().doGenerate({
1045
+ prompt: TEST_PROMPT,
1046
+ tools: [
1047
+ {
1048
+ type: 'provider',
1049
+ id: 'xai.code_execution',
1050
+ name: 'code_execution',
1051
+ args: {},
1052
+ },
1053
+ ],
1054
+ });
1055
+
1056
+ expect(result.content).toEqual([
1057
+ {
1058
+ type: 'tool-call',
1059
+ toolCallId: 'ce_123',
1060
+ toolName: 'code_execution',
1061
+ input: '{}',
1062
+ providerExecuted: true,
1063
+ },
1064
+ ]);
1065
+ });
1066
+
1067
+ it('should use custom tool name from provider tool when type matches', async () => {
1068
+ prepareJsonResponse({
1069
+ id: 'resp_123',
1070
+ object: 'response',
1071
+ status: 'completed',
1072
+ model: 'grok-4-fast',
1073
+ output: [
1074
+ {
1075
+ type: 'web_search_call',
1076
+ id: 'ws_123',
1077
+ name: '',
1078
+ arguments: '{}',
1079
+ call_id: '',
1080
+ status: 'completed',
1081
+ },
1082
+ ],
1083
+ usage: { input_tokens: 10, output_tokens: 5 },
1084
+ });
1085
+
1086
+ const result = await createModel().doGenerate({
1087
+ prompt: TEST_PROMPT,
1088
+ tools: [
1089
+ {
1090
+ type: 'provider',
1091
+ id: 'xai.web_search',
1092
+ name: 'my_custom_search',
1093
+ args: {},
1094
+ },
1095
+ ],
1096
+ });
1097
+
1098
+ expect(result.content).toEqual([
1099
+ {
1100
+ type: 'tool-call',
1101
+ toolCallId: 'ws_123',
1102
+ toolName: 'my_custom_search',
1103
+ input: '{}',
1104
+ providerExecuted: true,
1105
+ },
1106
+ ]);
1107
+ });
1108
+ });
1109
+ });
1110
+
1111
+ describe('doStream', () => {
1112
+ describe('text streaming', () => {
1113
+ it('should stream web search with real response', async () => {
1114
+ prepareChunksFixtureResponse('xai-web-search-tool.1');
1115
+
1116
+ const { stream } = await createModel().doStream({
1117
+ prompt: TEST_PROMPT,
1118
+ tools: [
1119
+ {
1120
+ type: 'provider',
1121
+ id: 'xai.web_search',
1122
+ name: 'web_search',
1123
+ args: {},
1124
+ },
1125
+ ],
1126
+ });
1127
+
1128
+ const parts = await convertReadableStreamToArray(stream);
1129
+
1130
+ expect(parts).toMatchSnapshot();
1131
+ });
1132
+
1133
+ it('should stream text deltas', async () => {
1134
+ prepareChunksFixtureResponse('xai-text-streaming.1');
1135
+
1136
+ const { stream } = await createModel().doStream({
1137
+ prompt: TEST_PROMPT,
1138
+ });
1139
+
1140
+ const parts = await convertReadableStreamToArray(stream);
1141
+
1142
+ expect(parts).toMatchSnapshot();
1143
+ });
1144
+
1145
+ it('should stream text deltas with reasoning', async () => {
1146
+ prepareChunksFixtureResponse('xai-text-with-reasoning-streaming.1');
1147
+
1148
+ const { stream } = await createModel().doStream({
1149
+ prompt: TEST_PROMPT,
1150
+ });
1151
+
1152
+ const parts = await convertReadableStreamToArray(stream);
1153
+
1154
+ expect(parts).toMatchSnapshot();
1155
+ });
1156
+
1157
+ it('should stream text deltas with encrypted reasoning', async () => {
1158
+ prepareChunksFixtureResponse(
1159
+ 'xai-text-with-reasoning-streaming-store-false.1',
1160
+ );
1161
+
1162
+ const { stream } = await createModel().doStream({
1163
+ prompt: TEST_PROMPT,
1164
+ });
1165
+
1166
+ const parts = await convertReadableStreamToArray(stream);
1167
+
1168
+ expect(parts).toMatchSnapshot();
1169
+ });
1170
+
1171
+ it('should include encrypted content in reasoning-end providerMetadata', async () => {
1172
+ prepareStreamChunks([
1173
+ JSON.stringify({
1174
+ type: 'response.created',
1175
+ response: {
1176
+ id: 'resp_123',
1177
+ object: 'response',
1178
+ model: 'grok-4-fast',
1179
+ output: [],
1180
+ },
1181
+ }),
1182
+ JSON.stringify({
1183
+ type: 'response.output_item.added',
1184
+ item: {
1185
+ type: 'reasoning',
1186
+ id: 'rs_456',
1187
+ status: 'in_progress',
1188
+ summary: [],
1189
+ },
1190
+ output_index: 0,
1191
+ }),
1192
+ JSON.stringify({
1193
+ type: 'response.reasoning_summary_part.added',
1194
+ item_id: 'rs_456',
1195
+ output_index: 0,
1196
+ summary_index: 0,
1197
+ part: { type: 'summary_text', text: '' },
1198
+ }),
1199
+ JSON.stringify({
1200
+ type: 'response.reasoning_summary_text.delta',
1201
+ item_id: 'rs_456',
1202
+ output_index: 0,
1203
+ summary_index: 0,
1204
+ delta: 'Analyzing...',
1205
+ }),
1206
+ JSON.stringify({
1207
+ type: 'response.reasoning_summary_text.done',
1208
+ item_id: 'rs_456',
1209
+ output_index: 0,
1210
+ summary_index: 0,
1211
+ text: 'Analyzing...',
1212
+ }),
1213
+ JSON.stringify({
1214
+ type: 'response.output_item.done',
1215
+ item: {
1216
+ type: 'reasoning',
1217
+ id: 'rs_456',
1218
+ status: 'completed',
1219
+ summary: [{ type: 'summary_text', text: 'Analyzing...' }],
1220
+ encrypted_content: 'encrypted_data_abc123',
1221
+ },
1222
+ output_index: 0,
1223
+ }),
1224
+ JSON.stringify({
1225
+ type: 'response.output_item.added',
1226
+ item: {
1227
+ type: 'message',
1228
+ id: 'msg_789',
1229
+ role: 'assistant',
1230
+ status: 'in_progress',
1231
+ content: [],
1232
+ },
1233
+ output_index: 1,
1234
+ }),
1235
+ JSON.stringify({
1236
+ type: 'response.output_text.delta',
1237
+ item_id: 'msg_789',
1238
+ output_index: 1,
1239
+ content_index: 0,
1240
+ delta: 'Result.',
1241
+ }),
1242
+ JSON.stringify({
1243
+ type: 'response.done',
1244
+ response: {
1245
+ id: 'resp_123',
1246
+ object: 'response',
1247
+ model: 'grok-4-fast',
1248
+ status: 'completed',
1249
+ output: [],
1250
+ usage: { input_tokens: 10, output_tokens: 20 },
1251
+ },
1252
+ }),
1253
+ ]);
1254
+
1255
+ const { stream } = await createModel().doStream({
1256
+ prompt: TEST_PROMPT,
1257
+ });
1258
+
1259
+ const parts = await convertReadableStreamToArray(stream);
1260
+
1261
+ const reasoningEnd = parts.find(part => part.type === 'reasoning-end');
1262
+ expect(reasoningEnd).toMatchInlineSnapshot(`
1263
+ {
1264
+ "id": "reasoning-rs_456",
1265
+ "providerMetadata": {
1266
+ "xai": {
1267
+ "itemId": "rs_456",
1268
+ "reasoningEncryptedContent": "encrypted_data_abc123",
1269
+ },
1270
+ },
1271
+ "type": "reasoning-end",
1272
+ }
1273
+ `);
1274
+ });
1275
+
1276
+ it('should stream x_search tool call', async () => {
1277
+ prepareChunksFixtureResponse('xai-x-search-tool');
1278
+
1279
+ const { stream } = await createModel().doStream({
1280
+ prompt: TEST_PROMPT,
1281
+ tools: [
1282
+ {
1283
+ type: 'provider',
1284
+ id: 'xai.x_search',
1285
+ name: 'x_search',
1286
+ args: {},
1287
+ },
1288
+ ],
1289
+ });
1290
+
1291
+ const parts = await convertReadableStreamToArray(stream);
1292
+
1293
+ expect(parts).toMatchSnapshot();
1294
+ });
1295
+
1296
+ it('should not emit duplicate text-delta from response.output_item.done after streaming', async () => {
1297
+ prepareStreamChunks([
1298
+ JSON.stringify({
1299
+ type: 'response.created',
1300
+ response: {
1301
+ id: 'resp_123',
1302
+ object: 'response',
1303
+ model: 'grok-4-fast',
1304
+ created_at: 1700000000,
1305
+ status: 'in_progress',
1306
+ output: [],
1307
+ },
1308
+ }),
1309
+ // Message item added - should emit text-start
1310
+ JSON.stringify({
1311
+ type: 'response.output_item.added',
1312
+ item: {
1313
+ type: 'message',
1314
+ id: 'msg_123',
1315
+ status: 'in_progress',
1316
+ role: 'assistant',
1317
+ content: [],
1318
+ },
1319
+ output_index: 0,
1320
+ }),
1321
+ // Stream text deltas
1322
+ JSON.stringify({
1323
+ type: 'response.output_text.delta',
1324
+ item_id: 'msg_123',
1325
+ output_index: 0,
1326
+ content_index: 0,
1327
+ delta: 'Hello',
1328
+ }),
1329
+ JSON.stringify({
1330
+ type: 'response.output_text.delta',
1331
+ item_id: 'msg_123',
1332
+ output_index: 0,
1333
+ content_index: 0,
1334
+ delta: ' ',
1335
+ }),
1336
+ JSON.stringify({
1337
+ type: 'response.output_text.delta',
1338
+ item_id: 'msg_123',
1339
+ output_index: 0,
1340
+ content_index: 0,
1341
+ delta: 'world',
1342
+ }),
1343
+ // Message item done - should NOT emit text-delta with full text
1344
+ JSON.stringify({
1345
+ type: 'response.output_item.done',
1346
+ item: {
1347
+ type: 'message',
1348
+ id: 'msg_123',
1349
+ status: 'completed',
1350
+ role: 'assistant',
1351
+ content: [
1352
+ {
1353
+ type: 'output_text',
1354
+ text: 'Hello world', // Full accumulated text
1355
+ annotations: [],
1356
+ },
1357
+ ],
1358
+ },
1359
+ output_index: 0,
1360
+ }),
1361
+ JSON.stringify({
1362
+ type: 'response.done',
1363
+ response: {
1364
+ id: 'resp_123',
1365
+ object: 'response',
1366
+ status: 'completed',
1367
+ output: [],
1368
+ usage: { input_tokens: 10, output_tokens: 5 },
1369
+ },
1370
+ }),
1371
+ ]);
1372
+
1373
+ const { stream } = await createModel().doStream({
1374
+ prompt: TEST_PROMPT,
1375
+ });
1376
+
1377
+ const parts = await convertReadableStreamToArray(stream);
1378
+
1379
+ // Count text-delta events
1380
+ const textDeltas = parts.filter(part => part.type === 'text-delta');
1381
+
1382
+ // Should only have 3 text-deltas from streaming, NOT 4 (with duplicate full text)
1383
+ expect(textDeltas).toHaveLength(3);
1384
+ expect(textDeltas.map(d => d.delta)).toEqual(['Hello', ' ', 'world']);
1385
+
1386
+ // Verify there's no text-delta with the full accumulated text
1387
+ const fullTextDelta = textDeltas.find(d => d.delta === 'Hello world');
1388
+ expect(fullTextDelta).toBeUndefined();
1389
+ });
1390
+ });
1391
+
1392
+ describe('tool call streaming', () => {
1393
+ it('should stream web_search tool calls', async () => {
1394
+ prepareStreamChunks([
1395
+ JSON.stringify({
1396
+ type: 'response.created',
1397
+ response: {
1398
+ id: 'resp_123',
1399
+ object: 'response',
1400
+ model: 'grok-4-fast',
1401
+ status: 'in_progress',
1402
+ output: [],
1403
+ },
1404
+ }),
1405
+ JSON.stringify({
1406
+ type: 'response.output_item.added',
1407
+ item: {
1408
+ type: 'web_search_call',
1409
+ id: 'ws_123',
1410
+ name: 'web_search',
1411
+ arguments: '{"query":"test"}',
1412
+ call_id: '',
1413
+ status: 'completed',
1414
+ },
1415
+ output_index: 0,
1416
+ }),
1417
+ JSON.stringify({
1418
+ type: 'response.done',
1419
+ response: {
1420
+ id: 'resp_123',
1421
+ object: 'response',
1422
+ status: 'completed',
1423
+ output: [],
1424
+ usage: { input_tokens: 10, output_tokens: 5 },
1425
+ },
1426
+ }),
1427
+ ]);
1428
+
1429
+ const { stream } = await createModel().doStream({
1430
+ prompt: TEST_PROMPT,
1431
+ tools: [
1432
+ {
1433
+ type: 'provider',
1434
+ id: 'xai.web_search',
1435
+ name: 'web_search',
1436
+ args: {},
1437
+ },
1438
+ ],
1439
+ });
1440
+
1441
+ const parts = await convertReadableStreamToArray(stream);
1442
+
1443
+ expect(parts).toContainEqual({
1444
+ type: 'tool-call',
1445
+ toolCallId: 'ws_123',
1446
+ toolName: 'web_search',
1447
+ input: '{"query":"test"}',
1448
+ providerExecuted: true,
1449
+ });
1450
+ });
1451
+ });
1452
+
1453
+ describe('tool name mapping by type in streaming', () => {
1454
+ it('should map web_search_call type to web_search tool name when name is empty', async () => {
1455
+ prepareStreamChunks([
1456
+ JSON.stringify({
1457
+ type: 'response.created',
1458
+ response: {
1459
+ id: 'resp_123',
1460
+ object: 'response',
1461
+ model: 'grok-4-fast',
1462
+ status: 'in_progress',
1463
+ output: [],
1464
+ },
1465
+ }),
1466
+ JSON.stringify({
1467
+ type: 'response.output_item.added',
1468
+ item: {
1469
+ type: 'web_search_call',
1470
+ id: 'ws_123',
1471
+ name: '',
1472
+ arguments: '{"query":"test"}',
1473
+ call_id: '',
1474
+ status: 'completed',
1475
+ },
1476
+ output_index: 0,
1477
+ }),
1478
+ JSON.stringify({
1479
+ type: 'response.done',
1480
+ response: {
1481
+ id: 'resp_123',
1482
+ object: 'response',
1483
+ status: 'completed',
1484
+ output: [],
1485
+ usage: { input_tokens: 10, output_tokens: 5 },
1486
+ },
1487
+ }),
1488
+ ]);
1489
+
1490
+ const { stream } = await createModel().doStream({
1491
+ prompt: TEST_PROMPT,
1492
+ tools: [
1493
+ {
1494
+ type: 'provider',
1495
+ id: 'xai.web_search',
1496
+ name: 'web_search',
1497
+ args: {},
1498
+ },
1499
+ ],
1500
+ });
1501
+
1502
+ const parts = await convertReadableStreamToArray(stream);
1503
+
1504
+ expect(parts).toContainEqual({
1505
+ type: 'tool-call',
1506
+ toolCallId: 'ws_123',
1507
+ toolName: 'web_search',
1508
+ input: '{"query":"test"}',
1509
+ providerExecuted: true,
1510
+ });
1511
+ });
1512
+
1513
+ it('should map x_search_call type to x_search tool name when name is empty', async () => {
1514
+ prepareStreamChunks([
1515
+ JSON.stringify({
1516
+ type: 'response.created',
1517
+ response: {
1518
+ id: 'resp_123',
1519
+ object: 'response',
1520
+ model: 'grok-4-fast',
1521
+ status: 'in_progress',
1522
+ output: [],
1523
+ },
1524
+ }),
1525
+ JSON.stringify({
1526
+ type: 'response.output_item.added',
1527
+ item: {
1528
+ type: 'x_search_call',
1529
+ id: 'xs_123',
1530
+ name: '',
1531
+ arguments: '{"query":"test"}',
1532
+ call_id: '',
1533
+ status: 'completed',
1534
+ },
1535
+ output_index: 0,
1536
+ }),
1537
+ JSON.stringify({
1538
+ type: 'response.done',
1539
+ response: {
1540
+ id: 'resp_123',
1541
+ object: 'response',
1542
+ status: 'completed',
1543
+ output: [],
1544
+ usage: { input_tokens: 10, output_tokens: 5 },
1545
+ },
1546
+ }),
1547
+ ]);
1548
+
1549
+ const { stream } = await createModel().doStream({
1550
+ prompt: TEST_PROMPT,
1551
+ tools: [
1552
+ {
1553
+ type: 'provider',
1554
+ id: 'xai.x_search',
1555
+ name: 'x_search',
1556
+ args: {},
1557
+ },
1558
+ ],
1559
+ });
1560
+
1561
+ const parts = await convertReadableStreamToArray(stream);
1562
+
1563
+ expect(parts).toContainEqual({
1564
+ type: 'tool-call',
1565
+ toolCallId: 'xs_123',
1566
+ toolName: 'x_search',
1567
+ input: '{"query":"test"}',
1568
+ providerExecuted: true,
1569
+ });
1570
+ });
1571
+
1572
+ it('should map code_interpreter_call type to code_execution tool name when name is empty', async () => {
1573
+ prepareStreamChunks([
1574
+ JSON.stringify({
1575
+ type: 'response.created',
1576
+ response: {
1577
+ id: 'resp_123',
1578
+ object: 'response',
1579
+ model: 'grok-4-fast',
1580
+ status: 'in_progress',
1581
+ output: [],
1582
+ },
1583
+ }),
1584
+ JSON.stringify({
1585
+ type: 'response.output_item.added',
1586
+ item: {
1587
+ type: 'code_interpreter_call',
1588
+ id: 'ci_123',
1589
+ name: '',
1590
+ arguments: '{}',
1591
+ call_id: '',
1592
+ status: 'completed',
1593
+ },
1594
+ output_index: 0,
1595
+ }),
1596
+ JSON.stringify({
1597
+ type: 'response.done',
1598
+ response: {
1599
+ id: 'resp_123',
1600
+ object: 'response',
1601
+ status: 'completed',
1602
+ output: [],
1603
+ usage: { input_tokens: 10, output_tokens: 5 },
1604
+ },
1605
+ }),
1606
+ ]);
1607
+
1608
+ const { stream } = await createModel().doStream({
1609
+ prompt: TEST_PROMPT,
1610
+ tools: [
1611
+ {
1612
+ type: 'provider',
1613
+ id: 'xai.code_execution',
1614
+ name: 'code_execution',
1615
+ args: {},
1616
+ },
1617
+ ],
1618
+ });
1619
+
1620
+ const parts = await convertReadableStreamToArray(stream);
1621
+
1622
+ expect(parts).toContainEqual({
1623
+ type: 'tool-call',
1624
+ toolCallId: 'ci_123',
1625
+ toolName: 'code_execution',
1626
+ input: '{}',
1627
+ providerExecuted: true,
1628
+ });
1629
+ });
1630
+
1631
+ it('should map code_execution_call type to code_execution tool name when name is empty', async () => {
1632
+ prepareStreamChunks([
1633
+ JSON.stringify({
1634
+ type: 'response.created',
1635
+ response: {
1636
+ id: 'resp_123',
1637
+ object: 'response',
1638
+ model: 'grok-4-fast',
1639
+ status: 'in_progress',
1640
+ output: [],
1641
+ },
1642
+ }),
1643
+ JSON.stringify({
1644
+ type: 'response.output_item.added',
1645
+ item: {
1646
+ type: 'code_execution_call',
1647
+ id: 'ce_123',
1648
+ name: '',
1649
+ arguments: '{}',
1650
+ call_id: '',
1651
+ status: 'completed',
1652
+ },
1653
+ output_index: 0,
1654
+ }),
1655
+ JSON.stringify({
1656
+ type: 'response.done',
1657
+ response: {
1658
+ id: 'resp_123',
1659
+ object: 'response',
1660
+ status: 'completed',
1661
+ output: [],
1662
+ usage: { input_tokens: 10, output_tokens: 5 },
1663
+ },
1664
+ }),
1665
+ ]);
1666
+
1667
+ const { stream } = await createModel().doStream({
1668
+ prompt: TEST_PROMPT,
1669
+ tools: [
1670
+ {
1671
+ type: 'provider',
1672
+ id: 'xai.code_execution',
1673
+ name: 'code_execution',
1674
+ args: {},
1675
+ },
1676
+ ],
1677
+ });
1678
+
1679
+ const parts = await convertReadableStreamToArray(stream);
1680
+
1681
+ expect(parts).toContainEqual({
1682
+ type: 'tool-call',
1683
+ toolCallId: 'ce_123',
1684
+ toolName: 'code_execution',
1685
+ input: '{}',
1686
+ providerExecuted: true,
1687
+ });
1688
+ });
1689
+
1690
+ it('should use custom tool name from provider tool when type matches', async () => {
1691
+ prepareStreamChunks([
1692
+ JSON.stringify({
1693
+ type: 'response.created',
1694
+ response: {
1695
+ id: 'resp_123',
1696
+ object: 'response',
1697
+ model: 'grok-4-fast',
1698
+ status: 'in_progress',
1699
+ output: [],
1700
+ },
1701
+ }),
1702
+ JSON.stringify({
1703
+ type: 'response.output_item.added',
1704
+ item: {
1705
+ type: 'web_search_call',
1706
+ id: 'ws_123',
1707
+ name: '',
1708
+ arguments: '{}',
1709
+ call_id: '',
1710
+ status: 'completed',
1711
+ },
1712
+ output_index: 0,
1713
+ }),
1714
+ JSON.stringify({
1715
+ type: 'response.done',
1716
+ response: {
1717
+ id: 'resp_123',
1718
+ object: 'response',
1719
+ status: 'completed',
1720
+ output: [],
1721
+ usage: { input_tokens: 10, output_tokens: 5 },
1722
+ },
1723
+ }),
1724
+ ]);
1725
+
1726
+ const { stream } = await createModel().doStream({
1727
+ prompt: TEST_PROMPT,
1728
+ tools: [
1729
+ {
1730
+ type: 'provider',
1731
+ id: 'xai.web_search',
1732
+ name: 'my_custom_search',
1733
+ args: {},
1734
+ },
1735
+ ],
1736
+ });
1737
+
1738
+ const parts = await convertReadableStreamToArray(stream);
1739
+
1740
+ expect(parts).toContainEqual({
1741
+ type: 'tool-call',
1742
+ toolCallId: 'ws_123',
1743
+ toolName: 'my_custom_search',
1744
+ input: '{}',
1745
+ providerExecuted: true,
1746
+ });
1747
+ });
1748
+ });
1749
+
1750
+ describe('citation streaming', () => {
1751
+ it('should stream citations as sources', async () => {
1752
+ prepareStreamChunks([
1753
+ JSON.stringify({
1754
+ type: 'response.created',
1755
+ response: {
1756
+ id: 'resp_123',
1757
+ object: 'response',
1758
+ model: 'grok-4-fast',
1759
+ status: 'in_progress',
1760
+ output: [],
1761
+ },
1762
+ }),
1763
+ JSON.stringify({
1764
+ type: 'response.output_text.annotation.added',
1765
+ item_id: 'msg_123',
1766
+ output_index: 0,
1767
+ content_index: 0,
1768
+ annotation_index: 0,
1769
+ annotation: {
1770
+ type: 'url_citation',
1771
+ url: 'https://example.com',
1772
+ title: 'example',
1773
+ },
1774
+ }),
1775
+ JSON.stringify({
1776
+ type: 'response.done',
1777
+ response: {
1778
+ id: 'resp_123',
1779
+ object: 'response',
1780
+ status: 'completed',
1781
+ output: [],
1782
+ usage: { input_tokens: 10, output_tokens: 5 },
1783
+ },
1784
+ }),
1785
+ ]);
1786
+
1787
+ const { stream } = await createModel().doStream({
1788
+ prompt: TEST_PROMPT,
1789
+ });
1790
+
1791
+ const parts = await convertReadableStreamToArray(stream);
1792
+
1793
+ expect(parts).toContainEqual({
1794
+ type: 'source',
1795
+ sourceType: 'url',
1796
+ id: 'id-0',
1797
+ url: 'https://example.com',
1798
+ title: 'example',
1799
+ });
1800
+ });
1801
+ });
1802
+ });
1803
+ });