@ai-sdk/xai 4.0.0-beta.4 → 4.0.0-beta.40

@@ -1,22 +1,27 @@
 import {
-  LanguageModelV3,
-  LanguageModelV3CallOptions,
-  LanguageModelV3Content,
-  LanguageModelV3FinishReason,
-  LanguageModelV3GenerateResult,
-  LanguageModelV3StreamPart,
-  LanguageModelV3StreamResult,
-  LanguageModelV3Usage,
-  SharedV3Warning,
+  LanguageModelV4,
+  LanguageModelV4CallOptions,
+  LanguageModelV4Content,
+  LanguageModelV4FinishReason,
+  LanguageModelV4GenerateResult,
+  LanguageModelV4StreamPart,
+  LanguageModelV4StreamResult,
+  LanguageModelV4Usage,
+  SharedV4Warning,
 } from '@ai-sdk/provider';
 import {
   combineHeaders,
   createEventSourceResponseHandler,
   createJsonResponseHandler,
   FetchFunction,
+  isCustomReasoning,
+  mapReasoningToProviderEffort,
   parseProviderOptions,
   ParseResult,
   postJsonToApi,
+  serializeModelOptions,
+  WORKFLOW_SERIALIZE,
+  WORKFLOW_DESERIALIZE,
 } from '@ai-sdk/provider-utils';
 import { z } from 'zod/v4';
 import { getResponseMetadata } from '../get-response-metadata';
@@ -38,18 +43,32 @@ import { prepareResponsesTools } from './xai-responses-prepare-tools';
 type XaiResponsesConfig = {
   provider: string;
   baseURL: string | undefined;
-  headers: () => Record<string, string | undefined>;
+  headers?: () => Record<string, string | undefined>;
   generateId: () => string;
   fetch?: FetchFunction;
 };

-export class XaiResponsesLanguageModel implements LanguageModelV3 {
-  readonly specificationVersion = 'v3';
+export class XaiResponsesLanguageModel implements LanguageModelV4 {
+  readonly specificationVersion = 'v4';

   readonly modelId: XaiResponsesModelId;

   private readonly config: XaiResponsesConfig;

+  static [WORKFLOW_SERIALIZE](model: XaiResponsesLanguageModel) {
+    return serializeModelOptions({
+      modelId: model.modelId,
+      config: model.config,
+    });
+  }
+
+  static [WORKFLOW_DESERIALIZE](options: {
+    modelId: XaiResponsesModelId;
+    config: XaiResponsesConfig;
+  }) {
+    return new XaiResponsesLanguageModel(options.modelId, options.config);
+  }
+
   constructor(modelId: XaiResponsesModelId, config: XaiResponsesConfig) {
     this.modelId = modelId;
     this.config = config;
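
The new static `WORKFLOW_SERIALIZE`/`WORKFLOW_DESERIALIZE` hooks let a workflow runtime snapshot a model instance and rebuild it later. The symbols and `serializeModelOptions` come from the `@ai-sdk/provider-utils` beta; the sketch below uses local stand-ins for all names rather than the SDK's actual exports, to show the round-trip pattern in isolation:

```ts
// Minimal, self-contained sketch of the symbol-keyed serialize/deserialize
// pattern added above. SERIALIZE/DESERIALIZE are local stand-ins for
// WORKFLOW_SERIALIZE/WORKFLOW_DESERIALIZE.
const SERIALIZE = Symbol.for('workflow.serialize');
const DESERIALIZE = Symbol.for('workflow.deserialize');

type Config = { provider: string; baseURL: string | undefined };

class DemoModel {
  constructor(
    readonly modelId: string,
    readonly config: Config,
  ) {}

  // static hooks let an engine snapshot a model without knowing its class
  static [SERIALIZE](model: DemoModel) {
    return { modelId: model.modelId, config: model.config };
  }

  static [DESERIALIZE](options: { modelId: string; config: Config }) {
    return new DemoModel(options.modelId, options.config);
  }
}

// round-trip: snapshot, persist, rehydrate
const original = new DemoModel('grok-4', { provider: 'xai.responses', baseURL: undefined });
const snapshot = DemoModel[SERIALIZE](original);
const revived = DemoModel[DESERIALIZE](snapshot);
console.log(revived.modelId); // 'grok-4'
```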
@@ -74,8 +93,9 @@ export class XaiResponsesLanguageModel implements LanguageModelV3 {
     providerOptions,
     tools,
     toolChoice,
-  }: LanguageModelV3CallOptions) {
-    const warnings: SharedV3Warning[] = [];
+    reasoning,
+  }: LanguageModelV4CallOptions) {
+    const warnings: SharedV4Warning[] = [];

     const options =
       (await parseProviderOptions({
@@ -110,7 +130,7 @@ export class XaiResponsesLanguageModel implements LanguageModelV3 {

     const { input, inputWarnings } = await convertToXaiResponsesInput({
       prompt,
-      store: true,
+      store: options.store ?? true,
     });
     warnings.push(...inputWarnings);

@@ -139,6 +159,24 @@ export class XaiResponsesLanguageModel implements LanguageModelV3 {
       }
     }

+    const resolvedReasoningEffort =
+      options.reasoningEffort ??
+      (isCustomReasoning(reasoning)
+        ? reasoning === 'none'
+          ? undefined
+          : mapReasoningToProviderEffort({
+              reasoning,
+              effortMap: {
+                minimal: 'low',
+                low: 'low',
+                medium: 'medium',
+                high: 'high',
+                xhigh: 'high',
+              },
+              warnings,
+            })
+        : undefined);
+
     const baseArgs: Record<string, unknown> = {
       model: this.modelId,
       input,
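
The resolution above gives the xAI-specific `reasoningEffort` option precedence over the unified `reasoning` call setting, and treats `'none'` as "send nothing". A self-contained sketch of that precedence and the effort map (local stand-in for `mapReasoningToProviderEffort`, skipping the `isCustomReasoning` guard):

```ts
// Effort resolution as implemented above: explicit provider option wins;
// otherwise the unified reasoning level is mapped; 'none' disables reasoning.
type UnifiedReasoning = 'none' | 'minimal' | 'low' | 'medium' | 'high' | 'xhigh';
type XaiEffort = 'low' | 'medium' | 'high';

const effortMap: Record<Exclude<UnifiedReasoning, 'none'>, XaiEffort> = {
  minimal: 'low',
  low: 'low',
  medium: 'medium',
  high: 'high',
  xhigh: 'high', // xAI has no 'xhigh', so it is clamped down
};

function resolveEffort(
  reasoningEffort: XaiEffort | undefined,
  reasoning: UnifiedReasoning | undefined,
): XaiEffort | undefined {
  if (reasoningEffort != null) return reasoningEffort; // explicit option wins
  if (reasoning == null || reasoning === 'none') return undefined;
  return effortMap[reasoning];
}

console.log(resolveEffort(undefined, 'xhigh')); // 'high'
console.log(resolveEffort('medium', 'none'));   // 'medium' (option takes precedence)
```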
@@ -165,8 +203,16 @@ export class XaiResponsesLanguageModel implements LanguageModelV3 {
             : { type: 'json_object' },
         },
       }),
-      ...(options.reasoningEffort != null && {
-        reasoning: { effort: options.reasoningEffort },
+      ...((resolvedReasoningEffort != null ||
+        options.reasoningSummary != null) && {
+        reasoning: {
+          ...(resolvedReasoningEffort != null && {
+            effort: resolvedReasoningEffort,
+          }),
+          ...(options.reasoningSummary != null && {
+            summary: options.reasoningSummary,
+          }),
+        },
       }),
       ...(options.store === false && {
         store: options.store,
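
From the caller's side, the two new fields surface through provider options. A hedged usage sketch with the AI SDK's `generateText`; the option names match the zod schema in this diff, while `xai.responses(...)` assumes the provider exposes a `responses` model factory (as the OpenAI provider does):

```ts
import { generateText } from 'ai';
import { xai } from '@ai-sdk/xai';

const { text } = await generateText({
  model: xai.responses('grok-4-fast-reasoning'), // assumed factory name
  prompt: 'Prove that 17 is prime.',
  providerOptions: {
    xai: {
      reasoningEffort: 'high',      // -> request body reasoning.effort
      reasoningSummary: 'detailed', // -> request body reasoning.summary
    },
  },
});
```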
@@ -199,8 +245,8 @@ export class XaiResponsesLanguageModel implements LanguageModelV3 {
   }

   async doGenerate(
-    options: LanguageModelV3CallOptions,
-  ): Promise<LanguageModelV3GenerateResult> {
+    options: LanguageModelV4CallOptions,
+  ): Promise<LanguageModelV4GenerateResult> {
     const {
       args: body,
       warnings,
@@ -217,7 +263,7 @@ export class XaiResponsesLanguageModel implements LanguageModelV3 {
       rawValue: rawResponse,
     } = await postJsonToApi({
       url: `${this.config.baseURL ?? 'https://api.x.ai/v1'}/responses`,
-      headers: combineHeaders(this.config.headers(), options.headers),
+      headers: combineHeaders(this.config.headers?.(), options.headers),
       body,
       failedResponseHandler: xaiFailedResponseHandler,
       successfulResponseHandler: createJsonResponseHandler(
@@ -227,7 +273,8 @@ export class XaiResponsesLanguageModel implements LanguageModelV3 {
       fetch: this.config.fetch,
     });

-    const content: Array<LanguageModelV3Content> = [];
+    const content: Array<LanguageModelV4Content> = [];
+    let hasFunctionCall = false;

     const webSearchSubTools = [
       'web_search',
@@ -350,6 +397,7 @@ export class XaiResponsesLanguageModel implements LanguageModelV3 {
         }

         case 'function_call': {
+          hasFunctionCall = true;
           content.push({
             type: 'tool-call',
             toolCallId: part.call_id,
@@ -360,16 +408,22 @@ export class XaiResponsesLanguageModel implements LanguageModelV3 {
         }

         case 'reasoning': {
-          const summaryTexts = part.summary
-            .map(s => s.text)
-            .filter(text => text && text.length > 0);
-
-          if (summaryTexts.length > 0) {
-            const reasoningText = summaryTexts.join('');
-            if (part.encrypted_content || part.id) {
-              content.push({
-                type: 'reasoning',
-                text: reasoningText,
+          const texts =
+            part.summary.length > 0
+              ? part.summary.map(s => s.text)
+              : (part.content ?? []).map(c => c.text);
+
+          const reasoningText = texts
+            .filter(text => text && text.length > 0)
+            .join('');
+
+          // condition changed here since encrypted content can now come with empty reasoning text
+          if (reasoningText || part.encrypted_content) {
+            const hasMetadata = part.encrypted_content || part.id;
+            content.push({
+              type: 'reasoning',
+              text: reasoningText,
+              ...(hasMetadata && {
                 providerMetadata: {
                   xai: {
                     ...(part.encrypted_content && {
@@ -378,13 +432,8 @@ export class XaiResponsesLanguageModel implements LanguageModelV3 {
                     ...(part.id && { itemId: part.id }),
                   },
                 },
-              });
-            } else {
-              content.push({
-                type: 'reasoning',
-                text: reasoningText,
-              });
-            }
+              }),
+            });
           }
           break;
         }
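
The relaxed condition means a reasoning item that carries only `encrypted_content` (with empty text) still produces a content part, so the encrypted payload survives round-trips. A sketch of the resulting part shape; field names follow the diff, the sample values are made up:

```ts
// Shape of a reasoning content part emitted by the branch above.
const reasoningPart = {
  type: 'reasoning' as const,
  text: '', // may be empty when only encrypted content was returned
  providerMetadata: {
    xai: {
      encryptedContent: '<opaque-ciphertext>', // hypothetical value
      itemId: 'item_123',                      // hypothetical value
    },
  },
};
```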
@@ -398,7 +447,9 @@ export class XaiResponsesLanguageModel implements LanguageModelV3 {
     return {
       content,
       finishReason: {
-        unified: mapXaiResponsesFinishReason(response.status),
+        unified: hasFunctionCall
+          ? 'tool-calls'
+          : mapXaiResponsesFinishReason(response.status),
         raw: response.status ?? undefined,
       },
       usage: response.usage
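
The `hasFunctionCall` override exists presumably because the provider can report a completed status even when the output contained function calls; the unified finish reason is then forced to `'tool-calls'` while the raw status is preserved. A local stand-in sketch of that decision (the real mapper is `mapXaiResponsesFinishReason`, not shown in this diff):

```ts
// Simplified view of the finish-reason override above.
function unifiedFinishReason(
  hasFunctionCall: boolean,
  status: string | undefined,
): string {
  if (hasFunctionCall) return 'tool-calls'; // tool calls win over raw status
  return status === 'completed' ? 'stop' : 'other'; // stand-in for the mapper
}
```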
@@ -418,8 +469,8 @@ export class XaiResponsesLanguageModel implements LanguageModelV3 {
   }

   async doStream(
-    options: LanguageModelV3CallOptions,
-  ): Promise<LanguageModelV3StreamResult> {
+    options: LanguageModelV4CallOptions,
+  ): Promise<LanguageModelV4StreamResult> {
     const {
       args,
       warnings,
@@ -436,7 +487,7 @@ export class XaiResponsesLanguageModel implements LanguageModelV3 {

     const { responseHeaders, value: response } = await postJsonToApi({
       url: `${this.config.baseURL ?? 'https://api.x.ai/v1'}/responses`,
-      headers: combineHeaders(this.config.headers(), options.headers),
+      headers: combineHeaders(this.config.headers?.(), options.headers),
       body,
       failedResponseHandler: xaiFailedResponseHandler,
       successfulResponseHandler: createEventSourceResponseHandler(
@@ -446,11 +497,12 @@ export class XaiResponsesLanguageModel implements LanguageModelV3 {
       fetch: this.config.fetch,
     });

-    let finishReason: LanguageModelV3FinishReason = {
+    let finishReason: LanguageModelV4FinishReason = {
       unified: 'other',
       raw: undefined,
     };
-    let usage: LanguageModelV3Usage | undefined = undefined;
+    let hasFunctionCall = false;
+    let usage: LanguageModelV4Usage | undefined = undefined;
     let isFirstChunk = true;
     const contentBlocks: Record<string, { type: 'text' }> = {};
     const seenToolCalls = new Set<string>();
@@ -473,7 +525,7 @@ export class XaiResponsesLanguageModel implements LanguageModelV3 {
       stream: response.pipeThrough(
         new TransformStream<
           ParseResult<z.infer<typeof xaiResponsesChunkSchema>>,
-          LanguageModelV3StreamPart
+          LanguageModelV4StreamPart
         >({
           start(controller) {
             controller.enqueue({ type: 'stream-start', warnings });
@@ -633,7 +685,8 @@ export class XaiResponsesLanguageModel implements LanguageModelV3 {

           if (
             event.type === 'response.done' ||
-            event.type === 'response.completed'
+            event.type === 'response.completed' ||
+            event.type === 'response.incomplete'
           ) {
             const response = event.response;

@@ -641,9 +694,22 @@ export class XaiResponsesLanguageModel implements LanguageModelV3 {
               usage = convertXaiResponsesUsage(response.usage);
             }

-            if (response.status) {
+            if (event.type === 'response.incomplete') {
+              const reason =
+                'incomplete_details' in response
+                  ? response.incomplete_details?.reason
+                  : undefined;
+              finishReason = {
+                unified: reason
+                  ? mapXaiResponsesFinishReason(reason)
+                  : 'other',
+                raw: reason ?? 'incomplete',
+              };
+            } else if ('status' in response && response.status) {
               finishReason = {
-                unified: mapXaiResponsesFinishReason(response.status),
+                unified: hasFunctionCall
+                  ? 'tool-calls'
+                  : mapXaiResponsesFinishReason(response.status),
                 raw: response.status,
               };
             }
@@ -651,6 +717,25 @@ export class XaiResponsesLanguageModel implements LanguageModelV3 {
             return;
           }

+          if (event.type === 'response.failed') {
+            const reason = event.response.incomplete_details?.reason;
+            finishReason = {
+              unified: reason ? mapXaiResponsesFinishReason(reason) : 'error',
+              raw: reason ?? 'error',
+            };
+
+            if (event.response.usage) {
+              usage = convertXaiResponsesUsage(event.response.usage);
+            }
+
+            return;
+          }
+
+          if (event.type === 'error') {
+            controller.enqueue({ type: 'error', error: event });
+            return;
+          }
+
           // Custom tool call input streaming - already handled by output_item events
           if (
             event.type === 'response.custom_tool_call_input.delta' ||
@@ -911,6 +996,7 @@ export class XaiResponsesLanguageModel implements LanguageModelV3 {
                 toolName: part.name,
               });
             } else if (event.type === 'response.output_item.done') {
+              hasFunctionCall = true;
               ongoingToolCalls[event.output_index] = undefined;

               controller.enqueue({
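
From a consumer's perspective, the new terminal handling means `response.incomplete` and `response.failed` now surface a mapped finish reason instead of falling through as `'other'`, and provider `error` events are forwarded as error stream parts. A hedged `streamText` sketch (again assuming a `responses` factory on the provider; the example finish reason is illustrative):

```ts
import { streamText } from 'ai';
import { xai } from '@ai-sdk/xai';

const result = streamText({
  model: xai.responses('grok-4'), // assumed factory name
  prompt: 'Summarize the plot of Hamlet.',
});

for await (const part of result.fullStream) {
  if (part.type === 'error') {
    console.error('provider error event:', part.error); // forwarded by the transform above
  }
}

console.log(await result.finishReason); // e.g. 'length' for a truncated, incomplete response
```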
@@ -6,6 +6,9 @@ export type XaiResponsesModelId =
   | 'grok-4'
   | 'grok-4-fast-non-reasoning'
   | 'grok-4-fast-reasoning'
+  | 'grok-4.20-0309-non-reasoning'
+  | 'grok-4.20-0309-reasoning'
+  | 'grok-4.20-multi-agent-0309'
   | (string & {});

 /**
@@ -17,10 +20,13 @@ export const xaiLanguageModelResponsesOptions = z.object({
    * Possible values are `low` (uses fewer reasoning tokens), `medium` and `high` (uses more reasoning tokens).
    */
   reasoningEffort: z.enum(['low', 'medium', 'high']).optional(),
+  reasoningSummary: z.enum(['auto', 'concise', 'detailed']).optional(),
   logprobs: z.boolean().optional(),
   topLogprobs: z.number().int().min(0).max(8).optional(),
   /**
    * Whether to store the input message(s) and model response for later retrieval.
+   * Must be set to `false` for teams with Zero Data Retention (ZDR) enabled,
+   * otherwise the API will return an error.
    * @default true
    */
   store: z.boolean().optional(),
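
Per the new doc comment, Zero Data Retention teams must disable storage explicitly; the diff threads this through both the converted input (`store: options.store ?? true`) and the request body (which only includes `store` when it is `false`). A hedged usage sketch, again assuming the `responses` factory:

```ts
import { generateText } from 'ai';
import { xai } from '@ai-sdk/xai';

await generateText({
  model: xai.responses('grok-4'), // assumed factory name
  prompt: 'Hello!',
  providerOptions: {
    xai: { store: false }, // required when ZDR is enabled, per the option docs
  },
});
```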
@@ -1,6 +1,6 @@
 import {
-  LanguageModelV3CallOptions,
-  SharedV3Warning,
+  LanguageModelV4CallOptions,
+  SharedV4Warning,
   UnsupportedFunctionalityError,
 } from '@ai-sdk/provider';
 import { validateTypes } from '@ai-sdk/provider-utils';
@@ -20,16 +20,16 @@ export async function prepareResponsesTools({
   tools,
   toolChoice,
 }: {
-  tools: LanguageModelV3CallOptions['tools'];
-  toolChoice?: LanguageModelV3CallOptions['toolChoice'];
+  tools: LanguageModelV4CallOptions['tools'];
+  toolChoice?: LanguageModelV4CallOptions['toolChoice'];
 }): Promise<{
   tools: Array<XaiResponsesTool> | undefined;
   toolChoice: XaiResponsesToolChoice | undefined;
-  toolWarnings: SharedV3Warning[];
+  toolWarnings: SharedV4Warning[];
 }> {
   const normalizedTools = tools?.length ? tools : undefined;

-  const toolWarnings: SharedV3Warning[] = [];
+  const toolWarnings: SharedV4Warning[] = [];

   if (normalizedTools == null) {
     return { tools: undefined, toolChoice: undefined, toolWarnings };
@@ -1,14 +1,14 @@
 import {
   APICallError,
-  LanguageModelV3,
-  LanguageModelV3CallOptions,
-  LanguageModelV3Content,
-  LanguageModelV3FinishReason,
-  LanguageModelV3GenerateResult,
-  LanguageModelV3StreamPart,
-  LanguageModelV3StreamResult,
-  LanguageModelV3Usage,
-  SharedV3Warning,
+  LanguageModelV4,
+  LanguageModelV4CallOptions,
+  LanguageModelV4Content,
+  LanguageModelV4FinishReason,
+  LanguageModelV4GenerateResult,
+  LanguageModelV4StreamPart,
+  LanguageModelV4StreamResult,
+  LanguageModelV4Usage,
+  SharedV4Warning,
 } from '@ai-sdk/provider';
 import {
   combineHeaders,
@@ -16,10 +16,15 @@ import {
   createJsonResponseHandler,
   extractResponseHeaders,
   FetchFunction,
+  isCustomReasoning,
+  mapReasoningToProviderEffort,
   parseProviderOptions,
   ParseResult,
   postJsonToApi,
   safeParseJSON,
+  serializeModelOptions,
+  WORKFLOW_SERIALIZE,
+  WORKFLOW_DESERIALIZE,
 } from '@ai-sdk/provider-utils';
 import { z } from 'zod/v4';
 import { convertToXaiChatMessages } from './convert-to-xai-chat-messages';
@@ -36,18 +41,32 @@ import { prepareTools } from './xai-prepare-tools';
 type XaiChatConfig = {
   provider: string;
   baseURL: string | undefined;
-  headers: () => Record<string, string | undefined>;
+  headers?: () => Record<string, string | undefined>;
   generateId: () => string;
   fetch?: FetchFunction;
 };

-export class XaiChatLanguageModel implements LanguageModelV3 {
-  readonly specificationVersion = 'v3';
+export class XaiChatLanguageModel implements LanguageModelV4 {
+  readonly specificationVersion = 'v4';

   readonly modelId: XaiChatModelId;

   private readonly config: XaiChatConfig;

+  static [WORKFLOW_SERIALIZE](model: XaiChatLanguageModel) {
+    return serializeModelOptions({
+      modelId: model.modelId,
+      config: model.config,
+    });
+  }
+
+  static [WORKFLOW_DESERIALIZE](options: {
+    modelId: XaiChatModelId;
+    config: XaiChatConfig;
+  }) {
+    return new XaiChatLanguageModel(options.modelId, options.config);
+  }
+
   constructor(modelId: XaiChatModelId, config: XaiChatConfig) {
     this.modelId = modelId;
     this.config = config;
@@ -71,12 +90,13 @@ export class XaiChatLanguageModel implements LanguageModelV3 {
     presencePenalty,
     stopSequences,
     seed,
+    reasoning,
     responseFormat,
     providerOptions,
     tools,
     toolChoice,
-  }: LanguageModelV3CallOptions) {
-    const warnings: SharedV3Warning[] = [];
+  }: LanguageModelV4CallOptions) {
+    const warnings: SharedV4Warning[] = [];

     // parse xai-specific provider options
     const options =
@@ -133,7 +153,23 @@ export class XaiChatLanguageModel implements LanguageModelV3 {
       temperature,
       top_p: topP,
       seed,
-      reasoning_effort: options.reasoningEffort,
+      reasoning_effort:
+        options.reasoningEffort ??
+        (isCustomReasoning(reasoning)
+          ? reasoning === 'none'
+            ? undefined
+            : mapReasoningToProviderEffort({
+                reasoning,
+                effortMap: {
+                  minimal: 'low',
+                  low: 'low',
+                  medium: 'low',
+                  high: 'high',
+                  xhigh: 'high',
+                },
+                warnings,
+              })
+          : undefined),

       // parallel function calling
       parallel_function_calling: options.parallel_function_calling,
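
Note the asymmetry between the two endpoints' effort maps in this diff: the unified `medium` level maps to `'medium'` for the responses API but to `'low'` for chat completions. A local comparison (stand-in objects, not SDK exports):

```ts
// Effort maps as written in this diff; only 'medium' differs.
const responsesEffortMap = { minimal: 'low', low: 'low', medium: 'medium', high: 'high', xhigh: 'high' } as const;
const chatEffortMap      = { minimal: 'low', low: 'low', medium: 'low',    high: 'high', xhigh: 'high' } as const;

console.log(responsesEffortMap.medium); // 'medium'
console.log(chatEffortMap.medium);      // 'low' (chat collapses medium to low)
```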
@@ -202,8 +238,8 @@ export class XaiChatLanguageModel implements LanguageModelV3 {
   }

   async doGenerate(
-    options: LanguageModelV3CallOptions,
-  ): Promise<LanguageModelV3GenerateResult> {
+    options: LanguageModelV4CallOptions,
+  ): Promise<LanguageModelV4GenerateResult> {
     const { args: body, warnings } = await this.getArgs(options);

     const url = `${this.config.baseURL ?? 'https://api.x.ai/v1'}/chat/completions`;
@@ -214,7 +250,7 @@ export class XaiChatLanguageModel implements LanguageModelV3 {
       rawValue: rawResponse,
     } = await postJsonToApi({
       url,
-      headers: combineHeaders(this.config.headers(), options.headers),
+      headers: combineHeaders(this.config.headers?.(), options.headers),
       body,
       failedResponseHandler: xaiFailedResponseHandler,
       successfulResponseHandler: createJsonResponseHandler(
@@ -237,7 +273,7 @@ export class XaiChatLanguageModel implements LanguageModelV3 {
     }

     const choice = response.choices![0];
-    const content: Array<LanguageModelV3Content> = [];
+    const content: Array<LanguageModelV4Content> = [];

     // extract text content
     if (choice.message.content != null && choice.message.content.length > 0) {
@@ -312,8 +348,8 @@ export class XaiChatLanguageModel implements LanguageModelV3 {
   }

   async doStream(
-    options: LanguageModelV3CallOptions,
-  ): Promise<LanguageModelV3StreamResult> {
+    options: LanguageModelV4CallOptions,
+  ): Promise<LanguageModelV4StreamResult> {
     const { args, warnings } = await this.getArgs(options);
     const body = {
       ...args,
@@ -327,7 +363,7 @@ export class XaiChatLanguageModel implements LanguageModelV3 {

     const { responseHeaders, value: response } = await postJsonToApi({
       url,
-      headers: combineHeaders(this.config.headers(), options.headers),
+      headers: combineHeaders(this.config.headers?.(), options.headers),
       body,
       failedResponseHandler: xaiFailedResponseHandler,
       successfulResponseHandler: async ({ response }) => {
@@ -375,11 +411,11 @@ export class XaiChatLanguageModel implements LanguageModelV3 {
       fetch: this.config.fetch,
     });

-    let finishReason: LanguageModelV3FinishReason = {
+    let finishReason: LanguageModelV4FinishReason = {
       unified: 'other',
       raw: undefined,
     };
-    let usage: LanguageModelV3Usage | undefined = undefined;
+    let usage: LanguageModelV4Usage | undefined = undefined;
     let isFirstChunk = true;
     const contentBlocks: Record<
       string,
@@ -394,7 +430,7 @@ export class XaiChatLanguageModel implements LanguageModelV3 {
       stream: response.pipeThrough(
         new TransformStream<
           ParseResult<z.infer<typeof xaiChatChunkSchema>>,
-          LanguageModelV3StreamPart
+          LanguageModelV4StreamPart
         >({
           start(controller) {
             controller.enqueue({ type: 'stream-start', warnings });
@@ -6,6 +6,9 @@ export type XaiChatModelId =
   | 'grok-4-1-fast-non-reasoning'
   | 'grok-4-fast-non-reasoning'
   | 'grok-4-fast-reasoning'
+  | 'grok-4.20-0309-non-reasoning'
+  | 'grok-4.20-0309-reasoning'
+  | 'grok-4.20-multi-agent-0309'
   | 'grok-code-fast-1'
   | 'grok-4'
   | 'grok-4-0709'
@@ -14,12 +17,6 @@ export type XaiChatModelId =
   | 'grok-3-latest'
   | 'grok-3-mini'
   | 'grok-3-mini-latest'
-  | 'grok-2-vision-1212'
-  | 'grok-2-vision'
-  | 'grok-2-vision-latest'
-  | 'grok-2-image-1212'
-  | 'grok-2-image'
-  | 'grok-2-image-latest'
   | (string & {});

 // search source schemas
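
The grok-2 vision and image IDs are dropped from the union while the new grok-4.20 IDs are added; since the union still ends in `(string & {})`, removed IDs remain passable as plain strings, just without autocompletion. Standard provider usage with the new IDs:

```ts
import { xai } from '@ai-sdk/xai';

const reasoningModel = xai('grok-4.20-0309-reasoning');
const multiAgentModel = xai('grok-4.20-multi-agent-0309');

// removed IDs still type-check via the (string & {}) escape hatch:
const legacy = xai('grok-2-vision-1212');
```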
@@ -18,7 +18,8 @@ export interface XaiUserMessage {

 export type XaiUserMessageContent =
   | { type: 'text'; text: string }
-  | { type: 'image_url'; image_url: { url: string } };
+  | { type: 'image_url'; image_url: { url: string } }
+  | { type: 'file'; file: { file_id: string } };

 export interface XaiAssistantMessage {
   role: 'assistant';
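
The new union member lets a user message reference an uploaded file by id. A value conforming to the type above (local copy of the union from this diff; the `file_id` value is made up):

```ts
type XaiUserMessageContent =
  | { type: 'text'; text: string }
  | { type: 'image_url'; image_url: { url: string } }
  | { type: 'file'; file: { file_id: string } };

const content: XaiUserMessageContent[] = [
  { type: 'text', text: 'Summarize the attached document.' },
  { type: 'file', file: { file_id: 'file_abc123' } }, // hypothetical id
];
```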