@ai-sdk/xai 4.0.0-beta.3 → 4.0.0-beta.31

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@ai-sdk/xai",
- "version": "4.0.0-beta.3",
+ "version": "4.0.0-beta.31",
  "license": "Apache-2.0",
  "sideEffects": false,
  "main": "./dist/index.js",
@@ -29,9 +29,9 @@
  }
  },
  "dependencies": {
- "@ai-sdk/openai-compatible": "3.0.0-beta.2",
- "@ai-sdk/provider": "4.0.0-beta.0",
- "@ai-sdk/provider-utils": "5.0.0-beta.1"
+ "@ai-sdk/openai-compatible": "3.0.0-beta.20",
+ "@ai-sdk/provider": "4.0.0-beta.9",
+ "@ai-sdk/provider-utils": "5.0.0-beta.15"
  },
  "devDependencies": {
  "@types/node": "20.17.24",
@@ -65,9 +65,7 @@
  "build": "pnpm clean && tsup --tsconfig tsconfig.build.json",
  "build:watch": "pnpm clean && tsup --watch",
  "clean": "del-cli dist docs *.tsbuildinfo",
- "lint": "eslint \"./**/*.ts*\"",
  "type-check": "tsc --build",
- "prettier-check": "prettier --check \"./**/*.ts*\"",
  "test": "pnpm test:node && pnpm test:edge",
  "test:update": "pnpm test:node -u",
  "test:watch": "vitest --config vitest.node.config.js",
@@ -1,17 +1,21 @@
  import {
- SharedV3Warning,
- LanguageModelV3Prompt,
+ SharedV4Warning,
+ LanguageModelV4Prompt,
  UnsupportedFunctionalityError,
  } from '@ai-sdk/provider';
- import { convertToBase64 } from '@ai-sdk/provider-utils';
+ import {
+ convertToBase64,
+ isProviderReference,
+ resolveProviderReference,
+ } from '@ai-sdk/provider-utils';
  import { XaiChatPrompt } from './xai-chat-prompt';

- export function convertToXaiChatMessages(prompt: LanguageModelV3Prompt): {
+ export function convertToXaiChatMessages(prompt: LanguageModelV4Prompt): {
  messages: XaiChatPrompt;
- warnings: Array<SharedV3Warning>;
+ warnings: Array<SharedV4Warning>;
  } {
  const messages: XaiChatPrompt = [];
- const warnings: Array<SharedV3Warning> = [];
+ const warnings: Array<SharedV4Warning> = [];

  for (const { role, content } of prompt) {
  switch (role) {
@@ -34,6 +38,18 @@ export function convertToXaiChatMessages(prompt: LanguageModelV3Prompt): {
  return { type: 'text', text: part.text };
  }
  case 'file': {
+ if (isProviderReference(part.data)) {
+ return {
+ type: 'file',
+ file: {
+ file_id: resolveProviderReference({
+ reference: part.data,
+ provider: 'xai',
+ }),
+ },
+ };
+ }
+
  if (part.mediaType.startsWith('image/')) {
  const mediaType =
  part.mediaType === 'image/*'
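
The new branch above lets a file part reference a previously uploaded xAI file instead of carrying raw bytes: when `part.data` is a provider reference, the chat converter emits `{ type: 'file', file: { file_id } }`, and the responses converter further below emits the analogous `{ type: 'input_file', file_id }` part. A minimal sketch of the idea; the import path, prompt literal, and loose typing are illustrative assumptions, not part of the package's documented API:

import { convertToXaiChatMessages } from './convert-to-xai-chat-messages'; // path assumed

// Builds xAI chat messages for a prompt that attaches a previously uploaded
// file via its provider reference (the `providerReference` value returned by
// XaiFiles.uploadFile, added further below in this diff).
export function buildMessagesForUploadedFile(providerReference: unknown) {
  return convertToXaiChatMessages([
    {
      role: 'user',
      content: [
        { type: 'text', text: 'Summarize the attached report.' },
        // resolved by the new branch above to { type: 'file', file: { file_id: ... } }
        { type: 'file', mediaType: 'application/pdf', data: providerReference },
      ],
    },
  ] as any); // prompt typing elided in this sketch
}
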
@@ -1,7 +1,7 @@
- import { LanguageModelV3Usage } from '@ai-sdk/provider';
+ import { LanguageModelV4Usage } from '@ai-sdk/provider';
  import { XaiChatUsage } from './xai-chat-language-model';

- export function convertXaiChatUsage(usage: XaiChatUsage): LanguageModelV3Usage {
+ export function convertXaiChatUsage(usage: XaiChatUsage): LanguageModelV4Usage {
  const cacheReadTokens = usage.prompt_tokens_details?.cached_tokens ?? 0;
  const reasoningTokens =
  usage.completion_tokens_details?.reasoning_tokens ?? 0;
@@ -0,0 +1,16 @@
+ import { lazySchema, zodSchema } from '@ai-sdk/provider-utils';
+ import { z } from 'zod/v4';
+
+ export const xaiFilesResponseSchema = lazySchema(() =>
+ zodSchema(
+ z.object({
+ id: z.string(),
+ object: z.string().nullish(),
+ bytes: z.number().nullish(),
+ created_at: z.number().nullish(),
+ filename: z.string().nullish(),
+ purpose: z.string().nullish(),
+ status: z.string().nullish(),
+ }),
+ ),
+ );
@@ -0,0 +1,15 @@
+ import { InferSchema, lazySchema, zodSchema } from '@ai-sdk/provider-utils';
+ import { z } from 'zod/v4';
+
+ export const xaiFilesOptionsSchema = lazySchema(() =>
+ zodSchema(
+ z
+ .object({
+ teamId: z.string().optional(),
+ filePath: z.string().optional(),
+ })
+ .passthrough(),
+ ),
+ );
+
+ export type XaiFilesOptions = InferSchema<typeof xaiFilesOptionsSchema>;
@@ -0,0 +1,93 @@
+ import {
+ FilesV4,
+ FilesV4UploadFileCallOptions,
+ FilesV4UploadFileResult,
+ } from '@ai-sdk/provider';
+ import {
+ combineHeaders,
+ convertBase64ToUint8Array,
+ createJsonResponseHandler,
+ FetchFunction,
+ parseProviderOptions,
+ postFormDataToApi,
+ } from '@ai-sdk/provider-utils';
+ import { xaiFailedResponseHandler } from '../xai-error';
+ import { xaiFilesResponseSchema } from './xai-files-api';
+ import { xaiFilesOptionsSchema, XaiFilesOptions } from './xai-files-options';
+
+ interface XaiFilesConfig {
+ provider: string;
+ baseURL: string | undefined;
+ headers: () => Record<string, string | undefined>;
+ fetch?: FetchFunction;
+ }
+
+ export class XaiFiles implements FilesV4 {
+ readonly specificationVersion = 'v4';
+
+ get provider(): string {
+ return this.config.provider;
+ }
+
+ constructor(private readonly config: XaiFilesConfig) {}
+
+ async uploadFile({
+ data,
+ mediaType,
+ filename,
+ providerOptions,
+ }: FilesV4UploadFileCallOptions): Promise<FilesV4UploadFileResult> {
+ const xaiOptions = (await parseProviderOptions({
+ provider: 'xai',
+ providerOptions,
+ schema: xaiFilesOptionsSchema,
+ })) as XaiFilesOptions | undefined;
+
+ const fileBytes =
+ data instanceof Uint8Array ? data : convertBase64ToUint8Array(data);
+
+ const blob = new Blob([fileBytes], {
+ type: mediaType,
+ });
+
+ const formData = new FormData();
+ if (filename != null) {
+ formData.append('file', blob, filename);
+ } else {
+ formData.append('file', blob);
+ }
+
+ if (xaiOptions?.teamId != null) {
+ formData.append('team_id', xaiOptions.teamId);
+ }
+
+ const { value: response } = await postFormDataToApi({
+ url: `${this.config.baseURL}/files`,
+ headers: combineHeaders(this.config.headers()),
+ formData,
+ failedResponseHandler: xaiFailedResponseHandler,
+ successfulResponseHandler: createJsonResponseHandler(
+ xaiFilesResponseSchema,
+ ),
+ fetch: this.config.fetch,
+ });
+
+ return {
+ warnings: [],
+ providerReference: { xai: response.id },
+ ...((response.filename ?? filename)
+ ? { filename: response.filename ?? filename }
+ : {}),
+ ...(mediaType != null ? { mediaType } : {}),
+ providerMetadata: {
+ xai: {
+ ...(response.filename != null ? { filename: response.filename } : {}),
+ ...(response.bytes != null ? { bytes: response.bytes } : {}),
+ ...(response.created_at != null
+ ? { createdAt: response.created_at }
+ : {}),
+ },
+ },
+ };
+ }
+ }
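
The new XaiFiles client above wraps xAI's /files upload endpoint and returns a provider reference (`{ xai: <file id> }`) that the message converters resolve to a `file_id`. A rough usage sketch, constructing the class directly; the config values, file contents, and import path are illustrative assumptions rather than the provider's documented wiring:

import { readFile } from 'node:fs/promises';
import { XaiFiles } from './files/xai-files'; // path inferred from the imports above

async function uploadReport() {
  const files = new XaiFiles({
    provider: 'xai.files', // illustrative provider label
    baseURL: 'https://api.x.ai/v1',
    headers: () => ({ Authorization: `Bearer ${process.env.XAI_API_KEY}` }),
  });

  const { providerReference, providerMetadata } = await files.uploadFile({
    data: new Uint8Array(await readFile('report.pdf')),
    mediaType: 'application/pdf',
    filename: 'report.pdf',
    // parsed against xaiFilesOptionsSchema; team_id is appended to the form data
    providerOptions: { xai: { teamId: 'team_123' } },
  });

  // providerReference can then be used as the `data` of a file prompt part,
  // which the chat/responses converters map to file_id / input_file.
  return { providerReference, providerMetadata };
}
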
package/src/index.ts CHANGED
@@ -20,6 +20,7 @@ export type {
  /** @deprecated Use `XaiVideoModelOptions` instead. */
  XaiVideoModelOptions as XaiVideoProviderOptions,
  } from './xai-video-options';
+ export type { XaiFilesOptions } from './files/xai-files-options';
  export { createXai, xai } from './xai-provider';
  export type { XaiProvider, XaiProviderSettings } from './xai-provider';
  export {
@@ -1,8 +1,8 @@
- import { LanguageModelV3FinishReason } from '@ai-sdk/provider';
+ import { LanguageModelV4FinishReason } from '@ai-sdk/provider';

  export function mapXaiFinishReason(
  finishReason: string | null | undefined,
- ): LanguageModelV3FinishReason['unified'] {
+ ): LanguageModelV4FinishReason['unified'] {
  switch (finishReason) {
  case 'stop':
  return 'stop';
@@ -1,9 +1,13 @@
  import {
- SharedV3Warning,
- LanguageModelV3Message,
+ SharedV4Warning,
+ LanguageModelV4Message,
  UnsupportedFunctionalityError,
  } from '@ai-sdk/provider';
- import { convertToBase64 } from '@ai-sdk/provider-utils';
+ import {
+ convertToBase64,
+ isProviderReference,
+ resolveProviderReference,
+ } from '@ai-sdk/provider-utils';
  import {
  XaiResponsesInput,
  XaiResponsesUserMessageContentPart,
@@ -12,14 +16,14 @@ import {
  export async function convertToXaiResponsesInput({
  prompt,
  }: {
- prompt: LanguageModelV3Message[];
+ prompt: LanguageModelV4Message[];
  store?: boolean;
  }): Promise<{
  input: XaiResponsesInput;
- inputWarnings: SharedV3Warning[];
+ inputWarnings: SharedV4Warning[];
  }> {
  const input: XaiResponsesInput = [];
- const inputWarnings: SharedV3Warning[] = [];
+ const inputWarnings: SharedV4Warning[] = [];

  for (const message of prompt) {
  switch (message.role) {
@@ -42,7 +46,15 @@ export async function convertToXaiResponsesInput({
  }

  case 'file': {
- if (block.mediaType.startsWith('image/')) {
+ if (isProviderReference(block.data)) {
+ contentParts.push({
+ type: 'input_file',
+ file_id: resolveProviderReference({
+ reference: block.data,
+ provider: 'xai',
+ }),
+ });
+ } else if (block.mediaType.startsWith('image/')) {
  const mediaType =
  block.mediaType === 'image/*'
  ? 'image/jpeg'
@@ -124,6 +136,8 @@ export async function convertToXaiResponsesInput({
  }

  case 'reasoning':
+ case 'reasoning-file':
+ case 'custom':
  case 'file': {
  inputWarnings.push({
  type: 'other',
@@ -1,9 +1,9 @@
- import { LanguageModelV3Usage } from '@ai-sdk/provider';
+ import { LanguageModelV4Usage } from '@ai-sdk/provider';
  import { XaiResponsesUsage } from './xai-responses-api';

  export function convertXaiResponsesUsage(
  usage: XaiResponsesUsage,
- ): LanguageModelV3Usage {
+ ): LanguageModelV4Usage {
  const cacheReadTokens = usage.input_tokens_details?.cached_tokens ?? 0;
  const reasoningTokens = usage.output_tokens_details?.reasoning_tokens ?? 0;

@@ -1,13 +1,14 @@
- import { LanguageModelV3FinishReason } from '@ai-sdk/provider';
+ import { LanguageModelV4FinishReason } from '@ai-sdk/provider';

  export function mapXaiResponsesFinishReason(
  finishReason: string | null | undefined,
- ): LanguageModelV3FinishReason['unified'] {
+ ): LanguageModelV4FinishReason['unified'] {
  switch (finishReason) {
  case 'stop':
  case 'completed':
  return 'stop';
  case 'length':
+ case 'max_output_tokens':
  return 'length';
  case 'tool_calls':
  case 'function_call':
@@ -26,7 +26,8 @@ export type XaiResponsesSystemMessage = {

  export type XaiResponsesUserMessageContentPart =
  | { type: 'input_text'; text: string }
- | { type: 'input_image'; image_url: string };
+ | { type: 'input_image'; image_url: string }
+ | { type: 'input_file'; file_id: string };

  export type XaiResponsesUserMessage = {
  role: 'user';
@@ -223,6 +224,9 @@ const outputItemSchema = z.discriminatedUnion('type', [
  type: z.literal('reasoning'),
  id: z.string(),
  summary: z.array(reasoningSummaryPartSchema),
+ content: z
+ .array(z.object({ type: z.string(), text: z.string() }))
+ .nullish(),
  status: z.string(),
  encrypted_content: z.string().nullish(),
  }),
@@ -518,6 +522,32 @@ export const xaiResponsesChunkSchema = z.union([
  output_index: z.number(),
  output: z.string().optional(),
  }),
+ z.object({
+ type: z.literal('response.incomplete'),
+ response: z.object({
+ incomplete_details: z.object({ reason: z.string() }).nullish(),
+ usage: xaiResponsesUsageSchema.nullish(),
+ }),
+ }),
+ z.object({
+ type: z.literal('response.failed'),
+ response: z.object({
+ error: z
+ .object({
+ code: z.string().nullish(),
+ message: z.string(),
+ })
+ .nullish(),
+ incomplete_details: z.object({ reason: z.string() }).nullish(),
+ usage: xaiResponsesUsageSchema.nullish(),
+ }),
+ }),
+ z.object({
+ type: z.literal('error'),
+ code: z.string().nullish(),
+ message: z.string(),
+ param: z.string().nullish(),
+ }),
  z.object({
  type: z.literal('response.done'),
  response: xaiResponsesResponseSchema,
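
The three added union members above make the stream parser accept `response.incomplete`, `response.failed`, and bare `error` events; the doStream changes further below translate them into finish reasons and error stream parts. Example payloads that the extended schema accepts (shapes follow the zod definitions above, field values are made up for illustration):

const incompleteChunk = {
  type: 'response.incomplete',
  response: {
    incomplete_details: { reason: 'max_output_tokens' }, // mapped to finish reason 'length'
    usage: null,
  },
};

const failedChunk = {
  type: 'response.failed',
  response: {
    error: { code: 'server_error', message: 'The model run failed.' },
    incomplete_details: null,
    usage: null,
  },
};

const errorChunk = {
  type: 'error',
  code: 'invalid_request_error',
  message: 'Unknown parameter: foo.',
  param: 'foo',
};
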
@@ -1,19 +1,21 @@
  import {
- LanguageModelV3,
- LanguageModelV3CallOptions,
- LanguageModelV3Content,
- LanguageModelV3FinishReason,
- LanguageModelV3GenerateResult,
- LanguageModelV3StreamPart,
- LanguageModelV3StreamResult,
- LanguageModelV3Usage,
- SharedV3Warning,
+ LanguageModelV4,
+ LanguageModelV4CallOptions,
+ LanguageModelV4Content,
+ LanguageModelV4FinishReason,
+ LanguageModelV4GenerateResult,
+ LanguageModelV4StreamPart,
+ LanguageModelV4StreamResult,
+ LanguageModelV4Usage,
+ SharedV4Warning,
  } from '@ai-sdk/provider';
  import {
  combineHeaders,
  createEventSourceResponseHandler,
  createJsonResponseHandler,
  FetchFunction,
+ isCustomReasoning,
+ mapReasoningToProviderEffort,
  parseProviderOptions,
  ParseResult,
  postJsonToApi,
@@ -43,8 +45,8 @@ type XaiResponsesConfig = {
  fetch?: FetchFunction;
  };

- export class XaiResponsesLanguageModel implements LanguageModelV3 {
- readonly specificationVersion = 'v3';
+ export class XaiResponsesLanguageModel implements LanguageModelV4 {
+ readonly specificationVersion = 'v4';

  readonly modelId: XaiResponsesModelId;

@@ -74,8 +76,9 @@ export class XaiResponsesLanguageModel implements LanguageModelV3 {
  providerOptions,
  tools,
  toolChoice,
- }: LanguageModelV3CallOptions) {
- const warnings: SharedV3Warning[] = [];
+ reasoning,
+ }: LanguageModelV4CallOptions) {
+ const warnings: SharedV4Warning[] = [];

  const options =
  (await parseProviderOptions({
@@ -139,6 +142,24 @@ export class XaiResponsesLanguageModel implements LanguageModelV3 {
  }
  }

+ const resolvedReasoningEffort =
+ options.reasoningEffort ??
+ (isCustomReasoning(reasoning)
+ ? reasoning === 'none'
+ ? undefined
+ : mapReasoningToProviderEffort({
+ reasoning,
+ effortMap: {
+ minimal: 'low',
+ low: 'low',
+ medium: 'medium',
+ high: 'high',
+ xhigh: 'high',
+ },
+ warnings,
+ })
+ : undefined);
+
  const baseArgs: Record<string, unknown> = {
  model: this.modelId,
  input,
@@ -165,8 +186,16 @@ export class XaiResponsesLanguageModel implements LanguageModelV3 {
  : { type: 'json_object' },
  },
  }),
- ...(options.reasoningEffort != null && {
- reasoning: { effort: options.reasoningEffort },
+ ...((resolvedReasoningEffort != null ||
+ options.reasoningSummary != null) && {
+ reasoning: {
+ ...(resolvedReasoningEffort != null && {
+ effort: resolvedReasoningEffort,
+ }),
+ ...(options.reasoningSummary != null && {
+ summary: options.reasoningSummary,
+ }),
+ },
  }),
  ...(options.store === false && {
  store: options.store,
@@ -199,8 +228,8 @@ export class XaiResponsesLanguageModel implements LanguageModelV3 {
  }

  async doGenerate(
- options: LanguageModelV3CallOptions,
- ): Promise<LanguageModelV3GenerateResult> {
+ options: LanguageModelV4CallOptions,
+ ): Promise<LanguageModelV4GenerateResult> {
  const {
  args: body,
  warnings,
@@ -227,7 +256,8 @@ export class XaiResponsesLanguageModel implements LanguageModelV3 {
  fetch: this.config.fetch,
  });

- const content: Array<LanguageModelV3Content> = [];
+ const content: Array<LanguageModelV4Content> = [];
+ let hasFunctionCall = false;

  const webSearchSubTools = [
  'web_search',
@@ -350,6 +380,7 @@ export class XaiResponsesLanguageModel implements LanguageModelV3 {
  }

  case 'function_call': {
+ hasFunctionCall = true;
  content.push({
  type: 'tool-call',
  toolCallId: part.call_id,
@@ -360,12 +391,16 @@ export class XaiResponsesLanguageModel implements LanguageModelV3 {
  }

  case 'reasoning': {
- const summaryTexts = part.summary
- .map(s => s.text)
- .filter(text => text && text.length > 0);
+ const texts =
+ part.summary.length > 0
+ ? part.summary.map(s => s.text)
+ : (part.content ?? []).map(c => c.text);
+
+ const reasoningText = texts
+ .filter(text => text && text.length > 0)
+ .join('');

- if (summaryTexts.length > 0) {
- const reasoningText = summaryTexts.join('');
+ if (reasoningText) {
  if (part.encrypted_content || part.id) {
  content.push({
  type: 'reasoning',
@@ -398,7 +433,9 @@ export class XaiResponsesLanguageModel implements LanguageModelV3 {
  return {
  content,
  finishReason: {
- unified: mapXaiResponsesFinishReason(response.status),
+ unified: hasFunctionCall
+ ? 'tool-calls'
+ : mapXaiResponsesFinishReason(response.status),
  raw: response.status ?? undefined,
  },
  usage: response.usage
@@ -418,8 +455,8 @@ export class XaiResponsesLanguageModel implements LanguageModelV3 {
  }

  async doStream(
- options: LanguageModelV3CallOptions,
- ): Promise<LanguageModelV3StreamResult> {
+ options: LanguageModelV4CallOptions,
+ ): Promise<LanguageModelV4StreamResult> {
  const {
  args,
  warnings,
@@ -446,11 +483,12 @@ export class XaiResponsesLanguageModel implements LanguageModelV3 {
  fetch: this.config.fetch,
  });

- let finishReason: LanguageModelV3FinishReason = {
+ let finishReason: LanguageModelV4FinishReason = {
  unified: 'other',
  raw: undefined,
  };
- let usage: LanguageModelV3Usage | undefined = undefined;
+ let hasFunctionCall = false;
+ let usage: LanguageModelV4Usage | undefined = undefined;
  let isFirstChunk = true;
  const contentBlocks: Record<string, { type: 'text' }> = {};
  const seenToolCalls = new Set<string>();
@@ -473,7 +511,7 @@ export class XaiResponsesLanguageModel implements LanguageModelV3 {
  stream: response.pipeThrough(
  new TransformStream<
  ParseResult<z.infer<typeof xaiResponsesChunkSchema>>,
- LanguageModelV3StreamPart
+ LanguageModelV4StreamPart
  >({
  start(controller) {
  controller.enqueue({ type: 'stream-start', warnings });
@@ -633,7 +671,8 @@ export class XaiResponsesLanguageModel implements LanguageModelV3 {

  if (
  event.type === 'response.done' ||
- event.type === 'response.completed'
+ event.type === 'response.completed' ||
+ event.type === 'response.incomplete'
  ) {
  const response = event.response;

@@ -641,9 +680,22 @@ export class XaiResponsesLanguageModel implements LanguageModelV3 {
  usage = convertXaiResponsesUsage(response.usage);
  }

- if (response.status) {
+ if (event.type === 'response.incomplete') {
+ const reason =
+ 'incomplete_details' in response
+ ? response.incomplete_details?.reason
+ : undefined;
  finishReason = {
- unified: mapXaiResponsesFinishReason(response.status),
+ unified: reason
+ ? mapXaiResponsesFinishReason(reason)
+ : 'other',
+ raw: reason ?? 'incomplete',
+ };
+ } else if ('status' in response && response.status) {
+ finishReason = {
+ unified: hasFunctionCall
+ ? 'tool-calls'
+ : mapXaiResponsesFinishReason(response.status),
  raw: response.status,
  };
  }
@@ -651,6 +703,25 @@ export class XaiResponsesLanguageModel implements LanguageModelV3 {
  return;
  }

+ if (event.type === 'response.failed') {
+ const reason = event.response.incomplete_details?.reason;
+ finishReason = {
+ unified: reason ? mapXaiResponsesFinishReason(reason) : 'error',
+ raw: reason ?? 'error',
+ };
+
+ if (event.response.usage) {
+ usage = convertXaiResponsesUsage(event.response.usage);
+ }
+
+ return;
+ }
+
+ if (event.type === 'error') {
+ controller.enqueue({ type: 'error', error: event });
+ return;
+ }
+
  // Custom tool call input streaming - already handled by output_item events
  if (
  event.type === 'response.custom_tool_call_input.delta' ||
@@ -911,6 +982,7 @@ export class XaiResponsesLanguageModel implements LanguageModelV3 {
  toolName: part.name,
  });
  } else if (event.type === 'response.output_item.done') {
+ hasFunctionCall = true;
  ongoingToolCalls[event.output_index] = undefined;

  controller.enqueue({
@@ -6,6 +6,9 @@ export type XaiResponsesModelId =
  | 'grok-4'
  | 'grok-4-fast-non-reasoning'
  | 'grok-4-fast-reasoning'
+ | 'grok-4.20-0309-non-reasoning'
+ | 'grok-4.20-0309-reasoning'
+ | 'grok-4.20-multi-agent-0309'
  | (string & {});

  /**
@@ -17,6 +20,7 @@ export const xaiLanguageModelResponsesOptions = z.object({
  * Possible values are `low` (uses fewer reasoning tokens), `medium` and `high` (uses more reasoning tokens).
  */
  reasoningEffort: z.enum(['low', 'medium', 'high']).optional(),
+ reasoningSummary: z.enum(['auto', 'concise', 'detailed']).optional(),
  logprobs: z.boolean().optional(),
  topLogprobs: z.number().int().min(0).max(8).optional(),
  /**
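
Taken together, the responses model now accepts a `reasoningSummary` provider option next to `reasoningEffort`, and falls back to the unified `reasoning` call option (minimal/low -> low, medium -> medium, high/xhigh -> high, none -> unset) when no explicit effort is given. A minimal usage sketch; the AI SDK call shape and the `xai.responses(...)` accessor are assumptions and may differ in this beta:

import { generateText } from 'ai';
import { xai } from '@ai-sdk/xai';

const result = await generateText({
  // `xai.responses(...)` is assumed here; adjust to however the provider
  // exposes the responses model in this release.
  model: xai.responses('grok-4-fast-reasoning'),
  prompt: 'Outline the trade-offs of streaming responses.',
  providerOptions: {
    xai: {
      reasoningEffort: 'high',      // sent as reasoning.effort in the request body
      reasoningSummary: 'detailed', // sent as reasoning.summary (new in this version)
    },
  },
});

console.log(result.text);
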