@ai-sdk/xai 2.0.0-canary.9 → 2.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -27,48 +27,732 @@ module.exports = __toCommonJS(src_exports);

  // src/xai-provider.ts
  var import_openai_compatible = require("@ai-sdk/openai-compatible");
+ var import_provider3 = require("@ai-sdk/provider");
+ var import_provider_utils4 = require("@ai-sdk/provider-utils");
+
+ // src/xai-chat-language-model.ts
+ var import_provider_utils3 = require("@ai-sdk/provider-utils");
+ var import_v43 = require("zod/v4");
+
+ // src/convert-to-xai-chat-messages.ts
  var import_provider = require("@ai-sdk/provider");
  var import_provider_utils = require("@ai-sdk/provider-utils");
+ function convertToXaiChatMessages(prompt) {
+ const messages = [];
+ const warnings = [];
+ for (const { role, content } of prompt) {
+ switch (role) {
+ case "system": {
+ messages.push({ role: "system", content });
+ break;
+ }
+ case "user": {
+ if (content.length === 1 && content[0].type === "text") {
+ messages.push({ role: "user", content: content[0].text });
+ break;
+ }
+ messages.push({
+ role: "user",
+ content: content.map((part) => {
+ switch (part.type) {
+ case "text": {
+ return { type: "text", text: part.text };
+ }
+ case "file": {
+ if (part.mediaType.startsWith("image/")) {
+ const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
+ return {
+ type: "image_url",
+ image_url: {
+ url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${(0, import_provider_utils.convertToBase64)(part.data)}`
+ }
+ };
+ } else {
+ throw new import_provider.UnsupportedFunctionalityError({
+ functionality: `file part media type ${part.mediaType}`
+ });
+ }
+ }
+ }
+ })
+ });
+ break;
+ }
+ case "assistant": {
+ let text = "";
+ const toolCalls = [];
+ for (const part of content) {
+ switch (part.type) {
+ case "text": {
+ text += part.text;
+ break;
+ }
+ case "tool-call": {
+ toolCalls.push({
+ id: part.toolCallId,
+ type: "function",
+ function: {
+ name: part.toolName,
+ arguments: JSON.stringify(part.input)
+ }
+ });
+ break;
+ }
+ }
+ }
+ messages.push({
+ role: "assistant",
+ content: text,
+ tool_calls: toolCalls.length > 0 ? toolCalls : void 0
+ });
+ break;
+ }
+ case "tool": {
+ for (const toolResponse of content) {
+ const output = toolResponse.output;
+ let contentValue;
+ switch (output.type) {
+ case "text":
+ case "error-text":
+ contentValue = output.value;
+ break;
+ case "content":
+ case "json":
+ case "error-json":
+ contentValue = JSON.stringify(output.value);
+ break;
+ }
+ messages.push({
+ role: "tool",
+ tool_call_id: toolResponse.toolCallId,
+ content: contentValue
+ });
+ }
+ break;
+ }
+ default: {
+ const _exhaustiveCheck = role;
+ throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
+ }
+ }
+ }
+ return { messages, warnings };
+ }

- // src/xai-chat-settings.ts
- function supportsStructuredOutputs(modelId) {
- return [
- "grok-3",
- "grok-3-beta",
- "grok-3-latest",
- "grok-3-fast",
- "grok-3-fast-beta",
- "grok-3-fast-latest",
- "grok-3-mini",
- "grok-3-mini-beta",
- "grok-3-mini-latest",
- "grok-3-mini-fast",
- "grok-3-mini-fast-beta",
- "grok-3-mini-fast-latest",
- "grok-2-1212",
- "grok-2-vision-1212"
- ].includes(modelId);
+ // src/get-response-metadata.ts
+ function getResponseMetadata({
+ id,
+ model,
+ created
+ }) {
+ return {
+ id: id != null ? id : void 0,
+ modelId: model != null ? model : void 0,
+ timestamp: created != null ? new Date(created * 1e3) : void 0
+ };
  }

+ // src/map-xai-finish-reason.ts
+ function mapXaiFinishReason(finishReason) {
+ switch (finishReason) {
+ case "stop":
+ return "stop";
+ case "length":
+ return "length";
+ case "tool_calls":
+ case "function_call":
+ return "tool-calls";
+ case "content_filter":
+ return "content-filter";
+ default:
+ return "unknown";
+ }
+ }
+
+ // src/xai-chat-options.ts
+ var import_v4 = require("zod/v4");
+ var webSourceSchema = import_v4.z.object({
+ type: import_v4.z.literal("web"),
+ country: import_v4.z.string().length(2).optional(),
+ excludedWebsites: import_v4.z.array(import_v4.z.string()).max(5).optional(),
+ allowedWebsites: import_v4.z.array(import_v4.z.string()).max(5).optional(),
+ safeSearch: import_v4.z.boolean().optional()
+ });
+ var xSourceSchema = import_v4.z.object({
+ type: import_v4.z.literal("x"),
+ xHandles: import_v4.z.array(import_v4.z.string()).optional()
+ });
+ var newsSourceSchema = import_v4.z.object({
+ type: import_v4.z.literal("news"),
+ country: import_v4.z.string().length(2).optional(),
+ excludedWebsites: import_v4.z.array(import_v4.z.string()).max(5).optional(),
+ safeSearch: import_v4.z.boolean().optional()
+ });
+ var rssSourceSchema = import_v4.z.object({
+ type: import_v4.z.literal("rss"),
+ links: import_v4.z.array(import_v4.z.string().url()).max(1)
+ // currently only supports one RSS link
+ });
+ var searchSourceSchema = import_v4.z.discriminatedUnion("type", [
+ webSourceSchema,
+ xSourceSchema,
+ newsSourceSchema,
+ rssSourceSchema
+ ]);
+ var xaiProviderOptions = import_v4.z.object({
+ /**
+ * reasoning effort for reasoning models
+ * only supported by grok-3-mini and grok-3-mini-fast models
+ */
+ reasoningEffort: import_v4.z.enum(["low", "high"]).optional(),
+ searchParameters: import_v4.z.object({
+ /**
+ * search mode preference
+ * - "off": disables search completely
+ * - "auto": model decides whether to search (default)
+ * - "on": always enables search
+ */
+ mode: import_v4.z.enum(["off", "auto", "on"]),
+ /**
+ * whether to return citations in the response
+ * defaults to true
+ */
+ returnCitations: import_v4.z.boolean().optional(),
+ /**
+ * start date for search data (ISO8601 format: YYYY-MM-DD)
+ */
+ fromDate: import_v4.z.string().optional(),
+ /**
+ * end date for search data (ISO8601 format: YYYY-MM-DD)
+ */
+ toDate: import_v4.z.string().optional(),
+ /**
+ * maximum number of search results to consider
+ * defaults to 20
+ */
+ maxSearchResults: import_v4.z.number().min(1).max(50).optional(),
+ /**
+ * data sources to search from
+ * defaults to ["web", "x"] if not specified
+ */
+ sources: import_v4.z.array(searchSourceSchema).optional()
+ }).optional()
+ });
+
  // src/xai-error.ts
- var import_zod = require("zod");
- var xaiErrorSchema = import_zod.z.object({
- code: import_zod.z.string(),
- error: import_zod.z.string()
+ var import_provider_utils2 = require("@ai-sdk/provider-utils");
+ var import_v42 = require("zod/v4");
+ var xaiErrorDataSchema = import_v42.z.object({
+ error: import_v42.z.object({
+ message: import_v42.z.string(),
+ type: import_v42.z.string().nullish(),
+ param: import_v42.z.any().nullish(),
+ code: import_v42.z.union([import_v42.z.string(), import_v42.z.number()]).nullish()
+ })
+ });
+ var xaiFailedResponseHandler = (0, import_provider_utils2.createJsonErrorResponseHandler)({
+ errorSchema: xaiErrorDataSchema,
+ errorToMessage: (data) => data.error.message
+ });
+
+ // src/xai-prepare-tools.ts
+ var import_provider2 = require("@ai-sdk/provider");
+ function prepareTools({
+ tools,
+ toolChoice
+ }) {
+ tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
+ const toolWarnings = [];
+ if (tools == null) {
+ return { tools: void 0, toolChoice: void 0, toolWarnings };
+ }
+ const xaiTools = [];
+ for (const tool of tools) {
+ if (tool.type === "provider-defined") {
+ toolWarnings.push({ type: "unsupported-tool", tool });
+ } else {
+ xaiTools.push({
+ type: "function",
+ function: {
+ name: tool.name,
+ description: tool.description,
+ parameters: tool.inputSchema
+ }
+ });
+ }
+ }
+ if (toolChoice == null) {
+ return { tools: xaiTools, toolChoice: void 0, toolWarnings };
+ }
+ const type = toolChoice.type;
+ switch (type) {
+ case "auto":
+ case "none":
+ return { tools: xaiTools, toolChoice: type, toolWarnings };
+ case "required":
+ return { tools: xaiTools, toolChoice: "required", toolWarnings };
+ case "tool":
+ return {
+ tools: xaiTools,
+ toolChoice: {
+ type: "function",
+ function: { name: toolChoice.toolName }
+ },
+ toolWarnings
+ };
+ default: {
+ const _exhaustiveCheck = type;
+ throw new import_provider2.UnsupportedFunctionalityError({
+ functionality: `tool choice type: ${_exhaustiveCheck}`
+ });
+ }
+ }
+ }
+
+ // src/xai-chat-language-model.ts
+ var XaiChatLanguageModel = class {
+ constructor(modelId, config) {
+ this.specificationVersion = "v2";
+ this.supportedUrls = {
+ "image/*": [/^https?:\/\/.*$/]
+ };
+ this.modelId = modelId;
+ this.config = config;
+ }
+ get provider() {
+ return this.config.provider;
+ }
+ async getArgs({
+ prompt,
+ maxOutputTokens,
+ temperature,
+ topP,
+ topK,
+ frequencyPenalty,
+ presencePenalty,
+ stopSequences,
+ seed,
+ responseFormat,
+ providerOptions,
+ tools,
+ toolChoice
+ }) {
+ var _a, _b, _c;
+ const warnings = [];
+ const options = (_a = await (0, import_provider_utils3.parseProviderOptions)({
+ provider: "xai",
+ providerOptions,
+ schema: xaiProviderOptions
+ })) != null ? _a : {};
+ if (topK != null) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "topK"
+ });
+ }
+ if (frequencyPenalty != null) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "frequencyPenalty"
+ });
+ }
+ if (presencePenalty != null) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "presencePenalty"
+ });
+ }
+ if (stopSequences != null) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "stopSequences"
+ });
+ }
+ if (responseFormat != null && responseFormat.type === "json" && responseFormat.schema != null) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "responseFormat",
+ details: "JSON response format schema is not supported"
+ });
+ }
+ const { messages, warnings: messageWarnings } = convertToXaiChatMessages(prompt);
+ warnings.push(...messageWarnings);
+ const {
+ tools: xaiTools,
+ toolChoice: xaiToolChoice,
+ toolWarnings
+ } = prepareTools({
+ tools,
+ toolChoice
+ });
+ warnings.push(...toolWarnings);
+ const baseArgs = {
+ // model id
+ model: this.modelId,
+ // standard generation settings
+ max_tokens: maxOutputTokens,
+ temperature,
+ top_p: topP,
+ seed,
+ reasoning_effort: options.reasoningEffort,
+ // response format
+ response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? responseFormat.schema != null ? {
+ type: "json_schema",
+ json_schema: {
+ name: (_b = responseFormat.name) != null ? _b : "response",
+ schema: responseFormat.schema,
+ strict: true
+ }
+ } : { type: "json_object" } : void 0,
+ // search parameters
+ search_parameters: options.searchParameters ? {
+ mode: options.searchParameters.mode,
+ return_citations: options.searchParameters.returnCitations,
+ from_date: options.searchParameters.fromDate,
+ to_date: options.searchParameters.toDate,
+ max_search_results: options.searchParameters.maxSearchResults,
+ sources: (_c = options.searchParameters.sources) == null ? void 0 : _c.map((source) => ({
+ type: source.type,
+ ...source.type === "web" && {
+ country: source.country,
+ excluded_websites: source.excludedWebsites,
+ allowed_websites: source.allowedWebsites,
+ safe_search: source.safeSearch
+ },
+ ...source.type === "x" && {
+ x_handles: source.xHandles
+ },
+ ...source.type === "news" && {
+ country: source.country,
+ excluded_websites: source.excludedWebsites,
+ safe_search: source.safeSearch
+ },
+ ...source.type === "rss" && {
+ links: source.links
+ }
+ }))
+ } : void 0,
+ // messages in xai format
+ messages,
+ // tools in xai format
+ tools: xaiTools,
+ tool_choice: xaiToolChoice
+ };
+ return {
+ args: baseArgs,
+ warnings
+ };
+ }
+ async doGenerate(options) {
+ var _a, _b, _c;
+ const { args: body, warnings } = await this.getArgs(options);
+ const {
+ responseHeaders,
+ value: response,
+ rawValue: rawResponse
+ } = await (0, import_provider_utils3.postJsonToApi)({
+ url: `${(_a = this.config.baseURL) != null ? _a : "https://api.x.ai/v1"}/chat/completions`,
+ headers: (0, import_provider_utils3.combineHeaders)(this.config.headers(), options.headers),
+ body,
+ failedResponseHandler: xaiFailedResponseHandler,
+ successfulResponseHandler: (0, import_provider_utils3.createJsonResponseHandler)(
+ xaiChatResponseSchema
+ ),
+ abortSignal: options.abortSignal,
+ fetch: this.config.fetch
+ });
+ const choice = response.choices[0];
+ const content = [];
+ if (choice.message.content != null && choice.message.content.length > 0) {
+ let text = choice.message.content;
+ const lastMessage = body.messages[body.messages.length - 1];
+ if ((lastMessage == null ? void 0 : lastMessage.role) === "assistant" && text === lastMessage.content) {
+ text = "";
+ }
+ if (text.length > 0) {
+ content.push({ type: "text", text });
+ }
+ }
+ if (choice.message.reasoning_content != null && choice.message.reasoning_content.length > 0) {
+ content.push({
+ type: "reasoning",
+ text: choice.message.reasoning_content
+ });
+ }
+ if (choice.message.tool_calls != null) {
+ for (const toolCall of choice.message.tool_calls) {
+ content.push({
+ type: "tool-call",
+ toolCallId: toolCall.id,
+ toolName: toolCall.function.name,
+ input: toolCall.function.arguments
+ });
+ }
+ }
+ if (response.citations != null) {
+ for (const url of response.citations) {
+ content.push({
+ type: "source",
+ sourceType: "url",
+ id: this.config.generateId(),
+ url
+ });
+ }
+ }
+ return {
+ content,
+ finishReason: mapXaiFinishReason(choice.finish_reason),
+ usage: {
+ inputTokens: response.usage.prompt_tokens,
+ outputTokens: response.usage.completion_tokens,
+ totalTokens: response.usage.total_tokens,
+ reasoningTokens: (_c = (_b = response.usage.completion_tokens_details) == null ? void 0 : _b.reasoning_tokens) != null ? _c : void 0
+ },
+ request: { body },
+ response: {
+ ...getResponseMetadata(response),
+ headers: responseHeaders,
+ body: rawResponse
+ },
+ warnings
+ };
+ }
+ async doStream(options) {
+ var _a;
+ const { args, warnings } = await this.getArgs(options);
+ const body = {
+ ...args,
+ stream: true,
+ stream_options: {
+ include_usage: true
+ }
+ };
+ const { responseHeaders, value: response } = await (0, import_provider_utils3.postJsonToApi)({
+ url: `${(_a = this.config.baseURL) != null ? _a : "https://api.x.ai/v1"}/chat/completions`,
+ headers: (0, import_provider_utils3.combineHeaders)(this.config.headers(), options.headers),
+ body,
+ failedResponseHandler: xaiFailedResponseHandler,
+ successfulResponseHandler: (0, import_provider_utils3.createEventSourceResponseHandler)(xaiChatChunkSchema),
+ abortSignal: options.abortSignal,
+ fetch: this.config.fetch
+ });
+ let finishReason = "unknown";
+ const usage = {
+ inputTokens: void 0,
+ outputTokens: void 0,
+ totalTokens: void 0
+ };
+ let isFirstChunk = true;
+ const contentBlocks = {};
+ const lastReasoningDeltas = {};
+ const self = this;
+ return {
+ stream: response.pipeThrough(
+ new TransformStream({
+ start(controller) {
+ controller.enqueue({ type: "stream-start", warnings });
+ },
+ transform(chunk, controller) {
+ var _a2, _b;
+ if (options.includeRawChunks) {
+ controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
+ }
+ if (!chunk.success) {
+ controller.enqueue({ type: "error", error: chunk.error });
+ return;
+ }
+ const value = chunk.value;
+ if (isFirstChunk) {
+ controller.enqueue({
+ type: "response-metadata",
+ ...getResponseMetadata(value)
+ });
+ isFirstChunk = false;
+ }
+ if (value.citations != null) {
+ for (const url of value.citations) {
+ controller.enqueue({
+ type: "source",
+ sourceType: "url",
+ id: self.config.generateId(),
+ url
+ });
+ }
+ }
+ if (value.usage != null) {
+ usage.inputTokens = value.usage.prompt_tokens;
+ usage.outputTokens = value.usage.completion_tokens;
+ usage.totalTokens = value.usage.total_tokens;
+ usage.reasoningTokens = (_b = (_a2 = value.usage.completion_tokens_details) == null ? void 0 : _a2.reasoning_tokens) != null ? _b : void 0;
+ }
+ const choice = value.choices[0];
+ if ((choice == null ? void 0 : choice.finish_reason) != null) {
+ finishReason = mapXaiFinishReason(choice.finish_reason);
+ }
+ if ((choice == null ? void 0 : choice.delta) == null) {
+ return;
+ }
+ const delta = choice.delta;
+ const choiceIndex = choice.index;
+ if (delta.content != null && delta.content.length > 0) {
+ const textContent = delta.content;
+ const lastMessage = body.messages[body.messages.length - 1];
+ if ((lastMessage == null ? void 0 : lastMessage.role) === "assistant" && textContent === lastMessage.content) {
+ return;
+ }
+ const blockId = `text-${value.id || choiceIndex}`;
+ if (contentBlocks[blockId] == null) {
+ contentBlocks[blockId] = { type: "text" };
+ controller.enqueue({
+ type: "text-start",
+ id: blockId
+ });
+ }
+ controller.enqueue({
+ type: "text-delta",
+ id: blockId,
+ delta: textContent
+ });
+ }
+ if (delta.reasoning_content != null && delta.reasoning_content.length > 0) {
+ const blockId = `reasoning-${value.id || choiceIndex}`;
+ if (lastReasoningDeltas[blockId] === delta.reasoning_content) {
+ return;
+ }
+ lastReasoningDeltas[blockId] = delta.reasoning_content;
+ if (contentBlocks[blockId] == null) {
+ contentBlocks[blockId] = { type: "reasoning" };
+ controller.enqueue({
+ type: "reasoning-start",
+ id: blockId
+ });
+ }
+ controller.enqueue({
+ type: "reasoning-delta",
+ id: blockId,
+ delta: delta.reasoning_content
+ });
+ }
+ if (delta.tool_calls != null) {
+ for (const toolCall of delta.tool_calls) {
+ const toolCallId = toolCall.id;
+ controller.enqueue({
+ type: "tool-input-start",
+ id: toolCallId,
+ toolName: toolCall.function.name
+ });
+ controller.enqueue({
+ type: "tool-input-delta",
+ id: toolCallId,
+ delta: toolCall.function.arguments
+ });
+ controller.enqueue({
+ type: "tool-input-end",
+ id: toolCallId
+ });
+ controller.enqueue({
+ type: "tool-call",
+ toolCallId,
+ toolName: toolCall.function.name,
+ input: toolCall.function.arguments
+ });
+ }
+ }
+ },
+ flush(controller) {
+ for (const [blockId, block] of Object.entries(contentBlocks)) {
+ controller.enqueue({
+ type: block.type === "text" ? "text-end" : "reasoning-end",
+ id: blockId
+ });
+ }
+ controller.enqueue({ type: "finish", finishReason, usage });
+ }
+ })
+ ),
+ request: { body },
+ response: { headers: responseHeaders }
+ };
+ }
+ };
+ var xaiUsageSchema = import_v43.z.object({
+ prompt_tokens: import_v43.z.number(),
+ completion_tokens: import_v43.z.number(),
+ total_tokens: import_v43.z.number(),
+ completion_tokens_details: import_v43.z.object({
+ reasoning_tokens: import_v43.z.number().nullish()
+ }).nullish()
+ });
+ var xaiChatResponseSchema = import_v43.z.object({
+ id: import_v43.z.string().nullish(),
+ created: import_v43.z.number().nullish(),
+ model: import_v43.z.string().nullish(),
+ choices: import_v43.z.array(
+ import_v43.z.object({
+ message: import_v43.z.object({
+ role: import_v43.z.literal("assistant"),
+ content: import_v43.z.string().nullish(),
+ reasoning_content: import_v43.z.string().nullish(),
+ tool_calls: import_v43.z.array(
+ import_v43.z.object({
+ id: import_v43.z.string(),
+ type: import_v43.z.literal("function"),
+ function: import_v43.z.object({
+ name: import_v43.z.string(),
+ arguments: import_v43.z.string()
+ })
+ })
+ ).nullish()
+ }),
+ index: import_v43.z.number(),
+ finish_reason: import_v43.z.string().nullish()
+ })
+ ),
+ object: import_v43.z.literal("chat.completion"),
+ usage: xaiUsageSchema,
+ citations: import_v43.z.array(import_v43.z.string().url()).nullish()
+ });
+ var xaiChatChunkSchema = import_v43.z.object({
+ id: import_v43.z.string().nullish(),
+ created: import_v43.z.number().nullish(),
+ model: import_v43.z.string().nullish(),
+ choices: import_v43.z.array(
+ import_v43.z.object({
+ delta: import_v43.z.object({
+ role: import_v43.z.enum(["assistant"]).optional(),
+ content: import_v43.z.string().nullish(),
+ reasoning_content: import_v43.z.string().nullish(),
+ tool_calls: import_v43.z.array(
+ import_v43.z.object({
+ id: import_v43.z.string(),
+ type: import_v43.z.literal("function"),
+ function: import_v43.z.object({
+ name: import_v43.z.string(),
+ arguments: import_v43.z.string()
+ })
+ })
+ ).nullish()
+ }),
+ finish_reason: import_v43.z.string().nullish(),
+ index: import_v43.z.number()
+ })
+ ),
+ usage: xaiUsageSchema.nullish(),
+ citations: import_v43.z.array(import_v43.z.string().url()).nullish()
  });

  // src/xai-provider.ts
  var xaiErrorStructure = {
- errorSchema: xaiErrorSchema,
- errorToMessage: (data) => data.error
+ errorSchema: xaiErrorDataSchema,
+ errorToMessage: (data) => data.error.message
  };
  function createXai(options = {}) {
  var _a;
- const baseURL = (0, import_provider_utils.withoutTrailingSlash)(
+ const baseURL = (0, import_provider_utils4.withoutTrailingSlash)(
  (_a = options.baseURL) != null ? _a : "https://api.x.ai/v1"
  );
  const getHeaders = () => ({
- Authorization: `Bearer ${(0, import_provider_utils.loadApiKey)({
+ Authorization: `Bearer ${(0, import_provider_utils4.loadApiKey)({
  apiKey: options.apiKey,
  environmentVariableName: "XAI_API_KEY",
  description: "xAI API key"
@@ -76,18 +760,16 @@ function createXai(options = {}) {
  ...options.headers
  });
  const createLanguageModel = (modelId) => {
- const structuredOutputs = supportsStructuredOutputs(modelId);
- return new import_openai_compatible.OpenAICompatibleChatLanguageModel(modelId, {
+ return new XaiChatLanguageModel(modelId, {
  provider: "xai.chat",
- url: ({ path }) => `${baseURL}${path}`,
+ baseURL,
  headers: getHeaders,
- fetch: options.fetch,
- errorStructure: xaiErrorStructure,
- supportsStructuredOutputs: structuredOutputs
+ generateId: import_provider_utils4.generateId,
+ fetch: options.fetch
  });
  };
- const createImageModel = (modelId, settings = {}) => {
- return new import_openai_compatible.OpenAICompatibleImageModel(modelId, settings, {
+ const createImageModel = (modelId) => {
+ return new import_openai_compatible.OpenAICompatibleImageModel(modelId, {
  provider: "xai.image",
  url: ({ path }) => `${baseURL}${path}`,
  headers: getHeaders,
@@ -99,7 +781,7 @@ function createXai(options = {}) {
  provider.languageModel = createLanguageModel;
  provider.chat = createLanguageModel;
  provider.textEmbeddingModel = (modelId) => {
- throw new import_provider.NoSuchModelError({ modelId, modelType: "textEmbeddingModel" });
+ throw new import_provider3.NoSuchModelError({ modelId, modelType: "textEmbeddingModel" });
  };
  provider.imageModel = createImageModel;
  provider.image = createImageModel;
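
The new xaiProviderOptions schema added in this release (reasoningEffort and searchParameters) is read through the AI SDK's providerOptions under the "xai" key and mapped to reasoning_effort and search_parameters in the request body, as shown in getArgs above. A minimal usage sketch, assuming the AI SDK's generateText function and the default xai provider instance exported by this package; the prompt and option values are illustrative, not taken from this diff:

import { xai } from "@ai-sdk/xai";
import { generateText } from "ai";

// The API key is read from options.apiKey or the XAI_API_KEY environment
// variable (see createXai above). Citations returned by the API are exposed
// as "source" content parts by doGenerate/doStream.
const { text } = await generateText({
  model: xai("grok-3-mini"),
  prompt: "Summarize this week's announcements about the xAI API.",
  providerOptions: {
    xai: {
      reasoningEffort: "low", // "low" | "high"; grok-3-mini / grok-3-mini-fast only
      searchParameters: {
        mode: "auto", // "off" | "auto" | "on"
        returnCitations: true, // defaults to true
        maxSearchResults: 10, // 1-50, defaults to 20
        sources: [{ type: "web", safeSearch: true }, { type: "x" }],
      },
    },
  },
});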