@adaline/xai 1.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js ADDED
@@ -0,0 +1,2179 @@
1
+ "use strict";
2
+ var __defProp = Object.defineProperty;
3
+ var __defProps = Object.defineProperties;
4
+ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
5
+ var __getOwnPropDescs = Object.getOwnPropertyDescriptors;
6
+ var __getOwnPropNames = Object.getOwnPropertyNames;
7
+ var __getOwnPropSymbols = Object.getOwnPropertySymbols;
8
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
9
+ var __propIsEnum = Object.prototype.propertyIsEnumerable;
10
+ var __knownSymbol = (name, symbol) => (symbol = Symbol[name]) ? symbol : Symbol.for("Symbol." + name);
11
+ var __typeError = (msg) => {
12
+ throw TypeError(msg);
13
+ };
14
+ var __defNormalProp = (obj, key, value) => key in obj ? __defProp(obj, key, { enumerable: true, configurable: true, writable: true, value }) : obj[key] = value;
15
+ var __spreadValues = (a, b) => {
16
+ for (var prop in b || (b = {}))
17
+ if (__hasOwnProp.call(b, prop))
18
+ __defNormalProp(a, prop, b[prop]);
19
+ if (__getOwnPropSymbols)
20
+ for (var prop of __getOwnPropSymbols(b)) {
21
+ if (__propIsEnum.call(b, prop))
22
+ __defNormalProp(a, prop, b[prop]);
23
+ }
24
+ return a;
25
+ };
26
+ var __spreadProps = (a, b) => __defProps(a, __getOwnPropDescs(b));
27
+ var __export = (target, all) => {
28
+ for (var name in all)
29
+ __defProp(target, name, { get: all[name], enumerable: true });
30
+ };
31
+ var __copyProps = (to, from, except, desc) => {
32
+ if (from && typeof from === "object" || typeof from === "function") {
33
+ for (let key of __getOwnPropNames(from))
34
+ if (!__hasOwnProp.call(to, key) && key !== except)
35
+ __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
36
+ }
37
+ return to;
38
+ };
39
+ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
40
+ var __async = (__this, __arguments, generator) => {
41
+ return new Promise((resolve, reject) => {
42
+ var fulfilled = (value) => {
43
+ try {
44
+ step(generator.next(value));
45
+ } catch (e) {
46
+ reject(e);
47
+ }
48
+ };
49
+ var rejected = (value) => {
50
+ try {
51
+ step(generator.throw(value));
52
+ } catch (e) {
53
+ reject(e);
54
+ }
55
+ };
56
+ var step = (x) => x.done ? resolve(x.value) : Promise.resolve(x.value).then(fulfilled, rejected);
57
+ step((generator = generator.apply(__this, __arguments)).next());
58
+ });
59
+ };
60
+ var __await = function(promise, isYieldStar) {
61
+ this[0] = promise;
62
+ this[1] = isYieldStar;
63
+ };
64
+ var __asyncGenerator = (__this, __arguments, generator) => {
65
+ var resume = (k, v, yes, no) => {
66
+ try {
67
+ var x = generator[k](v), isAwait = (v = x.value) instanceof __await, done = x.done;
68
+ Promise.resolve(isAwait ? v[0] : v).then((y) => isAwait ? resume(k === "return" ? k : "next", v[1] ? { done: y.done, value: y.value } : y, yes, no) : yes({ value: y, done })).catch((e) => resume("throw", e, yes, no));
69
+ } catch (e) {
70
+ no(e);
71
+ }
72
+ }, method = (k) => it[k] = (x) => new Promise((yes, no) => resume(k, x, yes, no)), it = {};
73
+ return generator = generator.apply(__this, __arguments), it[__knownSymbol("asyncIterator")] = () => it, method("next"), method("throw"), method("return"), it;
74
+ };
75
+ var __yieldStar = (value) => {
76
+ var obj = value[__knownSymbol("asyncIterator")], isAwait = false, method, it = {};
77
+ if (obj == null) {
78
+ obj = value[__knownSymbol("iterator")]();
79
+ method = (k) => it[k] = (x) => obj[k](x);
80
+ } else {
81
+ obj = obj.call(value);
82
+ method = (k) => it[k] = (v) => {
83
+ if (isAwait) {
84
+ isAwait = false;
85
+ if (k === "throw") throw v;
86
+ return v;
87
+ }
88
+ isAwait = true;
89
+ return {
90
+ done: false,
91
+ value: new __await(new Promise((resolve) => {
92
+ var x = obj[k](v);
93
+ if (!(x instanceof Object)) __typeError("Object expected");
94
+ resolve(x);
95
+ }), 1)
96
+ };
97
+ };
98
+ }
99
+ return it[__knownSymbol("iterator")] = () => it, method("next"), "throw" in obj ? method("throw") : it.throw = (x) => {
100
+ throw x;
101
+ }, "return" in obj && method("return"), it;
102
+ };
103
+
104
+ // src/index.ts
105
+ var src_exports = {};
106
+ __export(src_exports, {
107
+ BaseChatModel: () => BaseChatModel,
108
+ BaseChatModelOptions: () => BaseChatModelOptions,
109
+ Grok_2: () => Grok_2,
110
+ Grok_2_1212: () => Grok_2_1212,
111
+ Grok_2_1212_Literal: () => Grok_2_1212_Literal,
112
+ Grok_2_1212_Options: () => Grok_2_1212_Options,
113
+ Grok_2_1212_Schema: () => Grok_2_1212_Schema,
114
+ Grok_2_Latest: () => Grok_2_Latest,
115
+ Grok_2_Latest_Literal: () => Grok_2_Latest_Literal,
116
+ Grok_2_Latest_Options: () => Grok_2_Latest_Options,
117
+ Grok_2_Latest_Schema: () => Grok_2_Latest_Schema,
118
+ Grok_2_Literal: () => Grok_2_Literal,
119
+ Grok_2_Options: () => Grok_2_Options,
120
+ Grok_2_Schema: () => Grok_2_Schema,
121
+ Grok_3_Beta: () => Grok_3_Beta,
122
+ Grok_3_Beta_Literal: () => Grok_3_Beta_Literal,
123
+ Grok_3_Beta_Options: () => Grok_3_Beta_Options,
124
+ Grok_3_Beta_Schema: () => Grok_3_Beta_Schema,
125
+ Grok_3_Fast_Beta: () => Grok_3_Fast_Beta,
126
+ Grok_3_Fast_Beta_Literal: () => Grok_3_Fast_Beta_Literal,
127
+ Grok_3_Fast_Beta_Options: () => Grok_3_Fast_Beta_Options,
128
+ Grok_3_Fast_Beta_Schema: () => Grok_3_Fast_Beta_Schema,
129
+ Grok_3_Mini_Beta: () => Grok_3_Mini_Beta,
130
+ Grok_3_Mini_Beta_Literal: () => Grok_3_Mini_Beta_Literal,
131
+ Grok_3_Mini_Beta_Options: () => Grok_3_Mini_Beta_Options,
132
+ Grok_3_Mini_Beta_Schema: () => Grok_3_Mini_Beta_Schema,
133
+ Grok_3_Mini_Fast_Beta: () => Grok_3_Mini_Fast_Beta,
134
+ Grok_3_Mini_Fast_Beta_Literal: () => Grok_3_Mini_Fast_Beta_Literal,
135
+ Grok_3_Mini_Fast_Beta_Options: () => Grok_3_Mini_Fast_Beta_Options,
136
+ Grok_3_Mini_Fast_Beta_Schema: () => Grok_3_Mini_Fast_Beta_Schema,
137
+ Grok_4: () => Grok_4,
138
+ Grok_4_0709: () => Grok_4_0709,
139
+ Grok_4_0709_Literal: () => Grok_4_0709_Literal,
140
+ Grok_4_0709_Options: () => Grok_4_0709_Options,
141
+ Grok_4_0709_Schema: () => Grok_4_0709_Schema,
142
+ Grok_4_1_Fast_Non_Reasoning: () => Grok_4_1_Fast_Non_Reasoning,
143
+ Grok_4_1_Fast_Non_Reasoning_Literal: () => Grok_4_1_Fast_Non_Reasoning_Literal,
144
+ Grok_4_1_Fast_Non_Reasoning_Options: () => Grok_4_1_Fast_Non_Reasoning_Options,
145
+ Grok_4_1_Fast_Non_Reasoning_Schema: () => Grok_4_1_Fast_Non_Reasoning_Schema,
146
+ Grok_4_1_Fast_Reasoning: () => Grok_4_1_Fast_Reasoning,
147
+ Grok_4_1_Fast_Reasoning_Literal: () => Grok_4_1_Fast_Reasoning_Literal,
148
+ Grok_4_1_Fast_Reasoning_Options: () => Grok_4_1_Fast_Reasoning_Options,
149
+ Grok_4_1_Fast_Reasoning_Schema: () => Grok_4_1_Fast_Reasoning_Schema,
150
+ Grok_4_Fast_Non_Reasoning: () => Grok_4_Fast_Non_Reasoning,
151
+ Grok_4_Fast_Non_Reasoning_Literal: () => Grok_4_Fast_Non_Reasoning_Literal,
152
+ Grok_4_Fast_Non_Reasoning_Options: () => Grok_4_Fast_Non_Reasoning_Options,
153
+ Grok_4_Fast_Non_Reasoning_Schema: () => Grok_4_Fast_Non_Reasoning_Schema,
154
+ Grok_4_Fast_Reasoning: () => Grok_4_Fast_Reasoning,
155
+ Grok_4_Fast_Reasoning_Literal: () => Grok_4_Fast_Reasoning_Literal,
156
+ Grok_4_Fast_Reasoning_Options: () => Grok_4_Fast_Reasoning_Options,
157
+ Grok_4_Fast_Reasoning_Schema: () => Grok_4_Fast_Reasoning_Schema,
158
+ Grok_4_Literal: () => Grok_4_Literal,
159
+ Grok_4_Options: () => Grok_4_Options,
160
+ Grok_4_Schema: () => Grok_4_Schema,
161
+ Grok_Code_Fast_1: () => Grok_Code_Fast_1,
162
+ Grok_Code_Fast_1_Literal: () => Grok_Code_Fast_1_Literal,
163
+ Grok_Code_Fast_1_Options: () => Grok_Code_Fast_1_Options,
164
+ Grok_Code_Fast_1_Schema: () => Grok_Code_Fast_1_Schema,
165
+ ProviderLiteral: () => ProviderLiteral,
166
+ XAI: () => XAI,
167
+ XAIChatModelConfigs: () => chat_model_exports,
168
+ XAIChatModelModalities: () => XAIChatModelModalities,
169
+ XAIChatModelModalitiesEnum: () => XAIChatModelModalitiesEnum,
170
+ XAIChatModelRoles: () => XAIChatModelRoles,
171
+ XAIChatModelRolesMap: () => XAIChatModelRolesMap,
172
+ XAIChatModelTextModalities: () => XAIChatModelTextModalities,
173
+ XAIChatModelTextModalitiesEnum: () => XAIChatModelTextModalitiesEnum,
174
+ XAIChatModelTextToolModalities: () => XAIChatModelTextToolModalities,
175
+ XAIChatModelTextToolModalitiesEnum: () => XAIChatModelTextToolModalitiesEnum,
176
+ XAIChatRequest: () => XAIChatRequest,
177
+ XAIChatRequestAssistantMessage: () => XAIChatRequestAssistantMessage,
178
+ XAIChatRequestImageContent: () => XAIChatRequestImageContent,
179
+ XAIChatRequestMessage: () => XAIChatRequestMessage,
180
+ XAIChatRequestResponseFormat: () => XAIChatRequestResponseFormat,
181
+ XAIChatRequestSystemMessage: () => XAIChatRequestSystemMessage,
182
+ XAIChatRequestTextContent: () => XAIChatRequestTextContent,
183
+ XAIChatRequestTool: () => XAIChatRequestTool,
184
+ XAIChatRequestToolCallContent: () => XAIChatRequestToolCallContent,
185
+ XAIChatRequestToolChoiceEnum: () => XAIChatRequestToolChoiceEnum,
186
+ XAIChatRequestToolChoiceFunction: () => XAIChatRequestToolChoiceFunction,
187
+ XAIChatRequestToolMessage: () => XAIChatRequestToolMessage,
188
+ XAIChatRequestUserMessage: () => XAIChatRequestUserMessage,
189
+ XAICompleteChatResponse: () => XAICompleteChatResponse,
190
+ XAIStreamChatResponse: () => XAIStreamChatResponse,
191
+ XAIToolCallsCompleteChatResponse: () => XAIToolCallsCompleteChatResponse,
192
+ XAIToolCallsStreamChatResponse: () => XAIToolCallsStreamChatResponse
193
+ });
194
+ module.exports = __toCommonJS(src_exports);
195
+
196
+ // src/provider/provider.xai.ts
197
+ var import_provider19 = require("@adaline/provider");
198
+
199
+ // src/models/chat-models/base-chat-model.xai.ts
200
+ var import_zod5 = require("zod");
201
+ var import_provider = require("@adaline/provider");
202
+ var import_types3 = require("@adaline/types");
203
+
204
+ // src/models/pricing.json
205
+ var pricing_default = {
206
+ "grok-2": {
207
+ modelName: "grok-2",
208
+ currency: "USD",
209
+ tokenRanges: [
210
+ {
211
+ minTokens: 0,
212
+ maxTokens: null,
213
+ prices: {
214
+ base: {
215
+ inputPricePerMillion: 2,
216
+ outputPricePerMillion: 10
217
+ }
218
+ }
219
+ }
220
+ ]
221
+ },
222
+ "grok-2-latest": {
223
+ modelName: "grok-2-latest",
224
+ currency: "USD",
225
+ tokenRanges: [
226
+ {
227
+ minTokens: 0,
228
+ maxTokens: null,
229
+ prices: {
230
+ base: {
231
+ inputPricePerMillion: 2,
232
+ outputPricePerMillion: 10
233
+ }
234
+ }
235
+ }
236
+ ]
237
+ },
238
+ "grok-2-1212": {
239
+ modelName: "grok-2-1212",
240
+ currency: "USD",
241
+ tokenRanges: [
242
+ {
243
+ minTokens: 0,
244
+ maxTokens: null,
245
+ prices: {
246
+ base: {
247
+ inputPricePerMillion: 2,
248
+ outputPricePerMillion: 10
249
+ }
250
+ }
251
+ }
252
+ ]
253
+ },
254
+ "grok-2-vision": {
255
+ modelName: "grok-2-vision",
256
+ currency: "USD",
257
+ tokenRanges: [
258
+ {
259
+ minTokens: 0,
260
+ maxTokens: null,
261
+ prices: {
262
+ base: {
263
+ inputPricePerMillion: 2,
264
+ outputPricePerMillion: 10
265
+ }
266
+ }
267
+ }
268
+ ]
269
+ },
270
+ "grok-2-vision-latest": {
271
+ modelName: "grok-2-vision-latest",
272
+ currency: "USD",
273
+ tokenRanges: [
274
+ {
275
+ minTokens: 0,
276
+ maxTokens: null,
277
+ prices: {
278
+ base: {
279
+ inputPricePerMillion: 2,
280
+ outputPricePerMillion: 10
281
+ }
282
+ }
283
+ }
284
+ ]
285
+ },
286
+ "grok-2-vision-1212": {
287
+ modelName: "grok-2-vision-1212",
288
+ currency: "USD",
289
+ tokenRanges: [
290
+ {
291
+ minTokens: 0,
292
+ maxTokens: null,
293
+ prices: {
294
+ base: {
295
+ inputPricePerMillion: 2,
296
+ outputPricePerMillion: 10
297
+ }
298
+ }
299
+ }
300
+ ]
301
+ },
302
+ "grok-3-beta": {
303
+ modelName: "grok-3-beta",
304
+ currency: "USD",
305
+ tokenRanges: [
306
+ {
307
+ minTokens: 0,
308
+ maxTokens: null,
309
+ prices: {
310
+ base: {
311
+ inputPricePerMillion: 3,
312
+ outputPricePerMillion: 15
313
+ }
314
+ }
315
+ }
316
+ ]
317
+ },
318
+ "grok-3-fast-beta": {
319
+ modelName: "grok-3-fast-beta",
320
+ currency: "USD",
321
+ tokenRanges: [
322
+ {
323
+ minTokens: 0,
324
+ maxTokens: null,
325
+ prices: {
326
+ base: {
327
+ inputPricePerMillion: 5,
328
+ outputPricePerMillion: 25
329
+ }
330
+ }
331
+ }
332
+ ]
333
+ },
334
+ "grok-3-mini-beta": {
335
+ modelName: "grok-3-mini-beta",
336
+ currency: "USD",
337
+ tokenRanges: [
338
+ {
339
+ minTokens: 0,
340
+ maxTokens: null,
341
+ prices: {
342
+ base: {
343
+ inputPricePerMillion: 0.3,
344
+ outputPricePerMillion: 0.5
345
+ }
346
+ }
347
+ }
348
+ ]
349
+ },
350
+ "grok-3-mini-fast-beta": {
351
+ modelName: "grok-3-mini-fast-beta",
352
+ currency: "USD",
353
+ tokenRanges: [
354
+ {
355
+ minTokens: 0,
356
+ maxTokens: null,
357
+ prices: {
358
+ base: {
359
+ inputPricePerMillion: 0.6,
360
+ outputPricePerMillion: 4
361
+ }
362
+ }
363
+ }
364
+ ]
365
+ },
366
+ "grok-4": {
367
+ modelName: "grok-4",
368
+ currency: "USD",
369
+ tokenRanges: [
370
+ {
371
+ minTokens: 0,
372
+ maxTokens: null,
373
+ prices: {
374
+ base: {
375
+ inputPricePerMillion: 6,
376
+ outputPricePerMillion: 18
377
+ }
378
+ }
379
+ }
380
+ ]
381
+ },
382
+ "grok-4-0709": {
383
+ modelName: "grok-4-0709",
384
+ currency: "USD",
385
+ tokenRanges: [
386
+ {
387
+ minTokens: 0,
388
+ maxTokens: null,
389
+ prices: {
390
+ base: {
391
+ inputPricePerMillion: 6,
392
+ outputPricePerMillion: 18
393
+ }
394
+ }
395
+ }
396
+ ]
397
+ },
398
+ "grok-4-fast-reasoning": {
399
+ modelName: "grok-4-fast-reasoning",
400
+ currency: "USD",
401
+ tokenRanges: [
402
+ {
403
+ minTokens: 0,
404
+ maxTokens: null,
405
+ prices: {
406
+ base: {
407
+ inputPricePerMillion: 3,
408
+ outputPricePerMillion: 12
409
+ }
410
+ }
411
+ }
412
+ ]
413
+ },
414
+ "grok-4-fast-non-reasoning": {
415
+ modelName: "grok-4-fast-non-reasoning",
416
+ currency: "USD",
417
+ tokenRanges: [
418
+ {
419
+ minTokens: 0,
420
+ maxTokens: null,
421
+ prices: {
422
+ base: {
423
+ inputPricePerMillion: 3,
424
+ outputPricePerMillion: 12
425
+ }
426
+ }
427
+ }
428
+ ]
429
+ },
430
+ "grok-4.1-fast-reasoning": {
431
+ modelName: "grok-4.1-fast-reasoning",
432
+ currency: "USD",
433
+ tokenRanges: [
434
+ {
435
+ minTokens: 0,
436
+ maxTokens: null,
437
+ prices: {
438
+ base: {
439
+ inputPricePerMillion: 3,
440
+ outputPricePerMillion: 12
441
+ }
442
+ }
443
+ }
444
+ ]
445
+ },
446
+ "grok-4.1-fast-non-reasoning": {
447
+ modelName: "grok-4.1-fast-non-reasoning",
448
+ currency: "USD",
449
+ tokenRanges: [
450
+ {
451
+ minTokens: 0,
452
+ maxTokens: null,
453
+ prices: {
454
+ base: {
455
+ inputPricePerMillion: 3,
456
+ outputPricePerMillion: 12
457
+ }
458
+ }
459
+ }
460
+ ]
461
+ },
462
+ "grok-code-fast-1": {
463
+ modelName: "grok-code-fast-1",
464
+ currency: "USD",
465
+ tokenRanges: [
466
+ {
467
+ minTokens: 0,
468
+ maxTokens: null,
469
+ prices: {
470
+ base: {
471
+ inputPricePerMillion: 0.3,
472
+ outputPricePerMillion: 0.5
473
+ }
474
+ }
475
+ }
476
+ ]
477
+ }
478
+ };
479
+
480
+ // src/models/chat-models/types/roles.chat-model.xai.ts
481
+ var import_zod = require("zod");
482
+ var import_types = require("@adaline/types");
483
+ var XAIChatModelRoles = import_zod.z.enum([import_types.SystemRoleLiteral, import_types.UserRoleLiteral, import_types.AssistantRoleLiteral, import_types.ToolRoleLiteral]);
484
+ var XAIChatModelRolesMap = {
485
+ system: import_types.SystemRoleLiteral,
486
+ user: import_types.UserRoleLiteral,
487
+ assistant: import_types.AssistantRoleLiteral,
488
+ tool: import_types.ToolRoleLiteral
489
+ };
490
+
491
+ // src/models/chat-models/types/modalities.chat-model.xai.ts
492
+ var import_zod2 = require("zod");
493
+ var import_types2 = require("@adaline/types");
494
+ var XAIChatModelModalities = [
495
+ import_types2.TextModalityLiteral,
496
+ import_types2.ImageModalityLiteral,
497
+ import_types2.ToolCallModalityLiteral,
498
+ import_types2.ToolResponseModalityLiteral
499
+ ];
500
+ var XAIChatModelModalitiesEnum = import_zod2.z.enum([
501
+ import_types2.TextModalityLiteral,
502
+ import_types2.ImageModalityLiteral,
503
+ import_types2.ToolCallModalityLiteral,
504
+ import_types2.ToolResponseModalityLiteral
505
+ ]);
506
+ var XAIChatModelTextModalities = [import_types2.TextModalityLiteral];
507
+ var XAIChatModelTextModalitiesEnum = import_zod2.z.enum([import_types2.TextModalityLiteral]);
508
+ var XAIChatModelTextToolModalities = [
509
+ import_types2.TextModalityLiteral,
510
+ import_types2.ToolCallModalityLiteral,
511
+ import_types2.ToolResponseModalityLiteral
512
+ ];
513
+ var XAIChatModelTextToolModalitiesEnum = import_zod2.z.enum([import_types2.TextModalityLiteral, import_types2.ToolCallModalityLiteral, import_types2.ToolResponseModalityLiteral]);
514
+
515
+ // src/models/chat-models/types/response.chat-model.xai.ts
516
+ var import_zod3 = require("zod");
517
+ var XAIBaseLogProb = import_zod3.z.object({
518
+ token: import_zod3.z.string(),
519
+ logprob: import_zod3.z.number(),
520
+ bytes: import_zod3.z.array(import_zod3.z.number()).nullable()
521
+ });
522
+ var XAILogProb = import_zod3.z.object({
523
+ content: import_zod3.z.array(
524
+ XAIBaseLogProb.extend({
525
+ top_logprobs: import_zod3.z.array(XAIBaseLogProb)
526
+ })
527
+ ).nullable().optional(),
528
+ refusal: import_zod3.z.array(
529
+ XAIBaseLogProb.extend({
530
+ top_logprobs: import_zod3.z.array(XAIBaseLogProb)
531
+ })
532
+ ).nullable().optional()
533
+ }).nullable();
534
+ var XAIToolCallsCompleteChatResponse = import_zod3.z.array(
535
+ import_zod3.z.object({
536
+ id: import_zod3.z.string().min(1),
537
+ type: import_zod3.z.enum(["function"]),
538
+ function: import_zod3.z.object({
539
+ name: import_zod3.z.string(),
540
+ arguments: import_zod3.z.string()
541
+ })
542
+ })
543
+ );
544
+ var XAICompleteChatResponse = import_zod3.z.object({
545
+ id: import_zod3.z.string(),
546
+ object: import_zod3.z.literal("chat.completion"),
547
+ created: import_zod3.z.number(),
548
+ model: import_zod3.z.string(),
549
+ system_fingerprint: import_zod3.z.string().nullable().optional(),
550
+ choices: import_zod3.z.array(
551
+ import_zod3.z.object({
552
+ index: import_zod3.z.number(),
553
+ message: import_zod3.z.object({
554
+ role: import_zod3.z.string(),
555
+ content: import_zod3.z.string().nullable().optional(),
556
+ tool_calls: XAIToolCallsCompleteChatResponse.optional(),
557
+ refusal: import_zod3.z.string().nullable().optional()
558
+ }),
559
+ logprobs: XAILogProb.optional(),
560
+ finish_reason: import_zod3.z.string()
561
+ })
562
+ ),
563
+ usage: import_zod3.z.object({
564
+ prompt_tokens: import_zod3.z.number(),
565
+ completion_tokens: import_zod3.z.number(),
566
+ total_tokens: import_zod3.z.number(),
567
+ reasoning_tokens: import_zod3.z.number().optional()
568
+ })
569
+ });
570
+ var XAIToolCallsStreamChatResponse = import_zod3.z.array(
571
+ import_zod3.z.object({
572
+ index: import_zod3.z.number().int(),
573
+ id: import_zod3.z.string().min(1).optional(),
574
+ type: import_zod3.z.enum(["function"]).optional(),
575
+ function: import_zod3.z.object({
576
+ name: import_zod3.z.string().min(1).optional(),
577
+ arguments: import_zod3.z.string().optional()
578
+ }).optional()
579
+ })
580
+ );
581
+ var XAIStreamChatResponse = import_zod3.z.object({
582
+ id: import_zod3.z.string(),
583
+ object: import_zod3.z.string(),
584
+ created: import_zod3.z.number(),
585
+ model: import_zod3.z.string(),
586
+ system_fingerprint: import_zod3.z.string().nullable().optional(),
587
+ choices: import_zod3.z.array(
588
+ import_zod3.z.object({
589
+ index: import_zod3.z.number(),
590
+ delta: import_zod3.z.object({
591
+ content: import_zod3.z.string().nullable().optional(),
592
+ tool_calls: XAIToolCallsStreamChatResponse.optional(),
593
+ refusal: import_zod3.z.string().nullable().optional()
594
+ }).or(import_zod3.z.object({})),
595
+ logprobs: XAILogProb.optional(),
596
+ finish_reason: import_zod3.z.string().nullable().optional()
597
+ })
598
+ ),
599
+ usage: import_zod3.z.object({
600
+ prompt_tokens: import_zod3.z.number(),
601
+ completion_tokens: import_zod3.z.number(),
602
+ total_tokens: import_zod3.z.number(),
603
+ reasoning_tokens: import_zod3.z.number().optional()
604
+ }).nullable().optional()
605
+ });
606
+
607
+ // src/models/chat-models/types/request.chat-model.xai.ts
608
+ var import_zod4 = require("zod");
609
+ var XAIChatRequestTool = import_zod4.z.object({
610
+ type: import_zod4.z.literal("function"),
611
+ function: import_zod4.z.object({
612
+ name: import_zod4.z.string().min(1),
613
+ description: import_zod4.z.string().min(1).optional(),
614
+ strict: import_zod4.z.boolean().optional(),
615
+ parameters: import_zod4.z.any()
616
+ })
617
+ });
618
+ var XAIChatRequestToolChoiceEnum = import_zod4.z.enum(["none", "auto", "required"]);
619
+ var XAIChatRequestToolChoiceFunction = import_zod4.z.object({
620
+ type: import_zod4.z.literal("function"),
621
+ function: import_zod4.z.object({
622
+ name: import_zod4.z.string().min(1)
623
+ })
624
+ });
625
+ var XAIChatRequestResponseFormat = import_zod4.z.object({
626
+ type: import_zod4.z.enum(["text", "json_object"])
627
+ }).or(
628
+ import_zod4.z.object({
629
+ type: import_zod4.z.literal("json_schema"),
630
+ json_schema: import_zod4.z.object({
631
+ name: import_zod4.z.string().min(1),
632
+ description: import_zod4.z.string().min(1).optional(),
633
+ strict: import_zod4.z.boolean().optional(),
634
+ schema: import_zod4.z.any()
635
+ })
636
+ })
637
+ );
638
+ var XAIChatRequestTextContent = import_zod4.z.object({
639
+ text: import_zod4.z.string().min(1),
640
+ type: import_zod4.z.literal("text")
641
+ });
642
+ var XAIChatRequestImageContent = import_zod4.z.object({
643
+ type: import_zod4.z.literal("image_url"),
644
+ image_url: import_zod4.z.object({
645
+ url: import_zod4.z.string().min(1),
646
+ detail: import_zod4.z.enum(["low", "high", "auto"]).optional()
647
+ })
648
+ });
649
+ var XAIChatRequestToolCallContent = import_zod4.z.object({
650
+ id: import_zod4.z.string().min(1),
651
+ type: import_zod4.z.literal("function"),
652
+ function: import_zod4.z.object({
653
+ name: import_zod4.z.string().min(1),
654
+ arguments: import_zod4.z.string().min(1)
655
+ })
656
+ });
657
+ var XAIChatRequestSystemMessage = import_zod4.z.object({
658
+ role: import_zod4.z.literal("system"),
659
+ content: import_zod4.z.string().min(1).or(import_zod4.z.array(XAIChatRequestTextContent).min(1))
660
+ });
661
+ var XAIChatRequestUserMessage = import_zod4.z.object({
662
+ role: import_zod4.z.literal("user"),
663
+ content: import_zod4.z.string().min(1).or(import_zod4.z.array(import_zod4.z.union([XAIChatRequestTextContent, XAIChatRequestImageContent])).min(1))
664
+ });
665
+ var XAIChatRequestAssistantMessage = import_zod4.z.object({
666
+ role: import_zod4.z.literal("assistant"),
667
+ content: import_zod4.z.string().min(1).or(import_zod4.z.array(XAIChatRequestTextContent).min(1)).optional(),
668
+ tool_calls: import_zod4.z.array(XAIChatRequestToolCallContent).min(1).optional()
669
+ });
670
+ var XAIChatRequestToolMessage = import_zod4.z.object({
671
+ role: import_zod4.z.literal("tool"),
672
+ tool_call_id: import_zod4.z.string().min(1),
673
+ content: import_zod4.z.string().min(1)
674
+ });
675
+ var XAIChatRequestMessage = import_zod4.z.union([
676
+ XAIChatRequestSystemMessage,
677
+ XAIChatRequestUserMessage,
678
+ XAIChatRequestAssistantMessage,
679
+ XAIChatRequestToolMessage
680
+ ]);
681
+ var XAIChatRequest = import_zod4.z.object({
682
+ model: import_zod4.z.string().min(1).optional(),
683
+ messages: import_zod4.z.array(XAIChatRequestMessage).min(1),
684
+ frequency_penalty: import_zod4.z.number().min(-2).max(2).nullable().optional(),
685
+ logprobs: import_zod4.z.boolean().nullable().optional(),
686
+ top_logprobs: import_zod4.z.number().min(0).max(20).nullable().optional(),
687
+ max_tokens: import_zod4.z.number().min(0).nullable().optional(),
688
+ presence_penalty: import_zod4.z.number().min(-2).max(2).nullable().optional(),
689
+ response_format: XAIChatRequestResponseFormat.optional(),
690
+ seed: import_zod4.z.number().nullable().optional(),
691
+ stop: import_zod4.z.string().or(import_zod4.z.array(import_zod4.z.string()).max(4)).nullable().optional(),
692
+ temperature: import_zod4.z.number().min(0).max(2).nullable().optional(),
693
+ top_p: import_zod4.z.number().min(0).max(1).nullable().optional(),
694
+ tools: import_zod4.z.array(XAIChatRequestTool).optional(),
695
+ tool_choice: XAIChatRequestToolChoiceEnum.or(XAIChatRequestToolChoiceFunction).optional(),
696
+ reasoning_effort: import_zod4.z.enum(["low", "high"]).optional()
697
+ });
698
+
699
+ // src/models/chat-models/base-chat-model.xai.ts
700
+ var BaseChatModelOptions = import_zod5.z.object({
701
+ modelName: import_zod5.z.string(),
702
+ apiKey: import_zod5.z.string(),
703
+ baseUrl: import_zod5.z.string().url().optional(),
704
+ completeChatUrl: import_zod5.z.string().url().optional(),
705
+ streamChatUrl: import_zod5.z.string().url().optional()
706
+ });
707
+ var BaseChatModel = class {
708
+ constructor(modelSchema, options) {
709
+ this.version = "v1";
710
+ const parsedOptions = BaseChatModelOptions.parse(options);
711
+ this.modelSchema = modelSchema;
712
+ this.modelName = parsedOptions.modelName;
713
+ this.apiKey = parsedOptions.apiKey;
714
+ this.baseUrl = (0, import_provider.urlWithoutTrailingSlash)(parsedOptions.baseUrl || XAI.baseUrl);
715
+ this.streamChatUrl = (0, import_provider.urlWithoutTrailingSlash)(parsedOptions.streamChatUrl || `${this.baseUrl}/chat/completions`);
716
+ this.completeChatUrl = (0, import_provider.urlWithoutTrailingSlash)(parsedOptions.completeChatUrl || `${this.baseUrl}/chat/completions`);
717
+ }
718
+ getDefaultBaseUrl() {
719
+ return this.baseUrl;
720
+ }
721
+ getDefaultHeaders() {
722
+ return {
723
+ Authorization: `Bearer ${this.apiKey}`,
724
+ "Content-Type": "application/json"
725
+ };
726
+ }
727
+ getDefaultParams() {
728
+ return {
729
+ model: this.modelName
730
+ };
731
+ }
732
+ getRetryDelay(responseHeaders) {
733
+ const parseDuration = (duration) => {
734
+ const regex = /(\d+)(h|m|s|ms)/g;
735
+ const timeUnits = {
736
+ h: 36e5,
737
+ m: 6e4,
738
+ s: 1e3,
739
+ ms: 1
740
+ };
741
+ let match;
742
+ let totalMs = 0;
743
+ while ((match = regex.exec(duration)) !== null) {
744
+ const value = parseInt(match[1]);
745
+ const unit = match[2];
746
+ totalMs += value * timeUnits[unit];
747
+ }
748
+ return totalMs;
749
+ };
750
+ let resetRequestsDelayMs = 0;
751
+ let resetTokensDelayMs = 0;
752
+ const shouldRetry = true;
753
+ if (responseHeaders["x-ratelimit-reset-requests"]) {
754
+ resetRequestsDelayMs = parseDuration(responseHeaders["x-ratelimit-reset-requests"]);
755
+ }
756
+ if (responseHeaders["x-ratelimit-reset-tokens"]) {
757
+ resetTokensDelayMs = parseDuration(responseHeaders["x-ratelimit-reset-tokens"]);
758
+ }
759
+ const delayMs = Math.max(resetRequestsDelayMs, resetTokensDelayMs);
760
+ return { shouldRetry, delayMs };
761
+ }
762
+ getTokenCount(messages) {
763
+ return messages.reduce((acc, message) => {
764
+ return acc + message.content.map((content) => content.modality === "text" ? content.value : "").join(" ").length;
765
+ }, 0);
766
+ }
767
+ transformModelRequest(request) {
768
+ const safeRequest = XAIChatRequest.safeParse(request);
769
+ if (!safeRequest.success) {
770
+ throw new import_provider.InvalidModelRequestError({ info: "Invalid model request", cause: safeRequest.error });
771
+ }
772
+ const parsedRequest = safeRequest.data;
773
+ const modelName = parsedRequest.model;
774
+ if (parsedRequest.tool_choice && (!parsedRequest.tools || parsedRequest.tools.length === 0)) {
775
+ throw new import_provider.InvalidModelRequestError({
776
+ info: `Invalid model request for model : '${this.modelName}'`,
777
+ cause: new Error("'tools' are required when 'tool_choice' is specified")
778
+ });
779
+ }
780
+ const _config = {};
781
+ if (parsedRequest.response_format) {
782
+ _config.responseFormat = parsedRequest.response_format.type;
783
+ if (parsedRequest.response_format.type === "json_schema") {
784
+ _config.responseSchema = {
785
+ name: parsedRequest.response_format.json_schema.name,
786
+ description: parsedRequest.response_format.json_schema.description || "",
787
+ strict: parsedRequest.response_format.json_schema.strict,
788
+ schema: parsedRequest.response_format.json_schema.schema
789
+ };
790
+ }
791
+ }
792
+ if (parsedRequest.tool_choice) {
793
+ if (typeof parsedRequest.tool_choice === "string") {
794
+ _config.toolChoice = parsedRequest.tool_choice;
795
+ } else {
796
+ _config.toolChoice = parsedRequest.tool_choice.function.name;
797
+ }
798
+ }
799
+ _config.seed = parsedRequest.seed;
800
+ _config.maxTokens = parsedRequest.max_tokens;
801
+ _config.temperature = parsedRequest.temperature;
802
+ _config.topP = parsedRequest.top_p;
803
+ _config.presencePenalty = parsedRequest.presence_penalty;
804
+ _config.frequencyPenalty = parsedRequest.frequency_penalty;
805
+ _config.stop = parsedRequest.stop;
806
+ _config.logProbs = parsedRequest.logprobs;
807
+ _config.topLogProbs = parsedRequest.top_logprobs;
808
+ _config.reasoningEffort = parsedRequest.reasoning_effort;
809
+ const config = (0, import_types3.Config)().parse((0, import_provider.removeUndefinedEntries)(_config));
810
+ const messages = [];
811
+ const toolCallMap = {};
812
+ parsedRequest.messages.forEach((message) => {
813
+ const role = message.role;
814
+ switch (role) {
815
+ case "system":
816
+ {
817
+ const content = message.content;
818
+ if (typeof content === "string") {
819
+ messages.push({
820
+ role,
821
+ content: [{ modality: import_types3.TextModalityLiteral, value: content }]
822
+ });
823
+ } else {
824
+ const _content = content.map((c) => {
825
+ return { modality: import_types3.TextModalityLiteral, value: c.text };
826
+ });
827
+ messages.push({ role, content: _content });
828
+ }
829
+ }
830
+ break;
831
+ case "user":
832
+ {
833
+ const content = message.content;
834
+ if (typeof content === "string") {
835
+ messages.push({
836
+ role,
837
+ content: [{ modality: import_types3.TextModalityLiteral, value: content }]
838
+ });
839
+ } else {
840
+ const _content = content.map((c) => {
841
+ if (c.type === "text") {
842
+ return { modality: import_types3.TextModalityLiteral, value: c.text };
843
+ } else {
844
+ if (c.image_url.url.startsWith("data:")) {
845
+ return {
846
+ modality: import_types3.ImageModalityLiteral,
847
+ detail: c.image_url.detail || "auto",
848
+ value: {
849
+ type: import_types3.Base64ImageContentTypeLiteral,
850
+ base64: c.image_url.url,
851
+ mediaType: (0, import_provider.getMimeTypeFromBase64)(c.image_url.url)
852
+ }
853
+ };
854
+ } else {
855
+ return {
856
+ modality: import_types3.ImageModalityLiteral,
857
+ detail: c.image_url.detail || "auto",
858
+ value: { type: import_types3.UrlImageContentTypeLiteral, url: c.image_url.url }
859
+ };
860
+ }
861
+ }
862
+ });
863
+ messages.push({ role, content: _content });
864
+ }
865
+ }
866
+ break;
867
+ case "assistant":
868
+ {
869
+ const assistantContent = [];
870
+ if (!message.content && !message.tool_calls) {
871
+ throw new import_provider.InvalidModelRequestError({
872
+ info: `Invalid model request for model : '${this.modelName}'`,
873
+ cause: new Error("one of'content' or 'tool_calls' must be provided")
874
+ });
875
+ }
876
+ if (message.content) {
877
+ const content = message.content;
878
+ if (typeof content === "string") {
879
+ assistantContent.push({ modality: import_types3.TextModalityLiteral, value: content });
880
+ } else {
881
+ content.forEach((c) => {
882
+ assistantContent.push({ modality: import_types3.TextModalityLiteral, value: c.text });
883
+ });
884
+ }
885
+ }
886
+ if (message.tool_calls) {
887
+ const toolCalls = message.tool_calls;
888
+ toolCalls.forEach((toolCall, index) => {
889
+ const toolCallContent = {
890
+ modality: import_types3.ToolCallModalityLiteral,
891
+ id: toolCall.id,
892
+ index,
893
+ name: toolCall.function.name,
894
+ arguments: toolCall.function.arguments
895
+ };
896
+ assistantContent.push(toolCallContent);
897
+ toolCallMap[toolCallContent.id] = toolCallContent;
898
+ });
899
+ }
900
+ messages.push({ role, content: assistantContent });
901
+ }
902
+ break;
903
+ case "tool":
904
+ {
905
+ const toolResponse = message;
906
+ messages.push({
907
+ role,
908
+ content: [
909
+ {
910
+ modality: import_types3.ToolResponseModalityLiteral,
911
+ id: toolResponse.tool_call_id,
912
+ index: toolCallMap[toolResponse.tool_call_id].index,
913
+ name: toolCallMap[toolResponse.tool_call_id].name,
914
+ data: toolResponse.content
915
+ }
916
+ ]
917
+ });
918
+ }
919
+ break;
920
+ }
921
+ });
922
+ const tools = [];
923
+ if (parsedRequest.tools) {
924
+ parsedRequest.tools.forEach((tool) => {
925
+ tools.push({
926
+ type: "function",
927
+ definition: {
928
+ schema: {
929
+ name: tool.function.name,
930
+ description: tool.function.description || "",
931
+ strict: tool.function.strict,
932
+ parameters: tool.function.parameters
933
+ }
934
+ }
935
+ });
936
+ });
937
+ }
938
+ return {
939
+ modelName,
940
+ config,
941
+ messages,
942
+ tools: tools.length > 0 ? tools : void 0
943
+ };
944
+ }
945
+ // eslint-disable-next-line @typescript-eslint/no-unused-vars
946
+ transformConfig(config, messages, tools) {
947
+ const _toolChoice = config.toolChoice;
948
+ delete config.toolChoice;
949
+ const _parsedConfig = this.modelSchema.config.schema.safeParse(config);
950
+ if (!_parsedConfig.success) {
951
+ throw new import_provider.InvalidConfigError({
952
+ info: `Invalid config for model : '${this.modelName}'`,
953
+ cause: _parsedConfig.error
954
+ });
955
+ }
956
+ const parsedConfig = _parsedConfig.data;
957
+ if (_toolChoice !== void 0) {
958
+ parsedConfig.toolChoice = _toolChoice;
959
+ }
960
+ Object.keys(parsedConfig).forEach((key) => {
961
+ if (!(key in this.modelSchema.config.def)) {
962
+ throw new import_provider.InvalidConfigError({
963
+ info: `Invalid config for model : '${this.modelName}'`,
964
+ cause: new Error(`Invalid config key : '${key}',
965
+ available keys : [${Object.keys(this.modelSchema.config.def).join(", ")}]`)
966
+ });
967
+ }
968
+ });
969
+ const transformedConfig = Object.keys(parsedConfig).reduce((acc, key) => {
970
+ const def = this.modelSchema.config.def[key];
971
+ const paramKey = def.param;
972
+ const paramValue = parsedConfig[key];
973
+ if (paramKey === "max_tokens" && def.type === "range" && paramValue === 0) {
974
+ acc[paramKey] = def.max;
975
+ } else {
976
+ acc[paramKey] = paramValue;
977
+ }
978
+ return acc;
979
+ }, {});
980
+ if (transformedConfig.top_logprobs && !transformedConfig.logprobs) {
981
+ throw new import_provider.InvalidConfigError({
982
+ info: `Invalid config for model : '${this.modelName}'`,
983
+ cause: new Error("'logprobs' must be 'true' when 'top_logprobs' is specified")
984
+ });
985
+ }
986
+ if ("tool_choice" in transformedConfig && transformedConfig.tool_choice !== void 0) {
987
+ const toolChoice2 = transformedConfig.tool_choice;
988
+ if (!tools || tools && tools.length === 0) {
989
+ throw new import_provider.InvalidConfigError({
990
+ info: `Invalid config for model : '${this.modelName}'`,
991
+ cause: new Error("'tools' are required when 'toolChoice' is specified")
992
+ });
993
+ } else if (tools && tools.length > 0) {
994
+ const configToolChoice = this.modelSchema.config.def.toolChoice;
995
+ if (!configToolChoice.choices.includes(toolChoice2)) {
996
+ if (tools.map((tool) => tool.definition.schema.name).includes(toolChoice2)) {
997
+ transformedConfig.tool_choice = { type: "function", function: { name: toolChoice2 } };
998
+ } else {
999
+ throw new import_provider.InvalidConfigError({
1000
+ info: `Invalid config for model : '${this.modelName}'`,
1001
+ cause: new Error(`toolChoice : '${toolChoice2}' is not part of provided 'tools' names or
1002
+ one of [${configToolChoice.choices.join(", ")}]`)
1003
+ });
1004
+ }
1005
+ }
1006
+ }
1007
+ }
1008
+ if ("response_format" in transformedConfig && transformedConfig.response_format !== void 0) {
1009
+ const responseFormat2 = transformedConfig.response_format;
1010
+ if (responseFormat2 === "json_schema") {
1011
+ if (!("response_schema" in transformedConfig)) {
1012
+ throw new import_provider.InvalidConfigError({
1013
+ info: `Invalid config for model : '${this.modelName}'`,
1014
+ cause: new Error("'responseSchema' is required in config when 'responseFormat' is 'json_schema'")
1015
+ });
1016
+ } else {
1017
+ transformedConfig.response_format = {
1018
+ type: "json_schema",
1019
+ json_schema: transformedConfig.response_schema
1020
+ };
1021
+ delete transformedConfig.response_schema;
1022
+ }
1023
+ } else {
1024
+ transformedConfig.response_format = { type: responseFormat2 };
1025
+ }
1026
+ }
1027
+ return transformedConfig;
1028
+ }
1029
  /**
   * Validates Adaline-format messages and converts them into xAI
   * (OpenAI-style) chat messages. Non-conversational modalities ("error",
   * "search-result") are dropped before conversion.
   * @throws InvalidMessagesError on schema failure, unsupported
   *         modality/role, or malformed assistant/tool messages.
   */
  transformMessages(messages) {
    if (!messages || messages && messages.length === 0) {
      return { messages: [] };
    }
    // Schema-validate every message up front.
    const parsedMessages = messages.map((message) => {
      const parsedMessage = (0, import_types3.Message)().safeParse(message);
      if (!parsedMessage.success) {
        throw new import_provider.InvalidMessagesError({ info: "Invalid messages", cause: parsedMessage.error });
      }
      return parsedMessage.data;
    });
    // Reject modalities the model schema does not declare.
    parsedMessages.forEach((message) => {
      message.content.forEach((content) => {
        if (!this.modelSchema.modalities.includes(content.modality)) {
          throw new import_provider.InvalidMessagesError({
            info: `Invalid message content for model : '${this.modelName}'`,
            cause: new Error(`model : '${this.modelName}' does not support modality : '${content.modality}',
available modalities : [${this.modelSchema.modalities.join(", ")}]`)
          });
        }
      });
    });
    // Reject roles the model schema does not declare.
    parsedMessages.forEach((message) => {
      if (!Object.keys(this.modelSchema.roles).includes(message.role)) {
        throw new import_provider.InvalidMessagesError({
          info: `Invalid message content for model : '${this.modelName}'`,
          cause: new Error(`model : '${this.modelName}' does not support role : '${message.role}',
available roles : [${Object.keys(this.modelSchema.roles).join(", ")}]`)
        });
      }
    });
    // Strip modalities that have no representation on the xAI wire format.
    parsedMessages.forEach((message) => {
      message.content = message.content.filter(
        (content) => content.modality !== "error" && content.modality !== "search-result"
      );
    });
    const transformedMessages = parsedMessages.map((message) => {
      switch (message.role) {
        case import_types3.SystemRoleLiteral: {
          // System messages may only carry text content.
          const textContent = [];
          message.content.forEach((content) => {
            if (content.modality === import_types3.TextModalityLiteral) {
              textContent.push({ type: "text", text: content.value });
            } else {
              throw new import_provider.InvalidMessagesError({
                info: `Invalid message content for model : '${this.modelName}'`,
                cause: new Error(`model : '${this.modelName}' does not support modality : '${content.modality}' for role : 'system'`)
              });
            }
          });
          return { role: "system", content: textContent };
        }
        case import_types3.UserRoleLiteral: {
          // User messages support text plus base64 or URL images.
          const userContent = [];
          message.content.forEach((content) => {
            if (content.modality === import_types3.TextModalityLiteral) {
              userContent.push({ type: "text", text: content.value });
            } else if (content.modality === import_types3.ImageModalityLiteral) {
              if (content.value.type === import_types3.Base64ImageContentTypeLiteral) {
                userContent.push({
                  type: "image_url",
                  image_url: { url: content.value.base64, detail: content.detail }
                });
              } else if (content.value.type === import_types3.UrlImageContentTypeLiteral) {
                userContent.push({
                  type: "image_url",
                  image_url: { url: content.value.url, detail: content.detail }
                });
              }
            } else {
              throw new import_provider.InvalidMessagesError({
                info: `Invalid message content for model : '${this.modelName}'`,
                cause: new Error(`model : '${this.modelName}' does not support modality : '${content.modality}' for role : 'user'`)
              });
            }
          });
          return { role: "user", content: userContent };
        }
        case import_types3.AssistantRoleLiteral: {
          // Assistant messages may mix text and tool calls; at least one of
          // the two must be present.
          const textContent = [];
          const toolCalls = [];
          message.content.forEach((content) => {
            if (content.modality === import_types3.TextModalityLiteral) {
              textContent.push({ type: "text", text: content.value });
            } else if (content.modality === import_types3.ToolCallModalityLiteral) {
              toolCalls.push({
                id: content.id,
                type: "function",
                function: { name: content.name, arguments: content.arguments }
              });
            } else {
              throw new import_provider.InvalidMessagesError({
                info: `Invalid message content for model : '${this.modelName}'`,
                cause: new Error(`model : '${this.modelName}' does not support modality : '${content.modality}' for role : 'assistant'`)
              });
            }
          });
          // Emit only the fields that are populated.
          if (textContent.length > 0 && toolCalls.length > 0) {
            return { role: "assistant", content: textContent, tool_calls: toolCalls };
          } else if (textContent.length > 0) {
            return { role: "assistant", content: textContent };
          } else if (toolCalls.length > 0) {
            return { role: "assistant", tool_calls: toolCalls };
          } else {
            throw new import_provider.InvalidMessagesError({
              info: `Invalid message content for model : '${this.modelName}'`,
              cause: new Error("assistant message must have at least one text or tool_call content")
            });
          }
        }
        case import_types3.ToolRoleLiteral: {
          // Tool messages are reduced to their first tool-response content.
          const toolResponseContent = message.content.find(
            (content) => content.modality === import_types3.ToolResponseModalityLiteral
          );
          if (!toolResponseContent) {
            throw new import_provider.InvalidMessagesError({
              info: `Invalid message content for model : '${this.modelName}'`,
              cause: new Error("tool message must have tool_response content")
            });
          }
          return {
            role: "tool",
            tool_call_id: toolResponseContent.id,
            content: typeof toolResponseContent.data === "string" ? toolResponseContent.data : JSON.stringify(toolResponseContent.data)
          };
        }
        default:
          throw new import_provider.InvalidMessagesError({
            info: `Invalid message content for model : '${this.modelName}'`,
            cause: new Error(`model : '${this.modelName}' does not support role : '${message.role}'`)
          });
      }
    });
    return { messages: transformedMessages };
  }
1164
+ transformTools(tools) {
1165
+ if (!tools || tools && tools.length === 0) {
1166
+ return {};
1167
+ }
1168
+ if (!this.modelSchema.modalities.includes(import_types3.ToolCallModalityLiteral)) {
1169
+ throw new import_provider.InvalidToolsError({
1170
+ info: `Invalid tools for model : '${this.modelName}'`,
1171
+ cause: new Error(`model : '${this.modelName}' does not support tools`)
1172
+ });
1173
+ }
1174
+ const parsedTools = tools.map((tool) => {
1175
+ const parsedTool = (0, import_types3.Tool)().safeParse(tool);
1176
+ if (!parsedTool.success) {
1177
+ throw new import_provider.InvalidToolsError({ info: "Invalid tools", cause: parsedTool.error });
1178
+ }
1179
+ return parsedTool.data;
1180
+ });
1181
+ const transformedTools = parsedTools.map((tool) => ({
1182
+ type: "function",
1183
+ function: tool.definition.schema
1184
+ }));
1185
+ return { tools: transformedTools };
1186
+ }
1187
+ // eslint-disable-next-line @typescript-eslint/no-unused-vars
1188
+ getCompleteChatUrl(config, messages, tools) {
1189
+ return __async(this, null, function* () {
1190
+ return new Promise((resolve) => {
1191
+ resolve(this.completeChatUrl);
1192
+ });
1193
+ });
1194
+ }
1195
+ // eslint-disable-next-line @typescript-eslint/no-unused-vars
1196
+ getCompleteChatHeaders(config, messages, tools) {
1197
+ return __async(this, null, function* () {
1198
+ return new Promise((resolve) => {
1199
+ resolve(this.getDefaultHeaders());
1200
+ });
1201
+ });
1202
+ }
1203
+ getCompleteChatData(config, messages, tools) {
1204
+ return __async(this, null, function* () {
1205
+ const transformedConfig = this.transformConfig(config, messages, tools);
1206
+ const transformedMessages = this.transformMessages(messages);
1207
+ if (transformedMessages.messages && transformedMessages.messages.length === 0) {
1208
+ throw new import_provider.InvalidMessagesError({
1209
+ info: "Messages are required",
1210
+ cause: new Error("Messages are required")
1211
+ });
1212
+ }
1213
+ const transformedTools = tools ? this.transformTools(tools) : {};
1214
+ return new Promise((resolve) => {
1215
+ resolve(__spreadValues(__spreadValues(__spreadValues(__spreadValues({}, this.getDefaultParams()), transformedConfig), transformedMessages), transformedTools));
1216
+ });
1217
+ });
1218
+ }
1219
  /**
   * Parses and validates a non-streaming chat-completions response, mapping
   * it to Adaline messages, token usage, and (optionally) log probabilities.
   * Only the first choice is consumed.
   * @throws ModelResponseError when the response fails schema validation.
   */
  transformCompleteChatResponse(response) {
    const parsedResponse = XAICompleteChatResponse.safeParse(response);
    if (!parsedResponse.success) {
      throw new import_provider.ModelResponseError({ info: "Invalid response from model", cause: parsedResponse.error });
    }
    const data = parsedResponse.data;
    const choice = data.choices[0];
    const messages = [];
    const assistantContent = [];
    if (choice.message.content) {
      assistantContent.push((0, import_types3.createTextContent)(choice.message.content));
    }
    if (choice.message.tool_calls) {
      choice.message.tool_calls.forEach((toolCall, index) => {
        assistantContent.push((0, import_types3.createToolCallContent)(index, toolCall.id, toolCall.function.name, toolCall.function.arguments));
      });
    }
    // Only emit an assistant message when the choice produced any content.
    if (assistantContent.length > 0) {
      messages.push({ role: import_types3.AssistantRoleLiteral, content: assistantContent });
    }
    // Normalize snake_case usage fields to camelCase.
    const usage = {
      promptTokens: data.usage.prompt_tokens,
      completionTokens: data.usage.completion_tokens,
      totalTokens: data.usage.total_tokens
    };
    let logProbs2;
    if (choice.logprobs && choice.logprobs.content) {
      logProbs2 = choice.logprobs.content.map((logProb) => ({
        token: logProb.token,
        logProb: logProb.logprob,
        bytes: logProb.bytes,
        topLogProbs: logProb.top_logprobs.map((topLogProb) => ({
          token: topLogProb.token,
          logProb: topLogProb.logprob,
          bytes: topLogProb.bytes
        }))
      }));
    }
    return {
      messages,
      usage,
      logProbs: logProbs2
    };
  }
1263
+ // eslint-disable-next-line @typescript-eslint/no-unused-vars
1264
+ getStreamChatUrl(config, messages, tools) {
1265
+ return __async(this, null, function* () {
1266
+ return new Promise((resolve) => {
1267
+ resolve(this.streamChatUrl);
1268
+ });
1269
+ });
1270
+ }
1271
+ // eslint-disable-next-line @typescript-eslint/no-unused-vars
1272
+ getStreamChatHeaders(config, messages, tools) {
1273
+ return __async(this, null, function* () {
1274
+ return new Promise((resolve) => {
1275
+ resolve(this.getDefaultHeaders());
1276
+ });
1277
+ });
1278
+ }
1279
+ getStreamChatData(config, messages, tools) {
1280
+ return __async(this, null, function* () {
1281
+ const transformedConfig = this.transformConfig(config, messages, tools);
1282
+ const transformedMessages = this.transformMessages(messages);
1283
+ if (transformedMessages.messages && transformedMessages.messages.length === 0) {
1284
+ throw new import_provider.InvalidMessagesError({
1285
+ info: "Messages are required",
1286
+ cause: new Error("Messages are required")
1287
+ });
1288
+ }
1289
+ const transformedTools = tools ? this.transformTools(tools) : {};
1290
+ return new Promise((resolve) => {
1291
+ resolve(__spreadValues(__spreadValues(__spreadValues(__spreadValues({
1292
+ stream: true,
1293
+ stream_options: { include_usage: true }
1294
+ }, this.getDefaultParams()), transformedConfig), transformedMessages), transformedTools));
1295
+ });
1296
+ });
1297
+ }
1298
+ transformStreamChatResponseChunk(chunk, buffer) {
1299
+ return __asyncGenerator(this, null, function* () {
1300
+ var _a, _b;
1301
+ const data = buffer + chunk;
1302
+ const lines = [];
1303
+ let newBuffer = "";
1304
+ let currentIndex = 0;
1305
+ while (currentIndex < data.length) {
1306
+ const newlineIndex = data.indexOf("\n", currentIndex);
1307
+ if (newlineIndex === -1) {
1308
+ newBuffer = data.substring(currentIndex);
1309
+ break;
1310
+ } else {
1311
+ const line = data.substring(currentIndex, newlineIndex).trim();
1312
+ if (line) {
1313
+ lines.push(line);
1314
+ }
1315
+ currentIndex = newlineIndex + 1;
1316
+ }
1317
+ }
1318
+ for (const line of lines) {
1319
+ if (line === "data: [DONE]") {
1320
+ return;
1321
+ }
1322
+ if (line.startsWith("data: ")) {
1323
+ const jsonStr = line.substring("data: ".length);
1324
+ try {
1325
+ const structuredLine = JSON.parse(jsonStr);
1326
+ const safe = XAIStreamChatResponse.safeParse(structuredLine);
1327
+ if (safe.success) {
1328
+ const partialResponse = { partialMessages: [] };
1329
+ const parsedResponse = safe.data;
1330
+ if (parsedResponse.choices.length > 0) {
1331
+ const message = parsedResponse.choices[0].delta;
1332
+ if (message !== void 0 && Object.keys(message).length !== 0) {
1333
+ if ("content" in message && message.content !== null) {
1334
+ partialResponse.partialMessages.push((0, import_types3.createPartialTextMessage)(import_types3.AssistantRoleLiteral, message.content));
1335
+ } else if ("tool_calls" in message && message.tool_calls !== void 0) {
1336
+ const toolCall = message.tool_calls.at(0);
1337
+ partialResponse.partialMessages.push(
1338
+ (0, import_types3.createPartialToolCallMessage)(
1339
+ import_types3.AssistantRoleLiteral,
1340
+ toolCall.index,
1341
+ toolCall.id,
1342
+ (_a = toolCall.function) == null ? void 0 : _a.name,
1343
+ (_b = toolCall.function) == null ? void 0 : _b.arguments
1344
+ )
1345
+ );
1346
+ }
1347
+ }
1348
+ }
1349
+ if (parsedResponse.usage) {
1350
+ partialResponse.usage = {
1351
+ promptTokens: parsedResponse.usage.prompt_tokens,
1352
+ completionTokens: parsedResponse.usage.completion_tokens,
1353
+ totalTokens: parsedResponse.usage.total_tokens
1354
+ };
1355
+ }
1356
+ yield { partialResponse, buffer: newBuffer };
1357
+ } else {
1358
+ throw new import_provider.ModelResponseError({ info: "Invalid response from model", cause: safe.error });
1359
+ }
1360
+ } catch (error) {
1361
+ throw new import_provider.ModelResponseError({
1362
+ info: `Malformed JSON received in stream: ${jsonStr}`,
1363
+ cause: error
1364
+ });
1365
+ }
1366
+ }
1367
+ }
1368
+ yield { partialResponse: { partialMessages: [] }, buffer: newBuffer };
1369
+ });
1370
+ }
1371
  /**
   * Proxy variant of stream-chunk parsing. The proxied request's
   * data/headers/query are accepted for interface parity but ignored; parsing
   * is delegated wholesale to transformStreamChatResponseChunk.
   */
  // eslint-disable-next-line @typescript-eslint/no-unused-vars
  transformProxyStreamChatResponseChunk(chunk, buffer, data, headers, query) {
    return __asyncGenerator(this, null, function* () {
      yield* __yieldStar(this.transformStreamChatResponseChunk(chunk, buffer));
    });
  }
1377
+ // eslint-disable-next-line @typescript-eslint/no-unused-vars
1378
+ getProxyStreamChatUrl(data, headers, query) {
1379
+ return __async(this, null, function* () {
1380
+ return new Promise((resolve) => {
1381
+ resolve(this.streamChatUrl);
1382
+ });
1383
+ });
1384
+ }
1385
+ // eslint-disable-next-line @typescript-eslint/no-unused-vars
1386
+ getProxyCompleteChatUrl(data, headers, query) {
1387
+ return __async(this, null, function* () {
1388
+ return new Promise((resolve) => {
1389
+ resolve(this.completeChatUrl);
1390
+ });
1391
+ });
1392
+ }
1393
+ // eslint-disable-next-line @typescript-eslint/no-unused-vars
1394
+ getProxyCompleteChatHeaders(data, headers, query) {
1395
+ return __async(this, null, function* () {
1396
+ if (!headers) {
1397
+ return {};
1398
+ }
1399
+ const sanitizedHeaders = __spreadValues({}, headers);
1400
+ delete sanitizedHeaders.host;
1401
+ delete sanitizedHeaders["content-length"];
1402
+ return sanitizedHeaders;
1403
+ });
1404
+ }
1405
+ // eslint-disable-next-line @typescript-eslint/no-unused-vars
1406
+ getProxyStreamChatHeaders(data, headers, query) {
1407
+ return __async(this, null, function* () {
1408
+ return yield this.getProxyCompleteChatHeaders(data, headers, query);
1409
+ });
1410
+ }
1411
+ getModelPricing() {
1412
+ if (!(this.modelName in pricing_default)) {
1413
+ throw new import_provider.ModelResponseError({
1414
+ info: `Invalid model pricing for model : '${this.modelName}'`,
1415
+ cause: new Error(`No pricing configuration found for model "${this.modelName}"`)
1416
+ });
1417
+ }
1418
+ const entry = pricing_default[this.modelName];
1419
+ return entry;
1420
+ }
1421
+ };
1422
+
1423
// src/models/chat-models/grok-2.xai.ts
var import_provider5 = require("@adaline/provider");

// src/configs/chat-model/index.ts
// Bundler-generated barrel of config builders; __export installs live
// getters so the bindings stay lazily bound to the declarations below.
var chat_model_exports = {};
__export(chat_model_exports, {
  ChatModelBaseConfigDef: () => ChatModelBaseConfigDef,
  ChatModelBaseConfigSchema: () => ChatModelBaseConfigSchema,
  ChatModelMiniReasoningConfigDef: () => ChatModelMiniReasoningConfigDef,
  ChatModelMiniReasoningConfigSchema: () => ChatModelMiniReasoningConfigSchema,
  ChatModelReasoningConfigDef: () => ChatModelReasoningConfigDef,
  ChatModelReasoningConfigSchema: () => ChatModelReasoningConfigSchema,
  ChatModelResponseSchemaConfigDef: () => ChatModelResponseSchemaConfigDef,
  ChatModelResponseSchemaConfigSchema: () => ChatModelResponseSchemaConfigSchema,
  frequencyPenalty: () => frequencyPenalty,
  logProbs: () => logProbs,
  maxTokens: () => maxTokens,
  presencePenalty: () => presencePenalty,
  reasoningEffort: () => reasoningEffort,
  seed: () => seed,
  stop: () => stop,
  temperature: () => temperature,
  toolChoice: () => toolChoice,
  topLogProbs: () => topLogProbs,
  topP: () => topP
});
1449
+
1450
// src/configs/chat-model/base.config.chat-model.xai.ts
var import_zod6 = require("zod");

// src/configs/chat-model/common.config.chat-model.xai.ts
// Shared config-item builders for xAI chat models. Each item pairs a UI
// definition with a zod schema and maps to one xAI API parameter (`param`).
var import_provider3 = require("@adaline/provider");
// Sampling temperature: 0-2, default 1.
var temperature = (0, import_provider3.RangeConfigItem)({
  param: "temperature",
  title: import_provider3.CHAT_CONFIG.TEMPERATURE.title,
  description: import_provider3.CHAT_CONFIG.TEMPERATURE.description,
  min: 0,
  max: 2,
  step: 0.01,
  default: 1
});
// Output-token cap; default 0 is treated elsewhere as "use model maximum".
var maxTokens = (maxOutputTokens) => (0, import_provider3.RangeConfigItem)({
  param: "max_tokens",
  title: import_provider3.CHAT_CONFIG.MAX_TOKENS.title,
  description: import_provider3.CHAT_CONFIG.MAX_TOKENS.description,
  min: 0,
  max: maxOutputTokens,
  step: 1,
  default: 0
});
// Stop sequences, capped at maxSequences entries.
var stop = (maxSequences) => (0, import_provider3.MultiStringConfigItem)({
  param: "stop",
  title: import_provider3.CHAT_CONFIG.STOP(maxSequences).title,
  description: import_provider3.CHAT_CONFIG.STOP(maxSequences).description,
  max: maxSequences
});
// Nucleus sampling probability mass: 0-1, default 1.
var topP = (0, import_provider3.RangeConfigItem)({
  param: "top_p",
  title: import_provider3.CHAT_CONFIG.TOP_P.title,
  description: import_provider3.CHAT_CONFIG.TOP_P.description,
  min: 0,
  max: 1,
  step: 0.01,
  default: 1
});
// Frequency penalty: -2 to 2, default 0.
var frequencyPenalty = (0, import_provider3.RangeConfigItem)({
  param: "frequency_penalty",
  title: import_provider3.CHAT_CONFIG.FREQUENCY_PENALTY.title,
  description: import_provider3.CHAT_CONFIG.FREQUENCY_PENALTY.description,
  min: -2,
  max: 2,
  step: 0.01,
  default: 0
});
// Presence penalty: -2 to 2, default 0.
var presencePenalty = (0, import_provider3.RangeConfigItem)({
  param: "presence_penalty",
  title: import_provider3.CHAT_CONFIG.PRESENCE_PENALTY.title,
  description: import_provider3.CHAT_CONFIG.PRESENCE_PENALTY.description,
  min: -2,
  max: 2,
  step: 0.01,
  default: 0
});
// Deterministic sampling seed; the base schema treats 0 as "unset".
var seed = (0, import_provider3.RangeConfigItem)({
  param: "seed",
  title: import_provider3.CHAT_CONFIG.SEED.title,
  description: import_provider3.CHAT_CONFIG.SEED.description,
  min: 0,
  max: 1e6,
  step: 1,
  default: 0
});
// Whether to return log probabilities for output tokens.
var logProbs = (0, import_provider3.SelectBooleanConfigItem)({
  param: "logprobs",
  title: import_provider3.CHAT_CONFIG.LOG_PROBS.title,
  description: import_provider3.CHAT_CONFIG.LOG_PROBS.description,
  default: false
});
// Number of top alternatives per token (0-20); requires logprobs=true,
// enforced in transformConfig.
var topLogProbs = (0, import_provider3.RangeConfigItem)({
  param: "top_logprobs",
  title: import_provider3.CHAT_CONFIG.TOP_LOG_PROBS.title,
  description: import_provider3.CHAT_CONFIG.TOP_LOG_PROBS.description,
  min: 0,
  max: 20,
  step: 1,
  default: 0
});
// Tool selection mode; a tool *name* is also accepted and resolved in
// transformConfig to the structured function-selection form.
var toolChoice = (0, import_provider3.SelectStringConfigItem)({
  param: "tool_choice",
  title: "Tool choice",
  description: "Controls which (if any) tool is called by the model. 'none' means the model will not call a function. 'auto' means the model can pick between generating a message or calling a tool.",
  default: "auto",
  choices: ["auto", "required", "none"]
});
// Thinking-time control for grok-3-mini reasoning models only.
var reasoningEffort = (0, import_provider3.SelectStringConfigItem)({
  param: "reasoning_effort",
  title: "Reasoning Effort",
  description: "Controls how much time the model spends thinking before responding. 'low' uses minimal thinking time for quick responses, 'high' uses maximum thinking time for complex problems. Only supported by grok-3-mini models.",
  default: "low",
  choices: ["low", "high"]
});
1544
+
1545
// src/configs/chat-model/base.config.chat-model.xai.ts
// Config schema + UI definition for standard (non-reasoning) chat models.
// A seed of 0 is transformed to undefined so it is omitted from requests.
var ChatModelBaseConfigSchema = (maxOutputTokens, maxSequences) => import_zod6.z.object({
  temperature: temperature.schema,
  maxTokens: maxTokens(maxOutputTokens).schema,
  stop: stop(maxSequences).schema,
  topP: topP.schema,
  frequencyPenalty: frequencyPenalty.schema,
  presencePenalty: presencePenalty.schema,
  seed: seed.schema.transform((value) => value === 0 ? void 0 : value),
  logProbs: logProbs.schema,
  topLogProbs: topLogProbs.schema,
  toolChoice: toolChoice.schema
});
var ChatModelBaseConfigDef = (maxOutputTokens, maxSequences) => ({
  temperature: temperature.def,
  maxTokens: maxTokens(maxOutputTokens).def,
  stop: stop(maxSequences).def,
  topP: topP.def,
  frequencyPenalty: frequencyPenalty.def,
  presencePenalty: presencePenalty.def,
  seed: seed.def,
  logProbs: logProbs.def,
  topLogProbs: topLogProbs.def,
  toolChoice: toolChoice.def
});

// src/configs/chat-model/reasoning.config.chat-model.xai.ts
// Reasoning models omit stop sequences and the frequency/presence penalties.
var import_zod7 = require("zod");
var ChatModelReasoningConfigSchema = (maxOutputTokens) => import_zod7.z.object({
  temperature: temperature.schema,
  maxTokens: maxTokens(maxOutputTokens).schema,
  topP: topP.schema,
  seed: seed.schema.transform((value) => value === 0 ? void 0 : value),
  logProbs: logProbs.schema,
  topLogProbs: topLogProbs.schema,
  toolChoice: toolChoice.schema
});
var ChatModelReasoningConfigDef = (maxOutputTokens) => ({
  temperature: temperature.def,
  maxTokens: maxTokens(maxOutputTokens).def,
  topP: topP.def,
  seed: seed.def,
  logProbs: logProbs.def,
  topLogProbs: topLogProbs.def,
  toolChoice: toolChoice.def
});
// Mini-reasoning variant adds reasoningEffort.
// NOTE(review): the schema builder accepts an unused `_maxSequences`
// parameter while the Def builder takes only `maxOutputTokens` — presumably
// kept for call-site signature parity; confirm before changing.
var ChatModelMiniReasoningConfigSchema = (maxOutputTokens, _maxSequences) => import_zod7.z.object({
  temperature: temperature.schema,
  maxTokens: maxTokens(maxOutputTokens).schema,
  topP: topP.schema,
  seed: seed.schema.transform((value) => value === 0 ? void 0 : value),
  logProbs: logProbs.schema,
  topLogProbs: topLogProbs.schema,
  toolChoice: toolChoice.schema,
  reasoningEffort: reasoningEffort.schema
});
var ChatModelMiniReasoningConfigDef = (maxOutputTokens) => ({
  temperature: temperature.def,
  maxTokens: maxTokens(maxOutputTokens).def,
  topP: topP.def,
  seed: seed.def,
  logProbs: logProbs.def,
  topLogProbs: topLogProbs.def,
  toolChoice: toolChoice.def,
  reasoningEffort: reasoningEffort.def
});

// src/configs/chat-model/response-schema.config.chat-model.xai.ts
// Extends the base config with structured-output (JSON schema) support.
var import_provider4 = require("@adaline/provider");
var import_types5 = require("@adaline/types");
var responseSchema = (0, import_provider4.ObjectSchemaConfigItem)({
  param: "response_schema",
  title: import_provider4.CHAT_CONFIG.RESPONSE_SCHEMA.title,
  description: import_provider4.CHAT_CONFIG.RESPONSE_SCHEMA.description,
  objectSchema: import_types5.ResponseSchema
});
var responseFormat = (0, import_provider4.SelectStringConfigItem)({
  param: "response_format",
  title: import_provider4.CHAT_CONFIG.RESPONSE_FORMAT_WITH_SCHEMA.title,
  description: import_provider4.CHAT_CONFIG.RESPONSE_FORMAT_WITH_SCHEMA.description,
  default: "text",
  choices: ["text", "json_object", "json_schema"]
});
var ChatModelResponseSchemaConfigDef = (maxOutputTokens, maxSequences) => __spreadProps(__spreadValues({}, ChatModelBaseConfigDef(maxOutputTokens, maxSequences)), {
  responseFormat: responseFormat.def,
  responseSchema: responseSchema.def
});
var ChatModelResponseSchemaConfigSchema = (maxOutputTokens, maxSequences) => ChatModelBaseConfigSchema(maxOutputTokens, maxSequences).extend({
  responseFormat: responseFormat.schema,
  responseSchema: responseSchema.schema
});
1636
+
1637
// src/models/chat-models/grok-2.xai.ts
// Model definition for "grok-2": text + tool modalities, structured-output
// config (32768 max output tokens, 4 stop sequences), static pricing.
var Grok_2_Literal = "grok-2";
var Grok_2_Description = "Grok-2 is xAI's flagship language model with strong reasoning capabilities and a 131K context window.";
var Grok_2_Schema = (0, import_provider5.ChatModelSchema)(XAIChatModelRoles, XAIChatModelTextToolModalitiesEnum).parse({
  name: Grok_2_Literal,
  description: Grok_2_Description,
  maxInputTokens: 131072,
  maxOutputTokens: 32768,
  roles: XAIChatModelRolesMap,
  modalities: XAIChatModelTextToolModalities,
  config: {
    def: chat_model_exports.ChatModelResponseSchemaConfigDef(32768, 4),
    schema: chat_model_exports.ChatModelResponseSchemaConfigSchema(32768, 4)
  },
  price: pricing_default[Grok_2_Literal]
});
var Grok_2_Options = BaseChatModelOptions;
// Thin subclass binding the schema; all behavior comes from BaseChatModel.
var Grok_2 = class extends BaseChatModel {
  constructor(options) {
    super(Grok_2_Schema, options);
  }
};
1659
+
1660
// src/models/chat-models/grok-2-latest.xai.ts
// Model definition for "grok-2-latest"; identical limits/config to grok-2.
var import_provider6 = require("@adaline/provider");
var Grok_2_Latest_Literal = "grok-2-latest";
var Grok_2_Latest_Description = "Grok-2 Latest is the most recent version of xAI's Grok-2 model with a 131K context window.";
var Grok_2_Latest_Schema = (0, import_provider6.ChatModelSchema)(XAIChatModelRoles, XAIChatModelTextToolModalitiesEnum).parse({
  name: Grok_2_Latest_Literal,
  description: Grok_2_Latest_Description,
  maxInputTokens: 131072,
  maxOutputTokens: 32768,
  roles: XAIChatModelRolesMap,
  modalities: XAIChatModelTextToolModalities,
  config: {
    def: chat_model_exports.ChatModelResponseSchemaConfigDef(32768, 4),
    schema: chat_model_exports.ChatModelResponseSchemaConfigSchema(32768, 4)
  },
  price: pricing_default[Grok_2_Latest_Literal]
});
var Grok_2_Latest_Options = BaseChatModelOptions;
// Thin subclass binding the schema; all behavior comes from BaseChatModel.
var Grok_2_Latest = class extends BaseChatModel {
  constructor(options) {
    super(Grok_2_Latest_Schema, options);
  }
};
1683
+
1684
// src/models/chat-models/grok-2-1212.xai.ts
// Registration for the pinned "grok-2-1212" release; identical limits and
// config to the floating grok-2 entries.
var import_provider7 = require("@adaline/provider");
var Grok_2_1212_Literal = "grok-2-1212";
var Grok_2_1212_Description = "Grok-2 version 1212 is a specific release of xAI's Grok-2 model with a 131K context window.";
var Grok_2_1212_Schema = (0, import_provider7.ChatModelSchema)(XAIChatModelRoles, XAIChatModelTextToolModalitiesEnum).parse({
  name: Grok_2_1212_Literal,
  description: Grok_2_1212_Description,
  maxInputTokens: 131072,
  maxOutputTokens: 32768,
  roles: XAIChatModelRolesMap,
  modalities: XAIChatModelTextToolModalities,
  config: {
    def: chat_model_exports.ChatModelResponseSchemaConfigDef(32768, 4),
    schema: chat_model_exports.ChatModelResponseSchemaConfigSchema(32768, 4)
  },
  price: pricing_default[Grok_2_1212_Literal]
});
var Grok_2_1212_Options = BaseChatModelOptions;
var Grok_2_1212 = class extends BaseChatModel {
  constructor(options) {
    super(Grok_2_1212_Schema, options);
  }
};
1707
+
1708
// src/models/chat-models/grok-3-beta.xai.ts
// Registration for "grok-3-beta": 131K input AND 131K output token caps,
// structured-output config sized to the output cap.
var import_provider8 = require("@adaline/provider");
var Grok_3_Beta_Literal = "grok-3-beta";
var Grok_3_Beta_Description = "Grok-3 Beta is xAI's latest flagship model for enterprise tasks with a 131K context window.";
var Grok_3_Beta_Schema = (0, import_provider8.ChatModelSchema)(XAIChatModelRoles, XAIChatModelTextToolModalitiesEnum).parse({
  name: Grok_3_Beta_Literal,
  description: Grok_3_Beta_Description,
  maxInputTokens: 131072,
  maxOutputTokens: 131072,
  roles: XAIChatModelRolesMap,
  modalities: XAIChatModelTextToolModalities,
  config: {
    def: chat_model_exports.ChatModelResponseSchemaConfigDef(131072, 4),
    schema: chat_model_exports.ChatModelResponseSchemaConfigSchema(131072, 4)
  },
  price: pricing_default[Grok_3_Beta_Literal]
});
var Grok_3_Beta_Options = BaseChatModelOptions;
var Grok_3_Beta = class extends BaseChatModel {
  constructor(options) {
    super(Grok_3_Beta_Schema, options);
  }
};
1731
+
1732
// src/models/chat-models/grok-3-fast-beta.xai.ts
// Registration for "grok-3-fast-beta": same limits/config as grok-3-beta,
// different model name and pricing entry.
var import_provider9 = require("@adaline/provider");
var Grok_3_Fast_Beta_Literal = "grok-3-fast-beta";
var Grok_3_Fast_Beta_Description = "Grok-3 Fast Beta is xAI's fastest flagship model optimized for speed with a 131K context window.";
var Grok_3_Fast_Beta_Schema = (0, import_provider9.ChatModelSchema)(XAIChatModelRoles, XAIChatModelTextToolModalitiesEnum).parse({
  name: Grok_3_Fast_Beta_Literal,
  description: Grok_3_Fast_Beta_Description,
  maxInputTokens: 131072,
  maxOutputTokens: 131072,
  roles: XAIChatModelRolesMap,
  modalities: XAIChatModelTextToolModalities,
  config: {
    def: chat_model_exports.ChatModelResponseSchemaConfigDef(131072, 4),
    schema: chat_model_exports.ChatModelResponseSchemaConfigSchema(131072, 4)
  },
  price: pricing_default[Grok_3_Fast_Beta_Literal]
});
var Grok_3_Fast_Beta_Options = BaseChatModelOptions;
var Grok_3_Fast_Beta = class extends BaseChatModel {
  constructor(options) {
    super(Grok_3_Fast_Beta_Schema, options);
  }
};
1755
+
1756
// src/models/chat-models/grok-3-mini-beta.xai.ts
// Registration for "grok-3-mini-beta": the only model here using the
// MiniReasoning config pair (supports reasoning_effort).
var import_provider10 = require("@adaline/provider");
var Grok_3_Mini_Beta_Literal = "grok-3-mini-beta";
var Grok_3_Mini_Beta_Description = "Grok-3 Mini Beta is xAI's lightweight reasoning model with support for reasoning_effort parameter and a 131K context window.";
var Grok_3_Mini_Beta_Schema = (0, import_provider10.ChatModelSchema)(XAIChatModelRoles, XAIChatModelTextToolModalitiesEnum).parse({
  name: Grok_3_Mini_Beta_Literal,
  description: Grok_3_Mini_Beta_Description,
  maxInputTokens: 131072,
  maxOutputTokens: 131072,
  roles: XAIChatModelRolesMap,
  modalities: XAIChatModelTextToolModalities,
  config: {
    // Pass the same (maxOutputTokens, maxSequences) pair to both factories.
    // The original called ChatModelMiniReasoningConfigDef(131072) with a single
    // argument while the companion schema received (131072, 4), leaving the
    // def's maxSequences argument undefined — every other def/schema pair in
    // this file passes identical arguments to both.
    def: chat_model_exports.ChatModelMiniReasoningConfigDef(131072, 4),
    schema: chat_model_exports.ChatModelMiniReasoningConfigSchema(131072, 4)
  },
  price: pricing_default[Grok_3_Mini_Beta_Literal]
});
var Grok_3_Mini_Beta_Options = BaseChatModelOptions;
var Grok_3_Mini_Beta = class extends BaseChatModel {
  constructor(options) {
    super(Grok_3_Mini_Beta_Schema, options);
  }
};
1779
+
1780
// src/models/chat-models/grok-3-mini-fast-beta.xai.ts
// Registration for "grok-3-mini-fast-beta".
// NOTE(review): unlike grok-3-mini-beta this uses the ResponseSchema config
// pair rather than the MiniReasoning pair — confirm this is intentional.
var import_provider11 = require("@adaline/provider");
var Grok_3_Mini_Fast_Beta_Literal = "grok-3-mini-fast-beta";
var Grok_3_Mini_Fast_Beta_Description = "Grok-3 Mini Fast Beta is xAI's fast lightweight model optimized for speed with a 131K context window.";
var Grok_3_Mini_Fast_Beta_Schema = (0, import_provider11.ChatModelSchema)(XAIChatModelRoles, XAIChatModelTextToolModalitiesEnum).parse({
  name: Grok_3_Mini_Fast_Beta_Literal,
  description: Grok_3_Mini_Fast_Beta_Description,
  maxInputTokens: 131072,
  maxOutputTokens: 131072,
  roles: XAIChatModelRolesMap,
  modalities: XAIChatModelTextToolModalities,
  config: {
    def: chat_model_exports.ChatModelResponseSchemaConfigDef(131072, 4),
    schema: chat_model_exports.ChatModelResponseSchemaConfigSchema(131072, 4)
  },
  price: pricing_default[Grok_3_Mini_Fast_Beta_Literal]
});
var Grok_3_Mini_Fast_Beta_Options = BaseChatModelOptions;
var Grok_3_Mini_Fast_Beta = class extends BaseChatModel {
  constructor(options) {
    super(Grok_3_Mini_Fast_Beta_Schema, options);
  }
};
1803
+
1804
// src/models/chat-models/grok-4.xai.ts
// Registration for "grok-4": 256K input window, 131K output cap, and the
// Reasoning config pair (single maxOutputTokens argument).
var import_provider12 = require("@adaline/provider");
var Grok_4_Literal = "grok-4";
var Grok_4_Description = "Grok-4 is xAI's most advanced reasoning model with a 256K context window.";
var Grok_4_Schema = (0, import_provider12.ChatModelSchema)(XAIChatModelRoles, XAIChatModelTextToolModalitiesEnum).parse({
  name: Grok_4_Literal,
  description: Grok_4_Description,
  maxInputTokens: 262144,
  maxOutputTokens: 131072,
  roles: XAIChatModelRolesMap,
  modalities: XAIChatModelTextToolModalities,
  config: {
    def: chat_model_exports.ChatModelReasoningConfigDef(131072),
    schema: chat_model_exports.ChatModelReasoningConfigSchema(131072)
  },
  price: pricing_default[Grok_4_Literal]
});
var Grok_4_Options = BaseChatModelOptions;
var Grok_4 = class extends BaseChatModel {
  constructor(options) {
    super(Grok_4_Schema, options);
  }
};
1827
+
1828
// src/models/chat-models/grok-4-0709.xai.ts
// Registration for the pinned "grok-4-0709" release; same limits and
// Reasoning config as the floating grok-4 entry.
var import_provider13 = require("@adaline/provider");
var Grok_4_0709_Literal = "grok-4-0709";
var Grok_4_0709_Description = "Grok-4 version 0709 is a specific release of xAI's most advanced reasoning model with a 256K context window.";
var Grok_4_0709_Schema = (0, import_provider13.ChatModelSchema)(XAIChatModelRoles, XAIChatModelTextToolModalitiesEnum).parse({
  name: Grok_4_0709_Literal,
  description: Grok_4_0709_Description,
  maxInputTokens: 262144,
  maxOutputTokens: 131072,
  roles: XAIChatModelRolesMap,
  modalities: XAIChatModelTextToolModalities,
  config: {
    def: chat_model_exports.ChatModelReasoningConfigDef(131072),
    schema: chat_model_exports.ChatModelReasoningConfigSchema(131072)
  },
  price: pricing_default[Grok_4_0709_Literal]
});
var Grok_4_0709_Options = BaseChatModelOptions;
var Grok_4_0709 = class extends BaseChatModel {
  constructor(options) {
    super(Grok_4_0709_Schema, options);
  }
};
1851
+
1852
// src/models/chat-models/grok-4-fast-reasoning.xai.ts
// Registration for "grok-4-fast-reasoning": 2M-token input window (2097152),
// 131K output cap, Reasoning config pair.
var import_provider14 = require("@adaline/provider");
var Grok_4_Fast_Reasoning_Literal = "grok-4-fast-reasoning";
var Grok_4_Fast_Reasoning_Description = "Grok-4 Fast Reasoning is xAI's fast reasoning model with a 2M context window.";
var Grok_4_Fast_Reasoning_Schema = (0, import_provider14.ChatModelSchema)(XAIChatModelRoles, XAIChatModelTextToolModalitiesEnum).parse({
  name: Grok_4_Fast_Reasoning_Literal,
  description: Grok_4_Fast_Reasoning_Description,
  maxInputTokens: 2097152,
  maxOutputTokens: 131072,
  roles: XAIChatModelRolesMap,
  modalities: XAIChatModelTextToolModalities,
  config: {
    def: chat_model_exports.ChatModelReasoningConfigDef(131072),
    schema: chat_model_exports.ChatModelReasoningConfigSchema(131072)
  },
  price: pricing_default[Grok_4_Fast_Reasoning_Literal]
});
var Grok_4_Fast_Reasoning_Options = BaseChatModelOptions;
var Grok_4_Fast_Reasoning = class extends BaseChatModel {
  constructor(options) {
    super(Grok_4_Fast_Reasoning_Schema, options);
  }
};
1875
+
1876
// src/models/chat-models/grok-4-fast-non-reasoning.xai.ts
// Registration for "grok-4-fast-non-reasoning": 2M input window, 131K output
// cap, ResponseSchema config (no reasoning options).
var import_provider15 = require("@adaline/provider");
var Grok_4_Fast_Non_Reasoning_Literal = "grok-4-fast-non-reasoning";
var Grok_4_Fast_Non_Reasoning_Description = "Grok-4 Fast Non-Reasoning is xAI's fast model without reasoning capabilities with a 2M context window.";
var Grok_4_Fast_Non_Reasoning_Schema = (0, import_provider15.ChatModelSchema)(XAIChatModelRoles, XAIChatModelTextToolModalitiesEnum).parse({
  name: Grok_4_Fast_Non_Reasoning_Literal,
  description: Grok_4_Fast_Non_Reasoning_Description,
  maxInputTokens: 2097152,
  maxOutputTokens: 131072,
  roles: XAIChatModelRolesMap,
  modalities: XAIChatModelTextToolModalities,
  config: {
    def: chat_model_exports.ChatModelResponseSchemaConfigDef(131072, 4),
    schema: chat_model_exports.ChatModelResponseSchemaConfigSchema(131072, 4)
  },
  price: pricing_default[Grok_4_Fast_Non_Reasoning_Literal]
});
var Grok_4_Fast_Non_Reasoning_Options = BaseChatModelOptions;
var Grok_4_Fast_Non_Reasoning = class extends BaseChatModel {
  constructor(options) {
    super(Grok_4_Fast_Non_Reasoning_Schema, options);
  }
};
1899
+
1900
// src/models/chat-models/grok-4.1-fast-reasoning.xai.ts
// Registration for "grok-4.1-fast-reasoning": 2M input window, 131K output
// cap, Reasoning config pair.
var import_provider16 = require("@adaline/provider");
var Grok_4_1_Fast_Reasoning_Literal = "grok-4.1-fast-reasoning";
var Grok_4_1_Fast_Reasoning_Description = "Grok-4.1 Fast Reasoning is xAI's updated fast reasoning model with a 2M context window.";
var Grok_4_1_Fast_Reasoning_Schema = (0, import_provider16.ChatModelSchema)(XAIChatModelRoles, XAIChatModelTextToolModalitiesEnum).parse({
  name: Grok_4_1_Fast_Reasoning_Literal,
  description: Grok_4_1_Fast_Reasoning_Description,
  maxInputTokens: 2097152,
  maxOutputTokens: 131072,
  roles: XAIChatModelRolesMap,
  modalities: XAIChatModelTextToolModalities,
  config: {
    def: chat_model_exports.ChatModelReasoningConfigDef(131072),
    schema: chat_model_exports.ChatModelReasoningConfigSchema(131072)
  },
  // Look up pricing via the _Literal constant (same string value) instead of a
  // duplicated inline key, matching every other model registration in this file.
  price: pricing_default[Grok_4_1_Fast_Reasoning_Literal]
});
var Grok_4_1_Fast_Reasoning_Options = BaseChatModelOptions;
var Grok_4_1_Fast_Reasoning = class extends BaseChatModel {
  constructor(options) {
    super(Grok_4_1_Fast_Reasoning_Schema, options);
  }
};
1923
+
1924
// src/models/chat-models/grok-4.1-fast-non-reasoning.xai.ts
// Registration for "grok-4.1-fast-non-reasoning": 2M input window, 131K
// output cap, ResponseSchema config (no reasoning options).
var import_provider17 = require("@adaline/provider");
var Grok_4_1_Fast_Non_Reasoning_Literal = "grok-4.1-fast-non-reasoning";
var Grok_4_1_Fast_Non_Reasoning_Description = "Grok-4.1 Fast Non-Reasoning is xAI's updated fast model without reasoning capabilities with a 2M context window.";
var Grok_4_1_Fast_Non_Reasoning_Schema = (0, import_provider17.ChatModelSchema)(XAIChatModelRoles, XAIChatModelTextToolModalitiesEnum).parse({
  name: Grok_4_1_Fast_Non_Reasoning_Literal,
  description: Grok_4_1_Fast_Non_Reasoning_Description,
  maxInputTokens: 2097152,
  maxOutputTokens: 131072,
  roles: XAIChatModelRolesMap,
  modalities: XAIChatModelTextToolModalities,
  config: {
    def: chat_model_exports.ChatModelResponseSchemaConfigDef(131072, 4),
    schema: chat_model_exports.ChatModelResponseSchemaConfigSchema(131072, 4)
  },
  // Look up pricing via the _Literal constant (same string value) instead of a
  // duplicated inline key, matching every other model registration in this file.
  price: pricing_default[Grok_4_1_Fast_Non_Reasoning_Literal]
});
var Grok_4_1_Fast_Non_Reasoning_Options = BaseChatModelOptions;
var Grok_4_1_Fast_Non_Reasoning = class extends BaseChatModel {
  constructor(options) {
    super(Grok_4_1_Fast_Non_Reasoning_Schema, options);
  }
};
1947
+
1948
// src/models/chat-models/grok-code-fast-1.xai.ts
// Registration for "grok-code-fast-1": 256K input window, 131K output cap,
// ResponseSchema config.
var import_provider18 = require("@adaline/provider");
var Grok_Code_Fast_1_Literal = "grok-code-fast-1";
var Grok_Code_Fast_1_Description = "Grok Code Fast 1 is xAI's specialized coding model optimized for code generation with a 256K context window.";
var Grok_Code_Fast_1_Schema = (0, import_provider18.ChatModelSchema)(XAIChatModelRoles, XAIChatModelTextToolModalitiesEnum).parse({
  name: Grok_Code_Fast_1_Literal,
  description: Grok_Code_Fast_1_Description,
  maxInputTokens: 262144,
  maxOutputTokens: 131072,
  roles: XAIChatModelRolesMap,
  modalities: XAIChatModelTextToolModalities,
  config: {
    def: chat_model_exports.ChatModelResponseSchemaConfigDef(131072, 4),
    schema: chat_model_exports.ChatModelResponseSchemaConfigSchema(131072, 4)
  },
  price: pricing_default[Grok_Code_Fast_1_Literal]
});
var Grok_Code_Fast_1_Options = BaseChatModelOptions;
var Grok_Code_Fast_1 = class extends BaseChatModel {
  constructor(options) {
    super(Grok_Code_Fast_1_Schema, options);
  }
};
1971
+
1972
// src/provider/provider.xai.ts
// Provider facade for xAI: maps model-name literals to their class, options
// parser, and schema, and exposes chat-model lookup/instantiation. Embedding
// models are explicitly unsupported.
// NOTE(review): import_provider19 (ProviderError) is required elsewhere in the
// bundle — confirm it is in scope at this point.
var ProviderLiteral = "xai";
var XAI = class {
  constructor() {
    this.version = "v1";
    this.name = ProviderLiteral;
    // Registry of every supported chat model, keyed by its API model name.
    this.chatModelFactories = {
      [Grok_2_Literal]: {
        model: Grok_2,
        modelOptions: Grok_2_Options,
        modelSchema: Grok_2_Schema
      },
      [Grok_2_Latest_Literal]: {
        model: Grok_2_Latest,
        modelOptions: Grok_2_Latest_Options,
        modelSchema: Grok_2_Latest_Schema
      },
      [Grok_2_1212_Literal]: {
        model: Grok_2_1212,
        modelOptions: Grok_2_1212_Options,
        modelSchema: Grok_2_1212_Schema
      },
      [Grok_3_Beta_Literal]: {
        model: Grok_3_Beta,
        modelOptions: Grok_3_Beta_Options,
        modelSchema: Grok_3_Beta_Schema
      },
      [Grok_3_Fast_Beta_Literal]: {
        model: Grok_3_Fast_Beta,
        modelOptions: Grok_3_Fast_Beta_Options,
        modelSchema: Grok_3_Fast_Beta_Schema
      },
      [Grok_3_Mini_Beta_Literal]: {
        model: Grok_3_Mini_Beta,
        modelOptions: Grok_3_Mini_Beta_Options,
        modelSchema: Grok_3_Mini_Beta_Schema
      },
      [Grok_3_Mini_Fast_Beta_Literal]: {
        model: Grok_3_Mini_Fast_Beta,
        modelOptions: Grok_3_Mini_Fast_Beta_Options,
        modelSchema: Grok_3_Mini_Fast_Beta_Schema
      },
      [Grok_4_Literal]: {
        model: Grok_4,
        modelOptions: Grok_4_Options,
        modelSchema: Grok_4_Schema
      },
      [Grok_4_0709_Literal]: {
        model: Grok_4_0709,
        modelOptions: Grok_4_0709_Options,
        modelSchema: Grok_4_0709_Schema
      },
      [Grok_4_Fast_Reasoning_Literal]: {
        model: Grok_4_Fast_Reasoning,
        modelOptions: Grok_4_Fast_Reasoning_Options,
        modelSchema: Grok_4_Fast_Reasoning_Schema
      },
      [Grok_4_Fast_Non_Reasoning_Literal]: {
        model: Grok_4_Fast_Non_Reasoning,
        modelOptions: Grok_4_Fast_Non_Reasoning_Options,
        modelSchema: Grok_4_Fast_Non_Reasoning_Schema
      },
      [Grok_4_1_Fast_Reasoning_Literal]: {
        model: Grok_4_1_Fast_Reasoning,
        modelOptions: Grok_4_1_Fast_Reasoning_Options,
        modelSchema: Grok_4_1_Fast_Reasoning_Schema
      },
      [Grok_4_1_Fast_Non_Reasoning_Literal]: {
        model: Grok_4_1_Fast_Non_Reasoning,
        modelOptions: Grok_4_1_Fast_Non_Reasoning_Options,
        modelSchema: Grok_4_1_Fast_Non_Reasoning_Schema
      },
      [Grok_Code_Fast_1_Literal]: {
        model: Grok_Code_Fast_1,
        modelOptions: Grok_Code_Fast_1_Options,
        modelSchema: Grok_Code_Fast_1_Schema
      }
    };
  }
  // All supported chat-model names.
  chatModelLiterals() {
    return Object.keys(this.chatModelFactories);
  }
  // Map of model name -> schema, derived from the factory registry.
  chatModelSchemas() {
    return Object.keys(this.chatModelFactories).reduce(
      (acc, key) => {
        acc[key] = this.chatModelFactories[key].modelSchema;
        return acc;
      },
      {}
    );
  }
  // Instantiate a chat model from options.modelName; throws ProviderError
  // (listing the valid names) for unknown models. Options are validated by
  // the factory's modelOptions parser before construction.
  chatModel(options) {
    const modelName = options.modelName;
    const factory = this.chatModelFactories[modelName];
    if (!factory) {
      throw new import_provider19.ProviderError({
        info: `Invalid model name: '${modelName}' for provider: '${this.name}'`,
        cause: new Error(`Available models: [${this.chatModelLiterals().join(", ")}]`)
      });
    }
    const parsedOptions = factory.modelOptions.parse(options);
    return new factory.model(parsedOptions);
  }
  // XAI does not support embedding models
  embeddingModelLiterals() {
    return [];
  }
  embeddingModelSchemas() {
    return {};
  }
  embeddingModel() {
    throw new import_provider19.ProviderError({
      info: "XAI does not support embedding models",
      cause: new Error("No embedding models available")
    });
  }
};
// Static default endpoint for the xAI REST API.
XAI.baseUrl = "https://api.x.ai/v1";
2090
// Annotate the CommonJS export names for ESM import in node:
// (dead code by design: the `0 &&` guard means this never executes at runtime;
// it only lets Node's CJS named-export detection see the export list)
0 && (module.exports = {
  BaseChatModel,
  BaseChatModelOptions,
  Grok_2,
  Grok_2_1212,
  Grok_2_1212_Literal,
  Grok_2_1212_Options,
  Grok_2_1212_Schema,
  Grok_2_Latest,
  Grok_2_Latest_Literal,
  Grok_2_Latest_Options,
  Grok_2_Latest_Schema,
  Grok_2_Literal,
  Grok_2_Options,
  Grok_2_Schema,
  Grok_3_Beta,
  Grok_3_Beta_Literal,
  Grok_3_Beta_Options,
  Grok_3_Beta_Schema,
  Grok_3_Fast_Beta,
  Grok_3_Fast_Beta_Literal,
  Grok_3_Fast_Beta_Options,
  Grok_3_Fast_Beta_Schema,
  Grok_3_Mini_Beta,
  Grok_3_Mini_Beta_Literal,
  Grok_3_Mini_Beta_Options,
  Grok_3_Mini_Beta_Schema,
  Grok_3_Mini_Fast_Beta,
  Grok_3_Mini_Fast_Beta_Literal,
  Grok_3_Mini_Fast_Beta_Options,
  Grok_3_Mini_Fast_Beta_Schema,
  Grok_4,
  Grok_4_0709,
  Grok_4_0709_Literal,
  Grok_4_0709_Options,
  Grok_4_0709_Schema,
  Grok_4_1_Fast_Non_Reasoning,
  Grok_4_1_Fast_Non_Reasoning_Literal,
  Grok_4_1_Fast_Non_Reasoning_Options,
  Grok_4_1_Fast_Non_Reasoning_Schema,
  Grok_4_1_Fast_Reasoning,
  Grok_4_1_Fast_Reasoning_Literal,
  Grok_4_1_Fast_Reasoning_Options,
  Grok_4_1_Fast_Reasoning_Schema,
  Grok_4_Fast_Non_Reasoning,
  Grok_4_Fast_Non_Reasoning_Literal,
  Grok_4_Fast_Non_Reasoning_Options,
  Grok_4_Fast_Non_Reasoning_Schema,
  Grok_4_Fast_Reasoning,
  Grok_4_Fast_Reasoning_Literal,
  Grok_4_Fast_Reasoning_Options,
  Grok_4_Fast_Reasoning_Schema,
  Grok_4_Literal,
  Grok_4_Options,
  Grok_4_Schema,
  Grok_Code_Fast_1,
  Grok_Code_Fast_1_Literal,
  Grok_Code_Fast_1_Options,
  Grok_Code_Fast_1_Schema,
  ProviderLiteral,
  XAI,
  XAIChatModelConfigs,
  XAIChatModelModalities,
  XAIChatModelModalitiesEnum,
  XAIChatModelRoles,
  XAIChatModelRolesMap,
  XAIChatModelTextModalities,
  XAIChatModelTextModalitiesEnum,
  XAIChatModelTextToolModalities,
  XAIChatModelTextToolModalitiesEnum,
  XAIChatRequest,
  XAIChatRequestAssistantMessage,
  XAIChatRequestImageContent,
  XAIChatRequestMessage,
  XAIChatRequestResponseFormat,
  XAIChatRequestSystemMessage,
  XAIChatRequestTextContent,
  XAIChatRequestTool,
  XAIChatRequestToolCallContent,
  XAIChatRequestToolChoiceEnum,
  XAIChatRequestToolChoiceFunction,
  XAIChatRequestToolMessage,
  XAIChatRequestUserMessage,
  XAICompleteChatResponse,
  XAIStreamChatResponse,
  XAIToolCallsCompleteChatResponse,
  XAIToolCallsStreamChatResponse
});
2179
+ //# sourceMappingURL=index.js.map