@adaline/xai 1.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs ADDED
@@ -0,0 +1,2101 @@
1
+ var __defProp = Object.defineProperty;
2
+ var __defProps = Object.defineProperties;
3
+ var __getOwnPropDescs = Object.getOwnPropertyDescriptors;
4
+ var __getOwnPropSymbols = Object.getOwnPropertySymbols;
5
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
6
+ var __propIsEnum = Object.prototype.propertyIsEnumerable;
7
+ var __knownSymbol = (name, symbol) => (symbol = Symbol[name]) ? symbol : Symbol.for("Symbol." + name);
8
+ var __typeError = (msg) => {
9
+ throw TypeError(msg);
10
+ };
11
+ var __defNormalProp = (obj, key, value) => key in obj ? __defProp(obj, key, { enumerable: true, configurable: true, writable: true, value }) : obj[key] = value;
12
+ var __spreadValues = (a, b) => {
13
+ for (var prop in b || (b = {}))
14
+ if (__hasOwnProp.call(b, prop))
15
+ __defNormalProp(a, prop, b[prop]);
16
+ if (__getOwnPropSymbols)
17
+ for (var prop of __getOwnPropSymbols(b)) {
18
+ if (__propIsEnum.call(b, prop))
19
+ __defNormalProp(a, prop, b[prop]);
20
+ }
21
+ return a;
22
+ };
23
+ var __spreadProps = (a, b) => __defProps(a, __getOwnPropDescs(b));
24
+ var __export = (target, all) => {
25
+ for (var name in all)
26
+ __defProp(target, name, { get: all[name], enumerable: true });
27
+ };
28
+ var __async = (__this, __arguments, generator) => {
29
+ return new Promise((resolve, reject) => {
30
+ var fulfilled = (value) => {
31
+ try {
32
+ step(generator.next(value));
33
+ } catch (e) {
34
+ reject(e);
35
+ }
36
+ };
37
+ var rejected = (value) => {
38
+ try {
39
+ step(generator.throw(value));
40
+ } catch (e) {
41
+ reject(e);
42
+ }
43
+ };
44
+ var step = (x) => x.done ? resolve(x.value) : Promise.resolve(x.value).then(fulfilled, rejected);
45
+ step((generator = generator.apply(__this, __arguments)).next());
46
+ });
47
+ };
48
+ var __await = function(promise, isYieldStar) {
49
+ this[0] = promise;
50
+ this[1] = isYieldStar;
51
+ };
52
+ var __asyncGenerator = (__this, __arguments, generator) => {
53
+ var resume = (k, v, yes, no) => {
54
+ try {
55
+ var x = generator[k](v), isAwait = (v = x.value) instanceof __await, done = x.done;
56
+ Promise.resolve(isAwait ? v[0] : v).then((y) => isAwait ? resume(k === "return" ? k : "next", v[1] ? { done: y.done, value: y.value } : y, yes, no) : yes({ value: y, done })).catch((e) => resume("throw", e, yes, no));
57
+ } catch (e) {
58
+ no(e);
59
+ }
60
+ }, method = (k) => it[k] = (x) => new Promise((yes, no) => resume(k, x, yes, no)), it = {};
61
+ return generator = generator.apply(__this, __arguments), it[__knownSymbol("asyncIterator")] = () => it, method("next"), method("throw"), method("return"), it;
62
+ };
63
+ var __yieldStar = (value) => {
64
+ var obj = value[__knownSymbol("asyncIterator")], isAwait = false, method, it = {};
65
+ if (obj == null) {
66
+ obj = value[__knownSymbol("iterator")]();
67
+ method = (k) => it[k] = (x) => obj[k](x);
68
+ } else {
69
+ obj = obj.call(value);
70
+ method = (k) => it[k] = (v) => {
71
+ if (isAwait) {
72
+ isAwait = false;
73
+ if (k === "throw") throw v;
74
+ return v;
75
+ }
76
+ isAwait = true;
77
+ return {
78
+ done: false,
79
+ value: new __await(new Promise((resolve) => {
80
+ var x = obj[k](v);
81
+ if (!(x instanceof Object)) __typeError("Object expected");
82
+ resolve(x);
83
+ }), 1)
84
+ };
85
+ };
86
+ }
87
+ return it[__knownSymbol("iterator")] = () => it, method("next"), "throw" in obj ? method("throw") : it.throw = (x) => {
88
+ throw x;
89
+ }, "return" in obj && method("return"), it;
90
+ };
91
+
92
+ // src/provider/provider.xai.ts
93
+ import { ProviderError } from "@adaline/provider";
94
+
95
+ // src/models/chat-models/base-chat-model.xai.ts
96
+ import { z as z5 } from "zod";
97
+ import {
98
+ getMimeTypeFromBase64,
99
+ InvalidConfigError,
100
+ InvalidMessagesError,
101
+ InvalidModelRequestError,
102
+ InvalidToolsError,
103
+ ModelResponseError,
104
+ removeUndefinedEntries,
105
+ urlWithoutTrailingSlash
106
+ } from "@adaline/provider";
107
+ import {
108
+ AssistantRoleLiteral as AssistantRoleLiteral2,
109
+ Base64ImageContentTypeLiteral,
110
+ Config,
111
+ createPartialTextMessage,
112
+ createPartialToolCallMessage,
113
+ createTextContent,
114
+ createToolCallContent,
115
+ ImageModalityLiteral as ImageModalityLiteral2,
116
+ Message,
117
+ SystemRoleLiteral as SystemRoleLiteral2,
118
+ TextModalityLiteral as TextModalityLiteral2,
119
+ Tool,
120
+ ToolCallModalityLiteral as ToolCallModalityLiteral2,
121
+ ToolResponseModalityLiteral as ToolResponseModalityLiteral2,
122
+ ToolRoleLiteral as ToolRoleLiteral2,
123
+ UrlImageContentTypeLiteral,
124
+ UserRoleLiteral as UserRoleLiteral2
125
+ } from "@adaline/types";
126
+
127
+ // src/models/pricing.json
128
+ var pricing_default = {
129
+ "grok-2": {
130
+ modelName: "grok-2",
131
+ currency: "USD",
132
+ tokenRanges: [
133
+ {
134
+ minTokens: 0,
135
+ maxTokens: null,
136
+ prices: {
137
+ base: {
138
+ inputPricePerMillion: 2,
139
+ outputPricePerMillion: 10
140
+ }
141
+ }
142
+ }
143
+ ]
144
+ },
145
+ "grok-2-latest": {
146
+ modelName: "grok-2-latest",
147
+ currency: "USD",
148
+ tokenRanges: [
149
+ {
150
+ minTokens: 0,
151
+ maxTokens: null,
152
+ prices: {
153
+ base: {
154
+ inputPricePerMillion: 2,
155
+ outputPricePerMillion: 10
156
+ }
157
+ }
158
+ }
159
+ ]
160
+ },
161
+ "grok-2-1212": {
162
+ modelName: "grok-2-1212",
163
+ currency: "USD",
164
+ tokenRanges: [
165
+ {
166
+ minTokens: 0,
167
+ maxTokens: null,
168
+ prices: {
169
+ base: {
170
+ inputPricePerMillion: 2,
171
+ outputPricePerMillion: 10
172
+ }
173
+ }
174
+ }
175
+ ]
176
+ },
177
+ "grok-2-vision": {
178
+ modelName: "grok-2-vision",
179
+ currency: "USD",
180
+ tokenRanges: [
181
+ {
182
+ minTokens: 0,
183
+ maxTokens: null,
184
+ prices: {
185
+ base: {
186
+ inputPricePerMillion: 2,
187
+ outputPricePerMillion: 10
188
+ }
189
+ }
190
+ }
191
+ ]
192
+ },
193
+ "grok-2-vision-latest": {
194
+ modelName: "grok-2-vision-latest",
195
+ currency: "USD",
196
+ tokenRanges: [
197
+ {
198
+ minTokens: 0,
199
+ maxTokens: null,
200
+ prices: {
201
+ base: {
202
+ inputPricePerMillion: 2,
203
+ outputPricePerMillion: 10
204
+ }
205
+ }
206
+ }
207
+ ]
208
+ },
209
+ "grok-2-vision-1212": {
210
+ modelName: "grok-2-vision-1212",
211
+ currency: "USD",
212
+ tokenRanges: [
213
+ {
214
+ minTokens: 0,
215
+ maxTokens: null,
216
+ prices: {
217
+ base: {
218
+ inputPricePerMillion: 2,
219
+ outputPricePerMillion: 10
220
+ }
221
+ }
222
+ }
223
+ ]
224
+ },
225
+ "grok-3-beta": {
226
+ modelName: "grok-3-beta",
227
+ currency: "USD",
228
+ tokenRanges: [
229
+ {
230
+ minTokens: 0,
231
+ maxTokens: null,
232
+ prices: {
233
+ base: {
234
+ inputPricePerMillion: 3,
235
+ outputPricePerMillion: 15
236
+ }
237
+ }
238
+ }
239
+ ]
240
+ },
241
+ "grok-3-fast-beta": {
242
+ modelName: "grok-3-fast-beta",
243
+ currency: "USD",
244
+ tokenRanges: [
245
+ {
246
+ minTokens: 0,
247
+ maxTokens: null,
248
+ prices: {
249
+ base: {
250
+ inputPricePerMillion: 5,
251
+ outputPricePerMillion: 25
252
+ }
253
+ }
254
+ }
255
+ ]
256
+ },
257
+ "grok-3-mini-beta": {
258
+ modelName: "grok-3-mini-beta",
259
+ currency: "USD",
260
+ tokenRanges: [
261
+ {
262
+ minTokens: 0,
263
+ maxTokens: null,
264
+ prices: {
265
+ base: {
266
+ inputPricePerMillion: 0.3,
267
+ outputPricePerMillion: 0.5
268
+ }
269
+ }
270
+ }
271
+ ]
272
+ },
273
+ "grok-3-mini-fast-beta": {
274
+ modelName: "grok-3-mini-fast-beta",
275
+ currency: "USD",
276
+ tokenRanges: [
277
+ {
278
+ minTokens: 0,
279
+ maxTokens: null,
280
+ prices: {
281
+ base: {
282
+ inputPricePerMillion: 0.6,
283
+ outputPricePerMillion: 4
284
+ }
285
+ }
286
+ }
287
+ ]
288
+ },
289
+ "grok-4": {
290
+ modelName: "grok-4",
291
+ currency: "USD",
292
+ tokenRanges: [
293
+ {
294
+ minTokens: 0,
295
+ maxTokens: null,
296
+ prices: {
297
+ base: {
298
+ inputPricePerMillion: 6,
299
+ outputPricePerMillion: 18
300
+ }
301
+ }
302
+ }
303
+ ]
304
+ },
305
+ "grok-4-0709": {
306
+ modelName: "grok-4-0709",
307
+ currency: "USD",
308
+ tokenRanges: [
309
+ {
310
+ minTokens: 0,
311
+ maxTokens: null,
312
+ prices: {
313
+ base: {
314
+ inputPricePerMillion: 6,
315
+ outputPricePerMillion: 18
316
+ }
317
+ }
318
+ }
319
+ ]
320
+ },
321
+ "grok-4-fast-reasoning": {
322
+ modelName: "grok-4-fast-reasoning",
323
+ currency: "USD",
324
+ tokenRanges: [
325
+ {
326
+ minTokens: 0,
327
+ maxTokens: null,
328
+ prices: {
329
+ base: {
330
+ inputPricePerMillion: 3,
331
+ outputPricePerMillion: 12
332
+ }
333
+ }
334
+ }
335
+ ]
336
+ },
337
+ "grok-4-fast-non-reasoning": {
338
+ modelName: "grok-4-fast-non-reasoning",
339
+ currency: "USD",
340
+ tokenRanges: [
341
+ {
342
+ minTokens: 0,
343
+ maxTokens: null,
344
+ prices: {
345
+ base: {
346
+ inputPricePerMillion: 3,
347
+ outputPricePerMillion: 12
348
+ }
349
+ }
350
+ }
351
+ ]
352
+ },
353
+ "grok-4.1-fast-reasoning": {
354
+ modelName: "grok-4.1-fast-reasoning",
355
+ currency: "USD",
356
+ tokenRanges: [
357
+ {
358
+ minTokens: 0,
359
+ maxTokens: null,
360
+ prices: {
361
+ base: {
362
+ inputPricePerMillion: 3,
363
+ outputPricePerMillion: 12
364
+ }
365
+ }
366
+ }
367
+ ]
368
+ },
369
+ "grok-4.1-fast-non-reasoning": {
370
+ modelName: "grok-4.1-fast-non-reasoning",
371
+ currency: "USD",
372
+ tokenRanges: [
373
+ {
374
+ minTokens: 0,
375
+ maxTokens: null,
376
+ prices: {
377
+ base: {
378
+ inputPricePerMillion: 3,
379
+ outputPricePerMillion: 12
380
+ }
381
+ }
382
+ }
383
+ ]
384
+ },
385
+ "grok-code-fast-1": {
386
+ modelName: "grok-code-fast-1",
387
+ currency: "USD",
388
+ tokenRanges: [
389
+ {
390
+ minTokens: 0,
391
+ maxTokens: null,
392
+ prices: {
393
+ base: {
394
+ inputPricePerMillion: 0.3,
395
+ outputPricePerMillion: 0.5
396
+ }
397
+ }
398
+ }
399
+ ]
400
+ }
401
+ };
402
+
403
+ // src/models/chat-models/types/roles.chat-model.xai.ts
404
+ import { z } from "zod";
405
+ import { AssistantRoleLiteral, SystemRoleLiteral, ToolRoleLiteral, UserRoleLiteral } from "@adaline/types";
406
+ var XAIChatModelRoles = z.enum([SystemRoleLiteral, UserRoleLiteral, AssistantRoleLiteral, ToolRoleLiteral]);
407
+ var XAIChatModelRolesMap = {
408
+ system: SystemRoleLiteral,
409
+ user: UserRoleLiteral,
410
+ assistant: AssistantRoleLiteral,
411
+ tool: ToolRoleLiteral
412
+ };
413
+
414
+ // src/models/chat-models/types/modalities.chat-model.xai.ts
415
+ import { z as z2 } from "zod";
416
+ import { ImageModalityLiteral, TextModalityLiteral, ToolCallModalityLiteral, ToolResponseModalityLiteral } from "@adaline/types";
417
+ var XAIChatModelModalities = [
418
+ TextModalityLiteral,
419
+ ImageModalityLiteral,
420
+ ToolCallModalityLiteral,
421
+ ToolResponseModalityLiteral
422
+ ];
423
+ var XAIChatModelModalitiesEnum = z2.enum([
424
+ TextModalityLiteral,
425
+ ImageModalityLiteral,
426
+ ToolCallModalityLiteral,
427
+ ToolResponseModalityLiteral
428
+ ]);
429
+ var XAIChatModelTextModalities = [TextModalityLiteral];
430
+ var XAIChatModelTextModalitiesEnum = z2.enum([TextModalityLiteral]);
431
+ var XAIChatModelTextToolModalities = [
432
+ TextModalityLiteral,
433
+ ToolCallModalityLiteral,
434
+ ToolResponseModalityLiteral
435
+ ];
436
+ var XAIChatModelTextToolModalitiesEnum = z2.enum([TextModalityLiteral, ToolCallModalityLiteral, ToolResponseModalityLiteral]);
437
+
438
+ // src/models/chat-models/types/response.chat-model.xai.ts
439
+ import { z as z3 } from "zod";
440
+ var XAIBaseLogProb = z3.object({
441
+ token: z3.string(),
442
+ logprob: z3.number(),
443
+ bytes: z3.array(z3.number()).nullable()
444
+ });
445
+ var XAILogProb = z3.object({
446
+ content: z3.array(
447
+ XAIBaseLogProb.extend({
448
+ top_logprobs: z3.array(XAIBaseLogProb)
449
+ })
450
+ ).nullable().optional(),
451
+ refusal: z3.array(
452
+ XAIBaseLogProb.extend({
453
+ top_logprobs: z3.array(XAIBaseLogProb)
454
+ })
455
+ ).nullable().optional()
456
+ }).nullable();
457
+ var XAIToolCallsCompleteChatResponse = z3.array(
458
+ z3.object({
459
+ id: z3.string().min(1),
460
+ type: z3.enum(["function"]),
461
+ function: z3.object({
462
+ name: z3.string(),
463
+ arguments: z3.string()
464
+ })
465
+ })
466
+ );
467
+ var XAICompleteChatResponse = z3.object({
468
+ id: z3.string(),
469
+ object: z3.literal("chat.completion"),
470
+ created: z3.number(),
471
+ model: z3.string(),
472
+ system_fingerprint: z3.string().nullable().optional(),
473
+ choices: z3.array(
474
+ z3.object({
475
+ index: z3.number(),
476
+ message: z3.object({
477
+ role: z3.string(),
478
+ content: z3.string().nullable().optional(),
479
+ tool_calls: XAIToolCallsCompleteChatResponse.optional(),
480
+ refusal: z3.string().nullable().optional()
481
+ }),
482
+ logprobs: XAILogProb.optional(),
483
+ finish_reason: z3.string()
484
+ })
485
+ ),
486
+ usage: z3.object({
487
+ prompt_tokens: z3.number(),
488
+ completion_tokens: z3.number(),
489
+ total_tokens: z3.number(),
490
+ reasoning_tokens: z3.number().optional()
491
+ })
492
+ });
493
+ var XAIToolCallsStreamChatResponse = z3.array(
494
+ z3.object({
495
+ index: z3.number().int(),
496
+ id: z3.string().min(1).optional(),
497
+ type: z3.enum(["function"]).optional(),
498
+ function: z3.object({
499
+ name: z3.string().min(1).optional(),
500
+ arguments: z3.string().optional()
501
+ }).optional()
502
+ })
503
+ );
504
+ var XAIStreamChatResponse = z3.object({
505
+ id: z3.string(),
506
+ object: z3.string(),
507
+ created: z3.number(),
508
+ model: z3.string(),
509
+ system_fingerprint: z3.string().nullable().optional(),
510
+ choices: z3.array(
511
+ z3.object({
512
+ index: z3.number(),
513
+ delta: z3.object({
514
+ content: z3.string().nullable().optional(),
515
+ tool_calls: XAIToolCallsStreamChatResponse.optional(),
516
+ refusal: z3.string().nullable().optional()
517
+ }).or(z3.object({})),
518
+ logprobs: XAILogProb.optional(),
519
+ finish_reason: z3.string().nullable().optional()
520
+ })
521
+ ),
522
+ usage: z3.object({
523
+ prompt_tokens: z3.number(),
524
+ completion_tokens: z3.number(),
525
+ total_tokens: z3.number(),
526
+ reasoning_tokens: z3.number().optional()
527
+ }).nullable().optional()
528
+ });
529
+
530
+ // src/models/chat-models/types/request.chat-model.xai.ts
531
+ import { z as z4 } from "zod";
532
+ var XAIChatRequestTool = z4.object({
533
+ type: z4.literal("function"),
534
+ function: z4.object({
535
+ name: z4.string().min(1),
536
+ description: z4.string().min(1).optional(),
537
+ strict: z4.boolean().optional(),
538
+ parameters: z4.any()
539
+ })
540
+ });
541
+ var XAIChatRequestToolChoiceEnum = z4.enum(["none", "auto", "required"]);
542
+ var XAIChatRequestToolChoiceFunction = z4.object({
543
+ type: z4.literal("function"),
544
+ function: z4.object({
545
+ name: z4.string().min(1)
546
+ })
547
+ });
548
+ var XAIChatRequestResponseFormat = z4.object({
549
+ type: z4.enum(["text", "json_object"])
550
+ }).or(
551
+ z4.object({
552
+ type: z4.literal("json_schema"),
553
+ json_schema: z4.object({
554
+ name: z4.string().min(1),
555
+ description: z4.string().min(1).optional(),
556
+ strict: z4.boolean().optional(),
557
+ schema: z4.any()
558
+ })
559
+ })
560
+ );
561
+ var XAIChatRequestTextContent = z4.object({
562
+ text: z4.string().min(1),
563
+ type: z4.literal("text")
564
+ });
565
+ var XAIChatRequestImageContent = z4.object({
566
+ type: z4.literal("image_url"),
567
+ image_url: z4.object({
568
+ url: z4.string().min(1),
569
+ detail: z4.enum(["low", "high", "auto"]).optional()
570
+ })
571
+ });
572
+ var XAIChatRequestToolCallContent = z4.object({
573
+ id: z4.string().min(1),
574
+ type: z4.literal("function"),
575
+ function: z4.object({
576
+ name: z4.string().min(1),
577
+ arguments: z4.string().min(1)
578
+ })
579
+ });
580
+ var XAIChatRequestSystemMessage = z4.object({
581
+ role: z4.literal("system"),
582
+ content: z4.string().min(1).or(z4.array(XAIChatRequestTextContent).min(1))
583
+ });
584
+ var XAIChatRequestUserMessage = z4.object({
585
+ role: z4.literal("user"),
586
+ content: z4.string().min(1).or(z4.array(z4.union([XAIChatRequestTextContent, XAIChatRequestImageContent])).min(1))
587
+ });
588
+ var XAIChatRequestAssistantMessage = z4.object({
589
+ role: z4.literal("assistant"),
590
+ content: z4.string().min(1).or(z4.array(XAIChatRequestTextContent).min(1)).optional(),
591
+ tool_calls: z4.array(XAIChatRequestToolCallContent).min(1).optional()
592
+ });
593
+ var XAIChatRequestToolMessage = z4.object({
594
+ role: z4.literal("tool"),
595
+ tool_call_id: z4.string().min(1),
596
+ content: z4.string().min(1)
597
+ });
598
+ var XAIChatRequestMessage = z4.union([
599
+ XAIChatRequestSystemMessage,
600
+ XAIChatRequestUserMessage,
601
+ XAIChatRequestAssistantMessage,
602
+ XAIChatRequestToolMessage
603
+ ]);
604
+ var XAIChatRequest = z4.object({
605
+ model: z4.string().min(1).optional(),
606
+ messages: z4.array(XAIChatRequestMessage).min(1),
607
+ frequency_penalty: z4.number().min(-2).max(2).nullable().optional(),
608
+ logprobs: z4.boolean().nullable().optional(),
609
+ top_logprobs: z4.number().min(0).max(20).nullable().optional(),
610
+ max_tokens: z4.number().min(0).nullable().optional(),
611
+ presence_penalty: z4.number().min(-2).max(2).nullable().optional(),
612
+ response_format: XAIChatRequestResponseFormat.optional(),
613
+ seed: z4.number().nullable().optional(),
614
+ stop: z4.string().or(z4.array(z4.string()).max(4)).nullable().optional(),
615
+ temperature: z4.number().min(0).max(2).nullable().optional(),
616
+ top_p: z4.number().min(0).max(1).nullable().optional(),
617
+ tools: z4.array(XAIChatRequestTool).optional(),
618
+ tool_choice: XAIChatRequestToolChoiceEnum.or(XAIChatRequestToolChoiceFunction).optional(),
619
+ reasoning_effort: z4.enum(["low", "high"]).optional()
620
+ });
621
+
622
+ // src/models/chat-models/base-chat-model.xai.ts
623
+ var BaseChatModelOptions = z5.object({
624
+ modelName: z5.string(),
625
+ apiKey: z5.string(),
626
+ baseUrl: z5.string().url().optional(),
627
+ completeChatUrl: z5.string().url().optional(),
628
+ streamChatUrl: z5.string().url().optional()
629
+ });
630
+ var BaseChatModel = class {
631
+ constructor(modelSchema, options) {
632
+ this.version = "v1";
633
+ const parsedOptions = BaseChatModelOptions.parse(options);
634
+ this.modelSchema = modelSchema;
635
+ this.modelName = parsedOptions.modelName;
636
+ this.apiKey = parsedOptions.apiKey;
637
+ this.baseUrl = urlWithoutTrailingSlash(parsedOptions.baseUrl || XAI.baseUrl);
638
+ this.streamChatUrl = urlWithoutTrailingSlash(parsedOptions.streamChatUrl || `${this.baseUrl}/chat/completions`);
639
+ this.completeChatUrl = urlWithoutTrailingSlash(parsedOptions.completeChatUrl || `${this.baseUrl}/chat/completions`);
640
+ }
641
+ getDefaultBaseUrl() {
642
+ return this.baseUrl;
643
+ }
644
+ getDefaultHeaders() {
645
+ return {
646
+ Authorization: `Bearer ${this.apiKey}`,
647
+ "Content-Type": "application/json"
648
+ };
649
+ }
650
+ getDefaultParams() {
651
+ return {
652
+ model: this.modelName
653
+ };
654
+ }
655
+ getRetryDelay(responseHeaders) {
656
+ const parseDuration = (duration) => {
657
+ const regex = /(\d+)(h|m|s|ms)/g;
658
+ const timeUnits = {
659
+ h: 36e5,
660
+ m: 6e4,
661
+ s: 1e3,
662
+ ms: 1
663
+ };
664
+ let match;
665
+ let totalMs = 0;
666
+ while ((match = regex.exec(duration)) !== null) {
667
+ const value = parseInt(match[1]);
668
+ const unit = match[2];
669
+ totalMs += value * timeUnits[unit];
670
+ }
671
+ return totalMs;
672
+ };
673
+ let resetRequestsDelayMs = 0;
674
+ let resetTokensDelayMs = 0;
675
+ const shouldRetry = true;
676
+ if (responseHeaders["x-ratelimit-reset-requests"]) {
677
+ resetRequestsDelayMs = parseDuration(responseHeaders["x-ratelimit-reset-requests"]);
678
+ }
679
+ if (responseHeaders["x-ratelimit-reset-tokens"]) {
680
+ resetTokensDelayMs = parseDuration(responseHeaders["x-ratelimit-reset-tokens"]);
681
+ }
682
+ const delayMs = Math.max(resetRequestsDelayMs, resetTokensDelayMs);
683
+ return { shouldRetry, delayMs };
684
+ }
685
+ getTokenCount(messages) {
686
+ return messages.reduce((acc, message) => {
687
+ return acc + message.content.map((content) => content.modality === "text" ? content.value : "").join(" ").length;
688
+ }, 0);
689
+ }
690
+ transformModelRequest(request) {
691
+ const safeRequest = XAIChatRequest.safeParse(request);
692
+ if (!safeRequest.success) {
693
+ throw new InvalidModelRequestError({ info: "Invalid model request", cause: safeRequest.error });
694
+ }
695
+ const parsedRequest = safeRequest.data;
696
+ const modelName = parsedRequest.model;
697
+ if (parsedRequest.tool_choice && (!parsedRequest.tools || parsedRequest.tools.length === 0)) {
698
+ throw new InvalidModelRequestError({
699
+ info: `Invalid model request for model : '${this.modelName}'`,
700
+ cause: new Error("'tools' are required when 'tool_choice' is specified")
701
+ });
702
+ }
703
+ const _config = {};
704
+ if (parsedRequest.response_format) {
705
+ _config.responseFormat = parsedRequest.response_format.type;
706
+ if (parsedRequest.response_format.type === "json_schema") {
707
+ _config.responseSchema = {
708
+ name: parsedRequest.response_format.json_schema.name,
709
+ description: parsedRequest.response_format.json_schema.description || "",
710
+ strict: parsedRequest.response_format.json_schema.strict,
711
+ schema: parsedRequest.response_format.json_schema.schema
712
+ };
713
+ }
714
+ }
715
+ if (parsedRequest.tool_choice) {
716
+ if (typeof parsedRequest.tool_choice === "string") {
717
+ _config.toolChoice = parsedRequest.tool_choice;
718
+ } else {
719
+ _config.toolChoice = parsedRequest.tool_choice.function.name;
720
+ }
721
+ }
722
+ _config.seed = parsedRequest.seed;
723
+ _config.maxTokens = parsedRequest.max_tokens;
724
+ _config.temperature = parsedRequest.temperature;
725
+ _config.topP = parsedRequest.top_p;
726
+ _config.presencePenalty = parsedRequest.presence_penalty;
727
+ _config.frequencyPenalty = parsedRequest.frequency_penalty;
728
+ _config.stop = parsedRequest.stop;
729
+ _config.logProbs = parsedRequest.logprobs;
730
+ _config.topLogProbs = parsedRequest.top_logprobs;
731
+ _config.reasoningEffort = parsedRequest.reasoning_effort;
732
+ const config = Config().parse(removeUndefinedEntries(_config));
733
+ const messages = [];
734
+ const toolCallMap = {};
735
+ parsedRequest.messages.forEach((message) => {
736
+ const role = message.role;
737
+ switch (role) {
738
+ case "system":
739
+ {
740
+ const content = message.content;
741
+ if (typeof content === "string") {
742
+ messages.push({
743
+ role,
744
+ content: [{ modality: TextModalityLiteral2, value: content }]
745
+ });
746
+ } else {
747
+ const _content = content.map((c) => {
748
+ return { modality: TextModalityLiteral2, value: c.text };
749
+ });
750
+ messages.push({ role, content: _content });
751
+ }
752
+ }
753
+ break;
754
+ case "user":
755
+ {
756
+ const content = message.content;
757
+ if (typeof content === "string") {
758
+ messages.push({
759
+ role,
760
+ content: [{ modality: TextModalityLiteral2, value: content }]
761
+ });
762
+ } else {
763
+ const _content = content.map((c) => {
764
+ if (c.type === "text") {
765
+ return { modality: TextModalityLiteral2, value: c.text };
766
+ } else {
767
+ if (c.image_url.url.startsWith("data:")) {
768
+ return {
769
+ modality: ImageModalityLiteral2,
770
+ detail: c.image_url.detail || "auto",
771
+ value: {
772
+ type: Base64ImageContentTypeLiteral,
773
+ base64: c.image_url.url,
774
+ mediaType: getMimeTypeFromBase64(c.image_url.url)
775
+ }
776
+ };
777
+ } else {
778
+ return {
779
+ modality: ImageModalityLiteral2,
780
+ detail: c.image_url.detail || "auto",
781
+ value: { type: UrlImageContentTypeLiteral, url: c.image_url.url }
782
+ };
783
+ }
784
+ }
785
+ });
786
+ messages.push({ role, content: _content });
787
+ }
788
+ }
789
+ break;
790
+ case "assistant":
791
+ {
792
+ const assistantContent = [];
793
+ if (!message.content && !message.tool_calls) {
794
+ throw new InvalidModelRequestError({
795
+ info: `Invalid model request for model : '${this.modelName}'`,
796
+ cause: new Error("one of'content' or 'tool_calls' must be provided")
797
+ });
798
+ }
799
+ if (message.content) {
800
+ const content = message.content;
801
+ if (typeof content === "string") {
802
+ assistantContent.push({ modality: TextModalityLiteral2, value: content });
803
+ } else {
804
+ content.forEach((c) => {
805
+ assistantContent.push({ modality: TextModalityLiteral2, value: c.text });
806
+ });
807
+ }
808
+ }
809
+ if (message.tool_calls) {
810
+ const toolCalls = message.tool_calls;
811
+ toolCalls.forEach((toolCall, index) => {
812
+ const toolCallContent = {
813
+ modality: ToolCallModalityLiteral2,
814
+ id: toolCall.id,
815
+ index,
816
+ name: toolCall.function.name,
817
+ arguments: toolCall.function.arguments
818
+ };
819
+ assistantContent.push(toolCallContent);
820
+ toolCallMap[toolCallContent.id] = toolCallContent;
821
+ });
822
+ }
823
+ messages.push({ role, content: assistantContent });
824
+ }
825
+ break;
826
+ case "tool":
827
+ {
828
+ const toolResponse = message;
829
+ messages.push({
830
+ role,
831
+ content: [
832
+ {
833
+ modality: ToolResponseModalityLiteral2,
834
+ id: toolResponse.tool_call_id,
835
+ index: toolCallMap[toolResponse.tool_call_id].index,
836
+ name: toolCallMap[toolResponse.tool_call_id].name,
837
+ data: toolResponse.content
838
+ }
839
+ ]
840
+ });
841
+ }
842
+ break;
843
+ }
844
+ });
845
+ const tools = [];
846
+ if (parsedRequest.tools) {
847
+ parsedRequest.tools.forEach((tool) => {
848
+ tools.push({
849
+ type: "function",
850
+ definition: {
851
+ schema: {
852
+ name: tool.function.name,
853
+ description: tool.function.description || "",
854
+ strict: tool.function.strict,
855
+ parameters: tool.function.parameters
856
+ }
857
+ }
858
+ });
859
+ });
860
+ }
861
+ return {
862
+ modelName,
863
+ config,
864
+ messages,
865
+ tools: tools.length > 0 ? tools : void 0
866
+ };
867
+ }
868
+ // eslint-disable-next-line @typescript-eslint/no-unused-vars
869
+ transformConfig(config, messages, tools) {
870
+ const _toolChoice = config.toolChoice;
871
+ delete config.toolChoice;
872
+ const _parsedConfig = this.modelSchema.config.schema.safeParse(config);
873
+ if (!_parsedConfig.success) {
874
+ throw new InvalidConfigError({
875
+ info: `Invalid config for model : '${this.modelName}'`,
876
+ cause: _parsedConfig.error
877
+ });
878
+ }
879
+ const parsedConfig = _parsedConfig.data;
880
+ if (_toolChoice !== void 0) {
881
+ parsedConfig.toolChoice = _toolChoice;
882
+ }
883
+ Object.keys(parsedConfig).forEach((key) => {
884
+ if (!(key in this.modelSchema.config.def)) {
885
+ throw new InvalidConfigError({
886
+ info: `Invalid config for model : '${this.modelName}'`,
887
+ cause: new Error(`Invalid config key : '${key}',
888
+ available keys : [${Object.keys(this.modelSchema.config.def).join(", ")}]`)
889
+ });
890
+ }
891
+ });
892
+ const transformedConfig = Object.keys(parsedConfig).reduce((acc, key) => {
893
+ const def = this.modelSchema.config.def[key];
894
+ const paramKey = def.param;
895
+ const paramValue = parsedConfig[key];
896
+ if (paramKey === "max_tokens" && def.type === "range" && paramValue === 0) {
897
+ acc[paramKey] = def.max;
898
+ } else {
899
+ acc[paramKey] = paramValue;
900
+ }
901
+ return acc;
902
+ }, {});
903
+ if (transformedConfig.top_logprobs && !transformedConfig.logprobs) {
904
+ throw new InvalidConfigError({
905
+ info: `Invalid config for model : '${this.modelName}'`,
906
+ cause: new Error("'logprobs' must be 'true' when 'top_logprobs' is specified")
907
+ });
908
+ }
909
+ if ("tool_choice" in transformedConfig && transformedConfig.tool_choice !== void 0) {
910
+ const toolChoice2 = transformedConfig.tool_choice;
911
+ if (!tools || tools && tools.length === 0) {
912
+ throw new InvalidConfigError({
913
+ info: `Invalid config for model : '${this.modelName}'`,
914
+ cause: new Error("'tools' are required when 'toolChoice' is specified")
915
+ });
916
+ } else if (tools && tools.length > 0) {
917
+ const configToolChoice = this.modelSchema.config.def.toolChoice;
918
+ if (!configToolChoice.choices.includes(toolChoice2)) {
919
+ if (tools.map((tool) => tool.definition.schema.name).includes(toolChoice2)) {
920
+ transformedConfig.tool_choice = { type: "function", function: { name: toolChoice2 } };
921
+ } else {
922
+ throw new InvalidConfigError({
923
+ info: `Invalid config for model : '${this.modelName}'`,
924
+ cause: new Error(`toolChoice : '${toolChoice2}' is not part of provided 'tools' names or
925
+ one of [${configToolChoice.choices.join(", ")}]`)
926
+ });
927
+ }
928
+ }
929
+ }
930
+ }
931
+ if ("response_format" in transformedConfig && transformedConfig.response_format !== void 0) {
932
+ const responseFormat2 = transformedConfig.response_format;
933
+ if (responseFormat2 === "json_schema") {
934
+ if (!("response_schema" in transformedConfig)) {
935
+ throw new InvalidConfigError({
936
+ info: `Invalid config for model : '${this.modelName}'`,
937
+ cause: new Error("'responseSchema' is required in config when 'responseFormat' is 'json_schema'")
938
+ });
939
+ } else {
940
+ transformedConfig.response_format = {
941
+ type: "json_schema",
942
+ json_schema: transformedConfig.response_schema
943
+ };
944
+ delete transformedConfig.response_schema;
945
+ }
946
+ } else {
947
+ transformedConfig.response_format = { type: responseFormat2 };
948
+ }
949
+ }
950
+ return transformedConfig;
951
+ }
952
+ transformMessages(messages) {
953
+ if (!messages || messages && messages.length === 0) {
954
+ return { messages: [] };
955
+ }
956
+ const parsedMessages = messages.map((message) => {
957
+ const parsedMessage = Message().safeParse(message);
958
+ if (!parsedMessage.success) {
959
+ throw new InvalidMessagesError({ info: "Invalid messages", cause: parsedMessage.error });
960
+ }
961
+ return parsedMessage.data;
962
+ });
963
+ parsedMessages.forEach((message) => {
964
+ message.content.forEach((content) => {
965
+ if (!this.modelSchema.modalities.includes(content.modality)) {
966
+ throw new InvalidMessagesError({
967
+ info: `Invalid message content for model : '${this.modelName}'`,
968
+ cause: new Error(`model : '${this.modelName}' does not support modality : '${content.modality}',
969
+ available modalities : [${this.modelSchema.modalities.join(", ")}]`)
970
+ });
971
+ }
972
+ });
973
+ });
974
+ parsedMessages.forEach((message) => {
975
+ if (!Object.keys(this.modelSchema.roles).includes(message.role)) {
976
+ throw new InvalidMessagesError({
977
+ info: `Invalid message content for model : '${this.modelName}'`,
978
+ cause: new Error(`model : '${this.modelName}' does not support role : '${message.role}',
979
+ available roles : [${Object.keys(this.modelSchema.roles).join(", ")}]`)
980
+ });
981
+ }
982
+ });
983
+ parsedMessages.forEach((message) => {
984
+ message.content = message.content.filter(
985
+ (content) => content.modality !== "error" && content.modality !== "search-result"
986
+ );
987
+ });
988
+ const transformedMessages = parsedMessages.map((message) => {
989
+ switch (message.role) {
990
+ case SystemRoleLiteral2: {
991
+ const textContent = [];
992
+ message.content.forEach((content) => {
993
+ if (content.modality === TextModalityLiteral2) {
994
+ textContent.push({ type: "text", text: content.value });
995
+ } else {
996
+ throw new InvalidMessagesError({
997
+ info: `Invalid message content for model : '${this.modelName}'`,
998
+ cause: new Error(`model : '${this.modelName}' does not support modality : '${content.modality}' for role : 'system'`)
999
+ });
1000
+ }
1001
+ });
1002
+ return { role: "system", content: textContent };
1003
+ }
1004
+ case UserRoleLiteral2: {
1005
+ const userContent = [];
1006
+ message.content.forEach((content) => {
1007
+ if (content.modality === TextModalityLiteral2) {
1008
+ userContent.push({ type: "text", text: content.value });
1009
+ } else if (content.modality === ImageModalityLiteral2) {
1010
+ if (content.value.type === Base64ImageContentTypeLiteral) {
1011
+ userContent.push({
1012
+ type: "image_url",
1013
+ image_url: { url: content.value.base64, detail: content.detail }
1014
+ });
1015
+ } else if (content.value.type === UrlImageContentTypeLiteral) {
1016
+ userContent.push({
1017
+ type: "image_url",
1018
+ image_url: { url: content.value.url, detail: content.detail }
1019
+ });
1020
+ }
1021
+ } else {
1022
+ throw new InvalidMessagesError({
1023
+ info: `Invalid message content for model : '${this.modelName}'`,
1024
+ cause: new Error(`model : '${this.modelName}' does not support modality : '${content.modality}' for role : 'user'`)
1025
+ });
1026
+ }
1027
+ });
1028
+ return { role: "user", content: userContent };
1029
+ }
1030
+ case AssistantRoleLiteral2: {
1031
+ const textContent = [];
1032
+ const toolCalls = [];
1033
+ message.content.forEach((content) => {
1034
+ if (content.modality === TextModalityLiteral2) {
1035
+ textContent.push({ type: "text", text: content.value });
1036
+ } else if (content.modality === ToolCallModalityLiteral2) {
1037
+ toolCalls.push({
1038
+ id: content.id,
1039
+ type: "function",
1040
+ function: { name: content.name, arguments: content.arguments }
1041
+ });
1042
+ } else {
1043
+ throw new InvalidMessagesError({
1044
+ info: `Invalid message content for model : '${this.modelName}'`,
1045
+ cause: new Error(`model : '${this.modelName}' does not support modality : '${content.modality}' for role : 'assistant'`)
1046
+ });
1047
+ }
1048
+ });
1049
+ if (textContent.length > 0 && toolCalls.length > 0) {
1050
+ return { role: "assistant", content: textContent, tool_calls: toolCalls };
1051
+ } else if (textContent.length > 0) {
1052
+ return { role: "assistant", content: textContent };
1053
+ } else if (toolCalls.length > 0) {
1054
+ return { role: "assistant", tool_calls: toolCalls };
1055
+ } else {
1056
+ throw new InvalidMessagesError({
1057
+ info: `Invalid message content for model : '${this.modelName}'`,
1058
+ cause: new Error("assistant message must have at least one text or tool_call content")
1059
+ });
1060
+ }
1061
+ }
1062
+ case ToolRoleLiteral2: {
1063
+ const toolResponseContent = message.content.find(
1064
+ (content) => content.modality === ToolResponseModalityLiteral2
1065
+ );
1066
+ if (!toolResponseContent) {
1067
+ throw new InvalidMessagesError({
1068
+ info: `Invalid message content for model : '${this.modelName}'`,
1069
+ cause: new Error("tool message must have tool_response content")
1070
+ });
1071
+ }
1072
+ return {
1073
+ role: "tool",
1074
+ tool_call_id: toolResponseContent.id,
1075
+ content: typeof toolResponseContent.data === "string" ? toolResponseContent.data : JSON.stringify(toolResponseContent.data)
1076
+ };
1077
+ }
1078
+ default:
1079
+ throw new InvalidMessagesError({
1080
+ info: `Invalid message content for model : '${this.modelName}'`,
1081
+ cause: new Error(`model : '${this.modelName}' does not support role : '${message.role}'`)
1082
+ });
1083
+ }
1084
+ });
1085
+ return { messages: transformedMessages };
1086
+ }
1087
+ transformTools(tools) {
1088
+ if (!tools || tools && tools.length === 0) {
1089
+ return {};
1090
+ }
1091
+ if (!this.modelSchema.modalities.includes(ToolCallModalityLiteral2)) {
1092
+ throw new InvalidToolsError({
1093
+ info: `Invalid tools for model : '${this.modelName}'`,
1094
+ cause: new Error(`model : '${this.modelName}' does not support tools`)
1095
+ });
1096
+ }
1097
+ const parsedTools = tools.map((tool) => {
1098
+ const parsedTool = Tool().safeParse(tool);
1099
+ if (!parsedTool.success) {
1100
+ throw new InvalidToolsError({ info: "Invalid tools", cause: parsedTool.error });
1101
+ }
1102
+ return parsedTool.data;
1103
+ });
1104
+ const transformedTools = parsedTools.map((tool) => ({
1105
+ type: "function",
1106
+ function: tool.definition.schema
1107
+ }));
1108
+ return { tools: transformedTools };
1109
+ }
1110
+ // eslint-disable-next-line @typescript-eslint/no-unused-vars
1111
+ getCompleteChatUrl(config, messages, tools) {
1112
+ return __async(this, null, function* () {
1113
+ return new Promise((resolve) => {
1114
+ resolve(this.completeChatUrl);
1115
+ });
1116
+ });
1117
+ }
1118
+ // eslint-disable-next-line @typescript-eslint/no-unused-vars
1119
+ getCompleteChatHeaders(config, messages, tools) {
1120
+ return __async(this, null, function* () {
1121
+ return new Promise((resolve) => {
1122
+ resolve(this.getDefaultHeaders());
1123
+ });
1124
+ });
1125
+ }
1126
+ getCompleteChatData(config, messages, tools) {
1127
+ return __async(this, null, function* () {
1128
+ const transformedConfig = this.transformConfig(config, messages, tools);
1129
+ const transformedMessages = this.transformMessages(messages);
1130
+ if (transformedMessages.messages && transformedMessages.messages.length === 0) {
1131
+ throw new InvalidMessagesError({
1132
+ info: "Messages are required",
1133
+ cause: new Error("Messages are required")
1134
+ });
1135
+ }
1136
+ const transformedTools = tools ? this.transformTools(tools) : {};
1137
+ return new Promise((resolve) => {
1138
+ resolve(__spreadValues(__spreadValues(__spreadValues(__spreadValues({}, this.getDefaultParams()), transformedConfig), transformedMessages), transformedTools));
1139
+ });
1140
+ });
1141
+ }
1142
+ transformCompleteChatResponse(response) {
1143
+ const parsedResponse = XAICompleteChatResponse.safeParse(response);
1144
+ if (!parsedResponse.success) {
1145
+ throw new ModelResponseError({ info: "Invalid response from model", cause: parsedResponse.error });
1146
+ }
1147
+ const data = parsedResponse.data;
1148
+ const choice = data.choices[0];
1149
+ const messages = [];
1150
+ const assistantContent = [];
1151
+ if (choice.message.content) {
1152
+ assistantContent.push(createTextContent(choice.message.content));
1153
+ }
1154
+ if (choice.message.tool_calls) {
1155
+ choice.message.tool_calls.forEach((toolCall, index) => {
1156
+ assistantContent.push(createToolCallContent(index, toolCall.id, toolCall.function.name, toolCall.function.arguments));
1157
+ });
1158
+ }
1159
+ if (assistantContent.length > 0) {
1160
+ messages.push({ role: AssistantRoleLiteral2, content: assistantContent });
1161
+ }
1162
+ const usage = {
1163
+ promptTokens: data.usage.prompt_tokens,
1164
+ completionTokens: data.usage.completion_tokens,
1165
+ totalTokens: data.usage.total_tokens
1166
+ };
1167
+ let logProbs2;
1168
+ if (choice.logprobs && choice.logprobs.content) {
1169
+ logProbs2 = choice.logprobs.content.map((logProb) => ({
1170
+ token: logProb.token,
1171
+ logProb: logProb.logprob,
1172
+ bytes: logProb.bytes,
1173
+ topLogProbs: logProb.top_logprobs.map((topLogProb) => ({
1174
+ token: topLogProb.token,
1175
+ logProb: topLogProb.logprob,
1176
+ bytes: topLogProb.bytes
1177
+ }))
1178
+ }));
1179
+ }
1180
+ return {
1181
+ messages,
1182
+ usage,
1183
+ logProbs: logProbs2
1184
+ };
1185
+ }
1186
+ // eslint-disable-next-line @typescript-eslint/no-unused-vars
1187
+ getStreamChatUrl(config, messages, tools) {
1188
+ return __async(this, null, function* () {
1189
+ return new Promise((resolve) => {
1190
+ resolve(this.streamChatUrl);
1191
+ });
1192
+ });
1193
+ }
1194
+ // eslint-disable-next-line @typescript-eslint/no-unused-vars
1195
+ getStreamChatHeaders(config, messages, tools) {
1196
+ return __async(this, null, function* () {
1197
+ return new Promise((resolve) => {
1198
+ resolve(this.getDefaultHeaders());
1199
+ });
1200
+ });
1201
+ }
1202
+ getStreamChatData(config, messages, tools) {
1203
+ return __async(this, null, function* () {
1204
+ const transformedConfig = this.transformConfig(config, messages, tools);
1205
+ const transformedMessages = this.transformMessages(messages);
1206
+ if (transformedMessages.messages && transformedMessages.messages.length === 0) {
1207
+ throw new InvalidMessagesError({
1208
+ info: "Messages are required",
1209
+ cause: new Error("Messages are required")
1210
+ });
1211
+ }
1212
+ const transformedTools = tools ? this.transformTools(tools) : {};
1213
+ return new Promise((resolve) => {
1214
+ resolve(__spreadValues(__spreadValues(__spreadValues(__spreadValues({
1215
+ stream: true,
1216
+ stream_options: { include_usage: true }
1217
+ }, this.getDefaultParams()), transformedConfig), transformedMessages), transformedTools));
1218
+ });
1219
+ });
1220
+ }
1221
+ transformStreamChatResponseChunk(chunk, buffer) {
1222
+ return __asyncGenerator(this, null, function* () {
1223
+ var _a, _b;
1224
+ const data = buffer + chunk;
1225
+ const lines = [];
1226
+ let newBuffer = "";
1227
+ let currentIndex = 0;
1228
+ while (currentIndex < data.length) {
1229
+ const newlineIndex = data.indexOf("\n", currentIndex);
1230
+ if (newlineIndex === -1) {
1231
+ newBuffer = data.substring(currentIndex);
1232
+ break;
1233
+ } else {
1234
+ const line = data.substring(currentIndex, newlineIndex).trim();
1235
+ if (line) {
1236
+ lines.push(line);
1237
+ }
1238
+ currentIndex = newlineIndex + 1;
1239
+ }
1240
+ }
1241
+ for (const line of lines) {
1242
+ if (line === "data: [DONE]") {
1243
+ return;
1244
+ }
1245
+ if (line.startsWith("data: ")) {
1246
+ const jsonStr = line.substring("data: ".length);
1247
+ try {
1248
+ const structuredLine = JSON.parse(jsonStr);
1249
+ const safe = XAIStreamChatResponse.safeParse(structuredLine);
1250
+ if (safe.success) {
1251
+ const partialResponse = { partialMessages: [] };
1252
+ const parsedResponse = safe.data;
1253
+ if (parsedResponse.choices.length > 0) {
1254
+ const message = parsedResponse.choices[0].delta;
1255
+ if (message !== void 0 && Object.keys(message).length !== 0) {
1256
+ if ("content" in message && message.content !== null) {
1257
+ partialResponse.partialMessages.push(createPartialTextMessage(AssistantRoleLiteral2, message.content));
1258
+ } else if ("tool_calls" in message && message.tool_calls !== void 0) {
1259
+ const toolCall = message.tool_calls.at(0);
1260
+ partialResponse.partialMessages.push(
1261
+ createPartialToolCallMessage(
1262
+ AssistantRoleLiteral2,
1263
+ toolCall.index,
1264
+ toolCall.id,
1265
+ (_a = toolCall.function) == null ? void 0 : _a.name,
1266
+ (_b = toolCall.function) == null ? void 0 : _b.arguments
1267
+ )
1268
+ );
1269
+ }
1270
+ }
1271
+ }
1272
+ if (parsedResponse.usage) {
1273
+ partialResponse.usage = {
1274
+ promptTokens: parsedResponse.usage.prompt_tokens,
1275
+ completionTokens: parsedResponse.usage.completion_tokens,
1276
+ totalTokens: parsedResponse.usage.total_tokens
1277
+ };
1278
+ }
1279
+ yield { partialResponse, buffer: newBuffer };
1280
+ } else {
1281
+ throw new ModelResponseError({ info: "Invalid response from model", cause: safe.error });
1282
+ }
1283
+ } catch (error) {
1284
+ throw new ModelResponseError({
1285
+ info: `Malformed JSON received in stream: ${jsonStr}`,
1286
+ cause: error
1287
+ });
1288
+ }
1289
+ }
1290
+ }
1291
+ yield { partialResponse: { partialMessages: [] }, buffer: newBuffer };
1292
+ });
1293
+ }
1294
  // Proxy variant delegates unchanged to the regular stream-chunk
  // transformer; the extra proxy request data/headers/query are accepted
  // for interface parity but unused. Delegation goes through the
  // transpiler's __asyncGenerator/__yieldStar helpers.
  // eslint-disable-next-line @typescript-eslint/no-unused-vars
  transformProxyStreamChatResponseChunk(chunk, buffer, data, headers, query) {
    return __asyncGenerator(this, null, function* () {
      yield* __yieldStar(this.transformStreamChatResponseChunk(chunk, buffer));
    });
  }
1300
+ // eslint-disable-next-line @typescript-eslint/no-unused-vars
1301
+ getProxyStreamChatUrl(data, headers, query) {
1302
+ return __async(this, null, function* () {
1303
+ return new Promise((resolve) => {
1304
+ resolve(this.streamChatUrl);
1305
+ });
1306
+ });
1307
+ }
1308
+ // eslint-disable-next-line @typescript-eslint/no-unused-vars
1309
+ getProxyCompleteChatUrl(data, headers, query) {
1310
+ return __async(this, null, function* () {
1311
+ return new Promise((resolve) => {
1312
+ resolve(this.completeChatUrl);
1313
+ });
1314
+ });
1315
+ }
1316
+ // eslint-disable-next-line @typescript-eslint/no-unused-vars
1317
+ getProxyCompleteChatHeaders(data, headers, query) {
1318
+ return __async(this, null, function* () {
1319
+ if (!headers) {
1320
+ return {};
1321
+ }
1322
+ const sanitizedHeaders = __spreadValues({}, headers);
1323
+ delete sanitizedHeaders.host;
1324
+ delete sanitizedHeaders["content-length"];
1325
+ return sanitizedHeaders;
1326
+ });
1327
+ }
1328
+ // eslint-disable-next-line @typescript-eslint/no-unused-vars
1329
+ getProxyStreamChatHeaders(data, headers, query) {
1330
+ return __async(this, null, function* () {
1331
+ return yield this.getProxyCompleteChatHeaders(data, headers, query);
1332
+ });
1333
+ }
1334
+ getModelPricing() {
1335
+ if (!(this.modelName in pricing_default)) {
1336
+ throw new ModelResponseError({
1337
+ info: `Invalid model pricing for model : '${this.modelName}'`,
1338
+ cause: new Error(`No pricing configuration found for model "${this.modelName}"`)
1339
+ });
1340
+ }
1341
+ const entry = pricing_default[this.modelName];
1342
+ return entry;
1343
+ }
1344
+ };
1345
+
1346
+ // src/models/chat-models/grok-2.xai.ts
1347
+ import { ChatModelSchema } from "@adaline/provider";
1348
+
1349
+ // src/configs/chat-model/index.ts
1350
// Bundler-generated namespace shim for src/configs/chat-model/index.ts.
// __export installs lazy getters, so every name remains a live binding to
// the vars declared later in this file; rewriting this as a plain object
// literal would capture the values too early (before they are assigned).
var chat_model_exports = {};
__export(chat_model_exports, {
  ChatModelBaseConfigDef: () => ChatModelBaseConfigDef,
  ChatModelBaseConfigSchema: () => ChatModelBaseConfigSchema,
  ChatModelMiniReasoningConfigDef: () => ChatModelMiniReasoningConfigDef,
  ChatModelMiniReasoningConfigSchema: () => ChatModelMiniReasoningConfigSchema,
  ChatModelReasoningConfigDef: () => ChatModelReasoningConfigDef,
  ChatModelReasoningConfigSchema: () => ChatModelReasoningConfigSchema,
  ChatModelResponseSchemaConfigDef: () => ChatModelResponseSchemaConfigDef,
  ChatModelResponseSchemaConfigSchema: () => ChatModelResponseSchemaConfigSchema,
  frequencyPenalty: () => frequencyPenalty,
  logProbs: () => logProbs,
  maxTokens: () => maxTokens,
  presencePenalty: () => presencePenalty,
  reasoningEffort: () => reasoningEffort,
  seed: () => seed,
  stop: () => stop,
  temperature: () => temperature,
  toolChoice: () => toolChoice,
  topLogProbs: () => topLogProbs,
  topP: () => topP
});
1372
+
1373
+ // src/configs/chat-model/base.config.chat-model.xai.ts
1374
+ import { z as z6 } from "zod";
1375
+
1376
+ // src/configs/chat-model/common.config.chat-model.xai.ts
1377
+ import { CHAT_CONFIG, MultiStringConfigItem, RangeConfigItem, SelectBooleanConfigItem, SelectStringConfigItem } from "@adaline/provider";
1378
// Shared config items for xAI chat models. Each item couples the provider
// wire-format `param` name with UI metadata (title/description) and its
// range/choice constraints; model-specific limits are injected via the
// factory parameters (maxOutputTokens, maxSequences).
var temperature = RangeConfigItem({
  param: "temperature",
  title: CHAT_CONFIG.TEMPERATURE.title,
  description: CHAT_CONFIG.TEMPERATURE.description,
  min: 0,
  max: 2,
  step: 0.01,
  default: 1
});
// Factory: the output-token cap varies per model.
// NOTE(review): default 0 presumably means "unset/omit max_tokens" — confirm
// the request builder drops 0 before sending.
var maxTokens = (maxOutputTokens) => RangeConfigItem({
  param: "max_tokens",
  title: CHAT_CONFIG.MAX_TOKENS.title,
  description: CHAT_CONFIG.MAX_TOKENS.description,
  min: 0,
  max: maxOutputTokens,
  step: 1,
  default: 0
});
// Factory: stop sequences, capped at maxSequences entries.
var stop = (maxSequences) => MultiStringConfigItem({
  param: "stop",
  title: CHAT_CONFIG.STOP(maxSequences).title,
  description: CHAT_CONFIG.STOP(maxSequences).description,
  max: maxSequences
});
var topP = RangeConfigItem({
  param: "top_p",
  title: CHAT_CONFIG.TOP_P.title,
  description: CHAT_CONFIG.TOP_P.description,
  min: 0,
  max: 1,
  step: 0.01,
  default: 1
});
var frequencyPenalty = RangeConfigItem({
  param: "frequency_penalty",
  title: CHAT_CONFIG.FREQUENCY_PENALTY.title,
  description: CHAT_CONFIG.FREQUENCY_PENALTY.description,
  min: -2,
  max: 2,
  step: 0.01,
  default: 0
});
var presencePenalty = RangeConfigItem({
  param: "presence_penalty",
  title: CHAT_CONFIG.PRESENCE_PENALTY.title,
  description: CHAT_CONFIG.PRESENCE_PENALTY.description,
  min: -2,
  max: 2,
  step: 0.01,
  default: 0
});
// Seed default 0 is treated as "unset" by the config schemas below, which
// transform 0 to undefined before building the request.
var seed = RangeConfigItem({
  param: "seed",
  title: CHAT_CONFIG.SEED.title,
  description: CHAT_CONFIG.SEED.description,
  min: 0,
  max: 1e6,
  step: 1,
  default: 0
});
var logProbs = SelectBooleanConfigItem({
  param: "logprobs",
  title: CHAT_CONFIG.LOG_PROBS.title,
  description: CHAT_CONFIG.LOG_PROBS.description,
  default: false
});
var topLogProbs = RangeConfigItem({
  param: "top_logprobs",
  title: CHAT_CONFIG.TOP_LOG_PROBS.title,
  description: CHAT_CONFIG.TOP_LOG_PROBS.description,
  min: 0,
  max: 20,
  step: 1,
  default: 0
});
var toolChoice = SelectStringConfigItem({
  param: "tool_choice",
  title: "Tool choice",
  description: "Controls which (if any) tool is called by the model. 'none' means the model will not call a function. 'auto' means the model can pick between generating a message or calling a tool.",
  default: "auto",
  choices: ["auto", "required", "none"]
});
// Only wired into the mini-reasoning config schema below.
var reasoningEffort = SelectStringConfigItem({
  param: "reasoning_effort",
  title: "Reasoning Effort",
  description: "Controls how much time the model spends thinking before responding. 'low' uses minimal thinking time for quick responses, 'high' uses maximum thinking time for complex problems. Only supported by grok-3-mini models.",
  default: "low",
  choices: ["low", "high"]
});
1467
+
1468
+ // src/configs/chat-model/base.config.chat-model.xai.ts
1469
// Base config for non-reasoning xAI chat models: full sampling controls,
// stop sequences, penalties, seed, log-probs, and tool choice.
// The schema validates user config; the def supplies UI metadata.
var ChatModelBaseConfigSchema = (maxOutputTokens, maxSequences) => z6.object({
  temperature: temperature.schema,
  maxTokens: maxTokens(maxOutputTokens).schema,
  stop: stop(maxSequences).schema,
  topP: topP.schema,
  frequencyPenalty: frequencyPenalty.schema,
  presencePenalty: presencePenalty.schema,
  // 0 means "unset": drop the seed from the outgoing request.
  seed: seed.schema.transform((value) => value === 0 ? void 0 : value),
  logProbs: logProbs.schema,
  topLogProbs: topLogProbs.schema,
  toolChoice: toolChoice.schema
});
var ChatModelBaseConfigDef = (maxOutputTokens, maxSequences) => ({
  temperature: temperature.def,
  maxTokens: maxTokens(maxOutputTokens).def,
  stop: stop(maxSequences).def,
  topP: topP.def,
  frequencyPenalty: frequencyPenalty.def,
  presencePenalty: presencePenalty.def,
  seed: seed.def,
  logProbs: logProbs.def,
  topLogProbs: topLogProbs.def,
  toolChoice: toolChoice.def
});
1493
+
1494
+ // src/configs/chat-model/reasoning.config.chat-model.xai.ts
1495
+ import { z as z7 } from "zod";
1496
// Reasoning-model config (grok-4 family): like the base config but without
// stop sequences and penalties.
var ChatModelReasoningConfigSchema = (maxOutputTokens) => z7.object({
  temperature: temperature.schema,
  maxTokens: maxTokens(maxOutputTokens).schema,
  topP: topP.schema,
  // 0 means "unset": drop the seed from the outgoing request.
  seed: seed.schema.transform((value) => value === 0 ? void 0 : value),
  logProbs: logProbs.schema,
  topLogProbs: topLogProbs.schema,
  toolChoice: toolChoice.schema
});
var ChatModelReasoningConfigDef = (maxOutputTokens) => ({
  temperature: temperature.def,
  maxTokens: maxTokens(maxOutputTokens).def,
  topP: topP.def,
  seed: seed.def,
  logProbs: logProbs.def,
  topLogProbs: topLogProbs.def,
  toolChoice: toolChoice.def
});
// Mini-reasoning config (grok-3-mini): reasoning config plus the
// reasoning_effort selector. _maxSequences is accepted but unused, kept
// for call-signature parity with the other schema factories.
var ChatModelMiniReasoningConfigSchema = (maxOutputTokens, _maxSequences) => z7.object({
  temperature: temperature.schema,
  maxTokens: maxTokens(maxOutputTokens).schema,
  topP: topP.schema,
  seed: seed.schema.transform((value) => value === 0 ? void 0 : value),
  logProbs: logProbs.schema,
  topLogProbs: topLogProbs.schema,
  toolChoice: toolChoice.schema,
  reasoningEffort: reasoningEffort.schema
});
var ChatModelMiniReasoningConfigDef = (maxOutputTokens) => ({
  temperature: temperature.def,
  maxTokens: maxTokens(maxOutputTokens).def,
  topP: topP.def,
  seed: seed.def,
  logProbs: logProbs.def,
  topLogProbs: topLogProbs.def,
  toolChoice: toolChoice.def,
  reasoningEffort: reasoningEffort.def
});
1534
+
1535
+ // src/configs/chat-model/response-schema.config.chat-model.xai.ts
1536
+ import { CHAT_CONFIG as CHAT_CONFIG2, ObjectSchemaConfigItem, SelectStringConfigItem as SelectStringConfigItem2 } from "@adaline/provider";
1537
+ import { ResponseSchema } from "@adaline/types";
1538
// Structured-output additions: a response_format selector plus an optional
// JSON schema object, layered on top of the base config.
var responseSchema = ObjectSchemaConfigItem({
  param: "response_schema",
  title: CHAT_CONFIG2.RESPONSE_SCHEMA.title,
  description: CHAT_CONFIG2.RESPONSE_SCHEMA.description,
  objectSchema: ResponseSchema
});
var responseFormat = SelectStringConfigItem2({
  param: "response_format",
  title: CHAT_CONFIG2.RESPONSE_FORMAT_WITH_SCHEMA.title,
  description: CHAT_CONFIG2.RESPONSE_FORMAT_WITH_SCHEMA.description,
  default: "text",
  choices: ["text", "json_object", "json_schema"]
});
// Base config def/schema extended with the two response-format items.
var ChatModelResponseSchemaConfigDef = (maxOutputTokens, maxSequences) => __spreadProps(__spreadValues({}, ChatModelBaseConfigDef(maxOutputTokens, maxSequences)), {
  responseFormat: responseFormat.def,
  responseSchema: responseSchema.def
});
var ChatModelResponseSchemaConfigSchema = (maxOutputTokens, maxSequences) => ChatModelBaseConfigSchema(maxOutputTokens, maxSequences).extend({
  responseFormat: responseFormat.schema,
  responseSchema: responseSchema.schema
});
1559
+
1560
+ // src/models/chat-models/grok-2.xai.ts
1561
// Grok-2: 131,072 input / 32,768 output tokens, text+tool modalities,
// response-format/schema config (4 stop sequences).
var Grok_2_Literal = "grok-2";
var Grok_2_Description = "Grok-2 is xAI's flagship language model with strong reasoning capabilities and a 131K context window.";
var Grok_2_Schema = ChatModelSchema(XAIChatModelRoles, XAIChatModelTextToolModalitiesEnum).parse({
  name: Grok_2_Literal,
  description: Grok_2_Description,
  maxInputTokens: 131072,
  maxOutputTokens: 32768,
  roles: XAIChatModelRolesMap,
  modalities: XAIChatModelTextToolModalities,
  config: {
    def: chat_model_exports.ChatModelResponseSchemaConfigDef(32768, 4),
    schema: chat_model_exports.ChatModelResponseSchemaConfigSchema(32768, 4)
  },
  price: pricing_default[Grok_2_Literal]
});
var Grok_2_Options = BaseChatModelOptions;
// Thin subclass binding the grok-2 schema to the shared BaseChatModel.
var Grok_2 = class extends BaseChatModel {
  constructor(options) {
    super(Grok_2_Schema, options);
  }
};

// src/models/chat-models/grok-2-latest.xai.ts
import { ChatModelSchema as ChatModelSchema2 } from "@adaline/provider";
// grok-2-latest: rolling alias; identical limits and config to grok-2.
var Grok_2_Latest_Literal = "grok-2-latest";
var Grok_2_Latest_Description = "Grok-2 Latest is the most recent version of xAI's Grok-2 model with a 131K context window.";
var Grok_2_Latest_Schema = ChatModelSchema2(XAIChatModelRoles, XAIChatModelTextToolModalitiesEnum).parse({
  name: Grok_2_Latest_Literal,
  description: Grok_2_Latest_Description,
  maxInputTokens: 131072,
  maxOutputTokens: 32768,
  roles: XAIChatModelRolesMap,
  modalities: XAIChatModelTextToolModalities,
  config: {
    def: chat_model_exports.ChatModelResponseSchemaConfigDef(32768, 4),
    schema: chat_model_exports.ChatModelResponseSchemaConfigSchema(32768, 4)
  },
  price: pricing_default[Grok_2_Latest_Literal]
});
var Grok_2_Latest_Options = BaseChatModelOptions;
var Grok_2_Latest = class extends BaseChatModel {
  constructor(options) {
    super(Grok_2_Latest_Schema, options);
  }
};

// src/models/chat-models/grok-2-1212.xai.ts
import { ChatModelSchema as ChatModelSchema3 } from "@adaline/provider";
// grok-2-1212: pinned release; identical limits and config to grok-2.
var Grok_2_1212_Literal = "grok-2-1212";
var Grok_2_1212_Description = "Grok-2 version 1212 is a specific release of xAI's Grok-2 model with a 131K context window.";
var Grok_2_1212_Schema = ChatModelSchema3(XAIChatModelRoles, XAIChatModelTextToolModalitiesEnum).parse({
  name: Grok_2_1212_Literal,
  description: Grok_2_1212_Description,
  maxInputTokens: 131072,
  maxOutputTokens: 32768,
  roles: XAIChatModelRolesMap,
  modalities: XAIChatModelTextToolModalities,
  config: {
    def: chat_model_exports.ChatModelResponseSchemaConfigDef(32768, 4),
    schema: chat_model_exports.ChatModelResponseSchemaConfigSchema(32768, 4)
  },
  price: pricing_default[Grok_2_1212_Literal]
});
var Grok_2_1212_Options = BaseChatModelOptions;
var Grok_2_1212 = class extends BaseChatModel {
  constructor(options) {
    super(Grok_2_1212_Schema, options);
  }
};
1630
+
1631
// src/models/chat-models/grok-3-beta.xai.ts
import { ChatModelSchema as ChatModelSchema4 } from "@adaline/provider";
// grok-3-beta: 131,072 input / 131,072 output tokens, text+tool modalities,
// response-format/schema config (4 stop sequences).
var Grok_3_Beta_Literal = "grok-3-beta";
var Grok_3_Beta_Description = "Grok-3 Beta is xAI's latest flagship model for enterprise tasks with a 131K context window.";
var Grok_3_Beta_Schema = ChatModelSchema4(XAIChatModelRoles, XAIChatModelTextToolModalitiesEnum).parse({
  name: Grok_3_Beta_Literal,
  description: Grok_3_Beta_Description,
  maxInputTokens: 131072,
  maxOutputTokens: 131072,
  roles: XAIChatModelRolesMap,
  modalities: XAIChatModelTextToolModalities,
  config: {
    def: chat_model_exports.ChatModelResponseSchemaConfigDef(131072, 4),
    schema: chat_model_exports.ChatModelResponseSchemaConfigSchema(131072, 4)
  },
  price: pricing_default[Grok_3_Beta_Literal]
});
var Grok_3_Beta_Options = BaseChatModelOptions;
var Grok_3_Beta = class extends BaseChatModel {
  constructor(options) {
    super(Grok_3_Beta_Schema, options);
  }
};

// src/models/chat-models/grok-3-fast-beta.xai.ts
import { ChatModelSchema as ChatModelSchema5 } from "@adaline/provider";
// grok-3-fast-beta: same limits/config as grok-3-beta.
var Grok_3_Fast_Beta_Literal = "grok-3-fast-beta";
var Grok_3_Fast_Beta_Description = "Grok-3 Fast Beta is xAI's fastest flagship model optimized for speed with a 131K context window.";
var Grok_3_Fast_Beta_Schema = ChatModelSchema5(XAIChatModelRoles, XAIChatModelTextToolModalitiesEnum).parse({
  name: Grok_3_Fast_Beta_Literal,
  description: Grok_3_Fast_Beta_Description,
  maxInputTokens: 131072,
  maxOutputTokens: 131072,
  roles: XAIChatModelRolesMap,
  modalities: XAIChatModelTextToolModalities,
  config: {
    def: chat_model_exports.ChatModelResponseSchemaConfigDef(131072, 4),
    schema: chat_model_exports.ChatModelResponseSchemaConfigSchema(131072, 4)
  },
  price: pricing_default[Grok_3_Fast_Beta_Literal]
});
var Grok_3_Fast_Beta_Options = BaseChatModelOptions;
var Grok_3_Fast_Beta = class extends BaseChatModel {
  constructor(options) {
    super(Grok_3_Fast_Beta_Schema, options);
  }
};

// src/models/chat-models/grok-3-mini-beta.xai.ts
import { ChatModelSchema as ChatModelSchema6 } from "@adaline/provider";
// grok-3-mini-beta: uses the mini-reasoning config, which exposes the
// reasoning_effort parameter.
var Grok_3_Mini_Beta_Literal = "grok-3-mini-beta";
var Grok_3_Mini_Beta_Description = "Grok-3 Mini Beta is xAI's lightweight reasoning model with support for reasoning_effort parameter and a 131K context window.";
var Grok_3_Mini_Beta_Schema = ChatModelSchema6(XAIChatModelRoles, XAIChatModelTextToolModalitiesEnum).parse({
  name: Grok_3_Mini_Beta_Literal,
  description: Grok_3_Mini_Beta_Description,
  maxInputTokens: 131072,
  maxOutputTokens: 131072,
  roles: XAIChatModelRolesMap,
  modalities: XAIChatModelTextToolModalities,
  config: {
    def: chat_model_exports.ChatModelMiniReasoningConfigDef(131072),
    schema: chat_model_exports.ChatModelMiniReasoningConfigSchema(131072, 4)
  },
  price: pricing_default[Grok_3_Mini_Beta_Literal]
});
var Grok_3_Mini_Beta_Options = BaseChatModelOptions;
var Grok_3_Mini_Beta = class extends BaseChatModel {
  constructor(options) {
    super(Grok_3_Mini_Beta_Schema, options);
  }
};

// src/models/chat-models/grok-3-mini-fast-beta.xai.ts
import { ChatModelSchema as ChatModelSchema7 } from "@adaline/provider";
// NOTE(review): unlike grok-3-mini-beta, this mini model uses the
// response-schema config (no reasoning_effort). The reasoningEffort item's
// description says it applies to "grok-3-mini models" — confirm whether the
// mini-reasoning config was intended here.
var Grok_3_Mini_Fast_Beta_Literal = "grok-3-mini-fast-beta";
var Grok_3_Mini_Fast_Beta_Description = "Grok-3 Mini Fast Beta is xAI's fast lightweight model optimized for speed with a 131K context window.";
var Grok_3_Mini_Fast_Beta_Schema = ChatModelSchema7(XAIChatModelRoles, XAIChatModelTextToolModalitiesEnum).parse({
  name: Grok_3_Mini_Fast_Beta_Literal,
  description: Grok_3_Mini_Fast_Beta_Description,
  maxInputTokens: 131072,
  maxOutputTokens: 131072,
  roles: XAIChatModelRolesMap,
  modalities: XAIChatModelTextToolModalities,
  config: {
    def: chat_model_exports.ChatModelResponseSchemaConfigDef(131072, 4),
    schema: chat_model_exports.ChatModelResponseSchemaConfigSchema(131072, 4)
  },
  price: pricing_default[Grok_3_Mini_Fast_Beta_Literal]
});
var Grok_3_Mini_Fast_Beta_Options = BaseChatModelOptions;
var Grok_3_Mini_Fast_Beta = class extends BaseChatModel {
  constructor(options) {
    super(Grok_3_Mini_Fast_Beta_Schema, options);
  }
};
1726
+
1727
// src/models/chat-models/grok-4.xai.ts
import { ChatModelSchema as ChatModelSchema8 } from "@adaline/provider";
// grok-4: 262,144 input / 131,072 output tokens; reasoning config
// (no stop sequences/penalties, no reasoning_effort).
var Grok_4_Literal = "grok-4";
var Grok_4_Description = "Grok-4 is xAI's most advanced reasoning model with a 256K context window.";
var Grok_4_Schema = ChatModelSchema8(XAIChatModelRoles, XAIChatModelTextToolModalitiesEnum).parse({
  name: Grok_4_Literal,
  description: Grok_4_Description,
  maxInputTokens: 262144,
  maxOutputTokens: 131072,
  roles: XAIChatModelRolesMap,
  modalities: XAIChatModelTextToolModalities,
  config: {
    def: chat_model_exports.ChatModelReasoningConfigDef(131072),
    schema: chat_model_exports.ChatModelReasoningConfigSchema(131072)
  },
  price: pricing_default[Grok_4_Literal]
});
var Grok_4_Options = BaseChatModelOptions;
var Grok_4 = class extends BaseChatModel {
  constructor(options) {
    super(Grok_4_Schema, options);
  }
};

// src/models/chat-models/grok-4-0709.xai.ts
import { ChatModelSchema as ChatModelSchema9 } from "@adaline/provider";
// grok-4-0709: pinned release; identical limits/config to grok-4.
var Grok_4_0709_Literal = "grok-4-0709";
var Grok_4_0709_Description = "Grok-4 version 0709 is a specific release of xAI's most advanced reasoning model with a 256K context window.";
var Grok_4_0709_Schema = ChatModelSchema9(XAIChatModelRoles, XAIChatModelTextToolModalitiesEnum).parse({
  name: Grok_4_0709_Literal,
  description: Grok_4_0709_Description,
  maxInputTokens: 262144,
  maxOutputTokens: 131072,
  roles: XAIChatModelRolesMap,
  modalities: XAIChatModelTextToolModalities,
  config: {
    def: chat_model_exports.ChatModelReasoningConfigDef(131072),
    schema: chat_model_exports.ChatModelReasoningConfigSchema(131072)
  },
  price: pricing_default[Grok_4_0709_Literal]
});
var Grok_4_0709_Options = BaseChatModelOptions;
var Grok_4_0709 = class extends BaseChatModel {
  constructor(options) {
    super(Grok_4_0709_Schema, options);
  }
};

// src/models/chat-models/grok-4-fast-reasoning.xai.ts
import { ChatModelSchema as ChatModelSchema10 } from "@adaline/provider";
// grok-4-fast-reasoning: 2,097,152-token input window, 131,072 output;
// reasoning config like grok-4.
var Grok_4_Fast_Reasoning_Literal = "grok-4-fast-reasoning";
var Grok_4_Fast_Reasoning_Description = "Grok-4 Fast Reasoning is xAI's fast reasoning model with a 2M context window.";
var Grok_4_Fast_Reasoning_Schema = ChatModelSchema10(XAIChatModelRoles, XAIChatModelTextToolModalitiesEnum).parse({
  name: Grok_4_Fast_Reasoning_Literal,
  description: Grok_4_Fast_Reasoning_Description,
  maxInputTokens: 2097152,
  maxOutputTokens: 131072,
  roles: XAIChatModelRolesMap,
  modalities: XAIChatModelTextToolModalities,
  config: {
    def: chat_model_exports.ChatModelReasoningConfigDef(131072),
    schema: chat_model_exports.ChatModelReasoningConfigSchema(131072)
  },
  price: pricing_default[Grok_4_Fast_Reasoning_Literal]
});
var Grok_4_Fast_Reasoning_Options = BaseChatModelOptions;
var Grok_4_Fast_Reasoning = class extends BaseChatModel {
  constructor(options) {
    super(Grok_4_Fast_Reasoning_Schema, options);
  }
};
1798
+
1799
// src/models/chat-models/grok-4-fast-non-reasoning.xai.ts
import { ChatModelSchema as ChatModelSchema11 } from "@adaline/provider";
var Grok_4_Fast_Non_Reasoning_Literal = "grok-4-fast-non-reasoning";
var Grok_4_Fast_Non_Reasoning_Description = "Grok-4 Fast Non-Reasoning is xAI's fast model without reasoning capabilities with a 2M context window.";
// 128K-token output cap (2M-token input window).
var Grok_4_Fast_Non_Reasoning_MaxOutputTokens = 131072;
// Validated schema for grok-4-fast-non-reasoning. Unlike the reasoning
// variants this model uses the response-schema config; the second argument
// (4) mirrors the other non-reasoning models — presumably a schema depth/
// nesting limit, TODO confirm against ChatModelResponseSchemaConfigDef.
var Grok_4_Fast_Non_Reasoning_Schema = ChatModelSchema11(XAIChatModelRoles, XAIChatModelTextToolModalitiesEnum).parse({
  name: Grok_4_Fast_Non_Reasoning_Literal,
  description: Grok_4_Fast_Non_Reasoning_Description,
  maxInputTokens: 2097152,
  maxOutputTokens: Grok_4_Fast_Non_Reasoning_MaxOutputTokens,
  roles: XAIChatModelRolesMap,
  modalities: XAIChatModelTextToolModalities,
  config: {
    def: chat_model_exports.ChatModelResponseSchemaConfigDef(Grok_4_Fast_Non_Reasoning_MaxOutputTokens, 4),
    schema: chat_model_exports.ChatModelResponseSchemaConfigSchema(Grok_4_Fast_Non_Reasoning_MaxOutputTokens, 4)
  },
  price: pricing_default[Grok_4_Fast_Non_Reasoning_Literal]
});
// Shared options schema across all xAI chat models.
var Grok_4_Fast_Non_Reasoning_Options = BaseChatModelOptions;
// Concrete model class: forwards the schema to the shared base implementation.
var Grok_4_Fast_Non_Reasoning = class extends BaseChatModel {
  constructor(options) {
    super(Grok_4_Fast_Non_Reasoning_Schema, options);
  }
};
1822
+
1823
// src/models/chat-models/grok-4.1-fast-reasoning.xai.ts
import { ChatModelSchema as ChatModelSchema12 } from "@adaline/provider";
var Grok_4_1_Fast_Reasoning_Literal = "grok-4.1-fast-reasoning";
var Grok_4_1_Fast_Reasoning_Description = "Grok-4.1 Fast Reasoning is xAI's updated fast reasoning model with a 2M context window.";
// Validated schema for grok-4.1-fast-reasoning: 2M-token input window,
// 128K-token output cap, reasoning config sized to the output cap.
var Grok_4_1_Fast_Reasoning_Schema = ChatModelSchema12(XAIChatModelRoles, XAIChatModelTextToolModalitiesEnum).parse({
  name: Grok_4_1_Fast_Reasoning_Literal,
  description: Grok_4_1_Fast_Reasoning_Description,
  maxInputTokens: 2097152,
  maxOutputTokens: 131072,
  roles: XAIChatModelRolesMap,
  modalities: XAIChatModelTextToolModalities,
  config: {
    def: chat_model_exports.ChatModelReasoningConfigDef(131072),
    schema: chat_model_exports.ChatModelReasoningConfigSchema(131072)
  },
  // Fix: index pricing with the literal constant instead of a duplicated
  // string literal, matching every other model block. This keeps the pricing
  // key from silently drifting if the model name ever changes.
  price: pricing_default[Grok_4_1_Fast_Reasoning_Literal]
});
// Shared options schema across all xAI chat models.
var Grok_4_1_Fast_Reasoning_Options = BaseChatModelOptions;
// Concrete model class: binds the schema above to the shared base runtime.
var Grok_4_1_Fast_Reasoning = class extends BaseChatModel {
  constructor(options) {
    super(Grok_4_1_Fast_Reasoning_Schema, options);
  }
};
1846
+
1847
// src/models/chat-models/grok-4.1-fast-non-reasoning.xai.ts
import { ChatModelSchema as ChatModelSchema13 } from "@adaline/provider";
var Grok_4_1_Fast_Non_Reasoning_Literal = "grok-4.1-fast-non-reasoning";
var Grok_4_1_Fast_Non_Reasoning_Description = "Grok-4.1 Fast Non-Reasoning is xAI's updated fast model without reasoning capabilities with a 2M context window.";
// Validated schema for grok-4.1-fast-non-reasoning: 2M-token input window,
// 128K-token output cap. Non-reasoning variants use the response-schema
// config; the second argument (4) mirrors the other non-reasoning models.
var Grok_4_1_Fast_Non_Reasoning_Schema = ChatModelSchema13(XAIChatModelRoles, XAIChatModelTextToolModalitiesEnum).parse({
  name: Grok_4_1_Fast_Non_Reasoning_Literal,
  description: Grok_4_1_Fast_Non_Reasoning_Description,
  maxInputTokens: 2097152,
  maxOutputTokens: 131072,
  roles: XAIChatModelRolesMap,
  modalities: XAIChatModelTextToolModalities,
  config: {
    def: chat_model_exports.ChatModelResponseSchemaConfigDef(131072, 4),
    schema: chat_model_exports.ChatModelResponseSchemaConfigSchema(131072, 4)
  },
  // Fix: index pricing with the literal constant instead of a duplicated
  // string literal, matching every other model block. This keeps the pricing
  // key from silently drifting if the model name ever changes.
  price: pricing_default[Grok_4_1_Fast_Non_Reasoning_Literal]
});
// Shared options schema across all xAI chat models.
var Grok_4_1_Fast_Non_Reasoning_Options = BaseChatModelOptions;
// Concrete model class: forwards the schema to the shared base implementation.
var Grok_4_1_Fast_Non_Reasoning = class extends BaseChatModel {
  constructor(options) {
    super(Grok_4_1_Fast_Non_Reasoning_Schema, options);
  }
};
1870
+
1871
// src/models/chat-models/grok-code-fast-1.xai.ts
import { ChatModelSchema as ChatModelSchema14 } from "@adaline/provider";
var Grok_Code_Fast_1_Literal = "grok-code-fast-1";
var Grok_Code_Fast_1_Description = "Grok Code Fast 1 is xAI's specialized coding model optimized for code generation with a 256K context window.";
// Validated schema for grok-code-fast-1.
var Grok_Code_Fast_1_Schema = (() => {
  // 256K-token input window with a 128K-token output cap; this model uses
  // the response-schema config like the other non-reasoning variants.
  const maxOutput = 131072;
  return ChatModelSchema14(XAIChatModelRoles, XAIChatModelTextToolModalitiesEnum).parse({
    name: Grok_Code_Fast_1_Literal,
    description: Grok_Code_Fast_1_Description,
    maxInputTokens: 262144,
    maxOutputTokens: maxOutput,
    roles: XAIChatModelRolesMap,
    modalities: XAIChatModelTextToolModalities,
    config: {
      def: chat_model_exports.ChatModelResponseSchemaConfigDef(maxOutput, 4),
      schema: chat_model_exports.ChatModelResponseSchemaConfigSchema(maxOutput, 4)
    },
    price: pricing_default[Grok_Code_Fast_1_Literal]
  });
})();
// Shared options schema across all xAI chat models.
var Grok_Code_Fast_1_Options = BaseChatModelOptions;
// Concrete model class: binds the schema above to the shared base runtime.
var Grok_Code_Fast_1 = class extends BaseChatModel {
  constructor(options) {
    super(Grok_Code_Fast_1_Schema, options);
  }
};
1894
+
1895
// src/provider/provider.xai.ts
var ProviderLiteral = "xai";
// Provider facade for xAI: maps model-name literals to their concrete model
// classes, options schemas, and parsed model schemas. xAI exposes chat
// models only — the embedding-model surface is intentionally empty.
var XAI = class {
  constructor() {
    this.version = "v1";
    this.name = ProviderLiteral;
    // Small helper so each registry entry reads as one line.
    const entry = (model, modelOptions, modelSchema) => ({ model, modelOptions, modelSchema });
    this.chatModelFactories = {
      [Grok_2_Literal]: entry(Grok_2, Grok_2_Options, Grok_2_Schema),
      [Grok_2_Latest_Literal]: entry(Grok_2_Latest, Grok_2_Latest_Options, Grok_2_Latest_Schema),
      [Grok_2_1212_Literal]: entry(Grok_2_1212, Grok_2_1212_Options, Grok_2_1212_Schema),
      [Grok_3_Beta_Literal]: entry(Grok_3_Beta, Grok_3_Beta_Options, Grok_3_Beta_Schema),
      [Grok_3_Fast_Beta_Literal]: entry(Grok_3_Fast_Beta, Grok_3_Fast_Beta_Options, Grok_3_Fast_Beta_Schema),
      [Grok_3_Mini_Beta_Literal]: entry(Grok_3_Mini_Beta, Grok_3_Mini_Beta_Options, Grok_3_Mini_Beta_Schema),
      [Grok_3_Mini_Fast_Beta_Literal]: entry(Grok_3_Mini_Fast_Beta, Grok_3_Mini_Fast_Beta_Options, Grok_3_Mini_Fast_Beta_Schema),
      [Grok_4_Literal]: entry(Grok_4, Grok_4_Options, Grok_4_Schema),
      [Grok_4_0709_Literal]: entry(Grok_4_0709, Grok_4_0709_Options, Grok_4_0709_Schema),
      [Grok_4_Fast_Reasoning_Literal]: entry(Grok_4_Fast_Reasoning, Grok_4_Fast_Reasoning_Options, Grok_4_Fast_Reasoning_Schema),
      [Grok_4_Fast_Non_Reasoning_Literal]: entry(Grok_4_Fast_Non_Reasoning, Grok_4_Fast_Non_Reasoning_Options, Grok_4_Fast_Non_Reasoning_Schema),
      [Grok_4_1_Fast_Reasoning_Literal]: entry(Grok_4_1_Fast_Reasoning, Grok_4_1_Fast_Reasoning_Options, Grok_4_1_Fast_Reasoning_Schema),
      [Grok_4_1_Fast_Non_Reasoning_Literal]: entry(Grok_4_1_Fast_Non_Reasoning, Grok_4_1_Fast_Non_Reasoning_Options, Grok_4_1_Fast_Non_Reasoning_Schema),
      [Grok_Code_Fast_1_Literal]: entry(Grok_Code_Fast_1, Grok_Code_Fast_1_Options, Grok_Code_Fast_1_Schema)
    };
  }
  // All supported chat-model name literals.
  chatModelLiterals() {
    return Object.keys(this.chatModelFactories);
  }
  // Map of model-name literal -> parsed model schema.
  chatModelSchemas() {
    const schemas = {};
    for (const [literal, { modelSchema }] of Object.entries(this.chatModelFactories)) {
      schemas[literal] = modelSchema;
    }
    return schemas;
  }
  // Instantiate a chat model by name; throws ProviderError on unknown names.
  chatModel(options) {
    const modelName = options.modelName;
    const factory = this.chatModelFactories[modelName];
    if (!factory) {
      throw new ProviderError({
        info: `Invalid model name: '${modelName}' for provider: '${this.name}'`,
        cause: new Error(`Available models: [${this.chatModelLiterals().join(", ")}]`)
      });
    }
    const parsedOptions = factory.modelOptions.parse(options);
    return new factory.model(parsedOptions);
  }
  // XAI does not support embedding models
  embeddingModelLiterals() {
    return [];
  }
  embeddingModelSchemas() {
    return {};
  }
  embeddingModel() {
    throw new ProviderError({
      info: "XAI does not support embedding models",
      cause: new Error("No embedding models available")
    });
  }
};
// Base endpoint for all xAI API requests.
XAI.baseUrl = "https://api.x.ai/v1";
2013
// Public API of @adaline/xai: shared base classes, per-model classes with
// their Literal/Options/Schema companions, the XAI provider, and the
// request/response schema types.
export {
  // Shared chat-model base class and options schema.
  BaseChatModel,
  BaseChatModelOptions,
  // Grok-2 family.
  Grok_2,
  Grok_2_1212,
  Grok_2_1212_Literal,
  Grok_2_1212_Options,
  Grok_2_1212_Schema,
  Grok_2_Latest,
  Grok_2_Latest_Literal,
  Grok_2_Latest_Options,
  Grok_2_Latest_Schema,
  Grok_2_Literal,
  Grok_2_Options,
  Grok_2_Schema,
  // Grok-3 family.
  Grok_3_Beta,
  Grok_3_Beta_Literal,
  Grok_3_Beta_Options,
  Grok_3_Beta_Schema,
  Grok_3_Fast_Beta,
  Grok_3_Fast_Beta_Literal,
  Grok_3_Fast_Beta_Options,
  Grok_3_Fast_Beta_Schema,
  Grok_3_Mini_Beta,
  Grok_3_Mini_Beta_Literal,
  Grok_3_Mini_Beta_Options,
  Grok_3_Mini_Beta_Schema,
  Grok_3_Mini_Fast_Beta,
  Grok_3_Mini_Fast_Beta_Literal,
  Grok_3_Mini_Fast_Beta_Options,
  Grok_3_Mini_Fast_Beta_Schema,
  // Grok-4 / Grok-4.1 family.
  Grok_4,
  Grok_4_0709,
  Grok_4_0709_Literal,
  Grok_4_0709_Options,
  Grok_4_0709_Schema,
  Grok_4_1_Fast_Non_Reasoning,
  Grok_4_1_Fast_Non_Reasoning_Literal,
  Grok_4_1_Fast_Non_Reasoning_Options,
  Grok_4_1_Fast_Non_Reasoning_Schema,
  Grok_4_1_Fast_Reasoning,
  Grok_4_1_Fast_Reasoning_Literal,
  Grok_4_1_Fast_Reasoning_Options,
  Grok_4_1_Fast_Reasoning_Schema,
  Grok_4_Fast_Non_Reasoning,
  Grok_4_Fast_Non_Reasoning_Literal,
  Grok_4_Fast_Non_Reasoning_Options,
  Grok_4_Fast_Non_Reasoning_Schema,
  Grok_4_Fast_Reasoning,
  Grok_4_Fast_Reasoning_Literal,
  Grok_4_Fast_Reasoning_Options,
  Grok_4_Fast_Reasoning_Schema,
  Grok_4_Literal,
  Grok_4_Options,
  Grok_4_Schema,
  // Coding-specialized model.
  Grok_Code_Fast_1,
  Grok_Code_Fast_1_Literal,
  Grok_Code_Fast_1_Options,
  Grok_Code_Fast_1_Schema,
  // Provider facade.
  ProviderLiteral,
  XAI,
  // Model configuration helpers and modality/role schema types.
  chat_model_exports as XAIChatModelConfigs,
  XAIChatModelModalities,
  XAIChatModelModalitiesEnum,
  XAIChatModelRoles,
  XAIChatModelRolesMap,
  XAIChatModelTextModalities,
  XAIChatModelTextModalitiesEnum,
  XAIChatModelTextToolModalities,
  XAIChatModelTextToolModalitiesEnum,
  // Wire-format request/response schema types.
  XAIChatRequest,
  XAIChatRequestAssistantMessage,
  XAIChatRequestImageContent,
  XAIChatRequestMessage,
  XAIChatRequestResponseFormat,
  XAIChatRequestSystemMessage,
  XAIChatRequestTextContent,
  XAIChatRequestTool,
  XAIChatRequestToolCallContent,
  XAIChatRequestToolChoiceEnum,
  XAIChatRequestToolChoiceFunction,
  XAIChatRequestToolMessage,
  XAIChatRequestUserMessage,
  XAICompleteChatResponse,
  XAIStreamChatResponse,
  XAIToolCallsCompleteChatResponse,
  XAIToolCallsStreamChatResponse
};
//# sourceMappingURL=index.mjs.map