@yourgpt/llm-sdk 0.1.0 → 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (56)
  1. package/README.md +61 -40
  2. package/dist/adapters/index.d.mts +4 -258
  3. package/dist/adapters/index.d.ts +4 -258
  4. package/dist/adapters/index.js +0 -113
  5. package/dist/adapters/index.js.map +1 -1
  6. package/dist/adapters/index.mjs +1 -112
  7. package/dist/adapters/index.mjs.map +1 -1
  8. package/dist/base-D_FyHFKj.d.mts +235 -0
  9. package/dist/base-D_FyHFKj.d.ts +235 -0
  10. package/dist/index.d.mts +209 -451
  11. package/dist/index.d.ts +209 -451
  12. package/dist/index.js +1905 -311
  13. package/dist/index.js.map +1 -1
  14. package/dist/index.mjs +1895 -309
  15. package/dist/index.mjs.map +1 -1
  16. package/dist/providers/anthropic/index.d.mts +61 -0
  17. package/dist/providers/anthropic/index.d.ts +61 -0
  18. package/dist/providers/anthropic/index.js +939 -0
  19. package/dist/providers/anthropic/index.js.map +1 -0
  20. package/dist/providers/anthropic/index.mjs +934 -0
  21. package/dist/providers/anthropic/index.mjs.map +1 -0
  22. package/dist/providers/azure/index.d.mts +38 -0
  23. package/dist/providers/azure/index.d.ts +38 -0
  24. package/dist/providers/azure/index.js +380 -0
  25. package/dist/providers/azure/index.js.map +1 -0
  26. package/dist/providers/azure/index.mjs +377 -0
  27. package/dist/providers/azure/index.mjs.map +1 -0
  28. package/dist/providers/google/index.d.mts +72 -0
  29. package/dist/providers/google/index.d.ts +72 -0
  30. package/dist/providers/google/index.js +790 -0
  31. package/dist/providers/google/index.js.map +1 -0
  32. package/dist/providers/google/index.mjs +785 -0
  33. package/dist/providers/google/index.mjs.map +1 -0
  34. package/dist/providers/ollama/index.d.mts +24 -0
  35. package/dist/providers/ollama/index.d.ts +24 -0
  36. package/dist/providers/ollama/index.js +235 -0
  37. package/dist/providers/ollama/index.js.map +1 -0
  38. package/dist/providers/ollama/index.mjs +232 -0
  39. package/dist/providers/ollama/index.mjs.map +1 -0
  40. package/dist/providers/openai/index.d.mts +82 -0
  41. package/dist/providers/openai/index.d.ts +82 -0
  42. package/dist/providers/openai/index.js +679 -0
  43. package/dist/providers/openai/index.js.map +1 -0
  44. package/dist/providers/openai/index.mjs +674 -0
  45. package/dist/providers/openai/index.mjs.map +1 -0
  46. package/dist/providers/xai/index.d.mts +78 -0
  47. package/dist/providers/xai/index.d.ts +78 -0
  48. package/dist/providers/xai/index.js +671 -0
  49. package/dist/providers/xai/index.js.map +1 -0
  50. package/dist/providers/xai/index.mjs +666 -0
  51. package/dist/providers/xai/index.mjs.map +1 -0
  52. package/dist/types-BBCZ3Fxy.d.mts +308 -0
  53. package/dist/types-CdORv1Yu.d.mts +338 -0
  54. package/dist/types-CdORv1Yu.d.ts +338 -0
  55. package/dist/types-DcoCaVVC.d.ts +308 -0
  56. package/package.json +34 -3
package/dist/providers/xai/index.js
@@ -0,0 +1,671 @@
+ 'use strict';
+
+ var core = require('@yourgpt/copilot-sdk/core');
+
+ // src/providers/xai/provider.ts
+ var XAI_MODELS = {
+   // Grok 4.1 Fast (Latest - December 2025)
+   "grok-4-1-fast-reasoning": { vision: false, tools: true, maxTokens: 2e6 },
+   "grok-4-1-fast-non-reasoning": {
+     vision: false,
+     tools: true,
+     maxTokens: 2e6
+   },
+   // Grok 4 Fast (September 2025)
+   "grok-4-fast-reasoning": { vision: false, tools: true, maxTokens: 2e6 },
+   "grok-4-fast-non-reasoning": {
+     vision: false,
+     tools: true,
+     maxTokens: 2e6
+   },
+   // Grok 4 (July 2025)
+   "grok-4": { vision: true, tools: true, maxTokens: 256e3 },
+   "grok-4-0709": { vision: true, tools: true, maxTokens: 256e3 },
+   // Grok 3 (February 2025) - Stable
+   "grok-3-beta": { vision: true, tools: true, maxTokens: 131072 },
+   "grok-3-fast-beta": { vision: false, tools: true, maxTokens: 131072 },
+   "grok-3-mini-beta": { vision: false, tools: true, maxTokens: 32768 },
+   "grok-3-mini-fast-beta": { vision: false, tools: true, maxTokens: 32768 },
+   // Grok Code Fast (August 2025)
+   "grok-code-fast-1": { vision: false, tools: true, maxTokens: 256e3 },
+   // Grok 2 (Legacy)
+   "grok-2": { vision: true, tools: true, maxTokens: 131072 },
+   "grok-2-latest": { vision: true, tools: true, maxTokens: 131072 },
+   "grok-2-mini": { vision: false, tools: true, maxTokens: 131072 }
+ };
+ function xai(modelId, options = {}) {
+   const apiKey = options.apiKey ?? process.env.XAI_API_KEY;
+   const baseURL = options.baseURL ?? "https://api.x.ai/v1";
+   let client = null;
+   async function getClient() {
+     if (!client) {
+       const { default: OpenAI } = await import('openai');
+       client = new OpenAI({
+         apiKey,
+         baseURL
+       });
+     }
+     return client;
+   }
+   const modelConfig = XAI_MODELS[modelId] ?? XAI_MODELS["grok-3-fast-beta"];
+   return {
+     provider: "xai",
+     modelId,
+     capabilities: {
+       supportsVision: modelConfig.vision,
+       supportsTools: modelConfig.tools,
+       supportsStreaming: true,
+       supportsJsonMode: false,
+       // xAI doesn't support JSON mode yet
+       supportsThinking: false,
+       supportsPDF: false,
+       maxTokens: modelConfig.maxTokens,
+       supportedImageTypes: modelConfig.vision ? ["image/png", "image/jpeg", "image/gif", "image/webp"] : []
+     },
+     async doGenerate(params) {
+       const client2 = await getClient();
+       const messages = formatMessagesForXAI(params.messages);
+       const response = await client2.chat.completions.create({
+         model: modelId,
+         messages,
+         tools: params.tools,
+         temperature: params.temperature,
+         max_tokens: params.maxTokens
+       });
+       const choice = response.choices[0];
+       const message = choice.message;
+       const toolCalls = (message.tool_calls ?? []).map(
+         (tc) => ({
+           id: tc.id,
+           name: tc.function.name,
+           args: JSON.parse(tc.function.arguments || "{}")
+         })
+       );
+       return {
+         text: message.content ?? "",
+         toolCalls,
+         finishReason: mapFinishReason(choice.finish_reason),
+         usage: {
+           promptTokens: response.usage?.prompt_tokens ?? 0,
+           completionTokens: response.usage?.completion_tokens ?? 0,
+           totalTokens: response.usage?.total_tokens ?? 0
+         },
+         rawResponse: response
+       };
+     },
+     async *doStream(params) {
+       const client2 = await getClient();
+       const messages = formatMessagesForXAI(params.messages);
+       const stream = await client2.chat.completions.create({
+         model: modelId,
+         messages,
+         tools: params.tools,
+         temperature: params.temperature,
+         max_tokens: params.maxTokens,
+         stream: true
+       });
+       let currentToolCall = null;
+       let totalPromptTokens = 0;
+       let totalCompletionTokens = 0;
+       for await (const chunk of stream) {
+         if (params.signal?.aborted) {
+           yield { type: "error", error: new Error("Aborted") };
+           return;
+         }
+         const choice = chunk.choices[0];
+         const delta = choice?.delta;
+         if (delta?.content) {
+           yield { type: "text-delta", text: delta.content };
+         }
+         if (delta?.tool_calls) {
+           for (const tc of delta.tool_calls) {
+             if (tc.id) {
+               if (currentToolCall) {
+                 yield {
+                   type: "tool-call",
+                   toolCall: {
+                     id: currentToolCall.id,
+                     name: currentToolCall.name,
+                     args: JSON.parse(currentToolCall.arguments || "{}")
+                   }
+                 };
+               }
+               currentToolCall = {
+                 id: tc.id,
+                 name: tc.function?.name ?? "",
+                 arguments: tc.function?.arguments ?? ""
+               };
+             } else if (currentToolCall && tc.function?.arguments) {
+               currentToolCall.arguments += tc.function.arguments;
+             }
+           }
+         }
+         if (choice?.finish_reason) {
+           if (currentToolCall) {
+             yield {
+               type: "tool-call",
+               toolCall: {
+                 id: currentToolCall.id,
+                 name: currentToolCall.name,
+                 args: JSON.parse(currentToolCall.arguments || "{}")
+               }
+             };
+             currentToolCall = null;
+           }
+           if (chunk.usage) {
+             totalPromptTokens = chunk.usage.prompt_tokens;
+             totalCompletionTokens = chunk.usage.completion_tokens;
+           }
+           yield {
+             type: "finish",
+             finishReason: mapFinishReason(choice.finish_reason),
+             usage: {
+               promptTokens: totalPromptTokens,
+               completionTokens: totalCompletionTokens,
+               totalTokens: totalPromptTokens + totalCompletionTokens
+             }
+           };
+         }
+       }
+     }
+   };
+ }
+ function mapFinishReason(reason) {
+   switch (reason) {
+     case "stop":
+       return "stop";
+     case "length":
+       return "length";
+     case "tool_calls":
+     case "function_call":
+       return "tool-calls";
+     case "content_filter":
+       return "content-filter";
+     default:
+       return "unknown";
+   }
+ }
+ function formatMessagesForXAI(messages) {
+   return messages.map((msg) => {
+     switch (msg.role) {
+       case "system":
+         return { role: "system", content: msg.content };
+       case "user":
+         if (typeof msg.content === "string") {
+           return { role: "user", content: msg.content };
+         }
+         return {
+           role: "user",
+           content: msg.content.map((part) => {
+             if (part.type === "text") {
+               return { type: "text", text: part.text };
+             }
+             if (part.type === "image") {
+               const imageData = typeof part.image === "string" ? part.image : Buffer.from(part.image).toString("base64");
+               const url = imageData.startsWith("data:") ? imageData : `data:${part.mimeType ?? "image/png"};base64,${imageData}`;
+               return { type: "image_url", image_url: { url, detail: "auto" } };
+             }
+             return { type: "text", text: "" };
+           })
+         };
+       case "assistant":
+         const assistantMsg = {
+           role: "assistant",
+           content: msg.content
+         };
+         if (msg.toolCalls && msg.toolCalls.length > 0) {
+           assistantMsg.tool_calls = msg.toolCalls.map((tc) => ({
+             id: tc.id,
+             type: "function",
+             function: {
+               name: tc.name,
+               arguments: JSON.stringify(tc.args)
+             }
+           }));
+         }
+         return assistantMsg;
+       case "tool":
+         return {
+           role: "tool",
+           tool_call_id: msg.toolCallId,
+           content: msg.content
+         };
+       default:
+         return msg;
+     }
+   });
+ }
+
+ // src/adapters/base.ts
+ function parameterToJsonSchema(param) {
+   const schema = {
+     type: param.type
+   };
+   if (param.description) {
+     schema.description = param.description;
+   }
+   if (param.enum) {
+     schema.enum = param.enum;
+   }
+   if (param.type === "array" && param.items) {
+     schema.items = parameterToJsonSchema(
+       param.items
+     );
+   }
+   if (param.type === "object" && param.properties) {
+     schema.properties = Object.fromEntries(
+       Object.entries(param.properties).map(([key, prop]) => [
+         key,
+         parameterToJsonSchema(
+           prop
+         )
+       ])
+     );
+   }
+   return schema;
+ }
+ function formatTools(actions) {
+   return actions.map((action) => ({
+     type: "function",
+     function: {
+       name: action.name,
+       description: action.description,
+       parameters: {
+         type: "object",
+         properties: action.parameters ? Object.fromEntries(
+           Object.entries(action.parameters).map(([key, param]) => [
+             key,
+             parameterToJsonSchema(param)
+           ])
+         ) : {},
+         required: action.parameters ? Object.entries(action.parameters).filter(([, param]) => param.required).map(([key]) => key) : []
+       }
+     }
+   }));
+ }
+ function hasImageAttachments(message) {
+   const attachments = message.metadata?.attachments;
+   return attachments?.some((a) => a.type === "image") ?? false;
+ }
+ function attachmentToOpenAIImage(attachment) {
+   if (attachment.type !== "image") return null;
+   let imageUrl;
+   if (attachment.url) {
+     imageUrl = attachment.url;
+   } else if (attachment.data) {
+     imageUrl = attachment.data.startsWith("data:") ? attachment.data : `data:${attachment.mimeType || "image/png"};base64,${attachment.data}`;
+   } else {
+     return null;
+   }
+   return {
+     type: "image_url",
+     image_url: {
+       url: imageUrl,
+       detail: "auto"
+     }
+   };
+ }
+ function messageToOpenAIContent(message) {
+   const attachments = message.metadata?.attachments;
+   const content = message.content ?? "";
+   if (!hasImageAttachments(message)) {
+     return content;
+   }
+   const blocks = [];
+   if (content) {
+     blocks.push({ type: "text", text: content });
+   }
+   if (attachments) {
+     for (const attachment of attachments) {
+       const imageBlock = attachmentToOpenAIImage(attachment);
+       if (imageBlock) {
+         blocks.push(imageBlock);
+       }
+     }
+   }
+   return blocks;
+ }
+ function formatMessagesForOpenAI(messages, systemPrompt) {
+   const formatted = [];
+   if (systemPrompt) {
+     formatted.push({ role: "system", content: systemPrompt });
+   }
+   for (const msg of messages) {
+     if (msg.role === "system") {
+       formatted.push({ role: "system", content: msg.content ?? "" });
+     } else if (msg.role === "user") {
+       formatted.push({
+         role: "user",
+         content: messageToOpenAIContent(msg)
+       });
+     } else if (msg.role === "assistant") {
+       const assistantMsg = {
+         role: "assistant",
+         content: msg.content
+       };
+       if (msg.tool_calls && msg.tool_calls.length > 0) {
+         assistantMsg.tool_calls = msg.tool_calls;
+       }
+       formatted.push(assistantMsg);
+     } else if (msg.role === "tool" && msg.tool_call_id) {
+       formatted.push({
+         role: "tool",
+         content: msg.content ?? "",
+         tool_call_id: msg.tool_call_id
+       });
+     }
+   }
+   return formatted;
+ }
+
+ // src/adapters/xai.ts
+ var XAI_BASE_URL = "https://api.x.ai/v1";
+ var XAIAdapter = class {
+   constructor(config) {
+     this.provider = "xai";
+     this.config = config;
+     this.model = config.model || "grok-2";
+   }
+   async getClient() {
+     if (!this.client) {
+       const { default: OpenAI } = await import('openai');
+       this.client = new OpenAI({
+         apiKey: this.config.apiKey,
+         baseURL: this.config.baseUrl || XAI_BASE_URL
+       });
+     }
+     return this.client;
+   }
+   async *stream(request) {
+     const client = await this.getClient();
+     let messages;
+     if (request.rawMessages && request.rawMessages.length > 0) {
+       const processedMessages = request.rawMessages.map((msg) => {
+         const hasAttachments = msg.attachments && Array.isArray(msg.attachments) && msg.attachments.length > 0;
+         if (hasAttachments) {
+           const content = [];
+           if (msg.content) {
+             content.push({ type: "text", text: msg.content });
+           }
+           for (const attachment of msg.attachments) {
+             if (attachment.type === "image") {
+               let imageUrl = attachment.data;
+               if (!imageUrl.startsWith("data:")) {
+                 imageUrl = `data:${attachment.mimeType || "image/png"};base64,${attachment.data}`;
+               }
+               content.push({
+                 type: "image_url",
+                 image_url: { url: imageUrl, detail: "auto" }
+               });
+             }
+           }
+           return { ...msg, content, attachments: void 0 };
+         }
+         return msg;
+       });
+       if (request.systemPrompt) {
+         const hasSystem = processedMessages.some((m) => m.role === "system");
+         if (!hasSystem) {
+           messages = [
+             { role: "system", content: request.systemPrompt },
+             ...processedMessages
+           ];
+         } else {
+           messages = processedMessages;
+         }
+       } else {
+         messages = processedMessages;
+       }
+     } else {
+       messages = formatMessagesForOpenAI(
+         request.messages,
+         request.systemPrompt
+       );
+     }
+     const tools = request.actions?.length ? formatTools(request.actions) : void 0;
+     const messageId = core.generateMessageId();
+     yield { type: "message:start", id: messageId };
+     try {
+       const stream = await client.chat.completions.create({
+         model: request.config?.model || this.model,
+         messages,
+         tools,
+         temperature: request.config?.temperature ?? this.config.temperature,
+         max_tokens: request.config?.maxTokens ?? this.config.maxTokens,
+         stream: true
+       });
+       let currentToolCall = null;
+       for await (const chunk of stream) {
+         if (request.signal?.aborted) {
+           break;
+         }
+         const delta = chunk.choices[0]?.delta;
+         if (delta?.content) {
+           yield { type: "message:delta", content: delta.content };
+         }
+         if (delta?.tool_calls) {
+           for (const toolCall of delta.tool_calls) {
+             if (toolCall.id) {
+               if (currentToolCall) {
+                 yield {
+                   type: "action:args",
+                   id: currentToolCall.id,
+                   args: currentToolCall.arguments
+                 };
+               }
+               currentToolCall = {
+                 id: toolCall.id,
+                 name: toolCall.function?.name || "",
+                 arguments: toolCall.function?.arguments || ""
+               };
+               yield {
+                 type: "action:start",
+                 id: currentToolCall.id,
+                 name: currentToolCall.name
+               };
+             } else if (currentToolCall && toolCall.function?.arguments) {
+               currentToolCall.arguments += toolCall.function.arguments;
+             }
+           }
+         }
+         if (chunk.choices[0]?.finish_reason) {
+           if (currentToolCall) {
+             yield {
+               type: "action:args",
+               id: currentToolCall.id,
+               args: currentToolCall.arguments
+             };
+           }
+         }
+       }
+       yield { type: "message:end" };
+       yield { type: "done" };
+     } catch (error) {
+       yield {
+         type: "error",
+         message: error instanceof Error ? error.message : "Unknown error",
+         code: "XAI_ERROR"
+       };
+     }
+   }
+   /**
+    * Non-streaming completion (optional, for debugging)
+    */
+   async complete(request) {
+     const client = await this.getClient();
+     let messages;
+     if (request.rawMessages && request.rawMessages.length > 0) {
+       messages = request.rawMessages;
+       if (request.systemPrompt) {
+         const hasSystem = messages.some((m) => m.role === "system");
+         if (!hasSystem) {
+           messages = [
+             { role: "system", content: request.systemPrompt },
+             ...messages
+           ];
+         }
+       }
+     } else {
+       messages = formatMessagesForOpenAI(
+         request.messages,
+         request.systemPrompt
+       );
+     }
+     const tools = request.actions?.length ? formatTools(request.actions) : void 0;
+     const response = await client.chat.completions.create({
+       model: request.config?.model || this.model,
+       messages,
+       tools,
+       temperature: request.config?.temperature ?? this.config.temperature,
+       max_tokens: request.config?.maxTokens ?? this.config.maxTokens
+     });
+     const choice = response.choices[0];
+     const message = choice?.message;
+     const toolCalls = (message?.tool_calls || []).map((tc) => ({
+       id: tc.id,
+       name: tc.function.name,
+       args: JSON.parse(tc.function.arguments || "{}")
+     }));
+     return {
+       content: message?.content || "",
+       toolCalls,
+       rawResponse: response
+     };
+   }
+ };
+ function createXAIAdapter(config) {
+   return new XAIAdapter(config);
+ }
+
+ // src/providers/xai/index.ts
+ var XAI_MODELS2 = {
+   // Grok 4.1 Fast (Latest - December 2025)
+   "grok-4-1-fast-reasoning": {
+     vision: false,
+     tools: true,
+     maxTokens: 2e6,
+     outputTokens: 16384
+   },
+   "grok-4-1-fast-non-reasoning": {
+     vision: false,
+     tools: true,
+     maxTokens: 2e6,
+     outputTokens: 16384
+   },
+   // Grok 4 Fast (September 2025)
+   "grok-4-fast-reasoning": {
+     vision: false,
+     tools: true,
+     maxTokens: 2e6,
+     outputTokens: 16384
+   },
+   "grok-4-fast-non-reasoning": {
+     vision: false,
+     tools: true,
+     maxTokens: 2e6,
+     outputTokens: 16384
+   },
+   // Grok 4 (July 2025)
+   "grok-4": {
+     vision: true,
+     tools: true,
+     maxTokens: 256e3,
+     outputTokens: 16384
+   },
+   "grok-4-0709": {
+     vision: true,
+     tools: true,
+     maxTokens: 256e3,
+     outputTokens: 16384
+   },
+   // Grok 3 (February 2025) - Stable
+   "grok-3-beta": {
+     vision: true,
+     tools: true,
+     maxTokens: 131072,
+     outputTokens: 8192
+   },
+   "grok-3-fast-beta": {
+     vision: false,
+     tools: true,
+     maxTokens: 131072,
+     outputTokens: 8192
+   },
+   "grok-3-mini-beta": {
+     vision: false,
+     tools: true,
+     maxTokens: 32768,
+     outputTokens: 8192
+   },
+   "grok-3-mini-fast-beta": {
+     vision: false,
+     tools: true,
+     maxTokens: 32768,
+     outputTokens: 8192
+   },
+   // Grok Code Fast (August 2025)
+   "grok-code-fast-1": {
+     vision: false,
+     tools: true,
+     maxTokens: 256e3,
+     outputTokens: 16384
+   },
+   // Grok 2 (Legacy - for backward compatibility)
+   "grok-2": {
+     vision: true,
+     tools: true,
+     maxTokens: 131072,
+     outputTokens: 4096
+   },
+   "grok-2-latest": {
+     vision: true,
+     tools: true,
+     maxTokens: 131072,
+     outputTokens: 4096
+   },
+   "grok-2-mini": {
+     vision: false,
+     tools: true,
+     maxTokens: 131072,
+     outputTokens: 4096
+   }
+ };
+ function createXAI(config = {}) {
+   const apiKey = config.apiKey ?? process.env.XAI_API_KEY ?? "";
+   return {
+     name: "xai",
+     supportedModels: Object.keys(XAI_MODELS2),
+     languageModel(modelId) {
+       return createXAIAdapter({
+         apiKey,
+         model: modelId,
+         baseUrl: config.baseUrl
+       });
+     },
+     getCapabilities(modelId) {
+       const model = XAI_MODELS2[modelId] ?? XAI_MODELS2["grok-3-fast-beta"];
+       return {
+         supportsVision: model.vision,
+         supportsTools: model.tools,
+         supportsThinking: false,
+         supportsStreaming: true,
+         supportsPDF: false,
+         supportsAudio: false,
+         supportsVideo: false,
+         maxTokens: model.maxTokens,
+         supportedImageTypes: model.vision ? ["image/png", "image/jpeg", "image/gif", "image/webp"] : [],
+         supportsJsonMode: false,
+         // xAI doesn't support JSON mode yet
+         supportsSystemMessages: true
+       };
+     }
+   };
+ }
+ var createXAIProvider = createXAI;
+
+ exports.createXAI = createXAI;
+ exports.createXAIModel = xai;
+ exports.createXAIProvider = createXAIProvider;
+ exports.xai = xai;
+ //# sourceMappingURL=index.js.map
+ //# sourceMappingURL=index.js.map
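
For orientation, a minimal usage sketch (TypeScript) of the new xAI provider entry points added in this file. The subpath import is an assumption inferred from the dist layout and the package.json exports change listed above; the xai() factory, its fallback to the XAI_API_KEY environment variable, and the doGenerate() result shape come directly from the bundle shown in the diff.

// Hypothetical usage sketch — the import subpath is assumed, not confirmed by this diff.
import { xai } from "@yourgpt/llm-sdk/providers/xai";

// xai(modelId) reads the API key from XAI_API_KEY when no apiKey option is passed.
const model = xai("grok-3-beta");

// doGenerate() performs a single chat completion and returns
// { text, toolCalls, finishReason, usage, rawResponse }.
const result = await model.doGenerate({
  messages: [{ role: "user", content: "Hello, Grok" }],
  temperature: 0.7,
  maxTokens: 512,
});
console.log(result.text, result.usage.totalTokens);

createXAI(), also exported here, wraps the same model table behind a provider object whose languageModel(modelId) returns the streaming XAIAdapter defined above.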