@yourgpt/llm-sdk 0.1.0 → 1.0.0

This diff shows the changes between publicly released versions of this package as they appear in its public registry, and is provided for informational purposes only.
Files changed (56)
  1. package/README.md +61 -40
  2. package/dist/adapters/index.d.mts +4 -258
  3. package/dist/adapters/index.d.ts +4 -258
  4. package/dist/adapters/index.js +0 -113
  5. package/dist/adapters/index.js.map +1 -1
  6. package/dist/adapters/index.mjs +1 -112
  7. package/dist/adapters/index.mjs.map +1 -1
  8. package/dist/base-D_FyHFKj.d.mts +235 -0
  9. package/dist/base-D_FyHFKj.d.ts +235 -0
  10. package/dist/index.d.mts +209 -451
  11. package/dist/index.d.ts +209 -451
  12. package/dist/index.js +1905 -311
  13. package/dist/index.js.map +1 -1
  14. package/dist/index.mjs +1895 -309
  15. package/dist/index.mjs.map +1 -1
  16. package/dist/providers/anthropic/index.d.mts +61 -0
  17. package/dist/providers/anthropic/index.d.ts +61 -0
  18. package/dist/providers/anthropic/index.js +939 -0
  19. package/dist/providers/anthropic/index.js.map +1 -0
  20. package/dist/providers/anthropic/index.mjs +934 -0
  21. package/dist/providers/anthropic/index.mjs.map +1 -0
  22. package/dist/providers/azure/index.d.mts +38 -0
  23. package/dist/providers/azure/index.d.ts +38 -0
  24. package/dist/providers/azure/index.js +380 -0
  25. package/dist/providers/azure/index.js.map +1 -0
  26. package/dist/providers/azure/index.mjs +377 -0
  27. package/dist/providers/azure/index.mjs.map +1 -0
  28. package/dist/providers/google/index.d.mts +72 -0
  29. package/dist/providers/google/index.d.ts +72 -0
  30. package/dist/providers/google/index.js +790 -0
  31. package/dist/providers/google/index.js.map +1 -0
  32. package/dist/providers/google/index.mjs +785 -0
  33. package/dist/providers/google/index.mjs.map +1 -0
  34. package/dist/providers/ollama/index.d.mts +24 -0
  35. package/dist/providers/ollama/index.d.ts +24 -0
  36. package/dist/providers/ollama/index.js +235 -0
  37. package/dist/providers/ollama/index.js.map +1 -0
  38. package/dist/providers/ollama/index.mjs +232 -0
  39. package/dist/providers/ollama/index.mjs.map +1 -0
  40. package/dist/providers/openai/index.d.mts +82 -0
  41. package/dist/providers/openai/index.d.ts +82 -0
  42. package/dist/providers/openai/index.js +679 -0
  43. package/dist/providers/openai/index.js.map +1 -0
  44. package/dist/providers/openai/index.mjs +674 -0
  45. package/dist/providers/openai/index.mjs.map +1 -0
  46. package/dist/providers/xai/index.d.mts +78 -0
  47. package/dist/providers/xai/index.d.ts +78 -0
  48. package/dist/providers/xai/index.js +671 -0
  49. package/dist/providers/xai/index.js.map +1 -0
  50. package/dist/providers/xai/index.mjs +666 -0
  51. package/dist/providers/xai/index.mjs.map +1 -0
  52. package/dist/types-BBCZ3Fxy.d.mts +308 -0
  53. package/dist/types-CdORv1Yu.d.mts +338 -0
  54. package/dist/types-CdORv1Yu.d.ts +338 -0
  55. package/dist/types-DcoCaVVC.d.ts +308 -0
  56. package/package.json +34 -3
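The file list shows the 1.0.0 build splitting each provider (Anthropic, Azure, Google, Ollama, OpenAI, xAI) into its own bundle under dist/providers/, alongside new shared type declaration files and a much larger core bundle. As a rough sketch of how a consumer might reach one of these per-provider entry points, assuming the 34 lines added to package.json wire dist/providers/* up as subpath exports (the import specifier below is an assumption, not something this diff confirms):

// Hypothetical import path; "@yourgpt/llm-sdk/providers/xai" is assumed from the
// new dist/providers/xai/* files and is not confirmed by this diff.
import { createXAI } from "@yourgpt/llm-sdk/providers/xai";

const provider = createXAI({ apiKey: process.env.XAI_API_KEY });
const model = provider.languageModel("grok-4");   // returns an XAIAdapter instance
console.log(provider.getCapabilities("grok-4"));  // vision/tool support, maxTokens, etc.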
package/dist/providers/xai/index.mjs
@@ -0,0 +1,666 @@
+ import { generateMessageId } from '@yourgpt/copilot-sdk/core';
+
+ // src/providers/xai/provider.ts
+ var XAI_MODELS = {
+   // Grok 4.1 Fast (Latest - December 2025)
+   "grok-4-1-fast-reasoning": { vision: false, tools: true, maxTokens: 2e6 },
+   "grok-4-1-fast-non-reasoning": {
+     vision: false,
+     tools: true,
+     maxTokens: 2e6
+   },
+   // Grok 4 Fast (September 2025)
+   "grok-4-fast-reasoning": { vision: false, tools: true, maxTokens: 2e6 },
+   "grok-4-fast-non-reasoning": {
+     vision: false,
+     tools: true,
+     maxTokens: 2e6
+   },
+   // Grok 4 (July 2025)
+   "grok-4": { vision: true, tools: true, maxTokens: 256e3 },
+   "grok-4-0709": { vision: true, tools: true, maxTokens: 256e3 },
+   // Grok 3 (February 2025) - Stable
+   "grok-3-beta": { vision: true, tools: true, maxTokens: 131072 },
+   "grok-3-fast-beta": { vision: false, tools: true, maxTokens: 131072 },
+   "grok-3-mini-beta": { vision: false, tools: true, maxTokens: 32768 },
+   "grok-3-mini-fast-beta": { vision: false, tools: true, maxTokens: 32768 },
+   // Grok Code Fast (August 2025)
+   "grok-code-fast-1": { vision: false, tools: true, maxTokens: 256e3 },
+   // Grok 2 (Legacy)
+   "grok-2": { vision: true, tools: true, maxTokens: 131072 },
+   "grok-2-latest": { vision: true, tools: true, maxTokens: 131072 },
+   "grok-2-mini": { vision: false, tools: true, maxTokens: 131072 }
+ };
+ function xai(modelId, options = {}) {
+   const apiKey = options.apiKey ?? process.env.XAI_API_KEY;
+   const baseURL = options.baseURL ?? "https://api.x.ai/v1";
+   let client = null;
+   async function getClient() {
+     if (!client) {
+       const { default: OpenAI } = await import('openai');
+       client = new OpenAI({
+         apiKey,
+         baseURL
+       });
+     }
+     return client;
+   }
+   const modelConfig = XAI_MODELS[modelId] ?? XAI_MODELS["grok-3-fast-beta"];
+   return {
+     provider: "xai",
+     modelId,
+     capabilities: {
+       supportsVision: modelConfig.vision,
+       supportsTools: modelConfig.tools,
+       supportsStreaming: true,
+       supportsJsonMode: false,
+       // xAI doesn't support JSON mode yet
+       supportsThinking: false,
+       supportsPDF: false,
+       maxTokens: modelConfig.maxTokens,
+       supportedImageTypes: modelConfig.vision ? ["image/png", "image/jpeg", "image/gif", "image/webp"] : []
+     },
+     async doGenerate(params) {
+       const client2 = await getClient();
+       const messages = formatMessagesForXAI(params.messages);
+       const response = await client2.chat.completions.create({
+         model: modelId,
+         messages,
+         tools: params.tools,
+         temperature: params.temperature,
+         max_tokens: params.maxTokens
+       });
+       const choice = response.choices[0];
+       const message = choice.message;
+       const toolCalls = (message.tool_calls ?? []).map(
+         (tc) => ({
+           id: tc.id,
+           name: tc.function.name,
+           args: JSON.parse(tc.function.arguments || "{}")
+         })
+       );
+       return {
+         text: message.content ?? "",
+         toolCalls,
+         finishReason: mapFinishReason(choice.finish_reason),
+         usage: {
+           promptTokens: response.usage?.prompt_tokens ?? 0,
+           completionTokens: response.usage?.completion_tokens ?? 0,
+           totalTokens: response.usage?.total_tokens ?? 0
+         },
+         rawResponse: response
+       };
+     },
+     async *doStream(params) {
+       const client2 = await getClient();
+       const messages = formatMessagesForXAI(params.messages);
+       const stream = await client2.chat.completions.create({
+         model: modelId,
+         messages,
+         tools: params.tools,
+         temperature: params.temperature,
+         max_tokens: params.maxTokens,
+         stream: true
+       });
+       let currentToolCall = null;
+       let totalPromptTokens = 0;
+       let totalCompletionTokens = 0;
+       for await (const chunk of stream) {
+         if (params.signal?.aborted) {
+           yield { type: "error", error: new Error("Aborted") };
+           return;
+         }
+         const choice = chunk.choices[0];
+         const delta = choice?.delta;
+         if (delta?.content) {
+           yield { type: "text-delta", text: delta.content };
+         }
+         if (delta?.tool_calls) {
+           for (const tc of delta.tool_calls) {
+             if (tc.id) {
+               if (currentToolCall) {
+                 yield {
+                   type: "tool-call",
+                   toolCall: {
+                     id: currentToolCall.id,
+                     name: currentToolCall.name,
+                     args: JSON.parse(currentToolCall.arguments || "{}")
+                   }
+                 };
+               }
+               currentToolCall = {
+                 id: tc.id,
+                 name: tc.function?.name ?? "",
+                 arguments: tc.function?.arguments ?? ""
+               };
+             } else if (currentToolCall && tc.function?.arguments) {
+               currentToolCall.arguments += tc.function.arguments;
+             }
+           }
+         }
+         if (choice?.finish_reason) {
+           if (currentToolCall) {
+             yield {
+               type: "tool-call",
+               toolCall: {
+                 id: currentToolCall.id,
+                 name: currentToolCall.name,
+                 args: JSON.parse(currentToolCall.arguments || "{}")
+               }
+             };
+             currentToolCall = null;
+           }
+           if (chunk.usage) {
+             totalPromptTokens = chunk.usage.prompt_tokens;
+             totalCompletionTokens = chunk.usage.completion_tokens;
+           }
+           yield {
+             type: "finish",
+             finishReason: mapFinishReason(choice.finish_reason),
+             usage: {
+               promptTokens: totalPromptTokens,
+               completionTokens: totalCompletionTokens,
+               totalTokens: totalPromptTokens + totalCompletionTokens
+             }
+           };
+         }
+       }
+     }
+   };
+ }
+ function mapFinishReason(reason) {
+   switch (reason) {
+     case "stop":
+       return "stop";
+     case "length":
+       return "length";
+     case "tool_calls":
+     case "function_call":
+       return "tool-calls";
+     case "content_filter":
+       return "content-filter";
+     default:
+       return "unknown";
+   }
+ }
+ function formatMessagesForXAI(messages) {
+   return messages.map((msg) => {
+     switch (msg.role) {
+       case "system":
+         return { role: "system", content: msg.content };
+       case "user":
+         if (typeof msg.content === "string") {
+           return { role: "user", content: msg.content };
+         }
+         return {
+           role: "user",
+           content: msg.content.map((part) => {
+             if (part.type === "text") {
+               return { type: "text", text: part.text };
+             }
+             if (part.type === "image") {
+               const imageData = typeof part.image === "string" ? part.image : Buffer.from(part.image).toString("base64");
+               const url = imageData.startsWith("data:") ? imageData : `data:${part.mimeType ?? "image/png"};base64,${imageData}`;
+               return { type: "image_url", image_url: { url, detail: "auto" } };
+             }
+             return { type: "text", text: "" };
+           })
+         };
+       case "assistant":
+         const assistantMsg = {
+           role: "assistant",
+           content: msg.content
+         };
+         if (msg.toolCalls && msg.toolCalls.length > 0) {
+           assistantMsg.tool_calls = msg.toolCalls.map((tc) => ({
+             id: tc.id,
+             type: "function",
+             function: {
+               name: tc.name,
+               arguments: JSON.stringify(tc.args)
+             }
+           }));
+         }
+         return assistantMsg;
+       case "tool":
+         return {
+           role: "tool",
+           tool_call_id: msg.toolCallId,
+           content: msg.content
+         };
+       default:
+         return msg;
+     }
+   });
+ }
+
+ // src/adapters/base.ts
+ function parameterToJsonSchema(param) {
+   const schema = {
+     type: param.type
+   };
+   if (param.description) {
+     schema.description = param.description;
+   }
+   if (param.enum) {
+     schema.enum = param.enum;
+   }
+   if (param.type === "array" && param.items) {
+     schema.items = parameterToJsonSchema(
+       param.items
+     );
+   }
+   if (param.type === "object" && param.properties) {
+     schema.properties = Object.fromEntries(
+       Object.entries(param.properties).map(([key, prop]) => [
+         key,
+         parameterToJsonSchema(
+           prop
+         )
+       ])
+     );
+   }
+   return schema;
+ }
+ function formatTools(actions) {
+   return actions.map((action) => ({
+     type: "function",
+     function: {
+       name: action.name,
+       description: action.description,
+       parameters: {
+         type: "object",
+         properties: action.parameters ? Object.fromEntries(
+           Object.entries(action.parameters).map(([key, param]) => [
+             key,
+             parameterToJsonSchema(param)
+           ])
+         ) : {},
+         required: action.parameters ? Object.entries(action.parameters).filter(([, param]) => param.required).map(([key]) => key) : []
+       }
+     }
+   }));
+ }
+ function hasImageAttachments(message) {
+   const attachments = message.metadata?.attachments;
+   return attachments?.some((a) => a.type === "image") ?? false;
+ }
+ function attachmentToOpenAIImage(attachment) {
+   if (attachment.type !== "image") return null;
+   let imageUrl;
+   if (attachment.url) {
+     imageUrl = attachment.url;
+   } else if (attachment.data) {
+     imageUrl = attachment.data.startsWith("data:") ? attachment.data : `data:${attachment.mimeType || "image/png"};base64,${attachment.data}`;
+   } else {
+     return null;
+   }
+   return {
+     type: "image_url",
+     image_url: {
+       url: imageUrl,
+       detail: "auto"
+     }
+   };
+ }
+ function messageToOpenAIContent(message) {
+   const attachments = message.metadata?.attachments;
+   const content = message.content ?? "";
+   if (!hasImageAttachments(message)) {
+     return content;
+   }
+   const blocks = [];
+   if (content) {
+     blocks.push({ type: "text", text: content });
+   }
+   if (attachments) {
+     for (const attachment of attachments) {
+       const imageBlock = attachmentToOpenAIImage(attachment);
+       if (imageBlock) {
+         blocks.push(imageBlock);
+       }
+     }
+   }
+   return blocks;
+ }
+ function formatMessagesForOpenAI(messages, systemPrompt) {
+   const formatted = [];
+   if (systemPrompt) {
+     formatted.push({ role: "system", content: systemPrompt });
+   }
+   for (const msg of messages) {
+     if (msg.role === "system") {
+       formatted.push({ role: "system", content: msg.content ?? "" });
+     } else if (msg.role === "user") {
+       formatted.push({
+         role: "user",
+         content: messageToOpenAIContent(msg)
+       });
+     } else if (msg.role === "assistant") {
+       const assistantMsg = {
+         role: "assistant",
+         content: msg.content
+       };
+       if (msg.tool_calls && msg.tool_calls.length > 0) {
+         assistantMsg.tool_calls = msg.tool_calls;
+       }
+       formatted.push(assistantMsg);
+     } else if (msg.role === "tool" && msg.tool_call_id) {
+       formatted.push({
+         role: "tool",
+         content: msg.content ?? "",
+         tool_call_id: msg.tool_call_id
+       });
+     }
+   }
+   return formatted;
+ }
+
+ // src/adapters/xai.ts
+ var XAI_BASE_URL = "https://api.x.ai/v1";
+ var XAIAdapter = class {
+   constructor(config) {
+     this.provider = "xai";
+     this.config = config;
+     this.model = config.model || "grok-2";
+   }
+   async getClient() {
+     if (!this.client) {
+       const { default: OpenAI } = await import('openai');
+       this.client = new OpenAI({
+         apiKey: this.config.apiKey,
+         baseURL: this.config.baseUrl || XAI_BASE_URL
+       });
+     }
+     return this.client;
+   }
+   async *stream(request) {
+     const client = await this.getClient();
+     let messages;
+     if (request.rawMessages && request.rawMessages.length > 0) {
+       const processedMessages = request.rawMessages.map((msg) => {
+         const hasAttachments = msg.attachments && Array.isArray(msg.attachments) && msg.attachments.length > 0;
+         if (hasAttachments) {
+           const content = [];
+           if (msg.content) {
+             content.push({ type: "text", text: msg.content });
+           }
+           for (const attachment of msg.attachments) {
+             if (attachment.type === "image") {
+               let imageUrl = attachment.data;
+               if (!imageUrl.startsWith("data:")) {
+                 imageUrl = `data:${attachment.mimeType || "image/png"};base64,${attachment.data}`;
+               }
+               content.push({
+                 type: "image_url",
+                 image_url: { url: imageUrl, detail: "auto" }
+               });
+             }
+           }
+           return { ...msg, content, attachments: void 0 };
+         }
+         return msg;
+       });
+       if (request.systemPrompt) {
+         const hasSystem = processedMessages.some((m) => m.role === "system");
+         if (!hasSystem) {
+           messages = [
+             { role: "system", content: request.systemPrompt },
+             ...processedMessages
+           ];
+         } else {
+           messages = processedMessages;
+         }
+       } else {
+         messages = processedMessages;
+       }
+     } else {
+       messages = formatMessagesForOpenAI(
+         request.messages,
+         request.systemPrompt
+       );
+     }
+     const tools = request.actions?.length ? formatTools(request.actions) : void 0;
+     const messageId = generateMessageId();
+     yield { type: "message:start", id: messageId };
+     try {
+       const stream = await client.chat.completions.create({
+         model: request.config?.model || this.model,
+         messages,
+         tools,
+         temperature: request.config?.temperature ?? this.config.temperature,
+         max_tokens: request.config?.maxTokens ?? this.config.maxTokens,
+         stream: true
+       });
+       let currentToolCall = null;
+       for await (const chunk of stream) {
+         if (request.signal?.aborted) {
+           break;
+         }
+         const delta = chunk.choices[0]?.delta;
+         if (delta?.content) {
+           yield { type: "message:delta", content: delta.content };
+         }
+         if (delta?.tool_calls) {
+           for (const toolCall of delta.tool_calls) {
+             if (toolCall.id) {
+               if (currentToolCall) {
+                 yield {
+                   type: "action:args",
+                   id: currentToolCall.id,
+                   args: currentToolCall.arguments
+                 };
+               }
+               currentToolCall = {
+                 id: toolCall.id,
+                 name: toolCall.function?.name || "",
+                 arguments: toolCall.function?.arguments || ""
+               };
+               yield {
+                 type: "action:start",
+                 id: currentToolCall.id,
+                 name: currentToolCall.name
+               };
+             } else if (currentToolCall && toolCall.function?.arguments) {
+               currentToolCall.arguments += toolCall.function.arguments;
+             }
+           }
+         }
+         if (chunk.choices[0]?.finish_reason) {
+           if (currentToolCall) {
+             yield {
+               type: "action:args",
+               id: currentToolCall.id,
+               args: currentToolCall.arguments
+             };
+           }
+         }
+       }
+       yield { type: "message:end" };
+       yield { type: "done" };
+     } catch (error) {
+       yield {
+         type: "error",
+         message: error instanceof Error ? error.message : "Unknown error",
+         code: "XAI_ERROR"
+       };
+     }
+   }
+   /**
+    * Non-streaming completion (optional, for debugging)
+    */
+   async complete(request) {
+     const client = await this.getClient();
+     let messages;
+     if (request.rawMessages && request.rawMessages.length > 0) {
+       messages = request.rawMessages;
+       if (request.systemPrompt) {
+         const hasSystem = messages.some((m) => m.role === "system");
+         if (!hasSystem) {
+           messages = [
+             { role: "system", content: request.systemPrompt },
+             ...messages
+           ];
+         }
+       }
+     } else {
+       messages = formatMessagesForOpenAI(
+         request.messages,
+         request.systemPrompt
+       );
+     }
+     const tools = request.actions?.length ? formatTools(request.actions) : void 0;
+     const response = await client.chat.completions.create({
+       model: request.config?.model || this.model,
+       messages,
+       tools,
+       temperature: request.config?.temperature ?? this.config.temperature,
+       max_tokens: request.config?.maxTokens ?? this.config.maxTokens
+     });
+     const choice = response.choices[0];
+     const message = choice?.message;
+     const toolCalls = (message?.tool_calls || []).map((tc) => ({
+       id: tc.id,
+       name: tc.function.name,
+       args: JSON.parse(tc.function.arguments || "{}")
+     }));
+     return {
+       content: message?.content || "",
+       toolCalls,
+       rawResponse: response
+     };
+   }
+ };
+ function createXAIAdapter(config) {
+   return new XAIAdapter(config);
+ }
+
+ // src/providers/xai/index.ts
+ var XAI_MODELS2 = {
+   // Grok 4.1 Fast (Latest - December 2025)
+   "grok-4-1-fast-reasoning": {
+     vision: false,
+     tools: true,
+     maxTokens: 2e6,
+     outputTokens: 16384
+   },
+   "grok-4-1-fast-non-reasoning": {
+     vision: false,
+     tools: true,
+     maxTokens: 2e6,
+     outputTokens: 16384
+   },
+   // Grok 4 Fast (September 2025)
+   "grok-4-fast-reasoning": {
+     vision: false,
+     tools: true,
+     maxTokens: 2e6,
+     outputTokens: 16384
+   },
+   "grok-4-fast-non-reasoning": {
+     vision: false,
+     tools: true,
+     maxTokens: 2e6,
+     outputTokens: 16384
+   },
+   // Grok 4 (July 2025)
+   "grok-4": {
+     vision: true,
+     tools: true,
+     maxTokens: 256e3,
+     outputTokens: 16384
+   },
+   "grok-4-0709": {
+     vision: true,
+     tools: true,
+     maxTokens: 256e3,
+     outputTokens: 16384
+   },
+   // Grok 3 (February 2025) - Stable
+   "grok-3-beta": {
+     vision: true,
+     tools: true,
+     maxTokens: 131072,
+     outputTokens: 8192
+   },
+   "grok-3-fast-beta": {
+     vision: false,
+     tools: true,
+     maxTokens: 131072,
+     outputTokens: 8192
+   },
+   "grok-3-mini-beta": {
+     vision: false,
+     tools: true,
+     maxTokens: 32768,
+     outputTokens: 8192
+   },
+   "grok-3-mini-fast-beta": {
+     vision: false,
+     tools: true,
+     maxTokens: 32768,
+     outputTokens: 8192
+   },
+   // Grok Code Fast (August 2025)
+   "grok-code-fast-1": {
+     vision: false,
+     tools: true,
+     maxTokens: 256e3,
+     outputTokens: 16384
+   },
+   // Grok 2 (Legacy - for backward compatibility)
+   "grok-2": {
+     vision: true,
+     tools: true,
+     maxTokens: 131072,
+     outputTokens: 4096
+   },
+   "grok-2-latest": {
+     vision: true,
+     tools: true,
+     maxTokens: 131072,
+     outputTokens: 4096
+   },
+   "grok-2-mini": {
+     vision: false,
+     tools: true,
+     maxTokens: 131072,
+     outputTokens: 4096
+   }
+ };
+ function createXAI(config = {}) {
+   const apiKey = config.apiKey ?? process.env.XAI_API_KEY ?? "";
+   return {
+     name: "xai",
+     supportedModels: Object.keys(XAI_MODELS2),
+     languageModel(modelId) {
+       return createXAIAdapter({
+         apiKey,
+         model: modelId,
+         baseUrl: config.baseUrl
+       });
+     },
+     getCapabilities(modelId) {
+       const model = XAI_MODELS2[modelId] ?? XAI_MODELS2["grok-3-fast-beta"];
+       return {
+         supportsVision: model.vision,
+         supportsTools: model.tools,
+         supportsThinking: false,
+         supportsStreaming: true,
+         supportsPDF: false,
+         supportsAudio: false,
+         supportsVideo: false,
+         maxTokens: model.maxTokens,
+         supportedImageTypes: model.vision ? ["image/png", "image/jpeg", "image/gif", "image/webp"] : [],
+         supportsJsonMode: false,
+         // xAI doesn't support JSON mode yet
+         supportsSystemMessages: true
+       };
+     }
+   };
+ }
+ var createXAIProvider = createXAI;
+
+ export { createXAI, xai as createXAIModel, createXAIProvider, xai };
+ //# sourceMappingURL=index.mjs.map
+ //# sourceMappingURL=index.mjs.map
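
Taken together, this bundle exposes two ways to drive xAI models: the lower-level xai() factory, whose doGenerate()/doStream() methods return text, tool calls, and token usage, and the createXAI() provider, whose languageModel()/getCapabilities() wrap the same adapter. A minimal sketch of calling doGenerate() directly, based only on the code above (the subpath import is assumed, and the message shape mirrors what formatMessagesForXAI accepts):

// Sketch only: the "@yourgpt/llm-sdk/providers/xai" specifier is an assumption.
import { xai } from "@yourgpt/llm-sdk/providers/xai";

const model = xai("grok-4", { apiKey: process.env.XAI_API_KEY });

// doGenerate() forwards messages/temperature/maxTokens to xAI's OpenAI-compatible
// chat.completions endpoint and maps the response to { text, toolCalls, usage, ... }.
const result = await model.doGenerate({
  messages: [
    { role: "system", content: "Answer in one sentence." },
    { role: "user", content: "What is Grok?" }
  ],
  temperature: 0.2,
  maxTokens: 128
});

console.log(result.text, result.finishReason, result.usage.totalTokens);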