@yourgpt/llm-sdk 0.1.0 → 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (56)
  1. package/README.md +61 -40
  2. package/dist/adapters/index.d.mts +4 -258
  3. package/dist/adapters/index.d.ts +4 -258
  4. package/dist/adapters/index.js +0 -113
  5. package/dist/adapters/index.js.map +1 -1
  6. package/dist/adapters/index.mjs +1 -112
  7. package/dist/adapters/index.mjs.map +1 -1
  8. package/dist/base-D_FyHFKj.d.mts +235 -0
  9. package/dist/base-D_FyHFKj.d.ts +235 -0
  10. package/dist/index.d.mts +209 -451
  11. package/dist/index.d.ts +209 -451
  12. package/dist/index.js +1905 -311
  13. package/dist/index.js.map +1 -1
  14. package/dist/index.mjs +1895 -309
  15. package/dist/index.mjs.map +1 -1
  16. package/dist/providers/anthropic/index.d.mts +61 -0
  17. package/dist/providers/anthropic/index.d.ts +61 -0
  18. package/dist/providers/anthropic/index.js +939 -0
  19. package/dist/providers/anthropic/index.js.map +1 -0
  20. package/dist/providers/anthropic/index.mjs +934 -0
  21. package/dist/providers/anthropic/index.mjs.map +1 -0
  22. package/dist/providers/azure/index.d.mts +38 -0
  23. package/dist/providers/azure/index.d.ts +38 -0
  24. package/dist/providers/azure/index.js +380 -0
  25. package/dist/providers/azure/index.js.map +1 -0
  26. package/dist/providers/azure/index.mjs +377 -0
  27. package/dist/providers/azure/index.mjs.map +1 -0
  28. package/dist/providers/google/index.d.mts +72 -0
  29. package/dist/providers/google/index.d.ts +72 -0
  30. package/dist/providers/google/index.js +790 -0
  31. package/dist/providers/google/index.js.map +1 -0
  32. package/dist/providers/google/index.mjs +785 -0
  33. package/dist/providers/google/index.mjs.map +1 -0
  34. package/dist/providers/ollama/index.d.mts +24 -0
  35. package/dist/providers/ollama/index.d.ts +24 -0
  36. package/dist/providers/ollama/index.js +235 -0
  37. package/dist/providers/ollama/index.js.map +1 -0
  38. package/dist/providers/ollama/index.mjs +232 -0
  39. package/dist/providers/ollama/index.mjs.map +1 -0
  40. package/dist/providers/openai/index.d.mts +82 -0
  41. package/dist/providers/openai/index.d.ts +82 -0
  42. package/dist/providers/openai/index.js +679 -0
  43. package/dist/providers/openai/index.js.map +1 -0
  44. package/dist/providers/openai/index.mjs +674 -0
  45. package/dist/providers/openai/index.mjs.map +1 -0
  46. package/dist/providers/xai/index.d.mts +78 -0
  47. package/dist/providers/xai/index.d.ts +78 -0
  48. package/dist/providers/xai/index.js +671 -0
  49. package/dist/providers/xai/index.js.map +1 -0
  50. package/dist/providers/xai/index.mjs +666 -0
  51. package/dist/providers/xai/index.mjs.map +1 -0
  52. package/dist/types-BBCZ3Fxy.d.mts +308 -0
  53. package/dist/types-CdORv1Yu.d.mts +338 -0
  54. package/dist/types-CdORv1Yu.d.ts +338 -0
  55. package/dist/types-DcoCaVVC.d.ts +308 -0
  56. package/package.json +34 -3
@@ -0,0 +1,679 @@
1
+ 'use strict';
2
+
3
+ var core = require('@yourgpt/copilot-sdk/core');
4
+
5
+ // src/providers/openai/provider.ts
6
+ var OPENAI_MODELS = {
7
+ // GPT-4o series
8
+ "gpt-4o": { vision: true, tools: true, jsonMode: true, maxTokens: 128e3 },
9
+ "gpt-4o-mini": {
10
+ vision: true,
11
+ tools: true,
12
+ jsonMode: true,
13
+ maxTokens: 128e3
14
+ },
15
+ "gpt-4o-2024-11-20": {
16
+ vision: true,
17
+ tools: true,
18
+ jsonMode: true,
19
+ maxTokens: 128e3
20
+ },
21
+ "gpt-4o-2024-08-06": {
22
+ vision: true,
23
+ tools: true,
24
+ jsonMode: true,
25
+ maxTokens: 128e3
26
+ },
27
+ // GPT-4 Turbo
28
+ "gpt-4-turbo": {
29
+ vision: true,
30
+ tools: true,
31
+ jsonMode: true,
32
+ maxTokens: 128e3
33
+ },
34
+ "gpt-4-turbo-preview": {
35
+ vision: false,
36
+ tools: true,
37
+ jsonMode: true,
38
+ maxTokens: 128e3
39
+ },
40
+ // GPT-4
41
+ "gpt-4": { vision: false, tools: true, jsonMode: false, maxTokens: 8192 },
42
+ "gpt-4-32k": {
43
+ vision: false,
44
+ tools: true,
45
+ jsonMode: false,
46
+ maxTokens: 32768
47
+ },
48
+ // GPT-3.5
49
+ "gpt-3.5-turbo": {
50
+ vision: false,
51
+ tools: true,
52
+ jsonMode: true,
53
+ maxTokens: 16385
54
+ },
55
+ // O1 series (reasoning)
56
+ o1: { vision: true, tools: false, jsonMode: false, maxTokens: 128e3 },
57
+ "o1-mini": { vision: true, tools: false, jsonMode: false, maxTokens: 128e3 },
58
+ "o1-preview": {
59
+ vision: true,
60
+ tools: false,
61
+ jsonMode: false,
62
+ maxTokens: 128e3
63
+ },
64
+ // O3 series
65
+ "o3-mini": { vision: true, tools: false, jsonMode: false, maxTokens: 128e3 }
66
+ };
67
+ function openai(modelId, options = {}) {
68
+ const apiKey = options.apiKey ?? process.env.OPENAI_API_KEY;
69
+ const baseURL = options.baseURL ?? "https://api.openai.com/v1";
70
+ let client = null;
71
+ async function getClient() {
72
+ if (!client) {
73
+ const { default: OpenAI } = await import('openai');
74
+ client = new OpenAI({
75
+ apiKey,
76
+ baseURL,
77
+ organization: options.organization,
78
+ defaultHeaders: options.headers
79
+ });
80
+ }
81
+ return client;
82
+ }
83
+ const modelConfig = OPENAI_MODELS[modelId] ?? OPENAI_MODELS["gpt-4o"];
84
+ return {
85
+ provider: "openai",
86
+ modelId,
87
+ capabilities: {
88
+ supportsVision: modelConfig.vision,
89
+ supportsTools: modelConfig.tools,
90
+ supportsStreaming: true,
91
+ supportsJsonMode: modelConfig.jsonMode,
92
+ supportsThinking: false,
93
+ supportsPDF: false,
94
+ maxTokens: modelConfig.maxTokens,
95
+ supportedImageTypes: modelConfig.vision ? ["image/png", "image/jpeg", "image/gif", "image/webp"] : []
96
+ },
97
+ async doGenerate(params) {
98
+ const client2 = await getClient();
99
+ const messages = formatMessagesForOpenAI(params.messages);
100
+ const response = await client2.chat.completions.create({
101
+ model: modelId,
102
+ messages,
103
+ tools: params.tools,
104
+ temperature: params.temperature,
105
+ max_tokens: params.maxTokens
106
+ });
107
+ const choice = response.choices[0];
108
+ const message = choice.message;
109
+ const toolCalls = (message.tool_calls ?? []).map(
110
+ (tc) => ({
111
+ id: tc.id,
112
+ name: tc.function.name,
113
+ args: JSON.parse(tc.function.arguments || "{}")
114
+ })
115
+ );
116
+ return {
117
+ text: message.content ?? "",
118
+ toolCalls,
119
+ finishReason: mapFinishReason(choice.finish_reason),
120
+ usage: {
121
+ promptTokens: response.usage?.prompt_tokens ?? 0,
122
+ completionTokens: response.usage?.completion_tokens ?? 0,
123
+ totalTokens: response.usage?.total_tokens ?? 0
124
+ },
125
+ rawResponse: response
126
+ };
127
+ },
128
+ async *doStream(params) {
129
+ const client2 = await getClient();
130
+ const messages = formatMessagesForOpenAI(params.messages);
131
+ const stream = await client2.chat.completions.create({
132
+ model: modelId,
133
+ messages,
134
+ tools: params.tools,
135
+ temperature: params.temperature,
136
+ max_tokens: params.maxTokens,
137
+ stream: true
138
+ });
139
+ let currentToolCall = null;
140
+ let totalPromptTokens = 0;
141
+ let totalCompletionTokens = 0;
142
+ for await (const chunk of stream) {
143
+ if (params.signal?.aborted) {
144
+ yield { type: "error", error: new Error("Aborted") };
145
+ return;
146
+ }
147
+ const choice = chunk.choices[0];
148
+ const delta = choice?.delta;
149
+ if (delta?.content) {
150
+ yield { type: "text-delta", text: delta.content };
151
+ }
152
+ if (delta?.tool_calls) {
153
+ for (const tc of delta.tool_calls) {
154
+ if (tc.id) {
155
+ if (currentToolCall) {
156
+ yield {
157
+ type: "tool-call",
158
+ toolCall: {
159
+ id: currentToolCall.id,
160
+ name: currentToolCall.name,
161
+ args: JSON.parse(currentToolCall.arguments || "{}")
162
+ }
163
+ };
164
+ }
165
+ currentToolCall = {
166
+ id: tc.id,
167
+ name: tc.function?.name ?? "",
168
+ arguments: tc.function?.arguments ?? ""
169
+ };
170
+ } else if (currentToolCall && tc.function?.arguments) {
171
+ currentToolCall.arguments += tc.function.arguments;
172
+ }
173
+ }
174
+ }
175
+ if (choice?.finish_reason) {
176
+ if (currentToolCall) {
177
+ yield {
178
+ type: "tool-call",
179
+ toolCall: {
180
+ id: currentToolCall.id,
181
+ name: currentToolCall.name,
182
+ args: JSON.parse(currentToolCall.arguments || "{}")
183
+ }
184
+ };
185
+ currentToolCall = null;
186
+ }
187
+ if (chunk.usage) {
188
+ totalPromptTokens = chunk.usage.prompt_tokens;
189
+ totalCompletionTokens = chunk.usage.completion_tokens;
190
+ }
191
+ yield {
192
+ type: "finish",
193
+ finishReason: mapFinishReason(choice.finish_reason),
194
+ usage: {
195
+ promptTokens: totalPromptTokens,
196
+ completionTokens: totalCompletionTokens,
197
+ totalTokens: totalPromptTokens + totalCompletionTokens
198
+ }
199
+ };
200
+ }
201
+ }
202
+ }
203
+ };
204
+ }
205
+ function mapFinishReason(reason) {
206
+ switch (reason) {
207
+ case "stop":
208
+ return "stop";
209
+ case "length":
210
+ return "length";
211
+ case "tool_calls":
212
+ case "function_call":
213
+ return "tool-calls";
214
+ case "content_filter":
215
+ return "content-filter";
216
+ default:
217
+ return "unknown";
218
+ }
219
+ }
220
+ function formatMessagesForOpenAI(messages) {
221
+ return messages.map((msg) => {
222
+ switch (msg.role) {
223
+ case "system":
224
+ return { role: "system", content: msg.content };
225
+ case "user":
226
+ if (typeof msg.content === "string") {
227
+ return { role: "user", content: msg.content };
228
+ }
229
+ return {
230
+ role: "user",
231
+ content: msg.content.map((part) => {
232
+ if (part.type === "text") {
233
+ return { type: "text", text: part.text };
234
+ }
235
+ if (part.type === "image") {
236
+ const imageData = typeof part.image === "string" ? part.image : Buffer.from(part.image).toString("base64");
237
+ const url = imageData.startsWith("data:") ? imageData : `data:${part.mimeType ?? "image/png"};base64,${imageData}`;
238
+ return { type: "image_url", image_url: { url, detail: "auto" } };
239
+ }
240
+ return { type: "text", text: "" };
241
+ })
242
+ };
243
+ case "assistant":
244
+ const assistantMsg = {
245
+ role: "assistant",
246
+ content: msg.content
247
+ };
248
+ if (msg.toolCalls && msg.toolCalls.length > 0) {
249
+ assistantMsg.tool_calls = msg.toolCalls.map((tc) => ({
250
+ id: tc.id,
251
+ type: "function",
252
+ function: {
253
+ name: tc.name,
254
+ arguments: JSON.stringify(tc.args)
255
+ }
256
+ }));
257
+ }
258
+ return assistantMsg;
259
+ case "tool":
260
+ return {
261
+ role: "tool",
262
+ tool_call_id: msg.toolCallId,
263
+ content: msg.content
264
+ };
265
+ default:
266
+ return msg;
267
+ }
268
+ });
269
+ }
270
+
271
+ // src/adapters/base.ts
272
+ function parameterToJsonSchema(param) {
273
+ const schema = {
274
+ type: param.type
275
+ };
276
+ if (param.description) {
277
+ schema.description = param.description;
278
+ }
279
+ if (param.enum) {
280
+ schema.enum = param.enum;
281
+ }
282
+ if (param.type === "array" && param.items) {
283
+ schema.items = parameterToJsonSchema(
284
+ param.items
285
+ );
286
+ }
287
+ if (param.type === "object" && param.properties) {
288
+ schema.properties = Object.fromEntries(
289
+ Object.entries(param.properties).map(([key, prop]) => [
290
+ key,
291
+ parameterToJsonSchema(
292
+ prop
293
+ )
294
+ ])
295
+ );
296
+ }
297
+ return schema;
298
+ }
299
+ function formatTools(actions) {
300
+ return actions.map((action) => ({
301
+ type: "function",
302
+ function: {
303
+ name: action.name,
304
+ description: action.description,
305
+ parameters: {
306
+ type: "object",
307
+ properties: action.parameters ? Object.fromEntries(
308
+ Object.entries(action.parameters).map(([key, param]) => [
309
+ key,
310
+ parameterToJsonSchema(param)
311
+ ])
312
+ ) : {},
313
+ required: action.parameters ? Object.entries(action.parameters).filter(([, param]) => param.required).map(([key]) => key) : []
314
+ }
315
+ }
316
+ }));
317
+ }
318
+ function hasImageAttachments(message) {
319
+ const attachments = message.metadata?.attachments;
320
+ return attachments?.some((a) => a.type === "image") ?? false;
321
+ }
322
+ function attachmentToOpenAIImage(attachment) {
323
+ if (attachment.type !== "image") return null;
324
+ let imageUrl;
325
+ if (attachment.url) {
326
+ imageUrl = attachment.url;
327
+ } else if (attachment.data) {
328
+ imageUrl = attachment.data.startsWith("data:") ? attachment.data : `data:${attachment.mimeType || "image/png"};base64,${attachment.data}`;
329
+ } else {
330
+ return null;
331
+ }
332
+ return {
333
+ type: "image_url",
334
+ image_url: {
335
+ url: imageUrl,
336
+ detail: "auto"
337
+ }
338
+ };
339
+ }
340
+ function messageToOpenAIContent(message) {
341
+ const attachments = message.metadata?.attachments;
342
+ const content = message.content ?? "";
343
+ if (!hasImageAttachments(message)) {
344
+ return content;
345
+ }
346
+ const blocks = [];
347
+ if (content) {
348
+ blocks.push({ type: "text", text: content });
349
+ }
350
+ if (attachments) {
351
+ for (const attachment of attachments) {
352
+ const imageBlock = attachmentToOpenAIImage(attachment);
353
+ if (imageBlock) {
354
+ blocks.push(imageBlock);
355
+ }
356
+ }
357
+ }
358
+ return blocks;
359
+ }
360
+ function formatMessagesForOpenAI2(messages, systemPrompt) {
361
+ const formatted = [];
362
+ if (systemPrompt) {
363
+ formatted.push({ role: "system", content: systemPrompt });
364
+ }
365
+ for (const msg of messages) {
366
+ if (msg.role === "system") {
367
+ formatted.push({ role: "system", content: msg.content ?? "" });
368
+ } else if (msg.role === "user") {
369
+ formatted.push({
370
+ role: "user",
371
+ content: messageToOpenAIContent(msg)
372
+ });
373
+ } else if (msg.role === "assistant") {
374
+ const assistantMsg = {
375
+ role: "assistant",
376
+ content: msg.content
377
+ };
378
+ if (msg.tool_calls && msg.tool_calls.length > 0) {
379
+ assistantMsg.tool_calls = msg.tool_calls;
380
+ }
381
+ formatted.push(assistantMsg);
382
+ } else if (msg.role === "tool" && msg.tool_call_id) {
383
+ formatted.push({
384
+ role: "tool",
385
+ content: msg.content ?? "",
386
+ tool_call_id: msg.tool_call_id
387
+ });
388
+ }
389
+ }
390
+ return formatted;
391
+ }
392
+
393
+ // src/adapters/openai.ts
394
+ var OpenAIAdapter = class {
395
+ constructor(config) {
396
+ this.provider = "openai";
397
+ this.config = config;
398
+ this.model = config.model || "gpt-4o";
399
+ }
400
+ async getClient() {
401
+ if (!this.client) {
402
+ const { default: OpenAI } = await import('openai');
403
+ this.client = new OpenAI({
404
+ apiKey: this.config.apiKey,
405
+ baseURL: this.config.baseUrl
406
+ });
407
+ }
408
+ return this.client;
409
+ }
410
+ async *stream(request) {
411
+ const client = await this.getClient();
412
+ let messages;
413
+ if (request.rawMessages && request.rawMessages.length > 0) {
414
+ const processedMessages = request.rawMessages.map((msg) => {
415
+ const hasAttachments = msg.attachments && Array.isArray(msg.attachments) && msg.attachments.length > 0;
416
+ if (hasAttachments) {
417
+ const content = [];
418
+ if (msg.content) {
419
+ content.push({ type: "text", text: msg.content });
420
+ }
421
+ for (const attachment of msg.attachments) {
422
+ if (attachment.type === "image") {
423
+ let imageUrl;
424
+ if (attachment.url) {
425
+ imageUrl = attachment.url;
426
+ } else if (attachment.data) {
427
+ imageUrl = attachment.data.startsWith("data:") ? attachment.data : `data:${attachment.mimeType || "image/png"};base64,${attachment.data}`;
428
+ } else {
429
+ continue;
430
+ }
431
+ content.push({
432
+ type: "image_url",
433
+ image_url: { url: imageUrl, detail: "auto" }
434
+ });
435
+ }
436
+ }
437
+ return { ...msg, content, attachments: void 0 };
438
+ }
439
+ return msg;
440
+ });
441
+ if (request.systemPrompt) {
442
+ const hasSystem = processedMessages.some((m) => m.role === "system");
443
+ if (!hasSystem) {
444
+ messages = [
445
+ { role: "system", content: request.systemPrompt },
446
+ ...processedMessages
447
+ ];
448
+ } else {
449
+ messages = processedMessages;
450
+ }
451
+ } else {
452
+ messages = processedMessages;
453
+ }
454
+ } else {
455
+ messages = formatMessagesForOpenAI2(
456
+ request.messages,
457
+ request.systemPrompt
458
+ );
459
+ }
460
+ const tools = request.actions?.length ? formatTools(request.actions) : void 0;
461
+ const messageId = core.generateMessageId();
462
+ yield { type: "message:start", id: messageId };
463
+ try {
464
+ const stream = await client.chat.completions.create({
465
+ model: request.config?.model || this.model,
466
+ messages,
467
+ tools,
468
+ temperature: request.config?.temperature ?? this.config.temperature,
469
+ max_tokens: request.config?.maxTokens ?? this.config.maxTokens,
470
+ stream: true
471
+ });
472
+ let currentToolCall = null;
473
+ for await (const chunk of stream) {
474
+ if (request.signal?.aborted) {
475
+ break;
476
+ }
477
+ const delta = chunk.choices[0]?.delta;
478
+ if (delta?.content) {
479
+ yield { type: "message:delta", content: delta.content };
480
+ }
481
+ if (delta?.tool_calls) {
482
+ for (const toolCall of delta.tool_calls) {
483
+ if (toolCall.id) {
484
+ if (currentToolCall) {
485
+ yield {
486
+ type: "action:args",
487
+ id: currentToolCall.id,
488
+ args: currentToolCall.arguments
489
+ };
490
+ }
491
+ currentToolCall = {
492
+ id: toolCall.id,
493
+ name: toolCall.function?.name || "",
494
+ arguments: toolCall.function?.arguments || ""
495
+ };
496
+ yield {
497
+ type: "action:start",
498
+ id: currentToolCall.id,
499
+ name: currentToolCall.name
500
+ };
501
+ } else if (currentToolCall && toolCall.function?.arguments) {
502
+ currentToolCall.arguments += toolCall.function.arguments;
503
+ }
504
+ }
505
+ }
506
+ if (chunk.choices[0]?.finish_reason) {
507
+ if (currentToolCall) {
508
+ yield {
509
+ type: "action:args",
510
+ id: currentToolCall.id,
511
+ args: currentToolCall.arguments
512
+ };
513
+ }
514
+ }
515
+ }
516
+ yield { type: "message:end" };
517
+ yield { type: "done" };
518
+ } catch (error) {
519
+ yield {
520
+ type: "error",
521
+ message: error instanceof Error ? error.message : "Unknown error",
522
+ code: "OPENAI_ERROR"
523
+ };
524
+ }
525
+ }
526
+ };
527
+ function createOpenAIAdapter(config) {
528
+ return new OpenAIAdapter(config);
529
+ }
530
+
531
+ // src/providers/openai/index.ts
532
+ var OPENAI_MODELS2 = {
533
+ // GPT-4o series
534
+ "gpt-4o": {
535
+ vision: true,
536
+ tools: true,
537
+ audio: true,
538
+ jsonMode: true,
539
+ maxTokens: 128e3
540
+ },
541
+ "gpt-4o-mini": {
542
+ vision: true,
543
+ tools: true,
544
+ audio: false,
545
+ jsonMode: true,
546
+ maxTokens: 128e3
547
+ },
548
+ "gpt-4o-2024-11-20": {
549
+ vision: true,
550
+ tools: true,
551
+ audio: true,
552
+ jsonMode: true,
553
+ maxTokens: 128e3
554
+ },
555
+ "gpt-4o-2024-08-06": {
556
+ vision: true,
557
+ tools: true,
558
+ audio: false,
559
+ jsonMode: true,
560
+ maxTokens: 128e3
561
+ },
562
+ // GPT-4 Turbo series
563
+ "gpt-4-turbo": {
564
+ vision: true,
565
+ tools: true,
566
+ audio: false,
567
+ jsonMode: true,
568
+ maxTokens: 128e3
569
+ },
570
+ "gpt-4-turbo-preview": {
571
+ vision: false,
572
+ tools: true,
573
+ audio: false,
574
+ jsonMode: true,
575
+ maxTokens: 128e3
576
+ },
577
+ // GPT-4 series
578
+ "gpt-4": {
579
+ vision: false,
580
+ tools: true,
581
+ audio: false,
582
+ jsonMode: false,
583
+ maxTokens: 8192
584
+ },
585
+ "gpt-4-32k": {
586
+ vision: false,
587
+ tools: true,
588
+ audio: false,
589
+ jsonMode: false,
590
+ maxTokens: 32768
591
+ },
592
+ // GPT-3.5 series
593
+ "gpt-3.5-turbo": {
594
+ vision: false,
595
+ tools: true,
596
+ audio: false,
597
+ jsonMode: true,
598
+ maxTokens: 16385
599
+ },
600
+ "gpt-3.5-turbo-16k": {
601
+ vision: false,
602
+ tools: true,
603
+ audio: false,
604
+ jsonMode: true,
605
+ maxTokens: 16385
606
+ },
607
+ // O1 reasoning series
608
+ o1: {
609
+ vision: true,
610
+ tools: false,
611
+ // O1 doesn't support tools yet
612
+ audio: false,
613
+ jsonMode: false,
614
+ maxTokens: 128e3
615
+ },
616
+ "o1-mini": {
617
+ vision: true,
618
+ tools: false,
619
+ audio: false,
620
+ jsonMode: false,
621
+ maxTokens: 128e3
622
+ },
623
+ "o1-preview": {
624
+ vision: true,
625
+ tools: false,
626
+ audio: false,
627
+ jsonMode: false,
628
+ maxTokens: 128e3
629
+ },
630
+ // O3 reasoning series
631
+ "o3-mini": {
632
+ vision: true,
633
+ tools: false,
634
+ audio: false,
635
+ jsonMode: false,
636
+ maxTokens: 128e3
637
+ }
638
+ };
639
+ function createOpenAI(config = {}) {
640
+ const apiKey = config.apiKey ?? process.env.OPENAI_API_KEY ?? "";
641
+ return {
642
+ name: "openai",
643
+ supportedModels: Object.keys(OPENAI_MODELS2),
644
+ languageModel(modelId) {
645
+ return createOpenAIAdapter({
646
+ apiKey,
647
+ model: modelId,
648
+ baseUrl: config.baseUrl
649
+ });
650
+ },
651
+ getCapabilities(modelId) {
652
+ const model = OPENAI_MODELS2[modelId] ?? OPENAI_MODELS2["gpt-4o"];
653
+ return {
654
+ supportsVision: model.vision,
655
+ supportsTools: model.tools,
656
+ supportsThinking: false,
657
+ // OpenAI doesn't have extended thinking
658
+ supportsStreaming: true,
659
+ supportsPDF: false,
660
+ // OpenAI doesn't support PDFs directly
661
+ supportsAudio: model.audio,
662
+ supportsVideo: false,
663
+ maxTokens: model.maxTokens,
664
+ supportedImageTypes: model.vision ? ["image/png", "image/jpeg", "image/gif", "image/webp"] : [],
665
+ supportedAudioTypes: model.audio ? ["audio/mp3", "audio/wav", "audio/webm"] : [],
666
+ supportsJsonMode: model.jsonMode,
667
+ supportsSystemMessages: true
668
+ };
669
+ }
670
+ };
671
+ }
672
+ var createOpenAIProvider = createOpenAI;
673
+
674
+ exports.createOpenAI = createOpenAI;
675
+ exports.createOpenAIModel = openai;
676
+ exports.createOpenAIProvider = createOpenAIProvider;
677
+ exports.openai = openai;
678
+ //# sourceMappingURL=index.js.map
679
+ //# sourceMappingURL=index.js.map