@yourgpt/llm-sdk 0.1.0 → 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (56)
  1. package/README.md +61 -40
  2. package/dist/adapters/index.d.mts +4 -258
  3. package/dist/adapters/index.d.ts +4 -258
  4. package/dist/adapters/index.js +0 -113
  5. package/dist/adapters/index.js.map +1 -1
  6. package/dist/adapters/index.mjs +1 -112
  7. package/dist/adapters/index.mjs.map +1 -1
  8. package/dist/base-D_FyHFKj.d.mts +235 -0
  9. package/dist/base-D_FyHFKj.d.ts +235 -0
  10. package/dist/index.d.mts +209 -451
  11. package/dist/index.d.ts +209 -451
  12. package/dist/index.js +1905 -311
  13. package/dist/index.js.map +1 -1
  14. package/dist/index.mjs +1895 -309
  15. package/dist/index.mjs.map +1 -1
  16. package/dist/providers/anthropic/index.d.mts +61 -0
  17. package/dist/providers/anthropic/index.d.ts +61 -0
  18. package/dist/providers/anthropic/index.js +939 -0
  19. package/dist/providers/anthropic/index.js.map +1 -0
  20. package/dist/providers/anthropic/index.mjs +934 -0
  21. package/dist/providers/anthropic/index.mjs.map +1 -0
  22. package/dist/providers/azure/index.d.mts +38 -0
  23. package/dist/providers/azure/index.d.ts +38 -0
  24. package/dist/providers/azure/index.js +380 -0
  25. package/dist/providers/azure/index.js.map +1 -0
  26. package/dist/providers/azure/index.mjs +377 -0
  27. package/dist/providers/azure/index.mjs.map +1 -0
  28. package/dist/providers/google/index.d.mts +72 -0
  29. package/dist/providers/google/index.d.ts +72 -0
  30. package/dist/providers/google/index.js +790 -0
  31. package/dist/providers/google/index.js.map +1 -0
  32. package/dist/providers/google/index.mjs +785 -0
  33. package/dist/providers/google/index.mjs.map +1 -0
  34. package/dist/providers/ollama/index.d.mts +24 -0
  35. package/dist/providers/ollama/index.d.ts +24 -0
  36. package/dist/providers/ollama/index.js +235 -0
  37. package/dist/providers/ollama/index.js.map +1 -0
  38. package/dist/providers/ollama/index.mjs +232 -0
  39. package/dist/providers/ollama/index.mjs.map +1 -0
  40. package/dist/providers/openai/index.d.mts +82 -0
  41. package/dist/providers/openai/index.d.ts +82 -0
  42. package/dist/providers/openai/index.js +679 -0
  43. package/dist/providers/openai/index.js.map +1 -0
  44. package/dist/providers/openai/index.mjs +674 -0
  45. package/dist/providers/openai/index.mjs.map +1 -0
  46. package/dist/providers/xai/index.d.mts +78 -0
  47. package/dist/providers/xai/index.d.ts +78 -0
  48. package/dist/providers/xai/index.js +671 -0
  49. package/dist/providers/xai/index.js.map +1 -0
  50. package/dist/providers/xai/index.mjs +666 -0
  51. package/dist/providers/xai/index.mjs.map +1 -0
  52. package/dist/types-BBCZ3Fxy.d.mts +308 -0
  53. package/dist/types-CdORv1Yu.d.mts +338 -0
  54. package/dist/types-CdORv1Yu.d.ts +338 -0
  55. package/dist/types-DcoCaVVC.d.ts +308 -0
  56. package/package.json +34 -3
@@ -0,0 +1,674 @@
1
+ import { generateMessageId } from '@yourgpt/copilot-sdk/core';
2
+
3
+ // src/providers/openai/provider.ts
4
+ var OPENAI_MODELS = {
5
+ // GPT-4o series
6
+ "gpt-4o": { vision: true, tools: true, jsonMode: true, maxTokens: 128e3 },
7
+ "gpt-4o-mini": {
8
+ vision: true,
9
+ tools: true,
10
+ jsonMode: true,
11
+ maxTokens: 128e3
12
+ },
13
+ "gpt-4o-2024-11-20": {
14
+ vision: true,
15
+ tools: true,
16
+ jsonMode: true,
17
+ maxTokens: 128e3
18
+ },
19
+ "gpt-4o-2024-08-06": {
20
+ vision: true,
21
+ tools: true,
22
+ jsonMode: true,
23
+ maxTokens: 128e3
24
+ },
25
+ // GPT-4 Turbo
26
+ "gpt-4-turbo": {
27
+ vision: true,
28
+ tools: true,
29
+ jsonMode: true,
30
+ maxTokens: 128e3
31
+ },
32
+ "gpt-4-turbo-preview": {
33
+ vision: false,
34
+ tools: true,
35
+ jsonMode: true,
36
+ maxTokens: 128e3
37
+ },
38
+ // GPT-4
39
+ "gpt-4": { vision: false, tools: true, jsonMode: false, maxTokens: 8192 },
40
+ "gpt-4-32k": {
41
+ vision: false,
42
+ tools: true,
43
+ jsonMode: false,
44
+ maxTokens: 32768
45
+ },
46
+ // GPT-3.5
47
+ "gpt-3.5-turbo": {
48
+ vision: false,
49
+ tools: true,
50
+ jsonMode: true,
51
+ maxTokens: 16385
52
+ },
53
+ // O1 series (reasoning)
54
+ o1: { vision: true, tools: false, jsonMode: false, maxTokens: 128e3 },
55
+ "o1-mini": { vision: true, tools: false, jsonMode: false, maxTokens: 128e3 },
56
+ "o1-preview": {
57
+ vision: true,
58
+ tools: false,
59
+ jsonMode: false,
60
+ maxTokens: 128e3
61
+ },
62
+ // O3 series
63
+ "o3-mini": { vision: true, tools: false, jsonMode: false, maxTokens: 128e3 }
64
+ };
65
/**
 * Create an OpenAI language-model handle.
 *
 * @param {string} modelId - OpenAI model name; unknown models fall back to
 *   the "gpt-4o" capability profile.
 * @param {object} [options] - { apiKey?, baseURL?, organization?, headers? }.
 *   apiKey defaults to process.env.OPENAI_API_KEY.
 * @returns an object exposing provider metadata, capabilities, doGenerate
 *   (one-shot completion) and doStream (async-generator streaming).
 */
function openai(modelId, options = {}) {
  const apiKey = options.apiKey ?? process.env.OPENAI_API_KEY;
  const baseURL = options.baseURL ?? "https://api.openai.com/v1";
  let client = null;
  // Lazily import the OpenAI SDK so the dependency is only loaded on first use.
  async function getClient() {
    if (!client) {
      const { default: OpenAI } = await import('openai');
      client = new OpenAI({
        apiKey,
        baseURL,
        organization: options.organization,
        defaultHeaders: options.headers
      });
    }
    return client;
  }
  // BUGFIX: tolerate malformed or truncated tool-call argument JSON instead of
  // throwing from inside the provider; empty/invalid args become {}.
  function parseToolArgs(raw) {
    try {
      return JSON.parse(raw || "{}");
    } catch {
      return {};
    }
  }
  const modelConfig = OPENAI_MODELS[modelId] ?? OPENAI_MODELS["gpt-4o"];
  return {
    provider: "openai",
    modelId,
    capabilities: {
      supportsVision: modelConfig.vision,
      supportsTools: modelConfig.tools,
      supportsStreaming: true,
      supportsJsonMode: modelConfig.jsonMode,
      supportsThinking: false,
      supportsPDF: false,
      maxTokens: modelConfig.maxTokens,
      supportedImageTypes: modelConfig.vision ? ["image/png", "image/jpeg", "image/gif", "image/webp"] : []
    },
    // One-shot (non-streaming) completion.
    async doGenerate(params) {
      const client2 = await getClient();
      const messages = formatMessagesForOpenAI(params.messages);
      const response = await client2.chat.completions.create({
        model: modelId,
        messages,
        tools: params.tools,
        temperature: params.temperature,
        max_tokens: params.maxTokens
      });
      const choice = response.choices[0];
      const message = choice.message;
      const toolCalls = (message.tool_calls ?? []).map((tc) => ({
        id: tc.id,
        name: tc.function.name,
        args: parseToolArgs(tc.function.arguments)
      }));
      return {
        text: message.content ?? "",
        toolCalls,
        finishReason: mapFinishReason(choice.finish_reason),
        usage: {
          promptTokens: response.usage?.prompt_tokens ?? 0,
          completionTokens: response.usage?.completion_tokens ?? 0,
          totalTokens: response.usage?.total_tokens ?? 0
        },
        rawResponse: response
      };
    },
    // Streaming completion; yields text-delta / tool-call / finish / error events.
    async *doStream(params) {
      const client2 = await getClient();
      const messages = formatMessagesForOpenAI(params.messages);
      const stream = await client2.chat.completions.create({
        model: modelId,
        messages,
        tools: params.tools,
        temperature: params.temperature,
        max_tokens: params.maxTokens,
        stream: true,
        // BUGFIX: without include_usage OpenAI never sends token usage on a
        // stream, so the finish event always reported zeros.
        stream_options: { include_usage: true }
      });
      // Tool-call deltas arrive fragmented; accumulate arguments until the next
      // tool call starts or the stream finishes, then flush as one event.
      let currentToolCall = null;
      let promptTokens = 0;
      let completionTokens = 0;
      let finishReason = null;
      for await (const chunk of stream) {
        if (params.signal?.aborted) {
          yield { type: "error", error: new Error("Aborted") };
          return;
        }
        // BUGFIX: usage was previously read only when finish_reason was set,
        // but with include_usage it arrives in a trailing chunk whose choices
        // array is empty — capture it on every chunk.
        if (chunk.usage) {
          promptTokens = chunk.usage.prompt_tokens;
          completionTokens = chunk.usage.completion_tokens;
        }
        const choice = chunk.choices[0];
        const delta = choice?.delta;
        if (delta?.content) {
          yield { type: "text-delta", text: delta.content };
        }
        if (delta?.tool_calls) {
          for (const tc of delta.tool_calls) {
            if (tc.id) {
              // A new tool call begins: flush the previous one first.
              if (currentToolCall) {
                yield {
                  type: "tool-call",
                  toolCall: {
                    id: currentToolCall.id,
                    name: currentToolCall.name,
                    args: parseToolArgs(currentToolCall.arguments)
                  }
                };
              }
              currentToolCall = {
                id: tc.id,
                name: tc.function?.name ?? "",
                arguments: tc.function?.arguments ?? ""
              };
            } else if (currentToolCall && tc.function?.arguments) {
              currentToolCall.arguments += tc.function.arguments;
            }
          }
        }
        if (choice?.finish_reason) {
          if (currentToolCall) {
            yield {
              type: "tool-call",
              toolCall: {
                id: currentToolCall.id,
                name: currentToolCall.name,
                args: parseToolArgs(currentToolCall.arguments)
              }
            };
            currentToolCall = null;
          }
          finishReason = choice.finish_reason;
        }
      }
      // BUGFIX: emit finish after the stream drains so the trailing usage-only
      // chunk is reflected in the reported token counts (and finish is emitted
      // at most once even if several chunks carry a finish_reason).
      if (finishReason !== null) {
        yield {
          type: "finish",
          finishReason: mapFinishReason(finishReason),
          usage: {
            promptTokens,
            completionTokens,
            totalTokens: promptTokens + completionTokens
          }
        };
      }
    }
  };
}
203
// Normalize an OpenAI finish_reason value to the SDK's finish-reason vocabulary.
// Unknown or missing reasons map to "unknown".
function mapFinishReason(reason) {
  const mapping = new Map([
    ["stop", "stop"],
    ["length", "length"],
    ["tool_calls", "tool-calls"],
    ["function_call", "tool-calls"],
    ["content_filter", "content-filter"]
  ]);
  return mapping.get(reason) ?? "unknown";
}
218
// Convert SDK chat messages into OpenAI Chat Completions message objects.
// Handles system/user/assistant/tool roles; unknown roles pass through unchanged.
function formatMessagesForOpenAI(messages) {
  // Build an image_url block from an image content part. Accepts raw base64
  // strings, data: URLs, or binary buffers.
  const toImageBlock = (part) => {
    const raw = typeof part.image === "string" ? part.image : Buffer.from(part.image).toString("base64");
    const url = raw.startsWith("data:") ? raw : `data:${part.mimeType ?? "image/png"};base64,${raw}`;
    return { type: "image_url", image_url: { url, detail: "auto" } };
  };
  const toUserMessage = (msg) => {
    if (typeof msg.content === "string") {
      return { role: "user", content: msg.content };
    }
    const parts = msg.content.map((part) => {
      if (part.type === "text") return { type: "text", text: part.text };
      if (part.type === "image") return toImageBlock(part);
      // Unrecognized part kinds degrade to an empty text block.
      return { type: "text", text: "" };
    });
    return { role: "user", content: parts };
  };
  const toAssistantMessage = (msg) => {
    const out = { role: "assistant", content: msg.content };
    if (msg.toolCalls && msg.toolCalls.length > 0) {
      out.tool_calls = msg.toolCalls.map((tc) => ({
        id: tc.id,
        type: "function",
        function: { name: tc.name, arguments: JSON.stringify(tc.args) }
      }));
    }
    return out;
  };
  return messages.map((msg) => {
    if (msg.role === "system") return { role: "system", content: msg.content };
    if (msg.role === "user") return toUserMessage(msg);
    if (msg.role === "assistant") return toAssistantMessage(msg);
    if (msg.role === "tool") return { role: "tool", tool_call_id: msg.toolCallId, content: msg.content };
    return msg;
  });
}
268
+
269
+ // src/adapters/base.ts
270
// Recursively translate an action parameter definition into a JSON Schema node.
// description/enum are copied when present; arrays recurse into items and
// objects recurse into each property.
function parameterToJsonSchema(param) {
  const schema = { type: param.type };
  if (param.description) {
    schema.description = param.description;
  }
  if (param.enum) {
    schema.enum = param.enum;
  }
  if (param.type === "array" && param.items) {
    schema.items = parameterToJsonSchema(param.items);
  }
  if (param.type === "object" && param.properties) {
    schema.properties = {};
    for (const [key, prop] of Object.entries(param.properties)) {
      schema.properties[key] = parameterToJsonSchema(prop);
    }
  }
  return schema;
}
297
// Convert SDK action definitions into OpenAI function-tool declarations.
// Actions without parameters produce an empty object schema.
function formatTools(actions) {
  return actions.map((action) => {
    const properties = {};
    const required = [];
    for (const [key, param] of Object.entries(action.parameters ?? {})) {
      properties[key] = parameterToJsonSchema(param);
      if (param.required) {
        required.push(key);
      }
    }
    return {
      type: "function",
      function: {
        name: action.name,
        description: action.description,
        parameters: { type: "object", properties, required }
      }
    };
  });
}
316
// True when the message metadata carries at least one image attachment.
function hasImageAttachments(message) {
  const attachments = message.metadata?.attachments ?? [];
  return attachments.some((a) => a.type === "image");
}
320
// Build an OpenAI image_url content block from an image attachment.
// Prefers a URL over inline data; returns null for non-image attachments or
// attachments with no usable source.
function attachmentToOpenAIImage(attachment) {
  if (attachment.type !== "image") return null;
  const { url, data, mimeType } = attachment;
  let source = null;
  if (url) {
    source = url;
  } else if (data) {
    source = data.startsWith("data:") ? data : `data:${mimeType || "image/png"};base64,${data}`;
  }
  if (source === null) return null;
  return {
    type: "image_url",
    image_url: { url: source, detail: "auto" }
  };
}
338
// Produce OpenAI message content for a chat message: a plain string when there
// are no image attachments, otherwise an array of text/image content blocks
// (the text block is omitted when the message text is empty).
function messageToOpenAIContent(message) {
  const text = message.content ?? "";
  if (!hasImageAttachments(message)) {
    return text;
  }
  const blocks = text ? [{ type: "text", text }] : [];
  for (const attachment of message.metadata?.attachments ?? []) {
    const imageBlock = attachmentToOpenAIImage(attachment);
    if (imageBlock) {
      blocks.push(imageBlock);
    }
  }
  return blocks;
}
358
// Flatten SDK chat history into OpenAI chat messages, optionally prepending a
// system prompt. Tool messages without a tool_call_id are dropped; roles other
// than system/user/assistant/tool are skipped.
function formatMessagesForOpenAI2(messages, systemPrompt) {
  const formatted = systemPrompt ? [{ role: "system", content: systemPrompt }] : [];
  for (const msg of messages) {
    switch (msg.role) {
      case "system":
        formatted.push({ role: "system", content: msg.content ?? "" });
        break;
      case "user":
        formatted.push({ role: "user", content: messageToOpenAIContent(msg) });
        break;
      case "assistant": {
        const entry = { role: "assistant", content: msg.content };
        if (msg.tool_calls && msg.tool_calls.length > 0) {
          entry.tool_calls = msg.tool_calls;
        }
        formatted.push(entry);
        break;
      }
      case "tool":
        if (msg.tool_call_id) {
          formatted.push({ role: "tool", content: msg.content ?? "", tool_call_id: msg.tool_call_id });
        }
        break;
    }
  }
  return formatted;
}
390
+
391
+ // src/adapters/openai.ts
392
// src/adapters/openai.ts
// Streaming adapter bridging copilot-sdk chat requests to the OpenAI Chat
// Completions API. Emits message:* / action:* / done events, or a single
// error event on failure.
var OpenAIAdapter = class {
  /**
   * @param {object} config - { apiKey, baseUrl?, model?, temperature?, maxTokens? }.
   *   model defaults to "gpt-4o".
   */
  constructor(config) {
    this.provider = "openai";
    this.config = config;
    this.model = config.model || "gpt-4o";
  }
  // Lazily import and cache the OpenAI SDK client so the dependency is only
  // loaded when the adapter is actually used.
  async getClient() {
    if (!this.client) {
      const { default: OpenAI } = await import('openai');
      this.client = new OpenAI({
        apiKey: this.config.apiKey,
        baseURL: this.config.baseUrl
      });
    }
    return this.client;
  }
  /**
   * Stream a chat completion as adapter events.
   * Yields: message:start, message:delta, action:start, action:args,
   * message:end, done — or an error event if the request fails.
   */
  async *stream(request) {
    const client = await this.getClient();
    let messages;
    if (request.rawMessages && request.rawMessages.length > 0) {
      // Raw message path: expand image attachments into multimodal content blocks.
      const processedMessages = request.rawMessages.map((msg) => {
        const hasAttachments = msg.attachments && Array.isArray(msg.attachments) && msg.attachments.length > 0;
        if (!hasAttachments) {
          return msg;
        }
        const content = [];
        if (msg.content) {
          content.push({ type: "text", text: msg.content });
        }
        for (const attachment of msg.attachments) {
          if (attachment.type !== "image") continue;
          let imageUrl;
          if (attachment.url) {
            imageUrl = attachment.url;
          } else if (attachment.data) {
            imageUrl = attachment.data.startsWith("data:") ? attachment.data : `data:${attachment.mimeType || "image/png"};base64,${attachment.data}`;
          } else {
            continue; // attachment has no usable source
          }
          content.push({
            type: "image_url",
            image_url: { url: imageUrl, detail: "auto" }
          });
        }
        return { ...msg, content, attachments: void 0 };
      });
      // Prepend the system prompt only when the history lacks one already.
      if (request.systemPrompt && !processedMessages.some((m) => m.role === "system")) {
        messages = [
          { role: "system", content: request.systemPrompt },
          ...processedMessages
        ];
      } else {
        messages = processedMessages;
      }
    } else {
      messages = formatMessagesForOpenAI2(request.messages, request.systemPrompt);
    }
    const tools = request.actions?.length ? formatTools(request.actions) : void 0;
    const messageId = generateMessageId();
    yield { type: "message:start", id: messageId };
    try {
      const stream = await client.chat.completions.create({
        model: request.config?.model || this.model,
        messages,
        tools,
        temperature: request.config?.temperature ?? this.config.temperature,
        max_tokens: request.config?.maxTokens ?? this.config.maxTokens,
        stream: true
      });
      // Tool-call deltas arrive fragmented; accumulate arguments until the
      // next tool call starts or the stream finishes, then flush.
      let currentToolCall = null;
      for await (const chunk of stream) {
        if (request.signal?.aborted) {
          break;
        }
        const delta = chunk.choices[0]?.delta;
        if (delta?.content) {
          yield { type: "message:delta", content: delta.content };
        }
        if (delta?.tool_calls) {
          for (const toolCall of delta.tool_calls) {
            if (toolCall.id) {
              // A new tool call begins: flush the previous one first.
              if (currentToolCall) {
                yield {
                  type: "action:args",
                  id: currentToolCall.id,
                  args: currentToolCall.arguments
                };
              }
              currentToolCall = {
                id: toolCall.id,
                name: toolCall.function?.name || "",
                arguments: toolCall.function?.arguments || ""
              };
              yield {
                type: "action:start",
                id: currentToolCall.id,
                name: currentToolCall.name
              };
            } else if (currentToolCall && toolCall.function?.arguments) {
              currentToolCall.arguments += toolCall.function.arguments;
            }
          }
        }
        if (chunk.choices[0]?.finish_reason) {
          if (currentToolCall) {
            yield {
              type: "action:args",
              id: currentToolCall.id,
              args: currentToolCall.arguments
            };
            // BUGFIX: reset after flushing so the same tool call is never
            // emitted twice when additional chunks carry a finish_reason
            // (matches the doStream implementation in the openai provider).
            currentToolCall = null;
          }
        }
      }
      yield { type: "message:end" };
      yield { type: "done" };
    } catch (error) {
      yield {
        type: "error",
        message: error instanceof Error ? error.message : "Unknown error",
        code: "OPENAI_ERROR"
      };
    }
  }
};
525
// Factory helper mirroring the other provider adapters' create* functions.
function createOpenAIAdapter(config) {
  const adapter = new OpenAIAdapter(config);
  return adapter;
}
528
+
529
+ // src/providers/openai/index.ts
530
// Capability matrix for the provider factory (index.ts build): adds an audio
// flag on top of vision/tools/jsonMode/maxTokens.
var OPENAI_MODELS2 = {
  // GPT-4o family
  "gpt-4o":            { vision: true,  tools: true, audio: true,  jsonMode: true, maxTokens: 128e3 },
  "gpt-4o-mini":       { vision: true,  tools: true, audio: false, jsonMode: true, maxTokens: 128e3 },
  "gpt-4o-2024-11-20": { vision: true,  tools: true, audio: true,  jsonMode: true, maxTokens: 128e3 },
  "gpt-4o-2024-08-06": { vision: true,  tools: true, audio: false, jsonMode: true, maxTokens: 128e3 },
  // GPT-4 Turbo family
  "gpt-4-turbo":         { vision: true,  tools: true, audio: false, jsonMode: true, maxTokens: 128e3 },
  "gpt-4-turbo-preview": { vision: false, tools: true, audio: false, jsonMode: true, maxTokens: 128e3 },
  // GPT-4 family
  "gpt-4":     { vision: false, tools: true, audio: false, jsonMode: false, maxTokens: 8192 },
  "gpt-4-32k": { vision: false, tools: true, audio: false, jsonMode: false, maxTokens: 32768 },
  // GPT-3.5 family
  "gpt-3.5-turbo":     { vision: false, tools: true, audio: false, jsonMode: true, maxTokens: 16385 },
  "gpt-3.5-turbo-16k": { vision: false, tools: true, audio: false, jsonMode: true, maxTokens: 16385 },
  // o1 reasoning family (no tool calling yet)
  o1:           { vision: true, tools: false, audio: false, jsonMode: false, maxTokens: 128e3 },
  "o1-mini":    { vision: true, tools: false, audio: false, jsonMode: false, maxTokens: 128e3 },
  "o1-preview": { vision: true, tools: false, audio: false, jsonMode: false, maxTokens: 128e3 },
  // o3 reasoning family
  "o3-mini": { vision: true, tools: false, audio: false, jsonMode: false, maxTokens: 128e3 }
};
637
// src/providers/openai/index.ts
/**
 * Provider factory: exposes model enumeration, adapter construction, and
 * per-model capability lookup for the OpenAI provider.
 *
 * @param {object} [config] - { apiKey?, baseUrl? }. apiKey falls back to
 *   process.env.OPENAI_API_KEY, then to "".
 */
function createOpenAI(config = {}) {
  const apiKey = config.apiKey ?? process.env.OPENAI_API_KEY ?? "";
  return {
    name: "openai",
    supportedModels: Object.keys(OPENAI_MODELS2),
    // Build a streaming adapter bound to the requested model.
    languageModel(modelId) {
      return createOpenAIAdapter({
        apiKey,
        model: modelId,
        baseUrl: config.baseUrl
      });
    },
    // Report capabilities; unknown models fall back to the gpt-4o profile.
    getCapabilities(modelId) {
      const model = OPENAI_MODELS2[modelId] ?? OPENAI_MODELS2["gpt-4o"];
      return {
        supportsVision: model.vision,
        supportsTools: model.tools,
        supportsThinking: false, // OpenAI has no extended-thinking mode
        supportsStreaming: true,
        supportsPDF: false, // PDFs are not accepted directly
        supportsAudio: model.audio,
        supportsVideo: false,
        maxTokens: model.maxTokens,
        supportedImageTypes: model.vision ? ["image/png", "image/jpeg", "image/gif", "image/webp"] : [],
        supportedAudioTypes: model.audio ? ["audio/mp3", "audio/wav", "audio/webm"] : [],
        supportsJsonMode: model.jsonMode,
        supportsSystemMessages: true
      };
    }
  };
}
670
// Backward-compatible alias for callers importing createOpenAIProvider.
var createOpenAIProvider = createOpenAI;
671
+
672
+ export { createOpenAI, openai as createOpenAIModel, createOpenAIProvider, openai };
673
+ //# sourceMappingURL=index.mjs.map
674
+ //# sourceMappingURL=index.mjs.map