@standardagents/openai 0.10.0-dev.ffffff

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js ADDED
@@ -0,0 +1,1218 @@
+ // src/OpenAIProvider.ts
+ import { ProviderError, defineTool } from "@standardagents/spec";
+ import { z } from "zod";
+
+ // src/transformers.ts
+ import { mapReasoningLevel } from "@standardagents/spec";
+ var DEFAULT_REASONING_LEVELS = {
+   0: null,
+   33: "low",
+   66: "medium",
+   100: "high"
+ };
+ var OPENAI_NATIVE_TOOLS = /* @__PURE__ */ new Set([
+   "image_generation",
+   "web_search",
+   "code_interpreter",
+   "file_search"
+ ]);
+ function transformContentPart(part) {
+   if (part.type === "text") {
+     return { type: "input_text", text: part.text };
+   }
+   if (part.type === "image") {
+     const data = part.data || "";
+     const imageUrl = data.startsWith("data:") ? data : `data:${part.mediaType || "image/png"};base64,${data}`;
+     return {
+       type: "input_image",
+       image_url: imageUrl,
+       detail: part.detail || "auto"
+     };
+   }
+   if (part.type === "image_url") {
+     const url = part.image_url?.url || "";
+     const detail = part.image_url?.detail || "auto";
+     return {
+       type: "input_image",
+       image_url: url,
+       detail
+     };
+   }
+   const fileData = part.data || "";
+   return {
+     type: "input_file",
+     filename: part.filename,
+     file_data: fileData.startsWith("data:") ? fileData : `data:${part.mediaType || "application/octet-stream"};base64,${fileData}`
+   };
+ }
+ function transformMessageContent(content) {
+   if (typeof content === "string") {
+     return content;
+   }
+   return content.map(transformContentPart);
+ }
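
Reviewer note: a minimal sketch of the part mapping above (values are illustrative; shapes follow the code):

// Raw base64 image data is wrapped into a data URI with the declared media type.
transformContentPart({ type: "image", data: "iVBORw0KGgo", mediaType: "image/png" });
// => { type: "input_image", image_url: "data:image/png;base64,iVBORw0KGgo", detail: "auto" }
// Anything that is not text/image/image_url falls through to an "input_file" part.
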
+ function transformUserMessage(msg) {
+   const content = transformMessageContent(msg.content);
+   return {
+     role: "user",
+     content
+   };
+ }
+ function transformAssistantMessage(msg) {
+   const items = [];
+   const hasToolCalls = msg.toolCalls && msg.toolCalls.length > 0;
+   if (msg.content || hasToolCalls) {
+     items.push({
+       type: "message",
+       role: "assistant",
+       content: msg.content ? [{ type: "output_text", text: msg.content }] : []
+       // Empty content array when only tool calls
+     });
+   }
+   if (msg.reasoning || msg.reasoningDetails) {
+     const reasoningItemsById = /* @__PURE__ */ new Map();
+     const defaultId = `rs_${crypto.randomUUID().replace(/-/g, "").slice(0, 24)}`;
+     const getOrCreateItem = (id) => {
+       let item = reasoningItemsById.get(id);
+       if (!item) {
+         item = {
+           id,
+           type: "reasoning",
+           summary: []
+         };
+         reasoningItemsById.set(id, item);
+       }
+       return item;
+     };
+     if (msg.reasoningDetails) {
+       for (const detail of msg.reasoningDetails) {
+         const itemId = detail.id || defaultId;
+         const item = getOrCreateItem(itemId);
+         if (detail.type === "encrypted" && detail.data) {
+           item.encrypted_content = detail.data;
+         } else if (detail.type === "summary" && detail.text) {
+           item.summary.push({ type: "summary_text", text: detail.text });
+         } else if (detail.type === "text" && detail.text) {
+           item.summary.push({ type: "summary_text", text: detail.text });
+         }
+       }
+     }
+     if (msg.reasoning && typeof msg.reasoning === "string") {
+       if (reasoningItemsById.size > 0) {
+         const firstItem = reasoningItemsById.values().next().value;
+         if (firstItem) {
+           firstItem.summary.unshift({ type: "summary_text", text: msg.reasoning });
+         }
+       } else {
+         const newItem = getOrCreateItem(defaultId);
+         newItem.summary = [{ type: "summary_text", text: msg.reasoning }];
+       }
+     }
+     for (const item of reasoningItemsById.values()) {
+       items.push(item);
+     }
+   }
+   if (msg.toolCalls && msg.toolCalls.length > 0) {
+     for (const tc of msg.toolCalls) {
+       if (OPENAI_NATIVE_TOOLS.has(tc.name) && tc.name !== "image_generation") {
+         continue;
+       }
+       items.push({
+         type: "function_call",
+         call_id: tc.id,
+         name: tc.name,
+         arguments: JSON.stringify(tc.arguments)
+       });
+     }
+   }
+   return items;
+ }
+ function transformToolMessage(msg) {
+   let output;
+   if (typeof msg.content === "string") {
+     output = msg.content;
+   } else if ("type" in msg.content) {
+     if (msg.content.type === "text") {
+       output = msg.content.text;
+     } else if (msg.content.type === "error") {
+       output = `Error: ${msg.content.error}`;
+     } else {
+       output = JSON.stringify(msg.content);
+     }
+   } else {
+     output = JSON.stringify(msg.content);
+   }
+   if (msg.attachments?.length) {
+     const imageAttachments = msg.attachments.filter((a) => a.type === "image" && a.data);
+     if (imageAttachments.length > 0) {
+       const outputContent = [];
+       if (output) {
+         outputContent.push({
+           type: "input_text",
+           text: output
+         });
+       }
+       for (const attachment of imageAttachments) {
+         const attachmentData = attachment.data || "";
+         const imageData = attachmentData.startsWith("data:") ? attachmentData : `data:${attachment.mediaType || "image/png"};base64,${attachmentData}`;
+         outputContent.push({
+           type: "input_image",
+           image_url: imageData,
+           detail: "auto"
+         });
+       }
+       return [{
+         type: "function_call_output",
+         call_id: msg.toolCallId,
+         output: outputContent
+       }];
+     }
+   }
+   return [{
+     type: "function_call_output",
+     call_id: msg.toolCallId,
+     output
+   }];
+ }
+ function transformMessages(messages) {
+   let instructions;
+   const input = [];
+   for (const msg of messages) {
+     switch (msg.role) {
+       case "system":
+         instructions = instructions ? `${instructions}
+
+ ${msg.content}` : msg.content;
+         break;
+       case "user":
+         input.push(transformUserMessage(msg));
+         break;
+       case "assistant":
+         input.push(...transformAssistantMessage(msg));
+         break;
+       case "tool":
+         input.push(...transformToolMessage(msg));
+         break;
+     }
+   }
+   return { input, instructions };
+ }
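
Reviewer note: how the message loop above flattens a conversation, as a small sketch (inputs are illustrative):

// System messages are folded into one `instructions` string, joined by blank
// lines; other roles become Responses API input items.
const { input, instructions } = transformMessages([
  { role: "system", content: "Be terse." },
  { role: "user", content: "Hi" }
]);
// instructions === "Be terse."
// input === [{ role: "user", content: "Hi" }]
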
+ function transformTool(tool) {
+   const inputParams = tool.function.parameters;
+   let parameters;
+   if (inputParams && typeof inputParams === "object") {
+     parameters = {
+       ...inputParams,
+       additionalProperties: false
+     };
+   } else {
+     parameters = {
+       type: "object",
+       properties: {},
+       required: [],
+       additionalProperties: false
+     };
+   }
+   return {
+     type: "function",
+     name: tool.function.name,
+     description: tool.function.description || void 0,
+     parameters,
+     strict: true
+   };
+ }
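
Reviewer note: a sketch of the strict-mode wrapping (schema is illustrative). OpenAI's strict mode also expects every property to appear in `required`; the transform forces `additionalProperties: false` but leaves the caller's `required` array untouched.

transformTool({
  function: {
    name: "get_weather",
    description: "Look up weather",
    parameters: { type: "object", properties: { city: { type: "string" } }, required: ["city"] }
  }
});
// => { type: "function", name: "get_weather", description: "Look up weather",
//      parameters: { type: "object", properties: { city: { type: "string" } },
//                    required: ["city"], additionalProperties: false },
//      strict: true }
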
+ function transformTools(tools) {
+   const functionTools = [];
+   const nativeTools = [];
+   for (const tool of tools) {
+     const toolName = tool.function.name;
+     console.log(`[transformTools] Tool "${toolName}": executionMode=${tool.executionMode}, isNativeTool=${OPENAI_NATIVE_TOOLS.has(toolName)}`);
+     if (tool.executionMode === "provider" && OPENAI_NATIVE_TOOLS.has(toolName)) {
+       console.log(`[transformTools] Adding "${toolName}" as native tool`);
+       nativeTools.push({ type: toolName });
+     } else {
+       console.log(`[transformTools] Adding "${toolName}" as function tool`);
+       functionTools.push(transformTool(tool));
+     }
+   }
+   console.log(`[transformTools] Result: ${functionTools.length} function tools, ${nativeTools.length} native tools`);
+   return { functionTools, nativeTools };
+ }
+ function transformToolChoice(choice) {
+   if (choice === "auto") {
+     return "auto";
+   }
+   if (choice === "none") {
+     return "none";
+   }
+   if (choice === "required") {
+     return "required";
+   }
+   if (typeof choice === "object" && "name" in choice) {
+     return { type: "function", name: choice.name };
+   }
+   return "auto";
+ }
+ function mapFinishReason(response) {
+   if (response.status === "failed") {
+     return "error";
+   }
+   if (response.status === "incomplete") {
+     if (response.incomplete_details?.reason === "max_output_tokens") {
+       return "length";
+     }
+     if (response.incomplete_details?.reason === "content_filter") {
+       return "content_filter";
+     }
+   }
+   const hasToolCalls = response.output.some(
+     (item) => item.type === "function_call"
+   );
+   if (hasToolCalls) {
+     return "tool_calls";
+   }
+   return "stop";
+ }
+ function extractTextContent(output) {
+   const textParts = [];
+   for (const item of output) {
+     if (item.type === "message" && item.role === "assistant") {
+       for (const content of item.content) {
+         if (content.type === "output_text") {
+           textParts.push(content.text);
+         }
+       }
+     }
+   }
+   return textParts.length > 0 ? textParts.join("") : null;
+ }
+ function extractReasoningContent(output) {
+   let reasoning = null;
+   const reasoningDetails = [];
+   for (const item of output) {
+     if (item.type === "reasoning") {
+       const reasoningId = item.id;
+       if (item.summary && item.summary.length > 0) {
+         const summaryText = item.summary.map((s) => s.text).join("\n");
+         reasoning = reasoning ? `${reasoning}
+ ${summaryText}` : summaryText;
+         reasoningDetails.push({ type: "summary", id: reasoningId, text: summaryText });
+       }
+       if (item.encrypted_content) {
+         reasoningDetails.push({ type: "encrypted", id: reasoningId, data: item.encrypted_content });
+       }
+     }
+   }
+   return {
+     reasoning,
+     reasoningDetails: reasoningDetails.length > 0 ? reasoningDetails : void 0
+   };
+ }
+ function extractToolCalls(output) {
+   const toolCalls = [];
+   for (const item of output) {
+     if (item.type === "function_call") {
+       let parsedArgs = {};
+       try {
+         parsedArgs = item.arguments ? JSON.parse(item.arguments) : {};
+       } catch {
+       }
+       toolCalls.push({
+         id: item.call_id,
+         name: item.name,
+         arguments: parsedArgs
+       });
+     }
+   }
+   return toolCalls.length > 0 ? toolCalls : void 0;
+ }
+ function extractProviderImages(output) {
+   const images = [];
+   for (const item of output) {
+     if (item.type === "image_generation_call" && item.result) {
+       images.push({
+         id: item.id,
+         toolName: "image_generation",
+         data: item.result,
+         mediaType: "image/png"
+         // Note: OpenAI SDK ImageGenerationCall doesn't expose revised_prompt
+       });
+     }
+   }
+   return images.length > 0 ? images : void 0;
+ }
+ function transformUsage(usage) {
+   if (!usage) {
+     return {
+       promptTokens: 0,
+       completionTokens: 0,
+       totalTokens: 0
+     };
+   }
+   return {
+     promptTokens: usage.input_tokens || 0,
+     completionTokens: usage.output_tokens || 0,
+     totalTokens: usage.total_tokens || 0,
+     reasoningTokens: usage.output_tokens_details?.reasoning_tokens,
+     cachedTokens: usage.input_tokens_details?.cached_tokens
+   };
+ }
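
Reviewer note: the usage mapping above, with illustrative numbers:

transformUsage({
  input_tokens: 1200,
  output_tokens: 300,
  total_tokens: 1500,
  output_tokens_details: { reasoning_tokens: 128 },
  input_tokens_details: { cached_tokens: 1024 }
});
// => { promptTokens: 1200, completionTokens: 300, totalTokens: 1500,
//      reasoningTokens: 128, cachedTokens: 1024 }
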
+ function transformResponse(response) {
+   const content = extractTextContent(response.output);
+   const { reasoning, reasoningDetails } = extractReasoningContent(response.output);
+   const toolCalls = extractToolCalls(response.output);
+   const images = extractProviderImages(response.output);
+   return {
+     content,
+     reasoning,
+     reasoningDetails,
+     toolCalls,
+     images,
+     finishReason: mapFinishReason(response),
+     usage: transformUsage(response.usage),
+     metadata: {
+       model: response.model,
+       provider: "openai",
+       requestId: response.id
+     }
+   };
+ }
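
Reviewer note: transformResponse is the whole non-streaming pipeline in one call; for a completed response containing one assistant message and one function_call item, the neutral result would look roughly like the following (names and values illustrative):

// { content: "…", reasoning: null, reasoningDetails: undefined,
//   toolCalls: [{ id: "call_…", name: "get_weather", arguments: { city: "Paris" } }],
//   images: undefined, finishReason: "tool_calls", usage: { … },
//   metadata: { model: "gpt-4o", provider: "openai", requestId: "resp_…" } }
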
+ function buildCreateParams(request, reasoningLevels = DEFAULT_REASONING_LEVELS) {
+   const { input, instructions } = transformMessages(request.messages);
+   const params = {
+     model: request.model,
+     input,
+     store: false
+     // Always stateless
+   };
+   if (instructions) {
+     params.instructions = instructions;
+   }
+   if (request.tools && request.tools.length > 0) {
+     const { functionTools, nativeTools } = transformTools(request.tools);
+     const usedNativeTools = /* @__PURE__ */ new Set();
+     for (const msg of request.messages) {
+       if (msg.role === "tool" && msg.toolName && OPENAI_NATIVE_TOOLS.has(msg.toolName)) {
+         usedNativeTools.add(msg.toolName);
+       }
+     }
+     const availableNativeTools = nativeTools.filter((tool) => {
+       const toolType = tool.type;
+       if (usedNativeTools.has(toolType)) {
+         console.log(`[buildCreateParams] Excluding native tool "${toolType}" - already used in conversation`);
+         return false;
+       }
+       return true;
+     });
+     params.tools = [
+       ...functionTools,
+       ...availableNativeTools
+     ];
+     const hasWebSearch = availableNativeTools.some(
+       (tool) => tool.type === "web_search"
+     );
+     if (hasWebSearch) {
+       params.include = params.include || [];
+       if (!params.include.includes("web_search_call.results")) {
+         params.include.push("web_search_call.results");
+       }
+     }
+     if (request.toolChoice !== void 0) {
+       params.tool_choice = transformToolChoice(request.toolChoice);
+     }
+     if (request.parallelToolCalls !== void 0) {
+       params.parallel_tool_calls = request.parallelToolCalls;
+     }
+   }
+   if (request.maxOutputTokens !== void 0) {
+     params.max_output_tokens = request.maxOutputTokens;
+   }
+   if (request.temperature !== void 0) {
+     params.temperature = request.temperature;
+   }
+   if (request.topP !== void 0) {
+     params.top_p = request.topP;
+   }
+   if (request.reasoning?.level !== void 0) {
+     const effort = mapReasoningLevel(request.reasoning.level, reasoningLevels);
+     if (effort) {
+       params.reasoning = {
+         effort,
+         summary: "auto"
+       };
+       params.include = params.include || [];
+       if (!params.include.includes("reasoning.encrypted_content")) {
+         params.include.push("reasoning.encrypted_content");
+       }
+     }
+   }
+   if (request.responseFormat) {
+     if (request.responseFormat.type === "json") {
+       if (request.responseFormat.schema) {
+         params.text = {
+           format: {
+             type: "json_schema",
+             name: "response",
+             schema: request.responseFormat.schema,
+             strict: true
+           }
+         };
+       } else {
+         params.text = {
+           format: { type: "json_object" }
+         };
+       }
+     }
+   }
+   if (request.providerOptions) {
+     const { include: _providerInclude, ...otherOptions } = request.providerOptions;
+     Object.assign(params, otherOptions);
+   }
+   return params;
+ }
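
Reviewer note: a sketch of the reasoning plumbing, assuming the default level map resolves 66 to "medium" via mapReasoningLevel:

const params = buildCreateParams({
  model: "o3-mini",
  messages: [{ role: "user", content: "Hi" }],
  reasoning: { level: 66 }
});
// params.store === false  (always stateless)
// params.reasoning deep-equals { effort: "medium", summary: "auto" }
// params.include contains "reasoning.encrypted_content", so encrypted
// reasoning can be replayed through transformAssistantMessage on the next turn.
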
+ function createStreamState() {
+   return {
+     toolCalls: /* @__PURE__ */ new Map(),
+     imageGenerations: /* @__PURE__ */ new Map(),
+     webSearches: /* @__PURE__ */ new Map(),
+     reasoningContent: "",
+     hasContent: false,
+     hasReasoning: false,
+     currentItemId: null,
+     imageIndex: 0
+   };
+ }
+ function processStreamEvent(event, state) {
+   const chunks = [];
+   switch (event.type) {
+     // Text content streaming
+     case "response.output_text.delta":
+       state.hasContent = true;
+       chunks.push({ type: "content-delta", delta: event.delta });
+       break;
+     case "response.output_text.done":
+       break;
+     // Reasoning streaming
+     case "response.reasoning.delta":
+       state.hasReasoning = true;
+       if (typeof event.delta === "string") {
+         state.reasoningContent += event.delta;
+         chunks.push({ type: "reasoning-delta", delta: event.delta });
+       }
+       break;
+     case "response.reasoning.done":
+       break;
+     // Function call and image generation streaming
+     case "response.output_item.added":
+       if (event.item.type === "function_call") {
+         state.toolCalls.set(event.item.call_id, {
+           id: event.item.call_id,
+           name: event.item.name,
+           arguments: ""
+         });
+         chunks.push({
+           type: "tool-call-start",
+           id: event.item.call_id,
+           name: event.item.name
+         });
+       } else if (event.item.type === "image_generation_call") {
+         console.log(`[processStreamEvent] image_generation_call added: id=${event.item.id}, status=${event.item.status}`);
+         state.imageGenerations.set(event.item.id, {
+           id: event.item.id,
+           status: event.item.status
+         });
+       } else if (event.item.type === "web_search_call") {
+         console.log(`[processStreamEvent] web_search_call added: id=${event.item.id}, status=${event.item.status}`);
+         state.webSearches.set(event.item.id, {
+           id: event.item.id,
+           status: event.item.status
+         });
+       }
+       break;
+     case "response.output_item.done":
+       if (event.item.type === "image_generation_call") {
+         console.log(`[processStreamEvent] image_generation_call done: id=${event.item.id}, status=${event.item.status}, hasResult=${!!event.item.result}, resultLength=${event.item.result?.length || 0}`);
+         if (event.item.result) {
+           const imageIndex = state.imageIndex;
+           state.imageIndex++;
+           console.log(`[processStreamEvent] Emitting image-done chunk #${imageIndex}, id=${event.item.id}`);
+           chunks.push({
+             type: "image-done",
+             index: imageIndex,
+             image: {
+               id: event.item.id,
+               toolName: "image_generation",
+               data: event.item.result,
+               mediaType: "image/png"
+               // Note: OpenAI SDK ImageGenerationCall doesn't expose revised_prompt
+             }
+           });
+         }
+         state.imageGenerations.delete(event.item.id);
+       } else if (event.item.type === "web_search_call") {
+         console.log(`[processStreamEvent] web_search_call done: id=${event.item.id}, status=${event.item.status}`);
+         const webSearchItem = event.item;
+         const actions = [];
+         if (webSearchItem.action) {
+           actions.push({
+             type: webSearchItem.action.type,
+             query: webSearchItem.action.query,
+             url: webSearchItem.action.url,
+             pattern: webSearchItem.action.pattern,
+             sources: webSearchItem.action.sources
+           });
+         }
+         chunks.push({
+           type: "web-search-done",
+           result: {
+             id: event.item.id,
+             status: event.item.status,
+             actions: actions.length > 0 ? actions : void 0
+           }
+         });
+         state.webSearches.delete(event.item.id);
+       }
+       break;
+     case "response.function_call_arguments.delta": {
+       const deltaToolCall = Array.from(state.toolCalls.values()).find(
+         (tc) => tc.id === event.item_id
+       );
+       if (deltaToolCall) {
+         deltaToolCall.arguments += event.delta;
+         chunks.push({
+           type: "tool-call-delta",
+           id: deltaToolCall.id,
+           argumentsDelta: event.delta
+         });
+       }
+       break;
+     }
+     case "response.function_call_arguments.done": {
+       const doneToolCall = Array.from(state.toolCalls.values()).find(
+         (tc) => tc.id === event.item_id
+       );
+       if (doneToolCall) {
+         let parsedArgs = {};
+         try {
+           parsedArgs = doneToolCall.arguments ? JSON.parse(doneToolCall.arguments) : {};
+         } catch {
+         }
+         chunks.push({
+           type: "tool-call-done",
+           id: doneToolCall.id,
+           arguments: parsedArgs
+         });
+       }
+       break;
+     }
+     // Response completion
+     case "response.completed": {
+       if (state.hasContent) {
+         chunks.push({ type: "content-done" });
+       }
+       if (state.hasReasoning) {
+         chunks.push({ type: "reasoning-done" });
+       }
+       const { reasoningDetails } = extractReasoningContent(event.response.output);
+       chunks.push({
+         type: "finish",
+         finishReason: mapFinishReason(event.response),
+         usage: transformUsage(event.response.usage),
+         reasoningDetails
+       });
+       break;
+     }
+     case "response.failed":
+       chunks.push({
+         type: "error",
+         error: event.response.error?.message || "Response generation failed",
+         code: event.response.error?.code
+       });
+       break;
+     case "response.incomplete": {
+       if (state.hasContent) {
+         chunks.push({ type: "content-done" });
+       }
+       if (state.hasReasoning) {
+         chunks.push({ type: "reasoning-done" });
+       }
+       const { reasoningDetails: incompleteReasoningDetails } = extractReasoningContent(event.response.output);
+       chunks.push({
+         type: "finish",
+         finishReason: mapFinishReason(event.response),
+         usage: transformUsage(event.response.usage),
+         reasoningDetails: incompleteReasoningDetails
+       });
+       break;
+     }
+     // Ignore other events
+     default:
+       break;
+   }
+   return chunks;
+ }
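
Reviewer note: the reducer above is a pure function from (event, state) to zero or more provider-neutral chunks; a hand-driven sketch:

const state = createStreamState();
processStreamEvent({ type: "response.output_text.delta", delta: "Hel" }, state);
// => [{ type: "content-delta", delta: "Hel" }]
processStreamEvent({ type: "response.output_text.delta", delta: "lo" }, state);
// => [{ type: "content-delta", delta: "lo" }]
// A later "response.completed" event closes open content/reasoning and emits
// the final { type: "finish", finishReason, usage, reasoningDetails } chunk.
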
+ function createErrorChunk(error, code) {
+   return { type: "error", error, code };
+ }
+ function isBase64Like(str) {
+   if (str.startsWith("data:")) return true;
+   if (str.length > 200) {
+     const base64Pattern = /^[A-Za-z0-9+/]+=*$/;
+     return base64Pattern.test(str.substring(0, 200));
+   }
+   return false;
+ }
+ function truncateBase64String(str, maxLength = 50) {
+   if (str.length <= maxLength) return str;
+   const preview = str.substring(0, maxLength);
+   return `${preview}...[truncated, ${str.length.toLocaleString()} chars]`;
+ }
+ function truncateBase64(obj, maxLength = 50) {
+   if (obj === null || obj === void 0) {
+     return obj;
+   }
+   if (typeof obj === "string") {
+     if (isBase64Like(obj)) {
+       return truncateBase64String(obj, maxLength);
+     }
+     return obj;
+   }
+   if (Array.isArray(obj)) {
+     return obj.map((item) => truncateBase64(item, maxLength));
+   }
+   if (typeof obj === "object") {
+     const result = {};
+     for (const [key, value] of Object.entries(obj)) {
+       result[key] = truncateBase64(value, maxLength);
+     }
+     return result;
+   }
+   return obj;
+ }
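
Reviewer note: the truncation helpers in action (illustrative input; the reported length is locale-formatted):

truncateBase64({ image_url: "data:image/png;base64," + "A".repeat(5000) });
// => { image_url: "data:image/png;base64,AAAAAAAAAAAAAAAAAAAAAAAAAAAA...[truncated, 5,022 chars]" }
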
+
+ // src/icons.ts
+ var OPENAI_ICON = `<svg xmlns="http://www.w3.org/2000/svg" width="48" height="48" viewBox="0 0 48 48" fill="none">
+ <rect width="48" height="48" rx="24" fill="white"/>
+ <path d="M19.3418 18.5599V14.7599C19.3418 14.4399 19.4608 14.1998 19.7382 14.04L27.3102 9.63997C28.3409 9.03999 29.5699 8.76014 30.8383 8.76014C35.5954 8.76014 38.6085 12.4802 38.6085 16.4401C38.6085 16.72 38.6085 17.04 38.5687 17.3601L30.7194 12.72C30.2437 12.4401 29.7678 12.4401 29.2922 12.72L19.3418 18.5599ZM37.0226 33.36V24.2799C37.0226 23.7197 36.7846 23.3197 36.309 23.0398L26.3586 17.1998L29.6093 15.3197C29.8868 15.1599 30.1247 15.1599 30.4022 15.3197L37.9741 19.7197C40.1547 20.9999 41.6213 23.7197 41.6213 26.3596C41.6213 29.3995 39.8375 32.1999 37.0226 33.36ZM17.0029 25.3601L13.7522 23.4402C13.4748 23.2804 13.3557 23.0402 13.3557 22.7202V13.9203C13.3557 9.64039 16.6065 6.40016 21.0069 6.40016C22.6722 6.40016 24.2179 6.96029 25.5265 7.96025L17.7168 12.5204C17.2412 12.8003 17.0033 13.2002 17.0033 13.7605L17.0029 25.3601ZM24 29.44L19.3418 26.8001V21.2003L24 18.5604L28.6578 21.2003V26.8001L24 29.44ZM26.993 41.6002C25.3278 41.6002 23.7821 41.04 22.4735 40.0402L30.2831 35.4799C30.7588 35.2001 30.9967 34.8001 30.9967 34.2399V22.6399L34.2873 24.5598C34.5646 24.7196 34.6837 24.9597 34.6837 25.2798V34.0797C34.6837 38.3596 31.3931 41.6002 26.993 41.6002ZM17.5975 32.6802L10.0255 28.2803C7.84493 27.0001 6.37833 24.2803 6.37833 21.6404C6.37833 18.5604 8.20193 15.8004 11.0164 14.6403V23.7602C11.0164 24.3204 11.2544 24.7204 11.73 25.0003L21.641 30.8001L18.3902 32.6802C18.1129 32.84 17.8749 32.84 17.5975 32.6802ZM17.1617 39.2402C12.682 39.2402 9.39151 35.8402 9.39151 31.6401C9.39151 31.3201 9.43125 31.0001 9.47066 30.68L17.2803 35.2402C17.7559 35.5201 18.2319 35.5201 18.7074 35.2402L28.6578 29.4404V33.2404C28.6578 33.5605 28.5388 33.8005 28.2614 33.9604L20.6894 38.3604C19.6586 38.9603 18.4301 39.2402 17.1617 39.2402ZM26.993 44C31.7899 44 35.7936 40.5601 36.7057 36C41.1457 34.8399 44 30.6399 44 26.36C44 23.5598 42.8108 20.8401 40.6701 18.88C40.8683 18.0399 40.9872 17.1998 40.9872 16.3602C40.9872 10.6403 36.3885 6.35998 31.0763 6.35998C30.0062 6.35998 28.9754 6.51979 27.9446 6.88001C26.1604 5.11992 23.7025 4 21.0069 4C16.2101 4 12.2064 7.4398 11.2943 12C6.8543 13.1601 4 17.3601 4 21.6399C4 24.4401 5.18916 27.1599 7.32995 29.1199C7.13174 29.96 7.01277 30.8001 7.01277 31.6398C7.01277 37.3597 11.6114 41.6399 16.9236 41.6399C17.9938 41.6399 19.0246 41.4801 20.0554 41.1199C21.8392 42.88 24.2971 44 26.993 44Z" fill="black"/>
+ </svg>`;
+ function svgToDataUri(svg) {
+   const encoded = encodeURIComponent(svg).replace(/'/g, "%27").replace(/"/g, "%22");
+   return `data:image/svg+xml,${encoded}`;
+ }
+ function getOpenAIIconDataUri() {
+   return svgToDataUri(OPENAI_ICON);
+ }
+
+ // src/OpenAIProvider.ts
+ var OpenAIProvider = class _OpenAIProvider {
+   name = "openai";
+   specificationVersion = "1";
+   client = null;
+   config;
+   /** Cache for models list to avoid repeated API calls */
+   static modelsCache = null;
+   static modelsCacheTime = 0;
+   static CACHE_TTL = 5 * 60 * 1e3;
+   // 5 minutes
+   constructor(config) {
+     this.config = config;
+   }
+   async getClient() {
+     if (!this.client) {
+       const { default: OpenAI } = await import("openai");
+       this.client = new OpenAI({
+         apiKey: this.config.apiKey,
+         baseURL: this.config.baseUrl,
+         timeout: this.config.timeout
+       });
+     }
+     return this.client;
+   }
+   supportsModel(modelId) {
+     return modelId.startsWith("gpt-") || modelId.startsWith("o1") || modelId.startsWith("o3") || modelId.startsWith("o4") || modelId.startsWith("dall-e") || modelId.startsWith("chatgpt-");
+   }
+   /**
+    * Get the icon for this provider as a data URI.
+    * Always returns the OpenAI icon since all models are from OpenAI.
+    */
+   getIcon(_modelId) {
+     return getOpenAIIconDataUri();
+   }
+   // ============================================================================
+   // Model Capabilities
+   // ============================================================================
+   /**
+    * Hardcoded capability mappings for OpenAI models.
+    * OpenAI doesn't provide a capabilities API, so these are manually maintained.
+    */
+   static MODEL_CAPABILITIES = {
+     "gpt-4o": {
+       supportsImages: true,
+       supportsToolCalls: true,
+       supportsStreaming: true,
+       supportsJsonMode: true,
+       maxContextTokens: 128e3,
+       maxOutputTokens: 16384
+     },
+     "gpt-4o-mini": {
+       supportsImages: true,
+       supportsToolCalls: true,
+       supportsStreaming: true,
+       supportsJsonMode: true,
+       maxContextTokens: 128e3,
+       maxOutputTokens: 16384
+     },
+     "gpt-4-turbo": {
+       supportsImages: true,
+       supportsToolCalls: true,
+       supportsStreaming: true,
+       supportsJsonMode: true,
+       maxContextTokens: 128e3,
+       maxOutputTokens: 4096
+     },
+     "gpt-4": {
+       supportsImages: false,
+       supportsToolCalls: true,
+       supportsStreaming: true,
+       supportsJsonMode: true,
+       maxContextTokens: 8192,
+       maxOutputTokens: 4096
+     },
+     "gpt-3.5-turbo": {
+       supportsImages: false,
+       supportsToolCalls: true,
+       supportsStreaming: true,
+       supportsJsonMode: true,
+       maxContextTokens: 16385,
+       maxOutputTokens: 4096
+     },
+     "o1": {
+       supportsImages: true,
+       supportsToolCalls: true,
+       supportsStreaming: true,
+       supportsJsonMode: true,
+       maxContextTokens: 2e5,
+       maxOutputTokens: 1e5,
+       reasoningLevels: { 0: null, 33: "low", 66: "medium", 100: "high" }
+     },
+     "o1-preview": {
+       supportsImages: true,
+       supportsToolCalls: false,
+       supportsStreaming: true,
+       supportsJsonMode: false,
+       maxContextTokens: 128e3,
+       maxOutputTokens: 32768,
+       reasoningLevels: { 0: null, 33: "low", 66: "medium", 100: "high" }
+     },
+     "o1-mini": {
+       supportsImages: false,
+       supportsToolCalls: false,
+       supportsStreaming: true,
+       supportsJsonMode: false,
+       maxContextTokens: 128e3,
+       maxOutputTokens: 65536,
+       reasoningLevels: { 0: null, 33: "low", 66: "medium", 100: "high" }
+     },
+     "o3-mini": {
+       supportsImages: false,
+       supportsToolCalls: true,
+       supportsStreaming: true,
+       supportsJsonMode: true,
+       maxContextTokens: 2e5,
+       maxOutputTokens: 1e5,
+       reasoningLevels: { 0: null, 33: "low", 66: "medium", 100: "high" }
+     },
+     "o4-mini": {
+       supportsImages: true,
+       supportsToolCalls: true,
+       supportsStreaming: true,
+       supportsJsonMode: true,
+       maxContextTokens: 2e5,
+       maxOutputTokens: 1e5,
+       reasoningLevels: { 0: null, 33: "low", 66: "medium", 100: "high" }
+     }
+   };
+   /**
+    * Get capabilities for a specific model.
+    * Uses hardcoded mappings since OpenAI doesn't provide a capabilities API.
+    */
+   async getModelCapabilities(modelId) {
+     if (_OpenAIProvider.MODEL_CAPABILITIES[modelId]) {
+       return { ..._OpenAIProvider.MODEL_CAPABILITIES[modelId] };
+     }
+     for (const [prefix, caps] of Object.entries(_OpenAIProvider.MODEL_CAPABILITIES)) {
+       if (modelId.startsWith(prefix)) {
+         return { ...caps };
+       }
+     }
+     return {
+       supportsImages: false,
+       supportsToolCalls: true,
+       supportsStreaming: true,
+       supportsJsonMode: true,
+       maxContextTokens: 8192,
+       maxOutputTokens: 4096
+     };
+   }
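
Reviewer note (assuming `provider` is an OpenAIProvider instance): dated snapshot ids resolve through the prefix scan. Note the scan returns the first match in insertion order, so "gpt-4o-mini-2024-07-18" hits the "gpt-4o" entry before "gpt-4o-mini" is reached; harmless today because the two entries carry identical limits, but worth keeping in mind if the tables ever diverge.

await provider.getModelCapabilities("gpt-4o-2024-08-06");
// => { supportsImages: true, supportsToolCalls: true, supportsStreaming: true,
//      supportsJsonMode: true, maxContextTokens: 128000, maxOutputTokens: 16384 }
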
+   /**
+    * Human-readable names and descriptions for models.
+    * Used to enrich API response data.
+    */
+   static MODEL_METADATA = {
+     "gpt-4o": { name: "GPT-4o", description: "Most capable GPT-4 model with vision" },
+     "gpt-4o-mini": { name: "GPT-4o Mini", description: "Fast and affordable GPT-4o variant" },
+     "gpt-4-turbo": { name: "GPT-4 Turbo", description: "GPT-4 Turbo with vision" },
+     "gpt-4": { name: "GPT-4", description: "Original GPT-4 model" },
+     "gpt-3.5-turbo": { name: "GPT-3.5 Turbo", description: "Fast and cost-effective" },
+     "o1": { name: "o1", description: "Advanced reasoning model" },
+     "o1-preview": { name: "o1 Preview", description: "Preview reasoning model" },
+     "o1-mini": { name: "o1 Mini", description: "Smaller reasoning model" },
+     "o3-mini": { name: "o3 Mini", description: "Latest compact reasoning model" },
+     "o4-mini": { name: "o4 Mini", description: "Next-gen compact reasoning model" },
+     "chatgpt-4o-latest": { name: "ChatGPT-4o Latest", description: "Latest ChatGPT model" }
+   };
+   /**
+    * Prefixes for chat-capable models (filter out embeddings, tts, whisper, dall-e, etc.)
+    */
+   static CHAT_MODEL_PREFIXES = ["gpt-", "o1", "o3", "o4", "chatgpt-"];
+   // ============================================================================
+   // Provider-Embedded Tools
+   // ============================================================================
+   /**
+    * Provider-embedded tools using defineTool().
+    * These are OpenAI's built-in tools that execute on OpenAI's servers.
+    * The execute function is a no-op since execution is handled by the provider.
+    */
+   static TOOLS = {
+     web_search: defineTool({
+       description: "Search the web for up-to-date information with citations",
+       args: z.object({
+         query: z.string().describe("Search query"),
+         searchContextSize: z.enum(["low", "medium", "high"]).default("medium").describe("Amount of context to gather")
+       }),
+       execute: async (_state, _args) => {
+         return { status: "success", result: "Handled by OpenAI" };
+       },
+       tenvs: z.object({
+         userLocation: z.string().optional().describe("User location for relevant results")
+       }),
+       executionMode: "provider",
+       executionProvider: "openai"
+     }),
+     file_search: defineTool({
+       description: "Search through uploaded files using vector embeddings",
+       args: z.object({
+         query: z.string().describe("Search query")
+       }),
+       execute: async (_state, _args) => {
+         return { status: "success", result: "Handled by OpenAI" };
+       },
+       tenvs: z.object({
+         vectorStoreId: z.string().describe("OpenAI Vector Store ID")
+       }),
+       executionMode: "provider",
+       executionProvider: "openai"
+     }),
+     code_interpreter: defineTool({
+       description: "Execute Python code in a sandboxed environment",
+       args: z.object({
+         code: z.string().describe("Python code to execute")
+       }),
+       execute: async (_state, _args) => {
+         return { status: "success", result: "Handled by OpenAI" };
+       },
+       tenvs: z.object({
+         containerId: z.string().optional().describe("Code interpreter container ID")
+       }),
+       executionMode: "provider",
+       executionProvider: "openai"
+     }),
+     image_generation: defineTool({
+       description: "Generate images using GPT-image-1",
+       args: z.object({
+         prompt: z.string().describe("Image generation prompt"),
+         quality: z.enum(["standard", "hd"]).default("standard").describe("Image quality"),
+         size: z.enum(["1024x1024", "1792x1024", "1024x1792"]).default("1024x1024").describe("Image size")
+       }),
+       execute: async (_state, _args) => {
+         return { status: "success", result: "Handled by OpenAI" };
+       },
+       executionMode: "provider",
+       executionProvider: "openai"
+     })
+   };
+   /**
+    * Which tools are available for each model.
+    */
+   static MODEL_TOOLS = {
+     "gpt-4o": ["web_search", "file_search", "code_interpreter", "image_generation"],
+     "gpt-4o-mini": ["web_search", "file_search", "code_interpreter"],
+     "o1": ["web_search", "code_interpreter"],
+     "o3-mini": ["web_search", "code_interpreter"],
+     "o4-mini": ["web_search", "file_search", "code_interpreter", "image_generation"]
+   };
+   /**
+    * Get tools embedded in this provider.
+    * These are OpenAI's built-in tools with tenv requirements.
+    *
+    * @param modelId - Optional filter to get tools available for a specific model
+    * @returns Record of tool name to tool definition
+    */
+   getTools(modelId) {
+     if (!modelId) {
+       return { ..._OpenAIProvider.TOOLS };
+     }
+     let toolNames = _OpenAIProvider.MODEL_TOOLS[modelId];
+     if (!toolNames) {
+       for (const [prefix, tools] of Object.entries(_OpenAIProvider.MODEL_TOOLS)) {
+         if (modelId.startsWith(prefix)) {
+           toolNames = tools;
+           break;
+         }
+       }
+     }
+     if (!toolNames) {
+       return { ..._OpenAIProvider.TOOLS };
+     }
+     const result = {};
+     for (const name of toolNames) {
+       if (_OpenAIProvider.TOOLS[name]) {
+         result[name] = _OpenAIProvider.TOOLS[name];
+       }
+     }
+     return result;
+   }
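
Reviewer note: per-model filtering in practice, per MODEL_TOOLS above (`provider` again being an instance):

Object.keys(provider.getTools("o3-mini"));
// => ["web_search", "code_interpreter"]
Object.keys(provider.getTools("totally-unknown-model"));
// => ["web_search", "file_search", "code_interpreter", "image_generation"]
//    (ids matching neither an entry nor a prefix fall back to the full set)
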
+   /**
+    * Fetch models from OpenAI API with caching.
+    */
+   async fetchModelsWithCache() {
+     const now = Date.now();
+     if (_OpenAIProvider.modelsCache && now - _OpenAIProvider.modelsCacheTime < _OpenAIProvider.CACHE_TTL) {
+       return _OpenAIProvider.modelsCache;
+     }
+     const client = await this.getClient();
+     const response = await client.models.list();
+     const models = [];
+     for await (const model of response) {
+       models.push(model);
+     }
+     _OpenAIProvider.modelsCache = models;
+     _OpenAIProvider.modelsCacheTime = now;
+     return models;
+   }
+   /**
+    * Check if a model ID is a chat-capable model.
+    */
+   isChatModel(modelId) {
+     return _OpenAIProvider.CHAT_MODEL_PREFIXES.some((prefix) => modelId.startsWith(prefix));
+   }
+   /**
+    * Get human-readable name for a model.
+    */
+   getModelName(modelId) {
+     if (_OpenAIProvider.MODEL_METADATA[modelId]) {
+       return _OpenAIProvider.MODEL_METADATA[modelId].name;
+     }
+     for (const [prefix, meta] of Object.entries(_OpenAIProvider.MODEL_METADATA)) {
+       if (modelId.startsWith(prefix + "-")) {
+         return `${meta.name} (${modelId.slice(prefix.length + 1)})`;
+       }
+     }
+     return modelId.split("-").map((part) => part.charAt(0).toUpperCase() + part.slice(1)).join(" ");
+   }
+   /**
+    * Get description for a model.
+    */
+   getModelDescription(modelId) {
+     if (_OpenAIProvider.MODEL_METADATA[modelId]) {
+       return _OpenAIProvider.MODEL_METADATA[modelId].description;
+     }
+     for (const [prefix, meta] of Object.entries(_OpenAIProvider.MODEL_METADATA)) {
+       if (modelId.startsWith(prefix + "-")) {
+         return meta.description;
+       }
+     }
+     return "";
+   }
+   /**
+    * Map OpenAI model info to ProviderModelInfo.
+    */
+   mapToProviderModelInfo(model) {
+     const caps = _OpenAIProvider.MODEL_CAPABILITIES[model.id];
+     let contextLength = caps?.maxContextTokens;
+     if (!contextLength) {
+       for (const [prefix, prefixCaps] of Object.entries(_OpenAIProvider.MODEL_CAPABILITIES)) {
+         if (model.id.startsWith(prefix)) {
+           contextLength = prefixCaps.maxContextTokens;
+           break;
+         }
+       }
+     }
+     return {
+       id: model.id,
+       name: this.getModelName(model.id),
+       description: this.getModelDescription(model.id),
+       contextLength,
+       iconId: this.getIcon(model.id)
+     };
+   }
+   /**
+    * Get list of available models from OpenAI.
+    * Fetches from the OpenAI API with caching.
+    *
+    * @param filter - Optional search string to filter models by name/id
+    */
+   async getModels(filter) {
+     try {
+       const rawModels = await this.fetchModelsWithCache();
+       let models = rawModels.filter((m) => this.isChatModel(m.id)).map((m) => this.mapToProviderModelInfo(m)).sort((a, b) => {
+         return a.name.localeCompare(b.name);
+       });
+       if (filter) {
+         const lowerFilter = filter.toLowerCase();
+         models = models.filter(
+           (m) => m.id.toLowerCase().includes(lowerFilter) || m.name.toLowerCase().includes(lowerFilter) || m.description && m.description.toLowerCase().includes(lowerFilter)
+         );
+       }
+       return models;
+     } catch (error) {
+       console.error("Failed to fetch models from OpenAI:", error);
+       return [];
+     }
+   }
+   // ============================================================================
+   // Generation Methods
+   // ============================================================================
+   async generate(request) {
+     const client = await this.getClient();
+     try {
+       const params = buildCreateParams(request, DEFAULT_REASONING_LEVELS);
+       const response = await client.responses.create(
+         { ...params, stream: false },
+         { signal: request.signal }
+       );
+       return transformResponse(response);
+     } catch (error) {
+       throw this.toProviderError(error);
+     }
+   }
+   async stream(request) {
+     const client = await this.getClient();
+     const self = this;
+     try {
+       const params = buildCreateParams(request, DEFAULT_REASONING_LEVELS);
+       console.log("[OpenAI] Sending request with", Array.isArray(params.input) ? params.input.length : 1, "input items:");
+       if (Array.isArray(params.input)) {
+         for (const item of params.input) {
+           const itemType = item.type || item.role || "unknown";
+           const hasImage = JSON.stringify(item).includes("input_image");
+           console.log(` - ${itemType}${hasImage ? " (has image)" : ""}`);
+         }
+       }
+       const stream = await client.responses.create(
+         { ...params, stream: true },
+         { signal: request.signal }
+       );
+       return {
+         async *[Symbol.asyncIterator]() {
+           const state = createStreamState();
+           try {
+             for await (const event of stream) {
+               const chunks = processStreamEvent(event, state);
+               for (const chunk of chunks) {
+                 yield chunk;
+               }
+             }
+           } catch (error) {
+             const providerError = self.toProviderError(error);
+             yield createErrorChunk(providerError.message, providerError.code);
+           }
+         }
+       };
+     } catch (error) {
+       throw this.toProviderError(error);
+     }
+   }
+   // ============================================================================
+   // Error Handling
+   // ============================================================================
+   toProviderError(error) {
+     if (error instanceof ProviderError) {
+       return error;
+     }
+     if (error instanceof Error) {
+       const anyError = error;
+       const status = anyError.status || anyError.statusCode;
+       const retryAfter = anyError.headers?.["retry-after"] ? parseInt(anyError.headers["retry-after"], 10) : void 0;
+       if (status === 429) {
+         return new ProviderError(error.message, "rate_limit", status, retryAfter);
+       }
+       if (status === 401 || status === 403) {
+         return new ProviderError(error.message, "auth_error", status);
+       }
+       if (status === 400) {
+         return new ProviderError(error.message, "invalid_request", status);
+       }
+       if (status >= 500) {
+         return new ProviderError(error.message, "server_error", status);
+       }
+       if (error.name === "AbortError" || anyError.code === "ETIMEDOUT") {
+         return new ProviderError(error.message, "timeout");
+       }
+       return new ProviderError(error.message, "unknown", status);
+     }
+     return new ProviderError(String(error), "unknown");
+   }
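
Reviewer note: how a typical SDK rate-limit failure is normalized (hand-built error for illustration):

const err = Object.assign(new Error("Rate limit exceeded"), {
  status: 429,
  headers: { "retry-after": "12" }
});
provider.toProviderError(err);
// => new ProviderError("Rate limit exceeded", "rate_limit", 429, 12)
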
+   // ============================================================================
+   // Inspection
+   // ============================================================================
+   /**
+    * Transform a ProviderRequest to OpenAI Responses API format for inspection.
+    * Returns the exact request body that would be sent to OpenAI, with base64 data truncated.
+    */
+   async inspectRequest(request) {
+     const params = buildCreateParams(request, DEFAULT_REASONING_LEVELS);
+     return {
+       body: truncateBase64(params),
+       messagesPath: "input",
+       metadata: {
+         endpoint: "responses.create"
+       }
+     };
+   }
+ };
+
+ // src/providerOptions.ts
+ import { z as z2 } from "zod";
+ var openaiProviderOptions = z2.object({
+   /** Service tier for request: 'auto', 'default', or 'flex' */
+   service_tier: z2.enum(["auto", "default", "flex"]).optional(),
+   /** User identifier for abuse monitoring */
+   user: z2.string().optional(),
+   /** Seed for deterministic outputs (beta feature) */
+   seed: z2.number().int().optional(),
+   /** Frequency penalty (-2.0 to 2.0) - reduces repetition of tokens */
+   frequency_penalty: z2.number().min(-2).max(2).optional(),
+   /** Presence penalty (-2.0 to 2.0) - encourages new topics */
+   presence_penalty: z2.number().min(-2).max(2).optional(),
+   /** Whether to return log probabilities of output tokens */
+   logprobs: z2.boolean().optional(),
+   /** Number of most likely tokens to return at each position (0-20) */
+   top_logprobs: z2.number().int().min(0).max(20).optional(),
+   /** Whether to store the completion for future reference */
+   store: z2.boolean().optional(),
+   /** Metadata for stored completions */
+   metadata: z2.record(z2.string(), z2.string()).optional()
+ }).passthrough();
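
Reviewer note: the schema is .passthrough(), so unrecognized keys survive validation; buildCreateParams later merges everything except `include` into the request body.

openaiProviderOptions.parse({ service_tier: "flex", seed: 42, x_custom: true });
// => { service_tier: "flex", seed: 42, x_custom: true }
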
+
+ // src/index.ts
+ var openai = Object.assign(
+   (config) => new OpenAIProvider(config),
+   { providerOptions: openaiProviderOptions }
+ );
+ export {
+   OpenAIProvider,
+   openai,
+   openaiProviderOptions
+ };
+ //# sourceMappingURL=index.js.map
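
Reviewer note: a minimal end-to-end usage sketch for the exports above (the env var name is illustrative):

import { openai } from "@standardagents/openai";

const provider = openai({ apiKey: process.env.OPENAI_API_KEY });
const result = await provider.generate({
  model: "gpt-4o-mini",
  messages: [{ role: "user", content: "Say hello" }]
});
console.log(result.content, result.usage.totalTokens);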