@zenning/openai 1.6.0 → 2.0.29

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,3755 @@
1
+ "use strict";
2
+ var __defProp = Object.defineProperty;
3
+ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
4
+ var __getOwnPropNames = Object.getOwnPropertyNames;
5
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
6
+ var __export = (target, all) => {
7
+ for (var name in all)
8
+ __defProp(target, name, { get: all[name], enumerable: true });
9
+ };
10
+ var __copyProps = (to, from, except, desc) => {
11
+ if (from && typeof from === "object" || typeof from === "function") {
12
+ for (let key of __getOwnPropNames(from))
13
+ if (!__hasOwnProp.call(to, key) && key !== except)
14
+ __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
15
+ }
16
+ return to;
17
+ };
18
+ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
19
+
20
// src/internal/index.ts
var internal_exports = {};
// Lazy getter map for every symbol re-exported from the internal entry
// point; resolved on first access so declaration order below is irrelevant.
var internalExportGetters = {
  OpenAIChatLanguageModel: () => OpenAIChatLanguageModel,
  OpenAICompletionLanguageModel: () => OpenAICompletionLanguageModel,
  OpenAIEmbeddingModel: () => OpenAIEmbeddingModel,
  OpenAIImageModel: () => OpenAIImageModel,
  OpenAIResponsesLanguageModel: () => OpenAIResponsesLanguageModel,
  OpenAISpeechModel: () => OpenAISpeechModel,
  OpenAITranscriptionModel: () => OpenAITranscriptionModel,
  hasDefaultResponseFormat: () => hasDefaultResponseFormat,
  modelMaxImagesPerCall: () => modelMaxImagesPerCall,
  openAITranscriptionProviderOptions: () => openAITranscriptionProviderOptions,
  openaiCompletionProviderOptions: () => openaiCompletionProviderOptions,
  openaiEmbeddingProviderOptions: () => openaiEmbeddingProviderOptions,
  openaiProviderOptions: () => openaiProviderOptions
};
__export(internal_exports, internalExportGetters);
module.exports = __toCommonJS(internal_exports);
38
+
39
+ // src/chat/openai-chat-language-model.ts
40
+ var import_provider3 = require("@ai-sdk/provider");
41
+ var import_provider_utils5 = require("@ai-sdk/provider-utils");
42
+ var import_v45 = require("zod/v4");
43
+
44
// src/openai-error.ts
var import_v4 = require("zod/v4");
var import_provider_utils = require("@ai-sdk/provider-utils");
// Shape of the `error` object in an OpenAI API error payload. The
// additional fields are handled loosely to support OpenAI-compatible
// providers that have slightly different error responses.
var openaiErrorObjectSchema = import_v4.z.object({
  message: import_v4.z.string(),
  type: import_v4.z.string().nullish(),
  param: import_v4.z.any().nullish(),
  code: import_v4.z.union([import_v4.z.string(), import_v4.z.number()]).nullish()
});
var openaiErrorDataSchema = import_v4.z.object({
  error: openaiErrorObjectSchema
});
// Shared failed-response handler: parses the error body with the schema
// above and surfaces `error.message` as the thrown message.
var openaiFailedResponseHandler = (0, import_provider_utils.createJsonErrorResponseHandler)({
  errorSchema: openaiErrorDataSchema,
  errorToMessage: (data) => data.error.message
});
62
+
63
+ // src/chat/convert-to-openai-chat-messages.ts
64
+ var import_provider = require("@ai-sdk/provider");
65
+ var import_provider_utils2 = require("@ai-sdk/provider-utils");
66
// Convert an AI SDK prompt into the OpenAI chat-completions message
// format. Returns the converted `messages` plus any non-fatal `warnings`
// (e.g. when system messages are dropped for models that reject them).
function convertToOpenAIChatMessages({ prompt, systemMessageMode = "system" }) {
  const messages = [];
  const warnings = [];

  // Convert a single user content part (text or file) into an OpenAI
  // content-part object. `index` is only used for synthesized PDF names.
  const convertUserPart = (part, index) => {
    if (part.type === "text") {
      return { type: "text", text: part.text };
    }
    if (part.type === "file") {
      if (part.mediaType.startsWith("image/")) {
        // Normalize the wildcard image type to a concrete default.
        const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
        return {
          type: "image_url",
          image_url: {
            url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${(0, import_provider_utils2.convertToBase64)(part.data)}`,
            // OpenAI specific extension: image detail
            detail: part.providerOptions?.openai?.imageDetail
          }
        };
      }
      if (part.mediaType.startsWith("audio/")) {
        if (part.data instanceof URL) {
          throw new import_provider.UnsupportedFunctionalityError({
            functionality: "audio file parts with URLs"
          });
        }
        if (part.mediaType === "audio/wav") {
          return {
            type: "input_audio",
            input_audio: {
              data: (0, import_provider_utils2.convertToBase64)(part.data),
              format: "wav"
            }
          };
        }
        if (part.mediaType === "audio/mp3" || part.mediaType === "audio/mpeg") {
          return {
            type: "input_audio",
            input_audio: {
              data: (0, import_provider_utils2.convertToBase64)(part.data),
              format: "mp3"
            }
          };
        }
        throw new import_provider.UnsupportedFunctionalityError({
          functionality: `audio content parts with media type ${part.mediaType}`
        });
      }
      if (part.mediaType === "application/pdf") {
        if (part.data instanceof URL) {
          throw new import_provider.UnsupportedFunctionalityError({
            functionality: "PDF file parts with URLs"
          });
        }
        // A bare "file-..." string is an uploaded-file id; anything else
        // is raw data and gets inlined as a base64 data URI.
        return {
          type: "file",
          file: typeof part.data === "string" && part.data.startsWith("file-") ? { file_id: part.data } : {
            filename: part.filename ?? `part-${index}.pdf`,
            file_data: `data:application/pdf;base64,${(0, import_provider_utils2.convertToBase64)(part.data)}`
          }
        };
      }
      throw new import_provider.UnsupportedFunctionalityError({
        functionality: `file part media type ${part.mediaType}`
      });
    }
  };

  for (const { role, content } of prompt) {
    if (role === "system") {
      if (systemMessageMode === "system") {
        messages.push({ role: "system", content });
      } else if (systemMessageMode === "developer") {
        messages.push({ role: "developer", content });
      } else if (systemMessageMode === "remove") {
        warnings.push({
          type: "other",
          message: "system messages are removed for this model"
        });
      } else {
        throw new Error(`Unsupported system message mode: ${systemMessageMode}`);
      }
    } else if (role === "user") {
      if (content.length === 1 && content[0].type === "text") {
        // Fast path: a single text part collapses to a plain string.
        messages.push({ role: "user", content: content[0].text });
      } else {
        messages.push({ role: "user", content: content.map(convertUserPart) });
      }
    } else if (role === "assistant") {
      let text = "";
      const toolCalls = [];
      for (const part of content) {
        if (part.type === "text") {
          text += part.text;
        } else if (part.type === "tool-call") {
          toolCalls.push({
            id: part.toolCallId,
            type: "function",
            function: {
              name: part.toolName,
              arguments: JSON.stringify(part.input)
            }
          });
        }
      }
      messages.push({
        role: "assistant",
        content: text,
        tool_calls: toolCalls.length > 0 ? toolCalls : void 0
      });
    } else if (role === "tool") {
      for (const toolResponse of content) {
        const output = toolResponse.output;
        let contentValue;
        if (output.type === "text" || output.type === "error-text") {
          contentValue = output.value;
        } else if (output.type === "content" || output.type === "json" || output.type === "error-json") {
          contentValue = JSON.stringify(output.value);
        }
        messages.push({
          role: "tool",
          tool_call_id: toolResponse.toolCallId,
          content: contentValue
        });
      }
    } else {
      throw new Error(`Unsupported role: ${role}`);
    }
  }
  return { messages, warnings };
}
240
+
241
// src/chat/get-response-metadata.ts
// Normalize the id/model/created fields of an OpenAI response body into
// the AI SDK's response-metadata shape. `created` is a unix timestamp in
// seconds, hence the *1000 conversion to milliseconds.
function getResponseMetadata({ id, model, created }) {
  return {
    id: id ?? void 0,
    modelId: model ?? void 0,
    timestamp: created == null ? void 0 : new Date(created * 1000)
  };
}
253
+
254
// src/chat/map-openai-finish-reason.ts
// Translation table from OpenAI `finish_reason` values to the AI SDK's
// normalized vocabulary. A Map (rather than a plain object) keeps lookups
// safe from Object.prototype keys like "toString".
var OPENAI_FINISH_REASON_MAP = new Map([
  ["stop", "stop"],
  ["length", "length"],
  ["content_filter", "content-filter"],
  ["function_call", "tool-calls"],
  ["tool_calls", "tool-calls"]
]);
// Map an OpenAI finish reason onto the SDK vocabulary; anything
// unrecognized (including null/undefined) becomes "unknown".
function mapOpenAIFinishReason(finishReason) {
  return OPENAI_FINISH_REASON_MAP.get(finishReason) ?? "unknown";
}
270
+
271
// src/chat/openai-chat-options.ts
var import_v42 = require("zod/v4");
var z42 = import_v42.z;
// Provider-specific options accepted under `providerOptions.openai` for
// chat-completion requests. Every field is optional; defaults noted below.
var openaiProviderOptions = z42.object({
  /**
   * Modify the likelihood of specified tokens appearing in the completion.
   *
   * Accepts a JSON object that maps tokens (specified by their token ID in
   * the GPT tokenizer) to an associated bias value from -100 to 100.
   */
  logitBias: z42.record(z42.coerce.number(), z42.number()).optional(),
  /**
   * Return the log probabilities of the tokens.
   *
   * Setting to true will return the log probabilities of the tokens that
   * were generated.
   *
   * Setting to a number will return the log probabilities of the top n
   * tokens that were generated.
   */
  logprobs: z42.union([z42.boolean(), z42.number()]).optional(),
  /**
   * Whether to enable parallel function calling during tool use. Default to true.
   */
  parallelToolCalls: z42.boolean().optional(),
  /**
   * A unique identifier representing your end-user, which can help OpenAI to
   * monitor and detect abuse.
   */
  user: z42.string().optional(),
  /**
   * Reasoning effort for reasoning models. Defaults to `medium`.
   */
  reasoningEffort: z42.enum(["minimal", "low", "medium", "high"]).optional(),
  /**
   * Maximum number of completion tokens to generate. Useful for reasoning models.
   */
  maxCompletionTokens: z42.number().optional(),
  /**
   * Whether to enable persistence in responses API.
   */
  store: z42.boolean().optional(),
  /**
   * Metadata to associate with the request.
   */
  metadata: z42.record(z42.string().max(64), z42.string().max(512)).optional(),
  /**
   * Parameters for prediction mode.
   */
  prediction: z42.record(z42.string(), z42.any()).optional(),
  /**
   * Whether to use structured outputs.
   *
   * @default true
   */
  structuredOutputs: z42.boolean().optional(),
  /**
   * Service tier for the request.
   * - 'auto': Default service tier
   * - 'flex': 50% cheaper processing at the cost of increased latency. Only available for o3 and o4-mini models.
   * - 'priority': Higher-speed processing with predictably low latency at premium cost. Available for Enterprise customers.
   *
   * @default 'auto'
   */
  serviceTier: z42.enum(["auto", "flex", "priority"]).optional(),
  /**
   * Whether to use strict JSON schema validation.
   *
   * @default false
   */
  strictJsonSchema: z42.boolean().optional(),
  /**
   * Controls the verbosity of the model's responses.
   * Lower values will result in more concise responses, while higher values will result in more verbose responses.
   */
  textVerbosity: z42.enum(["low", "medium", "high"]).optional(),
  /**
   * A cache key for prompt caching. Allows manual control over prompt caching behavior.
   * Useful for improving cache hit rates and working around automatic caching issues.
   */
  promptCacheKey: z42.string().optional(),
  /**
   * A stable identifier used to help detect users of your application
   * that may be violating OpenAI's usage policies. The IDs should be a
   * string that uniquely identifies each user. We recommend hashing their
   * username or email address, in order to avoid sending us any identifying
   * information.
   */
  safetyIdentifier: z42.string().optional()
});
360
+
361
+ // src/chat/openai-chat-prepare-tools.ts
362
+ var import_provider2 = require("@ai-sdk/provider");
363
+
364
// src/tool/file-search.ts
var import_provider_utils3 = require("@ai-sdk/provider-utils");
var import_v43 = require("zod/v4");
var z43 = import_v43.z;
// A single attribute comparison used to filter vector-store results.
var comparisonFilterSchema = z43.object({
  key: z43.string(),
  type: z43.enum(["eq", "ne", "gt", "gte", "lt", "lte"]),
  value: z43.union([z43.string(), z43.number(), z43.boolean()])
});
// Boolean combination of filters; recursion via z.lazy allows arbitrary
// nesting of and/or groups.
var compoundFilterSchema = z43.object({
  type: z43.enum(["and", "or"]),
  filters: z43.array(
    z43.union([comparisonFilterSchema, z43.lazy(() => compoundFilterSchema)])
  )
});
var filtersSchema = z43.union([comparisonFilterSchema, compoundFilterSchema]);
// Configuration arguments for the provider-defined file_search tool.
var fileSearchArgsSchema = z43.object({
  vectorStoreIds: z43.array(z43.string()).optional(),
  maxNumResults: z43.number().optional(),
  ranking: z43.object({
    ranker: z43.enum(["auto", "default-2024-08-21"]).optional()
  }).optional(),
  filters: filtersSchema.optional()
});
var fileSearch = (0, import_provider_utils3.createProviderDefinedToolFactory)({
  id: "openai.file_search",
  name: "file_search",
  inputSchema: z43.object({
    query: z43.string().optional()
  })
});
394
+
395
// src/tool/web-search-preview.ts
var import_provider_utils4 = require("@ai-sdk/provider-utils");
var import_v44 = require("zod/v4");
var z44 = import_v44.z;
// Configuration arguments for the provider-defined web_search_preview tool.
var webSearchPreviewArgsSchema = z44.object({
  /**
   * Search context size to use for the web search.
   * - high: Most comprehensive context, highest cost, slower response
   * - medium: Balanced context, cost, and latency (default)
   * - low: Least context, lowest cost, fastest response
   */
  searchContextSize: z44.enum(["low", "medium", "high"]).optional(),
  /**
   * User location information to provide geographically relevant search results.
   */
  userLocation: z44.object({
    /**
     * Type of location (always 'approximate')
     */
    type: z44.literal("approximate"),
    /**
     * Two-letter ISO country code (e.g., 'US', 'GB')
     */
    country: z44.string().optional(),
    /**
     * City name (free text, e.g., 'Minneapolis')
     */
    city: z44.string().optional(),
    /**
     * Region name (free text, e.g., 'Minnesota')
     */
    region: z44.string().optional(),
    /**
     * IANA timezone (e.g., 'America/Chicago')
     */
    timezone: z44.string().optional()
  }).optional()
});
var webSearchPreview = (0, import_provider_utils4.createProviderDefinedToolFactory)({
  id: "openai.web_search_preview",
  name: "web_search_preview",
  inputSchema: z44.object({
    // The action the model reports for a search invocation: a query, an
    // opened page, or an in-page find; may be absent entirely.
    action: z44.discriminatedUnion("type", [
      z44.object({
        type: z44.literal("search"),
        query: z44.string().nullish()
      }),
      z44.object({
        type: z44.literal("open_page"),
        url: z44.string()
      }),
      z44.object({
        type: z44.literal("find"),
        url: z44.string(),
        pattern: z44.string()
      })
    ]).nullish()
  })
});
453
+
454
+ // src/chat/openai-chat-prepare-tools.ts
455
// Translate AI SDK tool definitions and tool-choice into the OpenAI
// chat-completions format. Unsupported tools are reported via
// `toolWarnings` rather than thrown; an unknown tool-choice type throws.
function prepareChatTools({ tools, toolChoice, structuredOutputs, strictJsonSchema }) {
  // Treat an empty tool list the same as no tools at all.
  tools = tools?.length ? tools : void 0;
  const toolWarnings = [];
  if (tools == null) {
    return { tools: void 0, toolChoice: void 0, toolWarnings };
  }
  const openaiTools = [];
  for (const tool of tools) {
    if (tool.type === "function") {
      openaiTools.push({
        type: "function",
        function: {
          name: tool.name,
          description: tool.description,
          parameters: tool.inputSchema,
          // `strict` is only meaningful when structured outputs are on.
          strict: structuredOutputs ? strictJsonSchema : void 0
        }
      });
    } else if (tool.type === "provider-defined" && tool.id === "openai.file_search") {
      const args = fileSearchArgsSchema.parse(tool.args);
      openaiTools.push({
        type: "file_search",
        vector_store_ids: args.vectorStoreIds,
        max_num_results: args.maxNumResults,
        ranking_options: args.ranking ? { ranker: args.ranking.ranker } : void 0,
        filters: args.filters
      });
    } else if (tool.type === "provider-defined" && tool.id === "openai.web_search_preview") {
      const args = webSearchPreviewArgsSchema.parse(tool.args);
      openaiTools.push({
        type: "web_search_preview",
        search_context_size: args.searchContextSize,
        user_location: args.userLocation
      });
    } else {
      // Unknown tool type, or a provider-defined tool we don't recognize.
      toolWarnings.push({ type: "unsupported-tool", tool });
    }
  }
  if (toolChoice == null) {
    return { tools: openaiTools, toolChoice: void 0, toolWarnings };
  }
  const type = toolChoice.type;
  if (type === "auto" || type === "none" || type === "required") {
    return { tools: openaiTools, toolChoice: type, toolWarnings };
  }
  if (type === "tool") {
    return {
      tools: openaiTools,
      toolChoice: {
        type: "function",
        function: {
          name: toolChoice.toolName
        }
      },
      toolWarnings
    };
  }
  throw new import_provider2.UnsupportedFunctionalityError({
    functionality: `tool choice type: ${type}`
  });
}
540
+
541
+ // src/chat/openai-chat-language-model.ts
542
+ var OpenAIChatLanguageModel = class {
543
+ constructor(modelId, config) {
544
+ this.specificationVersion = "v2";
545
+ this.supportedUrls = {
546
+ "image/*": [/^https?:\/\/.*$/]
547
+ };
548
+ this.modelId = modelId;
549
+ this.config = config;
550
+ }
551
+ get provider() {
552
+ return this.config.provider;
553
+ }
554
+ async getArgs({
555
+ prompt,
556
+ maxOutputTokens,
557
+ temperature,
558
+ topP,
559
+ topK,
560
+ frequencyPenalty,
561
+ presencePenalty,
562
+ stopSequences,
563
+ responseFormat,
564
+ seed,
565
+ tools,
566
+ toolChoice,
567
+ providerOptions
568
+ }) {
569
+ var _a, _b, _c, _d;
570
+ const warnings = [];
571
+ const openaiOptions = (_a = await (0, import_provider_utils5.parseProviderOptions)({
572
+ provider: "openai",
573
+ providerOptions,
574
+ schema: openaiProviderOptions
575
+ })) != null ? _a : {};
576
+ const structuredOutputs = (_b = openaiOptions.structuredOutputs) != null ? _b : true;
577
+ if (topK != null) {
578
+ warnings.push({
579
+ type: "unsupported-setting",
580
+ setting: "topK"
581
+ });
582
+ }
583
+ if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !structuredOutputs) {
584
+ warnings.push({
585
+ type: "unsupported-setting",
586
+ setting: "responseFormat",
587
+ details: "JSON response format schema is only supported with structuredOutputs"
588
+ });
589
+ }
590
+ const { messages, warnings: messageWarnings } = convertToOpenAIChatMessages(
591
+ {
592
+ prompt,
593
+ systemMessageMode: getSystemMessageMode(this.modelId)
594
+ }
595
+ );
596
+ warnings.push(...messageWarnings);
597
+ const strictJsonSchema = (_c = openaiOptions.strictJsonSchema) != null ? _c : false;
598
+ const baseArgs = {
599
+ // model id:
600
+ model: this.modelId,
601
+ // model specific settings:
602
+ logit_bias: openaiOptions.logitBias,
603
+ logprobs: openaiOptions.logprobs === true || typeof openaiOptions.logprobs === "number" ? true : void 0,
604
+ top_logprobs: typeof openaiOptions.logprobs === "number" ? openaiOptions.logprobs : typeof openaiOptions.logprobs === "boolean" ? openaiOptions.logprobs ? 0 : void 0 : void 0,
605
+ user: openaiOptions.user,
606
+ parallel_tool_calls: openaiOptions.parallelToolCalls,
607
+ // standardized settings:
608
+ max_tokens: maxOutputTokens,
609
+ temperature,
610
+ top_p: topP,
611
+ frequency_penalty: frequencyPenalty,
612
+ presence_penalty: presencePenalty,
613
+ response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? structuredOutputs && responseFormat.schema != null ? {
614
+ type: "json_schema",
615
+ json_schema: {
616
+ schema: responseFormat.schema,
617
+ strict: strictJsonSchema,
618
+ name: (_d = responseFormat.name) != null ? _d : "response",
619
+ description: responseFormat.description
620
+ }
621
+ } : { type: "json_object" } : void 0,
622
+ stop: stopSequences,
623
+ seed,
624
+ verbosity: openaiOptions.textVerbosity,
625
+ // openai specific settings:
626
+ // TODO AI SDK 6: remove, we auto-map maxOutputTokens now
627
+ max_completion_tokens: openaiOptions.maxCompletionTokens,
628
+ store: openaiOptions.store,
629
+ metadata: openaiOptions.metadata,
630
+ prediction: openaiOptions.prediction,
631
+ reasoning_effort: openaiOptions.reasoningEffort,
632
+ service_tier: openaiOptions.serviceTier,
633
+ prompt_cache_key: openaiOptions.promptCacheKey,
634
+ safety_identifier: openaiOptions.safetyIdentifier,
635
+ // messages:
636
+ messages
637
+ };
638
+ if (isReasoningModel(this.modelId)) {
639
+ if (baseArgs.temperature != null) {
640
+ baseArgs.temperature = void 0;
641
+ warnings.push({
642
+ type: "unsupported-setting",
643
+ setting: "temperature",
644
+ details: "temperature is not supported for reasoning models"
645
+ });
646
+ }
647
+ if (baseArgs.top_p != null) {
648
+ baseArgs.top_p = void 0;
649
+ warnings.push({
650
+ type: "unsupported-setting",
651
+ setting: "topP",
652
+ details: "topP is not supported for reasoning models"
653
+ });
654
+ }
655
+ if (baseArgs.frequency_penalty != null) {
656
+ baseArgs.frequency_penalty = void 0;
657
+ warnings.push({
658
+ type: "unsupported-setting",
659
+ setting: "frequencyPenalty",
660
+ details: "frequencyPenalty is not supported for reasoning models"
661
+ });
662
+ }
663
+ if (baseArgs.presence_penalty != null) {
664
+ baseArgs.presence_penalty = void 0;
665
+ warnings.push({
666
+ type: "unsupported-setting",
667
+ setting: "presencePenalty",
668
+ details: "presencePenalty is not supported for reasoning models"
669
+ });
670
+ }
671
+ if (baseArgs.logit_bias != null) {
672
+ baseArgs.logit_bias = void 0;
673
+ warnings.push({
674
+ type: "other",
675
+ message: "logitBias is not supported for reasoning models"
676
+ });
677
+ }
678
+ if (baseArgs.logprobs != null) {
679
+ baseArgs.logprobs = void 0;
680
+ warnings.push({
681
+ type: "other",
682
+ message: "logprobs is not supported for reasoning models"
683
+ });
684
+ }
685
+ if (baseArgs.top_logprobs != null) {
686
+ baseArgs.top_logprobs = void 0;
687
+ warnings.push({
688
+ type: "other",
689
+ message: "topLogprobs is not supported for reasoning models"
690
+ });
691
+ }
692
+ if (baseArgs.max_tokens != null) {
693
+ if (baseArgs.max_completion_tokens == null) {
694
+ baseArgs.max_completion_tokens = baseArgs.max_tokens;
695
+ }
696
+ baseArgs.max_tokens = void 0;
697
+ }
698
+ } else if (this.modelId.startsWith("gpt-4o-search-preview") || this.modelId.startsWith("gpt-4o-mini-search-preview")) {
699
+ if (baseArgs.temperature != null) {
700
+ baseArgs.temperature = void 0;
701
+ warnings.push({
702
+ type: "unsupported-setting",
703
+ setting: "temperature",
704
+ details: "temperature is not supported for the search preview models and has been removed."
705
+ });
706
+ }
707
+ }
708
+ if (openaiOptions.serviceTier === "flex" && !supportsFlexProcessing(this.modelId)) {
709
+ warnings.push({
710
+ type: "unsupported-setting",
711
+ setting: "serviceTier",
712
+ details: "flex processing is only available for o3, o4-mini, and gpt-5 models"
713
+ });
714
+ baseArgs.service_tier = void 0;
715
+ }
716
+ if (openaiOptions.serviceTier === "priority" && !supportsPriorityProcessing(this.modelId)) {
717
+ warnings.push({
718
+ type: "unsupported-setting",
719
+ setting: "serviceTier",
720
+ details: "priority processing is only available for supported models (gpt-4, gpt-5, gpt-5-mini, o3, o4-mini) and requires Enterprise access. gpt-5-nano is not supported"
721
+ });
722
+ baseArgs.service_tier = void 0;
723
+ }
724
+ const {
725
+ tools: openaiTools,
726
+ toolChoice: openaiToolChoice,
727
+ toolWarnings
728
+ } = prepareChatTools({
729
+ tools,
730
+ toolChoice,
731
+ structuredOutputs,
732
+ strictJsonSchema
733
+ });
734
+ return {
735
+ args: {
736
+ ...baseArgs,
737
+ tools: openaiTools,
738
+ tool_choice: openaiToolChoice
739
+ },
740
+ warnings: [...warnings, ...toolWarnings]
741
+ };
742
+ }
743
+ async doGenerate(options) {
744
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n;
745
+ const { args: body, warnings } = await this.getArgs(options);
746
+ const {
747
+ responseHeaders,
748
+ value: response,
749
+ rawValue: rawResponse
750
+ } = await (0, import_provider_utils5.postJsonToApi)({
751
+ url: this.config.url({
752
+ path: "/chat/completions",
753
+ modelId: this.modelId
754
+ }),
755
+ headers: (0, import_provider_utils5.combineHeaders)(this.config.headers(), options.headers),
756
+ body,
757
+ failedResponseHandler: openaiFailedResponseHandler,
758
+ successfulResponseHandler: (0, import_provider_utils5.createJsonResponseHandler)(
759
+ openaiChatResponseSchema
760
+ ),
761
+ abortSignal: options.abortSignal,
762
+ fetch: this.config.fetch
763
+ });
764
+ const choice = response.choices[0];
765
+ const content = [];
766
+ const text = choice.message.content;
767
+ if (text != null && text.length > 0) {
768
+ content.push({ type: "text", text });
769
+ }
770
+ for (const toolCall of (_a = choice.message.tool_calls) != null ? _a : []) {
771
+ content.push({
772
+ type: "tool-call",
773
+ toolCallId: (_b = toolCall.id) != null ? _b : (0, import_provider_utils5.generateId)(),
774
+ toolName: toolCall.function.name,
775
+ input: toolCall.function.arguments
776
+ });
777
+ }
778
+ for (const annotation of (_c = choice.message.annotations) != null ? _c : []) {
779
+ content.push({
780
+ type: "source",
781
+ sourceType: "url",
782
+ id: (0, import_provider_utils5.generateId)(),
783
+ url: annotation.url,
784
+ title: annotation.title
785
+ });
786
+ }
787
+ const completionTokenDetails = (_d = response.usage) == null ? void 0 : _d.completion_tokens_details;
788
+ const promptTokenDetails = (_e = response.usage) == null ? void 0 : _e.prompt_tokens_details;
789
+ const providerMetadata = { openai: {} };
790
+ if ((completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens) != null) {
791
+ providerMetadata.openai.acceptedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens;
792
+ }
793
+ if ((completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens) != null) {
794
+ providerMetadata.openai.rejectedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens;
795
+ }
796
+ if (((_f = choice.logprobs) == null ? void 0 : _f.content) != null) {
797
+ providerMetadata.openai.logprobs = choice.logprobs.content;
798
+ }
799
+ return {
800
+ content,
801
+ finishReason: mapOpenAIFinishReason(choice.finish_reason),
802
+ usage: {
803
+ inputTokens: (_h = (_g = response.usage) == null ? void 0 : _g.prompt_tokens) != null ? _h : void 0,
804
+ outputTokens: (_j = (_i = response.usage) == null ? void 0 : _i.completion_tokens) != null ? _j : void 0,
805
+ totalTokens: (_l = (_k = response.usage) == null ? void 0 : _k.total_tokens) != null ? _l : void 0,
806
+ reasoningTokens: (_m = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null ? _m : void 0,
807
+ cachedInputTokens: (_n = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null ? _n : void 0
808
+ },
809
+ request: { body },
810
+ response: {
811
+ ...getResponseMetadata(response),
812
+ headers: responseHeaders,
813
+ body: rawResponse
814
+ },
815
+ warnings,
816
+ providerMetadata
817
+ };
818
+ }
819
+ async doStream(options) {
820
+ const { args, warnings } = await this.getArgs(options);
821
+ const body = {
822
+ ...args,
823
+ stream: true,
824
+ stream_options: {
825
+ include_usage: true
826
+ }
827
+ };
828
+ const { responseHeaders, value: response } = await (0, import_provider_utils5.postJsonToApi)({
829
+ url: this.config.url({
830
+ path: "/chat/completions",
831
+ modelId: this.modelId
832
+ }),
833
+ headers: (0, import_provider_utils5.combineHeaders)(this.config.headers(), options.headers),
834
+ body,
835
+ failedResponseHandler: openaiFailedResponseHandler,
836
+ successfulResponseHandler: (0, import_provider_utils5.createEventSourceResponseHandler)(
837
+ openaiChatChunkSchema
838
+ ),
839
+ abortSignal: options.abortSignal,
840
+ fetch: this.config.fetch
841
+ });
842
+ const toolCalls = [];
843
+ let finishReason = "unknown";
844
+ const usage = {
845
+ inputTokens: void 0,
846
+ outputTokens: void 0,
847
+ totalTokens: void 0
848
+ };
849
+ let isFirstChunk = true;
850
+ let isActiveText = false;
851
+ const providerMetadata = { openai: {} };
852
+ return {
853
+ stream: response.pipeThrough(
854
+ new TransformStream({
855
+ start(controller) {
856
+ controller.enqueue({ type: "stream-start", warnings });
857
+ },
858
+ transform(chunk, controller) {
859
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x;
860
+ if (options.includeRawChunks) {
861
+ controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
862
+ }
863
+ if (!chunk.success) {
864
+ finishReason = "error";
865
+ controller.enqueue({ type: "error", error: chunk.error });
866
+ return;
867
+ }
868
+ const value = chunk.value;
869
+ if ("error" in value) {
870
+ finishReason = "error";
871
+ controller.enqueue({ type: "error", error: value.error });
872
+ return;
873
+ }
874
+ if (isFirstChunk) {
875
+ isFirstChunk = false;
876
+ controller.enqueue({
877
+ type: "response-metadata",
878
+ ...getResponseMetadata(value)
879
+ });
880
+ }
881
+ if (value.usage != null) {
882
+ usage.inputTokens = (_a = value.usage.prompt_tokens) != null ? _a : void 0;
883
+ usage.outputTokens = (_b = value.usage.completion_tokens) != null ? _b : void 0;
884
+ usage.totalTokens = (_c = value.usage.total_tokens) != null ? _c : void 0;
885
+ usage.reasoningTokens = (_e = (_d = value.usage.completion_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : void 0;
886
+ usage.cachedInputTokens = (_g = (_f = value.usage.prompt_tokens_details) == null ? void 0 : _f.cached_tokens) != null ? _g : void 0;
887
+ if (((_h = value.usage.completion_tokens_details) == null ? void 0 : _h.accepted_prediction_tokens) != null) {
888
+ providerMetadata.openai.acceptedPredictionTokens = (_i = value.usage.completion_tokens_details) == null ? void 0 : _i.accepted_prediction_tokens;
889
+ }
890
+ if (((_j = value.usage.completion_tokens_details) == null ? void 0 : _j.rejected_prediction_tokens) != null) {
891
+ providerMetadata.openai.rejectedPredictionTokens = (_k = value.usage.completion_tokens_details) == null ? void 0 : _k.rejected_prediction_tokens;
892
+ }
893
+ }
894
+ const choice = value.choices[0];
895
+ if ((choice == null ? void 0 : choice.finish_reason) != null) {
896
+ finishReason = mapOpenAIFinishReason(choice.finish_reason);
897
+ }
898
+ if (((_l = choice == null ? void 0 : choice.logprobs) == null ? void 0 : _l.content) != null) {
899
+ providerMetadata.openai.logprobs = choice.logprobs.content;
900
+ }
901
+ if ((choice == null ? void 0 : choice.delta) == null) {
902
+ return;
903
+ }
904
+ const delta = choice.delta;
905
+ if (delta.content != null) {
906
+ if (!isActiveText) {
907
+ controller.enqueue({ type: "text-start", id: "0" });
908
+ isActiveText = true;
909
+ }
910
+ controller.enqueue({
911
+ type: "text-delta",
912
+ id: "0",
913
+ delta: delta.content
914
+ });
915
+ }
916
+ if (delta.tool_calls != null) {
917
+ for (const toolCallDelta of delta.tool_calls) {
918
+ const index = toolCallDelta.index;
919
+ if (toolCalls[index] == null) {
920
+ if (toolCallDelta.type !== "function") {
921
+ throw new import_provider3.InvalidResponseDataError({
922
+ data: toolCallDelta,
923
+ message: `Expected 'function' type.`
924
+ });
925
+ }
926
+ if (toolCallDelta.id == null) {
927
+ throw new import_provider3.InvalidResponseDataError({
928
+ data: toolCallDelta,
929
+ message: `Expected 'id' to be a string.`
930
+ });
931
+ }
932
+ if (((_m = toolCallDelta.function) == null ? void 0 : _m.name) == null) {
933
+ throw new import_provider3.InvalidResponseDataError({
934
+ data: toolCallDelta,
935
+ message: `Expected 'function.name' to be a string.`
936
+ });
937
+ }
938
+ controller.enqueue({
939
+ type: "tool-input-start",
940
+ id: toolCallDelta.id,
941
+ toolName: toolCallDelta.function.name
942
+ });
943
+ toolCalls[index] = {
944
+ id: toolCallDelta.id,
945
+ type: "function",
946
+ function: {
947
+ name: toolCallDelta.function.name,
948
+ arguments: (_n = toolCallDelta.function.arguments) != null ? _n : ""
949
+ },
950
+ hasFinished: false
951
+ };
952
+ const toolCall2 = toolCalls[index];
953
+ if (((_o = toolCall2.function) == null ? void 0 : _o.name) != null && ((_p = toolCall2.function) == null ? void 0 : _p.arguments) != null) {
954
+ if (toolCall2.function.arguments.length > 0) {
955
+ controller.enqueue({
956
+ type: "tool-input-delta",
957
+ id: toolCall2.id,
958
+ delta: toolCall2.function.arguments
959
+ });
960
+ }
961
+ if ((0, import_provider_utils5.isParsableJson)(toolCall2.function.arguments)) {
962
+ controller.enqueue({
963
+ type: "tool-input-end",
964
+ id: toolCall2.id
965
+ });
966
+ controller.enqueue({
967
+ type: "tool-call",
968
+ toolCallId: (_q = toolCall2.id) != null ? _q : (0, import_provider_utils5.generateId)(),
969
+ toolName: toolCall2.function.name,
970
+ input: toolCall2.function.arguments
971
+ });
972
+ toolCall2.hasFinished = true;
973
+ }
974
+ }
975
+ continue;
976
+ }
977
+ const toolCall = toolCalls[index];
978
+ if (toolCall.hasFinished) {
979
+ continue;
980
+ }
981
+ if (((_r = toolCallDelta.function) == null ? void 0 : _r.arguments) != null) {
982
+ toolCall.function.arguments += (_t = (_s = toolCallDelta.function) == null ? void 0 : _s.arguments) != null ? _t : "";
983
+ }
984
+ controller.enqueue({
985
+ type: "tool-input-delta",
986
+ id: toolCall.id,
987
+ delta: (_u = toolCallDelta.function.arguments) != null ? _u : ""
988
+ });
989
+ if (((_v = toolCall.function) == null ? void 0 : _v.name) != null && ((_w = toolCall.function) == null ? void 0 : _w.arguments) != null && (0, import_provider_utils5.isParsableJson)(toolCall.function.arguments)) {
990
+ controller.enqueue({
991
+ type: "tool-input-end",
992
+ id: toolCall.id
993
+ });
994
+ controller.enqueue({
995
+ type: "tool-call",
996
+ toolCallId: (_x = toolCall.id) != null ? _x : (0, import_provider_utils5.generateId)(),
997
+ toolName: toolCall.function.name,
998
+ input: toolCall.function.arguments
999
+ });
1000
+ toolCall.hasFinished = true;
1001
+ }
1002
+ }
1003
+ }
1004
+ if (delta.annotations != null) {
1005
+ for (const annotation of delta.annotations) {
1006
+ controller.enqueue({
1007
+ type: "source",
1008
+ sourceType: "url",
1009
+ id: (0, import_provider_utils5.generateId)(),
1010
+ url: annotation.url,
1011
+ title: annotation.title
1012
+ });
1013
+ }
1014
+ }
1015
+ },
1016
+ flush(controller) {
1017
+ if (isActiveText) {
1018
+ controller.enqueue({ type: "text-end", id: "0" });
1019
+ }
1020
+ controller.enqueue({
1021
+ type: "finish",
1022
+ finishReason,
1023
+ usage,
1024
+ ...providerMetadata != null ? { providerMetadata } : {}
1025
+ });
1026
+ }
1027
+ })
1028
+ ),
1029
+ request: { body },
1030
+ response: { headers: responseHeaders }
1031
+ };
1032
+ }
1033
+ };
1034
// Zod schema for the OpenAI chat `usage` object. Every field is `.nullish()`
// because usage is omitted entirely on most stream chunks and some fields are
// only present for specific models/features (caching, predicted outputs).
var openaiTokenUsageSchema = import_v45.z.object({
  prompt_tokens: import_v45.z.number().nullish(),
  completion_tokens: import_v45.z.number().nullish(),
  total_tokens: import_v45.z.number().nullish(),
  // Present when prompt caching applies.
  prompt_tokens_details: import_v45.z.object({
    cached_tokens: import_v45.z.number().nullish()
  }).nullish(),
  // Reasoning + predicted-output accounting.
  completion_tokens_details: import_v45.z.object({
    reasoning_tokens: import_v45.z.number().nullish(),
    accepted_prediction_tokens: import_v45.z.number().nullish(),
    rejected_prediction_tokens: import_v45.z.number().nullish()
  }).nullish()
}).nullish();
1047
// Zod schema for a non-streaming chat completions response.
// Kept deliberately loose (`.nullish()` on identifiers/metadata) so schema
// validation does not reject responses from OpenAI-compatible providers that
// omit optional fields.
var openaiChatResponseSchema = import_v45.z.object({
  id: import_v45.z.string().nullish(),
  created: import_v45.z.number().nullish(),
  model: import_v45.z.string().nullish(),
  choices: import_v45.z.array(
    import_v45.z.object({
      message: import_v45.z.object({
        role: import_v45.z.literal("assistant").nullish(),
        content: import_v45.z.string().nullish(),
        tool_calls: import_v45.z.array(
          import_v45.z.object({
            id: import_v45.z.string().nullish(),
            type: import_v45.z.literal("function"),
            function: import_v45.z.object({
              name: import_v45.z.string(),
              arguments: import_v45.z.string()
            })
          })
        ).nullish(),
        // URL citations (e.g. from web search).
        annotations: import_v45.z.array(
          import_v45.z.object({
            type: import_v45.z.literal("url_citation"),
            start_index: import_v45.z.number(),
            end_index: import_v45.z.number(),
            url: import_v45.z.string(),
            title: import_v45.z.string()
          })
        ).nullish()
      }),
      index: import_v45.z.number(),
      logprobs: import_v45.z.object({
        content: import_v45.z.array(
          import_v45.z.object({
            token: import_v45.z.string(),
            logprob: import_v45.z.number(),
            top_logprobs: import_v45.z.array(
              import_v45.z.object({
                token: import_v45.z.string(),
                logprob: import_v45.z.number()
              })
            )
          })
        ).nullish()
      }).nullish(),
      finish_reason: import_v45.z.string().nullish()
    })
  ),
  usage: openaiTokenUsageSchema
});
1096
// Zod schema for one SSE chunk of a streaming chat completion.
// The union's second member accepts the provider's error payload shape so a
// mid-stream error chunk parses successfully and can be surfaced as an
// `error` stream part instead of failing validation.
var openaiChatChunkSchema = import_v45.z.union([
  import_v45.z.object({
    id: import_v45.z.string().nullish(),
    created: import_v45.z.number().nullish(),
    model: import_v45.z.string().nullish(),
    choices: import_v45.z.array(
      import_v45.z.object({
        // Incremental payload; may be null on e.g. the final usage-only chunk.
        delta: import_v45.z.object({
          role: import_v45.z.enum(["assistant"]).nullish(),
          content: import_v45.z.string().nullish(),
          tool_calls: import_v45.z.array(
            import_v45.z.object({
              // `index` correlates partial tool-call deltas across chunks.
              index: import_v45.z.number(),
              id: import_v45.z.string().nullish(),
              type: import_v45.z.literal("function").nullish(),
              function: import_v45.z.object({
                name: import_v45.z.string().nullish(),
                arguments: import_v45.z.string().nullish()
              })
            })
          ).nullish(),
          annotations: import_v45.z.array(
            import_v45.z.object({
              type: import_v45.z.literal("url_citation"),
              start_index: import_v45.z.number(),
              end_index: import_v45.z.number(),
              url: import_v45.z.string(),
              title: import_v45.z.string()
            })
          ).nullish()
        }).nullish(),
        logprobs: import_v45.z.object({
          content: import_v45.z.array(
            import_v45.z.object({
              token: import_v45.z.string(),
              logprob: import_v45.z.number(),
              top_logprobs: import_v45.z.array(
                import_v45.z.object({
                  token: import_v45.z.string(),
                  logprob: import_v45.z.number()
                })
              )
            })
          ).nullish()
        }).nullish(),
        finish_reason: import_v45.z.string().nullish(),
        index: import_v45.z.number()
      })
    ),
    usage: openaiTokenUsageSchema
  }),
  openaiErrorDataSchema
]);
1149
// Heuristic: reasoning models are the o-series ("o1", "o3", ...) and the
// gpt-5 family, except the gpt-5-chat variants which behave like chat models.
function isReasoningModel(modelId) {
  if (modelId.startsWith("gpt-5-chat")) {
    return false;
  }
  return modelId.startsWith("o") || modelId.startsWith("gpt-5");
}
1152
// Flex service tier is available for o3, o4-mini, and gpt-5 models
// (excluding the gpt-5-chat variants).
function supportsFlexProcessing(modelId) {
  if (modelId.startsWith("o3") || modelId.startsWith("o4-mini")) {
    return true;
  }
  return modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
}
1155
// Priority service tier is available for gpt-4*, o3, o4-mini, gpt-5-mini,
// and the remaining gpt-5 family except gpt-5-nano and gpt-5-chat.
function supportsPriorityProcessing(modelId) {
  if (modelId.startsWith("gpt-4") || modelId.startsWith("o3") || modelId.startsWith("o4-mini")) {
    return true;
  }
  if (modelId.startsWith("gpt-5-mini")) {
    return true;
  }
  return modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat");
}
1158
// Decides how system messages are delivered to the model:
// - non-reasoning models accept a plain "system" role;
// - reasoning models default to the "developer" role, unless the
//   `reasoningModels` table pins a specific mode (e.g. "remove" for o1-mini).
function getSystemMessageMode(modelId) {
  if (!isReasoningModel(modelId)) {
    return "system";
  }
  const entry = reasoningModels[modelId];
  if (entry != null && entry.systemMessageMode != null) {
    return entry.systemMessageMode;
  }
  return "developer";
}
1165
// Per-model overrides for how system messages must be handled by reasoning
// models. Models missing from this table fall back to "developer" in
// getSystemMessageMode. "remove" means system messages are stripped
// (the o1 preview/mini generation rejects both system and developer roles).
var reasoningModels = {
  "o1-mini": {
    systemMessageMode: "remove"
  },
  "o1-mini-2024-09-12": {
    systemMessageMode: "remove"
  },
  "o1-preview": {
    systemMessageMode: "remove"
  },
  "o1-preview-2024-09-12": {
    systemMessageMode: "remove"
  },
  o3: {
    systemMessageMode: "developer"
  },
  "o3-2025-04-16": {
    systemMessageMode: "developer"
  },
  "o3-mini": {
    systemMessageMode: "developer"
  },
  "o3-mini-2025-01-31": {
    systemMessageMode: "developer"
  },
  "o4-mini": {
    systemMessageMode: "developer"
  },
  "o4-mini-2025-04-16": {
    systemMessageMode: "developer"
  }
};
1197
+
1198
+ // src/completion/openai-completion-language-model.ts
1199
+ var import_provider_utils6 = require("@ai-sdk/provider-utils");
1200
+ var import_v47 = require("zod/v4");
1201
+
1202
+ // src/completion/convert-to-openai-completion-prompt.ts
1203
+ var import_provider4 = require("@ai-sdk/provider");
1204
// Flattens a structured chat prompt into a single legacy-completions text
// prompt of the form "user:\n...\n\nassistant:\n...", plus a stop sequence
// that cuts generation off at the next simulated user turn.
//
// Params:
//   prompt    - array of { role, content } messages; an optional leading
//               system message becomes a bare preamble.
//   user      - label used for user turns (default "user").
//   assistant - label used for assistant turns (default "assistant").
// Returns: { prompt: string, stopSequences: string[] }
// Throws: InvalidPromptError for a non-leading system message;
//         UnsupportedFunctionalityError for tool calls / tool messages.
function convertToOpenAICompletionPrompt({
  prompt,
  user = "user",
  assistant = "assistant"
}) {
  let text = "";
  // A system message is only allowed at the very start.
  if (prompt[0].role === "system") {
    text += `${prompt[0].content}

`;
    prompt = prompt.slice(1);
  }
  for (const { role, content } of prompt) {
    switch (role) {
      case "system": {
        // Fixed: this message used a plain double-quoted string, so
        // "${content}" was emitted literally instead of being interpolated.
        throw new import_provider4.InvalidPromptError({
          message: `Unexpected system message in prompt: ${content}`,
          prompt
        });
      }
      case "user": {
        // Only text parts are supported; other part types map to undefined
        // and are dropped by filter(Boolean).
        const userMessage = content.map((part) => {
          switch (part.type) {
            case "text": {
              return part.text;
            }
          }
        }).filter(Boolean).join("");
        text += `${user}:
${userMessage}

`;
        break;
      }
      case "assistant": {
        const assistantMessage = content.map((part) => {
          switch (part.type) {
            case "text": {
              return part.text;
            }
            case "tool-call": {
              throw new import_provider4.UnsupportedFunctionalityError({
                functionality: "tool-call messages"
              });
            }
          }
        }).join("");
        text += `${assistant}:
${assistantMessage}

`;
        break;
      }
      case "tool": {
        throw new import_provider4.UnsupportedFunctionalityError({
          functionality: "tool messages"
        });
      }
      default: {
        const _exhaustiveCheck = role;
        throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
      }
    }
  }
  // Leave the prompt open on an assistant turn for the model to continue.
  text += `${assistant}:
`;
  return {
    prompt: text,
    // Stop as soon as the model starts hallucinating the next user turn.
    stopSequences: [`
${user}:`]
  };
}
1276
+
1277
+ // src/completion/get-response-metadata.ts
1278
// Maps raw response identifiers to the SDK's response-metadata shape.
// `created` is a Unix timestamp in seconds and is converted to a Date.
// Null values are normalized to undefined; the keys are always present.
function getResponseMetadata2({
  id,
  model,
  created
}) {
  const timestamp = created == null ? void 0 : new Date(created * 1e3);
  return {
    id: id == null ? void 0 : id,
    modelId: model == null ? void 0 : model,
    timestamp
  };
}
1289
+
1290
+ // src/completion/map-openai-finish-reason.ts
1291
// Translates OpenAI completion finish reasons into the SDK's canonical
// finish-reason vocabulary. Unrecognized (or absent) values map to "unknown".
function mapOpenAIFinishReason2(finishReason) {
  if (finishReason === "stop") {
    return "stop";
  }
  if (finishReason === "length") {
    return "length";
  }
  if (finishReason === "content_filter") {
    return "content-filter";
  }
  if (finishReason === "function_call" || finishReason === "tool_calls") {
    return "tool-calls";
  }
  return "unknown";
}
1306
+
1307
+ // src/completion/openai-completion-options.ts
1308
+ var import_v46 = require("zod/v4");
1309
// Provider options accepted by the legacy completions model under the
// `openai` (or provider-specific) key of `providerOptions`.
var openaiCompletionProviderOptions = import_v46.z.object({
  /**
  Echo back the prompt in addition to the completion.
   */
  echo: import_v46.z.boolean().optional(),
  /**
  Modify the likelihood of specified tokens appearing in the completion.

  Accepts a JSON object that maps tokens (specified by their token ID in
  the GPT tokenizer) to an associated bias value from -100 to 100. You
  can use this tokenizer tool to convert text to token IDs. Mathematically,
  the bias is added to the logits generated by the model prior to sampling.
  The exact effect will vary per model, but values between -1 and 1 should
  decrease or increase likelihood of selection; values like -100 or 100
  should result in a ban or exclusive selection of the relevant token.

  As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
  token from being generated.
   */
  logitBias: import_v46.z.record(import_v46.z.string(), import_v46.z.number()).optional(),
  /**
  The suffix that comes after a completion of inserted text.
   */
  suffix: import_v46.z.string().optional(),
  /**
  A unique identifier representing your end-user, which can help OpenAI to
  monitor and detect abuse. Learn more.
   */
  user: import_v46.z.string().optional(),
  /**
  Return the log probabilities of the tokens. Including logprobs will increase
  the response size and can slow down response times. However, it can
  be useful to better understand how the model is behaving.
  Setting to true will return the log probabilities of the tokens that
  were generated.
  Setting to a number will return the log probabilities of the top n
  tokens that were generated.
   */
  logprobs: import_v46.z.union([import_v46.z.boolean(), import_v46.z.number()]).optional()
});
1349
+
1350
+ // src/completion/openai-completion-language-model.ts
1351
// Language model implementation backed by OpenAI's legacy `/completions`
// endpoint (text-in/text-out; no tools, no JSON mode).
var OpenAICompletionLanguageModel = class {
  constructor(modelId, config) {
    this.specificationVersion = "v2";
    this.supportedUrls = {
      // No URLs are supported for completion models.
    };
    this.modelId = modelId;
    this.config = config;
  }
  // Provider-options key derived from the provider id, e.g.
  // "openai.completion" -> "openai".
  get providerOptionsName() {
    return this.config.provider.split(".")[0].trim();
  }
  get provider() {
    return this.config.provider;
  }
  // Builds the request body for /completions and collects warnings for
  // settings this endpoint does not support.
  async getArgs({
    prompt,
    maxOutputTokens,
    temperature,
    topP,
    topK,
    frequencyPenalty,
    presencePenalty,
    stopSequences: userStopSequences,
    responseFormat,
    tools,
    toolChoice,
    seed,
    providerOptions
  }) {
    const warnings = [];
    // Options may arrive under "openai" or the provider-specific key;
    // the provider-specific spread wins on conflicts (applied last).
    const openaiOptions = {
      ...await (0, import_provider_utils6.parseProviderOptions)({
        provider: "openai",
        providerOptions,
        schema: openaiCompletionProviderOptions
      }),
      ...await (0, import_provider_utils6.parseProviderOptions)({
        provider: this.providerOptionsName,
        providerOptions,
        schema: openaiCompletionProviderOptions
      })
    };
    // The completions endpoint supports none of these; warn instead of failing.
    if (topK != null) {
      warnings.push({ type: "unsupported-setting", setting: "topK" });
    }
    if (tools == null ? void 0 : tools.length) {
      warnings.push({ type: "unsupported-setting", setting: "tools" });
    }
    if (toolChoice != null) {
      warnings.push({ type: "unsupported-setting", setting: "toolChoice" });
    }
    if (responseFormat != null && responseFormat.type !== "text") {
      warnings.push({
        type: "unsupported-setting",
        setting: "responseFormat",
        details: "JSON response format is not supported."
      });
    }
    const { prompt: completionPrompt, stopSequences } = convertToOpenAICompletionPrompt({ prompt });
    // Prompt-derived stop sequences come first, then user-specified ones.
    const stop = [...stopSequences != null ? stopSequences : [], ...userStopSequences != null ? userStopSequences : []];
    return {
      args: {
        // model id:
        model: this.modelId,
        // model specific settings:
        echo: openaiOptions.echo,
        logit_bias: openaiOptions.logitBias,
        // logprobs: true is sent as 0 (top-0 logprobs, i.e. only the chosen
        // token); false is dropped; numbers pass through.
        // NOTE(review): confirm 0-vs-true mapping matches the API's intent.
        logprobs: (openaiOptions == null ? void 0 : openaiOptions.logprobs) === true ? 0 : (openaiOptions == null ? void 0 : openaiOptions.logprobs) === false ? void 0 : openaiOptions == null ? void 0 : openaiOptions.logprobs,
        suffix: openaiOptions.suffix,
        user: openaiOptions.user,
        // standardized settings:
        max_tokens: maxOutputTokens,
        temperature,
        top_p: topP,
        frequency_penalty: frequencyPenalty,
        presence_penalty: presencePenalty,
        seed,
        // prompt:
        prompt: completionPrompt,
        // stop sequences:
        stop: stop.length > 0 ? stop : void 0
      },
      warnings
    };
  }
  // Single-shot (non-streaming) generation.
  async doGenerate(options) {
    var _a, _b, _c;
    const { args, warnings } = await this.getArgs(options);
    const {
      responseHeaders,
      value: response,
      rawValue: rawResponse
    } = await (0, import_provider_utils6.postJsonToApi)({
      url: this.config.url({
        path: "/completions",
        modelId: this.modelId
      }),
      headers: (0, import_provider_utils6.combineHeaders)(this.config.headers(), options.headers),
      body: args,
      failedResponseHandler: openaiFailedResponseHandler,
      successfulResponseHandler: (0, import_provider_utils6.createJsonResponseHandler)(
        openaiCompletionResponseSchema
      ),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
    // Only the first choice is surfaced.
    const choice = response.choices[0];
    const providerMetadata = { openai: {} };
    if (choice.logprobs != null) {
      providerMetadata.openai.logprobs = choice.logprobs;
    }
    return {
      content: [{ type: "text", text: choice.text }],
      usage: {
        inputTokens: (_a = response.usage) == null ? void 0 : _a.prompt_tokens,
        outputTokens: (_b = response.usage) == null ? void 0 : _b.completion_tokens,
        totalTokens: (_c = response.usage) == null ? void 0 : _c.total_tokens
      },
      finishReason: mapOpenAIFinishReason2(choice.finish_reason),
      request: { body: args },
      response: {
        ...getResponseMetadata2(response),
        headers: responseHeaders,
        body: rawResponse
      },
      providerMetadata,
      warnings
    };
  }
  // Streaming generation: adapts the SSE chunk stream into the SDK's
  // stream-part protocol via a TransformStream.
  async doStream(options) {
    const { args, warnings } = await this.getArgs(options);
    const body = {
      ...args,
      stream: true,
      // Ask the API to append a final usage chunk to the stream.
      stream_options: {
        include_usage: true
      }
    };
    const { responseHeaders, value: response } = await (0, import_provider_utils6.postJsonToApi)({
      url: this.config.url({
        path: "/completions",
        modelId: this.modelId
      }),
      headers: (0, import_provider_utils6.combineHeaders)(this.config.headers(), options.headers),
      body,
      failedResponseHandler: openaiFailedResponseHandler,
      successfulResponseHandler: (0, import_provider_utils6.createEventSourceResponseHandler)(
        openaiCompletionChunkSchema
      ),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
    // Mutable stream state captured by the transformer closures.
    let finishReason = "unknown";
    const providerMetadata = { openai: {} };
    const usage = {
      inputTokens: void 0,
      outputTokens: void 0,
      totalTokens: void 0
    };
    let isFirstChunk = true;
    return {
      stream: response.pipeThrough(
        new TransformStream({
          start(controller) {
            controller.enqueue({ type: "stream-start", warnings });
          },
          transform(chunk, controller) {
            // Raw pass-through is emitted before any parsing concerns.
            if (options.includeRawChunks) {
              controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
            }
            // Schema-validation failure for this chunk.
            if (!chunk.success) {
              finishReason = "error";
              controller.enqueue({ type: "error", error: chunk.error });
              return;
            }
            const value = chunk.value;
            // Provider sent an error payload mid-stream.
            if ("error" in value) {
              finishReason = "error";
              controller.enqueue({ type: "error", error: value.error });
              return;
            }
            // First valid chunk carries response metadata and opens the
            // single text part (id "0").
            if (isFirstChunk) {
              isFirstChunk = false;
              controller.enqueue({
                type: "response-metadata",
                ...getResponseMetadata2(value)
              });
              controller.enqueue({ type: "text-start", id: "0" });
            }
            if (value.usage != null) {
              usage.inputTokens = value.usage.prompt_tokens;
              usage.outputTokens = value.usage.completion_tokens;
              usage.totalTokens = value.usage.total_tokens;
            }
            const choice = value.choices[0];
            if ((choice == null ? void 0 : choice.finish_reason) != null) {
              finishReason = mapOpenAIFinishReason2(choice.finish_reason);
            }
            if ((choice == null ? void 0 : choice.logprobs) != null) {
              providerMetadata.openai.logprobs = choice.logprobs;
            }
            if ((choice == null ? void 0 : choice.text) != null && choice.text.length > 0) {
              controller.enqueue({
                type: "text-delta",
                id: "0",
                delta: choice.text
              });
            }
          },
          flush(controller) {
            // Close the text part only if it was ever opened.
            if (!isFirstChunk) {
              controller.enqueue({ type: "text-end", id: "0" });
            }
            controller.enqueue({
              type: "finish",
              finishReason,
              providerMetadata,
              usage
            });
          }
        })
      ),
      request: { body },
      response: { headers: responseHeaders }
    };
  }
};
1579
// Usage block for the legacy completions endpoint (all fields required here,
// but the whole object is optional where it is referenced below).
var usageSchema = import_v47.z.object({
  prompt_tokens: import_v47.z.number(),
  completion_tokens: import_v47.z.number(),
  total_tokens: import_v47.z.number()
});
// Non-streaming /completions response.
var openaiCompletionResponseSchema = import_v47.z.object({
  id: import_v47.z.string().nullish(),
  created: import_v47.z.number().nullish(),
  model: import_v47.z.string().nullish(),
  choices: import_v47.z.array(
    import_v47.z.object({
      text: import_v47.z.string(),
      finish_reason: import_v47.z.string(),
      // Legacy logprobs shape (parallel arrays), unlike chat's object list.
      logprobs: import_v47.z.object({
        tokens: import_v47.z.array(import_v47.z.string()),
        token_logprobs: import_v47.z.array(import_v47.z.number()),
        top_logprobs: import_v47.z.array(import_v47.z.record(import_v47.z.string(), import_v47.z.number())).nullish()
      }).nullish()
    })
  ),
  usage: usageSchema.nullish()
});
// One SSE chunk of a streaming /completions response; the union's second
// member accepts the provider error payload so mid-stream errors still parse.
var openaiCompletionChunkSchema = import_v47.z.union([
  import_v47.z.object({
    id: import_v47.z.string().nullish(),
    created: import_v47.z.number().nullish(),
    model: import_v47.z.string().nullish(),
    choices: import_v47.z.array(
      import_v47.z.object({
        text: import_v47.z.string(),
        finish_reason: import_v47.z.string().nullish(),
        index: import_v47.z.number(),
        logprobs: import_v47.z.object({
          tokens: import_v47.z.array(import_v47.z.string()),
          token_logprobs: import_v47.z.array(import_v47.z.number()),
          top_logprobs: import_v47.z.array(import_v47.z.record(import_v47.z.string(), import_v47.z.number())).nullish()
        }).nullish()
      })
    ),
    usage: usageSchema.nullish()
  }),
  openaiErrorDataSchema
]);
1622
+
1623
+ // src/embedding/openai-embedding-model.ts
1624
+ var import_provider5 = require("@ai-sdk/provider");
1625
+ var import_provider_utils7 = require("@ai-sdk/provider-utils");
1626
+ var import_v49 = require("zod/v4");
1627
+
1628
+ // src/embedding/openai-embedding-options.ts
1629
+ var import_v48 = require("zod/v4");
1630
// Provider options accepted by the embedding model under the `openai` key
// of `providerOptions`.
var openaiEmbeddingProviderOptions = import_v48.z.object({
  /**
  The number of dimensions the resulting output embeddings should have.
  Only supported in text-embedding-3 and later models.
   */
  dimensions: import_v48.z.number().optional(),
  /**
  A unique identifier representing your end-user, which can help OpenAI to
  monitor and detect abuse. Learn more.
   */
  user: import_v48.z.string().optional()
});
1642
+
1643
+ // src/embedding/openai-embedding-model.ts
1644
// Embedding model backed by OpenAI's `/embeddings` endpoint.
var OpenAIEmbeddingModel = class {
  constructor(modelId, config) {
    this.specificationVersion = "v2";
    // Hard API limit on inputs per request; exceeding it throws below.
    this.maxEmbeddingsPerCall = 2048;
    this.supportsParallelCalls = true;
    this.modelId = modelId;
    this.config = config;
  }
  get provider() {
    return this.config.provider;
  }
  // Embeds a batch of values in a single API call.
  // Throws TooManyEmbeddingValuesForCallError when the batch is too large.
  async doEmbed({
    values,
    headers,
    abortSignal,
    providerOptions
  }) {
    var _a;
    if (values.length > this.maxEmbeddingsPerCall) {
      throw new import_provider5.TooManyEmbeddingValuesForCallError({
        provider: this.provider,
        modelId: this.modelId,
        maxEmbeddingsPerCall: this.maxEmbeddingsPerCall,
        values
      });
    }
    // Absent/invalid provider options fall back to an empty object.
    const openaiOptions = (_a = await (0, import_provider_utils7.parseProviderOptions)({
      provider: "openai",
      providerOptions,
      schema: openaiEmbeddingProviderOptions
    })) != null ? _a : {};
    const {
      responseHeaders,
      value: response,
      rawValue
    } = await (0, import_provider_utils7.postJsonToApi)({
      url: this.config.url({
        path: "/embeddings",
        modelId: this.modelId
      }),
      headers: (0, import_provider_utils7.combineHeaders)(this.config.headers(), headers),
      body: {
        model: this.modelId,
        input: values,
        // Always request plain float arrays (not base64).
        encoding_format: "float",
        dimensions: openaiOptions.dimensions,
        user: openaiOptions.user
      },
      failedResponseHandler: openaiFailedResponseHandler,
      successfulResponseHandler: (0, import_provider_utils7.createJsonResponseHandler)(
        openaiTextEmbeddingResponseSchema
      ),
      abortSignal,
      fetch: this.config.fetch
    });
    return {
      embeddings: response.data.map((item) => item.embedding),
      usage: response.usage ? { tokens: response.usage.prompt_tokens } : void 0,
      response: { headers: responseHeaders, body: rawValue }
    };
  }
};
1706
// Minimal /embeddings response shape: only the fields this model reads.
var openaiTextEmbeddingResponseSchema = import_v49.z.object({
  data: import_v49.z.array(import_v49.z.object({ embedding: import_v49.z.array(import_v49.z.number()) })),
  usage: import_v49.z.object({ prompt_tokens: import_v49.z.number() }).nullish()
});
1710
+
1711
+ // src/image/openai-image-model.ts
1712
+ var import_provider_utils8 = require("@ai-sdk/provider-utils");
1713
+ var import_v410 = require("zod/v4");
1714
+
1715
+ // src/image/openai-image-options.ts
1716
// Per-model cap on images per API call; unknown models default to 1
// (see OpenAIImageModel.maxImagesPerCall).
var modelMaxImagesPerCall = {
  "dall-e-3": 1,
  "dall-e-2": 10,
  "gpt-image-1": 10
};
// Models that ignore/reject `response_format` and always return base64
// (so the request must omit the parameter).
var hasDefaultResponseFormat = /* @__PURE__ */ new Set(["gpt-image-1"]);
1722
+
1723
+ // src/image/openai-image-model.ts
1724
// Image generation model backed by OpenAI's `/images/generations` endpoint.
var OpenAIImageModel = class {
  constructor(modelId, config) {
    this.modelId = modelId;
    this.config = config;
    this.specificationVersion = "v2";
  }
  // Per-model batch limit; unknown models default to 1.
  get maxImagesPerCall() {
    var _a;
    return (_a = modelMaxImagesPerCall[this.modelId]) != null ? _a : 1;
  }
  get provider() {
    return this.config.provider;
  }
  async doGenerate({
    prompt,
    n,
    size,
    aspectRatio,
    seed,
    providerOptions,
    headers,
    abortSignal
  }) {
    var _a, _b, _c, _d;
    const warnings = [];
    // The endpoint accepts `size`, not aspect ratio or seed; warn, don't fail.
    if (aspectRatio != null) {
      warnings.push({
        type: "unsupported-setting",
        setting: "aspectRatio",
        details: "This model does not support aspect ratio. Use `size` instead."
      });
    }
    if (seed != null) {
      warnings.push({ type: "unsupported-setting", setting: "seed" });
    }
    // Injectable clock for tests; falls back to the real current time.
    const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
    const { value: response, responseHeaders } = await (0, import_provider_utils8.postJsonToApi)({
      url: this.config.url({
        path: "/images/generations",
        modelId: this.modelId
      }),
      headers: (0, import_provider_utils8.combineHeaders)(this.config.headers(), headers),
      body: {
        model: this.modelId,
        prompt,
        n,
        size,
        // Arbitrary provider options are forwarded verbatim.
        ...(_d = providerOptions.openai) != null ? _d : {},
        // Request base64 unless the model already defaults to it.
        ...!hasDefaultResponseFormat.has(this.modelId) ? { response_format: "b64_json" } : {}
      },
      failedResponseHandler: openaiFailedResponseHandler,
      successfulResponseHandler: (0, import_provider_utils8.createJsonResponseHandler)(
        openaiImageResponseSchema
      ),
      abortSignal,
      fetch: this.config.fetch
    });
    return {
      images: response.data.map((item) => item.b64_json),
      warnings,
      response: {
        timestamp: currentDate,
        modelId: this.modelId,
        headers: responseHeaders
      },
      providerMetadata: {
        openai: {
          // One entry per image, aligned by index; null when the API did
          // not revise the prompt.
          images: response.data.map(
            (item) => item.revised_prompt ? {
              revisedPrompt: item.revised_prompt
            } : null
          )
        }
      }
    };
  }
};
1801
// /images/generations response: base64 image data plus the optional
// model-revised prompt.
var openaiImageResponseSchema = import_v410.z.object({
  data: import_v410.z.array(
    import_v410.z.object({ b64_json: import_v410.z.string(), revised_prompt: import_v410.z.string().optional() })
  )
});
1806
+
1807
+ // src/transcription/openai-transcription-model.ts
1808
+ var import_provider_utils9 = require("@ai-sdk/provider-utils");
1809
+ var import_v412 = require("zod/v4");
1810
+
1811
+ // src/transcription/openai-transcription-options.ts
1812
+ var import_v411 = require("zod/v4");
1813
// Provider options accepted by the transcription model under the `openai`
// key of `providerOptions`.
var openAITranscriptionProviderOptions = import_v411.z.object({
  /**
   * Additional information to include in the transcription response.
   */
  include: import_v411.z.array(import_v411.z.string()).optional(),
  /**
   * The language of the input audio in ISO-639-1 format.
   */
  language: import_v411.z.string().optional(),
  /**
   * An optional text to guide the model's style or continue a previous audio segment.
   */
  prompt: import_v411.z.string().optional(),
  /**
   * The sampling temperature, between 0 and 1.
   * @default 0
   */
  temperature: import_v411.z.number().min(0).max(1).default(0).optional(),
  /**
   * The timestamp granularities to populate for this transcription.
   * @default ['segment']
   */
  timestampGranularities: import_v411.z.array(import_v411.z.enum(["word", "segment"])).default(["segment"]).optional()
});
1837
+
1838
+ // src/transcription/openai-transcription-model.ts
1839
// Maps the full language names Whisper reports in verbose_json responses
// to ISO-639-1 codes, so callers receive a standard language code.
var languageMap = {
  afrikaans: "af",
  arabic: "ar",
  armenian: "hy",
  azerbaijani: "az",
  belarusian: "be",
  bosnian: "bs",
  bulgarian: "bg",
  catalan: "ca",
  chinese: "zh",
  croatian: "hr",
  czech: "cs",
  danish: "da",
  dutch: "nl",
  english: "en",
  estonian: "et",
  finnish: "fi",
  french: "fr",
  galician: "gl",
  german: "de",
  greek: "el",
  hebrew: "he",
  hindi: "hi",
  hungarian: "hu",
  icelandic: "is",
  indonesian: "id",
  italian: "it",
  japanese: "ja",
  kannada: "kn",
  kazakh: "kk",
  korean: "ko",
  latvian: "lv",
  lithuanian: "lt",
  macedonian: "mk",
  malay: "ms",
  marathi: "mr",
  maori: "mi",
  nepali: "ne",
  norwegian: "no",
  persian: "fa",
  polish: "pl",
  portuguese: "pt",
  romanian: "ro",
  russian: "ru",
  serbian: "sr",
  slovak: "sk",
  slovenian: "sl",
  spanish: "es",
  swahili: "sw",
  swedish: "sv",
  tagalog: "tl",
  tamil: "ta",
  thai: "th",
  turkish: "tr",
  ukrainian: "uk",
  urdu: "ur",
  vietnamese: "vi",
  welsh: "cy"
};
1898
var OpenAITranscriptionModel = class {
  /**
   * Transcription model backed by OpenAI's /audio/transcriptions endpoint.
   *
   * @param modelId - e.g. "whisper-1", "gpt-4o-transcribe".
   * @param config - provider config: url builder, headers factory, fetch impl.
   */
  constructor(modelId, config) {
    this.modelId = modelId;
    this.config = config;
    this.specificationVersion = "v2";
  }
  get provider() {
    return this.config.provider;
  }
  /**
   * Builds the multipart form body for a transcription request and collects
   * any warnings produced while parsing provider options.
   */
  async getArgs({
    audio,
    mediaType,
    providerOptions
  }) {
    const warnings = [];
    const openAIOptions = await (0, import_provider_utils9.parseProviderOptions)({
      provider: "openai",
      providerOptions,
      schema: openAITranscriptionProviderOptions
    });
    const formData = new FormData();
    // Accept raw bytes directly; base64 strings are decoded first.
    const audioBlob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([(0, import_provider_utils9.convertBase64ToUint8Array)(audio)]);
    formData.append("model", this.modelId);
    const fileExtension = (0, import_provider_utils9.mediaTypeToExtension)(mediaType);
    formData.append(
      "file",
      new File([audioBlob], "audio", { type: mediaType }),
      `audio.${fileExtension}`
    );
    if (openAIOptions) {
      const transcriptionModelOptions = {
        include: openAIOptions.include,
        language: openAIOptions.language,
        prompt: openAIOptions.prompt,
        // https://platform.openai.com/docs/api-reference/audio/createTranscription#audio_createtranscription-response_format
        // prefer verbose_json to get segments for models that support it
        response_format: [
          "gpt-4o-transcribe",
          "gpt-4o-mini-transcribe"
        ].includes(this.modelId) ? "json" : "verbose_json",
        temperature: openAIOptions.temperature,
        timestamp_granularities: openAIOptions.timestampGranularities
      };
      // Serialize each defined option into the form; arrays use the `key[]` convention.
      for (const [key, value] of Object.entries(transcriptionModelOptions)) {
        if (value == null) {
          continue;
        }
        if (Array.isArray(value)) {
          value.forEach((item) => formData.append(`${key}[]`, String(item)));
        } else {
          formData.append(key, String(value));
        }
      }
    }
    return {
      formData,
      warnings
    };
  }
  async doGenerate(options) {
    // Test hook: `_internal.currentDate` lets callers freeze the timestamp.
    const currentDate = this.config._internal?.currentDate?.() ?? /* @__PURE__ */ new Date();
    const { formData, warnings } = await this.getArgs(options);
    const {
      value: response,
      responseHeaders,
      rawValue: rawResponse
    } = await (0, import_provider_utils9.postFormDataToApi)({
      url: this.config.url({
        path: "/audio/transcriptions",
        modelId: this.modelId
      }),
      headers: (0, import_provider_utils9.combineHeaders)(this.config.headers(), options.headers),
      formData,
      failedResponseHandler: openaiFailedResponseHandler,
      successfulResponseHandler: (0, import_provider_utils9.createJsonResponseHandler)(
        openaiTranscriptionResponseSchema
      ),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
    // Whisper reports a full language name; translate to ISO-639-1 when known.
    const language = response.language != null && response.language in languageMap ? languageMap[response.language] : void 0;
    // Prefer segment timestamps, fall back to word timestamps, then to none.
    const segments = response.segments?.map((segment) => ({
      text: segment.text,
      startSecond: segment.start,
      endSecond: segment.end
    })) ?? response.words?.map((word) => ({
      text: word.word,
      startSecond: word.start,
      endSecond: word.end
    })) ?? [];
    return {
      text: response.text,
      segments,
      language,
      durationInSeconds: response.duration ?? void 0,
      warnings,
      response: {
        timestamp: currentDate,
        modelId: this.modelId,
        headers: responseHeaders,
        body: rawResponse
      }
    };
  }
};
2004
// Response of /audio/transcriptions. `words` and `segments` only appear in
// verbose_json responses, so both are nullish and callers fall back gracefully.
var openaiTranscriptionResponseSchema = import_v412.z.object({
  text: import_v412.z.string(),
  language: import_v412.z.string().nullish(),
  duration: import_v412.z.number().nullish(),
  words: import_v412.z.array(
    import_v412.z.object({
      word: import_v412.z.string(),
      start: import_v412.z.number(),
      end: import_v412.z.number()
    })
  ).nullish(),
  segments: import_v412.z.array(
    import_v412.z.object({
      id: import_v412.z.number(),
      seek: import_v412.z.number(),
      start: import_v412.z.number(),
      end: import_v412.z.number(),
      text: import_v412.z.string(),
      tokens: import_v412.z.array(import_v412.z.number()),
      temperature: import_v412.z.number(),
      avg_logprob: import_v412.z.number(),
      compression_ratio: import_v412.z.number(),
      no_speech_prob: import_v412.z.number()
    })
  ).nullish()
});
2030
+
2031
+ // src/speech/openai-speech-model.ts
2032
+ var import_provider_utils10 = require("@ai-sdk/provider-utils");
2033
+ var import_v413 = require("zod/v4");
2034
// Provider options for speech synthesis (`providerOptions.openai`):
// `instructions` gives extra voice/style guidance; `speed` is the playback
// rate in [0.25, 4] (default 1).
var OpenAIProviderOptionsSchema = import_v413.z.object({
  instructions: import_v413.z.string().nullish(),
  speed: import_v413.z.number().min(0.25).max(4).default(1).nullish()
});
2038
var OpenAISpeechModel = class {
  /**
   * Speech synthesis model backed by OpenAI's /audio/speech endpoint.
   *
   * @param modelId - e.g. "tts-1", "gpt-4o-mini-tts".
   * @param config - provider config: url builder, headers factory, fetch impl.
   */
  constructor(modelId, config) {
    this.modelId = modelId;
    this.config = config;
    this.specificationVersion = "v2";
  }
  get provider() {
    return this.config.provider;
  }
  /**
   * Builds the JSON body for the speech request and collects warnings for
   * unsupported settings (unknown output formats, language selection).
   */
  async getArgs({
    text,
    voice = "alloy",
    outputFormat = "mp3",
    speed,
    instructions,
    language,
    providerOptions
  }) {
    const warnings = [];
    const openAIOptions = await (0, import_provider_utils10.parseProviderOptions)({
      provider: "openai",
      providerOptions,
      schema: OpenAIProviderOptionsSchema
    });
    const requestBody = {
      model: this.modelId,
      input: text,
      voice,
      response_format: "mp3",
      speed,
      instructions
    };
    if (outputFormat) {
      if (["mp3", "opus", "aac", "flac", "wav", "pcm"].includes(outputFormat)) {
        requestBody.response_format = outputFormat;
      } else {
        warnings.push({
          type: "unsupported-setting",
          setting: "outputFormat",
          details: `Unsupported output format: ${outputFormat}. Using mp3 instead.`
        });
      }
    }
    if (openAIOptions) {
      // FIX: this previously iterated over an always-empty object literal, so
      // the parsed provider options (instructions, speed) were silently
      // discarded. Apply them here; explicit provider options take precedence
      // over the top-level call settings already placed in requestBody.
      const speechModelOptions = {
        instructions: openAIOptions.instructions,
        speed: openAIOptions.speed
      };
      for (const [key, value] of Object.entries(speechModelOptions)) {
        // Skip null/undefined so absent options never overwrite existing values.
        if (value != null) {
          requestBody[key] = value;
        }
      }
    }
    if (language) {
      warnings.push({
        type: "unsupported-setting",
        setting: "language",
        details: `OpenAI speech models do not support language selection. Language parameter "${language}" was ignored.`
      });
    }
    return {
      requestBody,
      warnings
    };
  }
  async doGenerate(options) {
    // Test hook: `_internal.currentDate` lets callers freeze the timestamp.
    const currentDate = this.config._internal?.currentDate?.() ?? /* @__PURE__ */ new Date();
    const { requestBody, warnings } = await this.getArgs(options);
    const {
      value: audio,
      responseHeaders,
      rawValue: rawResponse
    } = await (0, import_provider_utils10.postJsonToApi)({
      url: this.config.url({
        path: "/audio/speech",
        modelId: this.modelId
      }),
      headers: (0, import_provider_utils10.combineHeaders)(this.config.headers(), options.headers),
      body: requestBody,
      failedResponseHandler: openaiFailedResponseHandler,
      // Audio is returned as raw bytes, not JSON.
      successfulResponseHandler: (0, import_provider_utils10.createBinaryResponseHandler)(),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
    return {
      audio,
      warnings,
      request: {
        body: JSON.stringify(requestBody)
      },
      response: {
        timestamp: currentDate,
        modelId: this.modelId,
        headers: responseHeaders,
        body: rawResponse
      }
    };
  }
};
2137
+
2138
+ // src/responses/openai-responses-language-model.ts
2139
+ var import_provider8 = require("@ai-sdk/provider");
2140
+ var import_provider_utils15 = require("@ai-sdk/provider-utils");
2141
+ var import_v417 = require("zod/v4");
2142
+
2143
+ // src/responses/convert-to-openai-responses-messages.ts
2144
+ var import_provider6 = require("@ai-sdk/provider");
2145
+ var import_provider_utils11 = require("@ai-sdk/provider-utils");
2146
+ var import_v414 = require("zod/v4");
2147
+ var import_provider_utils12 = require("@ai-sdk/provider-utils");
2148
// Returns true when `data` looks like an uploaded-file id, i.e. it starts with
// one of the configured file-id prefixes. No prefixes configured => never a file id.
function isFileId(data, prefixes) {
  if (!prefixes) return false;
  for (const prefix of prefixes) {
    if (data.startsWith(prefix)) {
      return true;
    }
  }
  return false;
}
2152
/**
 * Converts an AI SDK prompt into the `input` item list of the OpenAI
 * Responses API. Returns the converted items plus warnings for content
 * that had to be dropped or could not be represented.
 *
 * @param prompt - AI SDK prompt (array of {role, content} messages).
 * @param systemMessageMode - how system messages are sent: as "system",
 *   as "developer", or removed entirely ("remove").
 * @param fileIdPrefixes - prefixes identifying uploaded-file ids (see isFileId).
 * @returns {Promise<{messages: Array, warnings: Array}>}
 * @throws UnsupportedFunctionalityError for unsupported file media types.
 */
async function convertToOpenAIResponsesMessages({
  prompt,
  systemMessageMode,
  fileIdPrefixes
}) {
  // _a.._f are transpiler temporaries for optional-chaining lookups below.
  var _a, _b, _c, _d, _e, _f;
  const messages = [];
  const warnings = [];
  for (const { role, content } of prompt) {
    switch (role) {
      case "system": {
        // System content is plain text; mode decides the wire-level role.
        switch (systemMessageMode) {
          case "system": {
            messages.push({ role: "system", content });
            break;
          }
          case "developer": {
            messages.push({ role: "developer", content });
            break;
          }
          case "remove": {
            warnings.push({
              type: "other",
              message: "system messages are removed for this model"
            });
            break;
          }
          default: {
            const _exhaustiveCheck = systemMessageMode;
            throw new Error(
              `Unsupported system message mode: ${_exhaustiveCheck}`
            );
          }
        }
        break;
      }
      case "user": {
        // User parts map to input_text / input_image / input_file items.
        messages.push({
          role: "user",
          content: content.map((part, index) => {
            var _a2, _b2, _c2;
            switch (part.type) {
              case "text": {
                return { type: "input_text", text: part.text };
              }
              case "file": {
                if (part.mediaType.startsWith("image/")) {
                  // "image/*" is a wildcard; default it to JPEG for the data URL.
                  const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
                  // Priority: remote URL > uploaded file id > inline base64 data URL.
                  return {
                    type: "input_image",
                    ...part.data instanceof URL ? { image_url: part.data.toString() } : typeof part.data === "string" && isFileId(part.data, fileIdPrefixes) ? { file_id: part.data } : {
                      image_url: `data:${mediaType};base64,${(0, import_provider_utils12.convertToBase64)(part.data)}`
                    },
                    detail: (_b2 = (_a2 = part.providerOptions) == null ? void 0 : _a2.openai) == null ? void 0 : _b2.imageDetail
                  };
                } else if (part.mediaType === "application/pdf") {
                  if (part.data instanceof URL) {
                    return {
                      type: "input_file",
                      file_url: part.data.toString()
                    };
                  }
                  // Uploaded file id or inline base64 PDF with a synthesized filename.
                  return {
                    type: "input_file",
                    ...typeof part.data === "string" && isFileId(part.data, fileIdPrefixes) ? { file_id: part.data } : {
                      filename: (_c2 = part.filename) != null ? _c2 : `part-${index}.pdf`,
                      file_data: `data:application/pdf;base64,${(0, import_provider_utils12.convertToBase64)(part.data)}`
                    }
                  };
                } else {
                  // Only images and PDFs are supported as user file parts.
                  throw new import_provider6.UnsupportedFunctionalityError({
                    functionality: `file part media type ${part.mediaType}`
                  });
                }
              }
            }
          })
        });
        break;
      }
      case "assistant": {
        // Reasoning parts with the same item id are merged into one reasoning
        // message; this map tracks messages already pushed, keyed by item id.
        const reasoningMessages = {};
        for (const part of content) {
          switch (part.type) {
            case "text": {
              messages.push({
                role: "assistant",
                content: [{ type: "output_text", text: part.text }],
                // Reuse the original Responses item id when round-tripping.
                id: (_c = (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.itemId) != null ? _c : void 0
              });
              break;
            }
            case "tool-call": {
              // Provider-executed calls (built-in tools) are replayed by the
              // API itself and must not be re-sent.
              if (part.providerExecuted) {
                break;
              }
              messages.push({
                type: "function_call",
                call_id: part.toolCallId,
                name: part.toolName,
                arguments: JSON.stringify(part.input),
                id: (_f = (_e = (_d = part.providerOptions) == null ? void 0 : _d.openai) == null ? void 0 : _e.itemId) != null ? _f : void 0
              });
              break;
            }
            case "tool-result": {
              warnings.push({
                type: "other",
                message: `tool result parts in assistant messages are not supported for OpenAI responses`
              });
              break;
            }
            case "reasoning": {
              const providerOptions = await (0, import_provider_utils11.parseProviderOptions)({
                provider: "openai",
                providerOptions: part.providerOptions,
                schema: openaiResponsesReasoningProviderOptionsSchema
              });
              const reasoningId = providerOptions == null ? void 0 : providerOptions.itemId;
              if (reasoningId != null) {
                const existingReasoningMessage = reasoningMessages[reasoningId];
                const summaryParts = [];
                if (part.text.length > 0) {
                  summaryParts.push({ type: "summary_text", text: part.text });
                } else if (existingReasoningMessage !== void 0) {
                  // Empty text can start a reasoning message but not extend one.
                  warnings.push({
                    type: "other",
                    message: `Cannot append empty reasoning part to existing reasoning sequence. Skipping reasoning part: ${JSON.stringify(part)}.`
                  });
                }
                if (existingReasoningMessage === void 0) {
                  reasoningMessages[reasoningId] = {
                    type: "reasoning",
                    id: reasoningId,
                    encrypted_content: providerOptions == null ? void 0 : providerOptions.reasoningEncryptedContent,
                    summary: summaryParts
                  };
                  messages.push(reasoningMessages[reasoningId]);
                } else {
                  // Append to the already-pushed message (shared reference).
                  existingReasoningMessage.summary.push(...summaryParts);
                }
              } else {
                // Reasoning from other providers carries no OpenAI item id.
                warnings.push({
                  type: "other",
                  message: `Non-OpenAI reasoning parts are not supported. Skipping reasoning part: ${JSON.stringify(part)}.`
                });
              }
              break;
            }
          }
        }
        break;
      }
      case "tool": {
        // Tool results become function_call_output items; non-text outputs
        // are JSON-encoded.
        for (const part of content) {
          const output = part.output;
          let contentValue;
          switch (output.type) {
            case "text":
            case "error-text":
              contentValue = output.value;
              break;
            case "content":
            case "json":
            case "error-json":
              contentValue = JSON.stringify(output.value);
              break;
          }
          messages.push({
            type: "function_call_output",
            call_id: part.toolCallId,
            output: contentValue
          });
        }
        break;
      }
      default: {
        const _exhaustiveCheck = role;
        throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
      }
    }
  }
  return { messages, warnings };
}
2336
// Provider options carried on reasoning parts: the Responses item id used to
// merge/round-trip reasoning, and the encrypted reasoning payload.
var openaiResponsesReasoningProviderOptionsSchema = import_v414.z.object({
  itemId: import_v414.z.string().nullish(),
  reasoningEncryptedContent: import_v414.z.string().nullish()
});
2340
+
2341
+ // src/responses/map-openai-responses-finish-reason.ts
2342
/**
 * Maps the Responses API `incomplete_details.reason` to the AI SDK finish
 * reason. A missing reason means normal completion ("stop"), which is
 * reported as "tool-calls" when the response contained a function call.
 */
function mapOpenAIResponseFinishReason({
  finishReason,
  hasFunctionCall
}) {
  if (finishReason == null) {
    return hasFunctionCall ? "tool-calls" : "stop";
  }
  if (finishReason === "max_output_tokens") {
    return "length";
  }
  if (finishReason === "content_filter") {
    return "content-filter";
  }
  // Unknown reasons: still surface tool calls when present.
  return hasFunctionCall ? "tool-calls" : "unknown";
}
2358
+
2359
+ // src/responses/openai-responses-prepare-tools.ts
2360
+ var import_provider7 = require("@ai-sdk/provider");
2361
+
2362
+ // src/tool/code-interpreter.ts
2363
+ var import_provider_utils13 = require("@ai-sdk/provider-utils");
2364
+ var import_v415 = require("zod/v4");
2365
// Input of a code_interpreter call: the executed code (may be absent) and the
// container it ran in.
var codeInterpreterInputSchema = import_v415.z.object({
  code: import_v415.z.string().nullish(),
  containerId: import_v415.z.string()
});
// Execution results: interleaved log output and generated image URLs.
var codeInterpreterOutputSchema = import_v415.z.object({
  outputs: import_v415.z.array(
    import_v415.z.discriminatedUnion("type", [
      import_v415.z.object({ type: import_v415.z.literal("logs"), logs: import_v415.z.string() }),
      import_v415.z.object({ type: import_v415.z.literal("image"), url: import_v415.z.string() })
    ])
  ).nullish()
});
// Tool configuration: an existing container id, or an auto-created container
// optionally seeded with uploaded files.
var codeInterpreterArgsSchema = import_v415.z.object({
  container: import_v415.z.union([
    import_v415.z.string(),
    import_v415.z.object({
      fileIds: import_v415.z.array(import_v415.z.string()).optional()
    })
  ]).optional()
});
// Factory for the provider-defined code interpreter tool definition.
var codeInterpreterToolFactory = (0, import_provider_utils13.createProviderDefinedToolFactoryWithOutputSchema)({
  id: "openai.code_interpreter",
  name: "code_interpreter",
  inputSchema: codeInterpreterInputSchema,
  outputSchema: codeInterpreterOutputSchema
});
2391
+
2392
+ // src/tool/web-search.ts
2393
+ var import_provider_utils14 = require("@ai-sdk/provider-utils");
2394
+ var import_v416 = require("zod/v4");
2395
// Configuration for the web_search tool: domain filters, how much search
// context to retrieve, and an approximate user location for localization.
var webSearchArgsSchema = import_v416.z.object({
  filters: import_v416.z.object({
    allowedDomains: import_v416.z.array(import_v416.z.string()).optional()
  }).optional(),
  searchContextSize: import_v416.z.enum(["low", "medium", "high"]).optional(),
  userLocation: import_v416.z.object({
    type: import_v416.z.literal("approximate"),
    country: import_v416.z.string().optional(),
    city: import_v416.z.string().optional(),
    region: import_v416.z.string().optional(),
    timezone: import_v416.z.string().optional()
  }).optional()
});
// Factory for the provider-defined web search tool. The input is the action
// the model performed: a search query, opening a page, or finding a pattern.
var webSearchToolFactory = (0, import_provider_utils14.createProviderDefinedToolFactory)({
  id: "openai.web_search",
  name: "web_search",
  inputSchema: import_v416.z.object({
    action: import_v416.z.discriminatedUnion("type", [
      import_v416.z.object({
        type: import_v416.z.literal("search"),
        query: import_v416.z.string().nullish()
      }),
      import_v416.z.object({
        type: import_v416.z.literal("open_page"),
        url: import_v416.z.string()
      }),
      import_v416.z.object({
        type: import_v416.z.literal("find"),
        url: import_v416.z.string(),
        pattern: import_v416.z.string()
      })
    ]).nullish()
  })
});
2429
+
2430
+ // src/responses/openai-responses-prepare-tools.ts
2431
/**
 * Translates AI SDK tool definitions and tool choice into the Responses API
 * `tools` / `tool_choice` request fields.
 *
 * @param tools - AI SDK tools (function tools and provider-defined tools).
 * @param toolChoice - AI SDK tool choice, or null/undefined for default.
 * @param strictJsonSchema - whether function parameters use strict JSON schema.
 * @returns {{tools, toolChoice, toolWarnings}} with warnings for unsupported tools.
 * @throws UnsupportedFunctionalityError for unknown tool choice types.
 */
function prepareResponsesTools({
  tools,
  toolChoice,
  strictJsonSchema
}) {
  // Treat an empty tool list the same as no tools at all.
  tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
  const toolWarnings = [];
  if (tools == null) {
    return { tools: void 0, toolChoice: void 0, toolWarnings };
  }
  const openaiTools = [];
  for (const tool of tools) {
    switch (tool.type) {
      case "function":
        openaiTools.push({
          type: "function",
          name: tool.name,
          description: tool.description,
          parameters: tool.inputSchema,
          strict: strictJsonSchema
        });
        break;
      case "provider-defined": {
        // Built-in OpenAI tools: validate args and map to wire format.
        switch (tool.id) {
          case "openai.file_search": {
            const args = fileSearchArgsSchema.parse(tool.args);
            openaiTools.push({
              type: "file_search",
              vector_store_ids: args.vectorStoreIds,
              max_num_results: args.maxNumResults,
              ranking_options: args.ranking ? { ranker: args.ranking.ranker } : void 0,
              filters: args.filters
            });
            break;
          }
          case "openai.web_search_preview": {
            const args = webSearchPreviewArgsSchema.parse(tool.args);
            openaiTools.push({
              type: "web_search_preview",
              search_context_size: args.searchContextSize,
              user_location: args.userLocation
            });
            break;
          }
          case "openai.web_search": {
            const args = webSearchArgsSchema.parse(tool.args);
            openaiTools.push({
              type: "web_search",
              filters: args.filters != null ? { allowed_domains: args.filters.allowedDomains } : void 0,
              search_context_size: args.searchContextSize,
              user_location: args.userLocation
            });
            break;
          }
          case "openai.code_interpreter": {
            const args = codeInterpreterArgsSchema.parse(tool.args);
            openaiTools.push({
              type: "code_interpreter",
              // No container configured => let the API auto-create one;
              // a string is an existing container id; otherwise auto + files.
              container: args.container == null ? { type: "auto", file_ids: void 0 } : typeof args.container === "string" ? args.container : { type: "auto", file_ids: args.container.fileIds }
            });
            break;
          }
          default: {
            toolWarnings.push({ type: "unsupported-tool", tool });
            break;
          }
        }
        break;
      }
      default:
        toolWarnings.push({ type: "unsupported-tool", tool });
        break;
    }
  }
  if (toolChoice == null) {
    return { tools: openaiTools, toolChoice: void 0, toolWarnings };
  }
  const type = toolChoice.type;
  switch (type) {
    case "auto":
    case "none":
    case "required":
      return { tools: openaiTools, toolChoice: type, toolWarnings };
    case "tool":
      // Built-in tools are selected by bare type; functions by name.
      return {
        tools: openaiTools,
        toolChoice: toolChoice.toolName === "code_interpreter" || toolChoice.toolName === "file_search" || toolChoice.toolName === "web_search_preview" || toolChoice.toolName === "web_search" ? { type: toolChoice.toolName } : { type: "function", name: toolChoice.toolName },
        toolWarnings
      };
    default: {
      const _exhaustiveCheck = type;
      throw new import_provider7.UnsupportedFunctionalityError({
        functionality: `tool choice type: ${_exhaustiveCheck}`
      });
    }
  }
}
2528
+
2529
+ // src/responses/openai-responses-language-model.ts
2530
// Completed web_search_call output item from the Responses API, including the
// action that was performed (search / open_page / find), when reported.
var webSearchCallItem = import_v417.z.object({
  type: import_v417.z.literal("web_search_call"),
  id: import_v417.z.string(),
  status: import_v417.z.string(),
  action: import_v417.z.discriminatedUnion("type", [
    import_v417.z.object({
      type: import_v417.z.literal("search"),
      query: import_v417.z.string().nullish()
    }),
    import_v417.z.object({
      type: import_v417.z.literal("open_page"),
      url: import_v417.z.string()
    }),
    import_v417.z.object({
      type: import_v417.z.literal("find"),
      url: import_v417.z.string(),
      pattern: import_v417.z.string()
    })
  ]).nullish()
});
2550
// Completed code_interpreter_call output item: the executed code, the
// container it ran in, and any log/image outputs it produced.
var codeInterpreterCallItem = import_v417.z.object({
  type: import_v417.z.literal("code_interpreter_call"),
  id: import_v417.z.string(),
  code: import_v417.z.string().nullable(),
  container_id: import_v417.z.string(),
  outputs: import_v417.z.array(
    import_v417.z.discriminatedUnion("type", [
      import_v417.z.object({ type: import_v417.z.literal("logs"), logs: import_v417.z.string() }),
      import_v417.z.object({ type: import_v417.z.literal("image"), url: import_v417.z.string() })
    ])
  ).nullable()
});
2562
// Maximum `top_logprobs` the Responses API accepts; used when the provider
// option `logprobs: true` is given without an explicit count.
var TOP_LOGPROBS_MAX = 20;
// Per-token logprobs as delivered via the message.output_text.logprobs include.
var LOGPROBS_SCHEMA = import_v417.z.array(
  import_v417.z.object({
    token: import_v417.z.string(),
    logprob: import_v417.z.number(),
    top_logprobs: import_v417.z.array(
      import_v417.z.object({
        token: import_v417.z.string(),
        logprob: import_v417.z.number()
      })
    )
  })
);
2575
+ var OpenAIResponsesLanguageModel = class {
2576
  // Language model for the OpenAI Responses API (spec v2).
  constructor(modelId, config) {
    this.specificationVersion = "v2";
    // URL patterns the API can fetch itself, so the SDK need not download them.
    this.supportedUrls = {
      "image/*": [/^https?:\/\/.*$/],
      "application/pdf": [/^https?:\/\/.*$/]
    };
    this.modelId = modelId;
    this.config = config;
  }
  get provider() {
    return this.config.provider;
  }
2588
  /**
   * Builds the /responses request body from AI SDK call options.
   * Emits warnings for settings the Responses API does not support, converts
   * the prompt, applies provider options, and prepares tools.
   * Returns { webSearchToolName, args, warnings }.
   */
  async getArgs({
    maxOutputTokens,
    temperature,
    stopSequences,
    topP,
    topK,
    presencePenalty,
    frequencyPenalty,
    seed,
    prompt,
    providerOptions,
    tools,
    toolChoice,
    responseFormat
  }) {
    // _a.._d are transpiler temporaries for optional-chaining lookups below.
    var _a, _b, _c, _d;
    const warnings = [];
    const modelConfig = getResponsesModelConfig(this.modelId);
    // These sampling settings have no Responses API equivalent.
    if (topK != null) {
      warnings.push({ type: "unsupported-setting", setting: "topK" });
    }
    if (seed != null) {
      warnings.push({ type: "unsupported-setting", setting: "seed" });
    }
    if (presencePenalty != null) {
      warnings.push({
        type: "unsupported-setting",
        setting: "presencePenalty"
      });
    }
    if (frequencyPenalty != null) {
      warnings.push({
        type: "unsupported-setting",
        setting: "frequencyPenalty"
      });
    }
    if (stopSequences != null) {
      warnings.push({ type: "unsupported-setting", setting: "stopSequences" });
    }
    const { messages, warnings: messageWarnings } = await convertToOpenAIResponsesMessages({
      prompt,
      systemMessageMode: modelConfig.systemMessageMode,
      fileIdPrefixes: this.config.fileIdPrefixes
    });
    warnings.push(...messageWarnings);
    const openaiOptions = await (0, import_provider_utils15.parseProviderOptions)({
      provider: "openai",
      providerOptions,
      schema: openaiResponsesProviderOptionsSchema
    });
    const strictJsonSchema = (_a = openaiOptions == null ? void 0 : openaiOptions.strictJsonSchema) != null ? _a : false;
    let include = openaiOptions == null ? void 0 : openaiOptions.include;
    // logprobs: true selects the API maximum; a number is used as-is.
    const topLogprobs = typeof (openaiOptions == null ? void 0 : openaiOptions.logprobs) === "number" ? openaiOptions == null ? void 0 : openaiOptions.logprobs : (openaiOptions == null ? void 0 : openaiOptions.logprobs) === true ? TOP_LOGPROBS_MAX : void 0;
    // Each requested feature appends its corresponding `include` entry.
    include = topLogprobs ? Array.isArray(include) ? [...include, "message.output_text.logprobs"] : ["message.output_text.logprobs"] : include;
    const webSearchToolName = (_b = tools == null ? void 0 : tools.find(
      (tool) => tool.type === "provider-defined" && (tool.id === "openai.web_search" || tool.id === "openai.web_search_preview")
    )) == null ? void 0 : _b.name;
    include = webSearchToolName ? Array.isArray(include) ? [...include, "web_search_call.action.sources"] : ["web_search_call.action.sources"] : include;
    const codeInterpreterToolName = (_c = tools == null ? void 0 : tools.find(
      (tool) => tool.type === "provider-defined" && tool.id === "openai.code_interpreter"
    )) == null ? void 0 : _c.name;
    include = codeInterpreterToolName ? Array.isArray(include) ? [...include, "code_interpreter_call.outputs"] : ["code_interpreter_call.outputs"] : include;
    const baseArgs = {
      model: this.modelId,
      input: messages,
      temperature,
      top_p: topP,
      max_output_tokens: maxOutputTokens,
      // `text` is only sent when JSON output or a verbosity override is requested.
      ...((responseFormat == null ? void 0 : responseFormat.type) === "json" || (openaiOptions == null ? void 0 : openaiOptions.textVerbosity)) && {
        text: {
          ...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
            format: responseFormat.schema != null ? {
              type: "json_schema",
              strict: strictJsonSchema,
              name: (_d = responseFormat.name) != null ? _d : "response",
              description: responseFormat.description,
              schema: responseFormat.schema
            } : { type: "json_object" }
          },
          ...(openaiOptions == null ? void 0 : openaiOptions.textVerbosity) && {
            verbosity: openaiOptions.textVerbosity
          }
        }
      },
      // provider options:
      metadata: openaiOptions == null ? void 0 : openaiOptions.metadata,
      parallel_tool_calls: openaiOptions == null ? void 0 : openaiOptions.parallelToolCalls,
      previous_response_id: openaiOptions == null ? void 0 : openaiOptions.previousResponseId,
      store: openaiOptions == null ? void 0 : openaiOptions.store,
      user: openaiOptions == null ? void 0 : openaiOptions.user,
      instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
      service_tier: openaiOptions == null ? void 0 : openaiOptions.serviceTier,
      include,
      prompt_cache_key: openaiOptions == null ? void 0 : openaiOptions.promptCacheKey,
      safety_identifier: openaiOptions == null ? void 0 : openaiOptions.safetyIdentifier,
      top_logprobs: topLogprobs,
      // model-specific settings:
      ...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
        reasoning: {
          ...(openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
            effort: openaiOptions.reasoningEffort
          },
          ...(openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null && {
            summary: openaiOptions.reasoningSummary
          }
        }
      },
      ...modelConfig.requiredAutoTruncation && {
        truncation: "auto"
      }
    };
    if (modelConfig.isReasoningModel) {
      // Reasoning models reject sampling parameters; strip them with warnings.
      if (baseArgs.temperature != null) {
        baseArgs.temperature = void 0;
        warnings.push({
          type: "unsupported-setting",
          setting: "temperature",
          details: "temperature is not supported for reasoning models"
        });
      }
      if (baseArgs.top_p != null) {
        baseArgs.top_p = void 0;
        warnings.push({
          type: "unsupported-setting",
          setting: "topP",
          details: "topP is not supported for reasoning models"
        });
      }
    } else {
      // Conversely, reasoning options are meaningless on non-reasoning models.
      if ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null) {
        warnings.push({
          type: "unsupported-setting",
          setting: "reasoningEffort",
          details: "reasoningEffort is not supported for non-reasoning models"
        });
      }
      if ((openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) {
        warnings.push({
          type: "unsupported-setting",
          setting: "reasoningSummary",
          details: "reasoningSummary is not supported for non-reasoning models"
        });
      }
    }
    // Service tiers are only valid on models that support them; drop otherwise.
    if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "flex" && !modelConfig.supportsFlexProcessing) {
      warnings.push({
        type: "unsupported-setting",
        setting: "serviceTier",
        details: "flex processing is only available for o3, o4-mini, and gpt-5 models"
      });
      delete baseArgs.service_tier;
    }
    if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "priority" && !modelConfig.supportsPriorityProcessing) {
      warnings.push({
        type: "unsupported-setting",
        setting: "serviceTier",
        details: "priority processing is only available for supported models (gpt-4, gpt-5, gpt-5-mini, o3, o4-mini) and requires Enterprise access. gpt-5-nano is not supported"
      });
      delete baseArgs.service_tier;
    }
    const {
      tools: openaiTools,
      toolChoice: openaiToolChoice,
      toolWarnings
    } = prepareResponsesTools({
      tools,
      toolChoice,
      strictJsonSchema
    });
    return {
      webSearchToolName,
      args: {
        ...baseArgs,
        tools: openaiTools,
        tool_choice: openaiToolChoice
      },
      warnings: [...warnings, ...toolWarnings]
    };
  }
2767
  /**
   * Non-streaming generation call against the OpenAI Responses API.
   *
   * Builds the request body via `getArgs`, POSTs it to `/responses`,
   * validates the JSON response against an inline zod schema, and converts
   * each `output` item into provider-agnostic content parts (reasoning,
   * text, sources, tool calls / tool results).
   *
   * @param options language-model call options (prompt, tools, providerOptions, …)
   * @returns content parts, finish reason, usage, request/response metadata, warnings
   * @throws import_provider8.APICallError when the response body carries an `error` object
   */
  async doGenerate(options) {
    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q;
    const {
      args: body,
      warnings,
      webSearchToolName
    } = await this.getArgs(options);
    const url = this.config.url({
      path: "/responses",
      modelId: this.modelId
    });
    const {
      responseHeaders,
      value: response,
      rawValue: rawResponse
    } = await (0, import_provider_utils15.postJsonToApi)({
      url,
      headers: (0, import_provider_utils15.combineHeaders)(this.config.headers(), options.headers),
      body,
      failedResponseHandler: openaiFailedResponseHandler,
      // Inline schema for the subset of the Responses API payload this
      // method consumes; unknown output item types would fail validation.
      successfulResponseHandler: (0, import_provider_utils15.createJsonResponseHandler)(
        import_v417.z.object({
          id: import_v417.z.string(),
          created_at: import_v417.z.number(),
          error: import_v417.z.object({
            code: import_v417.z.string(),
            message: import_v417.z.string()
          }).nullish(),
          model: import_v417.z.string(),
          output: import_v417.z.array(
            import_v417.z.discriminatedUnion("type", [
              import_v417.z.object({
                type: import_v417.z.literal("message"),
                role: import_v417.z.literal("assistant"),
                id: import_v417.z.string(),
                content: import_v417.z.array(
                  import_v417.z.object({
                    type: import_v417.z.literal("output_text"),
                    text: import_v417.z.string(),
                    logprobs: LOGPROBS_SCHEMA.nullish(),
                    annotations: import_v417.z.array(
                      import_v417.z.discriminatedUnion("type", [
                        import_v417.z.object({
                          type: import_v417.z.literal("url_citation"),
                          start_index: import_v417.z.number(),
                          end_index: import_v417.z.number(),
                          url: import_v417.z.string(),
                          title: import_v417.z.string()
                        }),
                        import_v417.z.object({
                          type: import_v417.z.literal("file_citation"),
                          file_id: import_v417.z.string(),
                          filename: import_v417.z.string().nullish(),
                          index: import_v417.z.number().nullish(),
                          start_index: import_v417.z.number().nullish(),
                          end_index: import_v417.z.number().nullish(),
                          quote: import_v417.z.string().nullish()
                        }),
                        // Accepted but not mapped to a content part below.
                        import_v417.z.object({
                          type: import_v417.z.literal("container_file_citation")
                        })
                      ])
                    )
                  })
                )
              }),
              codeInterpreterCallItem,
              import_v417.z.object({
                type: import_v417.z.literal("function_call"),
                call_id: import_v417.z.string(),
                name: import_v417.z.string(),
                arguments: import_v417.z.string(),
                id: import_v417.z.string()
              }),
              webSearchCallItem,
              import_v417.z.object({
                type: import_v417.z.literal("computer_call"),
                id: import_v417.z.string(),
                status: import_v417.z.string().optional()
              }),
              import_v417.z.object({
                type: import_v417.z.literal("file_search_call"),
                id: import_v417.z.string(),
                status: import_v417.z.string().optional(),
                queries: import_v417.z.array(import_v417.z.string()).nullish(),
                results: import_v417.z.array(
                  import_v417.z.object({
                    attributes: import_v417.z.object({
                      file_id: import_v417.z.string(),
                      filename: import_v417.z.string(),
                      score: import_v417.z.number(),
                      text: import_v417.z.string()
                    })
                  })
                ).nullish()
              }),
              import_v417.z.object({
                type: import_v417.z.literal("reasoning"),
                id: import_v417.z.string(),
                encrypted_content: import_v417.z.string().nullish(),
                summary: import_v417.z.array(
                  import_v417.z.object({
                    type: import_v417.z.literal("summary_text"),
                    text: import_v417.z.string()
                  })
                )
              })
            ])
          ),
          service_tier: import_v417.z.string().nullish(),
          incomplete_details: import_v417.z.object({ reason: import_v417.z.string() }).nullable(),
          usage: usageSchema2
        })
      ),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
    // The API can return HTTP 200 with an embedded error object; surface it
    // as an APICallError (statusCode hard-coded to 400, not retryable).
    if (response.error) {
      throw new import_provider8.APICallError({
        message: response.error.message,
        url,
        requestBodyValues: body,
        statusCode: 400,
        responseHeaders,
        responseBody: rawResponse,
        isRetryable: false
      });
    }
    const content = [];
    const logprobs = [];
    let hasFunctionCall = false;
    // Map each Responses API output item to one or more content parts.
    for (const part of response.output) {
      switch (part.type) {
        case "reasoning": {
          // Emit at least one reasoning part per item, even when the
          // summary is empty, so the item id / encrypted content survive.
          if (part.summary.length === 0) {
            part.summary.push({ type: "summary_text", text: "" });
          }
          for (const summary of part.summary) {
            content.push({
              type: "reasoning",
              text: summary.text,
              providerMetadata: {
                openai: {
                  itemId: part.id,
                  reasoningEncryptedContent: (_a = part.encrypted_content) != null ? _a : null
                }
              }
            });
          }
          break;
        }
        case "message": {
          for (const contentPart of part.content) {
            // Collect logprobs only when the caller opted in via provider options.
            if (((_c = (_b = options.providerOptions) == null ? void 0 : _b.openai) == null ? void 0 : _c.logprobs) && contentPart.logprobs) {
              logprobs.push(contentPart.logprobs);
            }
            content.push({
              type: "text",
              text: contentPart.text,
              providerMetadata: {
                openai: {
                  itemId: part.id
                }
              }
            });
            // Annotations become source parts; ids come from the configured
            // generateId when present, otherwise the provider-utils default.
            for (const annotation of contentPart.annotations) {
              if (annotation.type === "url_citation") {
                content.push({
                  type: "source",
                  sourceType: "url",
                  id: (_f = (_e = (_d = this.config).generateId) == null ? void 0 : _e.call(_d)) != null ? _f : (0, import_provider_utils15.generateId)(),
                  url: annotation.url,
                  title: annotation.title
                });
              } else if (annotation.type === "file_citation") {
                content.push({
                  type: "source",
                  sourceType: "document",
                  id: (_i = (_h = (_g = this.config).generateId) == null ? void 0 : _h.call(_g)) != null ? _i : (0, import_provider_utils15.generateId)(),
                  mediaType: "text/plain",
                  title: (_k = (_j = annotation.quote) != null ? _j : annotation.filename) != null ? _k : "Document",
                  filename: (_l = annotation.filename) != null ? _l : annotation.file_id
                });
              }
            }
          }
          break;
        }
        case "function_call": {
          // Client-executed tool call; influences the finish reason below.
          hasFunctionCall = true;
          content.push({
            type: "tool-call",
            toolCallId: part.call_id,
            toolName: part.name,
            input: part.arguments,
            providerMetadata: {
              openai: {
                itemId: part.id
              }
            }
          });
          break;
        }
        case "web_search_call": {
          // Provider-executed: emit both the call and its result.
          content.push({
            type: "tool-call",
            toolCallId: part.id,
            toolName: webSearchToolName != null ? webSearchToolName : "web_search",
            input: JSON.stringify({ action: part.action }),
            providerExecuted: true
          });
          content.push({
            type: "tool-result",
            toolCallId: part.id,
            toolName: webSearchToolName != null ? webSearchToolName : "web_search",
            result: { status: part.status },
            providerExecuted: true
          });
          break;
        }
        case "computer_call": {
          content.push({
            type: "tool-call",
            toolCallId: part.id,
            toolName: "computer_use",
            input: "",
            providerExecuted: true
          });
          content.push({
            type: "tool-result",
            toolCallId: part.id,
            toolName: "computer_use",
            result: {
              type: "computer_use_tool_result",
              status: part.status || "completed"
            },
            providerExecuted: true
          });
          break;
        }
        case "file_search_call": {
          content.push({
            type: "tool-call",
            toolCallId: part.id,
            toolName: "file_search",
            input: "",
            providerExecuted: true
          });
          content.push({
            type: "tool-result",
            toolCallId: part.id,
            toolName: "file_search",
            result: {
              type: "file_search_tool_result",
              status: part.status || "completed",
              ...part.queries && { queries: part.queries },
              ...part.results && { results: part.results }
            },
            providerExecuted: true
          });
          break;
        }
        case "code_interpreter_call": {
          content.push({
            type: "tool-call",
            toolCallId: part.id,
            toolName: "code_interpreter",
            input: JSON.stringify({
              code: part.code,
              containerId: part.container_id
            }),
            providerExecuted: true
          });
          content.push({
            type: "tool-result",
            toolCallId: part.id,
            toolName: "code_interpreter",
            result: {
              outputs: part.outputs
            },
            providerExecuted: true
          });
          break;
        }
      }
    }
    const providerMetadata = {
      openai: { responseId: response.id }
    };
    if (logprobs.length > 0) {
      providerMetadata.openai.logprobs = logprobs;
    }
    if (typeof response.service_tier === "string") {
      providerMetadata.openai.serviceTier = response.service_tier;
    }
    return {
      content,
      finishReason: mapOpenAIResponseFinishReason({
        finishReason: (_m = response.incomplete_details) == null ? void 0 : _m.reason,
        hasFunctionCall
      }),
      usage: {
        inputTokens: response.usage.input_tokens,
        outputTokens: response.usage.output_tokens,
        totalTokens: response.usage.input_tokens + response.usage.output_tokens,
        reasoningTokens: (_o = (_n = response.usage.output_tokens_details) == null ? void 0 : _n.reasoning_tokens) != null ? _o : void 0,
        cachedInputTokens: (_q = (_p = response.usage.input_tokens_details) == null ? void 0 : _p.cached_tokens) != null ? _q : void 0
      },
      request: { body },
      response: {
        id: response.id,
        timestamp: new Date(response.created_at * 1e3),
        modelId: response.model,
        headers: responseHeaders,
        body: rawResponse
      },
      providerMetadata,
      warnings
    };
  }
3087
  /**
   * Streaming generation call against the OpenAI Responses API.
   *
   * POSTs the request with `stream: true`, parses the SSE event stream with
   * `openaiResponsesChunkSchema`, and pipes it through a TransformStream that
   * rewrites Responses API events into AI-SDK stream parts. Mutable state
   * captured by the transformer tracks finish reason, usage, logprobs,
   * in-flight tool calls (by output index), and active reasoning items.
   *
   * @param options language-model call options (prompt, tools, providerOptions, …)
   * @returns `{ stream, request, response }` where `stream` emits AI-SDK parts
   */
  async doStream(options) {
    const {
      args: body,
      warnings,
      webSearchToolName
    } = await this.getArgs(options);
    const { responseHeaders, value: response } = await (0, import_provider_utils15.postJsonToApi)({
      url: this.config.url({
        path: "/responses",
        modelId: this.modelId
      }),
      headers: (0, import_provider_utils15.combineHeaders)(this.config.headers(), options.headers),
      body: {
        ...body,
        stream: true
      },
      failedResponseHandler: openaiFailedResponseHandler,
      successfulResponseHandler: (0, import_provider_utils15.createEventSourceResponseHandler)(
        openaiResponsesChunkSchema
      ),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
    // `self` preserves the instance for use inside transform(), where `this`
    // is the TransformStream transformer object.
    const self = this;
    let finishReason = "unknown";
    const usage = {
      inputTokens: void 0,
      outputTokens: void 0,
      totalTokens: void 0
    };
    const logprobs = [];
    let responseId = null;
    // output_index -> { toolName, toolCallId } for calls whose arguments are
    // still being streamed in via delta events.
    const ongoingToolCalls = {};
    let hasFunctionCall = false;
    // reasoning item id -> { encryptedContent, summaryParts } for items that
    // have started but not yet finished.
    const activeReasoning = {};
    let serviceTier;
    return {
      stream: response.pipeThrough(
        new TransformStream({
          start(controller) {
            controller.enqueue({ type: "stream-start", warnings });
          },
          transform(chunk, controller) {
            var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u;
            if (options.includeRawChunks) {
              controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
            }
            // Schema-validation failures end the stream with an error part.
            if (!chunk.success) {
              finishReason = "error";
              controller.enqueue({ type: "error", error: chunk.error });
              return;
            }
            const value = chunk.value;
            // --- output item opened -----------------------------------------
            if (isResponseOutputItemAddedChunk(value)) {
              if (value.item.type === "function_call") {
                ongoingToolCalls[value.output_index] = {
                  toolName: value.item.name,
                  toolCallId: value.item.call_id
                };
                controller.enqueue({
                  type: "tool-input-start",
                  id: value.item.call_id,
                  toolName: value.item.name
                });
              } else if (value.item.type === "web_search_call") {
                ongoingToolCalls[value.output_index] = {
                  toolName: webSearchToolName != null ? webSearchToolName : "web_search",
                  toolCallId: value.item.id
                };
                controller.enqueue({
                  type: "tool-input-start",
                  id: value.item.id,
                  toolName: webSearchToolName != null ? webSearchToolName : "web_search"
                });
              } else if (value.item.type === "computer_call") {
                ongoingToolCalls[value.output_index] = {
                  toolName: "computer_use",
                  toolCallId: value.item.id
                };
                controller.enqueue({
                  type: "tool-input-start",
                  id: value.item.id,
                  toolName: "computer_use"
                });
              } else if (value.item.type === "file_search_call") {
                ongoingToolCalls[value.output_index] = {
                  toolName: "file_search",
                  toolCallId: value.item.id
                };
                controller.enqueue({
                  type: "tool-input-start",
                  id: value.item.id,
                  toolName: "file_search"
                });
              } else if (value.item.type === "message") {
                controller.enqueue({
                  type: "text-start",
                  id: value.item.id,
                  providerMetadata: {
                    openai: {
                      itemId: value.item.id
                    }
                  }
                });
              } else if (isResponseOutputItemAddedReasoningChunk(value)) {
                // Only reachable when item.type === "reasoning" (every other
                // item type is handled by the branches above).
                activeReasoning[value.item.id] = {
                  encryptedContent: value.item.encrypted_content,
                  summaryParts: [0]
                };
                controller.enqueue({
                  type: "reasoning-start",
                  // Reasoning part ids are "<itemId>:<summaryIndex>".
                  id: `${value.item.id}:0`,
                  providerMetadata: {
                    openai: {
                      itemId: value.item.id,
                      reasoningEncryptedContent: (_a = value.item.encrypted_content) != null ? _a : null
                    }
                  }
                });
              }
            // --- output item completed --------------------------------------
            } else if (isResponseOutputItemDoneChunk(value)) {
              if (value.item.type === "function_call") {
                ongoingToolCalls[value.output_index] = void 0;
                hasFunctionCall = true;
                controller.enqueue({
                  type: "tool-input-end",
                  id: value.item.call_id
                });
                controller.enqueue({
                  type: "tool-call",
                  toolCallId: value.item.call_id,
                  toolName: value.item.name,
                  input: value.item.arguments,
                  providerMetadata: {
                    openai: {
                      itemId: value.item.id
                    }
                  }
                });
              } else if (value.item.type === "web_search_call") {
                ongoingToolCalls[value.output_index] = void 0;
                controller.enqueue({
                  type: "tool-input-end",
                  id: value.item.id
                });
                // NOTE(review): uses the literal "web_search" here, while the
                // added-branch above and doGenerate use webSearchToolName —
                // confirm whether this divergence is intentional.
                controller.enqueue({
                  type: "tool-call",
                  toolCallId: value.item.id,
                  toolName: "web_search",
                  input: JSON.stringify({ action: value.item.action }),
                  providerExecuted: true
                });
                controller.enqueue({
                  type: "tool-result",
                  toolCallId: value.item.id,
                  toolName: "web_search",
                  result: { status: value.item.status },
                  providerExecuted: true
                });
              } else if (value.item.type === "computer_call") {
                ongoingToolCalls[value.output_index] = void 0;
                controller.enqueue({
                  type: "tool-input-end",
                  id: value.item.id
                });
                controller.enqueue({
                  type: "tool-call",
                  toolCallId: value.item.id,
                  toolName: "computer_use",
                  input: "",
                  providerExecuted: true
                });
                controller.enqueue({
                  type: "tool-result",
                  toolCallId: value.item.id,
                  toolName: "computer_use",
                  result: {
                    type: "computer_use_tool_result",
                    status: value.item.status || "completed"
                  },
                  providerExecuted: true
                });
              } else if (value.item.type === "file_search_call") {
                ongoingToolCalls[value.output_index] = void 0;
                controller.enqueue({
                  type: "tool-input-end",
                  id: value.item.id
                });
                controller.enqueue({
                  type: "tool-call",
                  toolCallId: value.item.id,
                  toolName: "file_search",
                  input: "",
                  providerExecuted: true
                });
                controller.enqueue({
                  type: "tool-result",
                  toolCallId: value.item.id,
                  toolName: "file_search",
                  result: {
                    type: "file_search_tool_result",
                    status: value.item.status || "completed",
                    ...value.item.queries && { queries: value.item.queries },
                    ...value.item.results && { results: value.item.results }
                  },
                  providerExecuted: true
                });
              } else if (value.item.type === "code_interpreter_call") {
                controller.enqueue({
                  type: "tool-call",
                  toolCallId: value.item.id,
                  toolName: "code_interpreter",
                  input: JSON.stringify({
                    code: value.item.code,
                    containerId: value.item.container_id
                  }),
                  providerExecuted: true
                });
                controller.enqueue({
                  type: "tool-result",
                  toolCallId: value.item.id,
                  toolName: "code_interpreter",
                  result: {
                    outputs: value.item.outputs
                  },
                  providerExecuted: true
                });
              } else if (value.item.type === "message") {
                controller.enqueue({
                  type: "text-end",
                  id: value.item.id
                });
              } else if (isResponseOutputItemDoneReasoningChunk(value)) {
                // Close every summary part that was opened for this item.
                const activeReasoningPart = activeReasoning[value.item.id];
                for (const summaryIndex of activeReasoningPart.summaryParts) {
                  controller.enqueue({
                    type: "reasoning-end",
                    id: `${value.item.id}:${summaryIndex}`,
                    providerMetadata: {
                      openai: {
                        itemId: value.item.id,
                        reasoningEncryptedContent: (_b = value.item.encrypted_content) != null ? _b : null
                      }
                    }
                  });
                }
                delete activeReasoning[value.item.id];
              }
            // --- incremental events -----------------------------------------
            } else if (isResponseFunctionCallArgumentsDeltaChunk(value)) {
              const toolCall = ongoingToolCalls[value.output_index];
              if (toolCall != null) {
                controller.enqueue({
                  type: "tool-input-delta",
                  id: toolCall.toolCallId,
                  delta: value.delta
                });
              }
            } else if (isResponseCreatedChunk(value)) {
              responseId = value.response.id;
              controller.enqueue({
                type: "response-metadata",
                id: value.response.id,
                timestamp: new Date(value.response.created_at * 1e3),
                modelId: value.response.model
              });
            } else if (isTextDeltaChunk(value)) {
              controller.enqueue({
                type: "text-delta",
                id: value.item_id,
                delta: value.delta
              });
              // Logprobs are only accumulated when requested via provider options.
              if (((_d = (_c = options.providerOptions) == null ? void 0 : _c.openai) == null ? void 0 : _d.logprobs) && value.logprobs) {
                logprobs.push(value.logprobs);
              }
            } else if (isResponseReasoningSummaryPartAddedChunk(value)) {
              // summary_index 0 was already opened by the reasoning-start above.
              if (value.summary_index > 0) {
                (_e = activeReasoning[value.item_id]) == null ? void 0 : _e.summaryParts.push(
                  value.summary_index
                );
                controller.enqueue({
                  type: "reasoning-start",
                  id: `${value.item_id}:${value.summary_index}`,
                  providerMetadata: {
                    openai: {
                      itemId: value.item_id,
                      reasoningEncryptedContent: (_g = (_f = activeReasoning[value.item_id]) == null ? void 0 : _f.encryptedContent) != null ? _g : null
                    }
                  }
                });
              }
            } else if (isResponseReasoningSummaryTextDeltaChunk(value)) {
              controller.enqueue({
                type: "reasoning-delta",
                id: `${value.item_id}:${value.summary_index}`,
                delta: value.delta,
                providerMetadata: {
                  openai: {
                    itemId: value.item_id
                  }
                }
              });
            // --- terminal events --------------------------------------------
            } else if (isResponseFinishedChunk(value)) {
              finishReason = mapOpenAIResponseFinishReason({
                finishReason: (_h = value.response.incomplete_details) == null ? void 0 : _h.reason,
                hasFunctionCall
              });
              usage.inputTokens = value.response.usage.input_tokens;
              usage.outputTokens = value.response.usage.output_tokens;
              usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens;
              usage.reasoningTokens = (_j = (_i = value.response.usage.output_tokens_details) == null ? void 0 : _i.reasoning_tokens) != null ? _j : void 0;
              usage.cachedInputTokens = (_l = (_k = value.response.usage.input_tokens_details) == null ? void 0 : _k.cached_tokens) != null ? _l : void 0;
              if (typeof value.response.service_tier === "string") {
                serviceTier = value.response.service_tier;
              }
            } else if (isResponseAnnotationAddedChunk(value)) {
              if (value.annotation.type === "url_citation") {
                controller.enqueue({
                  type: "source",
                  sourceType: "url",
                  id: (_o = (_n = (_m = self.config).generateId) == null ? void 0 : _n.call(_m)) != null ? _o : (0, import_provider_utils15.generateId)(),
                  url: value.annotation.url,
                  title: value.annotation.title
                });
              } else if (value.annotation.type === "file_citation") {
                controller.enqueue({
                  type: "source",
                  sourceType: "document",
                  id: (_r = (_q = (_p = self.config).generateId) == null ? void 0 : _q.call(_p)) != null ? _r : (0, import_provider_utils15.generateId)(),
                  mediaType: "text/plain",
                  title: (_t = (_s = value.annotation.quote) != null ? _s : value.annotation.filename) != null ? _t : "Document",
                  filename: (_u = value.annotation.filename) != null ? _u : value.annotation.file_id
                });
              }
            } else if (isErrorChunk(value)) {
              controller.enqueue({ type: "error", error: value });
            }
          },
          flush(controller) {
            // Emit the final finish part with accumulated usage/metadata.
            const providerMetadata = {
              openai: {
                responseId
              }
            };
            if (logprobs.length > 0) {
              providerMetadata.openai.logprobs = logprobs;
            }
            if (serviceTier !== void 0) {
              providerMetadata.openai.serviceTier = serviceTier;
            }
            controller.enqueue({
              type: "finish",
              finishReason,
              usage,
              providerMetadata
            });
          }
        })
      ),
      request: { body },
      response: { headers: responseHeaders }
    };
  }
3449
+ };
3450
// Token-usage block of a Responses API payload; the per-detail sub-objects
// and their counters are nullish-tolerant.
var usageSchema2 = import_v417.z.object({
  input_tokens: import_v417.z.number(),
  input_tokens_details: import_v417.z.object({ cached_tokens: import_v417.z.number().nullish() }).nullish(),
  output_tokens: import_v417.z.number(),
  output_tokens_details: import_v417.z.object({ reasoning_tokens: import_v417.z.number().nullish() }).nullish()
});
3456
// SSE event: incremental output text for the item identified by `item_id`.
var textDeltaChunkSchema = import_v417.z.object({
  type: import_v417.z.literal("response.output_text.delta"),
  item_id: import_v417.z.string(),
  delta: import_v417.z.string(),
  logprobs: LOGPROBS_SCHEMA.nullish()
});
3462
// SSE event: server-side error raised mid-stream.
var errorChunkSchema = import_v417.z.object({
  type: import_v417.z.literal("error"),
  code: import_v417.z.string(),
  message: import_v417.z.string(),
  param: import_v417.z.string().nullish(),
  sequence_number: import_v417.z.number()
});
3469
// SSE event: terminal chunk ("response.completed" or "response.incomplete")
// carrying final usage, service tier, and the incompleteness reason if any.
var responseFinishedChunkSchema = import_v417.z.object({
  type: import_v417.z.enum(["response.completed", "response.incomplete"]),
  response: import_v417.z.object({
    incomplete_details: import_v417.z.object({ reason: import_v417.z.string() }).nullish(),
    usage: usageSchema2,
    service_tier: import_v417.z.string().nullish()
  })
});
3477
// SSE event: first chunk of a stream; carries the response id, creation
// timestamp (epoch seconds), and resolved model id.
var responseCreatedChunkSchema = import_v417.z.object({
  type: import_v417.z.literal("response.created"),
  response: import_v417.z.object({
    id: import_v417.z.string(),
    created_at: import_v417.z.number(),
    model: import_v417.z.string(),
    service_tier: import_v417.z.string().nullish()
  })
});
3486
// SSE event: a new output item opened at `output_index`. The discriminated
// union covers the item kinds doStream handles on "added".
var responseOutputItemAddedSchema = import_v417.z.object({
  type: import_v417.z.literal("response.output_item.added"),
  output_index: import_v417.z.number(),
  item: import_v417.z.discriminatedUnion("type", [
    import_v417.z.object({
      type: import_v417.z.literal("message"),
      id: import_v417.z.string()
    }),
    import_v417.z.object({
      type: import_v417.z.literal("reasoning"),
      id: import_v417.z.string(),
      encrypted_content: import_v417.z.string().nullish()
    }),
    import_v417.z.object({
      type: import_v417.z.literal("function_call"),
      id: import_v417.z.string(),
      call_id: import_v417.z.string(),
      name: import_v417.z.string(),
      arguments: import_v417.z.string()
    }),
    import_v417.z.object({
      type: import_v417.z.literal("web_search_call"),
      id: import_v417.z.string(),
      status: import_v417.z.string(),
      action: import_v417.z.object({
        type: import_v417.z.literal("search"),
        query: import_v417.z.string().optional()
      }).nullish()
    }),
    import_v417.z.object({
      type: import_v417.z.literal("computer_call"),
      id: import_v417.z.string(),
      status: import_v417.z.string()
    }),
    import_v417.z.object({
      type: import_v417.z.literal("file_search_call"),
      id: import_v417.z.string(),
      status: import_v417.z.string(),
      queries: import_v417.z.array(import_v417.z.string()).nullish(),
      results: import_v417.z.array(
        import_v417.z.object({
          attributes: import_v417.z.object({
            file_id: import_v417.z.string(),
            filename: import_v417.z.string(),
            score: import_v417.z.number(),
            text: import_v417.z.string()
          })
        })
      ).optional()
    })
  ])
});
3538
// SSE event: an output item finished at `output_index`. Unlike the "added"
// schema, tool-call statuses here are pinned to "completed", and the
// code-interpreter / web-search shapes are shared via the *CallItem schemas.
var responseOutputItemDoneSchema = import_v417.z.object({
  type: import_v417.z.literal("response.output_item.done"),
  output_index: import_v417.z.number(),
  item: import_v417.z.discriminatedUnion("type", [
    import_v417.z.object({
      type: import_v417.z.literal("message"),
      id: import_v417.z.string()
    }),
    import_v417.z.object({
      type: import_v417.z.literal("reasoning"),
      id: import_v417.z.string(),
      encrypted_content: import_v417.z.string().nullish()
    }),
    import_v417.z.object({
      type: import_v417.z.literal("function_call"),
      id: import_v417.z.string(),
      call_id: import_v417.z.string(),
      name: import_v417.z.string(),
      arguments: import_v417.z.string(),
      status: import_v417.z.literal("completed")
    }),
    codeInterpreterCallItem,
    webSearchCallItem,
    import_v417.z.object({
      type: import_v417.z.literal("computer_call"),
      id: import_v417.z.string(),
      status: import_v417.z.literal("completed")
    }),
    import_v417.z.object({
      type: import_v417.z.literal("file_search_call"),
      id: import_v417.z.string(),
      status: import_v417.z.literal("completed"),
      queries: import_v417.z.array(import_v417.z.string()).nullish(),
      results: import_v417.z.array(
        import_v417.z.object({
          attributes: import_v417.z.object({
            file_id: import_v417.z.string(),
            filename: import_v417.z.string(),
            score: import_v417.z.number(),
            text: import_v417.z.string()
          })
        })
      ).nullish()
    })
  ])
});
3584
// SSE event: incremental JSON argument text for an in-flight function call.
var responseFunctionCallArgumentsDeltaSchema = import_v417.z.object({
  type: import_v417.z.literal("response.function_call_arguments.delta"),
  item_id: import_v417.z.string(),
  output_index: import_v417.z.number(),
  delta: import_v417.z.string()
});
3590
// SSE event: a citation annotation (URL or file) attached to output text.
var responseAnnotationAddedSchema = import_v417.z.object({
  type: import_v417.z.literal("response.output_text.annotation.added"),
  annotation: import_v417.z.discriminatedUnion("type", [
    import_v417.z.object({
      type: import_v417.z.literal("url_citation"),
      url: import_v417.z.string(),
      title: import_v417.z.string()
    }),
    import_v417.z.object({
      type: import_v417.z.literal("file_citation"),
      file_id: import_v417.z.string(),
      filename: import_v417.z.string().nullish(),
      index: import_v417.z.number().nullish(),
      start_index: import_v417.z.number().nullish(),
      end_index: import_v417.z.number().nullish(),
      quote: import_v417.z.string().nullish()
    })
  ])
});
3609
// SSE event: a new reasoning-summary part opened for a reasoning item.
var responseReasoningSummaryPartAddedSchema = import_v417.z.object({
  type: import_v417.z.literal("response.reasoning_summary_part.added"),
  item_id: import_v417.z.string(),
  summary_index: import_v417.z.number()
});
3614
// SSE event: incremental text for a reasoning-summary part.
var responseReasoningSummaryTextDeltaSchema = import_v417.z.object({
  type: import_v417.z.literal("response.reasoning_summary_text.delta"),
  item_id: import_v417.z.string(),
  summary_index: import_v417.z.number(),
  delta: import_v417.z.string()
});
3620
// Union of all stream chunk schemas doStream consumes. The trailing loose
// object accepts unknown event types so new API events don't break parsing.
var openaiResponsesChunkSchema = import_v417.z.union([
  textDeltaChunkSchema,
  responseFinishedChunkSchema,
  responseCreatedChunkSchema,
  responseOutputItemAddedSchema,
  responseOutputItemDoneSchema,
  responseFunctionCallArgumentsDeltaSchema,
  responseAnnotationAddedSchema,
  responseReasoningSummaryPartAddedSchema,
  responseReasoningSummaryTextDeltaSchema,
  errorChunkSchema,
  import_v417.z.object({ type: import_v417.z.string() }).loose()
  // fallback for unknown chunks
]);
3634
// Type guard: chunk is a streaming output-text delta event.
function isTextDeltaChunk(chunk) {
  const eventType = "response.output_text.delta";
  return chunk.type === eventType;
}
3637
// Type guard: chunk marks an output item as finished.
function isResponseOutputItemDoneChunk(chunk) {
  const eventType = "response.output_item.done";
  return chunk.type === eventType;
}
3640
// Type guard: finished output item that is a reasoning item.
function isResponseOutputItemDoneReasoningChunk(chunk) {
  return chunk.type === "response.output_item.done" && chunk.item.type === "reasoning";
}
3643
// Type guard: terminal stream event (completed or incomplete response).
function isResponseFinishedChunk(chunk) {
  return ["response.completed", "response.incomplete"].includes(chunk.type);
}
3646
// Type guard: first event of a stream, carrying response metadata.
function isResponseCreatedChunk(chunk) {
  const eventType = "response.created";
  return chunk.type === eventType;
}
3649
// Type guard: incremental function-call argument text.
function isResponseFunctionCallArgumentsDeltaChunk(chunk) {
  const eventType = "response.function_call_arguments.delta";
  return chunk.type === eventType;
}
3652
// Type guard: chunk announces a newly opened output item.
function isResponseOutputItemAddedChunk(chunk) {
  const eventType = "response.output_item.added";
  return chunk.type === eventType;
}
3655
// Type guard: newly opened output item that is a reasoning item.
function isResponseOutputItemAddedReasoningChunk(chunk) {
  return chunk.type === "response.output_item.added" && chunk.item.type === "reasoning";
}
3658
// Type guard: a citation annotation was attached to output text.
function isResponseAnnotationAddedChunk(chunk) {
  const eventType = "response.output_text.annotation.added";
  return chunk.type === eventType;
}
3661
// Type guard: a new reasoning-summary part was opened.
function isResponseReasoningSummaryPartAddedChunk(chunk) {
  const eventType = "response.reasoning_summary_part.added";
  return chunk.type === eventType;
}
3664
// Type guard: incremental reasoning-summary text.
function isResponseReasoningSummaryTextDeltaChunk(chunk) {
  const eventType = "response.reasoning_summary_text.delta";
  return chunk.type === eventType;
}
3667
// Type guard: server-side error event.
function isErrorChunk(chunk) {
  const eventType = "error";
  return chunk.type === eventType;
}
3670
/**
 * Derive Responses-API model configuration from a model id prefix.
 *
 * @param {string} modelId - OpenAI model identifier (e.g. "gpt-4o", "o3-mini").
 * @returns {{
 *   requiredAutoTruncation: boolean,
 *   systemMessageMode: "system" | "developer" | "remove",
 *   supportsFlexProcessing: boolean,
 *   supportsPriorityProcessing: boolean,
 *   isReasoningModel: boolean
 * }}
 */
function getResponsesModelConfig(modelId) {
  // All classification is prefix-based on the model id string.
  const hasPrefix = (...prefixes) => prefixes.some((p) => modelId.startsWith(p));

  // Flex service tier: o3 / o4-mini, plus gpt-5 models except the chat variant.
  const flexEligible =
    hasPrefix("o3", "o4-mini") ||
    (hasPrefix("gpt-5") && !hasPrefix("gpt-5-chat"));

  // Priority service tier: gpt-4*, gpt-5 (except nano/chat), o3, o4-mini.
  // ("gpt-5-mini" is listed explicitly even though "gpt-5" covers it.)
  const priorityEligible =
    hasPrefix("gpt-4", "gpt-5-mini") ||
    (hasPrefix("gpt-5") && !hasPrefix("gpt-5-nano") && !hasPrefix("gpt-5-chat")) ||
    hasPrefix("o3", "o4-mini");

  const base = {
    requiredAutoTruncation: false,
    systemMessageMode: "system",
    supportsFlexProcessing: flexEligible,
    supportsPriorityProcessing: priorityEligible
  };

  // gpt-5-chat is the one gpt-5 family member that is NOT a reasoning model;
  // check it before the broader gpt-5 prefix below.
  if (hasPrefix("gpt-5-chat")) {
    return { ...base, isReasoningModel: false };
  }

  // Reasoning models: any "o*" id, gpt-5 family, codex-, computer-use.
  if (hasPrefix("o", "gpt-5", "codex-", "computer-use")) {
    // Early o1 variants do not accept system/developer messages at all.
    const mode = hasPrefix("o1-mini", "o1-preview") ? "remove" : "developer";
    return { ...base, isReasoningModel: true, systemMessageMode: mode };
  }

  // Everything else (e.g. gpt-4 family) is a non-reasoning model.
  return { ...base, isReasoningModel: false };
}
3704
// Zod schema for provider options accepted by the OpenAI Responses language
// model. Wrapped in an IIFE so the bundled zod namespace (`import_v417.z`)
// only has to be spelled once.
var openaiResponsesProviderOptionsSchema = (() => {
  const z = import_v417.z;
  return z.object({
    metadata: z.any().nullish(),
    parallelToolCalls: z.boolean().nullish(),
    previousResponseId: z.string().nullish(),
    store: z.boolean().nullish(),
    user: z.string().nullish(),
    reasoningEffort: z.string().nullish(),
    strictJsonSchema: z.boolean().nullish(),
    instructions: z.string().nullish(),
    reasoningSummary: z.string().nullish(),
    serviceTier: z.enum(["auto", "flex", "priority"]).nullish(),
    include: z.array(
      z.enum([
        "reasoning.encrypted_content",
        "file_search_call.results",
        "message.output_text.logprobs"
      ])
    ).nullish(),
    textVerbosity: z.enum(["low", "medium", "high"]).nullish(),
    promptCacheKey: z.string().nullish(),
    safetyIdentifier: z.string().nullish(),
    /**
     * Return the log probabilities of the tokens.
     *
     * Setting to true will return the log probabilities of the tokens that
     * were generated.
     *
     * Setting to a number will return the log probabilities of the top n
     * tokens that were generated.
     *
     * @see https://platform.openai.com/docs/api-reference/responses/create
     * @see https://cookbook.openai.com/examples/using_logprobs
     */
    logprobs: z.union([z.boolean(), z.number().min(1).max(TOP_LOGPROBS_MAX)]).optional()
  });
})();
3739
// Annotate the CommonJS export names for ESM import in node:
// NOTE: the `0 && (module.exports = {...})` expression is dead code at
// runtime; Node's static CJS export lexer reads this literal shape to expose
// the names below as named ESM imports. Do not restructure it.
0 && (module.exports = {
  OpenAIChatLanguageModel,
  OpenAICompletionLanguageModel,
  OpenAIEmbeddingModel,
  OpenAIImageModel,
  OpenAIResponsesLanguageModel,
  OpenAISpeechModel,
  OpenAITranscriptionModel,
  hasDefaultResponseFormat,
  modelMaxImagesPerCall,
  openAITranscriptionProviderOptions,
  openaiCompletionProviderOptions,
  openaiEmbeddingProviderOptions,
  openaiProviderOptions
});
3755
+ //# sourceMappingURL=index.js.map