@ai-sdk/openai 0.0.0-013d7476-20250808163325

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,3286 @@
1
"use strict";
// esbuild-generated CommonJS interop helpers.
var __defProp = Object.defineProperty;
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __hasOwnProp = Object.prototype.hasOwnProperty;
// Installs a lazy, enumerable getter on `target` for every entry in `all`,
// so exported values are resolved on first access (supports hoisting/cycles).
var __export = (target, all) => {
  for (var name in all)
    __defProp(target, name, { get: all[name], enumerable: true });
};
// Copies own properties from `from` onto `to` (skipping `except` and keys
// already present), preserving enumerability via lazy getters.
var __copyProps = (to, from, except, desc) => {
  if (from && typeof from === "object" || typeof from === "function") {
    for (let key of __getOwnPropNames(from))
      if (!__hasOwnProp.call(to, key) && key !== except)
        __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
  }
  return to;
};
// Marks a module object as an ES module and copies its exports onto a fresh
// object suitable for assignment to `module.exports`.
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
19
+
20
+ // src/internal/index.ts
21
// Registry of everything this internal entry point exposes. `__export`
// installs lazy getters, so each binding is resolved on first access.
var internal_exports = {};
__export(internal_exports, {
  OpenAIChatLanguageModel: () => OpenAIChatLanguageModel,
  OpenAICompletionLanguageModel: () => OpenAICompletionLanguageModel,
  OpenAIEmbeddingModel: () => OpenAIEmbeddingModel,
  OpenAIImageModel: () => OpenAIImageModel,
  OpenAIResponsesLanguageModel: () => OpenAIResponsesLanguageModel,
  OpenAISpeechModel: () => OpenAISpeechModel,
  OpenAITranscriptionModel: () => OpenAITranscriptionModel,
  hasDefaultResponseFormat: () => hasDefaultResponseFormat,
  modelMaxImagesPerCall: () => modelMaxImagesPerCall,
  openAITranscriptionProviderOptions: () => openAITranscriptionProviderOptions,
  openaiCompletionProviderOptions: () => openaiCompletionProviderOptions,
  openaiEmbeddingProviderOptions: () => openaiEmbeddingProviderOptions,
  openaiProviderOptions: () => openaiProviderOptions
});
module.exports = __toCommonJS(internal_exports);
38
+
39
+ // src/openai-chat-language-model.ts
40
+ var import_provider3 = require("@ai-sdk/provider");
41
+ var import_provider_utils5 = require("@ai-sdk/provider-utils");
42
+ var import_v45 = require("zod/v4");
43
+
44
+ // src/convert-to-openai-chat-messages.ts
45
+ var import_provider = require("@ai-sdk/provider");
46
+ var import_provider_utils = require("@ai-sdk/provider-utils");
47
// Converts an AI SDK prompt (array of { role, content } messages) into the
// OpenAI chat-completions wire format.
//
// @param prompt - AI SDK prompt messages with roles system/user/assistant/tool.
// @param systemMessageMode - How system messages are encoded: "system"
//   (default), "developer", or "remove" (dropped with a warning).
// @returns { messages, warnings } - wire-format messages plus warnings
//   produced during conversion (currently only for removed system messages).
// @throws UnsupportedFunctionalityError for inputs OpenAI cannot accept:
//   audio/PDF parts passed as URLs, and unsupported file media types.
function convertToOpenAIChatMessages({
  prompt,
  systemMessageMode = "system"
}) {
  const messages = [];
  const warnings = [];
  for (const { role, content } of prompt) {
    switch (role) {
      case "system": {
        switch (systemMessageMode) {
          case "system": {
            messages.push({ role: "system", content });
            break;
          }
          case "developer": {
            // Reasoning models use the "developer" role instead of "system".
            messages.push({ role: "developer", content });
            break;
          }
          case "remove": {
            // Message is dropped entirely; surface that to the caller.
            warnings.push({
              type: "other",
              message: "system messages are removed for this model"
            });
            break;
          }
          default: {
            const _exhaustiveCheck = systemMessageMode;
            throw new Error(
              `Unsupported system message mode: ${_exhaustiveCheck}`
            );
          }
        }
        break;
      }
      case "user": {
        // Fast path: a single text part collapses to a plain string content.
        if (content.length === 1 && content[0].type === "text") {
          messages.push({ role: "user", content: content[0].text });
          break;
        }
        // Multi-part content: map each part to an OpenAI content part.
        messages.push({
          role: "user",
          content: content.map((part, index) => {
            var _a, _b, _c;
            switch (part.type) {
              case "text": {
                return { type: "text", text: part.text };
              }
              case "file": {
                if (part.mediaType.startsWith("image/")) {
                  // "image/*" is normalized to a concrete type for the data URL.
                  const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
                  return {
                    type: "image_url",
                    image_url: {
                      // URLs pass through; raw data becomes a base64 data URL.
                      url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${(0, import_provider_utils.convertToBase64)(part.data)}`,
                      // OpenAI specific extension: image detail
                      detail: (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.imageDetail
                    }
                  };
                } else if (part.mediaType.startsWith("audio/")) {
                  // Audio must be inlined; OpenAI has no URL-based audio input here.
                  if (part.data instanceof URL) {
                    throw new import_provider.UnsupportedFunctionalityError({
                      functionality: "audio file parts with URLs"
                    });
                  }
                  // Only wav and mp3/mpeg are supported audio formats.
                  switch (part.mediaType) {
                    case "audio/wav": {
                      return {
                        type: "input_audio",
                        input_audio: {
                          data: (0, import_provider_utils.convertToBase64)(part.data),
                          format: "wav"
                        }
                      };
                    }
                    case "audio/mp3":
                    case "audio/mpeg": {
                      return {
                        type: "input_audio",
                        input_audio: {
                          data: (0, import_provider_utils.convertToBase64)(part.data),
                          format: "mp3"
                        }
                      };
                    }
                    default: {
                      throw new import_provider.UnsupportedFunctionalityError({
                        functionality: `audio content parts with media type ${part.mediaType}`
                      });
                    }
                  }
                } else if (part.mediaType === "application/pdf") {
                  if (part.data instanceof URL) {
                    throw new import_provider.UnsupportedFunctionalityError({
                      functionality: "PDF file parts with URLs"
                    });
                  }
                  // Strings starting with "file-" are treated as uploaded-file
                  // IDs; anything else is inlined as a base64 data URL.
                  return {
                    type: "file",
                    file: typeof part.data === "string" && part.data.startsWith("file-") ? { file_id: part.data } : {
                      filename: (_c = part.filename) != null ? _c : `part-${index}.pdf`,
                      file_data: `data:application/pdf;base64,${(0, import_provider_utils.convertToBase64)(part.data)}`
                    }
                  };
                } else {
                  throw new import_provider.UnsupportedFunctionalityError({
                    functionality: `file part media type ${part.mediaType}`
                  });
                }
              }
            }
          })
        });
        break;
      }
      case "assistant": {
        // Assistant content is flattened to one text string plus tool calls.
        let text = "";
        const toolCalls = [];
        for (const part of content) {
          switch (part.type) {
            case "text": {
              text += part.text;
              break;
            }
            case "tool-call": {
              toolCalls.push({
                id: part.toolCallId,
                type: "function",
                function: {
                  name: part.toolName,
                  // OpenAI expects tool arguments as a JSON string.
                  arguments: JSON.stringify(part.input)
                }
              });
              break;
            }
          }
        }
        messages.push({
          role: "assistant",
          content: text,
          // Omit the field entirely when there are no tool calls.
          tool_calls: toolCalls.length > 0 ? toolCalls : void 0
        });
        break;
      }
      case "tool": {
        // Each tool response becomes its own "tool" message keyed by call id.
        for (const toolResponse of content) {
          const output = toolResponse.output;
          let contentValue;
          switch (output.type) {
            case "text":
            case "error-text":
              contentValue = output.value;
              break;
            case "content":
            case "json":
            case "error-json":
              // Structured outputs are serialized to JSON text.
              contentValue = JSON.stringify(output.value);
              break;
          }
          messages.push({
            role: "tool",
            tool_call_id: toolResponse.toolCallId,
            content: contentValue
          });
        }
        break;
      }
      default: {
        const _exhaustiveCheck = role;
        throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
      }
    }
  }
  return { messages, warnings };
}
221
+
222
+ // src/get-response-metadata.ts
223
+ function getResponseMetadata({
224
+ id,
225
+ model,
226
+ created
227
+ }) {
228
+ return {
229
+ id: id != null ? id : void 0,
230
+ modelId: model != null ? model : void 0,
231
+ timestamp: created != null ? new Date(created * 1e3) : void 0
232
+ };
233
+ }
234
+
235
+ // src/map-openai-finish-reason.ts
236
// Maps an OpenAI finish reason onto the AI SDK's normalized finish reasons.
// Unrecognized (or missing) reasons map to "unknown".
function mapOpenAIFinishReason(finishReason) {
  const reasonMap = {
    stop: "stop",
    length: "length",
    content_filter: "content-filter",
    function_call: "tool-calls",
    tool_calls: "tool-calls"
  };
  if (finishReason != null && Object.hasOwn(reasonMap, finishReason)) {
    return reasonMap[finishReason];
  }
  return "unknown";
}
251
+
252
+ // src/openai-chat-options.ts
253
+ var import_v4 = require("zod/v4");
254
// Zod schema for provider-specific options accepted under
// `providerOptions.openai` on chat-completion calls. All fields are optional;
// unset fields are simply omitted from the request body.
var openaiProviderOptions = import_v4.z.object({
  /**
   * Modify the likelihood of specified tokens appearing in the completion.
   *
   * Accepts a JSON object that maps tokens (specified by their token ID in
   * the GPT tokenizer) to an associated bias value from -100 to 100.
   */
  logitBias: import_v4.z.record(import_v4.z.coerce.number(), import_v4.z.number()).optional(),
  /**
   * Return the log probabilities of the tokens.
   *
   * Setting to true will return the log probabilities of the tokens that
   * were generated.
   *
   * Setting to a number will return the log probabilities of the top n
   * tokens that were generated.
   */
  logprobs: import_v4.z.union([import_v4.z.boolean(), import_v4.z.number()]).optional(),
  /**
   * Whether to enable parallel function calling during tool use. Default to true.
   */
  parallelToolCalls: import_v4.z.boolean().optional(),
  /**
   * A unique identifier representing your end-user, which can help OpenAI to
   * monitor and detect abuse.
   */
  user: import_v4.z.string().optional(),
  /**
   * Reasoning effort for reasoning models. Defaults to `medium`.
   */
  reasoningEffort: import_v4.z.enum(["low", "medium", "high"]).optional(),
  /**
   * Maximum number of completion tokens to generate. Useful for reasoning models.
   */
  maxCompletionTokens: import_v4.z.number().optional(),
  /**
   * Whether to enable persistence in responses API.
   */
  store: import_v4.z.boolean().optional(),
  /**
   * Metadata to associate with the request.
   */
  metadata: import_v4.z.record(import_v4.z.string().max(64), import_v4.z.string().max(512)).optional(),
  /**
   * Parameters for prediction mode.
   */
  prediction: import_v4.z.record(import_v4.z.string(), import_v4.z.any()).optional(),
  /**
   * Whether to use structured outputs.
   *
   * @default true
   */
  structuredOutputs: import_v4.z.boolean().optional(),
  /**
   * Service tier for the request.
   * - 'auto': Default service tier
   * - 'flex': 50% cheaper processing at the cost of increased latency. Only available for o3 and o4-mini models.
   * - 'priority': Higher-speed processing with predictably low latency at premium cost. Available for Enterprise customers.
   *
   * @default 'auto'
   */
  serviceTier: import_v4.z.enum(["auto", "flex", "priority"]).optional(),
  /**
   * Whether to use strict JSON schema validation.
   *
   * @default false
   */
  strictJsonSchema: import_v4.z.boolean().optional()
});
323
+
324
+ // src/openai-error.ts
325
+ var import_v42 = require("zod/v4");
326
+ var import_provider_utils2 = require("@ai-sdk/provider-utils");
327
// Loose schema for OpenAI-style error response bodies. Only `error.message`
// is strictly required so that OpenAI-compatible providers with slightly
// different error payloads still parse.
var openaiErrorDataSchema = import_v42.z.object({
  error: import_v42.z.object({
    message: import_v42.z.string(),
    // The additional information below is handled loosely to support
    // OpenAI-compatible providers that have slightly different error
    // responses:
    type: import_v42.z.string().nullish(),
    param: import_v42.z.any().nullish(),
    code: import_v42.z.union([import_v42.z.string(), import_v42.z.number()]).nullish()
  })
});
// Shared failed-response handler: parses the error body with the schema above
// and uses `error.message` as the thrown error's message.
var openaiFailedResponseHandler = (0, import_provider_utils2.createJsonErrorResponseHandler)({
  errorSchema: openaiErrorDataSchema,
  errorToMessage: (data) => data.error.message
});
342
+
343
+ // src/openai-prepare-tools.ts
344
+ var import_provider2 = require("@ai-sdk/provider");
345
+
346
+ // src/tool/file-search.ts
347
+ var import_provider_utils3 = require("@ai-sdk/provider-utils");
348
+ var import_v43 = require("zod/v4");
349
// A single attribute comparison used to filter file-search results.
var comparisonFilterSchema = import_v43.z.object({
  key: import_v43.z.string(),
  type: import_v43.z.enum(["eq", "ne", "gt", "gte", "lt", "lte"]),
  value: import_v43.z.union([import_v43.z.string(), import_v43.z.number(), import_v43.z.boolean()])
});
// Boolean combination of filters; recursive via z.lazy so compound filters
// can nest other compound filters.
var compoundFilterSchema = import_v43.z.object({
  type: import_v43.z.enum(["and", "or"]),
  filters: import_v43.z.array(
    import_v43.z.union([comparisonFilterSchema, import_v43.z.lazy(() => compoundFilterSchema)])
  )
});
var filtersSchema = import_v43.z.union([comparisonFilterSchema, compoundFilterSchema]);
// Configuration arguments accepted when registering the file_search tool.
var fileSearchArgsSchema = import_v43.z.object({
  /**
   * List of vector store IDs to search through. If not provided, searches all available vector stores.
   */
  vectorStoreIds: import_v43.z.array(import_v43.z.string()).optional(),
  /**
   * Maximum number of search results to return. Defaults to 10.
   */
  maxNumResults: import_v43.z.number().optional(),
  /**
   * Ranking options for the search.
   */
  ranking: import_v43.z.object({
    ranker: import_v43.z.enum(["auto", "default-2024-08-21"]).optional()
  }).optional(),
  /**
   * A filter to apply based on file attributes.
   */
  filters: filtersSchema.optional()
});
// Provider-defined tool factory for OpenAI's file_search tool; the model
// invokes it with a single `query` string.
var fileSearch = (0, import_provider_utils3.createProviderDefinedToolFactory)({
  id: "openai.file_search",
  name: "file_search",
  inputSchema: import_v43.z.object({
    query: import_v43.z.string()
  })
});
388
+
389
+ // src/tool/web-search-preview.ts
390
+ var import_provider_utils4 = require("@ai-sdk/provider-utils");
391
+ var import_v44 = require("zod/v4");
392
// Configuration arguments accepted when registering the web_search_preview tool.
var webSearchPreviewArgsSchema = import_v44.z.object({
  /**
   * Search context size to use for the web search.
   * - high: Most comprehensive context, highest cost, slower response
   * - medium: Balanced context, cost, and latency (default)
   * - low: Least context, lowest cost, fastest response
   */
  searchContextSize: import_v44.z.enum(["low", "medium", "high"]).optional(),
  /**
   * User location information to provide geographically relevant search results.
   */
  userLocation: import_v44.z.object({
    /**
     * Type of location (always 'approximate')
     */
    type: import_v44.z.literal("approximate"),
    /**
     * Two-letter ISO country code (e.g., 'US', 'GB')
     */
    country: import_v44.z.string().optional(),
    /**
     * City name (free text, e.g., 'Minneapolis')
     */
    city: import_v44.z.string().optional(),
    /**
     * Region name (free text, e.g., 'Minnesota')
     */
    region: import_v44.z.string().optional(),
    /**
     * IANA timezone (e.g., 'America/Chicago')
     */
    timezone: import_v44.z.string().optional()
  }).optional()
});
// Provider-defined tool factory for OpenAI's web_search_preview tool; it
// takes no model-supplied input (empty input schema).
var webSearchPreview = (0, import_provider_utils4.createProviderDefinedToolFactory)({
  id: "openai.web_search_preview",
  name: "web_search_preview",
  inputSchema: import_v44.z.object({})
});
431
+
432
+ // src/openai-prepare-tools.ts
433
// Converts AI SDK tool definitions and tool choice into the OpenAI
// chat-completions wire format.
//
// @param tools - AI SDK tools (function tools or provider-defined tools).
// @param toolChoice - Optional tool-choice directive from the caller.
// @param structuredOutputs - Whether structured outputs are enabled; gates
//   the per-function `strict` flag.
// @param strictJsonSchema - Value for `strict` when structured outputs are on.
// @returns { tools, toolChoice, toolWarnings } in OpenAI wire format.
//   Unsupported tools are collected as warnings instead of throwing.
// @throws UnsupportedFunctionalityError for an unrecognized tool choice type.
function prepareTools({
  tools,
  toolChoice,
  structuredOutputs,
  strictJsonSchema
}) {
  const toolWarnings = [];
  // Treat an empty tool list the same as no tools at all.
  const activeTools = (tools == null ? void 0 : tools.length) ? tools : void 0;
  if (activeTools == null) {
    return { tools: void 0, toolChoice: void 0, toolWarnings };
  }
  const openaiTools = [];
  for (const tool of activeTools) {
    if (tool.type === "function") {
      openaiTools.push({
        type: "function",
        function: {
          name: tool.name,
          description: tool.description,
          parameters: tool.inputSchema,
          // `strict` only applies when structured outputs are enabled.
          strict: structuredOutputs ? strictJsonSchema : void 0
        }
      });
    } else if (tool.type === "provider-defined" && tool.id === "openai.file_search") {
      const args = fileSearchArgsSchema.parse(tool.args);
      openaiTools.push({
        type: "file_search",
        vector_store_ids: args.vectorStoreIds,
        max_num_results: args.maxNumResults,
        ranking_options: args.ranking ? { ranker: args.ranking.ranker } : void 0,
        filters: args.filters
      });
    } else if (tool.type === "provider-defined" && tool.id === "openai.web_search_preview") {
      const args = webSearchPreviewArgsSchema.parse(tool.args);
      openaiTools.push({
        type: "web_search_preview",
        search_context_size: args.searchContextSize,
        user_location: args.userLocation
      });
    } else {
      // Anything else (unknown provider-defined id or unknown tool type) is
      // reported as a warning rather than failing the request.
      toolWarnings.push({ type: "unsupported-tool", tool });
    }
  }
  if (toolChoice == null) {
    return { tools: openaiTools, toolChoice: void 0, toolWarnings };
  }
  const choiceType = toolChoice.type;
  if (choiceType === "auto" || choiceType === "none" || choiceType === "required") {
    // These pass through unchanged as plain strings.
    return { tools: openaiTools, toolChoice: choiceType, toolWarnings };
  }
  if (choiceType === "tool") {
    // Forcing a specific tool uses OpenAI's function-reference object form.
    return {
      tools: openaiTools,
      toolChoice: {
        type: "function",
        function: {
          name: toolChoice.toolName
        }
      },
      toolWarnings
    };
  }
  throw new import_provider2.UnsupportedFunctionalityError({
    functionality: `tool choice type: ${choiceType}`
  });
}
518
+
519
+ // src/openai-chat-language-model.ts
520
+ var OpenAIChatLanguageModel = class {
521
+ constructor(modelId, config) {
522
+ this.specificationVersion = "v2";
523
+ this.supportedUrls = {
524
+ "image/*": [/^https?:\/\/.*$/]
525
+ };
526
+ this.modelId = modelId;
527
+ this.config = config;
528
+ }
529
+ get provider() {
530
+ return this.config.provider;
531
+ }
532
+ async getArgs({
533
+ prompt,
534
+ maxOutputTokens,
535
+ temperature,
536
+ topP,
537
+ topK,
538
+ frequencyPenalty,
539
+ presencePenalty,
540
+ stopSequences,
541
+ responseFormat,
542
+ seed,
543
+ tools,
544
+ toolChoice,
545
+ providerOptions
546
+ }) {
547
+ var _a, _b, _c, _d;
548
+ const warnings = [];
549
+ const openaiOptions = (_a = await (0, import_provider_utils5.parseProviderOptions)({
550
+ provider: "openai",
551
+ providerOptions,
552
+ schema: openaiProviderOptions
553
+ })) != null ? _a : {};
554
+ const structuredOutputs = (_b = openaiOptions.structuredOutputs) != null ? _b : true;
555
+ if (topK != null) {
556
+ warnings.push({
557
+ type: "unsupported-setting",
558
+ setting: "topK"
559
+ });
560
+ }
561
+ if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !structuredOutputs) {
562
+ warnings.push({
563
+ type: "unsupported-setting",
564
+ setting: "responseFormat",
565
+ details: "JSON response format schema is only supported with structuredOutputs"
566
+ });
567
+ }
568
+ const { messages, warnings: messageWarnings } = convertToOpenAIChatMessages(
569
+ {
570
+ prompt,
571
+ systemMessageMode: getSystemMessageMode(this.modelId)
572
+ }
573
+ );
574
+ warnings.push(...messageWarnings);
575
+ const strictJsonSchema = (_c = openaiOptions.strictJsonSchema) != null ? _c : false;
576
+ const baseArgs = {
577
+ // model id:
578
+ model: this.modelId,
579
+ // model specific settings:
580
+ logit_bias: openaiOptions.logitBias,
581
+ logprobs: openaiOptions.logprobs === true || typeof openaiOptions.logprobs === "number" ? true : void 0,
582
+ top_logprobs: typeof openaiOptions.logprobs === "number" ? openaiOptions.logprobs : typeof openaiOptions.logprobs === "boolean" ? openaiOptions.logprobs ? 0 : void 0 : void 0,
583
+ user: openaiOptions.user,
584
+ parallel_tool_calls: openaiOptions.parallelToolCalls,
585
+ // standardized settings:
586
+ max_tokens: maxOutputTokens,
587
+ temperature,
588
+ top_p: topP,
589
+ frequency_penalty: frequencyPenalty,
590
+ presence_penalty: presencePenalty,
591
+ response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? structuredOutputs && responseFormat.schema != null ? {
592
+ type: "json_schema",
593
+ json_schema: {
594
+ schema: responseFormat.schema,
595
+ strict: strictJsonSchema,
596
+ name: (_d = responseFormat.name) != null ? _d : "response",
597
+ description: responseFormat.description
598
+ }
599
+ } : { type: "json_object" } : void 0,
600
+ stop: stopSequences,
601
+ seed,
602
+ // openai specific settings:
603
+ // TODO remove in next major version; we auto-map maxOutputTokens now
604
+ max_completion_tokens: openaiOptions.maxCompletionTokens,
605
+ store: openaiOptions.store,
606
+ metadata: openaiOptions.metadata,
607
+ prediction: openaiOptions.prediction,
608
+ reasoning_effort: openaiOptions.reasoningEffort,
609
+ service_tier: openaiOptions.serviceTier,
610
+ // messages:
611
+ messages
612
+ };
613
+ if (isReasoningModel(this.modelId)) {
614
+ if (baseArgs.temperature != null) {
615
+ baseArgs.temperature = void 0;
616
+ warnings.push({
617
+ type: "unsupported-setting",
618
+ setting: "temperature",
619
+ details: "temperature is not supported for reasoning models"
620
+ });
621
+ }
622
+ if (baseArgs.top_p != null) {
623
+ baseArgs.top_p = void 0;
624
+ warnings.push({
625
+ type: "unsupported-setting",
626
+ setting: "topP",
627
+ details: "topP is not supported for reasoning models"
628
+ });
629
+ }
630
+ if (baseArgs.frequency_penalty != null) {
631
+ baseArgs.frequency_penalty = void 0;
632
+ warnings.push({
633
+ type: "unsupported-setting",
634
+ setting: "frequencyPenalty",
635
+ details: "frequencyPenalty is not supported for reasoning models"
636
+ });
637
+ }
638
+ if (baseArgs.presence_penalty != null) {
639
+ baseArgs.presence_penalty = void 0;
640
+ warnings.push({
641
+ type: "unsupported-setting",
642
+ setting: "presencePenalty",
643
+ details: "presencePenalty is not supported for reasoning models"
644
+ });
645
+ }
646
+ if (baseArgs.logit_bias != null) {
647
+ baseArgs.logit_bias = void 0;
648
+ warnings.push({
649
+ type: "other",
650
+ message: "logitBias is not supported for reasoning models"
651
+ });
652
+ }
653
+ if (baseArgs.logprobs != null) {
654
+ baseArgs.logprobs = void 0;
655
+ warnings.push({
656
+ type: "other",
657
+ message: "logprobs is not supported for reasoning models"
658
+ });
659
+ }
660
+ if (baseArgs.top_logprobs != null) {
661
+ baseArgs.top_logprobs = void 0;
662
+ warnings.push({
663
+ type: "other",
664
+ message: "topLogprobs is not supported for reasoning models"
665
+ });
666
+ }
667
+ if (baseArgs.max_tokens != null) {
668
+ if (baseArgs.max_completion_tokens == null) {
669
+ baseArgs.max_completion_tokens = baseArgs.max_tokens;
670
+ }
671
+ baseArgs.max_tokens = void 0;
672
+ }
673
+ } else if (this.modelId.startsWith("gpt-4o-search-preview") || this.modelId.startsWith("gpt-4o-mini-search-preview")) {
674
+ if (baseArgs.temperature != null) {
675
+ baseArgs.temperature = void 0;
676
+ warnings.push({
677
+ type: "unsupported-setting",
678
+ setting: "temperature",
679
+ details: "temperature is not supported for the search preview models and has been removed."
680
+ });
681
+ }
682
+ }
683
+ if (openaiOptions.serviceTier === "flex" && !supportsFlexProcessing(this.modelId)) {
684
+ warnings.push({
685
+ type: "unsupported-setting",
686
+ setting: "serviceTier",
687
+ details: "flex processing is only available for o3, o4-mini, and gpt-5 models"
688
+ });
689
+ baseArgs.service_tier = void 0;
690
+ }
691
+ if (openaiOptions.serviceTier === "priority" && !supportsPriorityProcessing(this.modelId)) {
692
+ warnings.push({
693
+ type: "unsupported-setting",
694
+ setting: "serviceTier",
695
+ details: "priority processing is only available for supported models (gpt-4, gpt-5, gpt-5-mini, o3, o4-mini) and requires Enterprise access. gpt-5-nano is not supported"
696
+ });
697
+ baseArgs.service_tier = void 0;
698
+ }
699
+ const {
700
+ tools: openaiTools,
701
+ toolChoice: openaiToolChoice,
702
+ toolWarnings
703
+ } = prepareTools({
704
+ tools,
705
+ toolChoice,
706
+ structuredOutputs,
707
+ strictJsonSchema
708
+ });
709
+ return {
710
+ args: {
711
+ ...baseArgs,
712
+ tools: openaiTools,
713
+ tool_choice: openaiToolChoice
714
+ },
715
+ warnings: [...warnings, ...toolWarnings]
716
+ };
717
+ }
718
+ async doGenerate(options) {
719
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n;
720
+ const { args: body, warnings } = await this.getArgs(options);
721
+ const {
722
+ responseHeaders,
723
+ value: response,
724
+ rawValue: rawResponse
725
+ } = await (0, import_provider_utils5.postJsonToApi)({
726
+ url: this.config.url({
727
+ path: "/chat/completions",
728
+ modelId: this.modelId
729
+ }),
730
+ headers: (0, import_provider_utils5.combineHeaders)(this.config.headers(), options.headers),
731
+ body,
732
+ failedResponseHandler: openaiFailedResponseHandler,
733
+ successfulResponseHandler: (0, import_provider_utils5.createJsonResponseHandler)(
734
+ openaiChatResponseSchema
735
+ ),
736
+ abortSignal: options.abortSignal,
737
+ fetch: this.config.fetch
738
+ });
739
+ const choice = response.choices[0];
740
+ const content = [];
741
+ const text = choice.message.content;
742
+ if (text != null && text.length > 0) {
743
+ content.push({ type: "text", text });
744
+ }
745
+ for (const toolCall of (_a = choice.message.tool_calls) != null ? _a : []) {
746
+ content.push({
747
+ type: "tool-call",
748
+ toolCallId: (_b = toolCall.id) != null ? _b : (0, import_provider_utils5.generateId)(),
749
+ toolName: toolCall.function.name,
750
+ input: toolCall.function.arguments
751
+ });
752
+ }
753
+ for (const annotation of (_c = choice.message.annotations) != null ? _c : []) {
754
+ content.push({
755
+ type: "source",
756
+ sourceType: "url",
757
+ id: (0, import_provider_utils5.generateId)(),
758
+ url: annotation.url,
759
+ title: annotation.title
760
+ });
761
+ }
762
+ const completionTokenDetails = (_d = response.usage) == null ? void 0 : _d.completion_tokens_details;
763
+ const promptTokenDetails = (_e = response.usage) == null ? void 0 : _e.prompt_tokens_details;
764
+ const providerMetadata = { openai: {} };
765
+ if ((completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens) != null) {
766
+ providerMetadata.openai.acceptedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens;
767
+ }
768
+ if ((completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens) != null) {
769
+ providerMetadata.openai.rejectedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens;
770
+ }
771
+ if (((_f = choice.logprobs) == null ? void 0 : _f.content) != null) {
772
+ providerMetadata.openai.logprobs = choice.logprobs.content;
773
+ }
774
+ return {
775
+ content,
776
+ finishReason: mapOpenAIFinishReason(choice.finish_reason),
777
+ usage: {
778
+ inputTokens: (_h = (_g = response.usage) == null ? void 0 : _g.prompt_tokens) != null ? _h : void 0,
779
+ outputTokens: (_j = (_i = response.usage) == null ? void 0 : _i.completion_tokens) != null ? _j : void 0,
780
+ totalTokens: (_l = (_k = response.usage) == null ? void 0 : _k.total_tokens) != null ? _l : void 0,
781
+ reasoningTokens: (_m = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null ? _m : void 0,
782
+ cachedInputTokens: (_n = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null ? _n : void 0
783
+ },
784
+ request: { body },
785
+ response: {
786
+ ...getResponseMetadata(response),
787
+ headers: responseHeaders,
788
+ body: rawResponse
789
+ },
790
+ warnings,
791
+ providerMetadata
792
+ };
793
+ }
794
+ async doStream(options) {
795
+ const { args, warnings } = await this.getArgs(options);
796
+ const body = {
797
+ ...args,
798
+ stream: true,
799
+ stream_options: {
800
+ include_usage: true
801
+ }
802
+ };
803
+ const { responseHeaders, value: response } = await (0, import_provider_utils5.postJsonToApi)({
804
+ url: this.config.url({
805
+ path: "/chat/completions",
806
+ modelId: this.modelId
807
+ }),
808
+ headers: (0, import_provider_utils5.combineHeaders)(this.config.headers(), options.headers),
809
+ body,
810
+ failedResponseHandler: openaiFailedResponseHandler,
811
+ successfulResponseHandler: (0, import_provider_utils5.createEventSourceResponseHandler)(
812
+ openaiChatChunkSchema
813
+ ),
814
+ abortSignal: options.abortSignal,
815
+ fetch: this.config.fetch
816
+ });
817
+ const toolCalls = [];
818
+ let finishReason = "unknown";
819
+ const usage = {
820
+ inputTokens: void 0,
821
+ outputTokens: void 0,
822
+ totalTokens: void 0
823
+ };
824
+ let isFirstChunk = true;
825
+ let isActiveText = false;
826
+ const providerMetadata = { openai: {} };
827
+ return {
828
+ stream: response.pipeThrough(
829
+ new TransformStream({
830
+ start(controller) {
831
+ controller.enqueue({ type: "stream-start", warnings });
832
+ },
833
+ transform(chunk, controller) {
834
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x;
835
+ if (options.includeRawChunks) {
836
+ controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
837
+ }
838
+ if (!chunk.success) {
839
+ finishReason = "error";
840
+ controller.enqueue({ type: "error", error: chunk.error });
841
+ return;
842
+ }
843
+ const value = chunk.value;
844
+ if ("error" in value) {
845
+ finishReason = "error";
846
+ controller.enqueue({ type: "error", error: value.error });
847
+ return;
848
+ }
849
+ if (isFirstChunk) {
850
+ isFirstChunk = false;
851
+ controller.enqueue({
852
+ type: "response-metadata",
853
+ ...getResponseMetadata(value)
854
+ });
855
+ }
856
+ if (value.usage != null) {
857
+ usage.inputTokens = (_a = value.usage.prompt_tokens) != null ? _a : void 0;
858
+ usage.outputTokens = (_b = value.usage.completion_tokens) != null ? _b : void 0;
859
+ usage.totalTokens = (_c = value.usage.total_tokens) != null ? _c : void 0;
860
+ usage.reasoningTokens = (_e = (_d = value.usage.completion_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : void 0;
861
+ usage.cachedInputTokens = (_g = (_f = value.usage.prompt_tokens_details) == null ? void 0 : _f.cached_tokens) != null ? _g : void 0;
862
+ if (((_h = value.usage.completion_tokens_details) == null ? void 0 : _h.accepted_prediction_tokens) != null) {
863
+ providerMetadata.openai.acceptedPredictionTokens = (_i = value.usage.completion_tokens_details) == null ? void 0 : _i.accepted_prediction_tokens;
864
+ }
865
+ if (((_j = value.usage.completion_tokens_details) == null ? void 0 : _j.rejected_prediction_tokens) != null) {
866
+ providerMetadata.openai.rejectedPredictionTokens = (_k = value.usage.completion_tokens_details) == null ? void 0 : _k.rejected_prediction_tokens;
867
+ }
868
+ }
869
+ const choice = value.choices[0];
870
+ if ((choice == null ? void 0 : choice.finish_reason) != null) {
871
+ finishReason = mapOpenAIFinishReason(choice.finish_reason);
872
+ }
873
+ if (((_l = choice == null ? void 0 : choice.logprobs) == null ? void 0 : _l.content) != null) {
874
+ providerMetadata.openai.logprobs = choice.logprobs.content;
875
+ }
876
+ if ((choice == null ? void 0 : choice.delta) == null) {
877
+ return;
878
+ }
879
+ const delta = choice.delta;
880
+ if (delta.content != null) {
881
+ if (!isActiveText) {
882
+ controller.enqueue({ type: "text-start", id: "0" });
883
+ isActiveText = true;
884
+ }
885
+ controller.enqueue({
886
+ type: "text-delta",
887
+ id: "0",
888
+ delta: delta.content
889
+ });
890
+ }
891
+ if (delta.tool_calls != null) {
892
+ for (const toolCallDelta of delta.tool_calls) {
893
+ const index = toolCallDelta.index;
894
+ if (toolCalls[index] == null) {
895
+ if (toolCallDelta.type !== "function") {
896
+ throw new import_provider3.InvalidResponseDataError({
897
+ data: toolCallDelta,
898
+ message: `Expected 'function' type.`
899
+ });
900
+ }
901
+ if (toolCallDelta.id == null) {
902
+ throw new import_provider3.InvalidResponseDataError({
903
+ data: toolCallDelta,
904
+ message: `Expected 'id' to be a string.`
905
+ });
906
+ }
907
+ if (((_m = toolCallDelta.function) == null ? void 0 : _m.name) == null) {
908
+ throw new import_provider3.InvalidResponseDataError({
909
+ data: toolCallDelta,
910
+ message: `Expected 'function.name' to be a string.`
911
+ });
912
+ }
913
+ controller.enqueue({
914
+ type: "tool-input-start",
915
+ id: toolCallDelta.id,
916
+ toolName: toolCallDelta.function.name
917
+ });
918
+ toolCalls[index] = {
919
+ id: toolCallDelta.id,
920
+ type: "function",
921
+ function: {
922
+ name: toolCallDelta.function.name,
923
+ arguments: (_n = toolCallDelta.function.arguments) != null ? _n : ""
924
+ },
925
+ hasFinished: false
926
+ };
927
+ const toolCall2 = toolCalls[index];
928
+ if (((_o = toolCall2.function) == null ? void 0 : _o.name) != null && ((_p = toolCall2.function) == null ? void 0 : _p.arguments) != null) {
929
+ if (toolCall2.function.arguments.length > 0) {
930
+ controller.enqueue({
931
+ type: "tool-input-delta",
932
+ id: toolCall2.id,
933
+ delta: toolCall2.function.arguments
934
+ });
935
+ }
936
+ if ((0, import_provider_utils5.isParsableJson)(toolCall2.function.arguments)) {
937
+ controller.enqueue({
938
+ type: "tool-input-end",
939
+ id: toolCall2.id
940
+ });
941
+ controller.enqueue({
942
+ type: "tool-call",
943
+ toolCallId: (_q = toolCall2.id) != null ? _q : (0, import_provider_utils5.generateId)(),
944
+ toolName: toolCall2.function.name,
945
+ input: toolCall2.function.arguments
946
+ });
947
+ toolCall2.hasFinished = true;
948
+ }
949
+ }
950
+ continue;
951
+ }
952
+ const toolCall = toolCalls[index];
953
+ if (toolCall.hasFinished) {
954
+ continue;
955
+ }
956
+ if (((_r = toolCallDelta.function) == null ? void 0 : _r.arguments) != null) {
957
+ toolCall.function.arguments += (_t = (_s = toolCallDelta.function) == null ? void 0 : _s.arguments) != null ? _t : "";
958
+ }
959
+ controller.enqueue({
960
+ type: "tool-input-delta",
961
+ id: toolCall.id,
962
+ delta: (_u = toolCallDelta.function.arguments) != null ? _u : ""
963
+ });
964
+ if (((_v = toolCall.function) == null ? void 0 : _v.name) != null && ((_w = toolCall.function) == null ? void 0 : _w.arguments) != null && (0, import_provider_utils5.isParsableJson)(toolCall.function.arguments)) {
965
+ controller.enqueue({
966
+ type: "tool-input-end",
967
+ id: toolCall.id
968
+ });
969
+ controller.enqueue({
970
+ type: "tool-call",
971
+ toolCallId: (_x = toolCall.id) != null ? _x : (0, import_provider_utils5.generateId)(),
972
+ toolName: toolCall.function.name,
973
+ input: toolCall.function.arguments
974
+ });
975
+ toolCall.hasFinished = true;
976
+ }
977
+ }
978
+ }
979
+ if (delta.annotations != null) {
980
+ for (const annotation of delta.annotations) {
981
+ controller.enqueue({
982
+ type: "source",
983
+ sourceType: "url",
984
+ id: (0, import_provider_utils5.generateId)(),
985
+ url: annotation.url,
986
+ title: annotation.title
987
+ });
988
+ }
989
+ }
990
+ },
991
+ flush(controller) {
992
+ if (isActiveText) {
993
+ controller.enqueue({ type: "text-end", id: "0" });
994
+ }
995
+ controller.enqueue({
996
+ type: "finish",
997
+ finishReason,
998
+ usage,
999
+ ...providerMetadata != null ? { providerMetadata } : {}
1000
+ });
1001
+ }
1002
+ })
1003
+ ),
1004
+ request: { body },
1005
+ response: { headers: responseHeaders }
1006
+ };
1007
+ }
1008
+ };
1009
// Token accounting reported by the OpenAI Chat Completions API. Every field is
// nullish because OpenAI-compatible providers may omit any part of it.
var openaiTokenUsageSchema = import_v45.z
  .object({
    prompt_tokens: import_v45.z.number().nullish(),
    completion_tokens: import_v45.z.number().nullish(),
    total_tokens: import_v45.z.number().nullish(),
    // Prompt-side breakdown (e.g. prompt-cache hits).
    prompt_tokens_details: import_v45.z
      .object({
        cached_tokens: import_v45.z.number().nullish()
      })
      .nullish(),
    // Completion-side breakdown (reasoning + predicted-output tokens).
    completion_tokens_details: import_v45.z
      .object({
        reasoning_tokens: import_v45.z.number().nullish(),
        accepted_prediction_tokens: import_v45.z.number().nullish(),
        rejected_prediction_tokens: import_v45.z.number().nullish()
      })
      .nullish()
  })
  .nullish();
1022
// Shape of a non-streaming chat completion response. Envelope fields are kept
// nullish so partially-conforming OpenAI-compatible backends still parse.
var openaiChatResponseSchema = import_v45.z.object({
  id: import_v45.z.string().nullish(),
  created: import_v45.z.number().nullish(),
  model: import_v45.z.string().nullish(),
  choices: import_v45.z.array(
    import_v45.z.object({
      message: import_v45.z.object({
        role: import_v45.z.literal("assistant").nullish(),
        content: import_v45.z.string().nullish(),
        tool_calls: import_v45.z
          .array(
            import_v45.z.object({
              id: import_v45.z.string().nullish(),
              type: import_v45.z.literal("function"),
              function: import_v45.z.object({
                name: import_v45.z.string(),
                arguments: import_v45.z.string()
              })
            })
          )
          .nullish(),
        // URL citations attached by web-search-enabled models.
        annotations: import_v45.z
          .array(
            import_v45.z.object({
              type: import_v45.z.literal("url_citation"),
              start_index: import_v45.z.number(),
              end_index: import_v45.z.number(),
              url: import_v45.z.string(),
              title: import_v45.z.string()
            })
          )
          .nullish()
      }),
      index: import_v45.z.number(),
      logprobs: import_v45.z
        .object({
          content: import_v45.z
            .array(
              import_v45.z.object({
                token: import_v45.z.string(),
                logprob: import_v45.z.number(),
                top_logprobs: import_v45.z.array(
                  import_v45.z.object({
                    token: import_v45.z.string(),
                    logprob: import_v45.z.number()
                  })
                )
              })
            )
            .nullish()
        })
        .nullish(),
      finish_reason: import_v45.z.string().nullish()
    })
  ),
  usage: openaiTokenUsageSchema
});
1071
// Shape of one streaming SSE chunk: either a delta chunk or an inline error
// payload (some providers send errors on the data channel).
var openaiChatChunkSchema = import_v45.z.union([
  import_v45.z.object({
    id: import_v45.z.string().nullish(),
    created: import_v45.z.number().nullish(),
    model: import_v45.z.string().nullish(),
    choices: import_v45.z.array(
      import_v45.z.object({
        delta: import_v45.z
          .object({
            role: import_v45.z.enum(["assistant"]).nullish(),
            content: import_v45.z.string().nullish(),
            tool_calls: import_v45.z
              .array(
                import_v45.z.object({
                  index: import_v45.z.number(),
                  id: import_v45.z.string().nullish(),
                  type: import_v45.z.literal("function").nullish(),
                  function: import_v45.z.object({
                    name: import_v45.z.string().nullish(),
                    arguments: import_v45.z.string().nullish()
                  })
                })
              )
              .nullish(),
            // URL citations attached by web-search-enabled models.
            annotations: import_v45.z
              .array(
                import_v45.z.object({
                  type: import_v45.z.literal("url_citation"),
                  start_index: import_v45.z.number(),
                  end_index: import_v45.z.number(),
                  url: import_v45.z.string(),
                  title: import_v45.z.string()
                })
              )
              .nullish()
          })
          .nullish(),
        logprobs: import_v45.z
          .object({
            content: import_v45.z
              .array(
                import_v45.z.object({
                  token: import_v45.z.string(),
                  logprob: import_v45.z.number(),
                  top_logprobs: import_v45.z.array(
                    import_v45.z.object({
                      token: import_v45.z.string(),
                      logprob: import_v45.z.number()
                    })
                  )
                })
              )
              .nullish()
          })
          .nullish(),
        finish_reason: import_v45.z.string().nullish(),
        index: import_v45.z.number()
      })
    ),
    usage: openaiTokenUsageSchema
  }),
  openaiErrorDataSchema
]);
1124
// A model is treated as a reasoning model when its id belongs to the o-series
// ("o1", "o3", "o4-mini", ...) or the gpt-5 family.
function isReasoningModel(modelId) {
  return ["o", "gpt-5"].some((prefix) => modelId.startsWith(prefix));
}
1127
// Flex (lower-cost, slower) service tier is only offered for these families.
function supportsFlexProcessing(modelId) {
  const flexCapablePrefixes = ["o3", "o4-mini", "gpt-5"];
  return flexCapablePrefixes.some((prefix) => modelId.startsWith(prefix));
}
1130
// Priority service tier: gpt-4*, gpt-5* (except gpt-5-nano), o3* and o4-mini*.
function supportsPriorityProcessing(modelId) {
  if (modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini")) {
    return true;
  }
  if (modelId.startsWith("gpt-5")) {
    return !modelId.startsWith("gpt-5-nano");
  }
  return modelId.startsWith("o3") || modelId.startsWith("o4-mini");
}
1133
// How system messages must be delivered to the given model:
// "system" for non-reasoning models, otherwise the per-model override from
// `reasoningModels` (falling back to "developer").
function getSystemMessageMode(modelId) {
  if (!isReasoningModel(modelId)) {
    return "system";
  }
  return reasoningModels[modelId]?.systemMessageMode ?? "developer";
}
1140
// Per-model system-message handling overrides for reasoning models.
var reasoningModels = Object.fromEntries([
  // Early o1 models reject system/developer messages entirely.
  ...[
    "o1-mini",
    "o1-mini-2024-09-12",
    "o1-preview",
    "o1-preview-2024-09-12"
  ].map((id) => [id, { systemMessageMode: "remove" }]),
  // Later o-series models take "developer" messages instead of "system".
  ...[
    "o3",
    "o3-2025-04-16",
    "o3-mini",
    "o3-mini-2025-01-31",
    "o4-mini",
    "o4-mini-2025-04-16"
  ].map((id) => [id, { systemMessageMode: "developer" }])
]);
1172
+
1173
+ // src/openai-completion-language-model.ts
1174
+ var import_provider_utils6 = require("@ai-sdk/provider-utils");
1175
+ var import_v47 = require("zod/v4");
1176
+
1177
+ // src/convert-to-openai-completion-prompt.ts
1178
+ var import_provider4 = require("@ai-sdk/provider");
1179
/**
 * Flattens a structured chat prompt into a single text prompt for the legacy
 * /completions endpoint.
 *
 * A leading system message becomes a plain preamble; user/assistant turns are
 * rendered as "<role>:\n<text>\n\n" sections; a trailing "assistant:\n" cue
 * is appended. Returns `{ prompt, stopSequences }` where the stop sequence
 * prevents the model from hallucinating the next user turn.
 *
 * Throws InvalidPromptError for a system message that is not first, and
 * UnsupportedFunctionalityError for tool calls / tool messages, which the
 * completions API cannot express.
 */
function convertToOpenAICompletionPrompt({
  prompt,
  user = "user",
  assistant = "assistant"
}) {
  let messages = prompt;
  let text = "";
  // Optional chaining guards against an empty prompt array (the original
  // crashed with a TypeError on `prompt[0].role`).
  if (messages[0]?.role === "system") {
    text += `${messages[0].content}\n\n`;
    messages = messages.slice(1);
  }
  for (const { role, content } of messages) {
    switch (role) {
      case "system": {
        throw new import_provider4.InvalidPromptError({
          // Bug fix: this was a double-quoted string, so "${content}" was
          // emitted literally instead of being interpolated.
          message: `Unexpected system message in prompt: ${content}`,
          prompt: messages
        });
      }
      case "user": {
        const userMessage = content
          .map((part) => {
            switch (part.type) {
              case "text":
                return part.text;
            }
          })
          .filter(Boolean)
          .join("");
        text += `${user}:\n${userMessage}\n\n`;
        break;
      }
      case "assistant": {
        const assistantMessage = content
          .map((part) => {
            switch (part.type) {
              case "text":
                return part.text;
              case "tool-call":
                throw new import_provider4.UnsupportedFunctionalityError({
                  functionality: "tool-call messages"
                });
            }
          })
          .join("");
        text += `${assistant}:\n${assistantMessage}\n\n`;
        break;
      }
      case "tool": {
        throw new import_provider4.UnsupportedFunctionalityError({
          functionality: "tool messages"
        });
      }
      default: {
        const _exhaustiveCheck = role;
        throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
      }
    }
  }
  // Trailing cue so the model answers as the assistant.
  text += `${assistant}:\n`;
  return {
    prompt: text,
    stopSequences: [`\n${user}:`]
  };
}
1251
+
1252
+ // src/openai-completion-options.ts
1253
+ var import_v46 = require("zod/v4");
1254
// Provider-specific options accepted by the OpenAI /completions endpoint.
var openaiCompletionProviderOptions = import_v46.z.object({
  /**
   * Echo back the prompt in addition to the completion.
   */
  echo: import_v46.z.boolean().optional(),
  /**
   * Map from token id (GPT tokenizer) to a bias in [-100, 100]. The bias is
   * added to the logits before sampling; values near +/-100 effectively force
   * or ban a token. Example: `{"50256": -100}` prevents the `<|endoftext|>`
   * token from being generated.
   */
  logitBias: import_v46.z.record(import_v46.z.string(), import_v46.z.number()).optional(),
  /**
   * The suffix that comes after a completion of inserted text.
   */
  suffix: import_v46.z.string().optional(),
  /**
   * Stable end-user identifier that helps OpenAI monitor and detect abuse.
   */
  user: import_v46.z.string().optional(),
  /**
   * Log probabilities of generated tokens. `true` returns the probabilities
   * of the chosen tokens; a number returns the top-n alternatives per
   * position. Enabling this increases response size and latency.
   */
  logprobs: import_v46.z.union([import_v46.z.boolean(), import_v46.z.number()]).optional()
});
1294
+
1295
+ // src/openai-completion-language-model.ts
1296
// Language model wrapper for the legacy OpenAI /completions endpoint.
var OpenAICompletionLanguageModel = class {
  constructor(modelId, config) {
    this.specificationVersion = "v2";
    // No URLs are supported for completion models.
    this.supportedUrls = {};
    this.modelId = modelId;
    this.config = config;
  }
  // First segment of the provider id, e.g. "openai" for "openai.completion".
  get providerOptionsName() {
    return this.config.provider.split(".")[0].trim();
  }
  get provider() {
    return this.config.provider;
  }
  // Translate standardized call options into an OpenAI /completions payload,
  // collecting warnings for settings the endpoint cannot express.
  async getArgs({
    prompt,
    maxOutputTokens,
    temperature,
    topP,
    topK,
    frequencyPenalty,
    presencePenalty,
    stopSequences: userStopSequences,
    responseFormat,
    tools,
    toolChoice,
    seed,
    providerOptions
  }) {
    const warnings = [];
    // Merge options given under "openai" with those given under the concrete
    // provider name; the provider-name entries win.
    const openaiOptions = {
      ...await (0, import_provider_utils6.parseProviderOptions)({
        provider: "openai",
        providerOptions,
        schema: openaiCompletionProviderOptions
      }),
      ...await (0, import_provider_utils6.parseProviderOptions)({
        provider: this.providerOptionsName,
        providerOptions,
        schema: openaiCompletionProviderOptions
      })
    };
    if (topK != null) {
      warnings.push({ type: "unsupported-setting", setting: "topK" });
    }
    if (tools?.length) {
      warnings.push({ type: "unsupported-setting", setting: "tools" });
    }
    if (toolChoice != null) {
      warnings.push({ type: "unsupported-setting", setting: "toolChoice" });
    }
    if (responseFormat != null && responseFormat.type !== "text") {
      warnings.push({
        type: "unsupported-setting",
        setting: "responseFormat",
        details: "JSON response format is not supported."
      });
    }
    const { prompt: completionPrompt, stopSequences } = convertToOpenAICompletionPrompt({ prompt });
    const stop = [...stopSequences ?? [], ...userStopSequences ?? []];
    return {
      args: {
        // model id:
        model: this.modelId,
        // model specific settings:
        echo: openaiOptions.echo,
        logit_bias: openaiOptions.logitBias,
        // `true` maps to 0 (only the chosen token), `false` to undefined,
        // a number passes through unchanged.
        logprobs: openaiOptions?.logprobs === true ? 0 : openaiOptions?.logprobs === false ? void 0 : openaiOptions?.logprobs,
        suffix: openaiOptions.suffix,
        user: openaiOptions.user,
        // standardized settings:
        max_tokens: maxOutputTokens,
        temperature,
        top_p: topP,
        frequency_penalty: frequencyPenalty,
        presence_penalty: presencePenalty,
        seed,
        // prompt:
        prompt: completionPrompt,
        // stop sequences (prompt-derived + user-provided):
        stop: stop.length > 0 ? stop : void 0
      },
      warnings
    };
  }
  async doGenerate(options) {
    const { args, warnings } = await this.getArgs(options);
    const {
      responseHeaders,
      value: response,
      rawValue: rawResponse
    } = await (0, import_provider_utils6.postJsonToApi)({
      url: this.config.url({
        path: "/completions",
        modelId: this.modelId
      }),
      headers: (0, import_provider_utils6.combineHeaders)(this.config.headers(), options.headers),
      body: args,
      failedResponseHandler: openaiFailedResponseHandler,
      successfulResponseHandler: (0, import_provider_utils6.createJsonResponseHandler)(
        openaiCompletionResponseSchema
      ),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
    // Only the first choice is surfaced (n > 1 is not part of the spec).
    const choice = response.choices[0];
    const providerMetadata = { openai: {} };
    if (choice.logprobs != null) {
      providerMetadata.openai.logprobs = choice.logprobs;
    }
    return {
      content: [{ type: "text", text: choice.text }],
      usage: {
        inputTokens: response.usage?.prompt_tokens,
        outputTokens: response.usage?.completion_tokens,
        totalTokens: response.usage?.total_tokens
      },
      finishReason: mapOpenAIFinishReason(choice.finish_reason),
      request: { body: args },
      response: {
        ...getResponseMetadata(response),
        headers: responseHeaders,
        body: rawResponse
      },
      providerMetadata,
      warnings
    };
  }
  async doStream(options) {
    const { args, warnings } = await this.getArgs(options);
    const body = {
      ...args,
      stream: true,
      // Ask OpenAI to append a final usage-bearing chunk.
      stream_options: {
        include_usage: true
      }
    };
    const { responseHeaders, value: response } = await (0, import_provider_utils6.postJsonToApi)({
      url: this.config.url({
        path: "/completions",
        modelId: this.modelId
      }),
      headers: (0, import_provider_utils6.combineHeaders)(this.config.headers(), options.headers),
      body,
      failedResponseHandler: openaiFailedResponseHandler,
      successfulResponseHandler: (0, import_provider_utils6.createEventSourceResponseHandler)(
        openaiCompletionChunkSchema
      ),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
    let finishReason = "unknown";
    const providerMetadata = { openai: {} };
    const usage = {
      inputTokens: void 0,
      outputTokens: void 0,
      totalTokens: void 0
    };
    let isFirstChunk = true;
    return {
      stream: response.pipeThrough(
        new TransformStream({
          start(controller) {
            controller.enqueue({ type: "stream-start", warnings });
          },
          transform(chunk, controller) {
            if (options.includeRawChunks) {
              controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
            }
            // Schema-level parse failure for this chunk.
            if (!chunk.success) {
              finishReason = "error";
              controller.enqueue({ type: "error", error: chunk.error });
              return;
            }
            const value = chunk.value;
            // Providers may deliver errors on the data channel.
            if ("error" in value) {
              finishReason = "error";
              controller.enqueue({ type: "error", error: value.error });
              return;
            }
            if (isFirstChunk) {
              isFirstChunk = false;
              controller.enqueue({
                type: "response-metadata",
                ...getResponseMetadata(value)
              });
              controller.enqueue({ type: "text-start", id: "0" });
            }
            if (value.usage != null) {
              usage.inputTokens = value.usage.prompt_tokens;
              usage.outputTokens = value.usage.completion_tokens;
              usage.totalTokens = value.usage.total_tokens;
            }
            const choice = value.choices[0];
            if (choice?.finish_reason != null) {
              finishReason = mapOpenAIFinishReason(choice.finish_reason);
            }
            if (choice?.logprobs != null) {
              providerMetadata.openai.logprobs = choice.logprobs;
            }
            if (choice?.text != null && choice.text.length > 0) {
              controller.enqueue({
                type: "text-delta",
                id: "0",
                delta: choice.text
              });
            }
          },
          flush(controller) {
            // text-start was only emitted once a chunk arrived.
            if (!isFirstChunk) {
              controller.enqueue({ type: "text-end", id: "0" });
            }
            controller.enqueue({
              type: "finish",
              finishReason,
              providerMetadata,
              usage
            });
          }
        })
      ),
      request: { body },
      response: { headers: responseHeaders }
    };
  }
};
1524
// Usage block shared by the /completions response and its stream chunks.
var usageSchema = import_v47.z.object({
  prompt_tokens: import_v47.z.number(),
  completion_tokens: import_v47.z.number(),
  total_tokens: import_v47.z.number()
});
// Non-streaming /completions response.
var openaiCompletionResponseSchema = import_v47.z.object({
  id: import_v47.z.string().nullish(),
  created: import_v47.z.number().nullish(),
  model: import_v47.z.string().nullish(),
  choices: import_v47.z.array(
    import_v47.z.object({
      text: import_v47.z.string(),
      finish_reason: import_v47.z.string(),
      logprobs: import_v47.z
        .object({
          tokens: import_v47.z.array(import_v47.z.string()),
          token_logprobs: import_v47.z.array(import_v47.z.number()),
          top_logprobs: import_v47.z
            .array(import_v47.z.record(import_v47.z.string(), import_v47.z.number()))
            .nullish()
        })
        .nullish()
    })
  ),
  usage: usageSchema.nullish()
});
// Streaming /completions chunk: either a data chunk or an error payload.
var openaiCompletionChunkSchema = import_v47.z.union([
  import_v47.z.object({
    id: import_v47.z.string().nullish(),
    created: import_v47.z.number().nullish(),
    model: import_v47.z.string().nullish(),
    choices: import_v47.z.array(
      import_v47.z.object({
        text: import_v47.z.string(),
        finish_reason: import_v47.z.string().nullish(),
        index: import_v47.z.number(),
        logprobs: import_v47.z
          .object({
            tokens: import_v47.z.array(import_v47.z.string()),
            token_logprobs: import_v47.z.array(import_v47.z.number()),
            top_logprobs: import_v47.z
              .array(import_v47.z.record(import_v47.z.string(), import_v47.z.number()))
              .nullish()
          })
          .nullish()
      })
    ),
    usage: usageSchema.nullish()
  }),
  openaiErrorDataSchema
]);
1567
+
1568
+ // src/openai-embedding-model.ts
1569
+ var import_provider5 = require("@ai-sdk/provider");
1570
+ var import_provider_utils7 = require("@ai-sdk/provider-utils");
1571
+ var import_v49 = require("zod/v4");
1572
+
1573
+ // src/openai-embedding-options.ts
1574
+ var import_v48 = require("zod/v4");
1575
// Provider-specific options accepted by the OpenAI /embeddings endpoint.
var openaiEmbeddingProviderOptions = import_v48.z.object({
  /**
   * The number of dimensions the resulting output embeddings should have.
   * Only supported in text-embedding-3 and later models.
   */
  dimensions: import_v48.z.number().optional(),
  /**
   * Stable end-user identifier that helps OpenAI monitor and detect abuse.
   */
  user: import_v48.z.string().optional()
});
1587
+
1588
+ // src/openai-embedding-model.ts
1589
// Embedding model wrapper for the OpenAI /embeddings endpoint.
var OpenAIEmbeddingModel = class {
  constructor(modelId, config) {
    this.specificationVersion = "v2";
    // Documented OpenAI limit on inputs per request.
    this.maxEmbeddingsPerCall = 2048;
    this.supportsParallelCalls = true;
    this.modelId = modelId;
    this.config = config;
  }
  get provider() {
    return this.config.provider;
  }
  async doEmbed({
    values,
    headers,
    abortSignal,
    providerOptions
  }) {
    // Reject oversized batches up front with a typed error.
    if (values.length > this.maxEmbeddingsPerCall) {
      throw new import_provider5.TooManyEmbeddingValuesForCallError({
        provider: this.provider,
        modelId: this.modelId,
        maxEmbeddingsPerCall: this.maxEmbeddingsPerCall,
        values
      });
    }
    const openaiOptions = await (0, import_provider_utils7.parseProviderOptions)({
      provider: "openai",
      providerOptions,
      schema: openaiEmbeddingProviderOptions
    }) ?? {};
    const {
      responseHeaders,
      value: response,
      rawValue
    } = await (0, import_provider_utils7.postJsonToApi)({
      url: this.config.url({
        path: "/embeddings",
        modelId: this.modelId
      }),
      headers: (0, import_provider_utils7.combineHeaders)(this.config.headers(), headers),
      body: {
        model: this.modelId,
        input: values,
        encoding_format: "float",
        dimensions: openaiOptions.dimensions,
        user: openaiOptions.user
      },
      failedResponseHandler: openaiFailedResponseHandler,
      successfulResponseHandler: (0, import_provider_utils7.createJsonResponseHandler)(
        openaiTextEmbeddingResponseSchema
      ),
      abortSignal,
      fetch: this.config.fetch
    });
    return {
      embeddings: response.data.map((item) => item.embedding),
      usage: response.usage ? { tokens: response.usage.prompt_tokens } : void 0,
      response: { headers: responseHeaders, body: rawValue }
    };
  }
};
1651
// Minimal /embeddings response shape: float vectors plus optional usage.
var openaiTextEmbeddingResponseSchema = import_v49.z.object({
  data: import_v49.z.array(
    import_v49.z.object({ embedding: import_v49.z.array(import_v49.z.number()) })
  ),
  usage: import_v49.z.object({ prompt_tokens: import_v49.z.number() }).nullish()
});
1655
+
1656
+ // src/openai-image-model.ts
1657
+ var import_provider_utils8 = require("@ai-sdk/provider-utils");
1658
+ var import_v410 = require("zod/v4");
1659
+
1660
+ // src/openai-image-settings.ts
1661
// Upper bound on images per API call for each known image model.
var modelMaxImagesPerCall = {
  "dall-e-3": 1,
  "dall-e-2": 10,
  "gpt-image-1": 10
};
// Models that always return b64_json and reject an explicit response_format.
var hasDefaultResponseFormat = /* @__PURE__ */ new Set(["gpt-image-1"]);
1667
+
1668
+ // src/openai-image-model.ts
1669
// Image model wrapper for the OpenAI /images/generations endpoint.
var OpenAIImageModel = class {
  constructor(modelId, config) {
    this.modelId = modelId;
    this.config = config;
    this.specificationVersion = "v2";
  }
  // Per-model batch cap; unknown models are conservatively limited to 1.
  get maxImagesPerCall() {
    return modelMaxImagesPerCall[this.modelId] ?? 1;
  }
  get provider() {
    return this.config.provider;
  }
  async doGenerate({
    prompt,
    n,
    size,
    aspectRatio,
    seed,
    providerOptions,
    headers,
    abortSignal
  }) {
    const warnings = [];
    if (aspectRatio != null) {
      warnings.push({
        type: "unsupported-setting",
        setting: "aspectRatio",
        details: "This model does not support aspect ratio. Use `size` instead."
      });
    }
    if (seed != null) {
      warnings.push({ type: "unsupported-setting", setting: "seed" });
    }
    // _internal.currentDate is a test hook; fall back to the real clock.
    const currentDate = this.config._internal?.currentDate?.() ?? /* @__PURE__ */ new Date();
    const { value: response, responseHeaders } = await (0, import_provider_utils8.postJsonToApi)({
      url: this.config.url({
        path: "/images/generations",
        modelId: this.modelId
      }),
      headers: (0, import_provider_utils8.combineHeaders)(this.config.headers(), headers),
      body: {
        model: this.modelId,
        prompt,
        n,
        size,
        ...providerOptions.openai ?? {},
        // Ask for base64 unless the model already defaults to it and
        // rejects the response_format parameter.
        ...!hasDefaultResponseFormat.has(this.modelId) ? { response_format: "b64_json" } : {}
      },
      failedResponseHandler: openaiFailedResponseHandler,
      successfulResponseHandler: (0, import_provider_utils8.createJsonResponseHandler)(
        openaiImageResponseSchema
      ),
      abortSignal,
      fetch: this.config.fetch
    });
    return {
      images: response.data.map((item) => item.b64_json),
      warnings,
      response: {
        timestamp: currentDate,
        modelId: this.modelId,
        headers: responseHeaders
      },
      providerMetadata: {
        openai: {
          // One entry per image; null when no revised prompt was returned.
          images: response.data.map(
            (item) => item.revised_prompt ? {
              revisedPrompt: item.revised_prompt
            } : null
          )
        }
      }
    };
  }
};
1746
// /images/generations response: base64 payload plus optional revised prompt.
var openaiImageResponseSchema = import_v410.z.object({
  data: import_v410.z.array(
    import_v410.z.object({
      b64_json: import_v410.z.string(),
      revised_prompt: import_v410.z.string().optional()
    })
  )
});
1751
+
1752
+ // src/openai-transcription-model.ts
1753
+ var import_provider_utils9 = require("@ai-sdk/provider-utils");
1754
+ var import_v412 = require("zod/v4");
1755
+
1756
+ // src/openai-transcription-options.ts
1757
+ var import_v411 = require("zod/v4");
1758
// Provider-specific options accepted by the OpenAI transcription endpoint.
var openAITranscriptionProviderOptions = import_v411.z.object({
  /**
   * Additional information to include in the transcription response.
   */
  include: import_v411.z.array(import_v411.z.string()).optional(),
  /**
   * The language of the input audio in ISO-639-1 format.
   */
  language: import_v411.z.string().optional(),
  /**
   * An optional text to guide the model's style or to continue a previous
   * audio segment.
   */
  prompt: import_v411.z.string().optional(),
  /**
   * The sampling temperature, between 0 and 1.
   * @default 0
   */
  temperature: import_v411.z.number().min(0).max(1).default(0).optional(),
  /**
   * The timestamp granularities to populate for this transcription.
   * @default ['segment']
   */
  timestampGranularities: import_v411.z.array(import_v411.z.enum(["word", "segment"])).default(["segment"]).optional()
});
1782
+
1783
+ // src/openai-transcription-model.ts
1784
// Whisper's verbose responses report the detected language as an English
// name; map each supported name to its ISO-639-1 code.
var languageMap = {
  afrikaans: "af", arabic: "ar", armenian: "hy", azerbaijani: "az",
  belarusian: "be", bosnian: "bs", bulgarian: "bg", catalan: "ca",
  chinese: "zh", croatian: "hr", czech: "cs", danish: "da",
  dutch: "nl", english: "en", estonian: "et", finnish: "fi",
  french: "fr", galician: "gl", german: "de", greek: "el",
  hebrew: "he", hindi: "hi", hungarian: "hu", icelandic: "is",
  indonesian: "id", italian: "it", japanese: "ja", kannada: "kn",
  kazakh: "kk", korean: "ko", latvian: "lv", lithuanian: "lt",
  macedonian: "mk", malay: "ms", marathi: "mr", maori: "mi",
  nepali: "ne", norwegian: "no", persian: "fa", polish: "pl",
  portuguese: "pt", romanian: "ro", russian: "ru", serbian: "sr",
  slovak: "sk", slovenian: "sl", spanish: "es", swahili: "sw",
  swedish: "sv", tagalog: "tl", tamil: "ta", thai: "th",
  turkish: "tr", ukrainian: "uk", urdu: "ur", vietnamese: "vi",
  welsh: "cy"
};
1843
// Transcription model for the OpenAI `/audio/transcriptions` endpoint.
// Implements specification v2 of the AI SDK transcription-model interface.
var OpenAITranscriptionModel = class {
  // @param modelId OpenAI transcription model id.
  // @param config  Provider configuration (provider name, url builder,
  //                headers factory, fetch override, _internal test hooks).
  constructor(modelId, config) {
    this.modelId = modelId;
    this.config = config;
    this.specificationVersion = "v2";
  }
  // Provider identifier supplied via the config.
  get provider() {
    return this.config.provider;
  }
  // Builds the multipart/form-data body for the transcription request.
  //
  // @param audio           Audio payload, either raw bytes (Uint8Array) or a
  //                        base64-encoded string.
  // @param mediaType       MIME type attached to the uploaded file.
  // @param providerOptions Options under `providerOptions.openai`, validated
  //                        against `openAITranscriptionProviderOptions`.
  // @returns {Promise<{ formData: FormData, warnings: Array }>}
  async getArgs({
    audio,
    mediaType,
    providerOptions
  }) {
    const warnings = [];
    const openAIOptions = await (0, import_provider_utils9.parseProviderOptions)({
      provider: "openai",
      providerOptions,
      schema: openAITranscriptionProviderOptions
    });
    const formData = new FormData();
    // Base64 input is decoded to bytes before wrapping in a Blob.
    const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([(0, import_provider_utils9.convertBase64ToUint8Array)(audio)]);
    formData.append("model", this.modelId);
    formData.append("file", new File([blob], "audio", { type: mediaType }));
    if (openAIOptions) {
      // Map camelCase provider options to the snake_case API field names.
      const transcriptionModelOptions = {
        include: openAIOptions.include,
        language: openAIOptions.language,
        prompt: openAIOptions.prompt,
        temperature: openAIOptions.temperature,
        timestamp_granularities: openAIOptions.timestampGranularities
      };
      for (const [key, value] of Object.entries(transcriptionModelOptions)) {
        // Skip null/undefined; note String(value) flattens array values
        // (e.g. timestamp_granularities) into a comma-joined string.
        if (value != null) {
          formData.append(key, String(value));
        }
      }
    }
    return {
      formData,
      warnings
    };
  }
  // Performs the transcription request and maps the JSON response to the
  // AI SDK result shape (text, word segments, language, duration, metadata).
  async doGenerate(options) {
    var _a, _b, _c, _d, _e, _f;
    // config._internal.currentDate is an optional clock override (e.g. for
    // tests); falls back to the real current time.
    const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
    const { formData, warnings } = await this.getArgs(options);
    const {
      value: response,
      responseHeaders,
      rawValue: rawResponse
    } = await (0, import_provider_utils9.postFormDataToApi)({
      url: this.config.url({
        path: "/audio/transcriptions",
        modelId: this.modelId
      }),
      headers: (0, import_provider_utils9.combineHeaders)(this.config.headers(), options.headers),
      formData,
      failedResponseHandler: openaiFailedResponseHandler,
      successfulResponseHandler: (0, import_provider_utils9.createJsonResponseHandler)(
        openaiTranscriptionResponseSchema
      ),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
    // The API returns a language NAME (e.g. "english"); languageMap converts
    // it to an ISO-639-1 code. Unknown names yield undefined.
    const language = response.language != null && response.language in languageMap ? languageMap[response.language] : void 0;
    return {
      text: response.text,
      // Word-level timestamps become segments; empty when `words` is absent.
      segments: (_e = (_d = response.words) == null ? void 0 : _d.map((word) => ({
        text: word.word,
        startSecond: word.start,
        endSecond: word.end
      }))) != null ? _e : [],
      language,
      durationInSeconds: (_f = response.duration) != null ? _f : void 0,
      warnings,
      response: {
        timestamp: currentDate,
        modelId: this.modelId,
        headers: responseHeaders,
        body: rawResponse
      }
    };
  }
};
1928
// Zod schema for the `/audio/transcriptions` JSON response. Only `text` is
// guaranteed; `language`, `duration`, and word-level timestamps are marked
// nullish because they are not present in every response variant.
var openaiTranscriptionResponseSchema = import_v412.z.object({
  text: import_v412.z.string(),
  language: import_v412.z.string().nullish(),
  duration: import_v412.z.number().nullish(),
  words: import_v412.z.array(
    import_v412.z.object({
      word: import_v412.z.string(),
      start: import_v412.z.number(),
      end: import_v412.z.number()
    })
  ).nullish()
});
1940
+
1941
+ // src/openai-speech-model.ts
1942
+ var import_provider_utils10 = require("@ai-sdk/provider-utils");
1943
+ var import_v413 = require("zod/v4");
1944
// Provider options accepted under `providerOptions.openai` for speech
// requests: optional voice-direction `instructions` and a playback `speed`
// clamped to [0.25, 4] with a default of 1.
// NOTE(review): OpenAISpeechModel.getArgs validates these options but does
// not currently forward the parsed values into the request body.
var OpenAIProviderOptionsSchema = import_v413.z.object({
  instructions: import_v413.z.string().nullish(),
  speed: import_v413.z.number().min(0.25).max(4).default(1).nullish()
});
1948
// Speech-synthesis model for the OpenAI `/audio/speech` endpoint.
// Implements specification v2 of the AI SDK speech-model interface.
var OpenAISpeechModel = class {
  // @param modelId OpenAI speech model id (sent as `model`).
  // @param config  Provider configuration (provider name, url builder,
  //                headers factory, fetch override, _internal test hooks).
  constructor(modelId, config) {
    this.modelId = modelId;
    this.config = config;
    this.specificationVersion = "v2";
  }
  // Provider identifier supplied via the config.
  get provider() {
    return this.config.provider;
  }
  // Builds the JSON request body for `/audio/speech` plus warnings for
  // settings this endpoint does not support.
  //
  // @param text            Text to synthesize (sent as `input`).
  // @param voice           Voice id; defaults to "alloy".
  // @param outputFormat    Requested audio format; unsupported values fall
  //                        back to "mp3" with a warning.
  // @param speed           Playback speed, passed through as-is.
  // @param instructions    Voice-direction instructions, passed through.
  // @param language        Not supported by this endpoint; warned and ignored.
  // @param providerOptions Validated against OpenAIProviderOptionsSchema.
  // @returns {Promise<{ requestBody: object, warnings: Array }>}
  async getArgs({
    text,
    voice = "alloy",
    outputFormat = "mp3",
    speed,
    instructions,
    language,
    providerOptions
  }) {
    const warnings = [];
    // Validate `providerOptions.openai` (rejects invalid values). The parsed
    // values are not merged into the request body: `speed` and `instructions`
    // arrive via the call options above. The originally published code looped
    // over an always-empty object here — dead code, removed with no behavior
    // change. TODO(review): decide whether provider-option values should be
    // applied to the request body.
    await (0, import_provider_utils10.parseProviderOptions)({
      provider: "openai",
      providerOptions,
      schema: OpenAIProviderOptionsSchema
    });
    const requestBody = {
      model: this.modelId,
      input: text,
      voice,
      response_format: "mp3",
      speed,
      instructions
    };
    if (outputFormat) {
      if (["mp3", "opus", "aac", "flac", "wav", "pcm"].includes(outputFormat)) {
        requestBody.response_format = outputFormat;
      } else {
        warnings.push({
          type: "unsupported-setting",
          setting: "outputFormat",
          details: `Unsupported output format: ${outputFormat}. Using mp3 instead.`
        });
      }
    }
    if (language) {
      warnings.push({
        type: "unsupported-setting",
        setting: "language",
        details: `OpenAI speech models do not support language selection. Language parameter "${language}" was ignored.`
      });
    }
    return {
      requestBody,
      warnings
    };
  }
  // Performs the speech request and returns the binary audio plus request /
  // response metadata.
  async doGenerate(options) {
    var _a, _b, _c;
    // config._internal.currentDate is an optional clock override (e.g. for
    // tests); falls back to the real current time.
    const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
    const { requestBody, warnings } = await this.getArgs(options);
    const {
      value: audio,
      responseHeaders,
      rawValue: rawResponse
    } = await (0, import_provider_utils10.postJsonToApi)({
      url: this.config.url({
        path: "/audio/speech",
        modelId: this.modelId
      }),
      headers: (0, import_provider_utils10.combineHeaders)(this.config.headers(), options.headers),
      body: requestBody,
      failedResponseHandler: openaiFailedResponseHandler,
      // Response is raw audio bytes, not JSON.
      successfulResponseHandler: (0, import_provider_utils10.createBinaryResponseHandler)(),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
    return {
      audio,
      warnings,
      request: {
        body: JSON.stringify(requestBody)
      },
      response: {
        timestamp: currentDate,
        modelId: this.modelId,
        headers: responseHeaders,
        body: rawResponse
      }
    };
  }
};
2047
+
2048
+ // src/responses/openai-responses-language-model.ts
2049
+ var import_provider8 = require("@ai-sdk/provider");
2050
+ var import_provider_utils13 = require("@ai-sdk/provider-utils");
2051
+ var import_v415 = require("zod/v4");
2052
+
2053
+ // src/responses/convert-to-openai-responses-messages.ts
2054
+ var import_provider6 = require("@ai-sdk/provider");
2055
+ var import_provider_utils11 = require("@ai-sdk/provider-utils");
2056
+ var import_v414 = require("zod/v4");
2057
+ var import_provider_utils12 = require("@ai-sdk/provider-utils");
2058
// Converts an AI SDK prompt (system/user/assistant/tool messages) into the
// OpenAI Responses API `input` item list.
//
// @param prompt            AI SDK prompt: array of { role, content }.
// @param systemMessageMode How system messages are emitted: "system",
//                          "developer", or "remove" (dropped with a warning).
// @returns {Promise<{ messages: Array, warnings: Array }>}
// @throws UnsupportedFunctionalityError for PDF-by-URL and unknown file
//         media types; Error for unknown roles / system message modes.
async function convertToOpenAIResponsesMessages({
  prompt,
  systemMessageMode
}) {
  var _a, _b, _c, _d, _e, _f;
  const messages = [];
  const warnings = [];
  for (const { role, content } of prompt) {
    switch (role) {
      case "system": {
        switch (systemMessageMode) {
          case "system": {
            messages.push({ role: "system", content });
            break;
          }
          case "developer": {
            messages.push({ role: "developer", content });
            break;
          }
          case "remove": {
            messages.push;
            break;
          }
          default: {
            // Exhaustiveness guard (compiled from a TypeScript `never` check).
            const _exhaustiveCheck = systemMessageMode;
            throw new Error(
              `Unsupported system message mode: ${_exhaustiveCheck}`
            );
          }
        }
        break;
      }
      case "user": {
        messages.push({
          role: "user",
          content: content.map((part, index) => {
            var _a2, _b2, _c2;
            switch (part.type) {
              case "text": {
                return { type: "input_text", text: part.text };
              }
              case "file": {
                if (part.mediaType.startsWith("image/")) {
                  // "image/*" wildcard defaults to JPEG for the data URL.
                  const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
                  return {
                    type: "input_image",
                    // URL -> image_url; OpenAI file id ("file-…") -> file_id;
                    // otherwise inline the data as a base64 data URL.
                    ...part.data instanceof URL ? { image_url: part.data.toString() } : typeof part.data === "string" && part.data.startsWith("file-") ? { file_id: part.data } : {
                      image_url: `data:${mediaType};base64,${part.data}`
                    },
                    detail: (_b2 = (_a2 = part.providerOptions) == null ? void 0 : _a2.openai) == null ? void 0 : _b2.imageDetail
                  };
                } else if (part.mediaType === "application/pdf") {
                  if (part.data instanceof URL) {
                    throw new import_provider6.UnsupportedFunctionalityError({
                      functionality: "PDF file parts with URLs"
                    });
                  }
                  return {
                    type: "input_file",
                    ...typeof part.data === "string" && part.data.startsWith("file-") ? { file_id: part.data } : {
                      filename: (_c2 = part.filename) != null ? _c2 : `part-${index}.pdf`,
                      file_data: `data:application/pdf;base64,${(0, import_provider_utils12.convertToBase64)(part.data)}`
                    }
                  };
                } else {
                  throw new import_provider6.UnsupportedFunctionalityError({
                    functionality: `file part media type ${part.mediaType}`
                  });
                }
              }
            }
          })
        });
        break;
      }
      case "assistant": {
        // Reasoning parts sharing an itemId are merged into one "reasoning"
        // item; this map tracks the items already pushed, keyed by id.
        const reasoningMessages = {};
        for (const part of content) {
          switch (part.type) {
            case "text": {
              messages.push({
                role: "assistant",
                content: [{ type: "output_text", text: part.text }],
                id: (_c = (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.itemId) != null ? _c : void 0
              });
              break;
            }
            case "tool-call": {
              // Provider-executed tool calls are not replayed to the API.
              if (part.providerExecuted) {
                break;
              }
              messages.push({
                type: "function_call",
                call_id: part.toolCallId,
                name: part.toolName,
                arguments: JSON.stringify(part.input),
                id: (_f = (_e = (_d = part.providerOptions) == null ? void 0 : _d.openai) == null ? void 0 : _e.itemId) != null ? _f : void 0
              });
              break;
            }
            case "tool-result": {
              warnings.push({
                type: "other",
                message: `tool result parts in assistant messages are not supported for OpenAI responses`
              });
              break;
            }
            case "reasoning": {
              const providerOptions = await (0, import_provider_utils11.parseProviderOptions)({
                provider: "openai",
                providerOptions: part.providerOptions,
                schema: openaiResponsesReasoningProviderOptionsSchema
              });
              const reasoningId = providerOptions == null ? void 0 : providerOptions.itemId;
              if (reasoningId != null) {
                const existingReasoningMessage = reasoningMessages[reasoningId];
                const summaryParts = [];
                if (part.text.length > 0) {
                  summaryParts.push({ type: "summary_text", text: part.text });
                } else if (existingReasoningMessage !== void 0) {
                  warnings.push({
                    type: "other",
                    message: `Cannot append empty reasoning part to existing reasoning sequence. Skipping reasoning part: ${JSON.stringify(part)}.`
                  });
                }
                if (existingReasoningMessage === void 0) {
                  reasoningMessages[reasoningId] = {
                    type: "reasoning",
                    id: reasoningId,
                    encrypted_content: providerOptions == null ? void 0 : providerOptions.reasoningEncryptedContent,
                    summary: summaryParts
                  };
                  // The pushed object is mutated in place when later parts
                  // with the same reasoningId append summary entries.
                  messages.push(reasoningMessages[reasoningId]);
                } else {
                  existingReasoningMessage.summary.push(...summaryParts);
                }
              } else {
                // Reasoning parts without an OpenAI itemId cannot be mapped.
                warnings.push({
                  type: "other",
                  message: `Non-OpenAI reasoning parts are not supported. Skipping reasoning part: ${JSON.stringify(part)}.`
                });
              }
              break;
            }
          }
        }
        break;
      }
      case "tool": {
        for (const part of content) {
          const output = part.output;
          let contentValue;
          // Text-like outputs pass through; structured outputs are stringified.
          switch (output.type) {
            case "text":
            case "error-text":
              contentValue = output.value;
              break;
            case "content":
            case "json":
            case "error-json":
              contentValue = JSON.stringify(output.value);
              break;
          }
          messages.push({
            type: "function_call_output",
            call_id: part.toolCallId,
            output: contentValue
          });
        }
        break;
      }
      default: {
        // Exhaustiveness guard (compiled from a TypeScript `never` check).
        const _exhaustiveCheck = role;
        throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
      }
    }
  }
  return { messages, warnings };
}
2240
// Provider options recognized on assistant `reasoning` parts:
// `itemId` groups summary parts under one OpenAI reasoning item, and
// `reasoningEncryptedContent` carries the item's encrypted payload.
var openaiResponsesReasoningProviderOptionsSchema = import_v414.z.object({
  itemId: import_v414.z.string().nullish(),
  reasoningEncryptedContent: import_v414.z.string().nullish()
});
2244
+
2245
+ // src/responses/map-openai-responses-finish-reason.ts
2246
// Maps the Responses API `incomplete_details.reason` to the AI SDK finish
// reason. A missing reason means normal completion ("stop", or "tool-calls"
// when tool calls were produced); unrecognized reasons map to "unknown"
// (or "tool-calls" when tool calls were produced).
function mapOpenAIResponseFinishReason({
  finishReason,
  hasToolCalls
}) {
  if (finishReason == null) {
    return hasToolCalls ? "tool-calls" : "stop";
  }
  if (finishReason === "max_output_tokens") {
    return "length";
  }
  if (finishReason === "content_filter") {
    return "content-filter";
  }
  return hasToolCalls ? "tool-calls" : "unknown";
}
2262
+
2263
+ // src/responses/openai-responses-prepare-tools.ts
2264
+ var import_provider7 = require("@ai-sdk/provider");
2265
// Translates AI SDK tool definitions and tool choice into the Responses API
// `tools` / `tool_choice` shapes. Unsupported tools are reported via
// `toolWarnings` rather than thrown; an unknown tool-choice type throws
// UnsupportedFunctionalityError.
function prepareResponsesTools({
  tools,
  toolChoice,
  strictJsonSchema
}) {
  const toolWarnings = [];
  // A missing or empty tool list means "no tools".
  if (tools == null || tools.length === 0) {
    return { tools: void 0, toolChoice: void 0, toolWarnings };
  }
  const openaiTools = [];
  for (const tool of tools) {
    if (tool.type === "function") {
      openaiTools.push({
        type: "function",
        name: tool.name,
        description: tool.description,
        parameters: tool.inputSchema,
        strict: strictJsonSchema
      });
      continue;
    }
    if (tool.type === "provider-defined" && tool.id === "openai.file_search") {
      const parsedArgs = fileSearchArgsSchema.parse(tool.args);
      openaiTools.push({
        type: "file_search",
        vector_store_ids: parsedArgs.vectorStoreIds,
        max_num_results: parsedArgs.maxNumResults,
        ranking_options: parsedArgs.ranking ? { ranker: parsedArgs.ranking.ranker } : void 0,
        filters: parsedArgs.filters
      });
      continue;
    }
    if (tool.type === "provider-defined" && tool.id === "openai.web_search_preview") {
      openaiTools.push({
        type: "web_search_preview",
        search_context_size: tool.args.searchContextSize,
        user_location: tool.args.userLocation
      });
      continue;
    }
    // Anything else (unknown provider-defined ids or unknown tool types).
    toolWarnings.push({ type: "unsupported-tool", tool });
  }
  if (toolChoice == null) {
    return { tools: openaiTools, toolChoice: void 0, toolWarnings };
  }
  const choiceType = toolChoice.type;
  // "auto" / "none" / "required" pass through as bare strings.
  if (choiceType === "auto" || choiceType === "none" || choiceType === "required") {
    return { tools: openaiTools, toolChoice: choiceType, toolWarnings };
  }
  if (choiceType === "tool") {
    // Built-in tools are selected by type; everything else is a function.
    let mappedChoice;
    if (toolChoice.toolName === "file_search") {
      mappedChoice = { type: "file_search" };
    } else if (toolChoice.toolName === "web_search_preview") {
      mappedChoice = { type: "web_search_preview" };
    } else {
      mappedChoice = { type: "function", name: toolChoice.toolName };
    }
    return { tools: openaiTools, toolChoice: mappedChoice, toolWarnings };
  }
  throw new import_provider7.UnsupportedFunctionalityError({
    functionality: `tool choice type: ${choiceType}`
  });
}
2340
+
2341
+ // src/responses/openai-responses-language-model.ts
2342
+ var OpenAIResponsesLanguageModel = class {
2343
  // @param modelId OpenAI Responses model id.
  // @param config  Provider configuration (provider name, url builder,
  //                headers factory, fetch override, optional generateId).
  constructor(modelId, config) {
    this.specificationVersion = "v2";
    // http(s) image URLs can be passed through to the API as-is.
    this.supportedUrls = {
      "image/*": [/^https?:\/\/.*$/]
    };
    this.modelId = modelId;
    this.config = config;
  }
2351
  // Provider identifier supplied via the config.
  get provider() {
    return this.config.provider;
  }
2354
  // Maps AI SDK call options to the Responses API request body. Settings the
  // endpoint does not support (topK, seed, penalties, stopSequences, and —
  // for reasoning models — temperature/topP) are dropped with warnings.
  //
  // @returns {Promise<{ args: object, warnings: Array }>} request body plus
  //          accumulated warnings (including message- and tool-conversion
  //          warnings).
  async getArgs({
    maxOutputTokens,
    temperature,
    stopSequences,
    topP,
    topK,
    presencePenalty,
    frequencyPenalty,
    seed,
    prompt,
    providerOptions,
    tools,
    toolChoice,
    responseFormat
  }) {
    var _a, _b;
    const warnings = [];
    // Per-model capabilities: system message mode, reasoning support,
    // required auto-truncation.
    const modelConfig = getResponsesModelConfig(this.modelId);
    if (topK != null) {
      warnings.push({ type: "unsupported-setting", setting: "topK" });
    }
    if (seed != null) {
      warnings.push({ type: "unsupported-setting", setting: "seed" });
    }
    if (presencePenalty != null) {
      warnings.push({
        type: "unsupported-setting",
        setting: "presencePenalty"
      });
    }
    if (frequencyPenalty != null) {
      warnings.push({
        type: "unsupported-setting",
        setting: "frequencyPenalty"
      });
    }
    if (stopSequences != null) {
      warnings.push({ type: "unsupported-setting", setting: "stopSequences" });
    }
    const { messages, warnings: messageWarnings } = await convertToOpenAIResponsesMessages({
      prompt,
      systemMessageMode: modelConfig.systemMessageMode
    });
    warnings.push(...messageWarnings);
    const openaiOptions = await (0, import_provider_utils13.parseProviderOptions)({
      provider: "openai",
      providerOptions,
      schema: openaiResponsesProviderOptionsSchema
    });
    const strictJsonSchema = (_a = openaiOptions == null ? void 0 : openaiOptions.strictJsonSchema) != null ? _a : false;
    const baseArgs = {
      model: this.modelId,
      input: messages,
      temperature,
      top_p: topP,
      max_output_tokens: maxOutputTokens,
      // `text` is only emitted when a JSON response format and/or a verbosity
      // override is requested. With a schema -> json_schema; without -> json_object.
      ...((responseFormat == null ? void 0 : responseFormat.type) === "json" || (openaiOptions == null ? void 0 : openaiOptions.textVerbosity)) && {
        text: {
          ...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
            format: responseFormat.schema != null ? {
              type: "json_schema",
              strict: strictJsonSchema,
              name: (_b = responseFormat.name) != null ? _b : "response",
              description: responseFormat.description,
              schema: responseFormat.schema
            } : { type: "json_object" }
          },
          ...(openaiOptions == null ? void 0 : openaiOptions.textVerbosity) && {
            verbosity: openaiOptions.textVerbosity
          }
        }
      },
      // provider options:
      metadata: openaiOptions == null ? void 0 : openaiOptions.metadata,
      parallel_tool_calls: openaiOptions == null ? void 0 : openaiOptions.parallelToolCalls,
      previous_response_id: openaiOptions == null ? void 0 : openaiOptions.previousResponseId,
      store: openaiOptions == null ? void 0 : openaiOptions.store,
      user: openaiOptions == null ? void 0 : openaiOptions.user,
      instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
      service_tier: openaiOptions == null ? void 0 : openaiOptions.serviceTier,
      include: openaiOptions == null ? void 0 : openaiOptions.include,
      // model-specific settings:
      ...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
        reasoning: {
          ...(openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
            effort: openaiOptions.reasoningEffort
          },
          ...(openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null && {
            summary: openaiOptions.reasoningSummary
          }
        }
      },
      ...modelConfig.requiredAutoTruncation && {
        truncation: "auto"
      }
    };
    if (modelConfig.isReasoningModel) {
      // Reasoning models reject sampling controls; strip them with warnings.
      if (baseArgs.temperature != null) {
        baseArgs.temperature = void 0;
        warnings.push({
          type: "unsupported-setting",
          setting: "temperature",
          details: "temperature is not supported for reasoning models"
        });
      }
      if (baseArgs.top_p != null) {
        baseArgs.top_p = void 0;
        warnings.push({
          type: "unsupported-setting",
          setting: "topP",
          details: "topP is not supported for reasoning models"
        });
      }
    } else {
      // Reasoning-only options are warned about (but note they were never
      // added to baseArgs in this branch, so there is nothing to strip).
      if ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null) {
        warnings.push({
          type: "unsupported-setting",
          setting: "reasoningEffort",
          details: "reasoningEffort is not supported for non-reasoning models"
        });
      }
      if ((openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) {
        warnings.push({
          type: "unsupported-setting",
          setting: "reasoningSummary",
          details: "reasoningSummary is not supported for non-reasoning models"
        });
      }
    }
    // Service tiers are model-gated; unsupported combinations fall back to
    // the default tier (field removed) with a warning.
    if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "flex" && !supportsFlexProcessing2(this.modelId)) {
      warnings.push({
        type: "unsupported-setting",
        setting: "serviceTier",
        details: "flex processing is only available for o3, o4-mini, and gpt-5 models"
      });
      delete baseArgs.service_tier;
    }
    if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "priority" && !supportsPriorityProcessing2(this.modelId)) {
      warnings.push({
        type: "unsupported-setting",
        setting: "serviceTier",
        details: "priority processing is only available for supported models (gpt-4, gpt-5, gpt-5-mini, o3, o4-mini) and requires Enterprise access. gpt-5-nano is not supported"
      });
      delete baseArgs.service_tier;
    }
    const {
      tools: openaiTools,
      toolChoice: openaiToolChoice,
      toolWarnings
    } = prepareResponsesTools({
      tools,
      toolChoice,
      strictJsonSchema
    });
    return {
      args: {
        ...baseArgs,
        tools: openaiTools,
        tool_choice: openaiToolChoice
      },
      warnings: [...warnings, ...toolWarnings]
    };
  }
2517
  // Performs a non-streaming `/responses` call and maps the OpenAI output
  // items to AI SDK content parts (reasoning, text, url-citation sources,
  // tool calls/results), usage, finish reason, and response metadata.
  //
  // @throws APICallError when the response body carries an `error` object.
  async doGenerate(options) {
    var _a, _b, _c, _d, _e, _f, _g, _h, _i;
    const { args: body, warnings } = await this.getArgs(options);
    const url = this.config.url({
      path: "/responses",
      modelId: this.modelId
    });
    const {
      responseHeaders,
      value: response,
      rawValue: rawResponse
    } = await (0, import_provider_utils13.postJsonToApi)({
      url,
      headers: (0, import_provider_utils13.combineHeaders)(this.config.headers(), options.headers),
      body,
      failedResponseHandler: openaiFailedResponseHandler,
      // Inline zod schema for the non-streaming Responses API payload; the
      // output array is a discriminated union on the item `type`.
      successfulResponseHandler: (0, import_provider_utils13.createJsonResponseHandler)(
        import_v415.z.object({
          id: import_v415.z.string(),
          created_at: import_v415.z.number(),
          error: import_v415.z.object({
            code: import_v415.z.string(),
            message: import_v415.z.string()
          }).nullish(),
          model: import_v415.z.string(),
          output: import_v415.z.array(
            import_v415.z.discriminatedUnion("type", [
              import_v415.z.object({
                type: import_v415.z.literal("message"),
                role: import_v415.z.literal("assistant"),
                id: import_v415.z.string(),
                content: import_v415.z.array(
                  import_v415.z.object({
                    type: import_v415.z.literal("output_text"),
                    text: import_v415.z.string(),
                    annotations: import_v415.z.array(
                      import_v415.z.object({
                        type: import_v415.z.literal("url_citation"),
                        start_index: import_v415.z.number(),
                        end_index: import_v415.z.number(),
                        url: import_v415.z.string(),
                        title: import_v415.z.string()
                      })
                    )
                  })
                )
              }),
              import_v415.z.object({
                type: import_v415.z.literal("function_call"),
                call_id: import_v415.z.string(),
                name: import_v415.z.string(),
                arguments: import_v415.z.string(),
                id: import_v415.z.string()
              }),
              import_v415.z.object({
                type: import_v415.z.literal("web_search_call"),
                id: import_v415.z.string(),
                status: import_v415.z.string().optional()
              }),
              import_v415.z.object({
                type: import_v415.z.literal("computer_call"),
                id: import_v415.z.string(),
                status: import_v415.z.string().optional()
              }),
              import_v415.z.object({
                type: import_v415.z.literal("file_search_call"),
                id: import_v415.z.string(),
                status: import_v415.z.string().optional()
              }),
              import_v415.z.object({
                type: import_v415.z.literal("reasoning"),
                id: import_v415.z.string(),
                encrypted_content: import_v415.z.string().nullish(),
                summary: import_v415.z.array(
                  import_v415.z.object({
                    type: import_v415.z.literal("summary_text"),
                    text: import_v415.z.string()
                  })
                )
              })
            ])
          ),
          incomplete_details: import_v415.z.object({ reason: import_v415.z.string() }).nullable(),
          usage: usageSchema2
        })
      ),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
    // A 200 payload can still carry an error object; surface it as a
    // non-retryable APICallError.
    if (response.error) {
      throw new import_provider8.APICallError({
        message: response.error.message,
        url,
        requestBodyValues: body,
        statusCode: 400,
        responseHeaders,
        responseBody: rawResponse,
        isRetryable: false
      });
    }
    const content = [];
    for (const part of response.output) {
      switch (part.type) {
        case "reasoning": {
          // Ensure at least one reasoning part is emitted even when the
          // summary is empty, so the item id / encrypted content are kept.
          if (part.summary.length === 0) {
            part.summary.push({ type: "summary_text", text: "" });
          }
          for (const summary of part.summary) {
            content.push({
              type: "reasoning",
              text: summary.text,
              providerMetadata: {
                openai: {
                  itemId: part.id,
                  reasoningEncryptedContent: (_a = part.encrypted_content) != null ? _a : null
                }
              }
            });
          }
          break;
        }
        case "message": {
          for (const contentPart of part.content) {
            content.push({
              type: "text",
              text: contentPart.text,
              providerMetadata: {
                openai: {
                  itemId: part.id
                }
              }
            });
            // URL citations become "source" parts; ids come from the
            // configurable generateId hook when provided.
            for (const annotation of contentPart.annotations) {
              content.push({
                type: "source",
                sourceType: "url",
                id: (_d = (_c = (_b = this.config).generateId) == null ? void 0 : _c.call(_b)) != null ? _d : (0, import_provider_utils13.generateId)(),
                url: annotation.url,
                title: annotation.title
              });
            }
          }
          break;
        }
        case "function_call": {
          content.push({
            type: "tool-call",
            toolCallId: part.call_id,
            toolName: part.name,
            input: part.arguments,
            providerMetadata: {
              openai: {
                itemId: part.id
              }
            }
          });
          break;
        }
        // The three built-in tool call item types below are provider-executed:
        // a synthetic call/result pair is emitted for each.
        case "web_search_call": {
          content.push({
            type: "tool-call",
            toolCallId: part.id,
            toolName: "web_search_preview",
            input: "",
            providerExecuted: true
          });
          content.push({
            type: "tool-result",
            toolCallId: part.id,
            toolName: "web_search_preview",
            result: { status: part.status || "completed" },
            providerExecuted: true
          });
          break;
        }
        case "computer_call": {
          content.push({
            type: "tool-call",
            toolCallId: part.id,
            toolName: "computer_use",
            input: "",
            providerExecuted: true
          });
          content.push({
            type: "tool-result",
            toolCallId: part.id,
            toolName: "computer_use",
            result: {
              type: "computer_use_tool_result",
              status: part.status || "completed"
            },
            providerExecuted: true
          });
          break;
        }
        case "file_search_call": {
          content.push({
            type: "tool-call",
            toolCallId: part.id,
            toolName: "file_search",
            input: "",
            providerExecuted: true
          });
          content.push({
            type: "tool-result",
            toolCallId: part.id,
            toolName: "file_search",
            result: {
              type: "file_search_tool_result",
              status: part.status || "completed"
            },
            providerExecuted: true
          });
          break;
        }
      }
    }
    return {
      content,
      finishReason: mapOpenAIResponseFinishReason({
        finishReason: (_e = response.incomplete_details) == null ? void 0 : _e.reason,
        hasToolCalls: content.some((part) => part.type === "tool-call")
      }),
      usage: {
        inputTokens: response.usage.input_tokens,
        outputTokens: response.usage.output_tokens,
        totalTokens: response.usage.input_tokens + response.usage.output_tokens,
        reasoningTokens: (_g = (_f = response.usage.output_tokens_details) == null ? void 0 : _f.reasoning_tokens) != null ? _g : void 0,
        cachedInputTokens: (_i = (_h = response.usage.input_tokens_details) == null ? void 0 : _h.cached_tokens) != null ? _i : void 0
      },
      request: { body },
      response: {
        id: response.id,
        // created_at is epoch seconds; Date wants milliseconds.
        timestamp: new Date(response.created_at * 1e3),
        modelId: response.model,
        headers: responseHeaders,
        body: rawResponse
      },
      providerMetadata: {
        openai: {
          responseId: response.id
        }
      },
      warnings
    };
  }
2763
+ async doStream(options) {
2764
+ const { args: body, warnings } = await this.getArgs(options);
2765
+ const { responseHeaders, value: response } = await (0, import_provider_utils13.postJsonToApi)({
2766
+ url: this.config.url({
2767
+ path: "/responses",
2768
+ modelId: this.modelId
2769
+ }),
2770
+ headers: (0, import_provider_utils13.combineHeaders)(this.config.headers(), options.headers),
2771
+ body: {
2772
+ ...body,
2773
+ stream: true
2774
+ },
2775
+ failedResponseHandler: openaiFailedResponseHandler,
2776
+ successfulResponseHandler: (0, import_provider_utils13.createEventSourceResponseHandler)(
2777
+ openaiResponsesChunkSchema
2778
+ ),
2779
+ abortSignal: options.abortSignal,
2780
+ fetch: this.config.fetch
2781
+ });
2782
+ const self = this;
2783
+ let finishReason = "unknown";
2784
+ const usage = {
2785
+ inputTokens: void 0,
2786
+ outputTokens: void 0,
2787
+ totalTokens: void 0
2788
+ };
2789
+ let responseId = null;
2790
+ const ongoingToolCalls = {};
2791
+ let hasToolCalls = false;
2792
+ const activeReasoning = {};
2793
+ return {
2794
+ stream: response.pipeThrough(
2795
+ new TransformStream({
2796
+ start(controller) {
2797
+ controller.enqueue({ type: "stream-start", warnings });
2798
+ },
2799
+ transform(chunk, controller) {
2800
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
2801
+ if (options.includeRawChunks) {
2802
+ controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
2803
+ }
2804
+ if (!chunk.success) {
2805
+ finishReason = "error";
2806
+ controller.enqueue({ type: "error", error: chunk.error });
2807
+ return;
2808
+ }
2809
+ const value = chunk.value;
2810
+ if (isResponseOutputItemAddedChunk(value)) {
2811
+ if (value.item.type === "function_call") {
2812
+ ongoingToolCalls[value.output_index] = {
2813
+ toolName: value.item.name,
2814
+ toolCallId: value.item.call_id
2815
+ };
2816
+ controller.enqueue({
2817
+ type: "tool-input-start",
2818
+ id: value.item.call_id,
2819
+ toolName: value.item.name
2820
+ });
2821
+ } else if (value.item.type === "web_search_call") {
2822
+ ongoingToolCalls[value.output_index] = {
2823
+ toolName: "web_search_preview",
2824
+ toolCallId: value.item.id
2825
+ };
2826
+ controller.enqueue({
2827
+ type: "tool-input-start",
2828
+ id: value.item.id,
2829
+ toolName: "web_search_preview"
2830
+ });
2831
+ } else if (value.item.type === "computer_call") {
2832
+ ongoingToolCalls[value.output_index] = {
2833
+ toolName: "computer_use",
2834
+ toolCallId: value.item.id
2835
+ };
2836
+ controller.enqueue({
2837
+ type: "tool-input-start",
2838
+ id: value.item.id,
2839
+ toolName: "computer_use"
2840
+ });
2841
+ } else if (value.item.type === "message") {
2842
+ controller.enqueue({
2843
+ type: "text-start",
2844
+ id: value.item.id,
2845
+ providerMetadata: {
2846
+ openai: {
2847
+ itemId: value.item.id
2848
+ }
2849
+ }
2850
+ });
2851
+ } else if (isResponseOutputItemAddedReasoningChunk(value)) {
2852
+ activeReasoning[value.item.id] = {
2853
+ encryptedContent: value.item.encrypted_content,
2854
+ summaryParts: [0]
2855
+ };
2856
+ controller.enqueue({
2857
+ type: "reasoning-start",
2858
+ id: `${value.item.id}:0`,
2859
+ providerMetadata: {
2860
+ openai: {
2861
+ itemId: value.item.id,
2862
+ reasoningEncryptedContent: (_a = value.item.encrypted_content) != null ? _a : null
2863
+ }
2864
+ }
2865
+ });
2866
+ }
2867
+ } else if (isResponseOutputItemDoneChunk(value)) {
2868
+ if (value.item.type === "function_call") {
2869
+ ongoingToolCalls[value.output_index] = void 0;
2870
+ hasToolCalls = true;
2871
+ controller.enqueue({
2872
+ type: "tool-input-end",
2873
+ id: value.item.call_id
2874
+ });
2875
+ controller.enqueue({
2876
+ type: "tool-call",
2877
+ toolCallId: value.item.call_id,
2878
+ toolName: value.item.name,
2879
+ input: value.item.arguments,
2880
+ providerMetadata: {
2881
+ openai: {
2882
+ itemId: value.item.id
2883
+ }
2884
+ }
2885
+ });
2886
+ } else if (value.item.type === "web_search_call") {
2887
+ ongoingToolCalls[value.output_index] = void 0;
2888
+ hasToolCalls = true;
2889
+ controller.enqueue({
2890
+ type: "tool-input-end",
2891
+ id: value.item.id
2892
+ });
2893
+ controller.enqueue({
2894
+ type: "tool-call",
2895
+ toolCallId: value.item.id,
2896
+ toolName: "web_search_preview",
2897
+ input: "",
2898
+ providerExecuted: true
2899
+ });
2900
+ controller.enqueue({
2901
+ type: "tool-result",
2902
+ toolCallId: value.item.id,
2903
+ toolName: "web_search_preview",
2904
+ result: {
2905
+ type: "web_search_tool_result",
2906
+ status: value.item.status || "completed"
2907
+ },
2908
+ providerExecuted: true
2909
+ });
2910
+ } else if (value.item.type === "computer_call") {
2911
+ ongoingToolCalls[value.output_index] = void 0;
2912
+ hasToolCalls = true;
2913
+ controller.enqueue({
2914
+ type: "tool-input-end",
2915
+ id: value.item.id
2916
+ });
2917
+ controller.enqueue({
2918
+ type: "tool-call",
2919
+ toolCallId: value.item.id,
2920
+ toolName: "computer_use",
2921
+ input: "",
2922
+ providerExecuted: true
2923
+ });
2924
+ controller.enqueue({
2925
+ type: "tool-result",
2926
+ toolCallId: value.item.id,
2927
+ toolName: "computer_use",
2928
+ result: {
2929
+ type: "computer_use_tool_result",
2930
+ status: value.item.status || "completed"
2931
+ },
2932
+ providerExecuted: true
2933
+ });
2934
+ } else if (value.item.type === "message") {
2935
+ controller.enqueue({
2936
+ type: "text-end",
2937
+ id: value.item.id
2938
+ });
2939
+ } else if (isResponseOutputItemDoneReasoningChunk(value)) {
2940
+ const activeReasoningPart = activeReasoning[value.item.id];
2941
+ for (const summaryIndex of activeReasoningPart.summaryParts) {
2942
+ controller.enqueue({
2943
+ type: "reasoning-end",
2944
+ id: `${value.item.id}:${summaryIndex}`,
2945
+ providerMetadata: {
2946
+ openai: {
2947
+ itemId: value.item.id,
2948
+ reasoningEncryptedContent: (_b = value.item.encrypted_content) != null ? _b : null
2949
+ }
2950
+ }
2951
+ });
2952
+ }
2953
+ delete activeReasoning[value.item.id];
2954
+ }
2955
+ } else if (isResponseFunctionCallArgumentsDeltaChunk(value)) {
2956
+ const toolCall = ongoingToolCalls[value.output_index];
2957
+ if (toolCall != null) {
2958
+ controller.enqueue({
2959
+ type: "tool-input-delta",
2960
+ id: toolCall.toolCallId,
2961
+ delta: value.delta
2962
+ });
2963
+ }
2964
+ } else if (isResponseCreatedChunk(value)) {
2965
+ responseId = value.response.id;
2966
+ controller.enqueue({
2967
+ type: "response-metadata",
2968
+ id: value.response.id,
2969
+ timestamp: new Date(value.response.created_at * 1e3),
2970
+ modelId: value.response.model
2971
+ });
2972
+ } else if (isTextDeltaChunk(value)) {
2973
+ controller.enqueue({
2974
+ type: "text-delta",
2975
+ id: value.item_id,
2976
+ delta: value.delta
2977
+ });
2978
+ } else if (isResponseReasoningSummaryPartAddedChunk(value)) {
2979
+ if (value.summary_index > 0) {
2980
+ (_c = activeReasoning[value.item_id]) == null ? void 0 : _c.summaryParts.push(
2981
+ value.summary_index
2982
+ );
2983
+ controller.enqueue({
2984
+ type: "reasoning-start",
2985
+ id: `${value.item_id}:${value.summary_index}`,
2986
+ providerMetadata: {
2987
+ openai: {
2988
+ itemId: value.item_id,
2989
+ reasoningEncryptedContent: (_e = (_d = activeReasoning[value.item_id]) == null ? void 0 : _d.encryptedContent) != null ? _e : null
2990
+ }
2991
+ }
2992
+ });
2993
+ }
2994
+ } else if (isResponseReasoningSummaryTextDeltaChunk(value)) {
2995
+ controller.enqueue({
2996
+ type: "reasoning-delta",
2997
+ id: `${value.item_id}:${value.summary_index}`,
2998
+ delta: value.delta,
2999
+ providerMetadata: {
3000
+ openai: {
3001
+ itemId: value.item_id
3002
+ }
3003
+ }
3004
+ });
3005
+ } else if (isResponseFinishedChunk(value)) {
3006
+ finishReason = mapOpenAIResponseFinishReason({
3007
+ finishReason: (_f = value.response.incomplete_details) == null ? void 0 : _f.reason,
3008
+ hasToolCalls
3009
+ });
3010
+ usage.inputTokens = value.response.usage.input_tokens;
3011
+ usage.outputTokens = value.response.usage.output_tokens;
3012
+ usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens;
3013
+ usage.reasoningTokens = (_h = (_g = value.response.usage.output_tokens_details) == null ? void 0 : _g.reasoning_tokens) != null ? _h : void 0;
3014
+ usage.cachedInputTokens = (_j = (_i = value.response.usage.input_tokens_details) == null ? void 0 : _i.cached_tokens) != null ? _j : void 0;
3015
+ } else if (isResponseAnnotationAddedChunk(value)) {
3016
+ controller.enqueue({
3017
+ type: "source",
3018
+ sourceType: "url",
3019
+ id: (_m = (_l = (_k = self.config).generateId) == null ? void 0 : _l.call(_k)) != null ? _m : (0, import_provider_utils13.generateId)(),
3020
+ url: value.annotation.url,
3021
+ title: value.annotation.title
3022
+ });
3023
+ } else if (isErrorChunk(value)) {
3024
+ controller.enqueue({ type: "error", error: value });
3025
+ }
3026
+ },
3027
+ flush(controller) {
3028
+ controller.enqueue({
3029
+ type: "finish",
3030
+ finishReason,
3031
+ usage,
3032
+ providerMetadata: {
3033
+ openai: {
3034
+ responseId
3035
+ }
3036
+ }
3037
+ });
3038
+ }
3039
+ })
3040
+ ),
3041
+ request: { body },
3042
+ response: { headers: responseHeaders }
3043
+ };
3044
+ }
3045
+ };
3046
// Token accounting attached to terminal response events (see
// responseFinishedChunkSchema). The *_details objects are nullish because
// not every model/endpoint emits them.
var usageSchema2 = import_v415.z.object({
  input_tokens: import_v415.z.number(),
  input_tokens_details: import_v415.z.object({ cached_tokens: import_v415.z.number().nullish() }).nullish(),
  output_tokens: import_v415.z.number(),
  output_tokens_details: import_v415.z.object({ reasoning_tokens: import_v415.z.number().nullish() }).nullish()
});
3052
// "response.output_text.delta": incremental generated text for the output
// item identified by `item_id`.
var textDeltaChunkSchema = import_v415.z.object({
  type: import_v415.z.literal("response.output_text.delta"),
  item_id: import_v415.z.string(),
  delta: import_v415.z.string()
});
3057
// "error": provider-reported stream error; the stream handler forwards the
// whole chunk as an error part.
var errorChunkSchema = import_v415.z.object({
  type: import_v415.z.literal("error"),
  code: import_v415.z.string(),
  message: import_v415.z.string(),
  param: import_v415.z.string().nullish(),
  sequence_number: import_v415.z.number()
});
3064
// "response.completed" / "response.incomplete": terminal events. They carry
// the final usage numbers and, for incomplete responses, the reason used to
// derive the finish reason.
var responseFinishedChunkSchema = import_v415.z.object({
  type: import_v415.z.enum(["response.completed", "response.incomplete"]),
  response: import_v415.z.object({
    incomplete_details: import_v415.z.object({ reason: import_v415.z.string() }).nullish(),
    usage: usageSchema2
  })
});
3071
// "response.created": first event of a stream, carrying response metadata.
// `created_at` is in Unix epoch seconds (the stream handler multiplies by
// 1e3 to build a Date).
var responseCreatedChunkSchema = import_v415.z.object({
  type: import_v415.z.literal("response.created"),
  response: import_v415.z.object({
    id: import_v415.z.string(),
    created_at: import_v415.z.number(),
    model: import_v415.z.string()
  })
});
3079
// "response.output_item.added": a new output item appeared at `output_index`.
// Variants are discriminated on `item.type`; the stream handler uses them to
// open text/reasoning parts and to begin tool-call input streaming.
var responseOutputItemAddedSchema = import_v415.z.object({
  type: import_v415.z.literal("response.output_item.added"),
  output_index: import_v415.z.number(),
  item: import_v415.z.discriminatedUnion("type", [
    // Assistant message; `id` becomes the text part id.
    import_v415.z.object({
      type: import_v415.z.literal("message"),
      id: import_v415.z.string()
    }),
    // Reasoning item; encrypted content may be absent.
    import_v415.z.object({
      type: import_v415.z.literal("reasoning"),
      id: import_v415.z.string(),
      encrypted_content: import_v415.z.string().nullish()
    }),
    // Function (tool) call; `arguments` streams in later via
    // response.function_call_arguments.delta events.
    import_v415.z.object({
      type: import_v415.z.literal("function_call"),
      id: import_v415.z.string(),
      call_id: import_v415.z.string(),
      name: import_v415.z.string(),
      arguments: import_v415.z.string()
    }),
    // Provider-executed tool calls; status is a free-form string here
    // (unlike the .done schema, which pins it to "completed").
    import_v415.z.object({
      type: import_v415.z.literal("web_search_call"),
      id: import_v415.z.string(),
      status: import_v415.z.string()
    }),
    import_v415.z.object({
      type: import_v415.z.literal("computer_call"),
      id: import_v415.z.string(),
      status: import_v415.z.string()
    }),
    import_v415.z.object({
      type: import_v415.z.literal("file_search_call"),
      id: import_v415.z.string(),
      status: import_v415.z.string()
    })
  ])
});
3116
// "response.output_item.done": the output item at `output_index` finished.
// Tool-call variants pin `status` to the literal "completed" here, unlike
// the corresponding .added schema.
var responseOutputItemDoneSchema = import_v415.z.object({
  type: import_v415.z.literal("response.output_item.done"),
  output_index: import_v415.z.number(),
  item: import_v415.z.discriminatedUnion("type", [
    import_v415.z.object({
      type: import_v415.z.literal("message"),
      id: import_v415.z.string()
    }),
    import_v415.z.object({
      type: import_v415.z.literal("reasoning"),
      id: import_v415.z.string(),
      encrypted_content: import_v415.z.string().nullish()
    }),
    // Completed function call: the full `arguments` JSON string is
    // available here and is emitted as the final tool-call part.
    import_v415.z.object({
      type: import_v415.z.literal("function_call"),
      id: import_v415.z.string(),
      call_id: import_v415.z.string(),
      name: import_v415.z.string(),
      arguments: import_v415.z.string(),
      status: import_v415.z.literal("completed")
    }),
    import_v415.z.object({
      type: import_v415.z.literal("web_search_call"),
      id: import_v415.z.string(),
      status: import_v415.z.literal("completed")
    }),
    import_v415.z.object({
      type: import_v415.z.literal("computer_call"),
      id: import_v415.z.string(),
      status: import_v415.z.literal("completed")
    }),
    import_v415.z.object({
      type: import_v415.z.literal("file_search_call"),
      id: import_v415.z.string(),
      status: import_v415.z.literal("completed")
    })
  ])
});
3154
// "response.function_call_arguments.delta": incremental JSON argument text
// for the in-flight function call at `output_index`.
var responseFunctionCallArgumentsDeltaSchema = import_v415.z.object({
  type: import_v415.z.literal("response.function_call_arguments.delta"),
  item_id: import_v415.z.string(),
  output_index: import_v415.z.number(),
  delta: import_v415.z.string()
});
3160
// "response.output_text.annotation.added": a URL citation attached to the
// generated text; surfaced as a "source" stream part.
var responseAnnotationAddedSchema = import_v415.z.object({
  type: import_v415.z.literal("response.output_text.annotation.added"),
  annotation: import_v415.z.object({
    type: import_v415.z.literal("url_citation"),
    url: import_v415.z.string(),
    title: import_v415.z.string()
  })
});
3168
// "response.reasoning_summary_part.added": a new summary part (index
// `summary_index`) was opened on the reasoning item `item_id`.
var responseReasoningSummaryPartAddedSchema = import_v415.z.object({
  type: import_v415.z.literal("response.reasoning_summary_part.added"),
  item_id: import_v415.z.string(),
  summary_index: import_v415.z.number()
});
3173
// "response.reasoning_summary_text.delta": incremental text for one summary
// part of a reasoning item.
var responseReasoningSummaryTextDeltaSchema = import_v415.z.object({
  type: import_v415.z.literal("response.reasoning_summary_text.delta"),
  item_id: import_v415.z.string(),
  summary_index: import_v415.z.number(),
  delta: import_v415.z.string()
});
3179
// Union of all stream events this provider handles. The final loose object
// is a catch-all so unknown event types still parse successfully and are
// silently ignored by the stream handler; since z.union tries members in
// order, it must remain last.
var openaiResponsesChunkSchema = import_v415.z.union([
  textDeltaChunkSchema,
  responseFinishedChunkSchema,
  responseCreatedChunkSchema,
  responseOutputItemAddedSchema,
  responseOutputItemDoneSchema,
  responseFunctionCallArgumentsDeltaSchema,
  responseAnnotationAddedSchema,
  responseReasoningSummaryPartAddedSchema,
  responseReasoningSummaryTextDeltaSchema,
  errorChunkSchema,
  import_v415.z.object({ type: import_v415.z.string() }).loose()
  // fallback for unknown chunks
]);
3193
// Type guard: streamed text increment for an output item.
function isTextDeltaChunk(chunk) {
  const { type } = chunk;
  return type === "response.output_text.delta";
}
3196
// Type guard: an output item (message / tool call / reasoning) finished.
function isResponseOutputItemDoneChunk(chunk) {
  const doneEvent = "response.output_item.done";
  return chunk.type === doneEvent;
}
3199
// Type guard: a finished output item that is a reasoning item.
// (The output_item.done check is inlined so the guard is self-contained.)
function isResponseOutputItemDoneReasoningChunk(chunk) {
  return chunk.type === "response.output_item.done" && chunk.item.type === "reasoning";
}
3202
// Type guard: terminal stream event, either completed or incomplete.
function isResponseFinishedChunk(chunk) {
  return ["response.completed", "response.incomplete"].includes(chunk.type);
}
3205
// Type guard: initial stream event carrying response id, model, timestamp.
function isResponseCreatedChunk(chunk) {
  const { type } = chunk;
  return type === "response.created";
}
3208
// Type guard: incremental JSON arguments for an in-flight function call.
function isResponseFunctionCallArgumentsDeltaChunk(chunk) {
  const deltaEvent = "response.function_call_arguments.delta";
  return chunk.type === deltaEvent;
}
3211
// Type guard: a new output item was added to the response.
function isResponseOutputItemAddedChunk(chunk) {
  const { type } = chunk;
  return type === "response.output_item.added";
}
3214
// Type guard: a newly added output item that is a reasoning item.
// (The output_item.added check is inlined so the guard is self-contained.)
function isResponseOutputItemAddedReasoningChunk(chunk) {
  return chunk.type === "response.output_item.added" && chunk.item.type === "reasoning";
}
3217
// Type guard: a URL citation annotation was attached to output text.
function isResponseAnnotationAddedChunk(chunk) {
  const annotationEvent = "response.output_text.annotation.added";
  return chunk.type === annotationEvent;
}
3220
// Type guard: a new reasoning summary part was opened.
function isResponseReasoningSummaryPartAddedChunk(chunk) {
  const { type } = chunk;
  return type === "response.reasoning_summary_part.added";
}
3223
// Type guard: incremental reasoning-summary text.
function isResponseReasoningSummaryTextDeltaChunk(chunk) {
  const { type } = chunk;
  return type === "response.reasoning_summary_text.delta";
}
3226
// Type guard: the provider reported a stream-level error.
function isErrorChunk(chunk) {
  const { type } = chunk;
  return type === "error";
}
3229
// Derive per-model behavior for the Responses API from the model id prefix:
// whether the model is reasoning-capable, and how system messages must be
// delivered ("system" verbatim, "developer" role, or removed entirely).
function getResponsesModelConfig(modelId) {
  // Reasoning-capable families: o-series, gpt-5, codex, computer-use.
  const isReasoning = ["o", "gpt-5", "codex-", "computer-use"].some(
    (prefix) => modelId.startsWith(prefix)
  );
  if (!isReasoning) {
    return {
      isReasoningModel: false,
      systemMessageMode: "system",
      requiredAutoTruncation: false
    };
  }
  // Early o1 variants accept neither system nor developer messages.
  const stripsSystemMessages = modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview");
  return {
    isReasoningModel: true,
    systemMessageMode: stripsSystemMessages ? "remove" : "developer",
    requiredAutoTruncation: false
  };
}
3250
// Flex service tier is available for the o3, o4-mini, and gpt-5 families.
function supportsFlexProcessing2(modelId) {
  return ["o3", "o4-mini", "gpt-5"].some((prefix) => modelId.startsWith(prefix));
}
3253
// Priority service tier: gpt-4, gpt-5 (except gpt-5-nano), o3, and o4-mini.
function supportsPriorityProcessing2(modelId) {
  // gpt-5-nano is the one gpt-5 variant without priority processing.
  if (modelId.startsWith("gpt-5-nano")) {
    return false;
  }
  return ["gpt-4", "gpt-5", "o3", "o4-mini"].some(
    (prefix) => modelId.startsWith(prefix)
  );
}
3256
// Provider-specific options accepted for the Responses API (passed under
// `providerOptions.openai`). Every field is optional; unset fields fall back
// to API defaults.
var openaiResponsesProviderOptionsSchema = import_v415.z.object({
  metadata: import_v415.z.any().nullish(),
  parallelToolCalls: import_v415.z.boolean().nullish(),
  previousResponseId: import_v415.z.string().nullish(),
  store: import_v415.z.boolean().nullish(),
  user: import_v415.z.string().nullish(),
  reasoningEffort: import_v415.z.string().nullish(),
  strictJsonSchema: import_v415.z.boolean().nullish(),
  instructions: import_v415.z.string().nullish(),
  reasoningSummary: import_v415.z.string().nullish(),
  serviceTier: import_v415.z.enum(["auto", "flex", "priority"]).nullish(),
  include: import_v415.z.array(import_v415.z.enum(["reasoning.encrypted_content", "file_search_call.results"])).nullish(),
  textVerbosity: import_v415.z.enum(["low", "medium", "high"]).nullish()
});
3270
// Annotate the CommonJS export names for ESM import in node:
// NOTE: `0 &&` short-circuits, so this assignment never executes at runtime;
// it exists only so tools that statically scan for export names can find them.
0 && (module.exports = {
  OpenAIChatLanguageModel,
  OpenAICompletionLanguageModel,
  OpenAIEmbeddingModel,
  OpenAIImageModel,
  OpenAIResponsesLanguageModel,
  OpenAISpeechModel,
  OpenAITranscriptionModel,
  hasDefaultResponseFormat,
  modelMaxImagesPerCall,
  openAITranscriptionProviderOptions,
  openaiCompletionProviderOptions,
  openaiEmbeddingProviderOptions,
  openaiProviderOptions
});
//# sourceMappingURL=index.js.map