@ai-sdk/openai 0.0.0-013d7476-20250808163325

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js ADDED
@@ -0,0 +1,3361 @@
1
"use strict";
// esbuild-generated CommonJS interop helpers.
var __defProp = Object.defineProperty;
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __hasOwnProp = Object.prototype.hasOwnProperty;
// Defines a lazy, enumerable getter on `target` for every entry in `all`.
var __export = (target, all) => {
  for (var name in all)
    __defProp(target, name, { get: all[name], enumerable: true });
};
// Copies own properties from `from` onto `to` as getters, skipping keys
// already present on `to` and the single `except` key; preserves each
// property's enumerability via its original descriptor.
var __copyProps = (to, from, except, desc) => {
  if (from && typeof from === "object" || typeof from === "function") {
    for (let key of __getOwnPropNames(from))
      if (!__hasOwnProp.call(to, key) && key !== except)
        __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
  }
  return to;
};
// Wraps a module object so CJS consumers see an `__esModule`-flagged
// namespace with the module's exports copied onto it.
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
19
+
20
// src/index.ts
// Package export surface, wired up lazily via `__export` getters so the
// bindings resolve to the definitions later in this bundle.
var src_exports = {};
__export(src_exports, {
  createOpenAI: () => createOpenAI,
  openai: () => openai
});
module.exports = __toCommonJS(src_exports);
27
+
28
+ // src/openai-provider.ts
29
+ var import_provider_utils14 = require("@ai-sdk/provider-utils");
30
+
31
+ // src/openai-chat-language-model.ts
32
+ var import_provider3 = require("@ai-sdk/provider");
33
+ var import_provider_utils5 = require("@ai-sdk/provider-utils");
34
+ var import_v45 = require("zod/v4");
35
+
36
+ // src/convert-to-openai-chat-messages.ts
37
+ var import_provider = require("@ai-sdk/provider");
38
+ var import_provider_utils = require("@ai-sdk/provider-utils");
39
/**
 * Converts an SDK prompt (array of `{ role, content }` messages) into the
 * OpenAI chat-completions wire-format `messages` array.
 *
 * @param prompt - SDK prompt messages to convert.
 * @param systemMessageMode - how system messages are emitted: "system"
 *   (default), "developer", or "remove" (dropped with a warning).
 * @returns `{ messages, warnings }` — the wire-format messages plus any
 *   warnings produced during conversion.
 * @throws UnsupportedFunctionalityError for file parts that cannot be
 *   represented (audio/PDF supplied as URLs, unsupported media types).
 */
function convertToOpenAIChatMessages({
  prompt,
  systemMessageMode = "system"
}) {
  const messages = [];
  const warnings = [];
  for (const { role, content } of prompt) {
    switch (role) {
      case "system": {
        switch (systemMessageMode) {
          case "system": {
            messages.push({ role: "system", content });
            break;
          }
          case "developer": {
            messages.push({ role: "developer", content });
            break;
          }
          case "remove": {
            warnings.push({
              type: "other",
              message: "system messages are removed for this model"
            });
            break;
          }
          default: {
            const _exhaustiveCheck = systemMessageMode;
            throw new Error(
              `Unsupported system message mode: ${_exhaustiveCheck}`
            );
          }
        }
        break;
      }
      case "user": {
        // Fast path: a single text part collapses to a plain string content.
        if (content.length === 1 && content[0].type === "text") {
          messages.push({ role: "user", content: content[0].text });
          break;
        }
        messages.push({
          role: "user",
          content: content.map((part, index) => {
            var _a, _b, _c;
            switch (part.type) {
              case "text": {
                return { type: "text", text: part.text };
              }
              case "file": {
                if (part.mediaType.startsWith("image/")) {
                  // "image/*" is normalized to JPEG for the data-URL case.
                  const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
                  return {
                    type: "image_url",
                    image_url: {
                      url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${(0, import_provider_utils.convertToBase64)(part.data)}`,
                      // OpenAI specific extension: image detail
                      detail: (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.imageDetail
                    }
                  };
                } else if (part.mediaType.startsWith("audio/")) {
                  // Audio must be inlined as base64; URLs are not supported.
                  if (part.data instanceof URL) {
                    throw new import_provider.UnsupportedFunctionalityError({
                      functionality: "audio file parts with URLs"
                    });
                  }
                  switch (part.mediaType) {
                    case "audio/wav": {
                      return {
                        type: "input_audio",
                        input_audio: {
                          data: (0, import_provider_utils.convertToBase64)(part.data),
                          format: "wav"
                        }
                      };
                    }
                    case "audio/mp3":
                    case "audio/mpeg": {
                      return {
                        type: "input_audio",
                        input_audio: {
                          data: (0, import_provider_utils.convertToBase64)(part.data),
                          format: "mp3"
                        }
                      };
                    }
                    default: {
                      throw new import_provider.UnsupportedFunctionalityError({
                        functionality: `audio content parts with media type ${part.mediaType}`
                      });
                    }
                  }
                } else if (part.mediaType === "application/pdf") {
                  if (part.data instanceof URL) {
                    throw new import_provider.UnsupportedFunctionalityError({
                      functionality: "PDF file parts with URLs"
                    });
                  }
                  return {
                    type: "file",
                    // A string starting with "file-" is treated as an
                    // already-uploaded file id; otherwise inline as base64.
                    file: typeof part.data === "string" && part.data.startsWith("file-") ? { file_id: part.data } : {
                      filename: (_c = part.filename) != null ? _c : `part-${index}.pdf`,
                      file_data: `data:application/pdf;base64,${(0, import_provider_utils.convertToBase64)(part.data)}`
                    }
                  };
                } else {
                  throw new import_provider.UnsupportedFunctionalityError({
                    functionality: `file part media type ${part.mediaType}`
                  });
                }
              }
            }
          })
        });
        break;
      }
      case "assistant": {
        // Text parts are concatenated; tool calls are collected separately.
        // Other part types are intentionally ignored here (no default case).
        let text = "";
        const toolCalls = [];
        for (const part of content) {
          switch (part.type) {
            case "text": {
              text += part.text;
              break;
            }
            case "tool-call": {
              toolCalls.push({
                id: part.toolCallId,
                type: "function",
                function: {
                  name: part.toolName,
                  arguments: JSON.stringify(part.input)
                }
              });
              break;
            }
          }
        }
        messages.push({
          role: "assistant",
          content: text,
          tool_calls: toolCalls.length > 0 ? toolCalls : void 0
        });
        break;
      }
      case "tool": {
        // Each tool response becomes its own `role: "tool"` message, keyed
        // by the originating tool call id.
        for (const toolResponse of content) {
          const output = toolResponse.output;
          let contentValue;
          switch (output.type) {
            case "text":
            case "error-text":
              contentValue = output.value;
              break;
            case "content":
            case "json":
            case "error-json":
              contentValue = JSON.stringify(output.value);
              break;
          }
          messages.push({
            role: "tool",
            tool_call_id: toolResponse.toolCallId,
            content: contentValue
          });
        }
        break;
      }
      default: {
        const _exhaustiveCheck = role;
        throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
      }
    }
  }
  return { messages, warnings };
}
213
+
214
+ // src/get-response-metadata.ts
215
/**
 * Maps the raw response envelope fields (`id`, `model`, `created`) onto the
 * SDK's response-metadata shape, normalizing `null` to `undefined` and
 * converting the Unix `created` seconds into a `Date`.
 *
 * @param response - object carrying optional `id`, `model`, `created`.
 * @returns `{ id, modelId, timestamp }` with `undefined` for absent fields.
 */
function getResponseMetadata({ id, model, created }) {
  const timestamp = created == null ? void 0 : new Date(created * 1e3);
  return {
    id: id == null ? void 0 : id,
    modelId: model == null ? void 0 : model,
    timestamp
  };
}
226
+
227
+ // src/map-openai-finish-reason.ts
228
/**
 * Normalizes an OpenAI `finish_reason` value to the SDK's finish-reason
 * vocabulary. Both `function_call` and `tool_calls` collapse to
 * "tool-calls"; anything unrecognized (including null/undefined) maps to
 * "unknown".
 *
 * @param finishReason - raw finish reason from the OpenAI response.
 * @returns the normalized finish reason string.
 */
function mapOpenAIFinishReason(finishReason) {
  const table = {
    stop: "stop",
    length: "length",
    content_filter: "content-filter",
    function_call: "tool-calls",
    tool_calls: "tool-calls"
  };
  // hasOwnProperty guard avoids hitting inherited Object.prototype keys.
  return Object.prototype.hasOwnProperty.call(table, finishReason) ? table[finishReason] : "unknown";
}
243
+
244
+ // src/openai-chat-options.ts
245
+ var import_v4 = require("zod/v4");
246
// Zod schema for the OpenAI-specific options accepted under
// `providerOptions.openai` on chat requests. Every field is optional.
var openaiProviderOptions = import_v4.z.object({
  /**
   * Modify the likelihood of specified tokens appearing in the completion.
   *
   * Accepts a JSON object that maps tokens (specified by their token ID in
   * the GPT tokenizer) to an associated bias value from -100 to 100.
   */
  logitBias: import_v4.z.record(import_v4.z.coerce.number(), import_v4.z.number()).optional(),
  /**
   * Return the log probabilities of the tokens.
   *
   * Setting to true will return the log probabilities of the tokens that
   * were generated.
   *
   * Setting to a number will return the log probabilities of the top n
   * tokens that were generated.
   */
  logprobs: import_v4.z.union([import_v4.z.boolean(), import_v4.z.number()]).optional(),
  /**
   * Whether to enable parallel function calling during tool use. Default to true.
   */
  parallelToolCalls: import_v4.z.boolean().optional(),
  /**
   * A unique identifier representing your end-user, which can help OpenAI to
   * monitor and detect abuse.
   */
  user: import_v4.z.string().optional(),
  /**
   * Reasoning effort for reasoning models. Defaults to `medium`.
   */
  reasoningEffort: import_v4.z.enum(["low", "medium", "high"]).optional(),
  /**
   * Maximum number of completion tokens to generate. Useful for reasoning models.
   */
  maxCompletionTokens: import_v4.z.number().optional(),
  /**
   * Whether to enable persistence in responses API.
   */
  store: import_v4.z.boolean().optional(),
  /**
   * Metadata to associate with the request.
   * Keys are limited to 64 characters, values to 512 characters.
   */
  metadata: import_v4.z.record(import_v4.z.string().max(64), import_v4.z.string().max(512)).optional(),
  /**
   * Parameters for prediction mode.
   */
  prediction: import_v4.z.record(import_v4.z.string(), import_v4.z.any()).optional(),
  /**
   * Whether to use structured outputs.
   *
   * @default true
   */
  structuredOutputs: import_v4.z.boolean().optional(),
  /**
   * Service tier for the request.
   * - 'auto': Default service tier
   * - 'flex': 50% cheaper processing at the cost of increased latency. Only available for o3 and o4-mini models.
   * - 'priority': Higher-speed processing with predictably low latency at premium cost. Available for Enterprise customers.
   *
   * @default 'auto'
   */
  serviceTier: import_v4.z.enum(["auto", "flex", "priority"]).optional(),
  /**
   * Whether to use strict JSON schema validation.
   *
   * @default false
   */
  strictJsonSchema: import_v4.z.boolean().optional()
});
315
+
316
+ // src/openai-error.ts
317
+ var import_v42 = require("zod/v4");
318
+ var import_provider_utils2 = require("@ai-sdk/provider-utils");
319
// Shape of an OpenAI API error response body. Only `error.message` is
// strictly required; the rest is validated loosely (see below).
var openaiErrorDataSchema = import_v42.z.object({
  error: import_v42.z.object({
    message: import_v42.z.string(),
    // The additional information below is handled loosely to support
    // OpenAI-compatible providers that have slightly different error
    // responses:
    type: import_v42.z.string().nullish(),
    param: import_v42.z.any().nullish(),
    code: import_v42.z.union([import_v42.z.string(), import_v42.z.number()]).nullish()
  })
});
// Response handler for failed API calls: parses the body with the schema
// above and surfaces `error.message` as the error text.
var openaiFailedResponseHandler = (0, import_provider_utils2.createJsonErrorResponseHandler)({
  errorSchema: openaiErrorDataSchema,
  errorToMessage: (data) => data.error.message
});
334
+
335
+ // src/openai-prepare-tools.ts
336
+ var import_provider2 = require("@ai-sdk/provider");
337
+
338
+ // src/tool/file-search.ts
339
+ var import_provider_utils3 = require("@ai-sdk/provider-utils");
340
+ var import_v43 = require("zod/v4");
341
// A single attribute comparison (e.g. key "eq" value) used in file-search
// filters.
var comparisonFilterSchema = import_v43.z.object({
  key: import_v43.z.string(),
  type: import_v43.z.enum(["eq", "ne", "gt", "gte", "lt", "lte"]),
  value: import_v43.z.union([import_v43.z.string(), import_v43.z.number(), import_v43.z.boolean()])
});
// Boolean combination of filters; recursive via z.lazy so compound filters
// can nest other compound filters.
var compoundFilterSchema = import_v43.z.object({
  type: import_v43.z.enum(["and", "or"]),
  filters: import_v43.z.array(
    import_v43.z.union([comparisonFilterSchema, import_v43.z.lazy(() => compoundFilterSchema)])
  )
});
var filtersSchema = import_v43.z.union([comparisonFilterSchema, compoundFilterSchema]);
// Configuration arguments accepted by the `file_search` provider tool.
var fileSearchArgsSchema = import_v43.z.object({
  /**
   * List of vector store IDs to search through. If not provided, searches all available vector stores.
   */
  vectorStoreIds: import_v43.z.array(import_v43.z.string()).optional(),
  /**
   * Maximum number of search results to return. Defaults to 10.
   */
  maxNumResults: import_v43.z.number().optional(),
  /**
   * Ranking options for the search.
   */
  ranking: import_v43.z.object({
    ranker: import_v43.z.enum(["auto", "default-2024-08-21"]).optional()
  }).optional(),
  /**
   * A filter to apply based on file attributes.
   */
  filters: filtersSchema.optional()
});
// Provider-defined tool factory for OpenAI file search; the model-facing
// input is a single query string.
var fileSearch = (0, import_provider_utils3.createProviderDefinedToolFactory)({
  id: "openai.file_search",
  name: "file_search",
  inputSchema: import_v43.z.object({
    query: import_v43.z.string()
  })
});
380
+
381
+ // src/tool/web-search-preview.ts
382
+ var import_provider_utils4 = require("@ai-sdk/provider-utils");
383
+ var import_v44 = require("zod/v4");
384
// Configuration arguments accepted by the `web_search_preview` provider
// tool.
var webSearchPreviewArgsSchema = import_v44.z.object({
  /**
   * Search context size to use for the web search.
   * - high: Most comprehensive context, highest cost, slower response
   * - medium: Balanced context, cost, and latency (default)
   * - low: Least context, lowest cost, fastest response
   */
  searchContextSize: import_v44.z.enum(["low", "medium", "high"]).optional(),
  /**
   * User location information to provide geographically relevant search results.
   */
  userLocation: import_v44.z.object({
    /**
     * Type of location (always 'approximate')
     */
    type: import_v44.z.literal("approximate"),
    /**
     * Two-letter ISO country code (e.g., 'US', 'GB')
     */
    country: import_v44.z.string().optional(),
    /**
     * City name (free text, e.g., 'Minneapolis')
     */
    city: import_v44.z.string().optional(),
    /**
     * Region name (free text, e.g., 'Minnesota')
     */
    region: import_v44.z.string().optional(),
    /**
     * IANA timezone (e.g., 'America/Chicago')
     */
    timezone: import_v44.z.string().optional()
  }).optional()
});
// Provider-defined tool factory for OpenAI web search preview; the tool
// takes no model-facing input (empty input schema).
var webSearchPreview = (0, import_provider_utils4.createProviderDefinedToolFactory)({
  id: "openai.web_search_preview",
  name: "web_search_preview",
  inputSchema: import_v44.z.object({})
});
423
+
424
+ // src/openai-prepare-tools.ts
425
/**
 * Converts SDK-level tool definitions and tool choice into the OpenAI
 * chat-completions wire format.
 *
 * @param tools - SDK tool definitions (function or provider-defined).
 * @param toolChoice - SDK tool choice ("auto"/"none"/"required"/"tool").
 * @param structuredOutputs - whether structured outputs are enabled; only
 *   then is `strict` forwarded on function tools.
 * @param strictJsonSchema - strict-mode flag applied when structured
 *   outputs are enabled.
 * @returns `{ tools, toolChoice, toolWarnings }` — the mapped tools and
 *   choice (both `undefined` when no tools apply), plus warnings for
 *   tools that could not be mapped.
 * @throws UnsupportedFunctionalityError for an unknown tool-choice type.
 */
function prepareTools({
  tools,
  toolChoice,
  structuredOutputs,
  strictJsonSchema
}) {
  const toolWarnings = [];
  // An empty tool list is treated the same as no tools at all.
  const activeTools = tools != null && tools.length > 0 ? tools : void 0;
  if (activeTools == null) {
    return { tools: void 0, toolChoice: void 0, toolWarnings };
  }
  const mappedTools = [];
  for (const tool of activeTools) {
    if (tool.type === "function") {
      mappedTools.push({
        type: "function",
        function: {
          name: tool.name,
          description: tool.description,
          parameters: tool.inputSchema,
          // strict mode is only meaningful with structured outputs enabled
          strict: structuredOutputs ? strictJsonSchema : void 0
        }
      });
    } else if (tool.type === "provider-defined" && tool.id === "openai.file_search") {
      const args = fileSearchArgsSchema.parse(tool.args);
      mappedTools.push({
        type: "file_search",
        vector_store_ids: args.vectorStoreIds,
        max_num_results: args.maxNumResults,
        ranking_options: args.ranking ? { ranker: args.ranking.ranker } : void 0,
        filters: args.filters
      });
    } else if (tool.type === "provider-defined" && tool.id === "openai.web_search_preview") {
      const args = webSearchPreviewArgsSchema.parse(tool.args);
      mappedTools.push({
        type: "web_search_preview",
        search_context_size: args.searchContextSize,
        user_location: args.userLocation
      });
    } else {
      // Unknown tool type or unknown provider-defined tool id.
      toolWarnings.push({ type: "unsupported-tool", tool });
    }
  }
  if (toolChoice == null) {
    return { tools: mappedTools, toolChoice: void 0, toolWarnings };
  }
  switch (toolChoice.type) {
    case "auto":
    case "none":
    case "required":
      return { tools: mappedTools, toolChoice: toolChoice.type, toolWarnings };
    case "tool":
      // Forcing a specific tool maps to OpenAI's function-choice object.
      return {
        tools: mappedTools,
        toolChoice: {
          type: "function",
          function: {
            name: toolChoice.toolName
          }
        },
        toolWarnings
      };
    default: {
      const _exhaustiveCheck = toolChoice.type;
      throw new import_provider2.UnsupportedFunctionalityError({
        functionality: `tool choice type: ${_exhaustiveCheck}`
      });
    }
  }
}
510
+
511
+ // src/openai-chat-language-model.ts
512
+ var OpenAIChatLanguageModel = class {
513
+ constructor(modelId, config) {
514
+ this.specificationVersion = "v2";
515
+ this.supportedUrls = {
516
+ "image/*": [/^https?:\/\/.*$/]
517
+ };
518
+ this.modelId = modelId;
519
+ this.config = config;
520
+ }
521
+ get provider() {
522
+ return this.config.provider;
523
+ }
524
+ async getArgs({
525
+ prompt,
526
+ maxOutputTokens,
527
+ temperature,
528
+ topP,
529
+ topK,
530
+ frequencyPenalty,
531
+ presencePenalty,
532
+ stopSequences,
533
+ responseFormat,
534
+ seed,
535
+ tools,
536
+ toolChoice,
537
+ providerOptions
538
+ }) {
539
+ var _a, _b, _c, _d;
540
+ const warnings = [];
541
+ const openaiOptions = (_a = await (0, import_provider_utils5.parseProviderOptions)({
542
+ provider: "openai",
543
+ providerOptions,
544
+ schema: openaiProviderOptions
545
+ })) != null ? _a : {};
546
+ const structuredOutputs = (_b = openaiOptions.structuredOutputs) != null ? _b : true;
547
+ if (topK != null) {
548
+ warnings.push({
549
+ type: "unsupported-setting",
550
+ setting: "topK"
551
+ });
552
+ }
553
+ if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !structuredOutputs) {
554
+ warnings.push({
555
+ type: "unsupported-setting",
556
+ setting: "responseFormat",
557
+ details: "JSON response format schema is only supported with structuredOutputs"
558
+ });
559
+ }
560
+ const { messages, warnings: messageWarnings } = convertToOpenAIChatMessages(
561
+ {
562
+ prompt,
563
+ systemMessageMode: getSystemMessageMode(this.modelId)
564
+ }
565
+ );
566
+ warnings.push(...messageWarnings);
567
+ const strictJsonSchema = (_c = openaiOptions.strictJsonSchema) != null ? _c : false;
568
+ const baseArgs = {
569
+ // model id:
570
+ model: this.modelId,
571
+ // model specific settings:
572
+ logit_bias: openaiOptions.logitBias,
573
+ logprobs: openaiOptions.logprobs === true || typeof openaiOptions.logprobs === "number" ? true : void 0,
574
+ top_logprobs: typeof openaiOptions.logprobs === "number" ? openaiOptions.logprobs : typeof openaiOptions.logprobs === "boolean" ? openaiOptions.logprobs ? 0 : void 0 : void 0,
575
+ user: openaiOptions.user,
576
+ parallel_tool_calls: openaiOptions.parallelToolCalls,
577
+ // standardized settings:
578
+ max_tokens: maxOutputTokens,
579
+ temperature,
580
+ top_p: topP,
581
+ frequency_penalty: frequencyPenalty,
582
+ presence_penalty: presencePenalty,
583
+ response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? structuredOutputs && responseFormat.schema != null ? {
584
+ type: "json_schema",
585
+ json_schema: {
586
+ schema: responseFormat.schema,
587
+ strict: strictJsonSchema,
588
+ name: (_d = responseFormat.name) != null ? _d : "response",
589
+ description: responseFormat.description
590
+ }
591
+ } : { type: "json_object" } : void 0,
592
+ stop: stopSequences,
593
+ seed,
594
+ // openai specific settings:
595
+ // TODO remove in next major version; we auto-map maxOutputTokens now
596
+ max_completion_tokens: openaiOptions.maxCompletionTokens,
597
+ store: openaiOptions.store,
598
+ metadata: openaiOptions.metadata,
599
+ prediction: openaiOptions.prediction,
600
+ reasoning_effort: openaiOptions.reasoningEffort,
601
+ service_tier: openaiOptions.serviceTier,
602
+ // messages:
603
+ messages
604
+ };
605
+ if (isReasoningModel(this.modelId)) {
606
+ if (baseArgs.temperature != null) {
607
+ baseArgs.temperature = void 0;
608
+ warnings.push({
609
+ type: "unsupported-setting",
610
+ setting: "temperature",
611
+ details: "temperature is not supported for reasoning models"
612
+ });
613
+ }
614
+ if (baseArgs.top_p != null) {
615
+ baseArgs.top_p = void 0;
616
+ warnings.push({
617
+ type: "unsupported-setting",
618
+ setting: "topP",
619
+ details: "topP is not supported for reasoning models"
620
+ });
621
+ }
622
+ if (baseArgs.frequency_penalty != null) {
623
+ baseArgs.frequency_penalty = void 0;
624
+ warnings.push({
625
+ type: "unsupported-setting",
626
+ setting: "frequencyPenalty",
627
+ details: "frequencyPenalty is not supported for reasoning models"
628
+ });
629
+ }
630
+ if (baseArgs.presence_penalty != null) {
631
+ baseArgs.presence_penalty = void 0;
632
+ warnings.push({
633
+ type: "unsupported-setting",
634
+ setting: "presencePenalty",
635
+ details: "presencePenalty is not supported for reasoning models"
636
+ });
637
+ }
638
+ if (baseArgs.logit_bias != null) {
639
+ baseArgs.logit_bias = void 0;
640
+ warnings.push({
641
+ type: "other",
642
+ message: "logitBias is not supported for reasoning models"
643
+ });
644
+ }
645
+ if (baseArgs.logprobs != null) {
646
+ baseArgs.logprobs = void 0;
647
+ warnings.push({
648
+ type: "other",
649
+ message: "logprobs is not supported for reasoning models"
650
+ });
651
+ }
652
+ if (baseArgs.top_logprobs != null) {
653
+ baseArgs.top_logprobs = void 0;
654
+ warnings.push({
655
+ type: "other",
656
+ message: "topLogprobs is not supported for reasoning models"
657
+ });
658
+ }
659
+ if (baseArgs.max_tokens != null) {
660
+ if (baseArgs.max_completion_tokens == null) {
661
+ baseArgs.max_completion_tokens = baseArgs.max_tokens;
662
+ }
663
+ baseArgs.max_tokens = void 0;
664
+ }
665
+ } else if (this.modelId.startsWith("gpt-4o-search-preview") || this.modelId.startsWith("gpt-4o-mini-search-preview")) {
666
+ if (baseArgs.temperature != null) {
667
+ baseArgs.temperature = void 0;
668
+ warnings.push({
669
+ type: "unsupported-setting",
670
+ setting: "temperature",
671
+ details: "temperature is not supported for the search preview models and has been removed."
672
+ });
673
+ }
674
+ }
675
+ if (openaiOptions.serviceTier === "flex" && !supportsFlexProcessing(this.modelId)) {
676
+ warnings.push({
677
+ type: "unsupported-setting",
678
+ setting: "serviceTier",
679
+ details: "flex processing is only available for o3, o4-mini, and gpt-5 models"
680
+ });
681
+ baseArgs.service_tier = void 0;
682
+ }
683
+ if (openaiOptions.serviceTier === "priority" && !supportsPriorityProcessing(this.modelId)) {
684
+ warnings.push({
685
+ type: "unsupported-setting",
686
+ setting: "serviceTier",
687
+ details: "priority processing is only available for supported models (gpt-4, gpt-5, gpt-5-mini, o3, o4-mini) and requires Enterprise access. gpt-5-nano is not supported"
688
+ });
689
+ baseArgs.service_tier = void 0;
690
+ }
691
+ const {
692
+ tools: openaiTools2,
693
+ toolChoice: openaiToolChoice,
694
+ toolWarnings
695
+ } = prepareTools({
696
+ tools,
697
+ toolChoice,
698
+ structuredOutputs,
699
+ strictJsonSchema
700
+ });
701
+ return {
702
+ args: {
703
+ ...baseArgs,
704
+ tools: openaiTools2,
705
+ tool_choice: openaiToolChoice
706
+ },
707
+ warnings: [...warnings, ...toolWarnings]
708
+ };
709
+ }
710
+ async doGenerate(options) {
711
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n;
712
+ const { args: body, warnings } = await this.getArgs(options);
713
+ const {
714
+ responseHeaders,
715
+ value: response,
716
+ rawValue: rawResponse
717
+ } = await (0, import_provider_utils5.postJsonToApi)({
718
+ url: this.config.url({
719
+ path: "/chat/completions",
720
+ modelId: this.modelId
721
+ }),
722
+ headers: (0, import_provider_utils5.combineHeaders)(this.config.headers(), options.headers),
723
+ body,
724
+ failedResponseHandler: openaiFailedResponseHandler,
725
+ successfulResponseHandler: (0, import_provider_utils5.createJsonResponseHandler)(
726
+ openaiChatResponseSchema
727
+ ),
728
+ abortSignal: options.abortSignal,
729
+ fetch: this.config.fetch
730
+ });
731
+ const choice = response.choices[0];
732
+ const content = [];
733
+ const text = choice.message.content;
734
+ if (text != null && text.length > 0) {
735
+ content.push({ type: "text", text });
736
+ }
737
+ for (const toolCall of (_a = choice.message.tool_calls) != null ? _a : []) {
738
+ content.push({
739
+ type: "tool-call",
740
+ toolCallId: (_b = toolCall.id) != null ? _b : (0, import_provider_utils5.generateId)(),
741
+ toolName: toolCall.function.name,
742
+ input: toolCall.function.arguments
743
+ });
744
+ }
745
+ for (const annotation of (_c = choice.message.annotations) != null ? _c : []) {
746
+ content.push({
747
+ type: "source",
748
+ sourceType: "url",
749
+ id: (0, import_provider_utils5.generateId)(),
750
+ url: annotation.url,
751
+ title: annotation.title
752
+ });
753
+ }
754
+ const completionTokenDetails = (_d = response.usage) == null ? void 0 : _d.completion_tokens_details;
755
+ const promptTokenDetails = (_e = response.usage) == null ? void 0 : _e.prompt_tokens_details;
756
+ const providerMetadata = { openai: {} };
757
+ if ((completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens) != null) {
758
+ providerMetadata.openai.acceptedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens;
759
+ }
760
+ if ((completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens) != null) {
761
+ providerMetadata.openai.rejectedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens;
762
+ }
763
+ if (((_f = choice.logprobs) == null ? void 0 : _f.content) != null) {
764
+ providerMetadata.openai.logprobs = choice.logprobs.content;
765
+ }
766
+ return {
767
+ content,
768
+ finishReason: mapOpenAIFinishReason(choice.finish_reason),
769
+ usage: {
770
+ inputTokens: (_h = (_g = response.usage) == null ? void 0 : _g.prompt_tokens) != null ? _h : void 0,
771
+ outputTokens: (_j = (_i = response.usage) == null ? void 0 : _i.completion_tokens) != null ? _j : void 0,
772
+ totalTokens: (_l = (_k = response.usage) == null ? void 0 : _k.total_tokens) != null ? _l : void 0,
773
+ reasoningTokens: (_m = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null ? _m : void 0,
774
+ cachedInputTokens: (_n = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null ? _n : void 0
775
+ },
776
+ request: { body },
777
+ response: {
778
+ ...getResponseMetadata(response),
779
+ headers: responseHeaders,
780
+ body: rawResponse
781
+ },
782
+ warnings,
783
+ providerMetadata
784
+ };
785
+ }
786
+ async doStream(options) {
787
+ const { args, warnings } = await this.getArgs(options);
788
+ const body = {
789
+ ...args,
790
+ stream: true,
791
+ stream_options: {
792
+ include_usage: true
793
+ }
794
+ };
795
+ const { responseHeaders, value: response } = await (0, import_provider_utils5.postJsonToApi)({
796
+ url: this.config.url({
797
+ path: "/chat/completions",
798
+ modelId: this.modelId
799
+ }),
800
+ headers: (0, import_provider_utils5.combineHeaders)(this.config.headers(), options.headers),
801
+ body,
802
+ failedResponseHandler: openaiFailedResponseHandler,
803
+ successfulResponseHandler: (0, import_provider_utils5.createEventSourceResponseHandler)(
804
+ openaiChatChunkSchema
805
+ ),
806
+ abortSignal: options.abortSignal,
807
+ fetch: this.config.fetch
808
+ });
809
+ const toolCalls = [];
810
+ let finishReason = "unknown";
811
+ const usage = {
812
+ inputTokens: void 0,
813
+ outputTokens: void 0,
814
+ totalTokens: void 0
815
+ };
816
+ let isFirstChunk = true;
817
+ let isActiveText = false;
818
+ const providerMetadata = { openai: {} };
819
+ return {
820
+ stream: response.pipeThrough(
821
+ new TransformStream({
822
+ start(controller) {
823
+ controller.enqueue({ type: "stream-start", warnings });
824
+ },
825
+ transform(chunk, controller) {
826
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x;
827
+ if (options.includeRawChunks) {
828
+ controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
829
+ }
830
+ if (!chunk.success) {
831
+ finishReason = "error";
832
+ controller.enqueue({ type: "error", error: chunk.error });
833
+ return;
834
+ }
835
+ const value = chunk.value;
836
+ if ("error" in value) {
837
+ finishReason = "error";
838
+ controller.enqueue({ type: "error", error: value.error });
839
+ return;
840
+ }
841
+ if (isFirstChunk) {
842
+ isFirstChunk = false;
843
+ controller.enqueue({
844
+ type: "response-metadata",
845
+ ...getResponseMetadata(value)
846
+ });
847
+ }
848
+ if (value.usage != null) {
849
+ usage.inputTokens = (_a = value.usage.prompt_tokens) != null ? _a : void 0;
850
+ usage.outputTokens = (_b = value.usage.completion_tokens) != null ? _b : void 0;
851
+ usage.totalTokens = (_c = value.usage.total_tokens) != null ? _c : void 0;
852
+ usage.reasoningTokens = (_e = (_d = value.usage.completion_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : void 0;
853
+ usage.cachedInputTokens = (_g = (_f = value.usage.prompt_tokens_details) == null ? void 0 : _f.cached_tokens) != null ? _g : void 0;
854
+ if (((_h = value.usage.completion_tokens_details) == null ? void 0 : _h.accepted_prediction_tokens) != null) {
855
+ providerMetadata.openai.acceptedPredictionTokens = (_i = value.usage.completion_tokens_details) == null ? void 0 : _i.accepted_prediction_tokens;
856
+ }
857
+ if (((_j = value.usage.completion_tokens_details) == null ? void 0 : _j.rejected_prediction_tokens) != null) {
858
+ providerMetadata.openai.rejectedPredictionTokens = (_k = value.usage.completion_tokens_details) == null ? void 0 : _k.rejected_prediction_tokens;
859
+ }
860
+ }
861
+ const choice = value.choices[0];
862
+ if ((choice == null ? void 0 : choice.finish_reason) != null) {
863
+ finishReason = mapOpenAIFinishReason(choice.finish_reason);
864
+ }
865
+ if (((_l = choice == null ? void 0 : choice.logprobs) == null ? void 0 : _l.content) != null) {
866
+ providerMetadata.openai.logprobs = choice.logprobs.content;
867
+ }
868
+ if ((choice == null ? void 0 : choice.delta) == null) {
869
+ return;
870
+ }
871
+ const delta = choice.delta;
872
+ if (delta.content != null) {
873
+ if (!isActiveText) {
874
+ controller.enqueue({ type: "text-start", id: "0" });
875
+ isActiveText = true;
876
+ }
877
+ controller.enqueue({
878
+ type: "text-delta",
879
+ id: "0",
880
+ delta: delta.content
881
+ });
882
+ }
883
+ if (delta.tool_calls != null) {
884
+ for (const toolCallDelta of delta.tool_calls) {
885
+ const index = toolCallDelta.index;
886
+ if (toolCalls[index] == null) {
887
+ if (toolCallDelta.type !== "function") {
888
+ throw new import_provider3.InvalidResponseDataError({
889
+ data: toolCallDelta,
890
+ message: `Expected 'function' type.`
891
+ });
892
+ }
893
+ if (toolCallDelta.id == null) {
894
+ throw new import_provider3.InvalidResponseDataError({
895
+ data: toolCallDelta,
896
+ message: `Expected 'id' to be a string.`
897
+ });
898
+ }
899
+ if (((_m = toolCallDelta.function) == null ? void 0 : _m.name) == null) {
900
+ throw new import_provider3.InvalidResponseDataError({
901
+ data: toolCallDelta,
902
+ message: `Expected 'function.name' to be a string.`
903
+ });
904
+ }
905
+ controller.enqueue({
906
+ type: "tool-input-start",
907
+ id: toolCallDelta.id,
908
+ toolName: toolCallDelta.function.name
909
+ });
910
+ toolCalls[index] = {
911
+ id: toolCallDelta.id,
912
+ type: "function",
913
+ function: {
914
+ name: toolCallDelta.function.name,
915
+ arguments: (_n = toolCallDelta.function.arguments) != null ? _n : ""
916
+ },
917
+ hasFinished: false
918
+ };
919
+ const toolCall2 = toolCalls[index];
920
+ if (((_o = toolCall2.function) == null ? void 0 : _o.name) != null && ((_p = toolCall2.function) == null ? void 0 : _p.arguments) != null) {
921
+ if (toolCall2.function.arguments.length > 0) {
922
+ controller.enqueue({
923
+ type: "tool-input-delta",
924
+ id: toolCall2.id,
925
+ delta: toolCall2.function.arguments
926
+ });
927
+ }
928
+ if ((0, import_provider_utils5.isParsableJson)(toolCall2.function.arguments)) {
929
+ controller.enqueue({
930
+ type: "tool-input-end",
931
+ id: toolCall2.id
932
+ });
933
+ controller.enqueue({
934
+ type: "tool-call",
935
+ toolCallId: (_q = toolCall2.id) != null ? _q : (0, import_provider_utils5.generateId)(),
936
+ toolName: toolCall2.function.name,
937
+ input: toolCall2.function.arguments
938
+ });
939
+ toolCall2.hasFinished = true;
940
+ }
941
+ }
942
+ continue;
943
+ }
944
+ const toolCall = toolCalls[index];
945
+ if (toolCall.hasFinished) {
946
+ continue;
947
+ }
948
+ if (((_r = toolCallDelta.function) == null ? void 0 : _r.arguments) != null) {
949
+ toolCall.function.arguments += (_t = (_s = toolCallDelta.function) == null ? void 0 : _s.arguments) != null ? _t : "";
950
+ }
951
+ controller.enqueue({
952
+ type: "tool-input-delta",
953
+ id: toolCall.id,
954
+ delta: (_u = toolCallDelta.function.arguments) != null ? _u : ""
955
+ });
956
+ if (((_v = toolCall.function) == null ? void 0 : _v.name) != null && ((_w = toolCall.function) == null ? void 0 : _w.arguments) != null && (0, import_provider_utils5.isParsableJson)(toolCall.function.arguments)) {
957
+ controller.enqueue({
958
+ type: "tool-input-end",
959
+ id: toolCall.id
960
+ });
961
+ controller.enqueue({
962
+ type: "tool-call",
963
+ toolCallId: (_x = toolCall.id) != null ? _x : (0, import_provider_utils5.generateId)(),
964
+ toolName: toolCall.function.name,
965
+ input: toolCall.function.arguments
966
+ });
967
+ toolCall.hasFinished = true;
968
+ }
969
+ }
970
+ }
971
+ if (delta.annotations != null) {
972
+ for (const annotation of delta.annotations) {
973
+ controller.enqueue({
974
+ type: "source",
975
+ sourceType: "url",
976
+ id: (0, import_provider_utils5.generateId)(),
977
+ url: annotation.url,
978
+ title: annotation.title
979
+ });
980
+ }
981
+ }
982
+ },
983
+ flush(controller) {
984
+ if (isActiveText) {
985
+ controller.enqueue({ type: "text-end", id: "0" });
986
+ }
987
+ controller.enqueue({
988
+ type: "finish",
989
+ finishReason,
990
+ usage,
991
+ ...providerMetadata != null ? { providerMetadata } : {}
992
+ });
993
+ }
994
+ })
995
+ ),
996
+ request: { body },
997
+ response: { headers: responseHeaders }
998
+ };
999
+ }
1000
+ };
1001
// Zod schema for OpenAI token-usage accounting. Every field is nullish because
// usage may be absent entirely (e.g. mid-stream chunks) or only partially
// populated by OpenAI-compatible providers.
var openaiTokenUsageSchema = import_v45.z.object({
  prompt_tokens: import_v45.z.number().nullish(),
  completion_tokens: import_v45.z.number().nullish(),
  total_tokens: import_v45.z.number().nullish(),
  // Breakdown of prompt tokens (e.g. prompt-caching hits).
  prompt_tokens_details: import_v45.z.object({
    cached_tokens: import_v45.z.number().nullish()
  }).nullish(),
  // Breakdown of completion tokens (reasoning + predicted-output accounting).
  completion_tokens_details: import_v45.z.object({
    reasoning_tokens: import_v45.z.number().nullish(),
    accepted_prediction_tokens: import_v45.z.number().nullish(),
    rejected_prediction_tokens: import_v45.z.number().nullish()
  }).nullish()
}).nullish();
1014
// Zod schema for a non-streaming chat-completions response body.
// Top-level metadata fields are nullish to tolerate OpenAI-compatible
// providers that omit them.
var openaiChatResponseSchema = import_v45.z.object({
  id: import_v45.z.string().nullish(),
  created: import_v45.z.number().nullish(),
  model: import_v45.z.string().nullish(),
  choices: import_v45.z.array(
    import_v45.z.object({
      message: import_v45.z.object({
        role: import_v45.z.literal("assistant").nullish(),
        content: import_v45.z.string().nullish(),
        // Tool calls in a final (non-delta) message always carry complete
        // name/arguments strings.
        tool_calls: import_v45.z.array(
          import_v45.z.object({
            id: import_v45.z.string().nullish(),
            type: import_v45.z.literal("function"),
            function: import_v45.z.object({
              name: import_v45.z.string(),
              arguments: import_v45.z.string()
            })
          })
        ).nullish(),
        // URL-citation annotations attached to the message content.
        annotations: import_v45.z.array(
          import_v45.z.object({
            type: import_v45.z.literal("url_citation"),
            start_index: import_v45.z.number(),
            end_index: import_v45.z.number(),
            url: import_v45.z.string(),
            title: import_v45.z.string()
          })
        ).nullish()
      }),
      index: import_v45.z.number(),
      // Per-token log probabilities (only present when requested).
      logprobs: import_v45.z.object({
        content: import_v45.z.array(
          import_v45.z.object({
            token: import_v45.z.string(),
            logprob: import_v45.z.number(),
            top_logprobs: import_v45.z.array(
              import_v45.z.object({
                token: import_v45.z.string(),
                logprob: import_v45.z.number()
              })
            )
          })
        ).nullish()
      }).nullish(),
      finish_reason: import_v45.z.string().nullish()
    })
  ),
  usage: openaiTokenUsageSchema
});
1063
// Zod schema for a single streamed chat-completions chunk. A chunk is either a
// delta payload or an error object (hence the union with openaiErrorDataSchema,
// which is matched when the regular chunk shape does not parse).
var openaiChatChunkSchema = import_v45.z.union([
  import_v45.z.object({
    id: import_v45.z.string().nullish(),
    created: import_v45.z.number().nullish(),
    model: import_v45.z.string().nullish(),
    choices: import_v45.z.array(
      import_v45.z.object({
        delta: import_v45.z.object({
          role: import_v45.z.enum(["assistant"]).nullish(),
          content: import_v45.z.string().nullish(),
          // Streaming tool calls arrive piecewise: `index` correlates the
          // fragments, and id/type/name may only be present on the first one.
          tool_calls: import_v45.z.array(
            import_v45.z.object({
              index: import_v45.z.number(),
              id: import_v45.z.string().nullish(),
              type: import_v45.z.literal("function").nullish(),
              function: import_v45.z.object({
                name: import_v45.z.string().nullish(),
                arguments: import_v45.z.string().nullish()
              })
            })
          ).nullish(),
          // URL-citation annotations streamed with the delta.
          annotations: import_v45.z.array(
            import_v45.z.object({
              type: import_v45.z.literal("url_citation"),
              start_index: import_v45.z.number(),
              end_index: import_v45.z.number(),
              url: import_v45.z.string(),
              title: import_v45.z.string()
            })
          ).nullish()
        }).nullish(),
        logprobs: import_v45.z.object({
          content: import_v45.z.array(
            import_v45.z.object({
              token: import_v45.z.string(),
              logprob: import_v45.z.number(),
              top_logprobs: import_v45.z.array(
                import_v45.z.object({
                  token: import_v45.z.string(),
                  logprob: import_v45.z.number()
                })
              )
            })
          ).nullish()
        }).nullish(),
        finish_reason: import_v45.z.string().nullish(),
        index: import_v45.z.number()
      })
    ),
    usage: openaiTokenUsageSchema
  }),
  openaiErrorDataSchema
]);
1116
// Whether the model id belongs to a reasoning model family: the o-series
// (any id starting with "o") or the gpt-5 family. Reasoning models get
// different parameter handling elsewhere in this file.
function isReasoningModel(modelId) {
  const reasoningPrefixes = ["o", "gpt-5"];
  return reasoningPrefixes.some((prefix) => modelId.startsWith(prefix));
}
1119
// Whether the model supports the "flex" service tier (o3, o4-mini, and the
// gpt-5 family).
function supportsFlexProcessing(modelId) {
  for (const prefix of ["o3", "o4-mini", "gpt-5"]) {
    if (modelId.startsWith(prefix)) {
      return true;
    }
  }
  return false;
}
1122
// Whether the model supports the "priority" service tier. Covered families:
// gpt-4*, o3*, o4-mini*, and gpt-5* with the exception of gpt-5-nano.
function supportsPriorityProcessing(modelId) {
  const alwaysEligible = ["gpt-4", "o3", "o4-mini"];
  if (alwaysEligible.some((prefix) => modelId.startsWith(prefix))) {
    return true;
  }
  // gpt-5 family (including gpt-5-mini) is eligible except the nano tier.
  return modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano");
}
1125
// Resolves how system messages must be sent for a model:
// - non-reasoning models use the regular "system" role;
// - reasoning models default to the "developer" role unless an explicit
//   per-model override exists in the reasoningModels table (e.g. "remove").
function getSystemMessageMode(modelId) {
  if (!isReasoningModel(modelId)) {
    return "system";
  }
  var modelConfig = reasoningModels[modelId];
  if (modelConfig != null && modelConfig.systemMessageMode != null) {
    return modelConfig.systemMessageMode;
  }
  return "developer";
}
1132
// Per-model overrides for system-message handling on reasoning models.
// Models not listed here fall back to "developer" (see getSystemMessageMode).
// Built from mode -> model-id lists; insertion order matches the original
// literal ("remove" entries first, then "developer" entries).
var reasoningModels = (() => {
  const idsByMode = {
    remove: [
      "o1-mini",
      "o1-mini-2024-09-12",
      "o1-preview",
      "o1-preview-2024-09-12"
    ],
    developer: [
      "o3",
      "o3-2025-04-16",
      "o3-mini",
      "o3-mini-2025-01-31",
      "o4-mini",
      "o4-mini-2025-04-16"
    ]
  };
  const table = {};
  for (const mode of Object.keys(idsByMode)) {
    for (const id of idsByMode[mode]) {
      table[id] = { systemMessageMode: mode };
    }
  }
  return table;
})();
1164
+
1165
+ // src/openai-completion-language-model.ts
1166
+ var import_provider_utils6 = require("@ai-sdk/provider-utils");
1167
+ var import_v47 = require("zod/v4");
1168
+
1169
+ // src/convert-to-openai-completion-prompt.ts
1170
+ var import_provider4 = require("@ai-sdk/provider");
1171
/**
 * Converts a standardized message prompt into a single text prompt for the
 * legacy OpenAI completions API.
 *
 * A system message is only permitted as the very first message; it is emitted
 * verbatim, followed by alternating `user:` / `assistant:` turns. The prompt
 * always ends with a trailing `assistant:\n` to cue the model's reply.
 *
 * @param prompt - standardized prompt messages (system/user/assistant).
 * @param user - label for user turns (default "user").
 * @param assistant - label for assistant turns (default "assistant").
 * @returns { prompt, stopSequences } where stopSequences stops generation
 *   when the model starts a new user turn.
 * @throws InvalidPromptError for a system message that is not first.
 * @throws UnsupportedFunctionalityError for tool calls / tool messages.
 */
function convertToOpenAICompletionPrompt({
  prompt,
  user = "user",
  assistant = "assistant"
}) {
  let text = "";
  if (prompt[0].role === "system") {
    text += `${prompt[0].content}

`;
    prompt = prompt.slice(1);
  }
  for (const { role, content } of prompt) {
    switch (role) {
      case "system": {
        throw new import_provider4.InvalidPromptError({
          // BUG FIX: this was a plain double-quoted string, so "${content}"
          // appeared literally in the error message. A template literal
          // interpolates the offending content as intended.
          message: `Unexpected system message in prompt: ${content}`,
          prompt
        });
      }
      case "user": {
        const userMessage = content.map((part) => {
          switch (part.type) {
            case "text": {
              return part.text;
            }
          }
        }).filter(Boolean).join("");
        text += `${user}:
${userMessage}

`;
        break;
      }
      case "assistant": {
        const assistantMessage = content.map((part) => {
          switch (part.type) {
            case "text": {
              return part.text;
            }
            case "tool-call": {
              throw new import_provider4.UnsupportedFunctionalityError({
                functionality: "tool-call messages"
              });
            }
          }
        }).join("");
        text += `${assistant}:
${assistantMessage}

`;
        break;
      }
      case "tool": {
        throw new import_provider4.UnsupportedFunctionalityError({
          functionality: "tool messages"
        });
      }
      default: {
        const _exhaustiveCheck = role;
        throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
      }
    }
  }
  // Cue the model to answer as the assistant.
  text += `${assistant}:
`;
  return {
    prompt: text,
    // Stop as soon as the model begins a new user turn.
    stopSequences: [`
${user}:`]
  };
}
1243
+
1244
+ // src/openai-completion-options.ts
1245
+ var import_v46 = require("zod/v4");
1246
// Provider-specific options accepted by the legacy completions API.
// All fields are optional; they are merged into the request body in
// OpenAICompletionLanguageModel.getArgs.
var openaiCompletionProviderOptions = import_v46.z.object({
  /**
  Echo back the prompt in addition to the completion.
   */
  echo: import_v46.z.boolean().optional(),
  /**
  Modify the likelihood of specified tokens appearing in the completion.

  Accepts a JSON object that maps tokens (specified by their token ID in
  the GPT tokenizer) to an associated bias value from -100 to 100. You
  can use this tokenizer tool to convert text to token IDs. Mathematically,
  the bias is added to the logits generated by the model prior to sampling.
  The exact effect will vary per model, but values between -1 and 1 should
  decrease or increase likelihood of selection; values like -100 or 100
  should result in a ban or exclusive selection of the relevant token.

  As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
  token from being generated.
   */
  logitBias: import_v46.z.record(import_v46.z.string(), import_v46.z.number()).optional(),
  /**
  The suffix that comes after a completion of inserted text.
   */
  suffix: import_v46.z.string().optional(),
  /**
  A unique identifier representing your end-user, which can help OpenAI to
  monitor and detect abuse. Learn more.
   */
  user: import_v46.z.string().optional(),
  /**
  Return the log probabilities of the tokens. Including logprobs will increase
  the response size and can slow down response times. However, it can
  be useful to better understand how the model is behaving.
  Setting to true will return the log probabilities of the tokens that
  were generated.
  Setting to a number will return the log probabilities of the top n
  tokens that were generated.
   */
  logprobs: import_v46.z.union([import_v46.z.boolean(), import_v46.z.number()]).optional()
});
1286
+
1287
+ // src/openai-completion-language-model.ts
1288
// Language-model implementation for the legacy OpenAI completions API
// (text-in/text-out, no tool calling, no JSON response format).
var OpenAICompletionLanguageModel = class {
  // config supplies provider id, URL builder, headers factory, and fetch.
  constructor(modelId, config) {
    this.specificationVersion = "v2";
    this.supportedUrls = {
      // No URLs are supported for completion models.
    };
    this.modelId = modelId;
    this.config = config;
  }
  // First segment of the provider id (e.g. "openai" from "openai.completion"),
  // used as a secondary key for provider-options lookup in getArgs.
  get providerOptionsName() {
    return this.config.provider.split(".")[0].trim();
  }
  get provider() {
    return this.config.provider;
  }
  // Translates the standardized call options into the completions request
  // body, emitting warnings for settings this API cannot honor.
  async getArgs({
    prompt,
    maxOutputTokens,
    temperature,
    topP,
    topK,
    frequencyPenalty,
    presencePenalty,
    stopSequences: userStopSequences,
    responseFormat,
    tools,
    toolChoice,
    seed,
    providerOptions
  }) {
    const warnings = [];
    // Options may be supplied under "openai" or under the provider-specific
    // name; the provider-specific entry wins on key collisions.
    const openaiOptions = {
      ...await (0, import_provider_utils6.parseProviderOptions)({
        provider: "openai",
        providerOptions,
        schema: openaiCompletionProviderOptions
      }),
      ...await (0, import_provider_utils6.parseProviderOptions)({
        provider: this.providerOptionsName,
        providerOptions,
        schema: openaiCompletionProviderOptions
      })
    };
    // The completions API supports neither topK, tools, toolChoice, nor
    // non-text response formats; warn instead of failing.
    if (topK != null) {
      warnings.push({ type: "unsupported-setting", setting: "topK" });
    }
    if (tools == null ? void 0 : tools.length) {
      warnings.push({ type: "unsupported-setting", setting: "tools" });
    }
    if (toolChoice != null) {
      warnings.push({ type: "unsupported-setting", setting: "toolChoice" });
    }
    if (responseFormat != null && responseFormat.type !== "text") {
      warnings.push({
        type: "unsupported-setting",
        setting: "responseFormat",
        details: "JSON response format is not supported."
      });
    }
    const { prompt: completionPrompt, stopSequences } = convertToOpenAICompletionPrompt({ prompt });
    // Prompt-derived stop sequences come first, then user-provided ones.
    const stop = [...stopSequences != null ? stopSequences : [], ...userStopSequences != null ? userStopSequences : []];
    return {
      args: {
        // model id:
        model: this.modelId,
        // model specific settings:
        echo: openaiOptions.echo,
        logit_bias: openaiOptions.logitBias,
        // logprobs: true maps to 0 (provider minimum), false maps to
        // undefined (omit), a number passes through unchanged.
        logprobs: (openaiOptions == null ? void 0 : openaiOptions.logprobs) === true ? 0 : (openaiOptions == null ? void 0 : openaiOptions.logprobs) === false ? void 0 : openaiOptions == null ? void 0 : openaiOptions.logprobs,
        suffix: openaiOptions.suffix,
        user: openaiOptions.user,
        // standardized settings:
        max_tokens: maxOutputTokens,
        temperature,
        top_p: topP,
        frequency_penalty: frequencyPenalty,
        presence_penalty: presencePenalty,
        seed,
        // prompt:
        prompt: completionPrompt,
        // stop sequences:
        stop: stop.length > 0 ? stop : void 0
      },
      warnings
    };
  }
  // Single-shot (non-streaming) generation.
  async doGenerate(options) {
    var _a, _b, _c;
    const { args, warnings } = await this.getArgs(options);
    const {
      responseHeaders,
      value: response,
      rawValue: rawResponse
    } = await (0, import_provider_utils6.postJsonToApi)({
      url: this.config.url({
        path: "/completions",
        modelId: this.modelId
      }),
      headers: (0, import_provider_utils6.combineHeaders)(this.config.headers(), options.headers),
      body: args,
      failedResponseHandler: openaiFailedResponseHandler,
      successfulResponseHandler: (0, import_provider_utils6.createJsonResponseHandler)(
        openaiCompletionResponseSchema
      ),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
    // Only the first choice is surfaced.
    const choice = response.choices[0];
    const providerMetadata = { openai: {} };
    if (choice.logprobs != null) {
      providerMetadata.openai.logprobs = choice.logprobs;
    }
    return {
      content: [{ type: "text", text: choice.text }],
      usage: {
        inputTokens: (_a = response.usage) == null ? void 0 : _a.prompt_tokens,
        outputTokens: (_b = response.usage) == null ? void 0 : _b.completion_tokens,
        totalTokens: (_c = response.usage) == null ? void 0 : _c.total_tokens
      },
      finishReason: mapOpenAIFinishReason(choice.finish_reason),
      request: { body: args },
      response: {
        ...getResponseMetadata(response),
        headers: responseHeaders,
        body: rawResponse
      },
      providerMetadata,
      warnings
    };
  }
  // Streaming generation via server-sent events. The SSE chunk stream is
  // piped through a TransformStream that converts provider chunks into the
  // standardized stream-part protocol.
  async doStream(options) {
    const { args, warnings } = await this.getArgs(options);
    const body = {
      ...args,
      stream: true,
      // Ask the API to append a final usage-only chunk.
      stream_options: {
        include_usage: true
      }
    };
    const { responseHeaders, value: response } = await (0, import_provider_utils6.postJsonToApi)({
      url: this.config.url({
        path: "/completions",
        modelId: this.modelId
      }),
      headers: (0, import_provider_utils6.combineHeaders)(this.config.headers(), options.headers),
      body,
      failedResponseHandler: openaiFailedResponseHandler,
      successfulResponseHandler: (0, import_provider_utils6.createEventSourceResponseHandler)(
        openaiCompletionChunkSchema
      ),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
    // Accumulated across chunks; emitted once in flush().
    let finishReason = "unknown";
    const providerMetadata = { openai: {} };
    const usage = {
      inputTokens: void 0,
      outputTokens: void 0,
      totalTokens: void 0
    };
    let isFirstChunk = true;
    return {
      stream: response.pipeThrough(
        new TransformStream({
          start(controller) {
            controller.enqueue({ type: "stream-start", warnings });
          },
          transform(chunk, controller) {
            if (options.includeRawChunks) {
              controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
            }
            // Schema-parse failure: surface as error and mark the stream.
            if (!chunk.success) {
              finishReason = "error";
              controller.enqueue({ type: "error", error: chunk.error });
              return;
            }
            const value = chunk.value;
            // API-level error payload (openaiErrorDataSchema branch).
            if ("error" in value) {
              finishReason = "error";
              controller.enqueue({ type: "error", error: value.error });
              return;
            }
            // First chunk carries response metadata and opens the single
            // text part (id "0").
            if (isFirstChunk) {
              isFirstChunk = false;
              controller.enqueue({
                type: "response-metadata",
                ...getResponseMetadata(value)
              });
              controller.enqueue({ type: "text-start", id: "0" });
            }
            if (value.usage != null) {
              usage.inputTokens = value.usage.prompt_tokens;
              usage.outputTokens = value.usage.completion_tokens;
              usage.totalTokens = value.usage.total_tokens;
            }
            const choice = value.choices[0];
            if ((choice == null ? void 0 : choice.finish_reason) != null) {
              finishReason = mapOpenAIFinishReason(choice.finish_reason);
            }
            if ((choice == null ? void 0 : choice.logprobs) != null) {
              providerMetadata.openai.logprobs = choice.logprobs;
            }
            if ((choice == null ? void 0 : choice.text) != null && choice.text.length > 0) {
              controller.enqueue({
                type: "text-delta",
                id: "0",
                delta: choice.text
              });
            }
          },
          flush(controller) {
            // Close the text part only if it was ever opened.
            if (!isFirstChunk) {
              controller.enqueue({ type: "text-end", id: "0" });
            }
            controller.enqueue({
              type: "finish",
              finishReason,
              providerMetadata,
              usage
            });
          }
        })
      ),
      request: { body },
      response: { headers: responseHeaders }
    };
  }
};
1516
// Token usage for the completions API; unlike the chat variant, all three
// counters are required when usage is present.
var usageSchema = import_v47.z.object({
  prompt_tokens: import_v47.z.number(),
  completion_tokens: import_v47.z.number(),
  total_tokens: import_v47.z.number()
});
// Non-streaming completions response body.
var openaiCompletionResponseSchema = import_v47.z.object({
  id: import_v47.z.string().nullish(),
  created: import_v47.z.number().nullish(),
  model: import_v47.z.string().nullish(),
  choices: import_v47.z.array(
    import_v47.z.object({
      text: import_v47.z.string(),
      finish_reason: import_v47.z.string(),
      // Legacy logprobs shape: parallel arrays of tokens and their logprobs.
      logprobs: import_v47.z.object({
        tokens: import_v47.z.array(import_v47.z.string()),
        token_logprobs: import_v47.z.array(import_v47.z.number()),
        top_logprobs: import_v47.z.array(import_v47.z.record(import_v47.z.string(), import_v47.z.number())).nullish()
      }).nullish()
    })
  ),
  usage: usageSchema.nullish()
});
// Streaming completions chunk: either a delta payload or an error object
// (union with openaiErrorDataSchema).
var openaiCompletionChunkSchema = import_v47.z.union([
  import_v47.z.object({
    id: import_v47.z.string().nullish(),
    created: import_v47.z.number().nullish(),
    model: import_v47.z.string().nullish(),
    choices: import_v47.z.array(
      import_v47.z.object({
        text: import_v47.z.string(),
        finish_reason: import_v47.z.string().nullish(),
        index: import_v47.z.number(),
        logprobs: import_v47.z.object({
          tokens: import_v47.z.array(import_v47.z.string()),
          token_logprobs: import_v47.z.array(import_v47.z.number()),
          top_logprobs: import_v47.z.array(import_v47.z.record(import_v47.z.string(), import_v47.z.number())).nullish()
        }).nullish()
      })
    ),
    usage: usageSchema.nullish()
  }),
  openaiErrorDataSchema
]);
1559
+
1560
+ // src/openai-embedding-model.ts
1561
+ var import_provider5 = require("@ai-sdk/provider");
1562
+ var import_provider_utils7 = require("@ai-sdk/provider-utils");
1563
+ var import_v49 = require("zod/v4");
1564
+
1565
+ // src/openai-embedding-options.ts
1566
+ var import_v48 = require("zod/v4");
1567
// Provider-specific options for the embeddings API, parsed in
// OpenAIEmbeddingModel.doEmbed.
var openaiEmbeddingProviderOptions = import_v48.z.object({
  /**
  The number of dimensions the resulting output embeddings should have.
  Only supported in text-embedding-3 and later models.
   */
  dimensions: import_v48.z.number().optional(),
  /**
  A unique identifier representing your end-user, which can help OpenAI to
  monitor and detect abuse. Learn more.
   */
  user: import_v48.z.string().optional()
});
1579
+
1580
+ // src/openai-embedding-model.ts
1581
// Embedding-model implementation for the OpenAI /embeddings endpoint.
var OpenAIEmbeddingModel = class {
  constructor(modelId, config) {
    this.specificationVersion = "v2";
    // OpenAI accepts at most 2048 inputs per request.
    this.maxEmbeddingsPerCall = 2048;
    this.supportsParallelCalls = true;
    this.modelId = modelId;
    this.config = config;
  }
  get provider() {
    return this.config.provider;
  }
  // Embeds a batch of values in a single API call.
  // Throws TooManyEmbeddingValuesForCallError when the batch exceeds the
  // per-call limit; callers are expected to chunk their input.
  async doEmbed({
    values,
    headers,
    abortSignal,
    providerOptions
  }) {
    var _a;
    if (values.length > this.maxEmbeddingsPerCall) {
      throw new import_provider5.TooManyEmbeddingValuesForCallError({
        provider: this.provider,
        modelId: this.modelId,
        maxEmbeddingsPerCall: this.maxEmbeddingsPerCall,
        values
      });
    }
    // Missing provider options are treated as an empty option set.
    const openaiOptions = (_a = await (0, import_provider_utils7.parseProviderOptions)({
      provider: "openai",
      providerOptions,
      schema: openaiEmbeddingProviderOptions
    })) != null ? _a : {};
    const {
      responseHeaders,
      value: response,
      rawValue
    } = await (0, import_provider_utils7.postJsonToApi)({
      url: this.config.url({
        path: "/embeddings",
        modelId: this.modelId
      }),
      headers: (0, import_provider_utils7.combineHeaders)(this.config.headers(), headers),
      body: {
        model: this.modelId,
        input: values,
        // Always request plain float arrays (not base64).
        encoding_format: "float",
        dimensions: openaiOptions.dimensions,
        user: openaiOptions.user
      },
      failedResponseHandler: openaiFailedResponseHandler,
      successfulResponseHandler: (0, import_provider_utils7.createJsonResponseHandler)(
        openaiTextEmbeddingResponseSchema
      ),
      abortSignal,
      fetch: this.config.fetch
    });
    return {
      embeddings: response.data.map((item) => item.embedding),
      usage: response.usage ? { tokens: response.usage.prompt_tokens } : void 0,
      response: { headers: responseHeaders, body: rawValue }
    };
  }
};
1643
// Minimal schema for the /embeddings response: only the fields this provider
// actually reads (embedding vectors and prompt-token usage).
var openaiTextEmbeddingResponseSchema = import_v49.z.object({
  data: import_v49.z.array(import_v49.z.object({ embedding: import_v49.z.array(import_v49.z.number()) })),
  usage: import_v49.z.object({ prompt_tokens: import_v49.z.number() }).nullish()
});
1647
+
1648
+ // src/openai-image-model.ts
1649
+ var import_provider_utils8 = require("@ai-sdk/provider-utils");
1650
+ var import_v410 = require("zod/v4");
1651
+
1652
+ // src/openai-image-settings.ts
1653
// Per-model cap on images per generation call; models not listed default
// to 1 (see OpenAIImageModel.maxImagesPerCall).
var modelMaxImagesPerCall = {
  "dall-e-3": 1,
  "dall-e-2": 10,
  "gpt-image-1": 10
};
// Models whose responses are already base64 by default, so the request must
// not include response_format (see OpenAIImageModel.doGenerate).
var hasDefaultResponseFormat = /* @__PURE__ */ new Set(["gpt-image-1"]);
1659
+
1660
+ // src/openai-image-model.ts
1661
// Image-model implementation for the OpenAI /images/generations endpoint.
var OpenAIImageModel = class {
  constructor(modelId, config) {
    this.modelId = modelId;
    this.config = config;
    this.specificationVersion = "v2";
  }
  // Per-model batch limit; unknown models are restricted to one image.
  get maxImagesPerCall() {
    var _a;
    return (_a = modelMaxImagesPerCall[this.modelId]) != null ? _a : 1;
  }
  get provider() {
    return this.config.provider;
  }
  async doGenerate({
    prompt,
    n,
    size,
    aspectRatio,
    seed,
    providerOptions,
    headers,
    abortSignal
  }) {
    var _a, _b, _c, _d;
    const warnings = [];
    // The OpenAI image API takes explicit sizes, not aspect ratios or seeds.
    if (aspectRatio != null) {
      warnings.push({
        type: "unsupported-setting",
        setting: "aspectRatio",
        details: "This model does not support aspect ratio. Use `size` instead."
      });
    }
    if (seed != null) {
      warnings.push({ type: "unsupported-setting", setting: "seed" });
    }
    // _internal.currentDate is a test hook; fall back to the real clock.
    const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
    const { value: response, responseHeaders } = await (0, import_provider_utils8.postJsonToApi)({
      url: this.config.url({
        path: "/images/generations",
        modelId: this.modelId
      }),
      headers: (0, import_provider_utils8.combineHeaders)(this.config.headers(), headers),
      body: {
        model: this.modelId,
        prompt,
        n,
        size,
        // Arbitrary provider options are passed straight through.
        ...(_d = providerOptions.openai) != null ? _d : {},
        // Request base64 unless the model already returns it by default.
        ...!hasDefaultResponseFormat.has(this.modelId) ? { response_format: "b64_json" } : {}
      },
      failedResponseHandler: openaiFailedResponseHandler,
      successfulResponseHandler: (0, import_provider_utils8.createJsonResponseHandler)(
        openaiImageResponseSchema
      ),
      abortSignal,
      fetch: this.config.fetch
    });
    return {
      images: response.data.map((item) => item.b64_json),
      warnings,
      response: {
        timestamp: currentDate,
        modelId: this.modelId,
        headers: responseHeaders
      },
      providerMetadata: {
        openai: {
          // One entry per image, aligned by index with `images`; null when
          // the provider did not revise the prompt.
          images: response.data.map(
            (item) => item.revised_prompt ? {
              revisedPrompt: item.revised_prompt
            } : null
          )
        }
      }
    };
  }
};
1738
// Schema for /images/generations responses: base64 image data plus the
// optional provider-revised prompt.
var openaiImageResponseSchema = import_v410.z.object({
  data: import_v410.z.array(
    import_v410.z.object({ b64_json: import_v410.z.string(), revised_prompt: import_v410.z.string().optional() })
  )
});
1743
+
1744
+ // src/openai-tools.ts
1745
// Built-in provider tools exposed on the provider instance
// (fileSearch and webSearchPreview are defined elsewhere in this file).
var openaiTools = {
  fileSearch,
  webSearchPreview
};
1749
+
1750
+ // src/openai-transcription-model.ts
1751
+ var import_provider_utils9 = require("@ai-sdk/provider-utils");
1752
+ var import_v412 = require("zod/v4");
1753
+
1754
+ // src/openai-transcription-options.ts
1755
+ var import_v411 = require("zod/v4");
1756
// Provider-specific options for the /audio/transcriptions endpoint, parsed in
// OpenAITranscriptionModel.getArgs and forwarded as form-data fields.
var openAITranscriptionProviderOptions = import_v411.z.object({
  /**
   * Additional information to include in the transcription response.
   */
  include: import_v411.z.array(import_v411.z.string()).optional(),
  /**
   * The language of the input audio in ISO-639-1 format.
   */
  language: import_v411.z.string().optional(),
  /**
   * An optional text to guide the model's style or continue a previous audio segment.
   */
  prompt: import_v411.z.string().optional(),
  /**
   * The sampling temperature, between 0 and 1.
   * @default 0
   */
  temperature: import_v411.z.number().min(0).max(1).default(0).optional(),
  /**
   * The timestamp granularities to populate for this transcription.
   * @default ['segment']
   */
  timestampGranularities: import_v411.z.array(import_v411.z.enum(["word", "segment"])).default(["segment"]).optional()
});
1780
+
1781
+ // src/openai-transcription-model.ts
1782
// Maps the English language name returned by the transcription API to its
// ISO-639-1 code (used in OpenAITranscriptionModel.doGenerate).
var languageMap = {
  afrikaans: "af",
  arabic: "ar",
  armenian: "hy",
  azerbaijani: "az",
  belarusian: "be",
  bosnian: "bs",
  bulgarian: "bg",
  catalan: "ca",
  chinese: "zh",
  croatian: "hr",
  czech: "cs",
  danish: "da",
  dutch: "nl",
  english: "en",
  estonian: "et",
  finnish: "fi",
  french: "fr",
  galician: "gl",
  german: "de",
  greek: "el",
  hebrew: "he",
  hindi: "hi",
  hungarian: "hu",
  icelandic: "is",
  indonesian: "id",
  italian: "it",
  japanese: "ja",
  kannada: "kn",
  kazakh: "kk",
  korean: "ko",
  latvian: "lv",
  lithuanian: "lt",
  macedonian: "mk",
  malay: "ms",
  marathi: "mr",
  maori: "mi",
  nepali: "ne",
  norwegian: "no",
  persian: "fa",
  polish: "pl",
  portuguese: "pt",
  romanian: "ro",
  russian: "ru",
  serbian: "sr",
  slovak: "sk",
  slovenian: "sl",
  spanish: "es",
  swahili: "sw",
  swedish: "sv",
  tagalog: "tl",
  tamil: "ta",
  thai: "th",
  turkish: "tr",
  ukrainian: "uk",
  urdu: "ur",
  vietnamese: "vi",
  welsh: "cy"
};
1841
// Transcription-model implementation for the OpenAI /audio/transcriptions
// endpoint (multipart form-data upload).
var OpenAITranscriptionModel = class {
  constructor(modelId, config) {
    this.modelId = modelId;
    this.config = config;
    this.specificationVersion = "v2";
  }
  get provider() {
    return this.config.provider;
  }
  // Builds the multipart form body from the audio payload and provider
  // options. `audio` is either raw bytes (Uint8Array) or a base64 string.
  async getArgs({
    audio,
    mediaType,
    providerOptions
  }) {
    const warnings = [];
    const openAIOptions = await (0, import_provider_utils9.parseProviderOptions)({
      provider: "openai",
      providerOptions,
      schema: openAITranscriptionProviderOptions
    });
    const formData = new FormData();
    const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([(0, import_provider_utils9.convertBase64ToUint8Array)(audio)]);
    formData.append("model", this.modelId);
    formData.append("file", new File([blob], "audio", { type: mediaType }));
    if (openAIOptions) {
      const transcriptionModelOptions = {
        include: openAIOptions.include,
        language: openAIOptions.language,
        prompt: openAIOptions.prompt,
        temperature: openAIOptions.temperature,
        timestamp_granularities: openAIOptions.timestampGranularities
      };
      // Only defined options become form fields; values are stringified
      // (arrays join with commas via String()).
      for (const [key, value] of Object.entries(transcriptionModelOptions)) {
        if (value != null) {
          formData.append(key, String(value));
        }
      }
    }
    return {
      formData,
      warnings
    };
  }
  async doGenerate(options) {
    var _a, _b, _c, _d, _e, _f;
    // _internal.currentDate is a test hook; fall back to the real clock.
    const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
    const { formData, warnings } = await this.getArgs(options);
    const {
      value: response,
      responseHeaders,
      rawValue: rawResponse
    } = await (0, import_provider_utils9.postFormDataToApi)({
      url: this.config.url({
        path: "/audio/transcriptions",
        modelId: this.modelId
      }),
      headers: (0, import_provider_utils9.combineHeaders)(this.config.headers(), options.headers),
      formData,
      failedResponseHandler: openaiFailedResponseHandler,
      successfulResponseHandler: (0, import_provider_utils9.createJsonResponseHandler)(
        openaiTranscriptionResponseSchema
      ),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
    // The API returns an English language name; translate to ISO-639-1 when
    // known, otherwise leave undefined.
    const language = response.language != null && response.language in languageMap ? languageMap[response.language] : void 0;
    return {
      text: response.text,
      // Word-level timestamps are mapped to segments; absent words yield [].
      segments: (_e = (_d = response.words) == null ? void 0 : _d.map((word) => ({
        text: word.word,
        startSecond: word.start,
        endSecond: word.end
      }))) != null ? _e : [],
      language,
      durationInSeconds: (_f = response.duration) != null ? _f : void 0,
      warnings,
      response: {
        timestamp: currentDate,
        modelId: this.modelId,
        headers: responseHeaders,
        body: rawResponse
      }
    };
  }
};
1926
// Zod schema for the /audio/transcriptions JSON response. Only the fields
// this model consumes are declared; `language`, `duration` and `words` are
// nullish because the API does not always include them.
var openaiTranscriptionResponseSchema = import_v412.z.object({
  text: import_v412.z.string(),
  language: import_v412.z.string().nullish(),
  duration: import_v412.z.number().nullish(),
  // Word-level timestamps (start/end in seconds).
  words: import_v412.z.array(
    import_v412.z.object({
      word: import_v412.z.string(),
      start: import_v412.z.number(),
      end: import_v412.z.number()
    })
  ).nullish()
});
1938
+
1939
+ // src/responses/openai-responses-language-model.ts
1940
+ var import_provider8 = require("@ai-sdk/provider");
1941
+ var import_provider_utils12 = require("@ai-sdk/provider-utils");
1942
+ var import_v414 = require("zod/v4");
1943
+
1944
+ // src/responses/convert-to-openai-responses-messages.ts
1945
+ var import_provider6 = require("@ai-sdk/provider");
1946
+ var import_provider_utils10 = require("@ai-sdk/provider-utils");
1947
+ var import_v413 = require("zod/v4");
1948
+ var import_provider_utils11 = require("@ai-sdk/provider-utils");
1949
/**
 * Converts the SDK prompt (system / user / assistant / tool messages) into
 * the OpenAI Responses API `input` item list.
 *
 * @param prompt - ordered SDK prompt messages.
 * @param systemMessageMode - how system messages are emitted: as "system"
 *   items, as "developer" items, or "remove" (dropped with a warning).
 * @returns { messages, warnings } - API input items plus non-fatal warnings
 *   for content that could not be represented.
 * @throws UnsupportedFunctionalityError for PDF parts passed by URL or
 *   unsupported file media types; Error for unknown roles/modes.
 */
async function convertToOpenAIResponsesMessages({
  prompt,
  systemMessageMode
}) {
  var _a, _b, _c, _d, _e, _f;
  const messages = [];
  const warnings = [];
  for (const { role, content } of prompt) {
    switch (role) {
      case "system": {
        switch (systemMessageMode) {
          case "system": {
            messages.push({ role: "system", content });
            break;
          }
          case "developer": {
            messages.push({ role: "developer", content });
            break;
          }
          case "remove": {
            warnings.push({
              type: "other",
              message: "system messages are removed for this model"
            });
            break;
          }
          default: {
            const _exhaustiveCheck = systemMessageMode;
            throw new Error(
              `Unsupported system message mode: ${_exhaustiveCheck}`
            );
          }
        }
        break;
      }
      case "user": {
        messages.push({
          role: "user",
          content: content.map((part, index) => {
            var _a2, _b2, _c2;
            switch (part.type) {
              case "text": {
                return { type: "input_text", text: part.text };
              }
              case "file": {
                // Images may be referenced by URL, by an uploaded "file-*"
                // id, or inlined as a base64 data URL.
                if (part.mediaType.startsWith("image/")) {
                  const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
                  return {
                    type: "input_image",
                    ...part.data instanceof URL ? { image_url: part.data.toString() } : typeof part.data === "string" && part.data.startsWith("file-") ? { file_id: part.data } : {
                      image_url: `data:${mediaType};base64,${part.data}`
                    },
                    detail: (_b2 = (_a2 = part.providerOptions) == null ? void 0 : _a2.openai) == null ? void 0 : _b2.imageDetail
                  };
                } else if (part.mediaType === "application/pdf") {
                  // PDFs must be an uploaded file id or inline data; URLs
                  // are not supported.
                  if (part.data instanceof URL) {
                    throw new import_provider6.UnsupportedFunctionalityError({
                      functionality: "PDF file parts with URLs"
                    });
                  }
                  return {
                    type: "input_file",
                    ...typeof part.data === "string" && part.data.startsWith("file-") ? { file_id: part.data } : {
                      filename: (_c2 = part.filename) != null ? _c2 : `part-${index}.pdf`,
                      file_data: `data:application/pdf;base64,${(0, import_provider_utils11.convertToBase64)(part.data)}`
                    }
                  };
                } else {
                  throw new import_provider6.UnsupportedFunctionalityError({
                    functionality: `file part media type ${part.mediaType}`
                  });
                }
              }
            }
          })
        });
        break;
      }
      case "assistant": {
        // Reasoning parts sharing an OpenAI item id are merged into a
        // single reasoning item; this map is reset per assistant message.
        const reasoningMessages = {};
        for (const part of content) {
          switch (part.type) {
            case "text": {
              messages.push({
                role: "assistant",
                content: [{ type: "output_text", text: part.text }],
                id: (_c = (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.itemId) != null ? _c : void 0
              });
              break;
            }
            case "tool-call": {
              // Provider-executed calls (e.g. built-in tools) are not
              // replayed to the API.
              if (part.providerExecuted) {
                break;
              }
              messages.push({
                type: "function_call",
                call_id: part.toolCallId,
                name: part.toolName,
                arguments: JSON.stringify(part.input),
                id: (_f = (_e = (_d = part.providerOptions) == null ? void 0 : _d.openai) == null ? void 0 : _e.itemId) != null ? _f : void 0
              });
              break;
            }
            case "tool-result": {
              warnings.push({
                type: "other",
                message: `tool result parts in assistant messages are not supported for OpenAI responses`
              });
              break;
            }
            case "reasoning": {
              const providerOptions = await (0, import_provider_utils10.parseProviderOptions)({
                provider: "openai",
                providerOptions: part.providerOptions,
                schema: openaiResponsesReasoningProviderOptionsSchema
              });
              const reasoningId = providerOptions == null ? void 0 : providerOptions.itemId;
              if (reasoningId != null) {
                const existingReasoningMessage = reasoningMessages[reasoningId];
                const summaryParts = [];
                if (part.text.length > 0) {
                  summaryParts.push({ type: "summary_text", text: part.text });
                } else if (existingReasoningMessage !== void 0) {
                  // An empty follow-up part cannot extend an existing
                  // summary sequence; skip it with a warning.
                  warnings.push({
                    type: "other",
                    message: `Cannot append empty reasoning part to existing reasoning sequence. Skipping reasoning part: ${JSON.stringify(part)}.`
                  });
                }
                if (existingReasoningMessage === void 0) {
                  reasoningMessages[reasoningId] = {
                    type: "reasoning",
                    id: reasoningId,
                    encrypted_content: providerOptions == null ? void 0 : providerOptions.reasoningEncryptedContent,
                    summary: summaryParts
                  };
                  messages.push(reasoningMessages[reasoningId]);
                } else {
                  existingReasoningMessage.summary.push(...summaryParts);
                }
              } else {
                // Reasoning parts without an OpenAI item id cannot be
                // round-tripped and are dropped.
                warnings.push({
                  type: "other",
                  message: `Non-OpenAI reasoning parts are not supported. Skipping reasoning part: ${JSON.stringify(part)}.`
                });
              }
              break;
            }
          }
        }
        break;
      }
      case "tool": {
        for (const part of content) {
          const output = part.output;
          let contentValue;
          // Text-like outputs pass through; structured outputs are
          // JSON-stringified.
          switch (output.type) {
            case "text":
            case "error-text":
              contentValue = output.value;
              break;
            case "content":
            case "json":
            case "error-json":
              contentValue = JSON.stringify(output.value);
              break;
          }
          messages.push({
            type: "function_call_output",
            call_id: part.toolCallId,
            output: contentValue
          });
        }
        break;
      }
      default: {
        const _exhaustiveCheck = role;
        throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
      }
    }
  }
  return { messages, warnings };
}
2131
// Provider options carried on reasoning parts: the OpenAI reasoning item id
// and the optional encrypted reasoning payload that is sent back to the API
// when the part is replayed.
var openaiResponsesReasoningProviderOptionsSchema = import_v413.z.object({
  itemId: import_v413.z.string().nullish(),
  reasoningEncryptedContent: import_v413.z.string().nullish()
});
2135
+
2136
+ // src/responses/map-openai-responses-finish-reason.ts
2137
// Maps the Responses API `incomplete_details.reason` to the SDK finish
// reason. A missing reason means normal completion: "tool-calls" when tool
// calls were produced, otherwise "stop". Unrecognized reasons map to
// "tool-calls"/"unknown" by the same rule.
function mapOpenAIResponseFinishReason({
  finishReason,
  hasToolCalls
}) {
  if (finishReason == null) {
    return hasToolCalls ? "tool-calls" : "stop";
  }
  if (finishReason === "max_output_tokens") {
    return "length";
  }
  if (finishReason === "content_filter") {
    return "content-filter";
  }
  return hasToolCalls ? "tool-calls" : "unknown";
}
2153
+
2154
+ // src/responses/openai-responses-prepare-tools.ts
2155
+ var import_provider7 = require("@ai-sdk/provider");
2156
// Translates SDK tool definitions and tool choice into the Responses API
// `tools` / `tool_choice` shapes.
//
// - function tools become {type:"function", ...} entries with the given
//   strict-JSON-schema flag;
// - the provider-defined "openai.file_search" and
//   "openai.web_search_preview" tools are mapped to their built-in forms;
// - anything else produces an "unsupported-tool" warning.
// An empty/absent tool list short-circuits to all-undefined. "tool" choices
// naming a built-in tool select it by type; other names select a function.
function prepareResponsesTools({
  tools,
  toolChoice,
  strictJsonSchema
}) {
  const toolWarnings = [];
  const inputTools = tools && tools.length ? tools : void 0;
  if (inputTools == null) {
    return { tools: void 0, toolChoice: void 0, toolWarnings };
  }
  const openaiTools2 = [];
  for (const tool of inputTools) {
    if (tool.type === "function") {
      openaiTools2.push({
        type: "function",
        name: tool.name,
        description: tool.description,
        parameters: tool.inputSchema,
        strict: strictJsonSchema
      });
    } else if (tool.type === "provider-defined" && tool.id === "openai.file_search") {
      const args = fileSearchArgsSchema.parse(tool.args);
      openaiTools2.push({
        type: "file_search",
        vector_store_ids: args.vectorStoreIds,
        max_num_results: args.maxNumResults,
        ranking_options: args.ranking ? { ranker: args.ranking.ranker } : void 0,
        filters: args.filters
      });
    } else if (tool.type === "provider-defined" && tool.id === "openai.web_search_preview") {
      openaiTools2.push({
        type: "web_search_preview",
        search_context_size: tool.args.searchContextSize,
        user_location: tool.args.userLocation
      });
    } else {
      toolWarnings.push({ type: "unsupported-tool", tool });
    }
  }
  if (toolChoice == null) {
    return { tools: openaiTools2, toolChoice: void 0, toolWarnings };
  }
  const choiceType = toolChoice.type;
  if (choiceType === "auto" || choiceType === "none" || choiceType === "required") {
    return { tools: openaiTools2, toolChoice: choiceType, toolWarnings };
  }
  if (choiceType === "tool") {
    let mappedChoice;
    if (toolChoice.toolName === "file_search") {
      mappedChoice = { type: "file_search" };
    } else if (toolChoice.toolName === "web_search_preview") {
      mappedChoice = { type: "web_search_preview" };
    } else {
      mappedChoice = { type: "function", name: toolChoice.toolName };
    }
    return { tools: openaiTools2, toolChoice: mappedChoice, toolWarnings };
  }
  throw new import_provider7.UnsupportedFunctionalityError({
    functionality: `tool choice type: ${choiceType}`
  });
}
2231
+
2232
+ // src/responses/openai-responses-language-model.ts
2233
+ var OpenAIResponsesLanguageModel = class {
2234
  // `config` supplies the provider name, URL builder, headers and optional
  // fetch/generateId overrides. `supportedUrls` declares which image URLs
  // may be passed through to the API by reference (any http/https URL).
  constructor(modelId, config) {
    this.specificationVersion = "v2";
    this.supportedUrls = {
      "image/*": [/^https?:\/\/.*$/]
    };
    this.modelId = modelId;
    this.config = config;
  }
2242
  // Provider identifier, taken from the shared provider config.
  get provider() {
    return this.config.provider;
  }
2245
  // Builds the JSON request body for POST /responses.
  //
  // Emits `unsupported-setting` warnings for parameters the Responses API
  // does not accept (topK, seed, presencePenalty, frequencyPenalty,
  // stopSequences), converts the prompt to API input items, applies the
  // parsed OpenAI provider options, and prepares tools/tool choice.
  // Reasoning models additionally drop temperature/topP with warnings;
  // non-reasoning models warn on reasoning options. Unsupported service
  // tiers are removed from the body.
  // Returns { args, warnings }.
  async getArgs({
    maxOutputTokens,
    temperature,
    stopSequences,
    topP,
    topK,
    presencePenalty,
    frequencyPenalty,
    seed,
    prompt,
    providerOptions,
    tools,
    toolChoice,
    responseFormat
  }) {
    var _a, _b;
    const warnings = [];
    // Per-model capabilities: systemMessageMode, isReasoningModel,
    // requiredAutoTruncation.
    const modelConfig = getResponsesModelConfig(this.modelId);
    if (topK != null) {
      warnings.push({ type: "unsupported-setting", setting: "topK" });
    }
    if (seed != null) {
      warnings.push({ type: "unsupported-setting", setting: "seed" });
    }
    if (presencePenalty != null) {
      warnings.push({
        type: "unsupported-setting",
        setting: "presencePenalty"
      });
    }
    if (frequencyPenalty != null) {
      warnings.push({
        type: "unsupported-setting",
        setting: "frequencyPenalty"
      });
    }
    if (stopSequences != null) {
      warnings.push({ type: "unsupported-setting", setting: "stopSequences" });
    }
    const { messages, warnings: messageWarnings } = await convertToOpenAIResponsesMessages({
      prompt,
      systemMessageMode: modelConfig.systemMessageMode
    });
    warnings.push(...messageWarnings);
    // Validate `providerOptions.openai` against the Responses options schema.
    const openaiOptions = await (0, import_provider_utils12.parseProviderOptions)({
      provider: "openai",
      providerOptions,
      schema: openaiResponsesProviderOptionsSchema
    });
    const strictJsonSchema = (_a = openaiOptions == null ? void 0 : openaiOptions.strictJsonSchema) != null ? _a : false;
    // Core request body. The spread conditionals only add `text` (JSON
    // response format and/or verbosity), `reasoning` and `truncation` when
    // they actually apply.
    const baseArgs = {
      model: this.modelId,
      input: messages,
      temperature,
      top_p: topP,
      max_output_tokens: maxOutputTokens,
      ...((responseFormat == null ? void 0 : responseFormat.type) === "json" || (openaiOptions == null ? void 0 : openaiOptions.textVerbosity)) && {
        text: {
          ...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
            format: responseFormat.schema != null ? {
              type: "json_schema",
              strict: strictJsonSchema,
              name: (_b = responseFormat.name) != null ? _b : "response",
              description: responseFormat.description,
              schema: responseFormat.schema
            } : { type: "json_object" }
          },
          ...(openaiOptions == null ? void 0 : openaiOptions.textVerbosity) && {
            verbosity: openaiOptions.textVerbosity
          }
        }
      },
      // provider options:
      metadata: openaiOptions == null ? void 0 : openaiOptions.metadata,
      parallel_tool_calls: openaiOptions == null ? void 0 : openaiOptions.parallelToolCalls,
      previous_response_id: openaiOptions == null ? void 0 : openaiOptions.previousResponseId,
      store: openaiOptions == null ? void 0 : openaiOptions.store,
      user: openaiOptions == null ? void 0 : openaiOptions.user,
      instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
      service_tier: openaiOptions == null ? void 0 : openaiOptions.serviceTier,
      include: openaiOptions == null ? void 0 : openaiOptions.include,
      // model-specific settings:
      ...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
        reasoning: {
          ...(openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
            effort: openaiOptions.reasoningEffort
          },
          ...(openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null && {
            summary: openaiOptions.reasoningSummary
          }
        }
      },
      ...modelConfig.requiredAutoTruncation && {
        truncation: "auto"
      }
    };
    // Reasoning models reject sampling controls; strip them with warnings.
    if (modelConfig.isReasoningModel) {
      if (baseArgs.temperature != null) {
        baseArgs.temperature = void 0;
        warnings.push({
          type: "unsupported-setting",
          setting: "temperature",
          details: "temperature is not supported for reasoning models"
        });
      }
      if (baseArgs.top_p != null) {
        baseArgs.top_p = void 0;
        warnings.push({
          type: "unsupported-setting",
          setting: "topP",
          details: "topP is not supported for reasoning models"
        });
      }
    } else {
      // Conversely, reasoning options are meaningless on non-reasoning
      // models; warn but leave the body untouched (no `reasoning` key was
      // added above).
      if ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null) {
        warnings.push({
          type: "unsupported-setting",
          setting: "reasoningEffort",
          details: "reasoningEffort is not supported for non-reasoning models"
        });
      }
      if ((openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) {
        warnings.push({
          type: "unsupported-setting",
          setting: "reasoningSummary",
          details: "reasoningSummary is not supported for non-reasoning models"
        });
      }
    }
    // Service-tier validation: drop tiers the target model cannot use.
    if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "flex" && !supportsFlexProcessing2(this.modelId)) {
      warnings.push({
        type: "unsupported-setting",
        setting: "serviceTier",
        details: "flex processing is only available for o3, o4-mini, and gpt-5 models"
      });
      delete baseArgs.service_tier;
    }
    if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "priority" && !supportsPriorityProcessing2(this.modelId)) {
      warnings.push({
        type: "unsupported-setting",
        setting: "serviceTier",
        details: "priority processing is only available for supported models (gpt-4, gpt-5, gpt-5-mini, o3, o4-mini) and requires Enterprise access. gpt-5-nano is not supported"
      });
      delete baseArgs.service_tier;
    }
    const {
      tools: openaiTools2,
      toolChoice: openaiToolChoice,
      toolWarnings
    } = prepareResponsesTools({
      tools,
      toolChoice,
      strictJsonSchema
    });
    return {
      args: {
        ...baseArgs,
        tools: openaiTools2,
        tool_choice: openaiToolChoice
      },
      warnings: [...warnings, ...toolWarnings]
    };
  }
2408
  // Executes one non-streaming POST /responses call.
  //
  // The inline zod schema validates only the subset of the response this
  // model consumes (message / function_call / web_search_call /
  // computer_call / file_search_call / reasoning output items); `usage` is
  // validated by the shared `usageSchema2`. The output items are then
  // mapped to SDK content parts.
  async doGenerate(options) {
    var _a, _b, _c, _d, _e, _f, _g, _h, _i;
    const { args: body, warnings } = await this.getArgs(options);
    const url = this.config.url({
      path: "/responses",
      modelId: this.modelId
    });
    const {
      responseHeaders,
      value: response,
      rawValue: rawResponse
    } = await (0, import_provider_utils12.postJsonToApi)({
      url,
      headers: (0, import_provider_utils12.combineHeaders)(this.config.headers(), options.headers),
      body,
      failedResponseHandler: openaiFailedResponseHandler,
      successfulResponseHandler: (0, import_provider_utils12.createJsonResponseHandler)(
        import_v414.z.object({
          id: import_v414.z.string(),
          created_at: import_v414.z.number(),
          error: import_v414.z.object({
            code: import_v414.z.string(),
            message: import_v414.z.string()
          }).nullish(),
          model: import_v414.z.string(),
          output: import_v414.z.array(
            import_v414.z.discriminatedUnion("type", [
              import_v414.z.object({
                type: import_v414.z.literal("message"),
                role: import_v414.z.literal("assistant"),
                id: import_v414.z.string(),
                content: import_v414.z.array(
                  import_v414.z.object({
                    type: import_v414.z.literal("output_text"),
                    text: import_v414.z.string(),
                    annotations: import_v414.z.array(
                      import_v414.z.object({
                        type: import_v414.z.literal("url_citation"),
                        start_index: import_v414.z.number(),
                        end_index: import_v414.z.number(),
                        url: import_v414.z.string(),
                        title: import_v414.z.string()
                      })
                    )
                  })
                )
              }),
              import_v414.z.object({
                type: import_v414.z.literal("function_call"),
                call_id: import_v414.z.string(),
                name: import_v414.z.string(),
                arguments: import_v414.z.string(),
                id: import_v414.z.string()
              }),
              import_v414.z.object({
                type: import_v414.z.literal("web_search_call"),
                id: import_v414.z.string(),
                status: import_v414.z.string().optional()
              }),
              import_v414.z.object({
                type: import_v414.z.literal("computer_call"),
                id: import_v414.z.string(),
                status: import_v414.z.string().optional()
              }),
              import_v414.z.object({
                type: import_v414.z.literal("file_search_call"),
                id: import_v414.z.string(),
                status: import_v414.z.string().optional()
              }),
              import_v414.z.object({
                type: import_v414.z.literal("reasoning"),
                id: import_v414.z.string(),
                encrypted_content: import_v414.z.string().nullish(),
                summary: import_v414.z.array(
                  import_v414.z.object({
                    type: import_v414.z.literal("summary_text"),
                    text: import_v414.z.string()
                  })
                )
              })
            ])
          ),
          incomplete_details: import_v414.z.object({ reason: import_v414.z.string() }).nullable(),
          usage: usageSchema2
        })
      ),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
    // The API may deliver an error payload inside an otherwise-successful
    // response; surface it as an APICallError.
    // NOTE(review): statusCode is hard-coded to 400 here regardless of the
    // actual HTTP status — confirm this is intentional.
    if (response.error) {
      throw new import_provider8.APICallError({
        message: response.error.message,
        url,
        requestBodyValues: body,
        statusCode: 400,
        responseHeaders,
        responseBody: rawResponse,
        isRetryable: false
      });
    }
    const content = [];
    for (const part of response.output) {
      switch (part.type) {
        case "reasoning": {
          // Always emit at least one (possibly empty) reasoning part so the
          // item id and encrypted content are preserved for round-tripping.
          if (part.summary.length === 0) {
            part.summary.push({ type: "summary_text", text: "" });
          }
          for (const summary of part.summary) {
            content.push({
              type: "reasoning",
              text: summary.text,
              providerMetadata: {
                openai: {
                  itemId: part.id,
                  reasoningEncryptedContent: (_a = part.encrypted_content) != null ? _a : null
                }
              }
            });
          }
          break;
        }
        case "message": {
          for (const contentPart of part.content) {
            content.push({
              type: "text",
              text: contentPart.text,
              providerMetadata: {
                openai: {
                  itemId: part.id
                }
              }
            });
            // URL citations become source parts with locally generated ids.
            for (const annotation of contentPart.annotations) {
              content.push({
                type: "source",
                sourceType: "url",
                id: (_d = (_c = (_b = this.config).generateId) == null ? void 0 : _c.call(_b)) != null ? _d : (0, import_provider_utils12.generateId)(),
                url: annotation.url,
                title: annotation.title
              });
            }
          }
          break;
        }
        case "function_call": {
          content.push({
            type: "tool-call",
            toolCallId: part.call_id,
            toolName: part.name,
            input: part.arguments,
            providerMetadata: {
              openai: {
                itemId: part.id
              }
            }
          });
          break;
        }
        // Built-in tool invocations are reported as provider-executed
        // tool-call + tool-result pairs.
        case "web_search_call": {
          content.push({
            type: "tool-call",
            toolCallId: part.id,
            toolName: "web_search_preview",
            input: "",
            providerExecuted: true
          });
          // NOTE(review): unlike the computer_call/file_search_call results
          // below (and the streaming path), this result object carries no
          // `type` discriminator — confirm whether that is intentional.
          content.push({
            type: "tool-result",
            toolCallId: part.id,
            toolName: "web_search_preview",
            result: { status: part.status || "completed" },
            providerExecuted: true
          });
          break;
        }
        case "computer_call": {
          content.push({
            type: "tool-call",
            toolCallId: part.id,
            toolName: "computer_use",
            input: "",
            providerExecuted: true
          });
          content.push({
            type: "tool-result",
            toolCallId: part.id,
            toolName: "computer_use",
            result: {
              type: "computer_use_tool_result",
              status: part.status || "completed"
            },
            providerExecuted: true
          });
          break;
        }
        case "file_search_call": {
          content.push({
            type: "tool-call",
            toolCallId: part.id,
            toolName: "file_search",
            input: "",
            providerExecuted: true
          });
          content.push({
            type: "tool-result",
            toolCallId: part.id,
            toolName: "file_search",
            result: {
              type: "file_search_tool_result",
              status: part.status || "completed"
            },
            providerExecuted: true
          });
          break;
        }
      }
    }
    return {
      content,
      finishReason: mapOpenAIResponseFinishReason({
        finishReason: (_e = response.incomplete_details) == null ? void 0 : _e.reason,
        hasToolCalls: content.some((part) => part.type === "tool-call")
      }),
      // totalTokens is derived as input + output; reasoning/cached token
      // details are optional.
      usage: {
        inputTokens: response.usage.input_tokens,
        outputTokens: response.usage.output_tokens,
        totalTokens: response.usage.input_tokens + response.usage.output_tokens,
        reasoningTokens: (_g = (_f = response.usage.output_tokens_details) == null ? void 0 : _f.reasoning_tokens) != null ? _g : void 0,
        cachedInputTokens: (_i = (_h = response.usage.input_tokens_details) == null ? void 0 : _h.cached_tokens) != null ? _i : void 0
      },
      request: { body },
      response: {
        id: response.id,
        timestamp: new Date(response.created_at * 1e3),
        modelId: response.model,
        headers: responseHeaders,
        body: rawResponse
      },
      providerMetadata: {
        openai: {
          responseId: response.id
        }
      },
      warnings
    };
  }
2654
+ async doStream(options) {
2655
+ const { args: body, warnings } = await this.getArgs(options);
2656
+ const { responseHeaders, value: response } = await (0, import_provider_utils12.postJsonToApi)({
2657
+ url: this.config.url({
2658
+ path: "/responses",
2659
+ modelId: this.modelId
2660
+ }),
2661
+ headers: (0, import_provider_utils12.combineHeaders)(this.config.headers(), options.headers),
2662
+ body: {
2663
+ ...body,
2664
+ stream: true
2665
+ },
2666
+ failedResponseHandler: openaiFailedResponseHandler,
2667
+ successfulResponseHandler: (0, import_provider_utils12.createEventSourceResponseHandler)(
2668
+ openaiResponsesChunkSchema
2669
+ ),
2670
+ abortSignal: options.abortSignal,
2671
+ fetch: this.config.fetch
2672
+ });
2673
+ const self = this;
2674
+ let finishReason = "unknown";
2675
+ const usage = {
2676
+ inputTokens: void 0,
2677
+ outputTokens: void 0,
2678
+ totalTokens: void 0
2679
+ };
2680
+ let responseId = null;
2681
+ const ongoingToolCalls = {};
2682
+ let hasToolCalls = false;
2683
+ const activeReasoning = {};
2684
+ return {
2685
+ stream: response.pipeThrough(
2686
+ new TransformStream({
2687
+ start(controller) {
2688
+ controller.enqueue({ type: "stream-start", warnings });
2689
+ },
2690
+ transform(chunk, controller) {
2691
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
2692
+ if (options.includeRawChunks) {
2693
+ controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
2694
+ }
2695
+ if (!chunk.success) {
2696
+ finishReason = "error";
2697
+ controller.enqueue({ type: "error", error: chunk.error });
2698
+ return;
2699
+ }
2700
+ const value = chunk.value;
2701
+ if (isResponseOutputItemAddedChunk(value)) {
2702
+ if (value.item.type === "function_call") {
2703
+ ongoingToolCalls[value.output_index] = {
2704
+ toolName: value.item.name,
2705
+ toolCallId: value.item.call_id
2706
+ };
2707
+ controller.enqueue({
2708
+ type: "tool-input-start",
2709
+ id: value.item.call_id,
2710
+ toolName: value.item.name
2711
+ });
2712
+ } else if (value.item.type === "web_search_call") {
2713
+ ongoingToolCalls[value.output_index] = {
2714
+ toolName: "web_search_preview",
2715
+ toolCallId: value.item.id
2716
+ };
2717
+ controller.enqueue({
2718
+ type: "tool-input-start",
2719
+ id: value.item.id,
2720
+ toolName: "web_search_preview"
2721
+ });
2722
+ } else if (value.item.type === "computer_call") {
2723
+ ongoingToolCalls[value.output_index] = {
2724
+ toolName: "computer_use",
2725
+ toolCallId: value.item.id
2726
+ };
2727
+ controller.enqueue({
2728
+ type: "tool-input-start",
2729
+ id: value.item.id,
2730
+ toolName: "computer_use"
2731
+ });
2732
+ } else if (value.item.type === "message") {
2733
+ controller.enqueue({
2734
+ type: "text-start",
2735
+ id: value.item.id,
2736
+ providerMetadata: {
2737
+ openai: {
2738
+ itemId: value.item.id
2739
+ }
2740
+ }
2741
+ });
2742
+ } else if (isResponseOutputItemAddedReasoningChunk(value)) {
2743
+ activeReasoning[value.item.id] = {
2744
+ encryptedContent: value.item.encrypted_content,
2745
+ summaryParts: [0]
2746
+ };
2747
+ controller.enqueue({
2748
+ type: "reasoning-start",
2749
+ id: `${value.item.id}:0`,
2750
+ providerMetadata: {
2751
+ openai: {
2752
+ itemId: value.item.id,
2753
+ reasoningEncryptedContent: (_a = value.item.encrypted_content) != null ? _a : null
2754
+ }
2755
+ }
2756
+ });
2757
+ }
2758
+ } else if (isResponseOutputItemDoneChunk(value)) {
2759
+ if (value.item.type === "function_call") {
2760
+ ongoingToolCalls[value.output_index] = void 0;
2761
+ hasToolCalls = true;
2762
+ controller.enqueue({
2763
+ type: "tool-input-end",
2764
+ id: value.item.call_id
2765
+ });
2766
+ controller.enqueue({
2767
+ type: "tool-call",
2768
+ toolCallId: value.item.call_id,
2769
+ toolName: value.item.name,
2770
+ input: value.item.arguments,
2771
+ providerMetadata: {
2772
+ openai: {
2773
+ itemId: value.item.id
2774
+ }
2775
+ }
2776
+ });
2777
+ } else if (value.item.type === "web_search_call") {
2778
+ ongoingToolCalls[value.output_index] = void 0;
2779
+ hasToolCalls = true;
2780
+ controller.enqueue({
2781
+ type: "tool-input-end",
2782
+ id: value.item.id
2783
+ });
2784
+ controller.enqueue({
2785
+ type: "tool-call",
2786
+ toolCallId: value.item.id,
2787
+ toolName: "web_search_preview",
2788
+ input: "",
2789
+ providerExecuted: true
2790
+ });
2791
+ controller.enqueue({
2792
+ type: "tool-result",
2793
+ toolCallId: value.item.id,
2794
+ toolName: "web_search_preview",
2795
+ result: {
2796
+ type: "web_search_tool_result",
2797
+ status: value.item.status || "completed"
2798
+ },
2799
+ providerExecuted: true
2800
+ });
2801
+ } else if (value.item.type === "computer_call") {
2802
+ ongoingToolCalls[value.output_index] = void 0;
2803
+ hasToolCalls = true;
2804
+ controller.enqueue({
2805
+ type: "tool-input-end",
2806
+ id: value.item.id
2807
+ });
2808
+ controller.enqueue({
2809
+ type: "tool-call",
2810
+ toolCallId: value.item.id,
2811
+ toolName: "computer_use",
2812
+ input: "",
2813
+ providerExecuted: true
2814
+ });
2815
+ controller.enqueue({
2816
+ type: "tool-result",
2817
+ toolCallId: value.item.id,
2818
+ toolName: "computer_use",
2819
+ result: {
2820
+ type: "computer_use_tool_result",
2821
+ status: value.item.status || "completed"
2822
+ },
2823
+ providerExecuted: true
2824
+ });
2825
+ } else if (value.item.type === "message") {
2826
+ controller.enqueue({
2827
+ type: "text-end",
2828
+ id: value.item.id
2829
+ });
2830
+ } else if (isResponseOutputItemDoneReasoningChunk(value)) {
2831
+ const activeReasoningPart = activeReasoning[value.item.id];
2832
+ for (const summaryIndex of activeReasoningPart.summaryParts) {
2833
+ controller.enqueue({
2834
+ type: "reasoning-end",
2835
+ id: `${value.item.id}:${summaryIndex}`,
2836
+ providerMetadata: {
2837
+ openai: {
2838
+ itemId: value.item.id,
2839
+ reasoningEncryptedContent: (_b = value.item.encrypted_content) != null ? _b : null
2840
+ }
2841
+ }
2842
+ });
2843
+ }
2844
+ delete activeReasoning[value.item.id];
2845
+ }
2846
+ } else if (isResponseFunctionCallArgumentsDeltaChunk(value)) {
2847
+ const toolCall = ongoingToolCalls[value.output_index];
2848
+ if (toolCall != null) {
2849
+ controller.enqueue({
2850
+ type: "tool-input-delta",
2851
+ id: toolCall.toolCallId,
2852
+ delta: value.delta
2853
+ });
2854
+ }
2855
+ } else if (isResponseCreatedChunk(value)) {
2856
+ responseId = value.response.id;
2857
+ controller.enqueue({
2858
+ type: "response-metadata",
2859
+ id: value.response.id,
2860
+ timestamp: new Date(value.response.created_at * 1e3),
2861
+ modelId: value.response.model
2862
+ });
2863
+ } else if (isTextDeltaChunk(value)) {
2864
+ controller.enqueue({
2865
+ type: "text-delta",
2866
+ id: value.item_id,
2867
+ delta: value.delta
2868
+ });
2869
+ } else if (isResponseReasoningSummaryPartAddedChunk(value)) {
2870
+ if (value.summary_index > 0) {
2871
+ (_c = activeReasoning[value.item_id]) == null ? void 0 : _c.summaryParts.push(
2872
+ value.summary_index
2873
+ );
2874
+ controller.enqueue({
2875
+ type: "reasoning-start",
2876
+ id: `${value.item_id}:${value.summary_index}`,
2877
+ providerMetadata: {
2878
+ openai: {
2879
+ itemId: value.item_id,
2880
+ reasoningEncryptedContent: (_e = (_d = activeReasoning[value.item_id]) == null ? void 0 : _d.encryptedContent) != null ? _e : null
2881
+ }
2882
+ }
2883
+ });
2884
+ }
2885
+ } else if (isResponseReasoningSummaryTextDeltaChunk(value)) {
2886
+ controller.enqueue({
2887
+ type: "reasoning-delta",
2888
+ id: `${value.item_id}:${value.summary_index}`,
2889
+ delta: value.delta,
2890
+ providerMetadata: {
2891
+ openai: {
2892
+ itemId: value.item_id
2893
+ }
2894
+ }
2895
+ });
2896
+ } else if (isResponseFinishedChunk(value)) {
2897
+ finishReason = mapOpenAIResponseFinishReason({
2898
+ finishReason: (_f = value.response.incomplete_details) == null ? void 0 : _f.reason,
2899
+ hasToolCalls
2900
+ });
2901
+ usage.inputTokens = value.response.usage.input_tokens;
2902
+ usage.outputTokens = value.response.usage.output_tokens;
2903
+ usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens;
2904
+ usage.reasoningTokens = (_h = (_g = value.response.usage.output_tokens_details) == null ? void 0 : _g.reasoning_tokens) != null ? _h : void 0;
2905
+ usage.cachedInputTokens = (_j = (_i = value.response.usage.input_tokens_details) == null ? void 0 : _i.cached_tokens) != null ? _j : void 0;
2906
+ } else if (isResponseAnnotationAddedChunk(value)) {
2907
+ controller.enqueue({
2908
+ type: "source",
2909
+ sourceType: "url",
2910
+ id: (_m = (_l = (_k = self.config).generateId) == null ? void 0 : _l.call(_k)) != null ? _m : (0, import_provider_utils12.generateId)(),
2911
+ url: value.annotation.url,
2912
+ title: value.annotation.title
2913
+ });
2914
+ } else if (isErrorChunk(value)) {
2915
+ controller.enqueue({ type: "error", error: value });
2916
+ }
2917
+ },
2918
+ flush(controller) {
2919
+ controller.enqueue({
2920
+ type: "finish",
2921
+ finishReason,
2922
+ usage,
2923
+ providerMetadata: {
2924
+ openai: {
2925
+ responseId
2926
+ }
2927
+ }
2928
+ });
2929
+ }
2930
+ })
2931
+ ),
2932
+ request: { body },
2933
+ response: { headers: responseHeaders }
2934
+ };
2935
+ }
2936
+ };
2937
// Token usage payload of the Responses API; the nested detail objects
// (cached / reasoning token counts) are optional in the wire format.
var usageSchema2 = import_v414.z.object({
  input_tokens: import_v414.z.number(),
  input_tokens_details: import_v414.z.object({ cached_tokens: import_v414.z.number().nullish() }).nullish(),
  output_tokens: import_v414.z.number(),
  output_tokens_details: import_v414.z.object({ reasoning_tokens: import_v414.z.number().nullish() }).nullish()
});
2943
// Stream chunk carrying an incremental piece of output text for one item.
var textDeltaChunkSchema = import_v414.z.object({
  type: import_v414.z.literal("response.output_text.delta"),
  item_id: import_v414.z.string(),
  delta: import_v414.z.string()
});
2948
// Error event emitted by the Responses stream.
var errorChunkSchema = import_v414.z.object({
  type: import_v414.z.literal("error"),
  code: import_v414.z.string(),
  message: import_v414.z.string(),
  param: import_v414.z.string().nullish(),
  sequence_number: import_v414.z.number()
});
2955
// Terminal stream event: the response either completed or stopped early
// (in which case incomplete_details carries the reason).
var responseFinishedChunkSchema = import_v414.z.object({
  type: import_v414.z.enum(["response.completed", "response.incomplete"]),
  response: import_v414.z.object({
    incomplete_details: import_v414.z.object({ reason: import_v414.z.string() }).nullish(),
    usage: usageSchema2
  })
});
2962
// First event of a stream; carries the response id, creation timestamp
// (seconds since epoch), and resolved model name.
var responseCreatedChunkSchema = import_v414.z.object({
  type: import_v414.z.literal("response.created"),
  response: import_v414.z.object({
    id: import_v414.z.string(),
    created_at: import_v414.z.number(),
    model: import_v414.z.string()
  })
});
2970
// Event announcing a new output item; the item variant is discriminated by
// its `type` (message, reasoning, function call, or a provider-executed tool).
var responseOutputItemAddedSchema = import_v414.z.object({
  type: import_v414.z.literal("response.output_item.added"),
  output_index: import_v414.z.number(),
  item: import_v414.z.discriminatedUnion("type", [
    import_v414.z.object({
      type: import_v414.z.literal("message"),
      id: import_v414.z.string()
    }),
    import_v414.z.object({
      type: import_v414.z.literal("reasoning"),
      id: import_v414.z.string(),
      encrypted_content: import_v414.z.string().nullish()
    }),
    import_v414.z.object({
      type: import_v414.z.literal("function_call"),
      id: import_v414.z.string(),
      call_id: import_v414.z.string(),
      name: import_v414.z.string(),
      arguments: import_v414.z.string()
    }),
    import_v414.z.object({
      type: import_v414.z.literal("web_search_call"),
      id: import_v414.z.string(),
      status: import_v414.z.string()
    }),
    import_v414.z.object({
      type: import_v414.z.literal("computer_call"),
      id: import_v414.z.string(),
      status: import_v414.z.string()
    }),
    import_v414.z.object({
      type: import_v414.z.literal("file_search_call"),
      id: import_v414.z.string(),
      status: import_v414.z.string()
    })
  ])
});
3007
// Event marking an output item as finished; same variants as the "added"
// event, but tool/function items are constrained to status "completed".
var responseOutputItemDoneSchema = import_v414.z.object({
  type: import_v414.z.literal("response.output_item.done"),
  output_index: import_v414.z.number(),
  item: import_v414.z.discriminatedUnion("type", [
    import_v414.z.object({
      type: import_v414.z.literal("message"),
      id: import_v414.z.string()
    }),
    import_v414.z.object({
      type: import_v414.z.literal("reasoning"),
      id: import_v414.z.string(),
      encrypted_content: import_v414.z.string().nullish()
    }),
    import_v414.z.object({
      type: import_v414.z.literal("function_call"),
      id: import_v414.z.string(),
      call_id: import_v414.z.string(),
      name: import_v414.z.string(),
      arguments: import_v414.z.string(),
      status: import_v414.z.literal("completed")
    }),
    import_v414.z.object({
      type: import_v414.z.literal("web_search_call"),
      id: import_v414.z.string(),
      status: import_v414.z.literal("completed")
    }),
    import_v414.z.object({
      type: import_v414.z.literal("computer_call"),
      id: import_v414.z.string(),
      status: import_v414.z.literal("completed")
    }),
    import_v414.z.object({
      type: import_v414.z.literal("file_search_call"),
      id: import_v414.z.string(),
      status: import_v414.z.literal("completed")
    })
  ])
});
3045
// Incremental JSON-argument text for an in-progress function call.
var responseFunctionCallArgumentsDeltaSchema = import_v414.z.object({
  type: import_v414.z.literal("response.function_call_arguments.delta"),
  item_id: import_v414.z.string(),
  output_index: import_v414.z.number(),
  delta: import_v414.z.string()
});
3051
// URL-citation annotation attached to output text (surfaced as a "source"
// part in the stream above).
var responseAnnotationAddedSchema = import_v414.z.object({
  type: import_v414.z.literal("response.output_text.annotation.added"),
  annotation: import_v414.z.object({
    type: import_v414.z.literal("url_citation"),
    url: import_v414.z.string(),
    title: import_v414.z.string()
  })
});
3059
// Event opening a new reasoning-summary part for a reasoning item.
var responseReasoningSummaryPartAddedSchema = import_v414.z.object({
  type: import_v414.z.literal("response.reasoning_summary_part.added"),
  item_id: import_v414.z.string(),
  summary_index: import_v414.z.number()
});
3064
// Incremental text for one reasoning-summary part.
var responseReasoningSummaryTextDeltaSchema = import_v414.z.object({
  type: import_v414.z.literal("response.reasoning_summary_text.delta"),
  item_id: import_v414.z.string(),
  summary_index: import_v414.z.number(),
  delta: import_v414.z.string()
});
3070
// Union of all recognized stream chunk shapes, ending in a loose
// { type: string } fallback so unknown chunk types never fail parsing.
var openaiResponsesChunkSchema = import_v414.z.union([
  textDeltaChunkSchema,
  responseFinishedChunkSchema,
  responseCreatedChunkSchema,
  responseOutputItemAddedSchema,
  responseOutputItemDoneSchema,
  responseFunctionCallArgumentsDeltaSchema,
  responseAnnotationAddedSchema,
  responseReasoningSummaryPartAddedSchema,
  responseReasoningSummaryTextDeltaSchema,
  errorChunkSchema,
  import_v414.z.object({ type: import_v414.z.string() }).loose()
  // fallback for unknown chunks
]);
3084
// True when the chunk carries an output-text delta.
function isTextDeltaChunk(chunk) {
  const { type } = chunk;
  return type === "response.output_text.delta";
}
3087
// True when the chunk marks an output item as done.
function isResponseOutputItemDoneChunk(chunk) {
  const expectedType = "response.output_item.done";
  return chunk.type === expectedType;
}
3090
// True when the chunk is a finished reasoning output item.
function isResponseOutputItemDoneReasoningChunk(chunk) {
  if (!isResponseOutputItemDoneChunk(chunk)) {
    return false;
  }
  return chunk.item.type === "reasoning";
}
3093
// True when the chunk terminates the response (completed or incomplete).
function isResponseFinishedChunk(chunk) {
  return ["response.completed", "response.incomplete"].includes(chunk.type);
}
3096
// True when the chunk announces stream/response creation.
function isResponseCreatedChunk(chunk) {
  const { type } = chunk;
  return type === "response.created";
}
3099
// True when the chunk streams function-call argument text.
function isResponseFunctionCallArgumentsDeltaChunk(chunk) {
  const expectedType = "response.function_call_arguments.delta";
  return chunk.type === expectedType;
}
3102
// True when the chunk announces a new output item.
function isResponseOutputItemAddedChunk(chunk) {
  const { type } = chunk;
  return type === "response.output_item.added";
}
3105
// True when the chunk opens a new reasoning output item.
function isResponseOutputItemAddedReasoningChunk(chunk) {
  if (!isResponseOutputItemAddedChunk(chunk)) {
    return false;
  }
  return chunk.item.type === "reasoning";
}
3108
// True when the chunk attaches a text annotation (URL citation).
function isResponseAnnotationAddedChunk(chunk) {
  const expectedType = "response.output_text.annotation.added";
  return chunk.type === expectedType;
}
3111
// True when the chunk opens a new reasoning-summary part.
function isResponseReasoningSummaryPartAddedChunk(chunk) {
  const { type } = chunk;
  return type === "response.reasoning_summary_part.added";
}
3114
// True when the chunk streams reasoning-summary text.
function isResponseReasoningSummaryTextDeltaChunk(chunk) {
  const expectedType = "response.reasoning_summary_text.delta";
  return chunk.type === expectedType;
}
3117
// True when the chunk is a stream error event.
function isErrorChunk(chunk) {
  const { type } = chunk;
  return type === "error";
}
3120
// Maps a Responses-API model id to its configuration: whether it is a
// reasoning model and how system messages must be delivered to it
// ("system" role, "developer" role, or removed entirely).
function getResponsesModelConfig(modelId) {
  const isReasoningModel =
    modelId.startsWith("o") ||
    modelId.startsWith("gpt-5") ||
    modelId.startsWith("codex-") ||
    modelId.startsWith("computer-use");
  if (!isReasoningModel) {
    // Non-reasoning models accept regular system messages.
    return {
      isReasoningModel: false,
      systemMessageMode: "system",
      requiredAutoTruncation: false
    };
  }
  // Early o1 variants reject system/developer messages entirely.
  const mustRemoveSystemMessages =
    modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview");
  return {
    isReasoningModel: true,
    systemMessageMode: mustRemoveSystemMessages ? "remove" : "developer",
    requiredAutoTruncation: false
  };
}
3141
// True when the model supports the "flex" service tier.
function supportsFlexProcessing2(modelId) {
  const flexPrefixes = ["o3", "o4-mini", "gpt-5"];
  return flexPrefixes.some((prefix) => modelId.startsWith(prefix));
}
3144
// True when the model supports the "priority" service tier.
// gpt-5 models qualify except the gpt-5-nano family.
function supportsPriorityProcessing2(modelId) {
  if (modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini")) {
    return true;
  }
  if (modelId.startsWith("gpt-5")) {
    return !modelId.startsWith("gpt-5-nano");
  }
  return modelId.startsWith("o3") || modelId.startsWith("o4-mini");
}
3147
// Provider options accepted under providerOptions.openai for the
// Responses API; every field is optional.
var openaiResponsesProviderOptionsSchema = import_v414.z.object({
  metadata: import_v414.z.any().nullish(),
  parallelToolCalls: import_v414.z.boolean().nullish(),
  previousResponseId: import_v414.z.string().nullish(),
  store: import_v414.z.boolean().nullish(),
  user: import_v414.z.string().nullish(),
  reasoningEffort: import_v414.z.string().nullish(),
  strictJsonSchema: import_v414.z.boolean().nullish(),
  instructions: import_v414.z.string().nullish(),
  reasoningSummary: import_v414.z.string().nullish(),
  serviceTier: import_v414.z.enum(["auto", "flex", "priority"]).nullish(),
  include: import_v414.z.array(import_v414.z.enum(["reasoning.encrypted_content", "file_search_call.results"])).nullish(),
  textVerbosity: import_v414.z.enum(["low", "medium", "high"]).nullish()
});
3161
+
3162
+ // src/openai-speech-model.ts
3163
+ var import_provider_utils13 = require("@ai-sdk/provider-utils");
3164
+ var import_v415 = require("zod/v4");
3165
// Provider options for the speech model; speed is clamped to the
// API-supported 0.25–4 range with a default of 1.
var OpenAIProviderOptionsSchema = import_v415.z.object({
  instructions: import_v415.z.string().nullish(),
  speed: import_v415.z.number().min(0.25).max(4).default(1).nullish()
});
3169
// Text-to-speech model targeting OpenAI's /audio/speech endpoint.
// Implements specification v2 of the speech-model interface.
var OpenAISpeechModel = class {
  // config supplies provider name, url builder, headers factory, and fetch.
  constructor(modelId, config) {
    this.modelId = modelId;
    this.config = config;
    this.specificationVersion = "v2";
  }
  get provider() {
    return this.config.provider;
  }
  // Builds the JSON request body plus any warnings for unsupported settings.
  async getArgs({
    text,
    voice = "alloy",
    outputFormat = "mp3",
    speed,
    instructions,
    language,
    providerOptions
  }) {
    const warnings = [];
    const openAIOptions = await (0, import_provider_utils13.parseProviderOptions)({
      provider: "openai",
      providerOptions,
      schema: OpenAIProviderOptionsSchema
    });
    // response_format defaults to mp3 and is overridden below when the
    // requested outputFormat is one the API supports.
    const requestBody = {
      model: this.modelId,
      input: text,
      voice,
      response_format: "mp3",
      speed,
      instructions
    };
    if (outputFormat) {
      if (["mp3", "opus", "aac", "flac", "wav", "pcm"].includes(outputFormat)) {
        requestBody.response_format = outputFormat;
      } else {
        warnings.push({
          type: "unsupported-setting",
          setting: "outputFormat",
          details: `Unsupported output format: ${outputFormat}. Using mp3 instead.`
        });
      }
    }
    if (openAIOptions) {
      // NOTE(review): speechModelOptions is always empty, so this loop never
      // copies anything and the parsed openAIOptions go unused here — confirm
      // whether provider options were meant to be merged into requestBody.
      const speechModelOptions = {};
      for (const key in speechModelOptions) {
        const value = speechModelOptions[key];
        if (value !== void 0) {
          requestBody[key] = value;
        }
      }
    }
    if (language) {
      warnings.push({
        type: "unsupported-setting",
        setting: "language",
        details: `OpenAI speech models do not support language selection. Language parameter "${language}" was ignored.`
      });
    }
    return {
      requestBody,
      warnings
    };
  }
  // Posts the request and returns the raw audio bytes plus request/response
  // metadata. _internal.currentDate is a test hook for the timestamp.
  async doGenerate(options) {
    var _a, _b, _c;
    const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
    const { requestBody, warnings } = await this.getArgs(options);
    const {
      value: audio,
      responseHeaders,
      rawValue: rawResponse
    } = await (0, import_provider_utils13.postJsonToApi)({
      url: this.config.url({
        path: "/audio/speech",
        modelId: this.modelId
      }),
      headers: (0, import_provider_utils13.combineHeaders)(this.config.headers(), options.headers),
      body: requestBody,
      failedResponseHandler: openaiFailedResponseHandler,
      // binary handler: the endpoint returns audio bytes, not JSON
      successfulResponseHandler: (0, import_provider_utils13.createBinaryResponseHandler)(),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
    return {
      audio,
      warnings,
      request: {
        body: JSON.stringify(requestBody)
      },
      response: {
        timestamp: currentDate,
        modelId: this.modelId,
        headers: responseHeaders,
        body: rawResponse
      }
    };
  }
};
3268
+
3269
+ // src/openai-provider.ts
3270
// Factory for an OpenAI provider. Returns a callable provider function
// (defaulting to Responses-API language models) with model-creation methods
// attached: chat, completion, responses, embedding, image, transcription,
// and speech, plus provider-executed tools.
function createOpenAI(options = {}) {
  var _a, _b;
  // Default to the public OpenAI endpoint when no baseURL override is given.
  const baseURL = (_a = (0, import_provider_utils14.withoutTrailingSlash)(options.baseURL)) != null ? _a : "https://api.openai.com/v1";
  const providerName = (_b = options.name) != null ? _b : "openai";
  // Headers are computed per request, so the API key (options.apiKey or the
  // OPENAI_API_KEY environment variable) is resolved lazily.
  const getHeaders = () => ({
    Authorization: `Bearer ${(0, import_provider_utils14.loadApiKey)({
      apiKey: options.apiKey,
      environmentVariableName: "OPENAI_API_KEY",
      description: "OpenAI"
    })}`,
    "OpenAI-Organization": options.organization,
    "OpenAI-Project": options.project,
    ...options.headers
  });
  const createChatModel = (modelId) => new OpenAIChatLanguageModel(modelId, {
    provider: `${providerName}.chat`,
    url: ({ path }) => `${baseURL}${path}`,
    headers: getHeaders,
    fetch: options.fetch
  });
  const createCompletionModel = (modelId) => new OpenAICompletionLanguageModel(modelId, {
    provider: `${providerName}.completion`,
    url: ({ path }) => `${baseURL}${path}`,
    headers: getHeaders,
    fetch: options.fetch
  });
  const createEmbeddingModel = (modelId) => new OpenAIEmbeddingModel(modelId, {
    provider: `${providerName}.embedding`,
    url: ({ path }) => `${baseURL}${path}`,
    headers: getHeaders,
    fetch: options.fetch
  });
  const createImageModel = (modelId) => new OpenAIImageModel(modelId, {
    provider: `${providerName}.image`,
    url: ({ path }) => `${baseURL}${path}`,
    headers: getHeaders,
    fetch: options.fetch
  });
  const createTranscriptionModel = (modelId) => new OpenAITranscriptionModel(modelId, {
    provider: `${providerName}.transcription`,
    url: ({ path }) => `${baseURL}${path}`,
    headers: getHeaders,
    fetch: options.fetch
  });
  const createSpeechModel = (modelId) => new OpenAISpeechModel(modelId, {
    provider: `${providerName}.speech`,
    url: ({ path }) => `${baseURL}${path}`,
    headers: getHeaders,
    fetch: options.fetch
  });
  const createLanguageModel = (modelId) => {
    // NOTE(review): arrow functions have no own `new.target`; inside this
    // arrow it resolves against the enclosing createOpenAI invocation —
    // confirm the guard fires in the intended situation.
    if (new.target) {
      throw new Error(
        "The OpenAI model function cannot be called with the new keyword."
      );
    }
    // Plain provider calls default to the Responses API.
    return createResponsesModel(modelId);
  };
  const createResponsesModel = (modelId) => {
    return new OpenAIResponsesLanguageModel(modelId, {
      provider: `${providerName}.responses`,
      url: ({ path }) => `${baseURL}${path}`,
      headers: getHeaders,
      fetch: options.fetch
    });
  };
  // The provider itself is callable: openai("model-id").
  const provider = function(modelId) {
    return createLanguageModel(modelId);
  };
  provider.languageModel = createLanguageModel;
  provider.chat = createChatModel;
  provider.completion = createCompletionModel;
  provider.responses = createResponsesModel;
  // embedding/textEmbedding/textEmbeddingModel are aliases, as are the
  // image, transcription, and speech pairs below.
  provider.embedding = createEmbeddingModel;
  provider.textEmbedding = createEmbeddingModel;
  provider.textEmbeddingModel = createEmbeddingModel;
  provider.image = createImageModel;
  provider.imageModel = createImageModel;
  provider.transcription = createTranscriptionModel;
  provider.transcriptionModel = createTranscriptionModel;
  provider.speech = createSpeechModel;
  provider.speechModel = createSpeechModel;
  provider.tools = openaiTools;
  return provider;
}
3355
// Default provider instance created with default options.
var openai = createOpenAI();
// Annotate the CommonJS export names for ESM import in node:
0 && (module.exports = {
  createOpenAI,
  openai
});
3361
+ //# sourceMappingURL=index.js.map