@ai-sdk/openai 0.0.0-013d7476-20250808163325

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs ADDED
@@ -0,0 +1,3394 @@
1
+ // src/openai-provider.ts
2
+ import {
3
+ loadApiKey,
4
+ withoutTrailingSlash
5
+ } from "@ai-sdk/provider-utils";
6
+
7
+ // src/openai-chat-language-model.ts
8
+ import {
9
+ InvalidResponseDataError
10
+ } from "@ai-sdk/provider";
11
+ import {
12
+ combineHeaders,
13
+ createEventSourceResponseHandler,
14
+ createJsonResponseHandler,
15
+ generateId,
16
+ isParsableJson,
17
+ parseProviderOptions,
18
+ postJsonToApi
19
+ } from "@ai-sdk/provider-utils";
20
+ import { z as z5 } from "zod/v4";
21
+
22
+ // src/convert-to-openai-chat-messages.ts
23
+ import {
24
+ UnsupportedFunctionalityError
25
+ } from "@ai-sdk/provider";
26
+ import { convertToBase64 } from "@ai-sdk/provider-utils";
27
+ function convertToOpenAIChatMessages({
28
+ prompt,
29
+ systemMessageMode = "system"
30
+ }) {
31
+ const messages = [];
32
+ const warnings = [];
33
+ for (const { role, content } of prompt) {
34
+ switch (role) {
35
+ case "system": {
36
+ switch (systemMessageMode) {
37
+ case "system": {
38
+ messages.push({ role: "system", content });
39
+ break;
40
+ }
41
+ case "developer": {
42
+ messages.push({ role: "developer", content });
43
+ break;
44
+ }
45
+ case "remove": {
46
+ warnings.push({
47
+ type: "other",
48
+ message: "system messages are removed for this model"
49
+ });
50
+ break;
51
+ }
52
+ default: {
53
+ const _exhaustiveCheck = systemMessageMode;
54
+ throw new Error(
55
+ `Unsupported system message mode: ${_exhaustiveCheck}`
56
+ );
57
+ }
58
+ }
59
+ break;
60
+ }
61
+ case "user": {
62
+ if (content.length === 1 && content[0].type === "text") {
63
+ messages.push({ role: "user", content: content[0].text });
64
+ break;
65
+ }
66
+ messages.push({
67
+ role: "user",
68
+ content: content.map((part, index) => {
69
+ var _a, _b, _c;
70
+ switch (part.type) {
71
+ case "text": {
72
+ return { type: "text", text: part.text };
73
+ }
74
+ case "file": {
75
+ if (part.mediaType.startsWith("image/")) {
76
+ const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
77
+ return {
78
+ type: "image_url",
79
+ image_url: {
80
+ url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${convertToBase64(part.data)}`,
81
+ // OpenAI specific extension: image detail
82
+ detail: (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.imageDetail
83
+ }
84
+ };
85
+ } else if (part.mediaType.startsWith("audio/")) {
86
+ if (part.data instanceof URL) {
87
+ throw new UnsupportedFunctionalityError({
88
+ functionality: "audio file parts with URLs"
89
+ });
90
+ }
91
+ switch (part.mediaType) {
92
+ case "audio/wav": {
93
+ return {
94
+ type: "input_audio",
95
+ input_audio: {
96
+ data: convertToBase64(part.data),
97
+ format: "wav"
98
+ }
99
+ };
100
+ }
101
+ case "audio/mp3":
102
+ case "audio/mpeg": {
103
+ return {
104
+ type: "input_audio",
105
+ input_audio: {
106
+ data: convertToBase64(part.data),
107
+ format: "mp3"
108
+ }
109
+ };
110
+ }
111
+ default: {
112
+ throw new UnsupportedFunctionalityError({
113
+ functionality: `audio content parts with media type ${part.mediaType}`
114
+ });
115
+ }
116
+ }
117
+ } else if (part.mediaType === "application/pdf") {
118
+ if (part.data instanceof URL) {
119
+ throw new UnsupportedFunctionalityError({
120
+ functionality: "PDF file parts with URLs"
121
+ });
122
+ }
123
+ return {
124
+ type: "file",
125
+ file: typeof part.data === "string" && part.data.startsWith("file-") ? { file_id: part.data } : {
126
+ filename: (_c = part.filename) != null ? _c : `part-${index}.pdf`,
127
+ file_data: `data:application/pdf;base64,${convertToBase64(part.data)}`
128
+ }
129
+ };
130
+ } else {
131
+ throw new UnsupportedFunctionalityError({
132
+ functionality: `file part media type ${part.mediaType}`
133
+ });
134
+ }
135
+ }
136
+ }
137
+ })
138
+ });
139
+ break;
140
+ }
141
+ case "assistant": {
142
+ let text = "";
143
+ const toolCalls = [];
144
+ for (const part of content) {
145
+ switch (part.type) {
146
+ case "text": {
147
+ text += part.text;
148
+ break;
149
+ }
150
+ case "tool-call": {
151
+ toolCalls.push({
152
+ id: part.toolCallId,
153
+ type: "function",
154
+ function: {
155
+ name: part.toolName,
156
+ arguments: JSON.stringify(part.input)
157
+ }
158
+ });
159
+ break;
160
+ }
161
+ }
162
+ }
163
+ messages.push({
164
+ role: "assistant",
165
+ content: text,
166
+ tool_calls: toolCalls.length > 0 ? toolCalls : void 0
167
+ });
168
+ break;
169
+ }
170
+ case "tool": {
171
+ for (const toolResponse of content) {
172
+ const output = toolResponse.output;
173
+ let contentValue;
174
+ switch (output.type) {
175
+ case "text":
176
+ case "error-text":
177
+ contentValue = output.value;
178
+ break;
179
+ case "content":
180
+ case "json":
181
+ case "error-json":
182
+ contentValue = JSON.stringify(output.value);
183
+ break;
184
+ }
185
+ messages.push({
186
+ role: "tool",
187
+ tool_call_id: toolResponse.toolCallId,
188
+ content: contentValue
189
+ });
190
+ }
191
+ break;
192
+ }
193
+ default: {
194
+ const _exhaustiveCheck = role;
195
+ throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
196
+ }
197
+ }
198
+ }
199
+ return { messages, warnings };
200
+ }
201
+
202
+ // src/get-response-metadata.ts
203
+ function getResponseMetadata({
204
+ id,
205
+ model,
206
+ created
207
+ }) {
208
+ return {
209
+ id: id != null ? id : void 0,
210
+ modelId: model != null ? model : void 0,
211
+ timestamp: created != null ? new Date(created * 1e3) : void 0
212
+ };
213
+ }
214
+
215
+ // src/map-openai-finish-reason.ts
216
+ function mapOpenAIFinishReason(finishReason) {
217
+ switch (finishReason) {
218
+ case "stop":
219
+ return "stop";
220
+ case "length":
221
+ return "length";
222
+ case "content_filter":
223
+ return "content-filter";
224
+ case "function_call":
225
+ case "tool_calls":
226
+ return "tool-calls";
227
+ default:
228
+ return "unknown";
229
+ }
230
+ }
231
+
232
+ // src/openai-chat-options.ts
233
+ import { z } from "zod/v4";
234
+ var openaiProviderOptions = z.object({
235
+ /**
236
+ * Modify the likelihood of specified tokens appearing in the completion.
237
+ *
238
+ * Accepts a JSON object that maps tokens (specified by their token ID in
239
+ * the GPT tokenizer) to an associated bias value from -100 to 100.
240
+ */
241
+ logitBias: z.record(z.coerce.number(), z.number()).optional(),
242
+ /**
243
+ * Return the log probabilities of the tokens.
244
+ *
245
+ * Setting to true will return the log probabilities of the tokens that
246
+ * were generated.
247
+ *
248
+ * Setting to a number will return the log probabilities of the top n
249
+ * tokens that were generated.
250
+ */
251
+ logprobs: z.union([z.boolean(), z.number()]).optional(),
252
+ /**
253
+ * Whether to enable parallel function calling during tool use. Default to true.
254
+ */
255
+ parallelToolCalls: z.boolean().optional(),
256
+ /**
257
+ * A unique identifier representing your end-user, which can help OpenAI to
258
+ * monitor and detect abuse.
259
+ */
260
+ user: z.string().optional(),
261
+ /**
262
+ * Reasoning effort for reasoning models. Defaults to `medium`.
263
+ */
264
+ reasoningEffort: z.enum(["low", "medium", "high"]).optional(),
265
+ /**
266
+ * Maximum number of completion tokens to generate. Useful for reasoning models.
267
+ */
268
+ maxCompletionTokens: z.number().optional(),
269
+ /**
270
+ * Whether to enable persistence in responses API.
271
+ */
272
+ store: z.boolean().optional(),
273
+ /**
274
+ * Metadata to associate with the request.
275
+ */
276
+ metadata: z.record(z.string().max(64), z.string().max(512)).optional(),
277
+ /**
278
+ * Parameters for prediction mode.
279
+ */
280
+ prediction: z.record(z.string(), z.any()).optional(),
281
+ /**
282
+ * Whether to use structured outputs.
283
+ *
284
+ * @default true
285
+ */
286
+ structuredOutputs: z.boolean().optional(),
287
+ /**
288
+ * Service tier for the request.
289
+ * - 'auto': Default service tier
290
+ * - 'flex': 50% cheaper processing at the cost of increased latency. Only available for o3 and o4-mini models.
291
+ * - 'priority': Higher-speed processing with predictably low latency at premium cost. Available for Enterprise customers.
292
+ *
293
+ * @default 'auto'
294
+ */
295
+ serviceTier: z.enum(["auto", "flex", "priority"]).optional(),
296
+ /**
297
+ * Whether to use strict JSON schema validation.
298
+ *
299
+ * @default false
300
+ */
301
+ strictJsonSchema: z.boolean().optional()
302
+ });
303
+
304
+ // src/openai-error.ts
305
+ import { z as z2 } from "zod/v4";
306
+ import { createJsonErrorResponseHandler } from "@ai-sdk/provider-utils";
307
+ var openaiErrorDataSchema = z2.object({
308
+ error: z2.object({
309
+ message: z2.string(),
310
+ // The additional information below is handled loosely to support
311
+ // OpenAI-compatible providers that have slightly different error
312
+ // responses:
313
+ type: z2.string().nullish(),
314
+ param: z2.any().nullish(),
315
+ code: z2.union([z2.string(), z2.number()]).nullish()
316
+ })
317
+ });
318
+ var openaiFailedResponseHandler = createJsonErrorResponseHandler({
319
+ errorSchema: openaiErrorDataSchema,
320
+ errorToMessage: (data) => data.error.message
321
+ });
322
+
323
+ // src/openai-prepare-tools.ts
324
+ import {
325
+ UnsupportedFunctionalityError as UnsupportedFunctionalityError2
326
+ } from "@ai-sdk/provider";
327
+
328
+ // src/tool/file-search.ts
329
+ import { createProviderDefinedToolFactory } from "@ai-sdk/provider-utils";
330
+ import { z as z3 } from "zod/v4";
331
+ var comparisonFilterSchema = z3.object({
332
+ key: z3.string(),
333
+ type: z3.enum(["eq", "ne", "gt", "gte", "lt", "lte"]),
334
+ value: z3.union([z3.string(), z3.number(), z3.boolean()])
335
+ });
336
+ var compoundFilterSchema = z3.object({
337
+ type: z3.enum(["and", "or"]),
338
+ filters: z3.array(
339
+ z3.union([comparisonFilterSchema, z3.lazy(() => compoundFilterSchema)])
340
+ )
341
+ });
342
+ var filtersSchema = z3.union([comparisonFilterSchema, compoundFilterSchema]);
343
+ var fileSearchArgsSchema = z3.object({
344
+ /**
345
+ * List of vector store IDs to search through. If not provided, searches all available vector stores.
346
+ */
347
+ vectorStoreIds: z3.array(z3.string()).optional(),
348
+ /**
349
+ * Maximum number of search results to return. Defaults to 10.
350
+ */
351
+ maxNumResults: z3.number().optional(),
352
+ /**
353
+ * Ranking options for the search.
354
+ */
355
+ ranking: z3.object({
356
+ ranker: z3.enum(["auto", "default-2024-08-21"]).optional()
357
+ }).optional(),
358
+ /**
359
+ * A filter to apply based on file attributes.
360
+ */
361
+ filters: filtersSchema.optional()
362
+ });
363
+ var fileSearch = createProviderDefinedToolFactory({
364
+ id: "openai.file_search",
365
+ name: "file_search",
366
+ inputSchema: z3.object({
367
+ query: z3.string()
368
+ })
369
+ });
370
+
371
+ // src/tool/web-search-preview.ts
372
+ import { createProviderDefinedToolFactory as createProviderDefinedToolFactory2 } from "@ai-sdk/provider-utils";
373
+ import { z as z4 } from "zod/v4";
374
+ var webSearchPreviewArgsSchema = z4.object({
375
+ /**
376
+ * Search context size to use for the web search.
377
+ * - high: Most comprehensive context, highest cost, slower response
378
+ * - medium: Balanced context, cost, and latency (default)
379
+ * - low: Least context, lowest cost, fastest response
380
+ */
381
+ searchContextSize: z4.enum(["low", "medium", "high"]).optional(),
382
+ /**
383
+ * User location information to provide geographically relevant search results.
384
+ */
385
+ userLocation: z4.object({
386
+ /**
387
+ * Type of location (always 'approximate')
388
+ */
389
+ type: z4.literal("approximate"),
390
+ /**
391
+ * Two-letter ISO country code (e.g., 'US', 'GB')
392
+ */
393
+ country: z4.string().optional(),
394
+ /**
395
+ * City name (free text, e.g., 'Minneapolis')
396
+ */
397
+ city: z4.string().optional(),
398
+ /**
399
+ * Region name (free text, e.g., 'Minnesota')
400
+ */
401
+ region: z4.string().optional(),
402
+ /**
403
+ * IANA timezone (e.g., 'America/Chicago')
404
+ */
405
+ timezone: z4.string().optional()
406
+ }).optional()
407
+ });
408
+ var webSearchPreview = createProviderDefinedToolFactory2({
409
+ id: "openai.web_search_preview",
410
+ name: "web_search_preview",
411
+ inputSchema: z4.object({})
412
+ });
413
+
414
+ // src/openai-prepare-tools.ts
415
+ function prepareTools({
416
+ tools,
417
+ toolChoice,
418
+ structuredOutputs,
419
+ strictJsonSchema
420
+ }) {
421
+ tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
422
+ const toolWarnings = [];
423
+ if (tools == null) {
424
+ return { tools: void 0, toolChoice: void 0, toolWarnings };
425
+ }
426
+ const openaiTools2 = [];
427
+ for (const tool of tools) {
428
+ switch (tool.type) {
429
+ case "function":
430
+ openaiTools2.push({
431
+ type: "function",
432
+ function: {
433
+ name: tool.name,
434
+ description: tool.description,
435
+ parameters: tool.inputSchema,
436
+ strict: structuredOutputs ? strictJsonSchema : void 0
437
+ }
438
+ });
439
+ break;
440
+ case "provider-defined":
441
+ switch (tool.id) {
442
+ case "openai.file_search": {
443
+ const args = fileSearchArgsSchema.parse(tool.args);
444
+ openaiTools2.push({
445
+ type: "file_search",
446
+ vector_store_ids: args.vectorStoreIds,
447
+ max_num_results: args.maxNumResults,
448
+ ranking_options: args.ranking ? { ranker: args.ranking.ranker } : void 0,
449
+ filters: args.filters
450
+ });
451
+ break;
452
+ }
453
+ case "openai.web_search_preview": {
454
+ const args = webSearchPreviewArgsSchema.parse(tool.args);
455
+ openaiTools2.push({
456
+ type: "web_search_preview",
457
+ search_context_size: args.searchContextSize,
458
+ user_location: args.userLocation
459
+ });
460
+ break;
461
+ }
462
+ default:
463
+ toolWarnings.push({ type: "unsupported-tool", tool });
464
+ break;
465
+ }
466
+ break;
467
+ default:
468
+ toolWarnings.push({ type: "unsupported-tool", tool });
469
+ break;
470
+ }
471
+ }
472
+ if (toolChoice == null) {
473
+ return { tools: openaiTools2, toolChoice: void 0, toolWarnings };
474
+ }
475
+ const type = toolChoice.type;
476
+ switch (type) {
477
+ case "auto":
478
+ case "none":
479
+ case "required":
480
+ return { tools: openaiTools2, toolChoice: type, toolWarnings };
481
+ case "tool":
482
+ return {
483
+ tools: openaiTools2,
484
+ toolChoice: {
485
+ type: "function",
486
+ function: {
487
+ name: toolChoice.toolName
488
+ }
489
+ },
490
+ toolWarnings
491
+ };
492
+ default: {
493
+ const _exhaustiveCheck = type;
494
+ throw new UnsupportedFunctionalityError2({
495
+ functionality: `tool choice type: ${_exhaustiveCheck}`
496
+ });
497
+ }
498
+ }
499
+ }
500
+
501
+ // src/openai-chat-language-model.ts
502
+ var OpenAIChatLanguageModel = class {
503
+ constructor(modelId, config) {
504
+ this.specificationVersion = "v2";
505
+ this.supportedUrls = {
506
+ "image/*": [/^https?:\/\/.*$/]
507
+ };
508
+ this.modelId = modelId;
509
+ this.config = config;
510
+ }
511
+ get provider() {
512
+ return this.config.provider;
513
+ }
514
+ async getArgs({
515
+ prompt,
516
+ maxOutputTokens,
517
+ temperature,
518
+ topP,
519
+ topK,
520
+ frequencyPenalty,
521
+ presencePenalty,
522
+ stopSequences,
523
+ responseFormat,
524
+ seed,
525
+ tools,
526
+ toolChoice,
527
+ providerOptions
528
+ }) {
529
+ var _a, _b, _c, _d;
530
+ const warnings = [];
531
+ const openaiOptions = (_a = await parseProviderOptions({
532
+ provider: "openai",
533
+ providerOptions,
534
+ schema: openaiProviderOptions
535
+ })) != null ? _a : {};
536
+ const structuredOutputs = (_b = openaiOptions.structuredOutputs) != null ? _b : true;
537
+ if (topK != null) {
538
+ warnings.push({
539
+ type: "unsupported-setting",
540
+ setting: "topK"
541
+ });
542
+ }
543
+ if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !structuredOutputs) {
544
+ warnings.push({
545
+ type: "unsupported-setting",
546
+ setting: "responseFormat",
547
+ details: "JSON response format schema is only supported with structuredOutputs"
548
+ });
549
+ }
550
+ const { messages, warnings: messageWarnings } = convertToOpenAIChatMessages(
551
+ {
552
+ prompt,
553
+ systemMessageMode: getSystemMessageMode(this.modelId)
554
+ }
555
+ );
556
+ warnings.push(...messageWarnings);
557
+ const strictJsonSchema = (_c = openaiOptions.strictJsonSchema) != null ? _c : false;
558
+ const baseArgs = {
559
+ // model id:
560
+ model: this.modelId,
561
+ // model specific settings:
562
+ logit_bias: openaiOptions.logitBias,
563
+ logprobs: openaiOptions.logprobs === true || typeof openaiOptions.logprobs === "number" ? true : void 0,
564
+ top_logprobs: typeof openaiOptions.logprobs === "number" ? openaiOptions.logprobs : typeof openaiOptions.logprobs === "boolean" ? openaiOptions.logprobs ? 0 : void 0 : void 0,
565
+ user: openaiOptions.user,
566
+ parallel_tool_calls: openaiOptions.parallelToolCalls,
567
+ // standardized settings:
568
+ max_tokens: maxOutputTokens,
569
+ temperature,
570
+ top_p: topP,
571
+ frequency_penalty: frequencyPenalty,
572
+ presence_penalty: presencePenalty,
573
+ response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? structuredOutputs && responseFormat.schema != null ? {
574
+ type: "json_schema",
575
+ json_schema: {
576
+ schema: responseFormat.schema,
577
+ strict: strictJsonSchema,
578
+ name: (_d = responseFormat.name) != null ? _d : "response",
579
+ description: responseFormat.description
580
+ }
581
+ } : { type: "json_object" } : void 0,
582
+ stop: stopSequences,
583
+ seed,
584
+ // openai specific settings:
585
+ // TODO remove in next major version; we auto-map maxOutputTokens now
586
+ max_completion_tokens: openaiOptions.maxCompletionTokens,
587
+ store: openaiOptions.store,
588
+ metadata: openaiOptions.metadata,
589
+ prediction: openaiOptions.prediction,
590
+ reasoning_effort: openaiOptions.reasoningEffort,
591
+ service_tier: openaiOptions.serviceTier,
592
+ // messages:
593
+ messages
594
+ };
595
+ if (isReasoningModel(this.modelId)) {
596
+ if (baseArgs.temperature != null) {
597
+ baseArgs.temperature = void 0;
598
+ warnings.push({
599
+ type: "unsupported-setting",
600
+ setting: "temperature",
601
+ details: "temperature is not supported for reasoning models"
602
+ });
603
+ }
604
+ if (baseArgs.top_p != null) {
605
+ baseArgs.top_p = void 0;
606
+ warnings.push({
607
+ type: "unsupported-setting",
608
+ setting: "topP",
609
+ details: "topP is not supported for reasoning models"
610
+ });
611
+ }
612
+ if (baseArgs.frequency_penalty != null) {
613
+ baseArgs.frequency_penalty = void 0;
614
+ warnings.push({
615
+ type: "unsupported-setting",
616
+ setting: "frequencyPenalty",
617
+ details: "frequencyPenalty is not supported for reasoning models"
618
+ });
619
+ }
620
+ if (baseArgs.presence_penalty != null) {
621
+ baseArgs.presence_penalty = void 0;
622
+ warnings.push({
623
+ type: "unsupported-setting",
624
+ setting: "presencePenalty",
625
+ details: "presencePenalty is not supported for reasoning models"
626
+ });
627
+ }
628
+ if (baseArgs.logit_bias != null) {
629
+ baseArgs.logit_bias = void 0;
630
+ warnings.push({
631
+ type: "other",
632
+ message: "logitBias is not supported for reasoning models"
633
+ });
634
+ }
635
+ if (baseArgs.logprobs != null) {
636
+ baseArgs.logprobs = void 0;
637
+ warnings.push({
638
+ type: "other",
639
+ message: "logprobs is not supported for reasoning models"
640
+ });
641
+ }
642
+ if (baseArgs.top_logprobs != null) {
643
+ baseArgs.top_logprobs = void 0;
644
+ warnings.push({
645
+ type: "other",
646
+ message: "topLogprobs is not supported for reasoning models"
647
+ });
648
+ }
649
+ if (baseArgs.max_tokens != null) {
650
+ if (baseArgs.max_completion_tokens == null) {
651
+ baseArgs.max_completion_tokens = baseArgs.max_tokens;
652
+ }
653
+ baseArgs.max_tokens = void 0;
654
+ }
655
+ } else if (this.modelId.startsWith("gpt-4o-search-preview") || this.modelId.startsWith("gpt-4o-mini-search-preview")) {
656
+ if (baseArgs.temperature != null) {
657
+ baseArgs.temperature = void 0;
658
+ warnings.push({
659
+ type: "unsupported-setting",
660
+ setting: "temperature",
661
+ details: "temperature is not supported for the search preview models and has been removed."
662
+ });
663
+ }
664
+ }
665
+ if (openaiOptions.serviceTier === "flex" && !supportsFlexProcessing(this.modelId)) {
666
+ warnings.push({
667
+ type: "unsupported-setting",
668
+ setting: "serviceTier",
669
+ details: "flex processing is only available for o3, o4-mini, and gpt-5 models"
670
+ });
671
+ baseArgs.service_tier = void 0;
672
+ }
673
+ if (openaiOptions.serviceTier === "priority" && !supportsPriorityProcessing(this.modelId)) {
674
+ warnings.push({
675
+ type: "unsupported-setting",
676
+ setting: "serviceTier",
677
+ details: "priority processing is only available for supported models (gpt-4, gpt-5, gpt-5-mini, o3, o4-mini) and requires Enterprise access. gpt-5-nano is not supported"
678
+ });
679
+ baseArgs.service_tier = void 0;
680
+ }
681
+ const {
682
+ tools: openaiTools2,
683
+ toolChoice: openaiToolChoice,
684
+ toolWarnings
685
+ } = prepareTools({
686
+ tools,
687
+ toolChoice,
688
+ structuredOutputs,
689
+ strictJsonSchema
690
+ });
691
+ return {
692
+ args: {
693
+ ...baseArgs,
694
+ tools: openaiTools2,
695
+ tool_choice: openaiToolChoice
696
+ },
697
+ warnings: [...warnings, ...toolWarnings]
698
+ };
699
+ }
700
+ async doGenerate(options) {
701
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n;
702
+ const { args: body, warnings } = await this.getArgs(options);
703
+ const {
704
+ responseHeaders,
705
+ value: response,
706
+ rawValue: rawResponse
707
+ } = await postJsonToApi({
708
+ url: this.config.url({
709
+ path: "/chat/completions",
710
+ modelId: this.modelId
711
+ }),
712
+ headers: combineHeaders(this.config.headers(), options.headers),
713
+ body,
714
+ failedResponseHandler: openaiFailedResponseHandler,
715
+ successfulResponseHandler: createJsonResponseHandler(
716
+ openaiChatResponseSchema
717
+ ),
718
+ abortSignal: options.abortSignal,
719
+ fetch: this.config.fetch
720
+ });
721
+ const choice = response.choices[0];
722
+ const content = [];
723
+ const text = choice.message.content;
724
+ if (text != null && text.length > 0) {
725
+ content.push({ type: "text", text });
726
+ }
727
+ for (const toolCall of (_a = choice.message.tool_calls) != null ? _a : []) {
728
+ content.push({
729
+ type: "tool-call",
730
+ toolCallId: (_b = toolCall.id) != null ? _b : generateId(),
731
+ toolName: toolCall.function.name,
732
+ input: toolCall.function.arguments
733
+ });
734
+ }
735
+ for (const annotation of (_c = choice.message.annotations) != null ? _c : []) {
736
+ content.push({
737
+ type: "source",
738
+ sourceType: "url",
739
+ id: generateId(),
740
+ url: annotation.url,
741
+ title: annotation.title
742
+ });
743
+ }
744
+ const completionTokenDetails = (_d = response.usage) == null ? void 0 : _d.completion_tokens_details;
745
+ const promptTokenDetails = (_e = response.usage) == null ? void 0 : _e.prompt_tokens_details;
746
+ const providerMetadata = { openai: {} };
747
+ if ((completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens) != null) {
748
+ providerMetadata.openai.acceptedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens;
749
+ }
750
+ if ((completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens) != null) {
751
+ providerMetadata.openai.rejectedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens;
752
+ }
753
+ if (((_f = choice.logprobs) == null ? void 0 : _f.content) != null) {
754
+ providerMetadata.openai.logprobs = choice.logprobs.content;
755
+ }
756
+ return {
757
+ content,
758
+ finishReason: mapOpenAIFinishReason(choice.finish_reason),
759
+ usage: {
760
+ inputTokens: (_h = (_g = response.usage) == null ? void 0 : _g.prompt_tokens) != null ? _h : void 0,
761
+ outputTokens: (_j = (_i = response.usage) == null ? void 0 : _i.completion_tokens) != null ? _j : void 0,
762
+ totalTokens: (_l = (_k = response.usage) == null ? void 0 : _k.total_tokens) != null ? _l : void 0,
763
+ reasoningTokens: (_m = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null ? _m : void 0,
764
+ cachedInputTokens: (_n = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null ? _n : void 0
765
+ },
766
+ request: { body },
767
+ response: {
768
+ ...getResponseMetadata(response),
769
+ headers: responseHeaders,
770
+ body: rawResponse
771
+ },
772
+ warnings,
773
+ providerMetadata
774
+ };
775
+ }
776
+ async doStream(options) {
777
+ const { args, warnings } = await this.getArgs(options);
778
+ const body = {
779
+ ...args,
780
+ stream: true,
781
+ stream_options: {
782
+ include_usage: true
783
+ }
784
+ };
785
+ const { responseHeaders, value: response } = await postJsonToApi({
786
+ url: this.config.url({
787
+ path: "/chat/completions",
788
+ modelId: this.modelId
789
+ }),
790
+ headers: combineHeaders(this.config.headers(), options.headers),
791
+ body,
792
+ failedResponseHandler: openaiFailedResponseHandler,
793
+ successfulResponseHandler: createEventSourceResponseHandler(
794
+ openaiChatChunkSchema
795
+ ),
796
+ abortSignal: options.abortSignal,
797
+ fetch: this.config.fetch
798
+ });
799
+ const toolCalls = [];
800
+ let finishReason = "unknown";
801
+ const usage = {
802
+ inputTokens: void 0,
803
+ outputTokens: void 0,
804
+ totalTokens: void 0
805
+ };
806
+ let isFirstChunk = true;
807
+ let isActiveText = false;
808
+ const providerMetadata = { openai: {} };
809
+ return {
810
+ stream: response.pipeThrough(
811
+ new TransformStream({
812
+ start(controller) {
813
+ controller.enqueue({ type: "stream-start", warnings });
814
+ },
815
+ transform(chunk, controller) {
816
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x;
817
+ if (options.includeRawChunks) {
818
+ controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
819
+ }
820
+ if (!chunk.success) {
821
+ finishReason = "error";
822
+ controller.enqueue({ type: "error", error: chunk.error });
823
+ return;
824
+ }
825
+ const value = chunk.value;
826
+ if ("error" in value) {
827
+ finishReason = "error";
828
+ controller.enqueue({ type: "error", error: value.error });
829
+ return;
830
+ }
831
+ if (isFirstChunk) {
832
+ isFirstChunk = false;
833
+ controller.enqueue({
834
+ type: "response-metadata",
835
+ ...getResponseMetadata(value)
836
+ });
837
+ }
838
+ if (value.usage != null) {
839
+ usage.inputTokens = (_a = value.usage.prompt_tokens) != null ? _a : void 0;
840
+ usage.outputTokens = (_b = value.usage.completion_tokens) != null ? _b : void 0;
841
+ usage.totalTokens = (_c = value.usage.total_tokens) != null ? _c : void 0;
842
+ usage.reasoningTokens = (_e = (_d = value.usage.completion_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : void 0;
843
+ usage.cachedInputTokens = (_g = (_f = value.usage.prompt_tokens_details) == null ? void 0 : _f.cached_tokens) != null ? _g : void 0;
844
+ if (((_h = value.usage.completion_tokens_details) == null ? void 0 : _h.accepted_prediction_tokens) != null) {
845
+ providerMetadata.openai.acceptedPredictionTokens = (_i = value.usage.completion_tokens_details) == null ? void 0 : _i.accepted_prediction_tokens;
846
+ }
847
+ if (((_j = value.usage.completion_tokens_details) == null ? void 0 : _j.rejected_prediction_tokens) != null) {
848
+ providerMetadata.openai.rejectedPredictionTokens = (_k = value.usage.completion_tokens_details) == null ? void 0 : _k.rejected_prediction_tokens;
849
+ }
850
+ }
851
+ const choice = value.choices[0];
852
+ if ((choice == null ? void 0 : choice.finish_reason) != null) {
853
+ finishReason = mapOpenAIFinishReason(choice.finish_reason);
854
+ }
855
+ if (((_l = choice == null ? void 0 : choice.logprobs) == null ? void 0 : _l.content) != null) {
856
+ providerMetadata.openai.logprobs = choice.logprobs.content;
857
+ }
858
+ if ((choice == null ? void 0 : choice.delta) == null) {
859
+ return;
860
+ }
861
+ const delta = choice.delta;
862
+ if (delta.content != null) {
863
+ if (!isActiveText) {
864
+ controller.enqueue({ type: "text-start", id: "0" });
865
+ isActiveText = true;
866
+ }
867
+ controller.enqueue({
868
+ type: "text-delta",
869
+ id: "0",
870
+ delta: delta.content
871
+ });
872
+ }
873
+ if (delta.tool_calls != null) {
874
+ for (const toolCallDelta of delta.tool_calls) {
875
+ const index = toolCallDelta.index;
876
+ if (toolCalls[index] == null) {
877
+ if (toolCallDelta.type !== "function") {
878
+ throw new InvalidResponseDataError({
879
+ data: toolCallDelta,
880
+ message: `Expected 'function' type.`
881
+ });
882
+ }
883
+ if (toolCallDelta.id == null) {
884
+ throw new InvalidResponseDataError({
885
+ data: toolCallDelta,
886
+ message: `Expected 'id' to be a string.`
887
+ });
888
+ }
889
+ if (((_m = toolCallDelta.function) == null ? void 0 : _m.name) == null) {
890
+ throw new InvalidResponseDataError({
891
+ data: toolCallDelta,
892
+ message: `Expected 'function.name' to be a string.`
893
+ });
894
+ }
895
+ controller.enqueue({
896
+ type: "tool-input-start",
897
+ id: toolCallDelta.id,
898
+ toolName: toolCallDelta.function.name
899
+ });
900
+ toolCalls[index] = {
901
+ id: toolCallDelta.id,
902
+ type: "function",
903
+ function: {
904
+ name: toolCallDelta.function.name,
905
+ arguments: (_n = toolCallDelta.function.arguments) != null ? _n : ""
906
+ },
907
+ hasFinished: false
908
+ };
909
+ const toolCall2 = toolCalls[index];
910
+ if (((_o = toolCall2.function) == null ? void 0 : _o.name) != null && ((_p = toolCall2.function) == null ? void 0 : _p.arguments) != null) {
911
+ if (toolCall2.function.arguments.length > 0) {
912
+ controller.enqueue({
913
+ type: "tool-input-delta",
914
+ id: toolCall2.id,
915
+ delta: toolCall2.function.arguments
916
+ });
917
+ }
918
+ if (isParsableJson(toolCall2.function.arguments)) {
919
+ controller.enqueue({
920
+ type: "tool-input-end",
921
+ id: toolCall2.id
922
+ });
923
+ controller.enqueue({
924
+ type: "tool-call",
925
+ toolCallId: (_q = toolCall2.id) != null ? _q : generateId(),
926
+ toolName: toolCall2.function.name,
927
+ input: toolCall2.function.arguments
928
+ });
929
+ toolCall2.hasFinished = true;
930
+ }
931
+ }
932
+ continue;
933
+ }
934
+ const toolCall = toolCalls[index];
935
+ if (toolCall.hasFinished) {
936
+ continue;
937
+ }
938
+ if (((_r = toolCallDelta.function) == null ? void 0 : _r.arguments) != null) {
939
+ toolCall.function.arguments += (_t = (_s = toolCallDelta.function) == null ? void 0 : _s.arguments) != null ? _t : "";
940
+ }
941
+ controller.enqueue({
942
+ type: "tool-input-delta",
943
+ id: toolCall.id,
944
+ delta: (_u = toolCallDelta.function.arguments) != null ? _u : ""
945
+ });
946
+ if (((_v = toolCall.function) == null ? void 0 : _v.name) != null && ((_w = toolCall.function) == null ? void 0 : _w.arguments) != null && isParsableJson(toolCall.function.arguments)) {
947
+ controller.enqueue({
948
+ type: "tool-input-end",
949
+ id: toolCall.id
950
+ });
951
+ controller.enqueue({
952
+ type: "tool-call",
953
+ toolCallId: (_x = toolCall.id) != null ? _x : generateId(),
954
+ toolName: toolCall.function.name,
955
+ input: toolCall.function.arguments
956
+ });
957
+ toolCall.hasFinished = true;
958
+ }
959
+ }
960
+ }
961
+ if (delta.annotations != null) {
962
+ for (const annotation of delta.annotations) {
963
+ controller.enqueue({
964
+ type: "source",
965
+ sourceType: "url",
966
+ id: generateId(),
967
+ url: annotation.url,
968
+ title: annotation.title
969
+ });
970
+ }
971
+ }
972
+ },
973
+ flush(controller) {
974
+ if (isActiveText) {
975
+ controller.enqueue({ type: "text-end", id: "0" });
976
+ }
977
+ controller.enqueue({
978
+ type: "finish",
979
+ finishReason,
980
+ usage,
981
+ ...providerMetadata != null ? { providerMetadata } : {}
982
+ });
983
+ }
984
+ })
985
+ ),
986
+ request: { body },
987
+ response: { headers: responseHeaders }
988
+ };
989
+ }
990
+ };
991
// Token usage payload shared by chat responses and streaming chunks.
// Every field is nullish-tolerant: OpenAI-compatible providers may omit
// usage entirely or send partial detail objects.
var openaiTokenUsageSchema = z5.object({
  prompt_tokens: z5.number().nullish(),
  completion_tokens: z5.number().nullish(),
  total_tokens: z5.number().nullish(),
  prompt_tokens_details: z5.object({
    cached_tokens: z5.number().nullish()
  }).nullish(),
  completion_tokens_details: z5.object({
    reasoning_tokens: z5.number().nullish(),
    accepted_prediction_tokens: z5.number().nullish(),
    rejected_prediction_tokens: z5.number().nullish()
  }).nullish()
}).nullish();
// Non-streaming chat completion response. tool_calls carry complete
// function name/arguments; annotations carry url_citation sources.
var openaiChatResponseSchema = z5.object({
  id: z5.string().nullish(),
  created: z5.number().nullish(),
  model: z5.string().nullish(),
  choices: z5.array(
    z5.object({
      message: z5.object({
        role: z5.literal("assistant").nullish(),
        content: z5.string().nullish(),
        tool_calls: z5.array(
          z5.object({
            id: z5.string().nullish(),
            type: z5.literal("function"),
            function: z5.object({
              name: z5.string(),
              arguments: z5.string()
            })
          })
        ).nullish(),
        annotations: z5.array(
          z5.object({
            type: z5.literal("url_citation"),
            start_index: z5.number(),
            end_index: z5.number(),
            url: z5.string(),
            title: z5.string()
          })
        ).nullish()
      }),
      index: z5.number(),
      logprobs: z5.object({
        content: z5.array(
          z5.object({
            token: z5.string(),
            logprob: z5.number(),
            top_logprobs: z5.array(
              z5.object({
                token: z5.string(),
                logprob: z5.number()
              })
            )
          })
        ).nullish()
      }).nullish(),
      finish_reason: z5.string().nullish()
    })
  ),
  usage: openaiTokenUsageSchema
});
// Streaming chunk: either a delta-bearing chunk or an error payload
// (openaiErrorDataSchema, defined earlier in this file). Unlike the
// non-streaming schema, tool-call name/arguments are nullish because
// they arrive incrementally across deltas.
var openaiChatChunkSchema = z5.union([
  z5.object({
    id: z5.string().nullish(),
    created: z5.number().nullish(),
    model: z5.string().nullish(),
    choices: z5.array(
      z5.object({
        delta: z5.object({
          role: z5.enum(["assistant"]).nullish(),
          content: z5.string().nullish(),
          tool_calls: z5.array(
            z5.object({
              index: z5.number(),
              id: z5.string().nullish(),
              type: z5.literal("function").nullish(),
              function: z5.object({
                name: z5.string().nullish(),
                arguments: z5.string().nullish()
              })
            })
          ).nullish(),
          annotations: z5.array(
            z5.object({
              type: z5.literal("url_citation"),
              start_index: z5.number(),
              end_index: z5.number(),
              url: z5.string(),
              title: z5.string()
            })
          ).nullish()
        }).nullish(),
        logprobs: z5.object({
          content: z5.array(
            z5.object({
              token: z5.string(),
              logprob: z5.number(),
              top_logprobs: z5.array(
                z5.object({
                  token: z5.string(),
                  logprob: z5.number()
                })
              )
            })
          ).nullish()
        }).nullish(),
        finish_reason: z5.string().nullish(),
        index: z5.number()
      })
    ),
    usage: openaiTokenUsageSchema
  }),
  openaiErrorDataSchema
]);
1106
/**
 * Heuristic check for reasoning models.
 * NOTE: matches ANY model id beginning with "o" (o1/o3/o4 family) or "gpt-5".
 */
function isReasoningModel(modelId) {
  const reasoningPrefixes = ["o", "gpt-5"];
  return reasoningPrefixes.some((prefix) => modelId.startsWith(prefix));
}
1109
/**
 * Models eligible for the "flex" service tier: o3, o4-mini, and gpt-5
 * families (prefix match on the model id).
 */
function supportsFlexProcessing(modelId) {
  const flexPrefixes = ["o3", "o4-mini", "gpt-5"];
  return flexPrefixes.some((prefix) => modelId.startsWith(prefix));
}
1112
/**
 * Models eligible for the "priority" service tier: gpt-4, gpt-5
 * (excluding gpt-5-nano), o3, and o4-mini families.
 * The original also listed gpt-5-mini explicitly; that case is already
 * covered by the gpt-5 prefix, so behavior is unchanged.
 */
function supportsPriorityProcessing(modelId) {
  if (modelId.startsWith("gpt-5-nano")) {
    return false;
  }
  const priorityPrefixes = ["gpt-4", "gpt-5", "o3", "o4-mini"];
  return priorityPrefixes.some((prefix) => modelId.startsWith(prefix));
}
1115
/**
 * Determines how system messages are encoded for a model:
 * - non-reasoning models use a plain "system" message;
 * - reasoning models use the per-model override in `reasoningModels`,
 *   defaulting to "developer" when no override exists.
 */
function getSystemMessageMode(modelId) {
  if (!isReasoningModel(modelId)) {
    return "system";
  }
  const override = reasoningModels[modelId];
  if (override != null && override.systemMessageMode != null) {
    return override.systemMessageMode;
  }
  return "developer";
}
1122
// Per-model system-message handling overrides for reasoning models.
// o1-mini/o1-preview variants cannot receive system messages at all
// ("remove"); o3/o4 variants take them as "developer" messages. Models
// absent from this table fall back to "developer" (see getSystemMessageMode).
var reasoningModels = {
  "o1-mini": {
    systemMessageMode: "remove"
  },
  "o1-mini-2024-09-12": {
    systemMessageMode: "remove"
  },
  "o1-preview": {
    systemMessageMode: "remove"
  },
  "o1-preview-2024-09-12": {
    systemMessageMode: "remove"
  },
  o3: {
    systemMessageMode: "developer"
  },
  "o3-2025-04-16": {
    systemMessageMode: "developer"
  },
  "o3-mini": {
    systemMessageMode: "developer"
  },
  "o3-mini-2025-01-31": {
    systemMessageMode: "developer"
  },
  "o4-mini": {
    systemMessageMode: "developer"
  },
  "o4-mini-2025-04-16": {
    systemMessageMode: "developer"
  }
};
1154
+
1155
+ // src/openai-completion-language-model.ts
1156
+ import {
1157
+ combineHeaders as combineHeaders2,
1158
+ createEventSourceResponseHandler as createEventSourceResponseHandler2,
1159
+ createJsonResponseHandler as createJsonResponseHandler2,
1160
+ parseProviderOptions as parseProviderOptions2,
1161
+ postJsonToApi as postJsonToApi2
1162
+ } from "@ai-sdk/provider-utils";
1163
+ import { z as z7 } from "zod/v4";
1164
+
1165
+ // src/convert-to-openai-completion-prompt.ts
1166
+ import {
1167
+ InvalidPromptError,
1168
+ UnsupportedFunctionalityError as UnsupportedFunctionalityError3
1169
+ } from "@ai-sdk/provider";
1170
/**
 * Converts an AI SDK prompt into a plain-text prompt for the legacy
 * Completions API, in the shape:
 *
 *   <system preamble>\n\n<user>:\n...\n\n<assistant>:\n...\n\n<assistant>:\n
 *
 * A single system message is only allowed at the very start (it becomes the
 * preamble); any later system message throws InvalidPromptError. Tool calls
 * and tool messages are not representable in this format and throw
 * UnsupportedFunctionalityError.
 *
 * Returns the assembled prompt plus a stop sequence ("\n<user>:") that
 * prevents the model from continuing the conversation on its own.
 */
function convertToOpenAICompletionPrompt({
  prompt,
  user = "user",
  assistant = "assistant"
}) {
  let text = "";
  // Leading system message becomes a free-standing preamble.
  if (prompt[0].role === "system") {
    text += `${prompt[0].content}

`;
    prompt = prompt.slice(1);
  }
  for (const { role, content } of prompt) {
    switch (role) {
      case "system": {
        throw new InvalidPromptError({
          // Fixed: this was a double-quoted string, so the literal text
          // "${content}" was emitted instead of the offending content.
          message: `Unexpected system message in prompt: ${content}`,
          prompt
        });
      }
      case "user": {
        // Only text parts are representable; non-text parts map to
        // undefined and are dropped by filter(Boolean).
        const userMessage = content.map((part) => {
          switch (part.type) {
            case "text": {
              return part.text;
            }
          }
        }).filter(Boolean).join("");
        text += `${user}:
${userMessage}

`;
        break;
      }
      case "assistant": {
        const assistantMessage = content.map((part) => {
          switch (part.type) {
            case "text": {
              return part.text;
            }
            case "tool-call": {
              throw new UnsupportedFunctionalityError3({
                functionality: "tool-call messages"
              });
            }
          }
        }).join("");
        text += `${assistant}:
${assistantMessage}

`;
        break;
      }
      case "tool": {
        throw new UnsupportedFunctionalityError3({
          functionality: "tool messages"
        });
      }
      default: {
        const _exhaustiveCheck = role;
        throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
      }
    }
  }
  // Trailing assistant tag: the model continues from here.
  text += `${assistant}:
`;
  return {
    prompt: text,
    stopSequences: [`
${user}:`]
  };
}
1242
+
1243
+ // src/openai-completion-options.ts
1244
+ import { z as z6 } from "zod/v4";
1245
// Provider options accepted by the legacy Completions API models
// (validated via parseProviderOptions in OpenAICompletionLanguageModel).
var openaiCompletionProviderOptions = z6.object({
  /**
  Echo back the prompt in addition to the completion.
     */
  echo: z6.boolean().optional(),
  /**
  Modify the likelihood of specified tokens appearing in the completion.

  Accepts a JSON object that maps tokens (specified by their token ID in
  the GPT tokenizer) to an associated bias value from -100 to 100. You
  can use this tokenizer tool to convert text to token IDs. Mathematically,
  the bias is added to the logits generated by the model prior to sampling.
  The exact effect will vary per model, but values between -1 and 1 should
  decrease or increase likelihood of selection; values like -100 or 100
  should result in a ban or exclusive selection of the relevant token.

  As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
  token from being generated.
     */
  logitBias: z6.record(z6.string(), z6.number()).optional(),
  /**
  The suffix that comes after a completion of inserted text.
     */
  suffix: z6.string().optional(),
  /**
  A unique identifier representing your end-user, which can help OpenAI to
  monitor and detect abuse. Learn more.
     */
  user: z6.string().optional(),
  /**
  Return the log probabilities of the tokens. Including logprobs will increase
  the response size and can slow down response times. However, it can
  be useful to better understand how the model is behaving.
  Setting to true will return the log probabilities of the tokens that
  were generated.
  Setting to a number will return the log probabilities of the top n
  tokens that were generated.
     */
  logprobs: z6.union([z6.boolean(), z6.number()]).optional()
});
1285
+
1286
+ // src/openai-completion-language-model.ts
1287
// Language model implementation for the legacy OpenAI Completions API
// (spec v2). Supports generate and stream; tools, topK, and non-text
// response formats are reported as unsupported-setting warnings.
var OpenAICompletionLanguageModel = class {
  constructor(modelId, config) {
    this.specificationVersion = "v2";
    this.supportedUrls = {
      // No URLs are supported for completion models.
    };
    this.modelId = modelId;
    this.config = config;
  }
  // First segment of the provider id (e.g. "openai" from "openai.completion"),
  // used as the secondary provider-options namespace in getArgs.
  get providerOptionsName() {
    return this.config.provider.split(".")[0].trim();
  }
  get provider() {
    return this.config.provider;
  }
  // Builds the request body shared by doGenerate/doStream and collects
  // warnings for settings this API cannot express.
  async getArgs({
    prompt,
    maxOutputTokens,
    temperature,
    topP,
    topK,
    frequencyPenalty,
    presencePenalty,
    stopSequences: userStopSequences,
    responseFormat,
    tools,
    toolChoice,
    seed,
    providerOptions
  }) {
    const warnings = [];
    // Options under the provider-specific namespace override the generic
    // "openai" namespace (later spread wins).
    const openaiOptions = {
      ...await parseProviderOptions2({
        provider: "openai",
        providerOptions,
        schema: openaiCompletionProviderOptions
      }),
      ...await parseProviderOptions2({
        provider: this.providerOptionsName,
        providerOptions,
        schema: openaiCompletionProviderOptions
      })
    };
    if (topK != null) {
      warnings.push({ type: "unsupported-setting", setting: "topK" });
    }
    if (tools == null ? void 0 : tools.length) {
      warnings.push({ type: "unsupported-setting", setting: "tools" });
    }
    if (toolChoice != null) {
      warnings.push({ type: "unsupported-setting", setting: "toolChoice" });
    }
    if (responseFormat != null && responseFormat.type !== "text") {
      warnings.push({
        type: "unsupported-setting",
        setting: "responseFormat",
        details: "JSON response format is not supported."
      });
    }
    const { prompt: completionPrompt, stopSequences } = convertToOpenAICompletionPrompt({ prompt });
    // Prompt-derived stop sequences come first, then user-supplied ones.
    const stop = [...stopSequences != null ? stopSequences : [], ...userStopSequences != null ? userStopSequences : []];
    return {
      args: {
        // model id:
        model: this.modelId,
        // model specific settings:
        echo: openaiOptions.echo,
        logit_bias: openaiOptions.logitBias,
        // logprobs: true -> 0 (API minimum), false -> omitted, number passthrough
        logprobs: (openaiOptions == null ? void 0 : openaiOptions.logprobs) === true ? 0 : (openaiOptions == null ? void 0 : openaiOptions.logprobs) === false ? void 0 : openaiOptions == null ? void 0 : openaiOptions.logprobs,
        suffix: openaiOptions.suffix,
        user: openaiOptions.user,
        // standardized settings:
        max_tokens: maxOutputTokens,
        temperature,
        top_p: topP,
        frequency_penalty: frequencyPenalty,
        presence_penalty: presencePenalty,
        seed,
        // prompt:
        prompt: completionPrompt,
        // stop sequences:
        stop: stop.length > 0 ? stop : void 0
      },
      warnings
    };
  }
  // Single-shot completion: POSTs to /completions and maps the first choice.
  async doGenerate(options) {
    var _a, _b, _c;
    const { args, warnings } = await this.getArgs(options);
    const {
      responseHeaders,
      value: response,
      rawValue: rawResponse
    } = await postJsonToApi2({
      url: this.config.url({
        path: "/completions",
        modelId: this.modelId
      }),
      headers: combineHeaders2(this.config.headers(), options.headers),
      body: args,
      failedResponseHandler: openaiFailedResponseHandler,
      successfulResponseHandler: createJsonResponseHandler2(
        openaiCompletionResponseSchema
      ),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
    const choice = response.choices[0];
    const providerMetadata = { openai: {} };
    if (choice.logprobs != null) {
      providerMetadata.openai.logprobs = choice.logprobs;
    }
    return {
      content: [{ type: "text", text: choice.text }],
      usage: {
        inputTokens: (_a = response.usage) == null ? void 0 : _a.prompt_tokens,
        outputTokens: (_b = response.usage) == null ? void 0 : _b.completion_tokens,
        totalTokens: (_c = response.usage) == null ? void 0 : _c.total_tokens
      },
      finishReason: mapOpenAIFinishReason(choice.finish_reason),
      request: { body: args },
      response: {
        ...getResponseMetadata(response),
        headers: responseHeaders,
        body: rawResponse
      },
      providerMetadata,
      warnings
    };
  }
  // Streaming completion: POSTs with stream:true and transforms SSE chunks
  // into the SDK's stream-part protocol (stream-start / text-* / finish).
  async doStream(options) {
    const { args, warnings } = await this.getArgs(options);
    const body = {
      ...args,
      stream: true,
      stream_options: {
        include_usage: true
      }
    };
    const { responseHeaders, value: response } = await postJsonToApi2({
      url: this.config.url({
        path: "/completions",
        modelId: this.modelId
      }),
      headers: combineHeaders2(this.config.headers(), options.headers),
      body,
      failedResponseHandler: openaiFailedResponseHandler,
      successfulResponseHandler: createEventSourceResponseHandler2(
        openaiCompletionChunkSchema
      ),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
    let finishReason = "unknown";
    const providerMetadata = { openai: {} };
    const usage = {
      inputTokens: void 0,
      outputTokens: void 0,
      totalTokens: void 0
    };
    let isFirstChunk = true;
    return {
      stream: response.pipeThrough(
        new TransformStream({
          start(controller) {
            controller.enqueue({ type: "stream-start", warnings });
          },
          transform(chunk, controller) {
            if (options.includeRawChunks) {
              controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
            }
            // Schema-parse failures and in-band API errors both surface as
            // error parts and mark the finish reason.
            if (!chunk.success) {
              finishReason = "error";
              controller.enqueue({ type: "error", error: chunk.error });
              return;
            }
            const value = chunk.value;
            if ("error" in value) {
              finishReason = "error";
              controller.enqueue({ type: "error", error: value.error });
              return;
            }
            // First valid chunk carries response metadata and opens the
            // single text part (id "0").
            if (isFirstChunk) {
              isFirstChunk = false;
              controller.enqueue({
                type: "response-metadata",
                ...getResponseMetadata(value)
              });
              controller.enqueue({ type: "text-start", id: "0" });
            }
            if (value.usage != null) {
              usage.inputTokens = value.usage.prompt_tokens;
              usage.outputTokens = value.usage.completion_tokens;
              usage.totalTokens = value.usage.total_tokens;
            }
            const choice = value.choices[0];
            if ((choice == null ? void 0 : choice.finish_reason) != null) {
              finishReason = mapOpenAIFinishReason(choice.finish_reason);
            }
            if ((choice == null ? void 0 : choice.logprobs) != null) {
              providerMetadata.openai.logprobs = choice.logprobs;
            }
            if ((choice == null ? void 0 : choice.text) != null && choice.text.length > 0) {
              controller.enqueue({
                type: "text-delta",
                id: "0",
                delta: choice.text
              });
            }
          },
          flush(controller) {
            // Only close the text part if it was ever opened.
            if (!isFirstChunk) {
              controller.enqueue({ type: "text-end", id: "0" });
            }
            controller.enqueue({
              type: "finish",
              finishReason,
              providerMetadata,
              usage
            });
          }
        })
      ),
      request: { body },
      response: { headers: responseHeaders }
    };
  }
};
1515
// Usage block for the Completions API; complete when present
// (the response/chunk schemas wrap it in .nullish()).
var usageSchema = z7.object({
  prompt_tokens: z7.number(),
  completion_tokens: z7.number(),
  total_tokens: z7.number()
});
// Non-streaming completion response.
var openaiCompletionResponseSchema = z7.object({
  id: z7.string().nullish(),
  created: z7.number().nullish(),
  model: z7.string().nullish(),
  choices: z7.array(
    z7.object({
      text: z7.string(),
      finish_reason: z7.string(),
      logprobs: z7.object({
        tokens: z7.array(z7.string()),
        token_logprobs: z7.array(z7.number()),
        top_logprobs: z7.array(z7.record(z7.string(), z7.number())).nullish()
      }).nullish()
    })
  ),
  usage: usageSchema.nullish()
});
// Streaming chunk: either a data chunk or an error payload
// (openaiErrorDataSchema, defined earlier in this file).
var openaiCompletionChunkSchema = z7.union([
  z7.object({
    id: z7.string().nullish(),
    created: z7.number().nullish(),
    model: z7.string().nullish(),
    choices: z7.array(
      z7.object({
        text: z7.string(),
        finish_reason: z7.string().nullish(),
        index: z7.number(),
        logprobs: z7.object({
          tokens: z7.array(z7.string()),
          token_logprobs: z7.array(z7.number()),
          top_logprobs: z7.array(z7.record(z7.string(), z7.number())).nullish()
        }).nullish()
      })
    ),
    usage: usageSchema.nullish()
  }),
  openaiErrorDataSchema
]);
1558
+
1559
+ // src/openai-embedding-model.ts
1560
+ import {
1561
+ TooManyEmbeddingValuesForCallError
1562
+ } from "@ai-sdk/provider";
1563
+ import {
1564
+ combineHeaders as combineHeaders3,
1565
+ createJsonResponseHandler as createJsonResponseHandler3,
1566
+ parseProviderOptions as parseProviderOptions3,
1567
+ postJsonToApi as postJsonToApi3
1568
+ } from "@ai-sdk/provider-utils";
1569
+ import { z as z9 } from "zod/v4";
1570
+
1571
+ // src/openai-embedding-options.ts
1572
+ import { z as z8 } from "zod/v4";
1573
// Provider options accepted by the embeddings endpoint
// (validated via parseProviderOptions in OpenAIEmbeddingModel.doEmbed).
var openaiEmbeddingProviderOptions = z8.object({
  /**
  The number of dimensions the resulting output embeddings should have.
  Only supported in text-embedding-3 and later models.
     */
  dimensions: z8.number().optional(),
  /**
  A unique identifier representing your end-user, which can help OpenAI to
  monitor and detect abuse. Learn more.
     */
  user: z8.string().optional()
});
1585
+
1586
+ // src/openai-embedding-model.ts
1587
// Embedding model for the OpenAI /embeddings endpoint (spec v2).
// Batches up to 2048 values per call and supports parallel calls.
var OpenAIEmbeddingModel = class {
  constructor(modelId, config) {
    this.specificationVersion = "v2";
    this.maxEmbeddingsPerCall = 2048;
    this.supportsParallelCalls = true;
    this.modelId = modelId;
    this.config = config;
  }
  get provider() {
    return this.config.provider;
  }
  // POSTs the values to /embeddings and returns the raw float vectors.
  // Throws TooManyEmbeddingValuesForCallError when the batch limit is
  // exceeded (the caller is expected to split the batch).
  async doEmbed({
    values,
    headers,
    abortSignal,
    providerOptions
  }) {
    var _a;
    if (values.length > this.maxEmbeddingsPerCall) {
      throw new TooManyEmbeddingValuesForCallError({
        provider: this.provider,
        modelId: this.modelId,
        maxEmbeddingsPerCall: this.maxEmbeddingsPerCall,
        values
      });
    }
    const openaiOptions = (_a = await parseProviderOptions3({
      provider: "openai",
      providerOptions,
      schema: openaiEmbeddingProviderOptions
    })) != null ? _a : {};
    const {
      responseHeaders,
      value: response,
      rawValue
    } = await postJsonToApi3({
      url: this.config.url({
        path: "/embeddings",
        modelId: this.modelId
      }),
      headers: combineHeaders3(this.config.headers(), headers),
      body: {
        model: this.modelId,
        input: values,
        // Always request float vectors (not base64-encoded).
        encoding_format: "float",
        dimensions: openaiOptions.dimensions,
        user: openaiOptions.user
      },
      failedResponseHandler: openaiFailedResponseHandler,
      successfulResponseHandler: createJsonResponseHandler3(
        openaiTextEmbeddingResponseSchema
      ),
      abortSignal,
      fetch: this.config.fetch
    });
    return {
      embeddings: response.data.map((item) => item.embedding),
      usage: response.usage ? { tokens: response.usage.prompt_tokens } : void 0,
      response: { headers: responseHeaders, body: rawValue }
    };
  }
};
1649
// Minimal embeddings response shape: one float vector per input value,
// with optional prompt-token usage.
var openaiTextEmbeddingResponseSchema = z9.object({
  data: z9.array(z9.object({ embedding: z9.array(z9.number()) })),
  usage: z9.object({ prompt_tokens: z9.number() }).nullish()
});
1653
+
1654
+ // src/openai-image-model.ts
1655
+ import {
1656
+ combineHeaders as combineHeaders4,
1657
+ createJsonResponseHandler as createJsonResponseHandler4,
1658
+ postJsonToApi as postJsonToApi4
1659
+ } from "@ai-sdk/provider-utils";
1660
+ import { z as z10 } from "zod/v4";
1661
+
1662
+ // src/openai-image-settings.ts
1663
// Maximum images per API call by model id; models not listed default to 1
// (see OpenAIImageModel.maxImagesPerCall).
var modelMaxImagesPerCall = {
  "dall-e-3": 1,
  "dall-e-2": 10,
  "gpt-image-1": 10
};
// Models for which the request omits `response_format: "b64_json"`
// (see OpenAIImageModel.doGenerate); gpt-image-1 returns base64 by default.
var hasDefaultResponseFormat = /* @__PURE__ */ new Set(["gpt-image-1"]);
1669
+
1670
+ // src/openai-image-model.ts
1671
// Image model for the OpenAI /images/generations endpoint (spec v2).
// aspectRatio and seed are not supported and surface as warnings.
var OpenAIImageModel = class {
  constructor(modelId, config) {
    this.modelId = modelId;
    this.config = config;
    this.specificationVersion = "v2";
  }
  // Per-model batch limit; unknown models default to 1.
  get maxImagesPerCall() {
    var _a;
    return (_a = modelMaxImagesPerCall[this.modelId]) != null ? _a : 1;
  }
  get provider() {
    return this.config.provider;
  }
  // Generates images and returns them as base64 strings plus per-image
  // revised prompts (when the API supplies them) in providerMetadata.
  async doGenerate({
    prompt,
    n,
    size,
    aspectRatio,
    seed,
    providerOptions,
    headers,
    abortSignal
  }) {
    var _a, _b, _c, _d;
    const warnings = [];
    if (aspectRatio != null) {
      warnings.push({
        type: "unsupported-setting",
        setting: "aspectRatio",
        details: "This model does not support aspect ratio. Use `size` instead."
      });
    }
    if (seed != null) {
      warnings.push({ type: "unsupported-setting", setting: "seed" });
    }
    // _internal.currentDate is a test hook; defaults to wall-clock time.
    const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
    const { value: response, responseHeaders } = await postJsonToApi4({
      url: this.config.url({
        path: "/images/generations",
        modelId: this.modelId
      }),
      headers: combineHeaders4(this.config.headers(), headers),
      body: {
        model: this.modelId,
        prompt,
        n,
        size,
        // Raw provider options are passed through to the API verbatim.
        ...(_d = providerOptions.openai) != null ? _d : {},
        // Request base64 output except for models that already default to it.
        ...!hasDefaultResponseFormat.has(this.modelId) ? { response_format: "b64_json" } : {}
      },
      failedResponseHandler: openaiFailedResponseHandler,
      successfulResponseHandler: createJsonResponseHandler4(
        openaiImageResponseSchema
      ),
      abortSignal,
      fetch: this.config.fetch
    });
    return {
      images: response.data.map((item) => item.b64_json),
      warnings,
      response: {
        timestamp: currentDate,
        modelId: this.modelId,
        headers: responseHeaders
      },
      providerMetadata: {
        openai: {
          // One entry per image, aligned by index; null when the API did
          // not revise the prompt.
          images: response.data.map(
            (item) => item.revised_prompt ? {
              revisedPrompt: item.revised_prompt
            } : null
          )
        }
      }
    };
  }
};
1748
// Image generation response: base64 payload per image, with an optional
// prompt revision produced by the model.
var openaiImageResponseSchema = z10.object({
  data: z10.array(
    z10.object({ b64_json: z10.string(), revised_prompt: z10.string().optional() })
  )
});
1753
+
1754
+ // src/openai-tools.ts
1755
// Provider-defined tools exposed on the provider instance; fileSearch and
// webSearchPreview are defined elsewhere in this file.
var openaiTools = {
  fileSearch,
  webSearchPreview
};
1759
+
1760
+ // src/openai-transcription-model.ts
1761
+ import {
1762
+ combineHeaders as combineHeaders5,
1763
+ convertBase64ToUint8Array,
1764
+ createJsonResponseHandler as createJsonResponseHandler5,
1765
+ parseProviderOptions as parseProviderOptions4,
1766
+ postFormDataToApi
1767
+ } from "@ai-sdk/provider-utils";
1768
+ import { z as z12 } from "zod/v4";
1769
+
1770
+ // src/openai-transcription-options.ts
1771
+ import { z as z11 } from "zod/v4";
1772
// Provider options accepted by the /audio/transcriptions endpoint
// (validated via parseProviderOptions in OpenAITranscriptionModel.getArgs).
var openAITranscriptionProviderOptions = z11.object({
  /**
   * Additional information to include in the transcription response.
   */
  include: z11.array(z11.string()).optional(),
  /**
   * The language of the input audio in ISO-639-1 format.
   */
  language: z11.string().optional(),
  /**
   * An optional text to guide the model's style or continue a previous audio segment.
   */
  prompt: z11.string().optional(),
  /**
   * The sampling temperature, between 0 and 1.
   * @default 0
   */
  temperature: z11.number().min(0).max(1).default(0).optional(),
  /**
   * The timestamp granularities to populate for this transcription.
   * @default ['segment']
   */
  timestampGranularities: z11.array(z11.enum(["word", "segment"])).default(["segment"]).optional()
});
1796
+
1797
+ // src/openai-transcription-model.ts
1798
// Maps spelled-out language names (as returned in the transcription
// response's `language` field) to ISO-639-1 codes. Names not in this
// table yield an undefined language (see OpenAITranscriptionModel.doGenerate).
var languageMap = {
  afrikaans: "af",
  arabic: "ar",
  armenian: "hy",
  azerbaijani: "az",
  belarusian: "be",
  bosnian: "bs",
  bulgarian: "bg",
  catalan: "ca",
  chinese: "zh",
  croatian: "hr",
  czech: "cs",
  danish: "da",
  dutch: "nl",
  english: "en",
  estonian: "et",
  finnish: "fi",
  french: "fr",
  galician: "gl",
  german: "de",
  greek: "el",
  hebrew: "he",
  hindi: "hi",
  hungarian: "hu",
  icelandic: "is",
  indonesian: "id",
  italian: "it",
  japanese: "ja",
  kannada: "kn",
  kazakh: "kk",
  korean: "ko",
  latvian: "lv",
  lithuanian: "lt",
  macedonian: "mk",
  malay: "ms",
  marathi: "mr",
  maori: "mi",
  nepali: "ne",
  norwegian: "no",
  persian: "fa",
  polish: "pl",
  portuguese: "pt",
  romanian: "ro",
  russian: "ru",
  serbian: "sr",
  slovak: "sk",
  slovenian: "sl",
  spanish: "es",
  swahili: "sw",
  swedish: "sv",
  tagalog: "tl",
  tamil: "ta",
  thai: "th",
  turkish: "tr",
  ukrainian: "uk",
  urdu: "ur",
  vietnamese: "vi",
  welsh: "cy"
};
1857
// Transcription model for the OpenAI /audio/transcriptions endpoint
// (spec v2). Sends the audio as multipart form data and maps the verbose
// word-level response into SDK segments.
var OpenAITranscriptionModel = class {
  constructor(modelId, config) {
    this.modelId = modelId;
    this.config = config;
    this.specificationVersion = "v2";
  }
  get provider() {
    return this.config.provider;
  }
  // Builds the multipart form: model id, the audio file, and any validated
  // provider options flattened into form fields.
  async getArgs({
    audio,
    mediaType,
    providerOptions
  }) {
    const warnings = [];
    const openAIOptions = await parseProviderOptions4({
      provider: "openai",
      providerOptions,
      schema: openAITranscriptionProviderOptions
    });
    const formData = new FormData();
    // Audio may arrive as raw bytes or as a base64 string.
    const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([convertBase64ToUint8Array(audio)]);
    formData.append("model", this.modelId);
    formData.append("file", new File([blob], "audio", { type: mediaType }));
    if (openAIOptions) {
      const transcriptionModelOptions = {
        include: openAIOptions.include,
        language: openAIOptions.language,
        prompt: openAIOptions.prompt,
        temperature: openAIOptions.temperature,
        timestamp_granularities: openAIOptions.timestampGranularities
      };
      for (const [key, value] of Object.entries(transcriptionModelOptions)) {
        if (value != null) {
          // NOTE(review): String(value) serializes array options (include,
          // timestamp_granularities) as a single comma-joined field rather
          // than repeated entries — confirm the API accepts this form.
          formData.append(key, String(value));
        }
      }
    }
    return {
      formData,
      warnings
    };
  }
  // POSTs the form to /audio/transcriptions and maps words to segments.
  async doGenerate(options) {
    var _a, _b, _c, _d, _e, _f;
    // _internal.currentDate is a test hook; defaults to wall-clock time.
    const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
    const { formData, warnings } = await this.getArgs(options);
    const {
      value: response,
      responseHeaders,
      rawValue: rawResponse
    } = await postFormDataToApi({
      url: this.config.url({
        path: "/audio/transcriptions",
        modelId: this.modelId
      }),
      headers: combineHeaders5(this.config.headers(), options.headers),
      formData,
      failedResponseHandler: openaiFailedResponseHandler,
      successfulResponseHandler: createJsonResponseHandler5(
        openaiTranscriptionResponseSchema
      ),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
    // Translate the spelled-out language name to ISO-639-1 when known.
    const language = response.language != null && response.language in languageMap ? languageMap[response.language] : void 0;
    return {
      text: response.text,
      segments: (_e = (_d = response.words) == null ? void 0 : _d.map((word) => ({
        text: word.word,
        startSecond: word.start,
        endSecond: word.end
      }))) != null ? _e : [],
      language,
      durationInSeconds: (_f = response.duration) != null ? _f : void 0,
      warnings,
      response: {
        timestamp: currentDate,
        modelId: this.modelId,
        headers: responseHeaders,
        body: rawResponse
      }
    };
  }
};
1942
// Transcription response: full text plus optional language name, duration
// in seconds, and word-level timestamps.
var openaiTranscriptionResponseSchema = z12.object({
  text: z12.string(),
  language: z12.string().nullish(),
  duration: z12.number().nullish(),
  words: z12.array(
    z12.object({
      word: z12.string(),
      start: z12.number(),
      end: z12.number()
    })
  ).nullish()
});
1954
+
1955
+ // src/responses/openai-responses-language-model.ts
1956
+ import {
1957
+ APICallError
1958
+ } from "@ai-sdk/provider";
1959
+ import {
1960
+ combineHeaders as combineHeaders6,
1961
+ createEventSourceResponseHandler as createEventSourceResponseHandler3,
1962
+ createJsonResponseHandler as createJsonResponseHandler6,
1963
+ generateId as generateId2,
1964
+ parseProviderOptions as parseProviderOptions6,
1965
+ postJsonToApi as postJsonToApi5
1966
+ } from "@ai-sdk/provider-utils";
1967
+ import { z as z14 } from "zod/v4";
1968
+
1969
+ // src/responses/convert-to-openai-responses-messages.ts
1970
+ import {
1971
+ UnsupportedFunctionalityError as UnsupportedFunctionalityError4
1972
+ } from "@ai-sdk/provider";
1973
+ import { parseProviderOptions as parseProviderOptions5 } from "@ai-sdk/provider-utils";
1974
+ import { z as z13 } from "zod/v4";
1975
+ import { convertToBase64 as convertToBase642 } from "@ai-sdk/provider-utils";
1976
+ async function convertToOpenAIResponsesMessages({
1977
+ prompt,
1978
+ systemMessageMode
1979
+ }) {
1980
+ var _a, _b, _c, _d, _e, _f;
1981
+ const messages = [];
1982
+ const warnings = [];
1983
+ for (const { role, content } of prompt) {
1984
+ switch (role) {
1985
+ case "system": {
1986
+ switch (systemMessageMode) {
1987
+ case "system": {
1988
+ messages.push({ role: "system", content });
1989
+ break;
1990
+ }
1991
+ case "developer": {
1992
+ messages.push({ role: "developer", content });
1993
+ break;
1994
+ }
1995
+ case "remove": {
1996
+ warnings.push({
1997
+ type: "other",
1998
+ message: "system messages are removed for this model"
1999
+ });
2000
+ break;
2001
+ }
2002
+ default: {
2003
+ const _exhaustiveCheck = systemMessageMode;
2004
+ throw new Error(
2005
+ `Unsupported system message mode: ${_exhaustiveCheck}`
2006
+ );
2007
+ }
2008
+ }
2009
+ break;
2010
+ }
2011
+ case "user": {
2012
+ messages.push({
2013
+ role: "user",
2014
+ content: content.map((part, index) => {
2015
+ var _a2, _b2, _c2;
2016
+ switch (part.type) {
2017
+ case "text": {
2018
+ return { type: "input_text", text: part.text };
2019
+ }
2020
+ case "file": {
2021
+ if (part.mediaType.startsWith("image/")) {
2022
+ const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
2023
+ return {
2024
+ type: "input_image",
2025
+ ...part.data instanceof URL ? { image_url: part.data.toString() } : typeof part.data === "string" && part.data.startsWith("file-") ? { file_id: part.data } : {
2026
+ image_url: `data:${mediaType};base64,${part.data}`
2027
+ },
2028
+ detail: (_b2 = (_a2 = part.providerOptions) == null ? void 0 : _a2.openai) == null ? void 0 : _b2.imageDetail
2029
+ };
2030
+ } else if (part.mediaType === "application/pdf") {
2031
+ if (part.data instanceof URL) {
2032
+ throw new UnsupportedFunctionalityError4({
2033
+ functionality: "PDF file parts with URLs"
2034
+ });
2035
+ }
2036
+ return {
2037
+ type: "input_file",
2038
+ ...typeof part.data === "string" && part.data.startsWith("file-") ? { file_id: part.data } : {
2039
+ filename: (_c2 = part.filename) != null ? _c2 : `part-${index}.pdf`,
2040
+ file_data: `data:application/pdf;base64,${convertToBase642(part.data)}`
2041
+ }
2042
+ };
2043
+ } else {
2044
+ throw new UnsupportedFunctionalityError4({
2045
+ functionality: `file part media type ${part.mediaType}`
2046
+ });
2047
+ }
2048
+ }
2049
+ }
2050
+ })
2051
+ });
2052
+ break;
2053
+ }
2054
+ case "assistant": {
2055
+ const reasoningMessages = {};
2056
+ for (const part of content) {
2057
+ switch (part.type) {
2058
+ case "text": {
2059
+ messages.push({
2060
+ role: "assistant",
2061
+ content: [{ type: "output_text", text: part.text }],
2062
+ id: (_c = (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.itemId) != null ? _c : void 0
2063
+ });
2064
+ break;
2065
+ }
2066
+ case "tool-call": {
2067
+ if (part.providerExecuted) {
2068
+ break;
2069
+ }
2070
+ messages.push({
2071
+ type: "function_call",
2072
+ call_id: part.toolCallId,
2073
+ name: part.toolName,
2074
+ arguments: JSON.stringify(part.input),
2075
+ id: (_f = (_e = (_d = part.providerOptions) == null ? void 0 : _d.openai) == null ? void 0 : _e.itemId) != null ? _f : void 0
2076
+ });
2077
+ break;
2078
+ }
2079
+ case "tool-result": {
2080
+ warnings.push({
2081
+ type: "other",
2082
+ message: `tool result parts in assistant messages are not supported for OpenAI responses`
2083
+ });
2084
+ break;
2085
+ }
2086
+ case "reasoning": {
2087
+ const providerOptions = await parseProviderOptions5({
2088
+ provider: "openai",
2089
+ providerOptions: part.providerOptions,
2090
+ schema: openaiResponsesReasoningProviderOptionsSchema
2091
+ });
2092
+ const reasoningId = providerOptions == null ? void 0 : providerOptions.itemId;
2093
+ if (reasoningId != null) {
2094
+ const existingReasoningMessage = reasoningMessages[reasoningId];
2095
+ const summaryParts = [];
2096
+ if (part.text.length > 0) {
2097
+ summaryParts.push({ type: "summary_text", text: part.text });
2098
+ } else if (existingReasoningMessage !== void 0) {
2099
+ warnings.push({
2100
+ type: "other",
2101
+ message: `Cannot append empty reasoning part to existing reasoning sequence. Skipping reasoning part: ${JSON.stringify(part)}.`
2102
+ });
2103
+ }
2104
+ if (existingReasoningMessage === void 0) {
2105
+ reasoningMessages[reasoningId] = {
2106
+ type: "reasoning",
2107
+ id: reasoningId,
2108
+ encrypted_content: providerOptions == null ? void 0 : providerOptions.reasoningEncryptedContent,
2109
+ summary: summaryParts
2110
+ };
2111
+ messages.push(reasoningMessages[reasoningId]);
2112
+ } else {
2113
+ existingReasoningMessage.summary.push(...summaryParts);
2114
+ }
2115
+ } else {
2116
+ warnings.push({
2117
+ type: "other",
2118
+ message: `Non-OpenAI reasoning parts are not supported. Skipping reasoning part: ${JSON.stringify(part)}.`
2119
+ });
2120
+ }
2121
+ break;
2122
+ }
2123
+ }
2124
+ }
2125
+ break;
2126
+ }
2127
+ case "tool": {
2128
+ for (const part of content) {
2129
+ const output = part.output;
2130
+ let contentValue;
2131
+ switch (output.type) {
2132
+ case "text":
2133
+ case "error-text":
2134
+ contentValue = output.value;
2135
+ break;
2136
+ case "content":
2137
+ case "json":
2138
+ case "error-json":
2139
+ contentValue = JSON.stringify(output.value);
2140
+ break;
2141
+ }
2142
+ messages.push({
2143
+ type: "function_call_output",
2144
+ call_id: part.toolCallId,
2145
+ output: contentValue
2146
+ });
2147
+ }
2148
+ break;
2149
+ }
2150
+ default: {
2151
+ const _exhaustiveCheck = role;
2152
+ throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
2153
+ }
2154
+ }
2155
+ }
2156
+ return { messages, warnings };
2157
+ }
2158
+ var openaiResponsesReasoningProviderOptionsSchema = z13.object({
2159
+ itemId: z13.string().nullish(),
2160
+ reasoningEncryptedContent: z13.string().nullish()
2161
+ });
2162
+
2163
+ // src/responses/map-openai-responses-finish-reason.ts
2164
+ function mapOpenAIResponseFinishReason({
2165
+ finishReason,
2166
+ hasToolCalls
2167
+ }) {
2168
+ switch (finishReason) {
2169
+ case void 0:
2170
+ case null:
2171
+ return hasToolCalls ? "tool-calls" : "stop";
2172
+ case "max_output_tokens":
2173
+ return "length";
2174
+ case "content_filter":
2175
+ return "content-filter";
2176
+ default:
2177
+ return hasToolCalls ? "tool-calls" : "unknown";
2178
+ }
2179
+ }
2180
+
2181
+ // src/responses/openai-responses-prepare-tools.ts
2182
+ import {
2183
+ UnsupportedFunctionalityError as UnsupportedFunctionalityError5
2184
+ } from "@ai-sdk/provider";
2185
+ function prepareResponsesTools({
2186
+ tools,
2187
+ toolChoice,
2188
+ strictJsonSchema
2189
+ }) {
2190
+ tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
2191
+ const toolWarnings = [];
2192
+ if (tools == null) {
2193
+ return { tools: void 0, toolChoice: void 0, toolWarnings };
2194
+ }
2195
+ const openaiTools2 = [];
2196
+ for (const tool of tools) {
2197
+ switch (tool.type) {
2198
+ case "function":
2199
+ openaiTools2.push({
2200
+ type: "function",
2201
+ name: tool.name,
2202
+ description: tool.description,
2203
+ parameters: tool.inputSchema,
2204
+ strict: strictJsonSchema
2205
+ });
2206
+ break;
2207
+ case "provider-defined":
2208
+ switch (tool.id) {
2209
+ case "openai.file_search": {
2210
+ const args = fileSearchArgsSchema.parse(tool.args);
2211
+ openaiTools2.push({
2212
+ type: "file_search",
2213
+ vector_store_ids: args.vectorStoreIds,
2214
+ max_num_results: args.maxNumResults,
2215
+ ranking_options: args.ranking ? { ranker: args.ranking.ranker } : void 0,
2216
+ filters: args.filters
2217
+ });
2218
+ break;
2219
+ }
2220
+ case "openai.web_search_preview":
2221
+ openaiTools2.push({
2222
+ type: "web_search_preview",
2223
+ search_context_size: tool.args.searchContextSize,
2224
+ user_location: tool.args.userLocation
2225
+ });
2226
+ break;
2227
+ default:
2228
+ toolWarnings.push({ type: "unsupported-tool", tool });
2229
+ break;
2230
+ }
2231
+ break;
2232
+ default:
2233
+ toolWarnings.push({ type: "unsupported-tool", tool });
2234
+ break;
2235
+ }
2236
+ }
2237
+ if (toolChoice == null) {
2238
+ return { tools: openaiTools2, toolChoice: void 0, toolWarnings };
2239
+ }
2240
+ const type = toolChoice.type;
2241
+ switch (type) {
2242
+ case "auto":
2243
+ case "none":
2244
+ case "required":
2245
+ return { tools: openaiTools2, toolChoice: type, toolWarnings };
2246
+ case "tool":
2247
+ return {
2248
+ tools: openaiTools2,
2249
+ toolChoice: toolChoice.toolName === "file_search" ? { type: "file_search" } : toolChoice.toolName === "web_search_preview" ? { type: "web_search_preview" } : { type: "function", name: toolChoice.toolName },
2250
+ toolWarnings
2251
+ };
2252
+ default: {
2253
+ const _exhaustiveCheck = type;
2254
+ throw new UnsupportedFunctionalityError5({
2255
+ functionality: `tool choice type: ${_exhaustiveCheck}`
2256
+ });
2257
+ }
2258
+ }
2259
+ }
2260
+
2261
+ // src/responses/openai-responses-language-model.ts
2262
+ var OpenAIResponsesLanguageModel = class {
2263
+ constructor(modelId, config) {
2264
+ this.specificationVersion = "v2";
2265
+ this.supportedUrls = {
2266
+ "image/*": [/^https?:\/\/.*$/]
2267
+ };
2268
+ this.modelId = modelId;
2269
+ this.config = config;
2270
+ }
2271
+ get provider() {
2272
+ return this.config.provider;
2273
+ }
2274
+ async getArgs({
2275
+ maxOutputTokens,
2276
+ temperature,
2277
+ stopSequences,
2278
+ topP,
2279
+ topK,
2280
+ presencePenalty,
2281
+ frequencyPenalty,
2282
+ seed,
2283
+ prompt,
2284
+ providerOptions,
2285
+ tools,
2286
+ toolChoice,
2287
+ responseFormat
2288
+ }) {
2289
+ var _a, _b;
2290
+ const warnings = [];
2291
+ const modelConfig = getResponsesModelConfig(this.modelId);
2292
+ if (topK != null) {
2293
+ warnings.push({ type: "unsupported-setting", setting: "topK" });
2294
+ }
2295
+ if (seed != null) {
2296
+ warnings.push({ type: "unsupported-setting", setting: "seed" });
2297
+ }
2298
+ if (presencePenalty != null) {
2299
+ warnings.push({
2300
+ type: "unsupported-setting",
2301
+ setting: "presencePenalty"
2302
+ });
2303
+ }
2304
+ if (frequencyPenalty != null) {
2305
+ warnings.push({
2306
+ type: "unsupported-setting",
2307
+ setting: "frequencyPenalty"
2308
+ });
2309
+ }
2310
+ if (stopSequences != null) {
2311
+ warnings.push({ type: "unsupported-setting", setting: "stopSequences" });
2312
+ }
2313
+ const { messages, warnings: messageWarnings } = await convertToOpenAIResponsesMessages({
2314
+ prompt,
2315
+ systemMessageMode: modelConfig.systemMessageMode
2316
+ });
2317
+ warnings.push(...messageWarnings);
2318
+ const openaiOptions = await parseProviderOptions6({
2319
+ provider: "openai",
2320
+ providerOptions,
2321
+ schema: openaiResponsesProviderOptionsSchema
2322
+ });
2323
+ const strictJsonSchema = (_a = openaiOptions == null ? void 0 : openaiOptions.strictJsonSchema) != null ? _a : false;
2324
+ const baseArgs = {
2325
+ model: this.modelId,
2326
+ input: messages,
2327
+ temperature,
2328
+ top_p: topP,
2329
+ max_output_tokens: maxOutputTokens,
2330
+ ...((responseFormat == null ? void 0 : responseFormat.type) === "json" || (openaiOptions == null ? void 0 : openaiOptions.textVerbosity)) && {
2331
+ text: {
2332
+ ...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
2333
+ format: responseFormat.schema != null ? {
2334
+ type: "json_schema",
2335
+ strict: strictJsonSchema,
2336
+ name: (_b = responseFormat.name) != null ? _b : "response",
2337
+ description: responseFormat.description,
2338
+ schema: responseFormat.schema
2339
+ } : { type: "json_object" }
2340
+ },
2341
+ ...(openaiOptions == null ? void 0 : openaiOptions.textVerbosity) && {
2342
+ verbosity: openaiOptions.textVerbosity
2343
+ }
2344
+ }
2345
+ },
2346
+ // provider options:
2347
+ metadata: openaiOptions == null ? void 0 : openaiOptions.metadata,
2348
+ parallel_tool_calls: openaiOptions == null ? void 0 : openaiOptions.parallelToolCalls,
2349
+ previous_response_id: openaiOptions == null ? void 0 : openaiOptions.previousResponseId,
2350
+ store: openaiOptions == null ? void 0 : openaiOptions.store,
2351
+ user: openaiOptions == null ? void 0 : openaiOptions.user,
2352
+ instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
2353
+ service_tier: openaiOptions == null ? void 0 : openaiOptions.serviceTier,
2354
+ include: openaiOptions == null ? void 0 : openaiOptions.include,
2355
+ // model-specific settings:
2356
+ ...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
2357
+ reasoning: {
2358
+ ...(openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
2359
+ effort: openaiOptions.reasoningEffort
2360
+ },
2361
+ ...(openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null && {
2362
+ summary: openaiOptions.reasoningSummary
2363
+ }
2364
+ }
2365
+ },
2366
+ ...modelConfig.requiredAutoTruncation && {
2367
+ truncation: "auto"
2368
+ }
2369
+ };
2370
+ if (modelConfig.isReasoningModel) {
2371
+ if (baseArgs.temperature != null) {
2372
+ baseArgs.temperature = void 0;
2373
+ warnings.push({
2374
+ type: "unsupported-setting",
2375
+ setting: "temperature",
2376
+ details: "temperature is not supported for reasoning models"
2377
+ });
2378
+ }
2379
+ if (baseArgs.top_p != null) {
2380
+ baseArgs.top_p = void 0;
2381
+ warnings.push({
2382
+ type: "unsupported-setting",
2383
+ setting: "topP",
2384
+ details: "topP is not supported for reasoning models"
2385
+ });
2386
+ }
2387
+ } else {
2388
+ if ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null) {
2389
+ warnings.push({
2390
+ type: "unsupported-setting",
2391
+ setting: "reasoningEffort",
2392
+ details: "reasoningEffort is not supported for non-reasoning models"
2393
+ });
2394
+ }
2395
+ if ((openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) {
2396
+ warnings.push({
2397
+ type: "unsupported-setting",
2398
+ setting: "reasoningSummary",
2399
+ details: "reasoningSummary is not supported for non-reasoning models"
2400
+ });
2401
+ }
2402
+ }
2403
+ if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "flex" && !supportsFlexProcessing2(this.modelId)) {
2404
+ warnings.push({
2405
+ type: "unsupported-setting",
2406
+ setting: "serviceTier",
2407
+ details: "flex processing is only available for o3, o4-mini, and gpt-5 models"
2408
+ });
2409
+ delete baseArgs.service_tier;
2410
+ }
2411
+ if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "priority" && !supportsPriorityProcessing2(this.modelId)) {
2412
+ warnings.push({
2413
+ type: "unsupported-setting",
2414
+ setting: "serviceTier",
2415
+ details: "priority processing is only available for supported models (gpt-4, gpt-5, gpt-5-mini, o3, o4-mini) and requires Enterprise access. gpt-5-nano is not supported"
2416
+ });
2417
+ delete baseArgs.service_tier;
2418
+ }
2419
+ const {
2420
+ tools: openaiTools2,
2421
+ toolChoice: openaiToolChoice,
2422
+ toolWarnings
2423
+ } = prepareResponsesTools({
2424
+ tools,
2425
+ toolChoice,
2426
+ strictJsonSchema
2427
+ });
2428
+ return {
2429
+ args: {
2430
+ ...baseArgs,
2431
+ tools: openaiTools2,
2432
+ tool_choice: openaiToolChoice
2433
+ },
2434
+ warnings: [...warnings, ...toolWarnings]
2435
+ };
2436
+ }
2437
+ async doGenerate(options) {
2438
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i;
2439
+ const { args: body, warnings } = await this.getArgs(options);
2440
+ const url = this.config.url({
2441
+ path: "/responses",
2442
+ modelId: this.modelId
2443
+ });
2444
+ const {
2445
+ responseHeaders,
2446
+ value: response,
2447
+ rawValue: rawResponse
2448
+ } = await postJsonToApi5({
2449
+ url,
2450
+ headers: combineHeaders6(this.config.headers(), options.headers),
2451
+ body,
2452
+ failedResponseHandler: openaiFailedResponseHandler,
2453
+ successfulResponseHandler: createJsonResponseHandler6(
2454
+ z14.object({
2455
+ id: z14.string(),
2456
+ created_at: z14.number(),
2457
+ error: z14.object({
2458
+ code: z14.string(),
2459
+ message: z14.string()
2460
+ }).nullish(),
2461
+ model: z14.string(),
2462
+ output: z14.array(
2463
+ z14.discriminatedUnion("type", [
2464
+ z14.object({
2465
+ type: z14.literal("message"),
2466
+ role: z14.literal("assistant"),
2467
+ id: z14.string(),
2468
+ content: z14.array(
2469
+ z14.object({
2470
+ type: z14.literal("output_text"),
2471
+ text: z14.string(),
2472
+ annotations: z14.array(
2473
+ z14.object({
2474
+ type: z14.literal("url_citation"),
2475
+ start_index: z14.number(),
2476
+ end_index: z14.number(),
2477
+ url: z14.string(),
2478
+ title: z14.string()
2479
+ })
2480
+ )
2481
+ })
2482
+ )
2483
+ }),
2484
+ z14.object({
2485
+ type: z14.literal("function_call"),
2486
+ call_id: z14.string(),
2487
+ name: z14.string(),
2488
+ arguments: z14.string(),
2489
+ id: z14.string()
2490
+ }),
2491
+ z14.object({
2492
+ type: z14.literal("web_search_call"),
2493
+ id: z14.string(),
2494
+ status: z14.string().optional()
2495
+ }),
2496
+ z14.object({
2497
+ type: z14.literal("computer_call"),
2498
+ id: z14.string(),
2499
+ status: z14.string().optional()
2500
+ }),
2501
+ z14.object({
2502
+ type: z14.literal("file_search_call"),
2503
+ id: z14.string(),
2504
+ status: z14.string().optional()
2505
+ }),
2506
+ z14.object({
2507
+ type: z14.literal("reasoning"),
2508
+ id: z14.string(),
2509
+ encrypted_content: z14.string().nullish(),
2510
+ summary: z14.array(
2511
+ z14.object({
2512
+ type: z14.literal("summary_text"),
2513
+ text: z14.string()
2514
+ })
2515
+ )
2516
+ })
2517
+ ])
2518
+ ),
2519
+ incomplete_details: z14.object({ reason: z14.string() }).nullable(),
2520
+ usage: usageSchema2
2521
+ })
2522
+ ),
2523
+ abortSignal: options.abortSignal,
2524
+ fetch: this.config.fetch
2525
+ });
2526
+ if (response.error) {
2527
+ throw new APICallError({
2528
+ message: response.error.message,
2529
+ url,
2530
+ requestBodyValues: body,
2531
+ statusCode: 400,
2532
+ responseHeaders,
2533
+ responseBody: rawResponse,
2534
+ isRetryable: false
2535
+ });
2536
+ }
2537
+ const content = [];
2538
+ for (const part of response.output) {
2539
+ switch (part.type) {
2540
+ case "reasoning": {
2541
+ if (part.summary.length === 0) {
2542
+ part.summary.push({ type: "summary_text", text: "" });
2543
+ }
2544
+ for (const summary of part.summary) {
2545
+ content.push({
2546
+ type: "reasoning",
2547
+ text: summary.text,
2548
+ providerMetadata: {
2549
+ openai: {
2550
+ itemId: part.id,
2551
+ reasoningEncryptedContent: (_a = part.encrypted_content) != null ? _a : null
2552
+ }
2553
+ }
2554
+ });
2555
+ }
2556
+ break;
2557
+ }
2558
+ case "message": {
2559
+ for (const contentPart of part.content) {
2560
+ content.push({
2561
+ type: "text",
2562
+ text: contentPart.text,
2563
+ providerMetadata: {
2564
+ openai: {
2565
+ itemId: part.id
2566
+ }
2567
+ }
2568
+ });
2569
+ for (const annotation of contentPart.annotations) {
2570
+ content.push({
2571
+ type: "source",
2572
+ sourceType: "url",
2573
+ id: (_d = (_c = (_b = this.config).generateId) == null ? void 0 : _c.call(_b)) != null ? _d : generateId2(),
2574
+ url: annotation.url,
2575
+ title: annotation.title
2576
+ });
2577
+ }
2578
+ }
2579
+ break;
2580
+ }
2581
+ case "function_call": {
2582
+ content.push({
2583
+ type: "tool-call",
2584
+ toolCallId: part.call_id,
2585
+ toolName: part.name,
2586
+ input: part.arguments,
2587
+ providerMetadata: {
2588
+ openai: {
2589
+ itemId: part.id
2590
+ }
2591
+ }
2592
+ });
2593
+ break;
2594
+ }
2595
+ case "web_search_call": {
2596
+ content.push({
2597
+ type: "tool-call",
2598
+ toolCallId: part.id,
2599
+ toolName: "web_search_preview",
2600
+ input: "",
2601
+ providerExecuted: true
2602
+ });
2603
+ content.push({
2604
+ type: "tool-result",
2605
+ toolCallId: part.id,
2606
+ toolName: "web_search_preview",
2607
+ result: { status: part.status || "completed" },
2608
+ providerExecuted: true
2609
+ });
2610
+ break;
2611
+ }
2612
+ case "computer_call": {
2613
+ content.push({
2614
+ type: "tool-call",
2615
+ toolCallId: part.id,
2616
+ toolName: "computer_use",
2617
+ input: "",
2618
+ providerExecuted: true
2619
+ });
2620
+ content.push({
2621
+ type: "tool-result",
2622
+ toolCallId: part.id,
2623
+ toolName: "computer_use",
2624
+ result: {
2625
+ type: "computer_use_tool_result",
2626
+ status: part.status || "completed"
2627
+ },
2628
+ providerExecuted: true
2629
+ });
2630
+ break;
2631
+ }
2632
+ case "file_search_call": {
2633
+ content.push({
2634
+ type: "tool-call",
2635
+ toolCallId: part.id,
2636
+ toolName: "file_search",
2637
+ input: "",
2638
+ providerExecuted: true
2639
+ });
2640
+ content.push({
2641
+ type: "tool-result",
2642
+ toolCallId: part.id,
2643
+ toolName: "file_search",
2644
+ result: {
2645
+ type: "file_search_tool_result",
2646
+ status: part.status || "completed"
2647
+ },
2648
+ providerExecuted: true
2649
+ });
2650
+ break;
2651
+ }
2652
+ }
2653
+ }
2654
+ return {
2655
+ content,
2656
+ finishReason: mapOpenAIResponseFinishReason({
2657
+ finishReason: (_e = response.incomplete_details) == null ? void 0 : _e.reason,
2658
+ hasToolCalls: content.some((part) => part.type === "tool-call")
2659
+ }),
2660
+ usage: {
2661
+ inputTokens: response.usage.input_tokens,
2662
+ outputTokens: response.usage.output_tokens,
2663
+ totalTokens: response.usage.input_tokens + response.usage.output_tokens,
2664
+ reasoningTokens: (_g = (_f = response.usage.output_tokens_details) == null ? void 0 : _f.reasoning_tokens) != null ? _g : void 0,
2665
+ cachedInputTokens: (_i = (_h = response.usage.input_tokens_details) == null ? void 0 : _h.cached_tokens) != null ? _i : void 0
2666
+ },
2667
+ request: { body },
2668
+ response: {
2669
+ id: response.id,
2670
+ timestamp: new Date(response.created_at * 1e3),
2671
+ modelId: response.model,
2672
+ headers: responseHeaders,
2673
+ body: rawResponse
2674
+ },
2675
+ providerMetadata: {
2676
+ openai: {
2677
+ responseId: response.id
2678
+ }
2679
+ },
2680
+ warnings
2681
+ };
2682
+ }
2683
+ async doStream(options) {
2684
+ const { args: body, warnings } = await this.getArgs(options);
2685
+ const { responseHeaders, value: response } = await postJsonToApi5({
2686
+ url: this.config.url({
2687
+ path: "/responses",
2688
+ modelId: this.modelId
2689
+ }),
2690
+ headers: combineHeaders6(this.config.headers(), options.headers),
2691
+ body: {
2692
+ ...body,
2693
+ stream: true
2694
+ },
2695
+ failedResponseHandler: openaiFailedResponseHandler,
2696
+ successfulResponseHandler: createEventSourceResponseHandler3(
2697
+ openaiResponsesChunkSchema
2698
+ ),
2699
+ abortSignal: options.abortSignal,
2700
+ fetch: this.config.fetch
2701
+ });
2702
+ const self = this;
2703
+ let finishReason = "unknown";
2704
+ const usage = {
2705
+ inputTokens: void 0,
2706
+ outputTokens: void 0,
2707
+ totalTokens: void 0
2708
+ };
2709
+ let responseId = null;
2710
+ const ongoingToolCalls = {};
2711
+ let hasToolCalls = false;
2712
+ const activeReasoning = {};
2713
+ return {
2714
+ stream: response.pipeThrough(
2715
+ new TransformStream({
2716
+ start(controller) {
2717
+ controller.enqueue({ type: "stream-start", warnings });
2718
+ },
2719
+ transform(chunk, controller) {
2720
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
2721
+ if (options.includeRawChunks) {
2722
+ controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
2723
+ }
2724
+ if (!chunk.success) {
2725
+ finishReason = "error";
2726
+ controller.enqueue({ type: "error", error: chunk.error });
2727
+ return;
2728
+ }
2729
+ const value = chunk.value;
2730
+ if (isResponseOutputItemAddedChunk(value)) {
2731
+ if (value.item.type === "function_call") {
2732
+ ongoingToolCalls[value.output_index] = {
2733
+ toolName: value.item.name,
2734
+ toolCallId: value.item.call_id
2735
+ };
2736
+ controller.enqueue({
2737
+ type: "tool-input-start",
2738
+ id: value.item.call_id,
2739
+ toolName: value.item.name
2740
+ });
2741
+ } else if (value.item.type === "web_search_call") {
2742
+ ongoingToolCalls[value.output_index] = {
2743
+ toolName: "web_search_preview",
2744
+ toolCallId: value.item.id
2745
+ };
2746
+ controller.enqueue({
2747
+ type: "tool-input-start",
2748
+ id: value.item.id,
2749
+ toolName: "web_search_preview"
2750
+ });
2751
+ } else if (value.item.type === "computer_call") {
2752
+ ongoingToolCalls[value.output_index] = {
2753
+ toolName: "computer_use",
2754
+ toolCallId: value.item.id
2755
+ };
2756
+ controller.enqueue({
2757
+ type: "tool-input-start",
2758
+ id: value.item.id,
2759
+ toolName: "computer_use"
2760
+ });
2761
+ } else if (value.item.type === "message") {
2762
+ controller.enqueue({
2763
+ type: "text-start",
2764
+ id: value.item.id,
2765
+ providerMetadata: {
2766
+ openai: {
2767
+ itemId: value.item.id
2768
+ }
2769
+ }
2770
+ });
2771
+ } else if (isResponseOutputItemAddedReasoningChunk(value)) {
2772
+ activeReasoning[value.item.id] = {
2773
+ encryptedContent: value.item.encrypted_content,
2774
+ summaryParts: [0]
2775
+ };
2776
+ controller.enqueue({
2777
+ type: "reasoning-start",
2778
+ id: `${value.item.id}:0`,
2779
+ providerMetadata: {
2780
+ openai: {
2781
+ itemId: value.item.id,
2782
+ reasoningEncryptedContent: (_a = value.item.encrypted_content) != null ? _a : null
2783
+ }
2784
+ }
2785
+ });
2786
+ }
2787
+ } else if (isResponseOutputItemDoneChunk(value)) {
2788
+ if (value.item.type === "function_call") {
2789
+ ongoingToolCalls[value.output_index] = void 0;
2790
+ hasToolCalls = true;
2791
+ controller.enqueue({
2792
+ type: "tool-input-end",
2793
+ id: value.item.call_id
2794
+ });
2795
+ controller.enqueue({
2796
+ type: "tool-call",
2797
+ toolCallId: value.item.call_id,
2798
+ toolName: value.item.name,
2799
+ input: value.item.arguments,
2800
+ providerMetadata: {
2801
+ openai: {
2802
+ itemId: value.item.id
2803
+ }
2804
+ }
2805
+ });
2806
+ } else if (value.item.type === "web_search_call") {
2807
+ ongoingToolCalls[value.output_index] = void 0;
2808
+ hasToolCalls = true;
2809
+ controller.enqueue({
2810
+ type: "tool-input-end",
2811
+ id: value.item.id
2812
+ });
2813
+ controller.enqueue({
2814
+ type: "tool-call",
2815
+ toolCallId: value.item.id,
2816
+ toolName: "web_search_preview",
2817
+ input: "",
2818
+ providerExecuted: true
2819
+ });
2820
+ controller.enqueue({
2821
+ type: "tool-result",
2822
+ toolCallId: value.item.id,
2823
+ toolName: "web_search_preview",
2824
+ result: {
2825
+ type: "web_search_tool_result",
2826
+ status: value.item.status || "completed"
2827
+ },
2828
+ providerExecuted: true
2829
+ });
2830
+ } else if (value.item.type === "computer_call") {
2831
+ ongoingToolCalls[value.output_index] = void 0;
2832
+ hasToolCalls = true;
2833
+ controller.enqueue({
2834
+ type: "tool-input-end",
2835
+ id: value.item.id
2836
+ });
2837
+ controller.enqueue({
2838
+ type: "tool-call",
2839
+ toolCallId: value.item.id,
2840
+ toolName: "computer_use",
2841
+ input: "",
2842
+ providerExecuted: true
2843
+ });
2844
+ controller.enqueue({
2845
+ type: "tool-result",
2846
+ toolCallId: value.item.id,
2847
+ toolName: "computer_use",
2848
+ result: {
2849
+ type: "computer_use_tool_result",
2850
+ status: value.item.status || "completed"
2851
+ },
2852
+ providerExecuted: true
2853
+ });
2854
+ } else if (value.item.type === "message") {
2855
+ controller.enqueue({
2856
+ type: "text-end",
2857
+ id: value.item.id
2858
+ });
2859
+ } else if (isResponseOutputItemDoneReasoningChunk(value)) {
2860
+ const activeReasoningPart = activeReasoning[value.item.id];
2861
+ for (const summaryIndex of activeReasoningPart.summaryParts) {
2862
+ controller.enqueue({
2863
+ type: "reasoning-end",
2864
+ id: `${value.item.id}:${summaryIndex}`,
2865
+ providerMetadata: {
2866
+ openai: {
2867
+ itemId: value.item.id,
2868
+ reasoningEncryptedContent: (_b = value.item.encrypted_content) != null ? _b : null
2869
+ }
2870
+ }
2871
+ });
2872
+ }
2873
+ delete activeReasoning[value.item.id];
2874
+ }
2875
+ } else if (isResponseFunctionCallArgumentsDeltaChunk(value)) {
2876
+ const toolCall = ongoingToolCalls[value.output_index];
2877
+ if (toolCall != null) {
2878
+ controller.enqueue({
2879
+ type: "tool-input-delta",
2880
+ id: toolCall.toolCallId,
2881
+ delta: value.delta
2882
+ });
2883
+ }
2884
+ } else if (isResponseCreatedChunk(value)) {
2885
+ responseId = value.response.id;
2886
+ controller.enqueue({
2887
+ type: "response-metadata",
2888
+ id: value.response.id,
2889
+ timestamp: new Date(value.response.created_at * 1e3),
2890
+ modelId: value.response.model
2891
+ });
2892
+ } else if (isTextDeltaChunk(value)) {
2893
+ controller.enqueue({
2894
+ type: "text-delta",
2895
+ id: value.item_id,
2896
+ delta: value.delta
2897
+ });
2898
+ } else if (isResponseReasoningSummaryPartAddedChunk(value)) {
2899
+ if (value.summary_index > 0) {
2900
+ (_c = activeReasoning[value.item_id]) == null ? void 0 : _c.summaryParts.push(
2901
+ value.summary_index
2902
+ );
2903
+ controller.enqueue({
2904
+ type: "reasoning-start",
2905
+ id: `${value.item_id}:${value.summary_index}`,
2906
+ providerMetadata: {
2907
+ openai: {
2908
+ itemId: value.item_id,
2909
+ reasoningEncryptedContent: (_e = (_d = activeReasoning[value.item_id]) == null ? void 0 : _d.encryptedContent) != null ? _e : null
2910
+ }
2911
+ }
2912
+ });
2913
+ }
2914
+ } else if (isResponseReasoningSummaryTextDeltaChunk(value)) {
2915
+ controller.enqueue({
2916
+ type: "reasoning-delta",
2917
+ id: `${value.item_id}:${value.summary_index}`,
2918
+ delta: value.delta,
2919
+ providerMetadata: {
2920
+ openai: {
2921
+ itemId: value.item_id
2922
+ }
2923
+ }
2924
+ });
2925
+ } else if (isResponseFinishedChunk(value)) {
2926
+ finishReason = mapOpenAIResponseFinishReason({
2927
+ finishReason: (_f = value.response.incomplete_details) == null ? void 0 : _f.reason,
2928
+ hasToolCalls
2929
+ });
2930
+ usage.inputTokens = value.response.usage.input_tokens;
2931
+ usage.outputTokens = value.response.usage.output_tokens;
2932
+ usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens;
2933
+ usage.reasoningTokens = (_h = (_g = value.response.usage.output_tokens_details) == null ? void 0 : _g.reasoning_tokens) != null ? _h : void 0;
2934
+ usage.cachedInputTokens = (_j = (_i = value.response.usage.input_tokens_details) == null ? void 0 : _i.cached_tokens) != null ? _j : void 0;
2935
+ } else if (isResponseAnnotationAddedChunk(value)) {
2936
+ controller.enqueue({
2937
+ type: "source",
2938
+ sourceType: "url",
2939
+ id: (_m = (_l = (_k = self.config).generateId) == null ? void 0 : _l.call(_k)) != null ? _m : generateId2(),
2940
+ url: value.annotation.url,
2941
+ title: value.annotation.title
2942
+ });
2943
+ } else if (isErrorChunk(value)) {
2944
+ controller.enqueue({ type: "error", error: value });
2945
+ }
2946
+ },
2947
+ flush(controller) {
2948
+ controller.enqueue({
2949
+ type: "finish",
2950
+ finishReason,
2951
+ usage,
2952
+ providerMetadata: {
2953
+ openai: {
2954
+ responseId
2955
+ }
2956
+ }
2957
+ });
2958
+ }
2959
+ })
2960
+ ),
2961
+ request: { body },
2962
+ response: { headers: responseHeaders }
2963
+ };
2964
+ }
2965
+ };
2966
// Token usage reported by the Responses API. Both detail objects and their
// counters may be absent (nullish) depending on model and request.
var usageSchema2 = z14.object({
  input_tokens: z14.number(),
  input_tokens_details: z14.object({ cached_tokens: z14.number().nullish() }).nullish(),
  output_tokens: z14.number(),
  output_tokens_details: z14.object({ reasoning_tokens: z14.number().nullish() }).nullish()
});
// Incremental text for an output item, keyed by item_id.
var textDeltaChunkSchema = z14.object({
  type: z14.literal("response.output_text.delta"),
  item_id: z14.string(),
  delta: z14.string()
});
// Server-sent error event; forwarded downstream as an "error" stream part.
var errorChunkSchema = z14.object({
  type: z14.literal("error"),
  code: z14.string(),
  message: z14.string(),
  param: z14.string().nullish(),
  sequence_number: z14.number()
});
// Terminal event: either a completed or an incomplete response, with usage.
var responseFinishedChunkSchema = z14.object({
  type: z14.enum(["response.completed", "response.incomplete"]),
  response: z14.object({
    incomplete_details: z14.object({ reason: z14.string() }).nullish(),
    usage: usageSchema2
  })
});
// First event of a stream: carries the response id, timestamp, and model.
var responseCreatedChunkSchema = z14.object({
  type: z14.literal("response.created"),
  response: z14.object({
    id: z14.string(),
    created_at: z14.number(),
    model: z14.string()
  })
});
// A new output item started; discriminated on item.type.
var responseOutputItemAddedSchema = z14.object({
  type: z14.literal("response.output_item.added"),
  output_index: z14.number(),
  item: z14.discriminatedUnion("type", [
    z14.object({
      type: z14.literal("message"),
      id: z14.string()
    }),
    z14.object({
      type: z14.literal("reasoning"),
      id: z14.string(),
      encrypted_content: z14.string().nullish()
    }),
    z14.object({
      type: z14.literal("function_call"),
      id: z14.string(),
      call_id: z14.string(),
      name: z14.string(),
      arguments: z14.string()
    }),
    z14.object({
      type: z14.literal("web_search_call"),
      id: z14.string(),
      status: z14.string()
    }),
    z14.object({
      type: z14.literal("computer_call"),
      id: z14.string(),
      status: z14.string()
    }),
    z14.object({
      type: z14.literal("file_search_call"),
      id: z14.string(),
      status: z14.string()
    })
  ])
});
// An output item finished; tool-call variants additionally pin
// status === "completed" (unlike the ".added" schema above).
var responseOutputItemDoneSchema = z14.object({
  type: z14.literal("response.output_item.done"),
  output_index: z14.number(),
  item: z14.discriminatedUnion("type", [
    z14.object({
      type: z14.literal("message"),
      id: z14.string()
    }),
    z14.object({
      type: z14.literal("reasoning"),
      id: z14.string(),
      encrypted_content: z14.string().nullish()
    }),
    z14.object({
      type: z14.literal("function_call"),
      id: z14.string(),
      call_id: z14.string(),
      name: z14.string(),
      arguments: z14.string(),
      status: z14.literal("completed")
    }),
    z14.object({
      type: z14.literal("web_search_call"),
      id: z14.string(),
      status: z14.literal("completed")
    }),
    z14.object({
      type: z14.literal("computer_call"),
      id: z14.string(),
      status: z14.literal("completed")
    }),
    z14.object({
      type: z14.literal("file_search_call"),
      id: z14.string(),
      status: z14.literal("completed")
    })
  ])
});
// Incremental JSON-encoded arguments for an in-flight function call.
var responseFunctionCallArgumentsDeltaSchema = z14.object({
  type: z14.literal("response.function_call_arguments.delta"),
  item_id: z14.string(),
  output_index: z14.number(),
  delta: z14.string()
});
// URL citation attached to output text; surfaced as a "source" stream part.
var responseAnnotationAddedSchema = z14.object({
  type: z14.literal("response.output_text.annotation.added"),
  annotation: z14.object({
    type: z14.literal("url_citation"),
    url: z14.string(),
    title: z14.string()
  })
});
// A new reasoning-summary part started for the given item.
var responseReasoningSummaryPartAddedSchema = z14.object({
  type: z14.literal("response.reasoning_summary_part.added"),
  item_id: z14.string(),
  summary_index: z14.number()
});
// Incremental reasoning-summary text for a given item/summary part.
var responseReasoningSummaryTextDeltaSchema = z14.object({
  type: z14.literal("response.reasoning_summary_text.delta"),
  item_id: z14.string(),
  summary_index: z14.number(),
  delta: z14.string()
});
// Union over all known chunk shapes; the trailing loose object accepts
// any other { type: string } event so unknown chunk types do not fail
// stream parsing.
var openaiResponsesChunkSchema = z14.union([
  textDeltaChunkSchema,
  responseFinishedChunkSchema,
  responseCreatedChunkSchema,
  responseOutputItemAddedSchema,
  responseOutputItemDoneSchema,
  responseFunctionCallArgumentsDeltaSchema,
  responseAnnotationAddedSchema,
  responseReasoningSummaryPartAddedSchema,
  responseReasoningSummaryTextDeltaSchema,
  errorChunkSchema,
  z14.object({ type: z14.string() }).loose()
  // fallback for unknown chunks
]);
3113
// --- Stream chunk type guards -------------------------------------------
// Pure predicates that narrow a parsed Responses API stream chunk by its
// `type` discriminator (and, for the reasoning variants, by `item.type`).
var chunkHasType = (chunk, expectedType) => chunk.type === expectedType;
function isTextDeltaChunk(chunk) {
  return chunkHasType(chunk, "response.output_text.delta");
}
function isResponseOutputItemDoneChunk(chunk) {
  return chunkHasType(chunk, "response.output_item.done");
}
function isResponseOutputItemDoneReasoningChunk(chunk) {
  if (!isResponseOutputItemDoneChunk(chunk)) {
    return false;
  }
  return chunk.item.type === "reasoning";
}
function isResponseFinishedChunk(chunk) {
  // Both terminal event types share the finished-chunk shape.
  return ["response.completed", "response.incomplete"].includes(chunk.type);
}
function isResponseCreatedChunk(chunk) {
  return chunkHasType(chunk, "response.created");
}
function isResponseFunctionCallArgumentsDeltaChunk(chunk) {
  return chunkHasType(chunk, "response.function_call_arguments.delta");
}
function isResponseOutputItemAddedChunk(chunk) {
  return chunkHasType(chunk, "response.output_item.added");
}
function isResponseOutputItemAddedReasoningChunk(chunk) {
  if (!isResponseOutputItemAddedChunk(chunk)) {
    return false;
  }
  return chunk.item.type === "reasoning";
}
function isResponseAnnotationAddedChunk(chunk) {
  return chunkHasType(chunk, "response.output_text.annotation.added");
}
function isResponseReasoningSummaryPartAddedChunk(chunk) {
  return chunkHasType(chunk, "response.reasoning_summary_part.added");
}
function isResponseReasoningSummaryTextDeltaChunk(chunk) {
  return chunkHasType(chunk, "response.reasoning_summary_text.delta");
}
function isErrorChunk(chunk) {
  return chunkHasType(chunk, "error");
}
3149
// Derives per-model behavior for the Responses API: whether the model is a
// reasoning model and how system messages must be delivered to it.
// Reasoning families (any "o*" model, gpt-5, codex-, computer-use) use the
// "developer" role, except o1-mini / o1-preview, which accept no system
// message at all.
function getResponsesModelConfig(modelId) {
  const reasoningPrefixes = ["o", "gpt-5", "codex-", "computer-use"];
  const isReasoningModel = reasoningPrefixes.some(
    (prefix) => modelId.startsWith(prefix)
  );
  if (!isReasoningModel) {
    return {
      isReasoningModel: false,
      systemMessageMode: "system",
      requiredAutoTruncation: false
    };
  }
  const dropsSystemMessages = modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview");
  return {
    isReasoningModel: true,
    systemMessageMode: dropsSystemMessages ? "remove" : "developer",
    requiredAutoTruncation: false
  };
}
3170
// True when the model family supports the "flex" service tier
// (o3, o4-mini, and the gpt-5 family).
function supportsFlexProcessing2(modelId) {
  const flexCapablePrefixes = ["o3", "o4-mini", "gpt-5"];
  return flexCapablePrefixes.some((prefix) => modelId.startsWith(prefix));
}
3173
// True when the model family supports the "priority" service tier:
// gpt-4*, gpt-5* except gpt-5-nano, o3*, and o4-mini*.
// NOTE(fix): the original also tested `startsWith("gpt-5-mini")`, which is
// fully subsumed by `gpt-5 && !gpt-5-nano`; the redundant clause is removed
// with identical behavior.
function supportsPriorityProcessing2(modelId) {
  return (
    modelId.startsWith("gpt-4") ||
    modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") ||
    modelId.startsWith("o3") ||
    modelId.startsWith("o4-mini")
  );
}
3176
// OpenAI-specific provider options accepted by the Responses language model.
// All fields are optional; they are validated here and merged into the
// request elsewhere in this module.
var openaiResponsesProviderOptionsSchema = z14.object({
  metadata: z14.any().nullish(),
  parallelToolCalls: z14.boolean().nullish(),
  previousResponseId: z14.string().nullish(),
  store: z14.boolean().nullish(),
  user: z14.string().nullish(),
  reasoningEffort: z14.string().nullish(),
  strictJsonSchema: z14.boolean().nullish(),
  instructions: z14.string().nullish(),
  reasoningSummary: z14.string().nullish(),
  // Service tier is restricted here; flex/priority support is additionally
  // gated per model by supportsFlexProcessing2 / supportsPriorityProcessing2.
  serviceTier: z14.enum(["auto", "flex", "priority"]).nullish(),
  include: z14.array(z14.enum(["reasoning.encrypted_content", "file_search_call.results"])).nullish(),
  textVerbosity: z14.enum(["low", "medium", "high"]).nullish()
});
3190
+
3191
+ // src/openai-speech-model.ts
3192
+ import {
3193
+ combineHeaders as combineHeaders7,
3194
+ createBinaryResponseHandler,
3195
+ parseProviderOptions as parseProviderOptions7,
3196
+ postJsonToApi as postJsonToApi6
3197
+ } from "@ai-sdk/provider-utils";
3198
+ import { z as z15 } from "zod/v4";
3199
// Provider options accepted by the speech model: optional voice
// instructions and a playback speed clamped to [0.25, 4] (default 1).
var OpenAIProviderOptionsSchema = z15.object({
  instructions: z15.string().nullish(),
  speed: z15.number().min(0.25).max(4).default(1).nullish()
});
3203
// Speech (text-to-speech) model for OpenAI's POST /audio/speech endpoint.
var OpenAISpeechModel = class {
  /**
   * @param modelId speech model id passed through to the API.
   * @param config provider config (provider name, url builder, headers, fetch).
   */
  constructor(modelId, config) {
    this.modelId = modelId;
    this.config = config;
    this.specificationVersion = "v2";
  }
  get provider() {
    return this.config.provider;
  }
  /**
   * Builds the JSON request body for /audio/speech and collects warnings
   * for unsupported settings. Returns { requestBody, warnings }.
   */
  async getArgs({
    text,
    voice = "alloy",
    outputFormat = "mp3",
    speed,
    instructions,
    language,
    providerOptions
  }) {
    const warnings = [];
    const openAIOptions = await parseProviderOptions7({
      provider: "openai",
      providerOptions,
      schema: OpenAIProviderOptionsSchema
    });
    const requestBody = {
      model: this.modelId,
      input: text,
      voice,
      response_format: "mp3",
      speed,
      instructions
    };
    if (outputFormat) {
      if (["mp3", "opus", "aac", "flac", "wav", "pcm"].includes(outputFormat)) {
        requestBody.response_format = outputFormat;
      } else {
        warnings.push({
          type: "unsupported-setting",
          setting: "outputFormat",
          details: `Unsupported output format: ${outputFormat}. Using mp3 instead.`
        });
      }
    }
    // BUG FIX: the original iterated a freshly created empty object
    // (`for (const key in speechModelOptions)` over `{}`), so the parsed
    // provider options (instructions, speed) were silently discarded.
    // Copy each defined, non-null option into the request body; provider
    // options take precedence over the top-level call options above.
    if (openAIOptions) {
      for (const key in openAIOptions) {
        const value = openAIOptions[key];
        if (value != null) {
          requestBody[key] = value;
        }
      }
    }
    if (language) {
      warnings.push({
        type: "unsupported-setting",
        setting: "language",
        details: `OpenAI speech models do not support language selection. Language parameter "${language}" was ignored.`
      });
    }
    return {
      requestBody,
      warnings
    };
  }
  /**
   * Sends the speech request and returns the binary audio plus request and
   * response metadata. The response timestamp can be injected via
   * config._internal.currentDate for testing.
   */
  async doGenerate(options) {
    var _a, _b, _c;
    const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
    const { requestBody, warnings } = await this.getArgs(options);
    const {
      value: audio,
      responseHeaders,
      rawValue: rawResponse
    } = await postJsonToApi6({
      url: this.config.url({
        path: "/audio/speech",
        modelId: this.modelId
      }),
      headers: combineHeaders7(this.config.headers(), options.headers),
      body: requestBody,
      failedResponseHandler: openaiFailedResponseHandler,
      successfulResponseHandler: createBinaryResponseHandler(),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
    return {
      audio,
      warnings,
      request: {
        body: JSON.stringify(requestBody)
      },
      response: {
        timestamp: currentDate,
        modelId: this.modelId,
        headers: responseHeaders,
        body: rawResponse
      }
    };
  }
};
3302
+
3303
+ // src/openai-provider.ts
3304
/**
 * Creates an OpenAI provider. The returned value is callable
 * (provider(modelId) -> responses language model) and exposes factories for
 * chat, completion, embedding, image, transcription, and speech models.
 * The API key is resolved lazily (options.apiKey or OPENAI_API_KEY) on each
 * request via getHeaders.
 */
function createOpenAI(options = {}) {
  var _a, _b;
  const baseURL = (_a = withoutTrailingSlash(options.baseURL)) != null ? _a : "https://api.openai.com/v1";
  const providerName = (_b = options.name) != null ? _b : "openai";
  // Computed per request so a missing key only throws when a call is made.
  const getHeaders = () => ({
    Authorization: `Bearer ${loadApiKey({
      apiKey: options.apiKey,
      environmentVariableName: "OPENAI_API_KEY",
      description: "OpenAI"
    })}`,
    "OpenAI-Organization": options.organization,
    "OpenAI-Project": options.project,
    ...options.headers
  });
  const createChatModel = (modelId) => new OpenAIChatLanguageModel(modelId, {
    provider: `${providerName}.chat`,
    url: ({ path }) => `${baseURL}${path}`,
    headers: getHeaders,
    fetch: options.fetch
  });
  const createCompletionModel = (modelId) => new OpenAICompletionLanguageModel(modelId, {
    provider: `${providerName}.completion`,
    url: ({ path }) => `${baseURL}${path}`,
    headers: getHeaders,
    fetch: options.fetch
  });
  const createEmbeddingModel = (modelId) => new OpenAIEmbeddingModel(modelId, {
    provider: `${providerName}.embedding`,
    url: ({ path }) => `${baseURL}${path}`,
    headers: getHeaders,
    fetch: options.fetch
  });
  const createImageModel = (modelId) => new OpenAIImageModel(modelId, {
    provider: `${providerName}.image`,
    url: ({ path }) => `${baseURL}${path}`,
    headers: getHeaders,
    fetch: options.fetch
  });
  const createTranscriptionModel = (modelId) => new OpenAITranscriptionModel(modelId, {
    provider: `${providerName}.transcription`,
    url: ({ path }) => `${baseURL}${path}`,
    headers: getHeaders,
    fetch: options.fetch
  });
  const createSpeechModel = (modelId) => new OpenAISpeechModel(modelId, {
    provider: `${providerName}.speech`,
    url: ({ path }) => `${baseURL}${path}`,
    headers: getHeaders,
    fetch: options.fetch
  });
  // Default language model routes to the Responses API.
  const createLanguageModel = (modelId) => createResponsesModel(modelId);
  const createResponsesModel = (modelId) => {
    return new OpenAIResponsesLanguageModel(modelId, {
      provider: `${providerName}.responses`,
      url: ({ path }) => `${baseURL}${path}`,
      headers: getHeaders,
      fetch: options.fetch
    });
  };
  const provider = function(modelId) {
    // BUG FIX: this `new.target` guard originally lived inside the
    // `createLanguageModel` arrow function. Arrow functions resolve
    // `new.target` lexically — there it referred to createOpenAI's own
    // invocation (always undefined), so `new provider(...)` never threw
    // the intended error. Inside this ordinary function expression the
    // check works as intended.
    if (new.target) {
      throw new Error(
        "The OpenAI model function cannot be called with the new keyword."
      );
    }
    return createLanguageModel(modelId);
  };
  provider.languageModel = createLanguageModel;
  provider.chat = createChatModel;
  provider.completion = createCompletionModel;
  provider.responses = createResponsesModel;
  provider.embedding = createEmbeddingModel;
  provider.textEmbedding = createEmbeddingModel;
  provider.textEmbeddingModel = createEmbeddingModel;
  provider.image = createImageModel;
  provider.imageModel = createImageModel;
  provider.transcription = createTranscriptionModel;
  provider.transcriptionModel = createTranscriptionModel;
  provider.speech = createSpeechModel;
  provider.speechModel = createSpeechModel;
  provider.tools = openaiTools;
  return provider;
}
3389
// Default provider instance; resolves its API key from OPENAI_API_KEY.
var openai = createOpenAI();
3390
+ export {
3391
+ createOpenAI,
3392
+ openai
3393
+ };
3394
+ //# sourceMappingURL=index.mjs.map