@ai-sdk/openai 0.0.0-013d7476-20250808163325

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,3305 @@
+ // src/openai-chat-language-model.ts
+ import {
+   InvalidResponseDataError
+ } from "@ai-sdk/provider";
+ import {
+   combineHeaders,
+   createEventSourceResponseHandler,
+   createJsonResponseHandler,
+   generateId,
+   isParsableJson,
+   parseProviderOptions,
+   postJsonToApi
+ } from "@ai-sdk/provider-utils";
+ import { z as z5 } from "zod/v4";
+
+ // src/convert-to-openai-chat-messages.ts
+ import {
+   UnsupportedFunctionalityError
+ } from "@ai-sdk/provider";
+ import { convertToBase64 } from "@ai-sdk/provider-utils";
+ function convertToOpenAIChatMessages({
+   prompt,
+   systemMessageMode = "system"
+ }) {
+   const messages = [];
+   const warnings = [];
+   for (const { role, content } of prompt) {
+     switch (role) {
+       case "system": {
+         switch (systemMessageMode) {
+           case "system": {
+             messages.push({ role: "system", content });
+             break;
+           }
+           case "developer": {
+             messages.push({ role: "developer", content });
+             break;
+           }
+           case "remove": {
+             warnings.push({
+               type: "other",
+               message: "system messages are removed for this model"
+             });
+             break;
+           }
+           default: {
+             const _exhaustiveCheck = systemMessageMode;
+             throw new Error(
+               `Unsupported system message mode: ${_exhaustiveCheck}`
+             );
+           }
+         }
+         break;
+       }
+       case "user": {
+         if (content.length === 1 && content[0].type === "text") {
+           messages.push({ role: "user", content: content[0].text });
+           break;
+         }
+         messages.push({
+           role: "user",
+           content: content.map((part, index) => {
+             var _a, _b, _c;
+             switch (part.type) {
+               case "text": {
+                 return { type: "text", text: part.text };
+               }
+               case "file": {
+                 if (part.mediaType.startsWith("image/")) {
+                   const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
+                   return {
+                     type: "image_url",
+                     image_url: {
+                       url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${convertToBase64(part.data)}`,
+                       // OpenAI specific extension: image detail
+                       detail: (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.imageDetail
+                     }
+                   };
+                 } else if (part.mediaType.startsWith("audio/")) {
+                   if (part.data instanceof URL) {
+                     throw new UnsupportedFunctionalityError({
+                       functionality: "audio file parts with URLs"
+                     });
+                   }
+                   switch (part.mediaType) {
+                     case "audio/wav": {
+                       return {
+                         type: "input_audio",
+                         input_audio: {
+                           data: convertToBase64(part.data),
+                           format: "wav"
+                         }
+                       };
+                     }
+                     case "audio/mp3":
+                     case "audio/mpeg": {
+                       return {
+                         type: "input_audio",
+                         input_audio: {
+                           data: convertToBase64(part.data),
+                           format: "mp3"
+                         }
+                       };
+                     }
+                     default: {
+                       throw new UnsupportedFunctionalityError({
+                         functionality: `audio content parts with media type ${part.mediaType}`
+                       });
+                     }
+                   }
+                 } else if (part.mediaType === "application/pdf") {
+                   if (part.data instanceof URL) {
+                     throw new UnsupportedFunctionalityError({
+                       functionality: "PDF file parts with URLs"
+                     });
+                   }
+                   return {
+                     type: "file",
+                     file: typeof part.data === "string" && part.data.startsWith("file-") ? { file_id: part.data } : {
+                       filename: (_c = part.filename) != null ? _c : `part-${index}.pdf`,
+                       file_data: `data:application/pdf;base64,${convertToBase64(part.data)}`
+                     }
+                   };
+                 } else {
+                   throw new UnsupportedFunctionalityError({
+                     functionality: `file part media type ${part.mediaType}`
+                   });
+                 }
+               }
+             }
+           })
+         });
+         break;
+       }
+       case "assistant": {
+         let text = "";
+         const toolCalls = [];
+         for (const part of content) {
+           switch (part.type) {
+             case "text": {
+               text += part.text;
+               break;
+             }
+             case "tool-call": {
+               toolCalls.push({
+                 id: part.toolCallId,
+                 type: "function",
+                 function: {
+                   name: part.toolName,
+                   arguments: JSON.stringify(part.input)
+                 }
+               });
+               break;
+             }
+           }
+         }
+         messages.push({
+           role: "assistant",
+           content: text,
+           tool_calls: toolCalls.length > 0 ? toolCalls : void 0
+         });
+         break;
+       }
+       case "tool": {
+         for (const toolResponse of content) {
+           const output = toolResponse.output;
+           let contentValue;
+           switch (output.type) {
+             case "text":
+             case "error-text":
+               contentValue = output.value;
+               break;
+             case "content":
+             case "json":
+             case "error-json":
+               contentValue = JSON.stringify(output.value);
+               break;
+           }
+           messages.push({
+             role: "tool",
+             tool_call_id: toolResponse.toolCallId,
+             content: contentValue
+           });
+         }
+         break;
+       }
+       default: {
+         const _exhaustiveCheck = role;
+         throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
+       }
+     }
+   }
+   return { messages, warnings };
+ }
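+ // Example (illustrative): a single text part is flattened to a plain string,
+ // so { role: "user", content: [{ type: "text", text: "Hi" }] } becomes
+ // { role: "user", content: "Hi" }, while multi-part content keeps the
+ // structured array form built above.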
+
+ // src/get-response-metadata.ts
+ function getResponseMetadata({
+   id,
+   model,
+   created
+ }) {
+   return {
+     id: id != null ? id : void 0,
+     modelId: model != null ? model : void 0,
+     timestamp: created != null ? new Date(created * 1e3) : void 0
+   };
+ }
+
+ // src/map-openai-finish-reason.ts
+ function mapOpenAIFinishReason(finishReason) {
+   switch (finishReason) {
+     case "stop":
+       return "stop";
+     case "length":
+       return "length";
+     case "content_filter":
+       return "content-filter";
+     case "function_call":
+     case "tool_calls":
+       return "tool-calls";
+     default:
+       return "unknown";
+   }
+ }
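+ // Example (illustrative): OpenAI's "tool_calls" finish reason maps to the
+ // SDK's "tool-calls", and `created` (epoch seconds) becomes a millisecond
+ // Date via `created * 1e3`.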
+
+ // src/openai-chat-options.ts
+ import { z } from "zod/v4";
+ var openaiProviderOptions = z.object({
+   /**
+    * Modify the likelihood of specified tokens appearing in the completion.
+    *
+    * Accepts a JSON object that maps tokens (specified by their token ID in
+    * the GPT tokenizer) to an associated bias value from -100 to 100.
+    */
+   logitBias: z.record(z.coerce.number(), z.number()).optional(),
+   /**
+    * Return the log probabilities of the tokens.
+    *
+    * Setting to true will return the log probabilities of the tokens that
+    * were generated.
+    *
+    * Setting to a number will return the log probabilities of the top n
+    * tokens that were generated.
+    */
+   logprobs: z.union([z.boolean(), z.number()]).optional(),
+   /**
+    * Whether to enable parallel function calling during tool use. Defaults to true.
+    */
+   parallelToolCalls: z.boolean().optional(),
+   /**
+    * A unique identifier representing your end-user, which can help OpenAI to
+    * monitor and detect abuse.
+    */
+   user: z.string().optional(),
+   /**
+    * Reasoning effort for reasoning models. Defaults to `medium`.
+    */
+   reasoningEffort: z.enum(["low", "medium", "high"]).optional(),
+   /**
+    * Maximum number of completion tokens to generate. Useful for reasoning models.
+    */
+   maxCompletionTokens: z.number().optional(),
+   /**
+    * Whether to enable persistence in the Responses API.
+    */
+   store: z.boolean().optional(),
+   /**
+    * Metadata to associate with the request.
+    */
+   metadata: z.record(z.string().max(64), z.string().max(512)).optional(),
+   /**
+    * Parameters for prediction mode.
+    */
+   prediction: z.record(z.string(), z.any()).optional(),
+   /**
+    * Whether to use structured outputs.
+    *
+    * @default true
+    */
+   structuredOutputs: z.boolean().optional(),
+   /**
+    * Service tier for the request.
+    * - 'auto': Default service tier
+    * - 'flex': 50% cheaper processing at the cost of increased latency. Only available for o3, o4-mini, and gpt-5 models.
+    * - 'priority': Higher-speed processing with predictably low latency at premium cost. Available for Enterprise customers.
+    *
+    * @default 'auto'
+    */
+   serviceTier: z.enum(["auto", "flex", "priority"]).optional(),
+   /**
+    * Whether to use strict JSON schema validation.
+    *
+    * @default false
+    */
+   strictJsonSchema: z.boolean().optional()
+ });
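+ // Example (illustrative): these options are supplied per call as
+ // providerOptions: { openai: { reasoningEffort: "low", serviceTier: "flex" } }
+ // and are validated against this schema by parseProviderOptions in getArgs.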
+
+ // src/openai-error.ts
+ import { z as z2 } from "zod/v4";
+ import { createJsonErrorResponseHandler } from "@ai-sdk/provider-utils";
+ var openaiErrorDataSchema = z2.object({
+   error: z2.object({
+     message: z2.string(),
+     // The additional information below is handled loosely to support
+     // OpenAI-compatible providers that have slightly different error
+     // responses:
+     type: z2.string().nullish(),
+     param: z2.any().nullish(),
+     code: z2.union([z2.string(), z2.number()]).nullish()
+   })
+ });
+ var openaiFailedResponseHandler = createJsonErrorResponseHandler({
+   errorSchema: openaiErrorDataSchema,
+   errorToMessage: (data) => data.error.message
+ });
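+ // Example (illustrative): an error body such as
+ // { "error": { "message": "Rate limit exceeded", "type": "requests", "code": 429 } }
+ // parses against this schema and surfaces "Rate limit exceeded" as the message.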
+
+ // src/openai-prepare-tools.ts
+ import {
+   UnsupportedFunctionalityError as UnsupportedFunctionalityError2
+ } from "@ai-sdk/provider";
+
+ // src/tool/file-search.ts
+ import { createProviderDefinedToolFactory } from "@ai-sdk/provider-utils";
+ import { z as z3 } from "zod/v4";
+ var comparisonFilterSchema = z3.object({
+   key: z3.string(),
+   type: z3.enum(["eq", "ne", "gt", "gte", "lt", "lte"]),
+   value: z3.union([z3.string(), z3.number(), z3.boolean()])
+ });
+ var compoundFilterSchema = z3.object({
+   type: z3.enum(["and", "or"]),
+   filters: z3.array(
+     z3.union([comparisonFilterSchema, z3.lazy(() => compoundFilterSchema)])
+   )
+ });
+ var filtersSchema = z3.union([comparisonFilterSchema, compoundFilterSchema]);
+ var fileSearchArgsSchema = z3.object({
+   /**
+    * List of vector store IDs to search through. If not provided, searches all available vector stores.
+    */
+   vectorStoreIds: z3.array(z3.string()).optional(),
+   /**
+    * Maximum number of search results to return. Defaults to 10.
+    */
+   maxNumResults: z3.number().optional(),
+   /**
+    * Ranking options for the search.
+    */
+   ranking: z3.object({
+     ranker: z3.enum(["auto", "default-2024-08-21"]).optional()
+   }).optional(),
+   /**
+    * A filter to apply based on file attributes.
+    */
+   filters: filtersSchema.optional()
+ });
+ var fileSearch = createProviderDefinedToolFactory({
+   id: "openai.file_search",
+   name: "file_search",
+   inputSchema: z3.object({
+     query: z3.string()
+   })
+ });
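+ // Example (illustrative): a compound filter like
+ // { type: "and", filters: [{ key: "lang", type: "eq", value: "en" }] }
+ // satisfies filtersSchema; compoundFilterSchema nests recursively via z3.lazy.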
+
+ // src/tool/web-search-preview.ts
+ import { createProviderDefinedToolFactory as createProviderDefinedToolFactory2 } from "@ai-sdk/provider-utils";
+ import { z as z4 } from "zod/v4";
+ var webSearchPreviewArgsSchema = z4.object({
+   /**
+    * Search context size to use for the web search.
+    * - high: Most comprehensive context, highest cost, slower response
+    * - medium: Balanced context, cost, and latency (default)
+    * - low: Least context, lowest cost, fastest response
+    */
+   searchContextSize: z4.enum(["low", "medium", "high"]).optional(),
+   /**
+    * User location information to provide geographically relevant search results.
+    */
+   userLocation: z4.object({
+     /**
+      * Type of location (always 'approximate')
+      */
+     type: z4.literal("approximate"),
+     /**
+      * Two-letter ISO country code (e.g., 'US', 'GB')
+      */
+     country: z4.string().optional(),
+     /**
+      * City name (free text, e.g., 'Minneapolis')
+      */
+     city: z4.string().optional(),
+     /**
+      * Region name (free text, e.g., 'Minnesota')
+      */
+     region: z4.string().optional(),
+     /**
+      * IANA timezone (e.g., 'America/Chicago')
+      */
+     timezone: z4.string().optional()
+   }).optional()
+ });
+ var webSearchPreview = createProviderDefinedToolFactory2({
+   id: "openai.web_search_preview",
+   name: "web_search_preview",
+   inputSchema: z4.object({})
+ });
+
+ // src/openai-prepare-tools.ts
+ function prepareTools({
+   tools,
+   toolChoice,
+   structuredOutputs,
+   strictJsonSchema
+ }) {
+   tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
+   const toolWarnings = [];
+   if (tools == null) {
+     return { tools: void 0, toolChoice: void 0, toolWarnings };
+   }
+   const openaiTools = [];
+   for (const tool of tools) {
+     switch (tool.type) {
+       case "function":
+         openaiTools.push({
+           type: "function",
+           function: {
+             name: tool.name,
+             description: tool.description,
+             parameters: tool.inputSchema,
+             strict: structuredOutputs ? strictJsonSchema : void 0
+           }
+         });
+         break;
+       case "provider-defined":
+         switch (tool.id) {
+           case "openai.file_search": {
+             const args = fileSearchArgsSchema.parse(tool.args);
+             openaiTools.push({
+               type: "file_search",
+               vector_store_ids: args.vectorStoreIds,
+               max_num_results: args.maxNumResults,
+               ranking_options: args.ranking ? { ranker: args.ranking.ranker } : void 0,
+               filters: args.filters
+             });
+             break;
+           }
+           case "openai.web_search_preview": {
+             const args = webSearchPreviewArgsSchema.parse(tool.args);
+             openaiTools.push({
+               type: "web_search_preview",
+               search_context_size: args.searchContextSize,
+               user_location: args.userLocation
+             });
+             break;
+           }
+           default:
+             toolWarnings.push({ type: "unsupported-tool", tool });
+             break;
+         }
+         break;
+       default:
+         toolWarnings.push({ type: "unsupported-tool", tool });
+         break;
+     }
+   }
+   if (toolChoice == null) {
+     return { tools: openaiTools, toolChoice: void 0, toolWarnings };
+   }
+   const type = toolChoice.type;
+   switch (type) {
+     case "auto":
+     case "none":
+     case "required":
+       return { tools: openaiTools, toolChoice: type, toolWarnings };
+     case "tool":
+       return {
+         tools: openaiTools,
+         toolChoice: {
+           type: "function",
+           function: {
+             name: toolChoice.toolName
+           }
+         },
+         toolWarnings
+       };
+     default: {
+       const _exhaustiveCheck = type;
+       throw new UnsupportedFunctionalityError2({
+         functionality: `tool choice type: ${_exhaustiveCheck}`
+       });
+     }
+   }
+ }
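+ // Example (illustrative): toolChoice { type: "tool", toolName: "file_search" }
+ // is mapped to { type: "function", function: { name: "file_search" } }, while
+ // "auto" | "none" | "required" pass through unchanged.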
+
+ // src/openai-chat-language-model.ts
+ var OpenAIChatLanguageModel = class {
+   constructor(modelId, config) {
+     this.specificationVersion = "v2";
+     this.supportedUrls = {
+       "image/*": [/^https?:\/\/.*$/]
+     };
+     this.modelId = modelId;
+     this.config = config;
+   }
+   get provider() {
+     return this.config.provider;
+   }
+   async getArgs({
+     prompt,
+     maxOutputTokens,
+     temperature,
+     topP,
+     topK,
+     frequencyPenalty,
+     presencePenalty,
+     stopSequences,
+     responseFormat,
+     seed,
+     tools,
+     toolChoice,
+     providerOptions
+   }) {
+     var _a, _b, _c, _d;
+     const warnings = [];
+     const openaiOptions = (_a = await parseProviderOptions({
+       provider: "openai",
+       providerOptions,
+       schema: openaiProviderOptions
+     })) != null ? _a : {};
+     const structuredOutputs = (_b = openaiOptions.structuredOutputs) != null ? _b : true;
+     if (topK != null) {
+       warnings.push({
+         type: "unsupported-setting",
+         setting: "topK"
+       });
+     }
+     if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !structuredOutputs) {
+       warnings.push({
+         type: "unsupported-setting",
+         setting: "responseFormat",
+         details: "JSON response format schema is only supported with structuredOutputs"
+       });
+     }
+     const { messages, warnings: messageWarnings } = convertToOpenAIChatMessages(
+       {
+         prompt,
+         systemMessageMode: getSystemMessageMode(this.modelId)
+       }
+     );
+     warnings.push(...messageWarnings);
+     const strictJsonSchema = (_c = openaiOptions.strictJsonSchema) != null ? _c : false;
+     const baseArgs = {
+       // model id:
+       model: this.modelId,
+       // model specific settings:
+       logit_bias: openaiOptions.logitBias,
+       logprobs: openaiOptions.logprobs === true || typeof openaiOptions.logprobs === "number" ? true : void 0,
+       top_logprobs: typeof openaiOptions.logprobs === "number" ? openaiOptions.logprobs : typeof openaiOptions.logprobs === "boolean" ? openaiOptions.logprobs ? 0 : void 0 : void 0,
+       user: openaiOptions.user,
+       parallel_tool_calls: openaiOptions.parallelToolCalls,
+       // standardized settings:
+       max_tokens: maxOutputTokens,
+       temperature,
+       top_p: topP,
+       frequency_penalty: frequencyPenalty,
+       presence_penalty: presencePenalty,
+       response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? structuredOutputs && responseFormat.schema != null ? {
+         type: "json_schema",
+         json_schema: {
+           schema: responseFormat.schema,
+           strict: strictJsonSchema,
+           name: (_d = responseFormat.name) != null ? _d : "response",
+           description: responseFormat.description
+         }
+       } : { type: "json_object" } : void 0,
+       stop: stopSequences,
+       seed,
+       // openai specific settings:
+       // TODO remove in next major version; we auto-map maxOutputTokens now
+       max_completion_tokens: openaiOptions.maxCompletionTokens,
+       store: openaiOptions.store,
+       metadata: openaiOptions.metadata,
+       prediction: openaiOptions.prediction,
+       reasoning_effort: openaiOptions.reasoningEffort,
+       service_tier: openaiOptions.serviceTier,
+       // messages:
+       messages
+     };
+     if (isReasoningModel(this.modelId)) {
+       if (baseArgs.temperature != null) {
+         baseArgs.temperature = void 0;
+         warnings.push({
+           type: "unsupported-setting",
+           setting: "temperature",
+           details: "temperature is not supported for reasoning models"
+         });
+       }
+       if (baseArgs.top_p != null) {
+         baseArgs.top_p = void 0;
+         warnings.push({
+           type: "unsupported-setting",
+           setting: "topP",
+           details: "topP is not supported for reasoning models"
+         });
+       }
+       if (baseArgs.frequency_penalty != null) {
+         baseArgs.frequency_penalty = void 0;
+         warnings.push({
+           type: "unsupported-setting",
+           setting: "frequencyPenalty",
+           details: "frequencyPenalty is not supported for reasoning models"
+         });
+       }
+       if (baseArgs.presence_penalty != null) {
+         baseArgs.presence_penalty = void 0;
+         warnings.push({
+           type: "unsupported-setting",
+           setting: "presencePenalty",
+           details: "presencePenalty is not supported for reasoning models"
+         });
+       }
+       if (baseArgs.logit_bias != null) {
+         baseArgs.logit_bias = void 0;
+         warnings.push({
+           type: "other",
+           message: "logitBias is not supported for reasoning models"
+         });
+       }
+       if (baseArgs.logprobs != null) {
+         baseArgs.logprobs = void 0;
+         warnings.push({
+           type: "other",
+           message: "logprobs is not supported for reasoning models"
+         });
+       }
+       if (baseArgs.top_logprobs != null) {
+         baseArgs.top_logprobs = void 0;
+         warnings.push({
+           type: "other",
+           message: "topLogprobs is not supported for reasoning models"
+         });
+       }
+       if (baseArgs.max_tokens != null) {
+         if (baseArgs.max_completion_tokens == null) {
+           baseArgs.max_completion_tokens = baseArgs.max_tokens;
+         }
+         baseArgs.max_tokens = void 0;
+       }
+     } else if (this.modelId.startsWith("gpt-4o-search-preview") || this.modelId.startsWith("gpt-4o-mini-search-preview")) {
+       if (baseArgs.temperature != null) {
+         baseArgs.temperature = void 0;
+         warnings.push({
+           type: "unsupported-setting",
+           setting: "temperature",
+           details: "temperature is not supported for the search preview models and has been removed."
+         });
+       }
+     }
+     if (openaiOptions.serviceTier === "flex" && !supportsFlexProcessing(this.modelId)) {
+       warnings.push({
+         type: "unsupported-setting",
+         setting: "serviceTier",
+         details: "flex processing is only available for o3, o4-mini, and gpt-5 models"
+       });
+       baseArgs.service_tier = void 0;
+     }
+     if (openaiOptions.serviceTier === "priority" && !supportsPriorityProcessing(this.modelId)) {
+       warnings.push({
+         type: "unsupported-setting",
+         setting: "serviceTier",
+         details: "priority processing is only available for supported models (gpt-4, gpt-5, gpt-5-mini, o3, o4-mini) and requires Enterprise access. gpt-5-nano is not supported"
+       });
+       baseArgs.service_tier = void 0;
+     }
+     const {
+       tools: openaiTools,
+       toolChoice: openaiToolChoice,
+       toolWarnings
+     } = prepareTools({
+       tools,
+       toolChoice,
+       structuredOutputs,
+       strictJsonSchema
+     });
+     return {
+       args: {
+         ...baseArgs,
+         tools: openaiTools,
+         tool_choice: openaiToolChoice
+       },
+       warnings: [...warnings, ...toolWarnings]
+     };
+   }
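+   // Example (illustrative): for a reasoning model such as "o3", a call with
+   // temperature: 0.7 and maxOutputTokens: 1024 drops temperature with an
+   // "unsupported-setting" warning and re-sends maxOutputTokens as
+   // max_completion_tokens: 1024.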
+   async doGenerate(options) {
+     var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n;
+     const { args: body, warnings } = await this.getArgs(options);
+     const {
+       responseHeaders,
+       value: response,
+       rawValue: rawResponse
+     } = await postJsonToApi({
+       url: this.config.url({
+         path: "/chat/completions",
+         modelId: this.modelId
+       }),
+       headers: combineHeaders(this.config.headers(), options.headers),
+       body,
+       failedResponseHandler: openaiFailedResponseHandler,
+       successfulResponseHandler: createJsonResponseHandler(
+         openaiChatResponseSchema
+       ),
+       abortSignal: options.abortSignal,
+       fetch: this.config.fetch
+     });
+     const choice = response.choices[0];
+     const content = [];
+     const text = choice.message.content;
+     if (text != null && text.length > 0) {
+       content.push({ type: "text", text });
+     }
+     for (const toolCall of (_a = choice.message.tool_calls) != null ? _a : []) {
+       content.push({
+         type: "tool-call",
+         toolCallId: (_b = toolCall.id) != null ? _b : generateId(),
+         toolName: toolCall.function.name,
+         input: toolCall.function.arguments
+       });
+     }
+     for (const annotation of (_c = choice.message.annotations) != null ? _c : []) {
+       content.push({
+         type: "source",
+         sourceType: "url",
+         id: generateId(),
+         url: annotation.url,
+         title: annotation.title
+       });
+     }
+     const completionTokenDetails = (_d = response.usage) == null ? void 0 : _d.completion_tokens_details;
+     const promptTokenDetails = (_e = response.usage) == null ? void 0 : _e.prompt_tokens_details;
+     const providerMetadata = { openai: {} };
+     if ((completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens) != null) {
+       providerMetadata.openai.acceptedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens;
+     }
+     if ((completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens) != null) {
+       providerMetadata.openai.rejectedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens;
+     }
+     if (((_f = choice.logprobs) == null ? void 0 : _f.content) != null) {
+       providerMetadata.openai.logprobs = choice.logprobs.content;
+     }
+     return {
+       content,
+       finishReason: mapOpenAIFinishReason(choice.finish_reason),
+       usage: {
+         inputTokens: (_h = (_g = response.usage) == null ? void 0 : _g.prompt_tokens) != null ? _h : void 0,
+         outputTokens: (_j = (_i = response.usage) == null ? void 0 : _i.completion_tokens) != null ? _j : void 0,
+         totalTokens: (_l = (_k = response.usage) == null ? void 0 : _k.total_tokens) != null ? _l : void 0,
+         reasoningTokens: (_m = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null ? _m : void 0,
+         cachedInputTokens: (_n = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null ? _n : void 0
+       },
+       request: { body },
+       response: {
+         ...getResponseMetadata(response),
+         headers: responseHeaders,
+         body: rawResponse
+       },
+       warnings,
+       providerMetadata
+     };
+   }
+   async doStream(options) {
+     const { args, warnings } = await this.getArgs(options);
+     const body = {
+       ...args,
+       stream: true,
+       stream_options: {
+         include_usage: true
+       }
+     };
+     const { responseHeaders, value: response } = await postJsonToApi({
+       url: this.config.url({
+         path: "/chat/completions",
+         modelId: this.modelId
+       }),
+       headers: combineHeaders(this.config.headers(), options.headers),
+       body,
+       failedResponseHandler: openaiFailedResponseHandler,
+       successfulResponseHandler: createEventSourceResponseHandler(
+         openaiChatChunkSchema
+       ),
+       abortSignal: options.abortSignal,
+       fetch: this.config.fetch
+     });
+     const toolCalls = [];
+     let finishReason = "unknown";
+     const usage = {
+       inputTokens: void 0,
+       outputTokens: void 0,
+       totalTokens: void 0
+     };
+     let isFirstChunk = true;
+     let isActiveText = false;
+     const providerMetadata = { openai: {} };
+     return {
+       stream: response.pipeThrough(
+         new TransformStream({
+           start(controller) {
+             controller.enqueue({ type: "stream-start", warnings });
+           },
+           transform(chunk, controller) {
+             var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x;
+             if (options.includeRawChunks) {
+               controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
+             }
+             if (!chunk.success) {
+               finishReason = "error";
+               controller.enqueue({ type: "error", error: chunk.error });
+               return;
+             }
+             const value = chunk.value;
+             if ("error" in value) {
+               finishReason = "error";
+               controller.enqueue({ type: "error", error: value.error });
+               return;
+             }
+             if (isFirstChunk) {
+               isFirstChunk = false;
+               controller.enqueue({
+                 type: "response-metadata",
+                 ...getResponseMetadata(value)
+               });
+             }
+             if (value.usage != null) {
+               usage.inputTokens = (_a = value.usage.prompt_tokens) != null ? _a : void 0;
+               usage.outputTokens = (_b = value.usage.completion_tokens) != null ? _b : void 0;
+               usage.totalTokens = (_c = value.usage.total_tokens) != null ? _c : void 0;
+               usage.reasoningTokens = (_e = (_d = value.usage.completion_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : void 0;
+               usage.cachedInputTokens = (_g = (_f = value.usage.prompt_tokens_details) == null ? void 0 : _f.cached_tokens) != null ? _g : void 0;
+               if (((_h = value.usage.completion_tokens_details) == null ? void 0 : _h.accepted_prediction_tokens) != null) {
+                 providerMetadata.openai.acceptedPredictionTokens = (_i = value.usage.completion_tokens_details) == null ? void 0 : _i.accepted_prediction_tokens;
+               }
+               if (((_j = value.usage.completion_tokens_details) == null ? void 0 : _j.rejected_prediction_tokens) != null) {
+                 providerMetadata.openai.rejectedPredictionTokens = (_k = value.usage.completion_tokens_details) == null ? void 0 : _k.rejected_prediction_tokens;
+               }
+             }
+             const choice = value.choices[0];
+             if ((choice == null ? void 0 : choice.finish_reason) != null) {
+               finishReason = mapOpenAIFinishReason(choice.finish_reason);
+             }
+             if (((_l = choice == null ? void 0 : choice.logprobs) == null ? void 0 : _l.content) != null) {
+               providerMetadata.openai.logprobs = choice.logprobs.content;
+             }
+             if ((choice == null ? void 0 : choice.delta) == null) {
+               return;
+             }
+             const delta = choice.delta;
+             if (delta.content != null) {
+               if (!isActiveText) {
+                 controller.enqueue({ type: "text-start", id: "0" });
+                 isActiveText = true;
+               }
+               controller.enqueue({
+                 type: "text-delta",
+                 id: "0",
+                 delta: delta.content
+               });
+             }
+             if (delta.tool_calls != null) {
+               for (const toolCallDelta of delta.tool_calls) {
+                 const index = toolCallDelta.index;
+                 if (toolCalls[index] == null) {
+                   if (toolCallDelta.type !== "function") {
+                     throw new InvalidResponseDataError({
+                       data: toolCallDelta,
+                       message: `Expected 'function' type.`
+                     });
+                   }
+                   if (toolCallDelta.id == null) {
+                     throw new InvalidResponseDataError({
+                       data: toolCallDelta,
+                       message: `Expected 'id' to be a string.`
+                     });
+                   }
+                   if (((_m = toolCallDelta.function) == null ? void 0 : _m.name) == null) {
+                     throw new InvalidResponseDataError({
+                       data: toolCallDelta,
+                       message: `Expected 'function.name' to be a string.`
+                     });
+                   }
+                   controller.enqueue({
+                     type: "tool-input-start",
+                     id: toolCallDelta.id,
+                     toolName: toolCallDelta.function.name
+                   });
+                   toolCalls[index] = {
+                     id: toolCallDelta.id,
+                     type: "function",
+                     function: {
+                       name: toolCallDelta.function.name,
+                       arguments: (_n = toolCallDelta.function.arguments) != null ? _n : ""
+                     },
+                     hasFinished: false
+                   };
+                   const toolCall2 = toolCalls[index];
+                   if (((_o = toolCall2.function) == null ? void 0 : _o.name) != null && ((_p = toolCall2.function) == null ? void 0 : _p.arguments) != null) {
+                     if (toolCall2.function.arguments.length > 0) {
+                       controller.enqueue({
+                         type: "tool-input-delta",
+                         id: toolCall2.id,
+                         delta: toolCall2.function.arguments
+                       });
+                     }
+                     if (isParsableJson(toolCall2.function.arguments)) {
+                       controller.enqueue({
+                         type: "tool-input-end",
+                         id: toolCall2.id
+                       });
+                       controller.enqueue({
+                         type: "tool-call",
+                         toolCallId: (_q = toolCall2.id) != null ? _q : generateId(),
+                         toolName: toolCall2.function.name,
+                         input: toolCall2.function.arguments
+                       });
+                       toolCall2.hasFinished = true;
+                     }
+                   }
+                   continue;
+                 }
+                 const toolCall = toolCalls[index];
+                 if (toolCall.hasFinished) {
+                   continue;
+                 }
+                 if (((_r = toolCallDelta.function) == null ? void 0 : _r.arguments) != null) {
+                   toolCall.function.arguments += (_t = (_s = toolCallDelta.function) == null ? void 0 : _s.arguments) != null ? _t : "";
+                 }
+                 controller.enqueue({
+                   type: "tool-input-delta",
+                   id: toolCall.id,
+                   delta: (_u = toolCallDelta.function.arguments) != null ? _u : ""
+                 });
+                 if (((_v = toolCall.function) == null ? void 0 : _v.name) != null && ((_w = toolCall.function) == null ? void 0 : _w.arguments) != null && isParsableJson(toolCall.function.arguments)) {
+                   controller.enqueue({
+                     type: "tool-input-end",
+                     id: toolCall.id
+                   });
+                   controller.enqueue({
+                     type: "tool-call",
+                     toolCallId: (_x = toolCall.id) != null ? _x : generateId(),
+                     toolName: toolCall.function.name,
+                     input: toolCall.function.arguments
+                   });
+                   toolCall.hasFinished = true;
+                 }
+               }
+             }
+             if (delta.annotations != null) {
+               for (const annotation of delta.annotations) {
+                 controller.enqueue({
+                   type: "source",
+                   sourceType: "url",
+                   id: generateId(),
+                   url: annotation.url,
+                   title: annotation.title
+                 });
+               }
+             }
+           },
+           flush(controller) {
+             if (isActiveText) {
+               controller.enqueue({ type: "text-end", id: "0" });
+             }
+             controller.enqueue({
+               type: "finish",
+               finishReason,
+               usage,
+               ...providerMetadata != null ? { providerMetadata } : {}
+             });
+           }
+         })
+       ),
+       request: { body },
+       response: { headers: responseHeaders }
+     };
+   }
+ };
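+ // Example (illustrative): streamed tool-call argument fragments such as
+ // '{"city":' followed by '"Berlin"}' are accumulated per delta index; once
+ // the buffer parses as JSON (isParsableJson), tool-input-end and a final
+ // tool-call event are emitted, and later fragments for that call are ignored.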
+ var openaiTokenUsageSchema = z5.object({
+   prompt_tokens: z5.number().nullish(),
+   completion_tokens: z5.number().nullish(),
+   total_tokens: z5.number().nullish(),
+   prompt_tokens_details: z5.object({
+     cached_tokens: z5.number().nullish()
+   }).nullish(),
+   completion_tokens_details: z5.object({
+     reasoning_tokens: z5.number().nullish(),
+     accepted_prediction_tokens: z5.number().nullish(),
+     rejected_prediction_tokens: z5.number().nullish()
+   }).nullish()
+ }).nullish();
+ var openaiChatResponseSchema = z5.object({
+   id: z5.string().nullish(),
+   created: z5.number().nullish(),
+   model: z5.string().nullish(),
+   choices: z5.array(
+     z5.object({
+       message: z5.object({
+         role: z5.literal("assistant").nullish(),
+         content: z5.string().nullish(),
+         tool_calls: z5.array(
+           z5.object({
+             id: z5.string().nullish(),
+             type: z5.literal("function"),
+             function: z5.object({
+               name: z5.string(),
+               arguments: z5.string()
+             })
+           })
+         ).nullish(),
+         annotations: z5.array(
+           z5.object({
+             type: z5.literal("url_citation"),
+             start_index: z5.number(),
+             end_index: z5.number(),
+             url: z5.string(),
+             title: z5.string()
+           })
+         ).nullish()
+       }),
+       index: z5.number(),
+       logprobs: z5.object({
+         content: z5.array(
+           z5.object({
+             token: z5.string(),
+             logprob: z5.number(),
+             top_logprobs: z5.array(
+               z5.object({
+                 token: z5.string(),
+                 logprob: z5.number()
+               })
+             )
+           })
+         ).nullish()
+       }).nullish(),
+       finish_reason: z5.string().nullish()
+     })
+   ),
+   usage: openaiTokenUsageSchema
+ });
+ var openaiChatChunkSchema = z5.union([
+   z5.object({
+     id: z5.string().nullish(),
+     created: z5.number().nullish(),
+     model: z5.string().nullish(),
+     choices: z5.array(
+       z5.object({
+         delta: z5.object({
+           role: z5.enum(["assistant"]).nullish(),
+           content: z5.string().nullish(),
+           tool_calls: z5.array(
+             z5.object({
+               index: z5.number(),
+               id: z5.string().nullish(),
+               type: z5.literal("function").nullish(),
+               function: z5.object({
+                 name: z5.string().nullish(),
+                 arguments: z5.string().nullish()
+               })
+             })
+           ).nullish(),
+           annotations: z5.array(
+             z5.object({
+               type: z5.literal("url_citation"),
+               start_index: z5.number(),
+               end_index: z5.number(),
+               url: z5.string(),
+               title: z5.string()
+             })
+           ).nullish()
+         }).nullish(),
+         logprobs: z5.object({
+           content: z5.array(
+             z5.object({
+               token: z5.string(),
+               logprob: z5.number(),
+               top_logprobs: z5.array(
+                 z5.object({
+                   token: z5.string(),
+                   logprob: z5.number()
+                 })
+               )
+             })
+           ).nullish()
+         }).nullish(),
+         finish_reason: z5.string().nullish(),
+         index: z5.number()
+       })
+     ),
+     usage: openaiTokenUsageSchema
+   }),
+   openaiErrorDataSchema
+ ]);
+ function isReasoningModel(modelId) {
+   return modelId.startsWith("o") || modelId.startsWith("gpt-5");
+ }
+ function supportsFlexProcessing(modelId) {
+   return modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5");
+ }
+ function supportsPriorityProcessing(modelId) {
+   return modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
+ }
+ function getSystemMessageMode(modelId) {
+   var _a, _b;
+   if (!isReasoningModel(modelId)) {
+     return "system";
+   }
+   return (_b = (_a = reasoningModels[modelId]) == null ? void 0 : _a.systemMessageMode) != null ? _b : "developer";
+ }
+ var reasoningModels = {
+   "o1-mini": {
+     systemMessageMode: "remove"
+   },
+   "o1-mini-2024-09-12": {
+     systemMessageMode: "remove"
+   },
+   "o1-preview": {
+     systemMessageMode: "remove"
+   },
+   "o1-preview-2024-09-12": {
+     systemMessageMode: "remove"
+   },
+   o3: {
+     systemMessageMode: "developer"
+   },
+   "o3-2025-04-16": {
+     systemMessageMode: "developer"
+   },
+   "o3-mini": {
+     systemMessageMode: "developer"
+   },
+   "o3-mini-2025-01-31": {
+     systemMessageMode: "developer"
+   },
+   "o4-mini": {
+     systemMessageMode: "developer"
+   },
+   "o4-mini-2025-04-16": {
+     systemMessageMode: "developer"
+   }
+ };
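+ // Example (illustrative): getSystemMessageMode("o1-mini") returns "remove",
+ // getSystemMessageMode("o3") returns "developer", and a non-reasoning model
+ // such as "gpt-4o" falls through to "system".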
+
+ // src/openai-completion-language-model.ts
+ import {
+   combineHeaders as combineHeaders2,
+   createEventSourceResponseHandler as createEventSourceResponseHandler2,
+   createJsonResponseHandler as createJsonResponseHandler2,
+   parseProviderOptions as parseProviderOptions2,
+   postJsonToApi as postJsonToApi2
+ } from "@ai-sdk/provider-utils";
+ import { z as z7 } from "zod/v4";
+
+ // src/convert-to-openai-completion-prompt.ts
+ import {
+   InvalidPromptError,
+   UnsupportedFunctionalityError as UnsupportedFunctionalityError3
+ } from "@ai-sdk/provider";
+ function convertToOpenAICompletionPrompt({
+   prompt,
+   user = "user",
+   assistant = "assistant"
+ }) {
+   let text = "";
+   if (prompt[0].role === "system") {
+     text += `${prompt[0].content}
+
+ `;
+     prompt = prompt.slice(1);
+   }
+   for (const { role, content } of prompt) {
+     switch (role) {
+       case "system": {
+         throw new InvalidPromptError({
+           message: `Unexpected system message in prompt: ${content}`,
+           prompt
+         });
+       }
+       case "user": {
+         const userMessage = content.map((part) => {
+           switch (part.type) {
+             case "text": {
+               return part.text;
+             }
+           }
+         }).filter(Boolean).join("");
+         text += `${user}:
+ ${userMessage}
+
+ `;
+         break;
+       }
+       case "assistant": {
+         const assistantMessage = content.map((part) => {
+           switch (part.type) {
+             case "text": {
+               return part.text;
+             }
+             case "tool-call": {
+               throw new UnsupportedFunctionalityError3({
+                 functionality: "tool-call messages"
+               });
+             }
+           }
+         }).join("");
+         text += `${assistant}:
+ ${assistantMessage}
+
+ `;
+         break;
+       }
+       case "tool": {
+         throw new UnsupportedFunctionalityError3({
+           functionality: "tool messages"
+         });
+       }
+       default: {
+         const _exhaustiveCheck = role;
+         throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
+       }
+     }
+   }
+   text += `${assistant}:
+ `;
+   return {
+     prompt: text,
+     stopSequences: [`
+ ${user}:`]
+   };
+ }
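+ // Example (illustrative): a user turn "Hi" renders as "user:\nHi\n\n" and the
+ // prompt ends with "assistant:\n"; "\nuser:" is returned as a stop sequence so
+ // the completion model does not write the next user turn itself.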
+
+ // src/openai-completion-options.ts
+ import { z as z6 } from "zod/v4";
+ var openaiCompletionProviderOptions = z6.object({
+   /**
+   Echo back the prompt in addition to the completion.
+   */
+   echo: z6.boolean().optional(),
+   /**
+   Modify the likelihood of specified tokens appearing in the completion.
+
+   Accepts a JSON object that maps tokens (specified by their token ID in
+   the GPT tokenizer) to an associated bias value from -100 to 100. You
+   can use this tokenizer tool to convert text to token IDs. Mathematically,
+   the bias is added to the logits generated by the model prior to sampling.
+   The exact effect will vary per model, but values between -1 and 1 should
+   decrease or increase likelihood of selection; values like -100 or 100
+   should result in a ban or exclusive selection of the relevant token.
+
+   As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
+   token from being generated.
+   */
+   logitBias: z6.record(z6.string(), z6.number()).optional(),
+   /**
+   The suffix that comes after a completion of inserted text.
+   */
+   suffix: z6.string().optional(),
+   /**
+   A unique identifier representing your end-user, which can help OpenAI to
+   monitor and detect abuse. Learn more.
+   */
+   user: z6.string().optional(),
+   /**
+   Return the log probabilities of the tokens. Including logprobs will increase
+   the response size and can slow down response times. However, it can
+   be useful to better understand how the model is behaving.
+   Setting to true will return the log probabilities of the tokens that
+   were generated.
+   Setting to a number will return the log probabilities of the top n
+   tokens that were generated.
+   */
+   logprobs: z6.union([z6.boolean(), z6.number()]).optional()
+ });
+
+ // src/openai-completion-language-model.ts
+ var OpenAICompletionLanguageModel = class {
+   constructor(modelId, config) {
+     this.specificationVersion = "v2";
+     this.supportedUrls = {
+       // No URLs are supported for completion models.
+     };
+     this.modelId = modelId;
+     this.config = config;
+   }
+   get providerOptionsName() {
+     return this.config.provider.split(".")[0].trim();
+   }
+   get provider() {
+     return this.config.provider;
+   }
+   async getArgs({
+     prompt,
+     maxOutputTokens,
+     temperature,
+     topP,
+     topK,
+     frequencyPenalty,
+     presencePenalty,
+     stopSequences: userStopSequences,
+     responseFormat,
+     tools,
+     toolChoice,
+     seed,
+     providerOptions
+   }) {
+     const warnings = [];
+     const openaiOptions = {
+       ...await parseProviderOptions2({
+         provider: "openai",
+         providerOptions,
+         schema: openaiCompletionProviderOptions
+       }),
+       ...await parseProviderOptions2({
+         provider: this.providerOptionsName,
+         providerOptions,
+         schema: openaiCompletionProviderOptions
+       })
+     };
+     if (topK != null) {
+       warnings.push({ type: "unsupported-setting", setting: "topK" });
+     }
+     if (tools == null ? void 0 : tools.length) {
+       warnings.push({ type: "unsupported-setting", setting: "tools" });
+     }
+     if (toolChoice != null) {
+       warnings.push({ type: "unsupported-setting", setting: "toolChoice" });
+     }
+     if (responseFormat != null && responseFormat.type !== "text") {
+       warnings.push({
+         type: "unsupported-setting",
+         setting: "responseFormat",
+         details: "JSON response format is not supported."
+       });
+     }
+     const { prompt: completionPrompt, stopSequences } = convertToOpenAICompletionPrompt({ prompt });
+     const stop = [...stopSequences != null ? stopSequences : [], ...userStopSequences != null ? userStopSequences : []];
+     return {
+       args: {
+         // model id:
+         model: this.modelId,
+         // model specific settings:
+         echo: openaiOptions.echo,
+         logit_bias: openaiOptions.logitBias,
+         logprobs: (openaiOptions == null ? void 0 : openaiOptions.logprobs) === true ? 0 : (openaiOptions == null ? void 0 : openaiOptions.logprobs) === false ? void 0 : openaiOptions == null ? void 0 : openaiOptions.logprobs,
+         suffix: openaiOptions.suffix,
+         user: openaiOptions.user,
+         // standardized settings:
+         max_tokens: maxOutputTokens,
+         temperature,
+         top_p: topP,
+         frequency_penalty: frequencyPenalty,
+         presence_penalty: presencePenalty,
+         seed,
+         // prompt:
+         prompt: completionPrompt,
+         // stop sequences:
+         stop: stop.length > 0 ? stop : void 0
+       },
+       warnings
+     };
+   }
+   async doGenerate(options) {
+     var _a, _b, _c;
+     const { args, warnings } = await this.getArgs(options);
+     const {
+       responseHeaders,
+       value: response,
+       rawValue: rawResponse
+     } = await postJsonToApi2({
+       url: this.config.url({
+         path: "/completions",
+         modelId: this.modelId
+       }),
+       headers: combineHeaders2(this.config.headers(), options.headers),
+       body: args,
+       failedResponseHandler: openaiFailedResponseHandler,
+       successfulResponseHandler: createJsonResponseHandler2(
+         openaiCompletionResponseSchema
+       ),
+       abortSignal: options.abortSignal,
+       fetch: this.config.fetch
+     });
+     const choice = response.choices[0];
+     const providerMetadata = { openai: {} };
+     if (choice.logprobs != null) {
+       providerMetadata.openai.logprobs = choice.logprobs;
+     }
+     return {
+       content: [{ type: "text", text: choice.text }],
+       usage: {
+         inputTokens: (_a = response.usage) == null ? void 0 : _a.prompt_tokens,
+         outputTokens: (_b = response.usage) == null ? void 0 : _b.completion_tokens,
+         totalTokens: (_c = response.usage) == null ? void 0 : _c.total_tokens
+       },
+       finishReason: mapOpenAIFinishReason(choice.finish_reason),
+       request: { body: args },
+       response: {
+         ...getResponseMetadata(response),
+         headers: responseHeaders,
+         body: rawResponse
+       },
+       providerMetadata,
+       warnings
+     };
+   }
+   async doStream(options) {
+     const { args, warnings } = await this.getArgs(options);
+     const body = {
+       ...args,
+       stream: true,
+       stream_options: {
+         include_usage: true
+       }
+     };
+     const { responseHeaders, value: response } = await postJsonToApi2({
+       url: this.config.url({
+         path: "/completions",
+         modelId: this.modelId
+       }),
+       headers: combineHeaders2(this.config.headers(), options.headers),
+       body,
+       failedResponseHandler: openaiFailedResponseHandler,
+       successfulResponseHandler: createEventSourceResponseHandler2(
+         openaiCompletionChunkSchema
+       ),
+       abortSignal: options.abortSignal,
+       fetch: this.config.fetch
+     });
+     let finishReason = "unknown";
+     const providerMetadata = { openai: {} };
+     const usage = {
+       inputTokens: void 0,
+       outputTokens: void 0,
+       totalTokens: void 0
+     };
+     let isFirstChunk = true;
+     return {
+       stream: response.pipeThrough(
+         new TransformStream({
+           start(controller) {
+             controller.enqueue({ type: "stream-start", warnings });
+           },
+           transform(chunk, controller) {
+             if (options.includeRawChunks) {
+               controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
+             }
+             if (!chunk.success) {
+               finishReason = "error";
+               controller.enqueue({ type: "error", error: chunk.error });
+               return;
+             }
+             const value = chunk.value;
+             if ("error" in value) {
+               finishReason = "error";
+               controller.enqueue({ type: "error", error: value.error });
+               return;
+             }
+             if (isFirstChunk) {
+               isFirstChunk = false;
+               controller.enqueue({
+                 type: "response-metadata",
+                 ...getResponseMetadata(value)
+               });
+               controller.enqueue({ type: "text-start", id: "0" });
+             }
+             if (value.usage != null) {
+               usage.inputTokens = value.usage.prompt_tokens;
+               usage.outputTokens = value.usage.completion_tokens;
+               usage.totalTokens = value.usage.total_tokens;
+             }
+             const choice = value.choices[0];
+             if ((choice == null ? void 0 : choice.finish_reason) != null) {
+               finishReason = mapOpenAIFinishReason(choice.finish_reason);
+             }
+             if ((choice == null ? void 0 : choice.logprobs) != null) {
+               providerMetadata.openai.logprobs = choice.logprobs;
+             }
+             if ((choice == null ? void 0 : choice.text) != null && choice.text.length > 0) {
+               controller.enqueue({
+                 type: "text-delta",
+                 id: "0",
+                 delta: choice.text
+               });
+             }
+           },
+           flush(controller) {
+             if (!isFirstChunk) {
+               controller.enqueue({ type: "text-end", id: "0" });
+             }
+             controller.enqueue({
+               type: "finish",
+               finishReason,
+               providerMetadata,
+               usage
+             });
+           }
+         })
+       ),
+       request: { body },
+       response: { headers: responseHeaders }
+     };
+   }
+ };
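+ // Example (illustrative): because providerOptionsName is derived from the
+ // provider string (e.g. a provider named "openai.completion" yields "openai"),
+ // OpenAI-compatible wrappers can pass the same completion options under their
+ // own provider key and both parses are merged above.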
+ var usageSchema = z7.object({
+   prompt_tokens: z7.number(),
+   completion_tokens: z7.number(),
+   total_tokens: z7.number()
+ });
+ var openaiCompletionResponseSchema = z7.object({
+   id: z7.string().nullish(),
+   created: z7.number().nullish(),
+   model: z7.string().nullish(),
+   choices: z7.array(
+     z7.object({
+       text: z7.string(),
+       finish_reason: z7.string(),
+       logprobs: z7.object({
+         tokens: z7.array(z7.string()),
+         token_logprobs: z7.array(z7.number()),
+         top_logprobs: z7.array(z7.record(z7.string(), z7.number())).nullish()
+       }).nullish()
+     })
+   ),
+   usage: usageSchema.nullish()
+ });
+ var openaiCompletionChunkSchema = z7.union([
+   z7.object({
+     id: z7.string().nullish(),
+     created: z7.number().nullish(),
+     model: z7.string().nullish(),
+     choices: z7.array(
+       z7.object({
+         text: z7.string(),
+         finish_reason: z7.string().nullish(),
+         index: z7.number(),
+         logprobs: z7.object({
+           tokens: z7.array(z7.string()),
+           token_logprobs: z7.array(z7.number()),
+           top_logprobs: z7.array(z7.record(z7.string(), z7.number())).nullish()
+         }).nullish()
+       })
+     ),
+     usage: usageSchema.nullish()
+   }),
+   openaiErrorDataSchema
+ ]);
+
+ // src/openai-embedding-model.ts
+ import {
+   TooManyEmbeddingValuesForCallError
+ } from "@ai-sdk/provider";
+ import {
+   combineHeaders as combineHeaders3,
+   createJsonResponseHandler as createJsonResponseHandler3,
+   parseProviderOptions as parseProviderOptions3,
+   postJsonToApi as postJsonToApi3
+ } from "@ai-sdk/provider-utils";
+ import { z as z9 } from "zod/v4";
+
+ // src/openai-embedding-options.ts
+ import { z as z8 } from "zod/v4";
+ var openaiEmbeddingProviderOptions = z8.object({
+   /**
+   The number of dimensions the resulting output embeddings should have.
+   Only supported in text-embedding-3 and later models.
+   */
+   dimensions: z8.number().optional(),
+   /**
+   A unique identifier representing your end-user, which can help OpenAI to
+   monitor and detect abuse. Learn more.
+   */
+   user: z8.string().optional()
+ });
+
+ // src/openai-embedding-model.ts
+ var OpenAIEmbeddingModel = class {
+   constructor(modelId, config) {
+     this.specificationVersion = "v2";
+     this.maxEmbeddingsPerCall = 2048;
+     this.supportsParallelCalls = true;
+     this.modelId = modelId;
+     this.config = config;
+   }
+   get provider() {
+     return this.config.provider;
+   }
+   async doEmbed({
+     values,
+     headers,
+     abortSignal,
+     providerOptions
+   }) {
+     var _a;
+     if (values.length > this.maxEmbeddingsPerCall) {
+       throw new TooManyEmbeddingValuesForCallError({
+         provider: this.provider,
+         modelId: this.modelId,
+         maxEmbeddingsPerCall: this.maxEmbeddingsPerCall,
+         values
+       });
+     }
+     const openaiOptions = (_a = await parseProviderOptions3({
+       provider: "openai",
+       providerOptions,
+       schema: openaiEmbeddingProviderOptions
+     })) != null ? _a : {};
+     const {
+       responseHeaders,
+       value: response,
+       rawValue
+     } = await postJsonToApi3({
+       url: this.config.url({
+         path: "/embeddings",
+         modelId: this.modelId
+       }),
+       headers: combineHeaders3(this.config.headers(), headers),
+       body: {
+         model: this.modelId,
+         input: values,
+         encoding_format: "float",
+         dimensions: openaiOptions.dimensions,
+         user: openaiOptions.user
+       },
+       failedResponseHandler: openaiFailedResponseHandler,
+       successfulResponseHandler: createJsonResponseHandler3(
+         openaiTextEmbeddingResponseSchema
+       ),
+       abortSignal,
+       fetch: this.config.fetch
+     });
+     return {
+       embeddings: response.data.map((item) => item.embedding),
+       usage: response.usage ? { tokens: response.usage.prompt_tokens } : void 0,
+       response: { headers: responseHeaders, body: rawValue }
+     };
+   }
+ };
+ var openaiTextEmbeddingResponseSchema = z9.object({
+   data: z9.array(z9.object({ embedding: z9.array(z9.number()) })),
+   usage: z9.object({ prompt_tokens: z9.number() }).nullish()
+ });
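+ // Example (illustrative): batches above 2048 values fail fast with
+ // TooManyEmbeddingValuesForCallError, while a call with
+ // providerOptions: { openai: { dimensions: 256 } } requests shortened
+ // text-embedding-3 vectors.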
+
+ // src/openai-image-model.ts
+ import {
+   combineHeaders as combineHeaders4,
+   createJsonResponseHandler as createJsonResponseHandler4,
+   postJsonToApi as postJsonToApi4
+ } from "@ai-sdk/provider-utils";
+ import { z as z10 } from "zod/v4";
+
+ // src/openai-image-settings.ts
+ var modelMaxImagesPerCall = {
+   "dall-e-3": 1,
+   "dall-e-2": 10,
+   "gpt-image-1": 10
+ };
+ var hasDefaultResponseFormat = /* @__PURE__ */ new Set(["gpt-image-1"]);
+
+ // src/openai-image-model.ts
+ var OpenAIImageModel = class {
+   constructor(modelId, config) {
+     this.modelId = modelId;
+     this.config = config;
+     this.specificationVersion = "v2";
+   }
+   get maxImagesPerCall() {
+     var _a;
+     return (_a = modelMaxImagesPerCall[this.modelId]) != null ? _a : 1;
+   }
+   get provider() {
+     return this.config.provider;
+   }
+   async doGenerate({
+     prompt,
+     n,
+     size,
+     aspectRatio,
+     seed,
+     providerOptions,
+     headers,
+     abortSignal
+   }) {
+     var _a, _b, _c, _d;
+     const warnings = [];
+     if (aspectRatio != null) {
+       warnings.push({
+         type: "unsupported-setting",
+         setting: "aspectRatio",
+         details: "This model does not support aspect ratio. Use `size` instead."
+       });
+     }
+     if (seed != null) {
+       warnings.push({ type: "unsupported-setting", setting: "seed" });
+     }
+     const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
+     const { value: response, responseHeaders } = await postJsonToApi4({
+       url: this.config.url({
+         path: "/images/generations",
+         modelId: this.modelId
+       }),
+       headers: combineHeaders4(this.config.headers(), headers),
+       body: {
+         model: this.modelId,
+         prompt,
+         n,
+         size,
+         ...(_d = providerOptions.openai) != null ? _d : {},
+         ...!hasDefaultResponseFormat.has(this.modelId) ? { response_format: "b64_json" } : {}
+       },
+       failedResponseHandler: openaiFailedResponseHandler,
+       successfulResponseHandler: createJsonResponseHandler4(
+         openaiImageResponseSchema
+       ),
+       abortSignal,
+       fetch: this.config.fetch
+     });
+     return {
+       images: response.data.map((item) => item.b64_json),
+       warnings,
+       response: {
+         timestamp: currentDate,
+         modelId: this.modelId,
+         headers: responseHeaders
+       },
+       providerMetadata: {
+         openai: {
+           images: response.data.map(
+             (item) => item.revised_prompt ? {
+               revisedPrompt: item.revised_prompt
+             } : null
+           )
+         }
+       }
+     };
+   }
+ };
+ var openaiImageResponseSchema = z10.object({
+   data: z10.array(
+     z10.object({ b64_json: z10.string(), revised_prompt: z10.string().optional() })
+   )
+ });
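+ // Example (illustrative): for "dall-e-3" the request body includes
+ // response_format: "b64_json", while "gpt-image-1" (listed in
+ // hasDefaultResponseFormat) already returns base64 and the field is omitted.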
+
+ // src/openai-transcription-model.ts
+ import {
+   combineHeaders as combineHeaders5,
+   convertBase64ToUint8Array,
+   createJsonResponseHandler as createJsonResponseHandler5,
+   parseProviderOptions as parseProviderOptions4,
+   postFormDataToApi
+ } from "@ai-sdk/provider-utils";
+ import { z as z12 } from "zod/v4";
+
+ // src/openai-transcription-options.ts
+ import { z as z11 } from "zod/v4";
+ var openAITranscriptionProviderOptions = z11.object({
+   /**
+    * Additional information to include in the transcription response.
+    */
+   include: z11.array(z11.string()).optional(),
+   /**
+    * The language of the input audio in ISO-639-1 format.
+    */
+   language: z11.string().optional(),
+   /**
+    * An optional text to guide the model's style or continue a previous audio segment.
+    */
+   prompt: z11.string().optional(),
+   /**
+    * The sampling temperature, between 0 and 1.
+    * @default 0
+    */
+   temperature: z11.number().min(0).max(1).default(0).optional(),
+   /**
+    * The timestamp granularities to populate for this transcription.
+    * @default ['segment']
+    */
+   timestampGranularities: z11.array(z11.enum(["word", "segment"])).default(["segment"]).optional()
+ });
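+ // Usage sketch (hypothetical call shape; the exact core API name may differ
+ // by SDK version): these options are read from `providerOptions.openai`, e.g.
+ //   transcribe({ model, audio, providerOptions: {
+ //     openai: { language: "en", timestampGranularities: ["word"] }
+ //   } })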
+
+ // src/openai-transcription-model.ts
+ var languageMap = {
+   afrikaans: "af",
+   arabic: "ar",
+   armenian: "hy",
+   azerbaijani: "az",
+   belarusian: "be",
+   bosnian: "bs",
+   bulgarian: "bg",
+   catalan: "ca",
+   chinese: "zh",
+   croatian: "hr",
+   czech: "cs",
+   danish: "da",
+   dutch: "nl",
+   english: "en",
+   estonian: "et",
+   finnish: "fi",
+   french: "fr",
+   galician: "gl",
+   german: "de",
+   greek: "el",
+   hebrew: "he",
+   hindi: "hi",
+   hungarian: "hu",
+   icelandic: "is",
+   indonesian: "id",
+   italian: "it",
+   japanese: "ja",
+   kannada: "kn",
+   kazakh: "kk",
+   korean: "ko",
+   latvian: "lv",
+   lithuanian: "lt",
+   macedonian: "mk",
+   malay: "ms",
+   marathi: "mr",
+   maori: "mi",
+   nepali: "ne",
+   norwegian: "no",
+   persian: "fa",
+   polish: "pl",
+   portuguese: "pt",
+   romanian: "ro",
+   russian: "ru",
+   serbian: "sr",
+   slovak: "sk",
+   slovenian: "sl",
+   spanish: "es",
+   swahili: "sw",
+   swedish: "sv",
+   tagalog: "tl",
+   tamil: "ta",
+   thai: "th",
+   turkish: "tr",
+   ukrainian: "uk",
+   urdu: "ur",
+   vietnamese: "vi",
+   welsh: "cy"
+ };
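+ // Whisper reports the detected language as an English name (e.g. "german");
+ // this map converts it to the ISO-639-1 code ("de") exposed on the result.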
+ var OpenAITranscriptionModel = class {
+   constructor(modelId, config) {
+     this.modelId = modelId;
+     this.config = config;
+     this.specificationVersion = "v2";
+   }
+   get provider() {
+     return this.config.provider;
+   }
+   async getArgs({
+     audio,
+     mediaType,
+     providerOptions
+   }) {
+     const warnings = [];
+     const openAIOptions = await parseProviderOptions4({
+       provider: "openai",
+       providerOptions,
+       schema: openAITranscriptionProviderOptions
+     });
+     const formData = new FormData();
+     const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([convertBase64ToUint8Array(audio)]);
+     formData.append("model", this.modelId);
+     formData.append("file", new File([blob], "audio", { type: mediaType }));
+     if (openAIOptions) {
+       const transcriptionModelOptions = {
+         include: openAIOptions.include,
+         language: openAIOptions.language,
+         prompt: openAIOptions.prompt,
+         temperature: openAIOptions.temperature,
+         timestamp_granularities: openAIOptions.timestampGranularities
+       };
+       for (const [key, value] of Object.entries(transcriptionModelOptions)) {
+         if (value != null) {
+           formData.append(key, String(value));
+         }
+       }
+     }
+     return {
+       formData,
+       warnings
+     };
+   }
+   async doGenerate(options) {
+     var _a, _b, _c, _d, _e, _f;
+     const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
+     const { formData, warnings } = await this.getArgs(options);
+     const {
+       value: response,
+       responseHeaders,
+       rawValue: rawResponse
+     } = await postFormDataToApi({
+       url: this.config.url({
+         path: "/audio/transcriptions",
+         modelId: this.modelId
+       }),
+       headers: combineHeaders5(this.config.headers(), options.headers),
+       formData,
+       failedResponseHandler: openaiFailedResponseHandler,
+       successfulResponseHandler: createJsonResponseHandler5(
+         openaiTranscriptionResponseSchema
+       ),
+       abortSignal: options.abortSignal,
+       fetch: this.config.fetch
+     });
+     const language = response.language != null && response.language in languageMap ? languageMap[response.language] : void 0;
+     return {
+       text: response.text,
+       segments: (_e = (_d = response.words) == null ? void 0 : _d.map((word) => ({
+         text: word.word,
+         startSecond: word.start,
+         endSecond: word.end
+       }))) != null ? _e : [],
+       language,
+       durationInSeconds: (_f = response.duration) != null ? _f : void 0,
+       warnings,
+       response: {
+         timestamp: currentDate,
+         modelId: this.modelId,
+         headers: responseHeaders,
+         body: rawResponse
+       }
+     };
+   }
+ };
+ var openaiTranscriptionResponseSchema = z12.object({
+   text: z12.string(),
+   language: z12.string().nullish(),
+   duration: z12.number().nullish(),
+   words: z12.array(
+     z12.object({
+       word: z12.string(),
+       start: z12.number(),
+       end: z12.number()
+     })
+   ).nullish()
+ });
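+ // `language`, `duration`, and `words` are nullish because plain JSON
+ // transcription responses contain only `text`; word-level timing is only
+ // present when the API returns the more verbose response variants.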
+
+ // src/openai-speech-model.ts
+ import {
+   combineHeaders as combineHeaders6,
+   createBinaryResponseHandler,
+   parseProviderOptions as parseProviderOptions5,
+   postJsonToApi as postJsonToApi5
+ } from "@ai-sdk/provider-utils";
+ import { z as z13 } from "zod/v4";
+ var OpenAIProviderOptionsSchema = z13.object({
+   instructions: z13.string().nullish(),
+   speed: z13.number().min(0.25).max(4).default(1).nullish()
+ });
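+ // Provider options for speech: `speed` must fall within the 0.25-4 range the
+ // OpenAI speech endpoint accepts; `instructions` carries voice-style guidance.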
+ var OpenAISpeechModel = class {
+   constructor(modelId, config) {
+     this.modelId = modelId;
+     this.config = config;
+     this.specificationVersion = "v2";
+   }
+   get provider() {
+     return this.config.provider;
+   }
+   async getArgs({
+     text,
+     voice = "alloy",
+     outputFormat = "mp3",
+     speed,
+     instructions,
+     language,
+     providerOptions
+   }) {
+     const warnings = [];
+     const openAIOptions = await parseProviderOptions5({
+       provider: "openai",
+       providerOptions,
+       schema: OpenAIProviderOptionsSchema
+     });
+     const requestBody = {
+       model: this.modelId,
+       input: text,
+       voice,
+       response_format: "mp3",
+       speed,
+       instructions
+     };
+     if (outputFormat) {
+       if (["mp3", "opus", "aac", "flac", "wav", "pcm"].includes(outputFormat)) {
+         requestBody.response_format = outputFormat;
+       } else {
+         warnings.push({
+           type: "unsupported-setting",
+           setting: "outputFormat",
+           details: `Unsupported output format: ${outputFormat}. Using mp3 instead.`
+         });
+       }
+     }
+     if (openAIOptions) {
+       // Note: `speechModelOptions` is left empty in this version, so the loop
+       // below is a no-op; the parsed provider options are validated but never
+       // merged into the request body (`speed` and `instructions` reach the
+       // request via the top-level call options above instead).
+       const speechModelOptions = {};
+       for (const key in speechModelOptions) {
+         const value = speechModelOptions[key];
+         if (value !== void 0) {
+           requestBody[key] = value;
+         }
+       }
+     }
+     if (language) {
+       warnings.push({
+         type: "unsupported-setting",
+         setting: "language",
+         details: `OpenAI speech models do not support language selection. Language parameter "${language}" was ignored.`
+       });
+     }
+     return {
+       requestBody,
+       warnings
+     };
+   }
+   async doGenerate(options) {
+     var _a, _b, _c;
+     const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
+     const { requestBody, warnings } = await this.getArgs(options);
+     const {
+       value: audio,
+       responseHeaders,
+       rawValue: rawResponse
+     } = await postJsonToApi5({
+       url: this.config.url({
+         path: "/audio/speech",
+         modelId: this.modelId
+       }),
+       headers: combineHeaders6(this.config.headers(), options.headers),
+       body: requestBody,
+       failedResponseHandler: openaiFailedResponseHandler,
+       successfulResponseHandler: createBinaryResponseHandler(),
+       abortSignal: options.abortSignal,
+       fetch: this.config.fetch
+     });
+     return {
+       audio,
+       warnings,
+       request: {
+         body: JSON.stringify(requestBody)
+       },
+       response: {
+         timestamp: currentDate,
+         modelId: this.modelId,
+         headers: responseHeaders,
+         body: rawResponse
+       }
+     };
+   }
+ };
+
+ // src/responses/openai-responses-language-model.ts
+ import {
+   APICallError
+ } from "@ai-sdk/provider";
+ import {
+   combineHeaders as combineHeaders7,
+   createEventSourceResponseHandler as createEventSourceResponseHandler3,
+   createJsonResponseHandler as createJsonResponseHandler6,
+   generateId as generateId2,
+   parseProviderOptions as parseProviderOptions7,
+   postJsonToApi as postJsonToApi6
+ } from "@ai-sdk/provider-utils";
+ import { z as z15 } from "zod/v4";
+
+ // src/responses/convert-to-openai-responses-messages.ts
+ import {
+   UnsupportedFunctionalityError as UnsupportedFunctionalityError4
+ } from "@ai-sdk/provider";
+ import { parseProviderOptions as parseProviderOptions6 } from "@ai-sdk/provider-utils";
+ import { z as z14 } from "zod/v4";
+ import { convertToBase64 as convertToBase642 } from "@ai-sdk/provider-utils";
+ async function convertToOpenAIResponsesMessages({
+   prompt,
+   systemMessageMode
+ }) {
+   var _a, _b, _c, _d, _e, _f;
+   const messages = [];
+   const warnings = [];
+   for (const { role, content } of prompt) {
+     switch (role) {
+       case "system": {
+         switch (systemMessageMode) {
+           case "system": {
+             messages.push({ role: "system", content });
+             break;
+           }
+           case "developer": {
+             messages.push({ role: "developer", content });
+             break;
+           }
+           case "remove": {
+             warnings.push({
+               type: "other",
+               message: "system messages are removed for this model"
+             });
+             break;
+           }
+           default: {
+             const _exhaustiveCheck = systemMessageMode;
+             throw new Error(
+               `Unsupported system message mode: ${_exhaustiveCheck}`
+             );
+           }
+         }
+         break;
+       }
+       case "user": {
+         messages.push({
+           role: "user",
+           content: content.map((part, index) => {
+             var _a2, _b2, _c2;
+             switch (part.type) {
+               case "text": {
+                 return { type: "input_text", text: part.text };
+               }
+               case "file": {
+                 if (part.mediaType.startsWith("image/")) {
+                   const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
+                   return {
+                     type: "input_image",
+                     ...part.data instanceof URL ? { image_url: part.data.toString() } : typeof part.data === "string" && part.data.startsWith("file-") ? { file_id: part.data } : {
+                       image_url: `data:${mediaType};base64,${part.data}`
+                     },
+                     detail: (_b2 = (_a2 = part.providerOptions) == null ? void 0 : _a2.openai) == null ? void 0 : _b2.imageDetail
+                   };
+                 } else if (part.mediaType === "application/pdf") {
+                   if (part.data instanceof URL) {
+                     throw new UnsupportedFunctionalityError4({
+                       functionality: "PDF file parts with URLs"
+                     });
+                   }
+                   return {
+                     type: "input_file",
+                     ...typeof part.data === "string" && part.data.startsWith("file-") ? { file_id: part.data } : {
+                       filename: (_c2 = part.filename) != null ? _c2 : `part-${index}.pdf`,
+                       file_data: `data:application/pdf;base64,${convertToBase642(part.data)}`
+                     }
+                   };
+                 } else {
+                   throw new UnsupportedFunctionalityError4({
+                     functionality: `file part media type ${part.mediaType}`
+                   });
+                 }
+               }
+             }
+           })
+         });
+         break;
+       }
+       case "assistant": {
+         const reasoningMessages = {};
+         for (const part of content) {
+           switch (part.type) {
+             case "text": {
+               messages.push({
+                 role: "assistant",
+                 content: [{ type: "output_text", text: part.text }],
+                 id: (_c = (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.itemId) != null ? _c : void 0
+               });
+               break;
+             }
+             case "tool-call": {
+               if (part.providerExecuted) {
+                 break;
+               }
+               messages.push({
+                 type: "function_call",
+                 call_id: part.toolCallId,
+                 name: part.toolName,
+                 arguments: JSON.stringify(part.input),
+                 id: (_f = (_e = (_d = part.providerOptions) == null ? void 0 : _d.openai) == null ? void 0 : _e.itemId) != null ? _f : void 0
+               });
+               break;
+             }
+             case "tool-result": {
+               warnings.push({
+                 type: "other",
+                 message: `tool result parts in assistant messages are not supported for OpenAI responses`
+               });
+               break;
+             }
+             case "reasoning": {
+               const providerOptions = await parseProviderOptions6({
+                 provider: "openai",
+                 providerOptions: part.providerOptions,
+                 schema: openaiResponsesReasoningProviderOptionsSchema
+               });
+               const reasoningId = providerOptions == null ? void 0 : providerOptions.itemId;
+               if (reasoningId != null) {
+                 const existingReasoningMessage = reasoningMessages[reasoningId];
+                 const summaryParts = [];
+                 if (part.text.length > 0) {
+                   summaryParts.push({ type: "summary_text", text: part.text });
+                 } else if (existingReasoningMessage !== void 0) {
+                   warnings.push({
+                     type: "other",
+                     message: `Cannot append empty reasoning part to existing reasoning sequence. Skipping reasoning part: ${JSON.stringify(part)}.`
+                   });
+                 }
+                 if (existingReasoningMessage === void 0) {
+                   reasoningMessages[reasoningId] = {
+                     type: "reasoning",
+                     id: reasoningId,
+                     encrypted_content: providerOptions == null ? void 0 : providerOptions.reasoningEncryptedContent,
+                     summary: summaryParts
+                   };
+                   messages.push(reasoningMessages[reasoningId]);
+                 } else {
+                   existingReasoningMessage.summary.push(...summaryParts);
+                 }
+               } else {
+                 warnings.push({
+                   type: "other",
+                   message: `Non-OpenAI reasoning parts are not supported. Skipping reasoning part: ${JSON.stringify(part)}.`
+                 });
+               }
+               break;
+             }
+           }
+         }
+         break;
+       }
+       case "tool": {
+         for (const part of content) {
+           const output = part.output;
+           let contentValue;
+           switch (output.type) {
+             case "text":
+             case "error-text":
+               contentValue = output.value;
+               break;
+             case "content":
+             case "json":
+             case "error-json":
+               contentValue = JSON.stringify(output.value);
+               break;
+           }
+           messages.push({
+             type: "function_call_output",
+             call_id: part.toolCallId,
+             output: contentValue
+           });
+         }
+         break;
+       }
+       default: {
+         const _exhaustiveCheck = role;
+         throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
+       }
+     }
+   }
+   return { messages, warnings };
+ }
+ var openaiResponsesReasoningProviderOptionsSchema = z14.object({
+   itemId: z14.string().nullish(),
+   reasoningEncryptedContent: z14.string().nullish()
+ });
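+ // Reasoning parts that share an `itemId` are merged into a single `reasoning`
+ // input item: the first occurrence creates the item (carrying any encrypted
+ // content) and later non-empty parts append extra `summary_text` entries.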
+
+ // src/responses/map-openai-responses-finish-reason.ts
+ function mapOpenAIResponseFinishReason({
+   finishReason,
+   hasToolCalls
+ }) {
+   switch (finishReason) {
+     case void 0:
+     case null:
+       return hasToolCalls ? "tool-calls" : "stop";
+     case "max_output_tokens":
+       return "length";
+     case "content_filter":
+       return "content-filter";
+     default:
+       return hasToolCalls ? "tool-calls" : "unknown";
+   }
+ }
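+ // Examples: `undefined` with emitted tool calls -> "tool-calls";
+ // "max_output_tokens" -> "length"; an unrecognized reason -> "unknown"
+ // (or "tool-calls" when tool calls were emitted).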
+
+ // src/responses/openai-responses-prepare-tools.ts
+ import {
+   UnsupportedFunctionalityError as UnsupportedFunctionalityError5
+ } from "@ai-sdk/provider";
+ function prepareResponsesTools({
+   tools,
+   toolChoice,
+   strictJsonSchema
+ }) {
+   tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
+   const toolWarnings = [];
+   if (tools == null) {
+     return { tools: void 0, toolChoice: void 0, toolWarnings };
+   }
+   const openaiTools = [];
+   for (const tool of tools) {
+     switch (tool.type) {
+       case "function":
+         openaiTools.push({
+           type: "function",
+           name: tool.name,
+           description: tool.description,
+           parameters: tool.inputSchema,
+           strict: strictJsonSchema
+         });
+         break;
+       case "provider-defined":
+         switch (tool.id) {
+           case "openai.file_search": {
+             const args = fileSearchArgsSchema.parse(tool.args);
+             openaiTools.push({
+               type: "file_search",
+               vector_store_ids: args.vectorStoreIds,
+               max_num_results: args.maxNumResults,
+               ranking_options: args.ranking ? { ranker: args.ranking.ranker } : void 0,
+               filters: args.filters
+             });
+             break;
+           }
+           case "openai.web_search_preview":
+             openaiTools.push({
+               type: "web_search_preview",
+               search_context_size: tool.args.searchContextSize,
+               user_location: tool.args.userLocation
+             });
+             break;
+           default:
+             toolWarnings.push({ type: "unsupported-tool", tool });
+             break;
+         }
+         break;
+       default:
+         toolWarnings.push({ type: "unsupported-tool", tool });
+         break;
+     }
+   }
+   if (toolChoice == null) {
+     return { tools: openaiTools, toolChoice: void 0, toolWarnings };
+   }
+   const type = toolChoice.type;
+   switch (type) {
+     case "auto":
+     case "none":
+     case "required":
+       return { tools: openaiTools, toolChoice: type, toolWarnings };
+     case "tool":
+       return {
+         tools: openaiTools,
+         toolChoice: toolChoice.toolName === "file_search" ? { type: "file_search" } : toolChoice.toolName === "web_search_preview" ? { type: "web_search_preview" } : { type: "function", name: toolChoice.toolName },
+         toolWarnings
+       };
+     default: {
+       const _exhaustiveCheck = type;
+       throw new UnsupportedFunctionalityError5({
+         functionality: `tool choice type: ${_exhaustiveCheck}`
+       });
+     }
+   }
+ }
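+ // Function tools map 1:1 onto Responses API `function` tools (with `strict`
+ // driven by the `strictJsonSchema` provider option); unsupported tool types
+ // are dropped and reported via `toolWarnings` instead of failing the call.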
+
+ // src/responses/openai-responses-language-model.ts
+ var OpenAIResponsesLanguageModel = class {
+   constructor(modelId, config) {
+     this.specificationVersion = "v2";
+     this.supportedUrls = {
+       "image/*": [/^https?:\/\/.*$/]
+     };
+     this.modelId = modelId;
+     this.config = config;
+   }
+   get provider() {
+     return this.config.provider;
+   }
+   async getArgs({
+     maxOutputTokens,
+     temperature,
+     stopSequences,
+     topP,
+     topK,
+     presencePenalty,
+     frequencyPenalty,
+     seed,
+     prompt,
+     providerOptions,
+     tools,
+     toolChoice,
+     responseFormat
+   }) {
+     var _a, _b;
+     const warnings = [];
+     const modelConfig = getResponsesModelConfig(this.modelId);
+     if (topK != null) {
+       warnings.push({ type: "unsupported-setting", setting: "topK" });
+     }
+     if (seed != null) {
+       warnings.push({ type: "unsupported-setting", setting: "seed" });
+     }
+     if (presencePenalty != null) {
+       warnings.push({
+         type: "unsupported-setting",
+         setting: "presencePenalty"
+       });
+     }
+     if (frequencyPenalty != null) {
+       warnings.push({
+         type: "unsupported-setting",
+         setting: "frequencyPenalty"
+       });
+     }
+     if (stopSequences != null) {
+       warnings.push({ type: "unsupported-setting", setting: "stopSequences" });
+     }
+     const { messages, warnings: messageWarnings } = await convertToOpenAIResponsesMessages({
+       prompt,
+       systemMessageMode: modelConfig.systemMessageMode
+     });
+     warnings.push(...messageWarnings);
+     const openaiOptions = await parseProviderOptions7({
+       provider: "openai",
+       providerOptions,
+       schema: openaiResponsesProviderOptionsSchema
+     });
+     const strictJsonSchema = (_a = openaiOptions == null ? void 0 : openaiOptions.strictJsonSchema) != null ? _a : false;
+     const baseArgs = {
+       model: this.modelId,
+       input: messages,
+       temperature,
+       top_p: topP,
+       max_output_tokens: maxOutputTokens,
+       ...((responseFormat == null ? void 0 : responseFormat.type) === "json" || (openaiOptions == null ? void 0 : openaiOptions.textVerbosity)) && {
+         text: {
+           ...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
+             format: responseFormat.schema != null ? {
+               type: "json_schema",
+               strict: strictJsonSchema,
+               name: (_b = responseFormat.name) != null ? _b : "response",
+               description: responseFormat.description,
+               schema: responseFormat.schema
+             } : { type: "json_object" }
+           },
+           ...(openaiOptions == null ? void 0 : openaiOptions.textVerbosity) && {
+             verbosity: openaiOptions.textVerbosity
+           }
+         }
+       },
+       // provider options:
+       metadata: openaiOptions == null ? void 0 : openaiOptions.metadata,
+       parallel_tool_calls: openaiOptions == null ? void 0 : openaiOptions.parallelToolCalls,
+       previous_response_id: openaiOptions == null ? void 0 : openaiOptions.previousResponseId,
+       store: openaiOptions == null ? void 0 : openaiOptions.store,
+       user: openaiOptions == null ? void 0 : openaiOptions.user,
+       instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
+       service_tier: openaiOptions == null ? void 0 : openaiOptions.serviceTier,
+       include: openaiOptions == null ? void 0 : openaiOptions.include,
+       // model-specific settings:
+       ...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
+         reasoning: {
+           ...(openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
+             effort: openaiOptions.reasoningEffort
+           },
+           ...(openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null && {
+             summary: openaiOptions.reasoningSummary
+           }
+         }
+       },
+       ...modelConfig.requiredAutoTruncation && {
+         truncation: "auto"
+       }
+     };
+     if (modelConfig.isReasoningModel) {
+       if (baseArgs.temperature != null) {
+         baseArgs.temperature = void 0;
+         warnings.push({
+           type: "unsupported-setting",
+           setting: "temperature",
+           details: "temperature is not supported for reasoning models"
+         });
+       }
+       if (baseArgs.top_p != null) {
+         baseArgs.top_p = void 0;
+         warnings.push({
+           type: "unsupported-setting",
+           setting: "topP",
+           details: "topP is not supported for reasoning models"
+         });
+       }
+     } else {
+       if ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null) {
+         warnings.push({
+           type: "unsupported-setting",
+           setting: "reasoningEffort",
+           details: "reasoningEffort is not supported for non-reasoning models"
+         });
+       }
+       if ((openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) {
+         warnings.push({
+           type: "unsupported-setting",
+           setting: "reasoningSummary",
+           details: "reasoningSummary is not supported for non-reasoning models"
+         });
+       }
+     }
+     if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "flex" && !supportsFlexProcessing2(this.modelId)) {
+       warnings.push({
+         type: "unsupported-setting",
+         setting: "serviceTier",
+         details: "flex processing is only available for o3, o4-mini, and gpt-5 models"
+       });
+       delete baseArgs.service_tier;
+     }
+     if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "priority" && !supportsPriorityProcessing2(this.modelId)) {
+       warnings.push({
+         type: "unsupported-setting",
+         setting: "serviceTier",
+         details: "priority processing is only available for supported models (gpt-4, gpt-5, gpt-5-mini, o3, o4-mini) and requires Enterprise access. gpt-5-nano is not supported"
+       });
+       delete baseArgs.service_tier;
+     }
+     const {
+       tools: openaiTools,
+       toolChoice: openaiToolChoice,
+       toolWarnings
+     } = prepareResponsesTools({
+       tools,
+       toolChoice,
+       strictJsonSchema
+     });
+     return {
+       args: {
+         ...baseArgs,
+         tools: openaiTools,
+         tool_choice: openaiToolChoice
+       },
+       warnings: [...warnings, ...toolWarnings]
+     };
+   }
+   async doGenerate(options) {
+     var _a, _b, _c, _d, _e, _f, _g, _h, _i;
+     const { args: body, warnings } = await this.getArgs(options);
+     const url = this.config.url({
+       path: "/responses",
+       modelId: this.modelId
+     });
+     const {
+       responseHeaders,
+       value: response,
+       rawValue: rawResponse
+     } = await postJsonToApi6({
+       url,
+       headers: combineHeaders7(this.config.headers(), options.headers),
+       body,
+       failedResponseHandler: openaiFailedResponseHandler,
+       successfulResponseHandler: createJsonResponseHandler6(
+         z15.object({
+           id: z15.string(),
+           created_at: z15.number(),
+           error: z15.object({
+             code: z15.string(),
+             message: z15.string()
+           }).nullish(),
+           model: z15.string(),
+           output: z15.array(
+             z15.discriminatedUnion("type", [
+               z15.object({
+                 type: z15.literal("message"),
+                 role: z15.literal("assistant"),
+                 id: z15.string(),
+                 content: z15.array(
+                   z15.object({
+                     type: z15.literal("output_text"),
+                     text: z15.string(),
+                     annotations: z15.array(
+                       z15.object({
+                         type: z15.literal("url_citation"),
+                         start_index: z15.number(),
+                         end_index: z15.number(),
+                         url: z15.string(),
+                         title: z15.string()
+                       })
+                     )
+                   })
+                 )
+               }),
+               z15.object({
+                 type: z15.literal("function_call"),
+                 call_id: z15.string(),
+                 name: z15.string(),
+                 arguments: z15.string(),
+                 id: z15.string()
+               }),
+               z15.object({
+                 type: z15.literal("web_search_call"),
+                 id: z15.string(),
+                 status: z15.string().optional()
+               }),
+               z15.object({
+                 type: z15.literal("computer_call"),
+                 id: z15.string(),
+                 status: z15.string().optional()
+               }),
+               z15.object({
+                 type: z15.literal("file_search_call"),
+                 id: z15.string(),
+                 status: z15.string().optional()
+               }),
+               z15.object({
+                 type: z15.literal("reasoning"),
+                 id: z15.string(),
+                 encrypted_content: z15.string().nullish(),
+                 summary: z15.array(
+                   z15.object({
+                     type: z15.literal("summary_text"),
+                     text: z15.string()
+                   })
+                 )
+               })
+             ])
+           ),
+           incomplete_details: z15.object({ reason: z15.string() }).nullable(),
+           usage: usageSchema2
+         })
+       ),
+       abortSignal: options.abortSignal,
+       fetch: this.config.fetch
+     });
+     if (response.error) {
+       throw new APICallError({
+         message: response.error.message,
+         url,
+         requestBodyValues: body,
+         statusCode: 400,
+         responseHeaders,
+         responseBody: rawResponse,
+         isRetryable: false
+       });
+     }
+     const content = [];
+     for (const part of response.output) {
+       switch (part.type) {
+         case "reasoning": {
+           if (part.summary.length === 0) {
+             part.summary.push({ type: "summary_text", text: "" });
+           }
+           for (const summary of part.summary) {
+             content.push({
+               type: "reasoning",
+               text: summary.text,
+               providerMetadata: {
+                 openai: {
+                   itemId: part.id,
+                   reasoningEncryptedContent: (_a = part.encrypted_content) != null ? _a : null
+                 }
+               }
+             });
+           }
+           break;
+         }
+         case "message": {
+           for (const contentPart of part.content) {
+             content.push({
+               type: "text",
+               text: contentPart.text,
+               providerMetadata: {
+                 openai: {
+                   itemId: part.id
+                 }
+               }
+             });
+             for (const annotation of contentPart.annotations) {
+               content.push({
+                 type: "source",
+                 sourceType: "url",
+                 id: (_d = (_c = (_b = this.config).generateId) == null ? void 0 : _c.call(_b)) != null ? _d : generateId2(),
+                 url: annotation.url,
+                 title: annotation.title
+               });
+             }
+           }
+           break;
+         }
+         case "function_call": {
+           content.push({
+             type: "tool-call",
+             toolCallId: part.call_id,
+             toolName: part.name,
+             input: part.arguments,
+             providerMetadata: {
+               openai: {
+                 itemId: part.id
+               }
+             }
+           });
+           break;
+         }
+         case "web_search_call": {
+           content.push({
+             type: "tool-call",
+             toolCallId: part.id,
+             toolName: "web_search_preview",
+             input: "",
+             providerExecuted: true
+           });
+           content.push({
+             type: "tool-result",
+             toolCallId: part.id,
+             toolName: "web_search_preview",
+             result: { status: part.status || "completed" },
+             providerExecuted: true
+           });
+           break;
+         }
+         case "computer_call": {
+           content.push({
+             type: "tool-call",
+             toolCallId: part.id,
+             toolName: "computer_use",
+             input: "",
+             providerExecuted: true
+           });
+           content.push({
+             type: "tool-result",
+             toolCallId: part.id,
+             toolName: "computer_use",
+             result: {
+               type: "computer_use_tool_result",
+               status: part.status || "completed"
+             },
+             providerExecuted: true
+           });
+           break;
+         }
+         case "file_search_call": {
+           content.push({
+             type: "tool-call",
+             toolCallId: part.id,
+             toolName: "file_search",
+             input: "",
+             providerExecuted: true
+           });
+           content.push({
+             type: "tool-result",
+             toolCallId: part.id,
+             toolName: "file_search",
+             result: {
+               type: "file_search_tool_result",
+               status: part.status || "completed"
+             },
+             providerExecuted: true
+           });
+           break;
+         }
+       }
+     }
+     return {
+       content,
+       finishReason: mapOpenAIResponseFinishReason({
+         finishReason: (_e = response.incomplete_details) == null ? void 0 : _e.reason,
+         hasToolCalls: content.some((part) => part.type === "tool-call")
+       }),
+       usage: {
+         inputTokens: response.usage.input_tokens,
+         outputTokens: response.usage.output_tokens,
+         totalTokens: response.usage.input_tokens + response.usage.output_tokens,
+         reasoningTokens: (_g = (_f = response.usage.output_tokens_details) == null ? void 0 : _f.reasoning_tokens) != null ? _g : void 0,
+         cachedInputTokens: (_i = (_h = response.usage.input_tokens_details) == null ? void 0 : _h.cached_tokens) != null ? _i : void 0
+       },
+       request: { body },
+       response: {
+         id: response.id,
+         timestamp: new Date(response.created_at * 1e3),
+         modelId: response.model,
+         headers: responseHeaders,
+         body: rawResponse
+       },
+       providerMetadata: {
+         openai: {
+           responseId: response.id
+         }
+       },
+       warnings
+     };
+   }
+   async doStream(options) {
+     const { args: body, warnings } = await this.getArgs(options);
+     const { responseHeaders, value: response } = await postJsonToApi6({
+       url: this.config.url({
+         path: "/responses",
+         modelId: this.modelId
+       }),
+       headers: combineHeaders7(this.config.headers(), options.headers),
+       body: {
+         ...body,
+         stream: true
+       },
+       failedResponseHandler: openaiFailedResponseHandler,
+       successfulResponseHandler: createEventSourceResponseHandler3(
+         openaiResponsesChunkSchema
+       ),
+       abortSignal: options.abortSignal,
+       fetch: this.config.fetch
+     });
+     const self = this;
+     let finishReason = "unknown";
+     const usage = {
+       inputTokens: void 0,
+       outputTokens: void 0,
+       totalTokens: void 0
+     };
+     let responseId = null;
+     const ongoingToolCalls = {};
+     let hasToolCalls = false;
+     const activeReasoning = {};
+     return {
+       stream: response.pipeThrough(
+         new TransformStream({
+           start(controller) {
+             controller.enqueue({ type: "stream-start", warnings });
+           },
+           transform(chunk, controller) {
+             var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
+             if (options.includeRawChunks) {
+               controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
+             }
+             if (!chunk.success) {
+               finishReason = "error";
+               controller.enqueue({ type: "error", error: chunk.error });
+               return;
+             }
+             const value = chunk.value;
+             if (isResponseOutputItemAddedChunk(value)) {
+               if (value.item.type === "function_call") {
+                 ongoingToolCalls[value.output_index] = {
+                   toolName: value.item.name,
+                   toolCallId: value.item.call_id
+                 };
+                 controller.enqueue({
+                   type: "tool-input-start",
+                   id: value.item.call_id,
+                   toolName: value.item.name
+                 });
+               } else if (value.item.type === "web_search_call") {
+                 ongoingToolCalls[value.output_index] = {
+                   toolName: "web_search_preview",
+                   toolCallId: value.item.id
+                 };
+                 controller.enqueue({
+                   type: "tool-input-start",
+                   id: value.item.id,
+                   toolName: "web_search_preview"
+                 });
+               } else if (value.item.type === "computer_call") {
+                 ongoingToolCalls[value.output_index] = {
+                   toolName: "computer_use",
+                   toolCallId: value.item.id
+                 };
+                 controller.enqueue({
+                   type: "tool-input-start",
+                   id: value.item.id,
+                   toolName: "computer_use"
+                 });
+               } else if (value.item.type === "message") {
+                 controller.enqueue({
+                   type: "text-start",
+                   id: value.item.id,
+                   providerMetadata: {
+                     openai: {
+                       itemId: value.item.id
+                     }
+                   }
+                 });
+               } else if (isResponseOutputItemAddedReasoningChunk(value)) {
+                 activeReasoning[value.item.id] = {
+                   encryptedContent: value.item.encrypted_content,
+                   summaryParts: [0]
+                 };
+                 controller.enqueue({
+                   type: "reasoning-start",
+                   id: `${value.item.id}:0`,
+                   providerMetadata: {
+                     openai: {
+                       itemId: value.item.id,
+                       reasoningEncryptedContent: (_a = value.item.encrypted_content) != null ? _a : null
+                     }
+                   }
+                 });
+               }
+             } else if (isResponseOutputItemDoneChunk(value)) {
+               if (value.item.type === "function_call") {
+                 ongoingToolCalls[value.output_index] = void 0;
+                 hasToolCalls = true;
+                 controller.enqueue({
+                   type: "tool-input-end",
+                   id: value.item.call_id
+                 });
+                 controller.enqueue({
+                   type: "tool-call",
+                   toolCallId: value.item.call_id,
+                   toolName: value.item.name,
+                   input: value.item.arguments,
+                   providerMetadata: {
+                     openai: {
+                       itemId: value.item.id
+                     }
+                   }
+                 });
+               } else if (value.item.type === "web_search_call") {
+                 ongoingToolCalls[value.output_index] = void 0;
+                 hasToolCalls = true;
+                 controller.enqueue({
+                   type: "tool-input-end",
+                   id: value.item.id
+                 });
+                 controller.enqueue({
+                   type: "tool-call",
+                   toolCallId: value.item.id,
+                   toolName: "web_search_preview",
+                   input: "",
+                   providerExecuted: true
+                 });
+                 controller.enqueue({
+                   type: "tool-result",
+                   toolCallId: value.item.id,
+                   toolName: "web_search_preview",
+                   result: {
+                     type: "web_search_tool_result",
+                     status: value.item.status || "completed"
+                   },
+                   providerExecuted: true
+                 });
+               } else if (value.item.type === "computer_call") {
+                 ongoingToolCalls[value.output_index] = void 0;
+                 hasToolCalls = true;
+                 controller.enqueue({
+                   type: "tool-input-end",
+                   id: value.item.id
+                 });
+                 controller.enqueue({
+                   type: "tool-call",
+                   toolCallId: value.item.id,
+                   toolName: "computer_use",
+                   input: "",
+                   providerExecuted: true
+                 });
+                 controller.enqueue({
+                   type: "tool-result",
+                   toolCallId: value.item.id,
+                   toolName: "computer_use",
+                   result: {
+                     type: "computer_use_tool_result",
+                     status: value.item.status || "completed"
+                   },
+                   providerExecuted: true
+                 });
+               } else if (value.item.type === "message") {
+                 controller.enqueue({
+                   type: "text-end",
+                   id: value.item.id
+                 });
+               } else if (isResponseOutputItemDoneReasoningChunk(value)) {
+                 const activeReasoningPart = activeReasoning[value.item.id];
+                 for (const summaryIndex of activeReasoningPart.summaryParts) {
+                   controller.enqueue({
+                     type: "reasoning-end",
+                     id: `${value.item.id}:${summaryIndex}`,
+                     providerMetadata: {
+                       openai: {
+                         itemId: value.item.id,
+                         reasoningEncryptedContent: (_b = value.item.encrypted_content) != null ? _b : null
+                       }
+                     }
+                   });
+                 }
+                 delete activeReasoning[value.item.id];
+               }
+             } else if (isResponseFunctionCallArgumentsDeltaChunk(value)) {
+               const toolCall = ongoingToolCalls[value.output_index];
+               if (toolCall != null) {
+                 controller.enqueue({
+                   type: "tool-input-delta",
+                   id: toolCall.toolCallId,
+                   delta: value.delta
+                 });
+               }
+             } else if (isResponseCreatedChunk(value)) {
+               responseId = value.response.id;
+               controller.enqueue({
+                 type: "response-metadata",
+                 id: value.response.id,
+                 timestamp: new Date(value.response.created_at * 1e3),
+                 modelId: value.response.model
+               });
+             } else if (isTextDeltaChunk(value)) {
+               controller.enqueue({
+                 type: "text-delta",
+                 id: value.item_id,
+                 delta: value.delta
+               });
+             } else if (isResponseReasoningSummaryPartAddedChunk(value)) {
+               if (value.summary_index > 0) {
+                 (_c = activeReasoning[value.item_id]) == null ? void 0 : _c.summaryParts.push(
+                   value.summary_index
+                 );
+                 controller.enqueue({
+                   type: "reasoning-start",
+                   id: `${value.item_id}:${value.summary_index}`,
+                   providerMetadata: {
+                     openai: {
+                       itemId: value.item_id,
+                       reasoningEncryptedContent: (_e = (_d = activeReasoning[value.item_id]) == null ? void 0 : _d.encryptedContent) != null ? _e : null
+                     }
+                   }
+                 });
+               }
+             } else if (isResponseReasoningSummaryTextDeltaChunk(value)) {
+               controller.enqueue({
+                 type: "reasoning-delta",
+                 id: `${value.item_id}:${value.summary_index}`,
+                 delta: value.delta,
+                 providerMetadata: {
+                   openai: {
+                     itemId: value.item_id
+                   }
+                 }
+               });
+             } else if (isResponseFinishedChunk(value)) {
+               finishReason = mapOpenAIResponseFinishReason({
+                 finishReason: (_f = value.response.incomplete_details) == null ? void 0 : _f.reason,
+                 hasToolCalls
+               });
+               usage.inputTokens = value.response.usage.input_tokens;
+               usage.outputTokens = value.response.usage.output_tokens;
+               usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens;
+               usage.reasoningTokens = (_h = (_g = value.response.usage.output_tokens_details) == null ? void 0 : _g.reasoning_tokens) != null ? _h : void 0;
+               usage.cachedInputTokens = (_j = (_i = value.response.usage.input_tokens_details) == null ? void 0 : _i.cached_tokens) != null ? _j : void 0;
+             } else if (isResponseAnnotationAddedChunk(value)) {
+               controller.enqueue({
+                 type: "source",
+                 sourceType: "url",
+                 id: (_m = (_l = (_k = self.config).generateId) == null ? void 0 : _l.call(_k)) != null ? _m : generateId2(),
+                 url: value.annotation.url,
+                 title: value.annotation.title
+               });
+             } else if (isErrorChunk(value)) {
+               controller.enqueue({ type: "error", error: value });
+             }
+           },
+           flush(controller) {
+             controller.enqueue({
+               type: "finish",
+               finishReason,
+               usage,
+               providerMetadata: {
+                 openai: {
+                   responseId
+                 }
+               }
+             });
+           }
+         })
+       ),
+       request: { body },
+       response: { headers: responseHeaders }
+     };
+   }
+ };
+ var usageSchema2 = z15.object({
+   input_tokens: z15.number(),
+   input_tokens_details: z15.object({ cached_tokens: z15.number().nullish() }).nullish(),
+   output_tokens: z15.number(),
+   output_tokens_details: z15.object({ reasoning_tokens: z15.number().nullish() }).nullish()
+ });
+ var textDeltaChunkSchema = z15.object({
+   type: z15.literal("response.output_text.delta"),
+   item_id: z15.string(),
+   delta: z15.string()
+ });
+ var errorChunkSchema = z15.object({
+   type: z15.literal("error"),
+   code: z15.string(),
+   message: z15.string(),
+   param: z15.string().nullish(),
+   sequence_number: z15.number()
+ });
+ var responseFinishedChunkSchema = z15.object({
+   type: z15.enum(["response.completed", "response.incomplete"]),
+   response: z15.object({
+     incomplete_details: z15.object({ reason: z15.string() }).nullish(),
+     usage: usageSchema2
+   })
+ });
+ var responseCreatedChunkSchema = z15.object({
+   type: z15.literal("response.created"),
+   response: z15.object({
+     id: z15.string(),
+     created_at: z15.number(),
+     model: z15.string()
+   })
+ });
+ var responseOutputItemAddedSchema = z15.object({
+   type: z15.literal("response.output_item.added"),
+   output_index: z15.number(),
+   item: z15.discriminatedUnion("type", [
+     z15.object({
+       type: z15.literal("message"),
+       id: z15.string()
+     }),
+     z15.object({
+       type: z15.literal("reasoning"),
+       id: z15.string(),
+       encrypted_content: z15.string().nullish()
+     }),
+     z15.object({
+       type: z15.literal("function_call"),
+       id: z15.string(),
+       call_id: z15.string(),
+       name: z15.string(),
+       arguments: z15.string()
+     }),
+     z15.object({
+       type: z15.literal("web_search_call"),
+       id: z15.string(),
+       status: z15.string()
+     }),
+     z15.object({
+       type: z15.literal("computer_call"),
+       id: z15.string(),
+       status: z15.string()
+     }),
+     z15.object({
+       type: z15.literal("file_search_call"),
+       id: z15.string(),
+       status: z15.string()
+     })
+   ])
+ });
+ var responseOutputItemDoneSchema = z15.object({
+   type: z15.literal("response.output_item.done"),
+   output_index: z15.number(),
+   item: z15.discriminatedUnion("type", [
+     z15.object({
+       type: z15.literal("message"),
+       id: z15.string()
+     }),
+     z15.object({
+       type: z15.literal("reasoning"),
+       id: z15.string(),
+       encrypted_content: z15.string().nullish()
+     }),
+     z15.object({
+       type: z15.literal("function_call"),
+       id: z15.string(),
+       call_id: z15.string(),
+       name: z15.string(),
+       arguments: z15.string(),
+       status: z15.literal("completed")
+     }),
+     z15.object({
+       type: z15.literal("web_search_call"),
+       id: z15.string(),
+       status: z15.literal("completed")
+     }),
+     z15.object({
+       type: z15.literal("computer_call"),
+       id: z15.string(),
+       status: z15.literal("completed")
+     }),
+     z15.object({
+       type: z15.literal("file_search_call"),
+       id: z15.string(),
+       status: z15.literal("completed")
+     })
+   ])
+ });
+ var responseFunctionCallArgumentsDeltaSchema = z15.object({
+   type: z15.literal("response.function_call_arguments.delta"),
+   item_id: z15.string(),
+   output_index: z15.number(),
+   delta: z15.string()
+ });
+ var responseAnnotationAddedSchema = z15.object({
+   type: z15.literal("response.output_text.annotation.added"),
+   annotation: z15.object({
+     type: z15.literal("url_citation"),
+     url: z15.string(),
+     title: z15.string()
+   })
+ });
+ var responseReasoningSummaryPartAddedSchema = z15.object({
+   type: z15.literal("response.reasoning_summary_part.added"),
+   item_id: z15.string(),
+   summary_index: z15.number()
+ });
+ var responseReasoningSummaryTextDeltaSchema = z15.object({
+   type: z15.literal("response.reasoning_summary_text.delta"),
+   item_id: z15.string(),
+   summary_index: z15.number(),
+   delta: z15.string()
+ });
+ var openaiResponsesChunkSchema = z15.union([
+   textDeltaChunkSchema,
+   responseFinishedChunkSchema,
+   responseCreatedChunkSchema,
+   responseOutputItemAddedSchema,
+   responseOutputItemDoneSchema,
+   responseFunctionCallArgumentsDeltaSchema,
+   responseAnnotationAddedSchema,
+   responseReasoningSummaryPartAddedSchema,
+   responseReasoningSummaryTextDeltaSchema,
+   errorChunkSchema,
+   z15.object({ type: z15.string() }).loose()
+   // fallback for unknown chunks
+ ]);
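+ // The trailing loose object keeps unknown SSE event types from failing schema
+ // validation; the type guards below simply never match them, so the stream
+ // transform ignores them.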
+ function isTextDeltaChunk(chunk) {
+   return chunk.type === "response.output_text.delta";
+ }
+ function isResponseOutputItemDoneChunk(chunk) {
+   return chunk.type === "response.output_item.done";
+ }
+ function isResponseOutputItemDoneReasoningChunk(chunk) {
+   return isResponseOutputItemDoneChunk(chunk) && chunk.item.type === "reasoning";
+ }
+ function isResponseFinishedChunk(chunk) {
+   return chunk.type === "response.completed" || chunk.type === "response.incomplete";
+ }
+ function isResponseCreatedChunk(chunk) {
+   return chunk.type === "response.created";
+ }
+ function isResponseFunctionCallArgumentsDeltaChunk(chunk) {
+   return chunk.type === "response.function_call_arguments.delta";
+ }
+ function isResponseOutputItemAddedChunk(chunk) {
+   return chunk.type === "response.output_item.added";
+ }
+ function isResponseOutputItemAddedReasoningChunk(chunk) {
+   return isResponseOutputItemAddedChunk(chunk) && chunk.item.type === "reasoning";
+ }
+ function isResponseAnnotationAddedChunk(chunk) {
+   return chunk.type === "response.output_text.annotation.added";
+ }
+ function isResponseReasoningSummaryPartAddedChunk(chunk) {
+   return chunk.type === "response.reasoning_summary_part.added";
+ }
+ function isResponseReasoningSummaryTextDeltaChunk(chunk) {
+   return chunk.type === "response.reasoning_summary_text.delta";
+ }
+ function isErrorChunk(chunk) {
+   return chunk.type === "error";
+ }
+ function getResponsesModelConfig(modelId) {
+   if (modelId.startsWith("o") || modelId.startsWith("gpt-5") || modelId.startsWith("codex-") || modelId.startsWith("computer-use")) {
+     if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {
+       return {
+         isReasoningModel: true,
+         systemMessageMode: "remove",
+         requiredAutoTruncation: false
+       };
+     }
+     return {
+       isReasoningModel: true,
+       systemMessageMode: "developer",
+       requiredAutoTruncation: false
+     };
+   }
+   return {
+     isReasoningModel: false,
+     systemMessageMode: "system",
+     requiredAutoTruncation: false
+   };
+ }
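+ // Examples: "o1-mini" -> reasoning model, system messages removed;
+ // "o3" / "gpt-5" -> reasoning model, system messages sent as "developer";
+ // "gpt-4.1" -> non-reasoning, system messages sent as "system".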
+ function supportsFlexProcessing2(modelId) {
+   return modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5");
+ }
+ function supportsPriorityProcessing2(modelId) {
+   return modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
+ }
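+ // Example: "gpt-5-mini" qualifies for priority processing via its own prefix
+ // check, while "gpt-5-nano" is explicitly excluded from the generic "gpt-5"
+ // clause, matching the serviceTier warning emitted in `getArgs`.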
+ var openaiResponsesProviderOptionsSchema = z15.object({
+   metadata: z15.any().nullish(),
+   parallelToolCalls: z15.boolean().nullish(),
+   previousResponseId: z15.string().nullish(),
+   store: z15.boolean().nullish(),
+   user: z15.string().nullish(),
+   reasoningEffort: z15.string().nullish(),
+   strictJsonSchema: z15.boolean().nullish(),
+   instructions: z15.string().nullish(),
+   reasoningSummary: z15.string().nullish(),
+   serviceTier: z15.enum(["auto", "flex", "priority"]).nullish(),
+   include: z15.array(z15.enum(["reasoning.encrypted_content", "file_search_call.results"])).nullish(),
+   textVerbosity: z15.enum(["low", "medium", "high"]).nullish()
+ });
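+ // Usage sketch (hypothetical values): these are read from
+ // `providerOptions.openai` on a call, e.g.
+ //   { openai: { reasoningEffort: "low", serviceTier: "flex",
+ //     include: ["reasoning.encrypted_content"] } }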
+ export {
+   OpenAIChatLanguageModel,
+   OpenAICompletionLanguageModel,
+   OpenAIEmbeddingModel,
+   OpenAIImageModel,
+   OpenAIResponsesLanguageModel,
+   OpenAISpeechModel,
+   OpenAITranscriptionModel,
+   hasDefaultResponseFormat,
+   modelMaxImagesPerCall,
+   openAITranscriptionProviderOptions,
+   openaiCompletionProviderOptions,
+   openaiEmbeddingProviderOptions,
+   openaiProviderOptions
+ };
+ //# sourceMappingURL=index.mjs.map