@zenning/openai 1.6.0 → 2.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,3849 @@
1
+ // src/chat/openai-chat-language-model.ts
2
+ import {
3
+ InvalidResponseDataError
4
+ } from "@zenning/provider";
5
+ import {
6
+ combineHeaders,
7
+ createEventSourceResponseHandler,
8
+ createJsonResponseHandler,
9
+ generateId,
10
+ isParsableJson,
11
+ parseProviderOptions,
12
+ postJsonToApi
13
+ } from "@ai-sdk/provider-utils";
14
+ import { z as z3 } from "zod/v4";
15
+
16
+ // src/openai-error.ts
17
+ import { z } from "zod/v4";
18
+ import { createJsonErrorResponseHandler } from "@ai-sdk/provider-utils";
19
+ var openaiErrorDataSchema = z.object({
20
+ error: z.object({
21
+ message: z.string(),
22
+ // The additional information below is handled loosely to support
23
+ // OpenAI-compatible providers that have slightly different error
24
+ // responses:
25
+ type: z.string().nullish(),
26
+ param: z.any().nullish(),
27
+ code: z.union([z.string(), z.number()]).nullish()
28
+ })
29
+ });
30
+ var openaiFailedResponseHandler = createJsonErrorResponseHandler({
31
+ errorSchema: openaiErrorDataSchema,
32
+ errorToMessage: (data) => data.error.message
33
+ });
34
+
35
+ // src/chat/convert-to-openai-chat-messages.ts
36
+ import {
37
+ UnsupportedFunctionalityError
38
+ } from "@zenning/provider";
39
+ import { convertToBase64 } from "@ai-sdk/provider-utils";
40
+ function convertToOpenAIChatMessages({
41
+ prompt,
42
+ systemMessageMode = "system"
43
+ }) {
44
+ const messages = [];
45
+ const warnings = [];
46
+ for (const { role, content } of prompt) {
47
+ switch (role) {
48
+ case "system": {
49
+ switch (systemMessageMode) {
50
+ case "system": {
51
+ messages.push({ role: "system", content });
52
+ break;
53
+ }
54
+ case "developer": {
55
+ messages.push({ role: "developer", content });
56
+ break;
57
+ }
58
+ case "remove": {
59
+ warnings.push({
60
+ type: "other",
61
+ message: "system messages are removed for this model"
62
+ });
63
+ break;
64
+ }
65
+ default: {
66
+ const _exhaustiveCheck = systemMessageMode;
67
+ throw new Error(
68
+ `Unsupported system message mode: ${_exhaustiveCheck}`
69
+ );
70
+ }
71
+ }
72
+ break;
73
+ }
74
+ case "user": {
75
+ if (content.length === 1 && content[0].type === "text") {
76
+ messages.push({ role: "user", content: content[0].text });
77
+ break;
78
+ }
79
+ messages.push({
80
+ role: "user",
81
+ content: content.map((part, index) => {
82
+ var _a, _b, _c;
83
+ switch (part.type) {
84
+ case "text": {
85
+ return { type: "text", text: part.text };
86
+ }
87
+ case "file": {
88
+ if (part.mediaType.startsWith("image/")) {
89
+ const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
90
+ return {
91
+ type: "image_url",
92
+ image_url: {
93
+ url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${convertToBase64(part.data)}`,
94
+ // OpenAI specific extension: image detail
95
+ detail: (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.imageDetail
96
+ }
97
+ };
98
+ } else if (part.mediaType.startsWith("audio/")) {
99
+ if (part.data instanceof URL) {
100
+ throw new UnsupportedFunctionalityError({
101
+ functionality: "audio file parts with URLs"
102
+ });
103
+ }
104
+ switch (part.mediaType) {
105
+ case "audio/wav": {
106
+ return {
107
+ type: "input_audio",
108
+ input_audio: {
109
+ data: convertToBase64(part.data),
110
+ format: "wav"
111
+ }
112
+ };
113
+ }
114
+ case "audio/mp3":
115
+ case "audio/mpeg": {
116
+ return {
117
+ type: "input_audio",
118
+ input_audio: {
119
+ data: convertToBase64(part.data),
120
+ format: "mp3"
121
+ }
122
+ };
123
+ }
124
+ default: {
125
+ throw new UnsupportedFunctionalityError({
126
+ functionality: `audio content parts with media type ${part.mediaType}`
127
+ });
128
+ }
129
+ }
130
+ } else if (part.mediaType === "application/pdf") {
131
+ if (part.data instanceof URL) {
132
+ throw new UnsupportedFunctionalityError({
133
+ functionality: "PDF file parts with URLs"
134
+ });
135
+ }
136
+ return {
137
+ type: "file",
138
+ file: typeof part.data === "string" && part.data.startsWith("file-") ? { file_id: part.data } : {
139
+ filename: (_c = part.filename) != null ? _c : `part-${index}.pdf`,
140
+ file_data: `data:application/pdf;base64,${convertToBase64(part.data)}`
141
+ }
142
+ };
143
+ } else {
144
+ throw new UnsupportedFunctionalityError({
145
+ functionality: `file part media type ${part.mediaType}`
146
+ });
147
+ }
148
+ }
149
+ }
150
+ })
151
+ });
152
+ break;
153
+ }
154
+ case "assistant": {
155
+ let text = "";
156
+ const toolCalls = [];
157
+ for (const part of content) {
158
+ switch (part.type) {
159
+ case "text": {
160
+ text += part.text;
161
+ break;
162
+ }
163
+ case "tool-call": {
164
+ toolCalls.push({
165
+ id: part.toolCallId,
166
+ type: "function",
167
+ function: {
168
+ name: part.toolName,
169
+ arguments: JSON.stringify(part.input)
170
+ }
171
+ });
172
+ break;
173
+ }
174
+ }
175
+ }
176
+ messages.push({
177
+ role: "assistant",
178
+ content: text,
179
+ tool_calls: toolCalls.length > 0 ? toolCalls : void 0
180
+ });
181
+ break;
182
+ }
183
+ case "tool": {
184
+ for (const toolResponse of content) {
185
+ const output = toolResponse.output;
186
+ let contentValue;
187
+ switch (output.type) {
188
+ case "text":
189
+ case "error-text":
190
+ contentValue = output.value;
191
+ break;
192
+ case "content":
193
+ case "json":
194
+ case "error-json":
195
+ contentValue = JSON.stringify(output.value);
196
+ break;
197
+ }
198
+ messages.push({
199
+ role: "tool",
200
+ tool_call_id: toolResponse.toolCallId,
201
+ content: contentValue
202
+ });
203
+ }
204
+ break;
205
+ }
206
+ default: {
207
+ const _exhaustiveCheck = role;
208
+ throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
209
+ }
210
+ }
211
+ }
212
+ return { messages, warnings };
213
+ }
214
+
215
+ // src/chat/get-response-metadata.ts
216
+ function getResponseMetadata({
217
+ id,
218
+ model,
219
+ created
220
+ }) {
221
+ return {
222
+ id: id != null ? id : void 0,
223
+ modelId: model != null ? model : void 0,
224
+ timestamp: created != null ? new Date(created * 1e3) : void 0
225
+ };
226
+ }
227
+
228
+ // src/chat/map-openai-finish-reason.ts
229
+ function mapOpenAIFinishReason(finishReason) {
230
+ switch (finishReason) {
231
+ case "stop":
232
+ return "stop";
233
+ case "length":
234
+ return "length";
235
+ case "content_filter":
236
+ return "content-filter";
237
+ case "function_call":
238
+ case "tool_calls":
239
+ return "tool-calls";
240
+ default:
241
+ return "unknown";
242
+ }
243
+ }
244
+
245
+ // src/chat/openai-chat-options.ts
246
+ import { z as z2 } from "zod/v4";
247
+ var openaiProviderOptions = z2.object({
248
+ /**
249
+ * Modify the likelihood of specified tokens appearing in the completion.
250
+ *
251
+ * Accepts a JSON object that maps tokens (specified by their token ID in
252
+ * the GPT tokenizer) to an associated bias value from -100 to 100.
253
+ */
254
+ logitBias: z2.record(z2.coerce.number(), z2.number()).optional(),
255
+ /**
256
+ * Return the log probabilities of the tokens.
257
+ *
258
+ * Setting to true will return the log probabilities of the tokens that
259
+ * were generated.
260
+ *
261
+ * Setting to a number will return the log probabilities of the top n
262
+ * tokens that were generated.
263
+ */
264
+ logprobs: z2.union([z2.boolean(), z2.number()]).optional(),
265
+ /**
266
+ * Whether to enable parallel function calling during tool use. Default to true.
267
+ */
268
+ parallelToolCalls: z2.boolean().optional(),
269
+ /**
270
+ * A unique identifier representing your end-user, which can help OpenAI to
271
+ * monitor and detect abuse.
272
+ */
273
+ user: z2.string().optional(),
274
+ /**
275
+ * Reasoning effort for reasoning models. Defaults to `medium`.
276
+ */
277
+ reasoningEffort: z2.enum(["minimal", "low", "medium", "high"]).optional(),
278
+ /**
279
+ * Maximum number of completion tokens to generate. Useful for reasoning models.
280
+ */
281
+ maxCompletionTokens: z2.number().optional(),
282
+ /**
283
+ * Whether to enable persistence in responses API.
284
+ */
285
+ store: z2.boolean().optional(),
286
+ /**
287
+ * Metadata to associate with the request.
288
+ */
289
+ metadata: z2.record(z2.string().max(64), z2.string().max(512)).optional(),
290
+ /**
291
+ * Parameters for prediction mode.
292
+ */
293
+ prediction: z2.record(z2.string(), z2.any()).optional(),
294
+ /**
295
+ * Whether to use structured outputs.
296
+ *
297
+ * @default true
298
+ */
299
+ structuredOutputs: z2.boolean().optional(),
300
+ /**
301
+ * Service tier for the request.
302
+ * - 'auto': Default service tier
303
+      * - 'flex': 50% cheaper processing at the cost of increased latency. Only available for o3, o4-mini, and gpt-5 models.
304
+ * - 'priority': Higher-speed processing with predictably low latency at premium cost. Available for Enterprise customers.
305
+ *
306
+ * @default 'auto'
307
+ */
308
+ serviceTier: z2.enum(["auto", "flex", "priority"]).optional(),
309
+ /**
310
+ * Whether to use strict JSON schema validation.
311
+ *
312
+ * @default false
313
+ */
314
+ strictJsonSchema: z2.boolean().optional(),
315
+ /**
316
+ * Controls the verbosity of the model's responses.
317
+ * Lower values will result in more concise responses, while higher values will result in more verbose responses.
318
+ */
319
+ textVerbosity: z2.enum(["low", "medium", "high"]).optional(),
320
+ /**
321
+ * A cache key for prompt caching. Allows manual control over prompt caching behavior.
322
+ * Useful for improving cache hit rates and working around automatic caching issues.
323
+ */
324
+ promptCacheKey: z2.string().optional(),
325
+ /**
326
+ * A stable identifier used to help detect users of your application
327
+ * that may be violating OpenAI's usage policies. The IDs should be a
328
+ * string that uniquely identifies each user. We recommend hashing their
329
+ * username or email address, in order to avoid sending us any identifying
330
+ * information.
331
+ */
332
+ safetyIdentifier: z2.string().optional()
333
+ });
334
+
335
+ // src/chat/openai-chat-prepare-tools.ts
336
+ import {
337
+ UnsupportedFunctionalityError as UnsupportedFunctionalityError2
338
+ } from "@zenning/provider";
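+ // Maps SDK tool definitions and tool choice into the OpenAI "tools" / "tool_choice"
+ // request fields; unsupported tool types are surfaced as warnings instead of errors.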
339
+ function prepareChatTools({
340
+ tools,
341
+ toolChoice,
342
+ structuredOutputs,
343
+ strictJsonSchema
344
+ }) {
345
+ tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
346
+ const toolWarnings = [];
347
+ if (tools == null) {
348
+ return { tools: void 0, toolChoice: void 0, toolWarnings };
349
+ }
350
+ const openaiTools = [];
351
+ for (const tool of tools) {
352
+ switch (tool.type) {
353
+ case "function":
354
+ openaiTools.push({
355
+ type: "function",
356
+ function: {
357
+ name: tool.name,
358
+ description: tool.description,
359
+ parameters: tool.inputSchema,
360
+ strict: structuredOutputs ? strictJsonSchema : void 0
361
+ }
362
+ });
363
+ break;
364
+ default:
365
+ toolWarnings.push({ type: "unsupported-tool", tool });
366
+ break;
367
+ }
368
+ }
369
+ if (toolChoice == null) {
370
+ return { tools: openaiTools, toolChoice: void 0, toolWarnings };
371
+ }
372
+ const type = toolChoice.type;
373
+ switch (type) {
374
+ case "auto":
375
+ case "none":
376
+ case "required":
377
+ return { tools: openaiTools, toolChoice: type, toolWarnings };
378
+ case "tool":
379
+ return {
380
+ tools: openaiTools,
381
+ toolChoice: {
382
+ type: "function",
383
+ function: {
384
+ name: toolChoice.toolName
385
+ }
386
+ },
387
+ toolWarnings
388
+ };
389
+ default: {
390
+ const _exhaustiveCheck = type;
391
+ throw new UnsupportedFunctionalityError2({
392
+ functionality: `tool choice type: ${_exhaustiveCheck}`
393
+ });
394
+ }
395
+ }
396
+ }
397
+
398
+ // src/chat/openai-chat-language-model.ts
399
+ var OpenAIChatLanguageModel = class {
400
+ constructor(modelId, config) {
401
+ this.specificationVersion = "v2";
402
+ this.supportedUrls = {
403
+ "image/*": [/^https?:\/\/.*$/]
404
+ };
405
+ this.modelId = modelId;
406
+ this.config = config;
407
+ }
408
+ get provider() {
409
+ return this.config.provider;
410
+ }
411
+ async getArgs({
412
+ prompt,
413
+ maxOutputTokens,
414
+ temperature,
415
+ topP,
416
+ topK,
417
+ frequencyPenalty,
418
+ presencePenalty,
419
+ stopSequences,
420
+ responseFormat,
421
+ seed,
422
+ tools,
423
+ toolChoice,
424
+ providerOptions
425
+ }) {
426
+ var _a, _b, _c, _d;
427
+ const warnings = [];
428
+ const openaiOptions = (_a = await parseProviderOptions({
429
+ provider: "openai",
430
+ providerOptions,
431
+ schema: openaiProviderOptions
432
+ })) != null ? _a : {};
433
+ const structuredOutputs = (_b = openaiOptions.structuredOutputs) != null ? _b : true;
434
+ if (topK != null) {
435
+ warnings.push({
436
+ type: "unsupported-setting",
437
+ setting: "topK"
438
+ });
439
+ }
440
+ if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !structuredOutputs) {
441
+ warnings.push({
442
+ type: "unsupported-setting",
443
+ setting: "responseFormat",
444
+ details: "JSON response format schema is only supported with structuredOutputs"
445
+ });
446
+ }
447
+ const { messages, warnings: messageWarnings } = convertToOpenAIChatMessages(
448
+ {
449
+ prompt,
450
+ systemMessageMode: getSystemMessageMode(this.modelId)
451
+ }
452
+ );
453
+ warnings.push(...messageWarnings);
454
+ const strictJsonSchema = (_c = openaiOptions.strictJsonSchema) != null ? _c : false;
455
+ const baseArgs = {
456
+ // model id:
457
+ model: this.modelId,
458
+ // model specific settings:
459
+ logit_bias: openaiOptions.logitBias,
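+       // logprobs: true or a number enables token logprobs; a number additionally sets top_logprobs to that count.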
460
+ logprobs: openaiOptions.logprobs === true || typeof openaiOptions.logprobs === "number" ? true : void 0,
461
+ top_logprobs: typeof openaiOptions.logprobs === "number" ? openaiOptions.logprobs : typeof openaiOptions.logprobs === "boolean" ? openaiOptions.logprobs ? 0 : void 0 : void 0,
462
+ user: openaiOptions.user,
463
+ parallel_tool_calls: openaiOptions.parallelToolCalls,
464
+ // standardized settings:
465
+ max_tokens: maxOutputTokens,
466
+ temperature,
467
+ top_p: topP,
468
+ frequency_penalty: frequencyPenalty,
469
+ presence_penalty: presencePenalty,
470
+ response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? structuredOutputs && responseFormat.schema != null ? {
471
+ type: "json_schema",
472
+ json_schema: {
473
+ schema: responseFormat.schema,
474
+ strict: strictJsonSchema,
475
+ name: (_d = responseFormat.name) != null ? _d : "response",
476
+ description: responseFormat.description
477
+ }
478
+ } : { type: "json_object" } : void 0,
479
+ stop: stopSequences,
480
+ seed,
481
+ verbosity: openaiOptions.textVerbosity,
482
+ // openai specific settings:
483
+ // TODO AI SDK 6: remove, we auto-map maxOutputTokens now
484
+ max_completion_tokens: openaiOptions.maxCompletionTokens,
485
+ store: openaiOptions.store,
486
+ metadata: openaiOptions.metadata,
487
+ prediction: openaiOptions.prediction,
488
+ reasoning_effort: openaiOptions.reasoningEffort,
489
+ service_tier: openaiOptions.serviceTier,
490
+ prompt_cache_key: openaiOptions.promptCacheKey,
491
+ safety_identifier: openaiOptions.safetyIdentifier,
492
+ // messages:
493
+ messages
494
+ };
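+     // Reasoning models reject most sampling parameters; strip them with warnings and
+     // map max_tokens onto max_completion_tokens.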
495
+ if (isReasoningModel(this.modelId)) {
496
+ if (baseArgs.temperature != null) {
497
+ baseArgs.temperature = void 0;
498
+ warnings.push({
499
+ type: "unsupported-setting",
500
+ setting: "temperature",
501
+ details: "temperature is not supported for reasoning models"
502
+ });
503
+ }
504
+ if (baseArgs.top_p != null) {
505
+ baseArgs.top_p = void 0;
506
+ warnings.push({
507
+ type: "unsupported-setting",
508
+ setting: "topP",
509
+ details: "topP is not supported for reasoning models"
510
+ });
511
+ }
512
+ if (baseArgs.frequency_penalty != null) {
513
+ baseArgs.frequency_penalty = void 0;
514
+ warnings.push({
515
+ type: "unsupported-setting",
516
+ setting: "frequencyPenalty",
517
+ details: "frequencyPenalty is not supported for reasoning models"
518
+ });
519
+ }
520
+ if (baseArgs.presence_penalty != null) {
521
+ baseArgs.presence_penalty = void 0;
522
+ warnings.push({
523
+ type: "unsupported-setting",
524
+ setting: "presencePenalty",
525
+ details: "presencePenalty is not supported for reasoning models"
526
+ });
527
+ }
528
+ if (baseArgs.logit_bias != null) {
529
+ baseArgs.logit_bias = void 0;
530
+ warnings.push({
531
+ type: "other",
532
+ message: "logitBias is not supported for reasoning models"
533
+ });
534
+ }
535
+ if (baseArgs.logprobs != null) {
536
+ baseArgs.logprobs = void 0;
537
+ warnings.push({
538
+ type: "other",
539
+ message: "logprobs is not supported for reasoning models"
540
+ });
541
+ }
542
+ if (baseArgs.top_logprobs != null) {
543
+ baseArgs.top_logprobs = void 0;
544
+ warnings.push({
545
+ type: "other",
546
+ message: "topLogprobs is not supported for reasoning models"
547
+ });
548
+ }
549
+ if (baseArgs.max_tokens != null) {
550
+ if (baseArgs.max_completion_tokens == null) {
551
+ baseArgs.max_completion_tokens = baseArgs.max_tokens;
552
+ }
553
+ baseArgs.max_tokens = void 0;
554
+ }
555
+ } else if (this.modelId.startsWith("gpt-4o-search-preview") || this.modelId.startsWith("gpt-4o-mini-search-preview")) {
556
+ if (baseArgs.temperature != null) {
557
+ baseArgs.temperature = void 0;
558
+ warnings.push({
559
+ type: "unsupported-setting",
560
+ setting: "temperature",
561
+ details: "temperature is not supported for the search preview models and has been removed."
562
+ });
563
+ }
564
+ }
565
+ if (openaiOptions.serviceTier === "flex" && !supportsFlexProcessing(this.modelId)) {
566
+ warnings.push({
567
+ type: "unsupported-setting",
568
+ setting: "serviceTier",
569
+ details: "flex processing is only available for o3, o4-mini, and gpt-5 models"
570
+ });
571
+ baseArgs.service_tier = void 0;
572
+ }
573
+ if (openaiOptions.serviceTier === "priority" && !supportsPriorityProcessing(this.modelId)) {
574
+ warnings.push({
575
+ type: "unsupported-setting",
576
+ setting: "serviceTier",
577
+ details: "priority processing is only available for supported models (gpt-4, gpt-5, gpt-5-mini, o3, o4-mini) and requires Enterprise access. gpt-5-nano is not supported"
578
+ });
579
+ baseArgs.service_tier = void 0;
580
+ }
581
+ const {
582
+ tools: openaiTools,
583
+ toolChoice: openaiToolChoice,
584
+ toolWarnings
585
+ } = prepareChatTools({
586
+ tools,
587
+ toolChoice,
588
+ structuredOutputs,
589
+ strictJsonSchema
590
+ });
591
+ return {
592
+ args: {
593
+ ...baseArgs,
594
+ tools: openaiTools,
595
+ tool_choice: openaiToolChoice
596
+ },
597
+ warnings: [...warnings, ...toolWarnings]
598
+ };
599
+ }
600
+ async doGenerate(options) {
601
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n;
602
+ const { args: body, warnings } = await this.getArgs(options);
603
+ const {
604
+ responseHeaders,
605
+ value: response,
606
+ rawValue: rawResponse
607
+ } = await postJsonToApi({
608
+ url: this.config.url({
609
+ path: "/chat/completions",
610
+ modelId: this.modelId
611
+ }),
612
+ headers: combineHeaders(this.config.headers(), options.headers),
613
+ body,
614
+ failedResponseHandler: openaiFailedResponseHandler,
615
+ successfulResponseHandler: createJsonResponseHandler(
616
+ openaiChatResponseSchema
617
+ ),
618
+ abortSignal: options.abortSignal,
619
+ fetch: this.config.fetch
620
+ });
621
+ const choice = response.choices[0];
622
+ const content = [];
623
+ const text = choice.message.content;
624
+ if (text != null && text.length > 0) {
625
+ content.push({ type: "text", text });
626
+ }
627
+ for (const toolCall of (_a = choice.message.tool_calls) != null ? _a : []) {
628
+ content.push({
629
+ type: "tool-call",
630
+ toolCallId: (_b = toolCall.id) != null ? _b : generateId(),
631
+ toolName: toolCall.function.name,
632
+ input: toolCall.function.arguments
633
+ });
634
+ }
635
+ for (const annotation of (_c = choice.message.annotations) != null ? _c : []) {
636
+ content.push({
637
+ type: "source",
638
+ sourceType: "url",
639
+ id: generateId(),
640
+ url: annotation.url,
641
+ title: annotation.title
642
+ });
643
+ }
644
+ const completionTokenDetails = (_d = response.usage) == null ? void 0 : _d.completion_tokens_details;
645
+ const promptTokenDetails = (_e = response.usage) == null ? void 0 : _e.prompt_tokens_details;
646
+ const providerMetadata = { openai: {} };
647
+ if ((completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens) != null) {
648
+ providerMetadata.openai.acceptedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens;
649
+ }
650
+ if ((completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens) != null) {
651
+ providerMetadata.openai.rejectedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens;
652
+ }
653
+ if (((_f = choice.logprobs) == null ? void 0 : _f.content) != null) {
654
+ providerMetadata.openai.logprobs = choice.logprobs.content;
655
+ }
656
+ return {
657
+ content,
658
+ finishReason: mapOpenAIFinishReason(choice.finish_reason),
659
+ usage: {
660
+ inputTokens: (_h = (_g = response.usage) == null ? void 0 : _g.prompt_tokens) != null ? _h : void 0,
661
+ outputTokens: (_j = (_i = response.usage) == null ? void 0 : _i.completion_tokens) != null ? _j : void 0,
662
+ totalTokens: (_l = (_k = response.usage) == null ? void 0 : _k.total_tokens) != null ? _l : void 0,
663
+ reasoningTokens: (_m = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null ? _m : void 0,
664
+ cachedInputTokens: (_n = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null ? _n : void 0
665
+ },
666
+ request: { body },
667
+ response: {
668
+ ...getResponseMetadata(response),
669
+ headers: responseHeaders,
670
+ body: rawResponse
671
+ },
672
+ warnings,
673
+ providerMetadata
674
+ };
675
+ }
676
+ async doStream(options) {
677
+ const { args, warnings } = await this.getArgs(options);
678
+ const body = {
679
+ ...args,
680
+ stream: true,
681
+ stream_options: {
682
+ include_usage: true
683
+ }
684
+ };
685
+ const { responseHeaders, value: response } = await postJsonToApi({
686
+ url: this.config.url({
687
+ path: "/chat/completions",
688
+ modelId: this.modelId
689
+ }),
690
+ headers: combineHeaders(this.config.headers(), options.headers),
691
+ body,
692
+ failedResponseHandler: openaiFailedResponseHandler,
693
+ successfulResponseHandler: createEventSourceResponseHandler(
694
+ openaiChatChunkSchema
695
+ ),
696
+ abortSignal: options.abortSignal,
697
+ fetch: this.config.fetch
698
+ });
699
+ const toolCalls = [];
700
+ let finishReason = "unknown";
701
+ const usage = {
702
+ inputTokens: void 0,
703
+ outputTokens: void 0,
704
+ totalTokens: void 0
705
+ };
706
+ let isFirstChunk = true;
707
+ let isActiveText = false;
708
+ const providerMetadata = { openai: {} };
709
+ return {
710
+ stream: response.pipeThrough(
711
+ new TransformStream({
712
+ start(controller) {
713
+ controller.enqueue({ type: "stream-start", warnings });
714
+ },
715
+ transform(chunk, controller) {
716
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x;
717
+ if (options.includeRawChunks) {
718
+ controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
719
+ }
720
+ if (!chunk.success) {
721
+ finishReason = "error";
722
+ controller.enqueue({ type: "error", error: chunk.error });
723
+ return;
724
+ }
725
+ const value = chunk.value;
726
+ if ("error" in value) {
727
+ finishReason = "error";
728
+ controller.enqueue({ type: "error", error: value.error });
729
+ return;
730
+ }
731
+ if (isFirstChunk) {
732
+ isFirstChunk = false;
733
+ controller.enqueue({
734
+ type: "response-metadata",
735
+ ...getResponseMetadata(value)
736
+ });
737
+ }
738
+ if (value.usage != null) {
739
+ usage.inputTokens = (_a = value.usage.prompt_tokens) != null ? _a : void 0;
740
+ usage.outputTokens = (_b = value.usage.completion_tokens) != null ? _b : void 0;
741
+ usage.totalTokens = (_c = value.usage.total_tokens) != null ? _c : void 0;
742
+ usage.reasoningTokens = (_e = (_d = value.usage.completion_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : void 0;
743
+ usage.cachedInputTokens = (_g = (_f = value.usage.prompt_tokens_details) == null ? void 0 : _f.cached_tokens) != null ? _g : void 0;
744
+ if (((_h = value.usage.completion_tokens_details) == null ? void 0 : _h.accepted_prediction_tokens) != null) {
745
+ providerMetadata.openai.acceptedPredictionTokens = (_i = value.usage.completion_tokens_details) == null ? void 0 : _i.accepted_prediction_tokens;
746
+ }
747
+ if (((_j = value.usage.completion_tokens_details) == null ? void 0 : _j.rejected_prediction_tokens) != null) {
748
+ providerMetadata.openai.rejectedPredictionTokens = (_k = value.usage.completion_tokens_details) == null ? void 0 : _k.rejected_prediction_tokens;
749
+ }
750
+ }
751
+ const choice = value.choices[0];
752
+ if ((choice == null ? void 0 : choice.finish_reason) != null) {
753
+ finishReason = mapOpenAIFinishReason(choice.finish_reason);
754
+ }
755
+ if (((_l = choice == null ? void 0 : choice.logprobs) == null ? void 0 : _l.content) != null) {
756
+ providerMetadata.openai.logprobs = choice.logprobs.content;
757
+ }
758
+ if ((choice == null ? void 0 : choice.delta) == null) {
759
+ return;
760
+ }
761
+ const delta = choice.delta;
762
+ if (delta.content != null) {
763
+ if (!isActiveText) {
764
+ controller.enqueue({ type: "text-start", id: "0" });
765
+ isActiveText = true;
766
+ }
767
+ controller.enqueue({
768
+ type: "text-delta",
769
+ id: "0",
770
+ delta: delta.content
771
+ });
772
+ }
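+             // Accumulate streamed tool-call argument deltas per index; emit tool-input-start on the
+             // first delta for an index and a tool-call event once the arguments parse as complete JSON.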
773
+ if (delta.tool_calls != null) {
774
+ for (const toolCallDelta of delta.tool_calls) {
775
+ const index = toolCallDelta.index;
776
+ if (toolCalls[index] == null) {
777
+ if (toolCallDelta.type !== "function") {
778
+ throw new InvalidResponseDataError({
779
+ data: toolCallDelta,
780
+ message: `Expected 'function' type.`
781
+ });
782
+ }
783
+ if (toolCallDelta.id == null) {
784
+ throw new InvalidResponseDataError({
785
+ data: toolCallDelta,
786
+ message: `Expected 'id' to be a string.`
787
+ });
788
+ }
789
+ if (((_m = toolCallDelta.function) == null ? void 0 : _m.name) == null) {
790
+ throw new InvalidResponseDataError({
791
+ data: toolCallDelta,
792
+ message: `Expected 'function.name' to be a string.`
793
+ });
794
+ }
795
+ controller.enqueue({
796
+ type: "tool-input-start",
797
+ id: toolCallDelta.id,
798
+ toolName: toolCallDelta.function.name
799
+ });
800
+ toolCalls[index] = {
801
+ id: toolCallDelta.id,
802
+ type: "function",
803
+ function: {
804
+ name: toolCallDelta.function.name,
805
+ arguments: (_n = toolCallDelta.function.arguments) != null ? _n : ""
806
+ },
807
+ hasFinished: false
808
+ };
809
+ const toolCall2 = toolCalls[index];
810
+ if (((_o = toolCall2.function) == null ? void 0 : _o.name) != null && ((_p = toolCall2.function) == null ? void 0 : _p.arguments) != null) {
811
+ if (toolCall2.function.arguments.length > 0) {
812
+ controller.enqueue({
813
+ type: "tool-input-delta",
814
+ id: toolCall2.id,
815
+ delta: toolCall2.function.arguments
816
+ });
817
+ }
818
+ if (isParsableJson(toolCall2.function.arguments)) {
819
+ controller.enqueue({
820
+ type: "tool-input-end",
821
+ id: toolCall2.id
822
+ });
823
+ controller.enqueue({
824
+ type: "tool-call",
825
+ toolCallId: (_q = toolCall2.id) != null ? _q : generateId(),
826
+ toolName: toolCall2.function.name,
827
+ input: toolCall2.function.arguments
828
+ });
829
+ toolCall2.hasFinished = true;
830
+ }
831
+ }
832
+ continue;
833
+ }
834
+ const toolCall = toolCalls[index];
835
+ if (toolCall.hasFinished) {
836
+ continue;
837
+ }
838
+ if (((_r = toolCallDelta.function) == null ? void 0 : _r.arguments) != null) {
839
+ toolCall.function.arguments += (_t = (_s = toolCallDelta.function) == null ? void 0 : _s.arguments) != null ? _t : "";
840
+ }
841
+ controller.enqueue({
842
+ type: "tool-input-delta",
843
+ id: toolCall.id,
844
+ delta: (_u = toolCallDelta.function.arguments) != null ? _u : ""
845
+ });
846
+ if (((_v = toolCall.function) == null ? void 0 : _v.name) != null && ((_w = toolCall.function) == null ? void 0 : _w.arguments) != null && isParsableJson(toolCall.function.arguments)) {
847
+ controller.enqueue({
848
+ type: "tool-input-end",
849
+ id: toolCall.id
850
+ });
851
+ controller.enqueue({
852
+ type: "tool-call",
853
+ toolCallId: (_x = toolCall.id) != null ? _x : generateId(),
854
+ toolName: toolCall.function.name,
855
+ input: toolCall.function.arguments
856
+ });
857
+ toolCall.hasFinished = true;
858
+ }
859
+ }
860
+ }
861
+ if (delta.annotations != null) {
862
+ for (const annotation of delta.annotations) {
863
+ controller.enqueue({
864
+ type: "source",
865
+ sourceType: "url",
866
+ id: generateId(),
867
+ url: annotation.url,
868
+ title: annotation.title
869
+ });
870
+ }
871
+ }
872
+ },
873
+ flush(controller) {
874
+ if (isActiveText) {
875
+ controller.enqueue({ type: "text-end", id: "0" });
876
+ }
877
+ controller.enqueue({
878
+ type: "finish",
879
+ finishReason,
880
+ usage,
881
+ ...providerMetadata != null ? { providerMetadata } : {}
882
+ });
883
+ }
884
+ })
885
+ ),
886
+ request: { body },
887
+ response: { headers: responseHeaders }
888
+ };
889
+ }
890
+ };
891
+ var openaiTokenUsageSchema = z3.object({
892
+ prompt_tokens: z3.number().nullish(),
893
+ completion_tokens: z3.number().nullish(),
894
+ total_tokens: z3.number().nullish(),
895
+ prompt_tokens_details: z3.object({
896
+ cached_tokens: z3.number().nullish()
897
+ }).nullish(),
898
+ completion_tokens_details: z3.object({
899
+ reasoning_tokens: z3.number().nullish(),
900
+ accepted_prediction_tokens: z3.number().nullish(),
901
+ rejected_prediction_tokens: z3.number().nullish()
902
+ }).nullish()
903
+ }).nullish();
904
+ var openaiChatResponseSchema = z3.object({
905
+ id: z3.string().nullish(),
906
+ created: z3.number().nullish(),
907
+ model: z3.string().nullish(),
908
+ choices: z3.array(
909
+ z3.object({
910
+ message: z3.object({
911
+ role: z3.literal("assistant").nullish(),
912
+ content: z3.string().nullish(),
913
+ tool_calls: z3.array(
914
+ z3.object({
915
+ id: z3.string().nullish(),
916
+ type: z3.literal("function"),
917
+ function: z3.object({
918
+ name: z3.string(),
919
+ arguments: z3.string()
920
+ })
921
+ })
922
+ ).nullish(),
923
+ annotations: z3.array(
924
+ z3.object({
925
+ type: z3.literal("url_citation"),
926
+ start_index: z3.number(),
927
+ end_index: z3.number(),
928
+ url: z3.string(),
929
+ title: z3.string()
930
+ })
931
+ ).nullish()
932
+ }),
933
+ index: z3.number(),
934
+ logprobs: z3.object({
935
+ content: z3.array(
936
+ z3.object({
937
+ token: z3.string(),
938
+ logprob: z3.number(),
939
+ top_logprobs: z3.array(
940
+ z3.object({
941
+ token: z3.string(),
942
+ logprob: z3.number()
943
+ })
944
+ )
945
+ })
946
+ ).nullish()
947
+ }).nullish(),
948
+ finish_reason: z3.string().nullish()
949
+ })
950
+ ),
951
+ usage: openaiTokenUsageSchema
952
+ });
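+ // Streamed chunks are either a delta payload or an OpenAI error object.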
953
+ var openaiChatChunkSchema = z3.union([
954
+ z3.object({
955
+ id: z3.string().nullish(),
956
+ created: z3.number().nullish(),
957
+ model: z3.string().nullish(),
958
+ choices: z3.array(
959
+ z3.object({
960
+ delta: z3.object({
961
+ role: z3.enum(["assistant"]).nullish(),
962
+ content: z3.string().nullish(),
963
+ tool_calls: z3.array(
964
+ z3.object({
965
+ index: z3.number(),
966
+ id: z3.string().nullish(),
967
+ type: z3.literal("function").nullish(),
968
+ function: z3.object({
969
+ name: z3.string().nullish(),
970
+ arguments: z3.string().nullish()
971
+ })
972
+ })
973
+ ).nullish(),
974
+ annotations: z3.array(
975
+ z3.object({
976
+ type: z3.literal("url_citation"),
977
+ start_index: z3.number(),
978
+ end_index: z3.number(),
979
+ url: z3.string(),
980
+ title: z3.string()
981
+ })
982
+ ).nullish()
983
+ }).nullish(),
984
+ logprobs: z3.object({
985
+ content: z3.array(
986
+ z3.object({
987
+ token: z3.string(),
988
+ logprob: z3.number(),
989
+ top_logprobs: z3.array(
990
+ z3.object({
991
+ token: z3.string(),
992
+ logprob: z3.number()
993
+ })
994
+ )
995
+ })
996
+ ).nullish()
997
+ }).nullish(),
998
+ finish_reason: z3.string().nullish(),
999
+ index: z3.number()
1000
+ })
1001
+ ),
1002
+ usage: openaiTokenUsageSchema
1003
+ }),
1004
+ openaiErrorDataSchema
1005
+ ]);
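+ // Model-id helpers: classify reasoning models and per-model service-tier support by id prefix.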
1006
+ function isReasoningModel(modelId) {
1007
+ return (modelId.startsWith("o") || modelId.startsWith("gpt-5")) && !modelId.startsWith("gpt-5-chat");
1008
+ }
1009
+ function supportsFlexProcessing(modelId) {
1010
+ return modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
1011
+ }
1012
+ function supportsPriorityProcessing(modelId) {
1013
+ return modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
1014
+ }
1015
+ function getSystemMessageMode(modelId) {
1016
+ var _a, _b;
1017
+ if (!isReasoningModel(modelId)) {
1018
+ return "system";
1019
+ }
1020
+ return (_b = (_a = reasoningModels[modelId]) == null ? void 0 : _a.systemMessageMode) != null ? _b : "developer";
1021
+ }
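+ // System-message handling for reasoning models: o1-mini/o1-preview drop system messages,
+ // newer reasoning models use the "developer" role (also the default for unlisted ones).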
1022
+ var reasoningModels = {
1023
+ "o1-mini": {
1024
+ systemMessageMode: "remove"
1025
+ },
1026
+ "o1-mini-2024-09-12": {
1027
+ systemMessageMode: "remove"
1028
+ },
1029
+ "o1-preview": {
1030
+ systemMessageMode: "remove"
1031
+ },
1032
+ "o1-preview-2024-09-12": {
1033
+ systemMessageMode: "remove"
1034
+ },
1035
+ o3: {
1036
+ systemMessageMode: "developer"
1037
+ },
1038
+ "o3-2025-04-16": {
1039
+ systemMessageMode: "developer"
1040
+ },
1041
+ "o3-mini": {
1042
+ systemMessageMode: "developer"
1043
+ },
1044
+ "o3-mini-2025-01-31": {
1045
+ systemMessageMode: "developer"
1046
+ },
1047
+ "o4-mini": {
1048
+ systemMessageMode: "developer"
1049
+ },
1050
+ "o4-mini-2025-04-16": {
1051
+ systemMessageMode: "developer"
1052
+ }
1053
+ };
1054
+
1055
+ // src/completion/openai-completion-language-model.ts
1056
+ import {
1057
+ combineHeaders as combineHeaders2,
1058
+ createEventSourceResponseHandler as createEventSourceResponseHandler2,
1059
+ createJsonResponseHandler as createJsonResponseHandler2,
1060
+ parseProviderOptions as parseProviderOptions2,
1061
+ postJsonToApi as postJsonToApi2
1062
+ } from "@ai-sdk/provider-utils";
1063
+ import { z as z5 } from "zod/v4";
1064
+
1065
+ // src/completion/convert-to-openai-completion-prompt.ts
1066
+ import {
1067
+ InvalidPromptError,
1068
+ UnsupportedFunctionalityError as UnsupportedFunctionalityError3
1069
+ } from "@zenning/provider";
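+ // Flattens a chat prompt into a plain-text "user:/assistant:" transcript for the legacy
+ // completions API; the returned stop sequence keeps the model from generating the next user turn.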
1070
+ function convertToOpenAICompletionPrompt({
1071
+ prompt,
1072
+ user = "user",
1073
+ assistant = "assistant"
1074
+ }) {
1075
+ let text = "";
1076
+ if (prompt[0].role === "system") {
1077
+ text += `${prompt[0].content}
1078
+
1079
+ `;
1080
+ prompt = prompt.slice(1);
1081
+ }
1082
+ for (const { role, content } of prompt) {
1083
+ switch (role) {
1084
+ case "system": {
1085
+ throw new InvalidPromptError({
1086
+ message: "Unexpected system message in prompt: ${content}",
1087
+ prompt
1088
+ });
1089
+ }
1090
+ case "user": {
1091
+ const userMessage = content.map((part) => {
1092
+ switch (part.type) {
1093
+ case "text": {
1094
+ return part.text;
1095
+ }
1096
+ }
1097
+ }).filter(Boolean).join("");
1098
+ text += `${user}:
1099
+ ${userMessage}
1100
+
1101
+ `;
1102
+ break;
1103
+ }
1104
+ case "assistant": {
1105
+ const assistantMessage = content.map((part) => {
1106
+ switch (part.type) {
1107
+ case "text": {
1108
+ return part.text;
1109
+ }
1110
+ case "tool-call": {
1111
+ throw new UnsupportedFunctionalityError3({
1112
+ functionality: "tool-call messages"
1113
+ });
1114
+ }
1115
+ }
1116
+ }).join("");
1117
+ text += `${assistant}:
1118
+ ${assistantMessage}
1119
+
1120
+ `;
1121
+ break;
1122
+ }
1123
+ case "tool": {
1124
+ throw new UnsupportedFunctionalityError3({
1125
+ functionality: "tool messages"
1126
+ });
1127
+ }
1128
+ default: {
1129
+ const _exhaustiveCheck = role;
1130
+ throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
1131
+ }
1132
+ }
1133
+ }
1134
+ text += `${assistant}:
1135
+ `;
1136
+ return {
1137
+ prompt: text,
1138
+ stopSequences: [`
1139
+ ${user}:`]
1140
+ };
1141
+ }
1142
+
1143
+ // src/completion/get-response-metadata.ts
1144
+ function getResponseMetadata2({
1145
+ id,
1146
+ model,
1147
+ created
1148
+ }) {
1149
+ return {
1150
+ id: id != null ? id : void 0,
1151
+ modelId: model != null ? model : void 0,
1152
+ timestamp: created != null ? new Date(created * 1e3) : void 0
1153
+ };
1154
+ }
1155
+
1156
+ // src/completion/map-openai-finish-reason.ts
1157
+ function mapOpenAIFinishReason2(finishReason) {
1158
+ switch (finishReason) {
1159
+ case "stop":
1160
+ return "stop";
1161
+ case "length":
1162
+ return "length";
1163
+ case "content_filter":
1164
+ return "content-filter";
1165
+ case "function_call":
1166
+ case "tool_calls":
1167
+ return "tool-calls";
1168
+ default:
1169
+ return "unknown";
1170
+ }
1171
+ }
1172
+
1173
+ // src/completion/openai-completion-options.ts
1174
+ import { z as z4 } from "zod/v4";
1175
+ var openaiCompletionProviderOptions = z4.object({
1176
+ /**
1177
+ Echo back the prompt in addition to the completion.
1178
+ */
1179
+ echo: z4.boolean().optional(),
1180
+ /**
1181
+ Modify the likelihood of specified tokens appearing in the completion.
1182
+
1183
+ Accepts a JSON object that maps tokens (specified by their token ID in
1184
+ the GPT tokenizer) to an associated bias value from -100 to 100. You
1185
+ can use this tokenizer tool to convert text to token IDs. Mathematically,
1186
+ the bias is added to the logits generated by the model prior to sampling.
1187
+ The exact effect will vary per model, but values between -1 and 1 should
1188
+ decrease or increase likelihood of selection; values like -100 or 100
1189
+ should result in a ban or exclusive selection of the relevant token.
1190
+
1191
+ As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
1192
+ token from being generated.
1193
+ */
1194
+ logitBias: z4.record(z4.string(), z4.number()).optional(),
1195
+ /**
1196
+ The suffix that comes after a completion of inserted text.
1197
+ */
1198
+ suffix: z4.string().optional(),
1199
+ /**
1200
+ A unique identifier representing your end-user, which can help OpenAI to
1201
+ monitor and detect abuse. Learn more.
1202
+ */
1203
+ user: z4.string().optional(),
1204
+ /**
1205
+ Return the log probabilities of the tokens. Including logprobs will increase
1206
+ the response size and can slow down response times. However, it can
1207
+ be useful to better understand how the model is behaving.
1208
+ Setting to true will return the log probabilities of the tokens that
1209
+ were generated.
1210
+ Setting to a number will return the log probabilities of the top n
1211
+ tokens that were generated.
1212
+ */
1213
+ logprobs: z4.union([z4.boolean(), z4.number()]).optional()
1214
+ });
1215
+
1216
+ // src/completion/openai-completion-language-model.ts
1217
+ var OpenAICompletionLanguageModel = class {
1218
+ constructor(modelId, config) {
1219
+ this.specificationVersion = "v2";
1220
+ this.supportedUrls = {
1221
+ // No URLs are supported for completion models.
1222
+ };
1223
+ this.modelId = modelId;
1224
+ this.config = config;
1225
+ }
1226
+ get providerOptionsName() {
1227
+ return this.config.provider.split(".")[0].trim();
1228
+ }
1229
+ get provider() {
1230
+ return this.config.provider;
1231
+ }
1232
+ async getArgs({
1233
+ prompt,
1234
+ maxOutputTokens,
1235
+ temperature,
1236
+ topP,
1237
+ topK,
1238
+ frequencyPenalty,
1239
+ presencePenalty,
1240
+ stopSequences: userStopSequences,
1241
+ responseFormat,
1242
+ tools,
1243
+ toolChoice,
1244
+ seed,
1245
+ providerOptions
1246
+ }) {
1247
+ const warnings = [];
1248
+ const openaiOptions = {
1249
+ ...await parseProviderOptions2({
1250
+ provider: "openai",
1251
+ providerOptions,
1252
+ schema: openaiCompletionProviderOptions
1253
+ }),
1254
+ ...await parseProviderOptions2({
1255
+ provider: this.providerOptionsName,
1256
+ providerOptions,
1257
+ schema: openaiCompletionProviderOptions
1258
+ })
1259
+ };
1260
+ if (topK != null) {
1261
+ warnings.push({ type: "unsupported-setting", setting: "topK" });
1262
+ }
1263
+ if (tools == null ? void 0 : tools.length) {
1264
+ warnings.push({ type: "unsupported-setting", setting: "tools" });
1265
+ }
1266
+ if (toolChoice != null) {
1267
+ warnings.push({ type: "unsupported-setting", setting: "toolChoice" });
1268
+ }
1269
+ if (responseFormat != null && responseFormat.type !== "text") {
1270
+ warnings.push({
1271
+ type: "unsupported-setting",
1272
+ setting: "responseFormat",
1273
+ details: "JSON response format is not supported."
1274
+ });
1275
+ }
1276
+ const { prompt: completionPrompt, stopSequences } = convertToOpenAICompletionPrompt({ prompt });
1277
+ const stop = [...stopSequences != null ? stopSequences : [], ...userStopSequences != null ? userStopSequences : []];
1278
+ return {
1279
+ args: {
1280
+ // model id:
1281
+ model: this.modelId,
1282
+ // model specific settings:
1283
+ echo: openaiOptions.echo,
1284
+ logit_bias: openaiOptions.logitBias,
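+         // The completions API expects a number for logprobs: true maps to 0 (top token only), false is omitted.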
1285
+ logprobs: (openaiOptions == null ? void 0 : openaiOptions.logprobs) === true ? 0 : (openaiOptions == null ? void 0 : openaiOptions.logprobs) === false ? void 0 : openaiOptions == null ? void 0 : openaiOptions.logprobs,
1286
+ suffix: openaiOptions.suffix,
1287
+ user: openaiOptions.user,
1288
+ // standardized settings:
1289
+ max_tokens: maxOutputTokens,
1290
+ temperature,
1291
+ top_p: topP,
1292
+ frequency_penalty: frequencyPenalty,
1293
+ presence_penalty: presencePenalty,
1294
+ seed,
1295
+ // prompt:
1296
+ prompt: completionPrompt,
1297
+ // stop sequences:
1298
+ stop: stop.length > 0 ? stop : void 0
1299
+ },
1300
+ warnings
1301
+ };
1302
+ }
1303
+ async doGenerate(options) {
1304
+ var _a, _b, _c;
1305
+ const { args, warnings } = await this.getArgs(options);
1306
+ const {
1307
+ responseHeaders,
1308
+ value: response,
1309
+ rawValue: rawResponse
1310
+ } = await postJsonToApi2({
1311
+ url: this.config.url({
1312
+ path: "/completions",
1313
+ modelId: this.modelId
1314
+ }),
1315
+ headers: combineHeaders2(this.config.headers(), options.headers),
1316
+ body: args,
1317
+ failedResponseHandler: openaiFailedResponseHandler,
1318
+ successfulResponseHandler: createJsonResponseHandler2(
1319
+ openaiCompletionResponseSchema
1320
+ ),
1321
+ abortSignal: options.abortSignal,
1322
+ fetch: this.config.fetch
1323
+ });
1324
+ const choice = response.choices[0];
1325
+ const providerMetadata = { openai: {} };
1326
+ if (choice.logprobs != null) {
1327
+ providerMetadata.openai.logprobs = choice.logprobs;
1328
+ }
1329
+ return {
1330
+ content: [{ type: "text", text: choice.text }],
1331
+ usage: {
1332
+ inputTokens: (_a = response.usage) == null ? void 0 : _a.prompt_tokens,
1333
+ outputTokens: (_b = response.usage) == null ? void 0 : _b.completion_tokens,
1334
+ totalTokens: (_c = response.usage) == null ? void 0 : _c.total_tokens
1335
+ },
1336
+ finishReason: mapOpenAIFinishReason2(choice.finish_reason),
1337
+ request: { body: args },
1338
+ response: {
1339
+ ...getResponseMetadata2(response),
1340
+ headers: responseHeaders,
1341
+ body: rawResponse
1342
+ },
1343
+ providerMetadata,
1344
+ warnings
1345
+ };
1346
+ }
1347
+ async doStream(options) {
1348
+ const { args, warnings } = await this.getArgs(options);
1349
+ const body = {
1350
+ ...args,
1351
+ stream: true,
1352
+ stream_options: {
1353
+ include_usage: true
1354
+ }
1355
+ };
1356
+ const { responseHeaders, value: response } = await postJsonToApi2({
1357
+ url: this.config.url({
1358
+ path: "/completions",
1359
+ modelId: this.modelId
1360
+ }),
1361
+ headers: combineHeaders2(this.config.headers(), options.headers),
1362
+ body,
1363
+ failedResponseHandler: openaiFailedResponseHandler,
1364
+ successfulResponseHandler: createEventSourceResponseHandler2(
1365
+ openaiCompletionChunkSchema
1366
+ ),
1367
+ abortSignal: options.abortSignal,
1368
+ fetch: this.config.fetch
1369
+ });
1370
+ let finishReason = "unknown";
1371
+ const providerMetadata = { openai: {} };
1372
+ const usage = {
1373
+ inputTokens: void 0,
1374
+ outputTokens: void 0,
1375
+ totalTokens: void 0
1376
+ };
1377
+ let isFirstChunk = true;
1378
+ return {
1379
+ stream: response.pipeThrough(
1380
+ new TransformStream({
1381
+ start(controller) {
1382
+ controller.enqueue({ type: "stream-start", warnings });
1383
+ },
1384
+ transform(chunk, controller) {
1385
+ if (options.includeRawChunks) {
1386
+ controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
1387
+ }
1388
+ if (!chunk.success) {
1389
+ finishReason = "error";
1390
+ controller.enqueue({ type: "error", error: chunk.error });
1391
+ return;
1392
+ }
1393
+ const value = chunk.value;
1394
+ if ("error" in value) {
1395
+ finishReason = "error";
1396
+ controller.enqueue({ type: "error", error: value.error });
1397
+ return;
1398
+ }
1399
+ if (isFirstChunk) {
1400
+ isFirstChunk = false;
1401
+ controller.enqueue({
1402
+ type: "response-metadata",
1403
+ ...getResponseMetadata2(value)
1404
+ });
1405
+ controller.enqueue({ type: "text-start", id: "0" });
1406
+ }
1407
+ if (value.usage != null) {
1408
+ usage.inputTokens = value.usage.prompt_tokens;
1409
+ usage.outputTokens = value.usage.completion_tokens;
1410
+ usage.totalTokens = value.usage.total_tokens;
1411
+ }
1412
+ const choice = value.choices[0];
1413
+ if ((choice == null ? void 0 : choice.finish_reason) != null) {
1414
+ finishReason = mapOpenAIFinishReason2(choice.finish_reason);
1415
+ }
1416
+ if ((choice == null ? void 0 : choice.logprobs) != null) {
1417
+ providerMetadata.openai.logprobs = choice.logprobs;
1418
+ }
1419
+ if ((choice == null ? void 0 : choice.text) != null && choice.text.length > 0) {
1420
+ controller.enqueue({
1421
+ type: "text-delta",
1422
+ id: "0",
1423
+ delta: choice.text
1424
+ });
1425
+ }
1426
+ },
1427
+ flush(controller) {
1428
+ if (!isFirstChunk) {
1429
+ controller.enqueue({ type: "text-end", id: "0" });
1430
+ }
1431
+ controller.enqueue({
1432
+ type: "finish",
1433
+ finishReason,
1434
+ providerMetadata,
1435
+ usage
1436
+ });
1437
+ }
1438
+ })
1439
+ ),
1440
+ request: { body },
1441
+ response: { headers: responseHeaders }
1442
+ };
1443
+ }
1444
+ };
1445
+ var usageSchema = z5.object({
1446
+ prompt_tokens: z5.number(),
1447
+ completion_tokens: z5.number(),
1448
+ total_tokens: z5.number()
1449
+ });
1450
+ var openaiCompletionResponseSchema = z5.object({
1451
+ id: z5.string().nullish(),
1452
+ created: z5.number().nullish(),
1453
+ model: z5.string().nullish(),
1454
+ choices: z5.array(
1455
+ z5.object({
1456
+ text: z5.string(),
1457
+ finish_reason: z5.string(),
1458
+ logprobs: z5.object({
1459
+ tokens: z5.array(z5.string()),
1460
+ token_logprobs: z5.array(z5.number()),
1461
+ top_logprobs: z5.array(z5.record(z5.string(), z5.number())).nullish()
1462
+ }).nullish()
1463
+ })
1464
+ ),
1465
+ usage: usageSchema.nullish()
1466
+ });
1467
+ var openaiCompletionChunkSchema = z5.union([
1468
+ z5.object({
1469
+ id: z5.string().nullish(),
1470
+ created: z5.number().nullish(),
1471
+ model: z5.string().nullish(),
1472
+ choices: z5.array(
1473
+ z5.object({
1474
+ text: z5.string(),
1475
+ finish_reason: z5.string().nullish(),
1476
+ index: z5.number(),
1477
+ logprobs: z5.object({
1478
+ tokens: z5.array(z5.string()),
1479
+ token_logprobs: z5.array(z5.number()),
1480
+ top_logprobs: z5.array(z5.record(z5.string(), z5.number())).nullish()
1481
+ }).nullish()
1482
+ })
1483
+ ),
1484
+ usage: usageSchema.nullish()
1485
+ }),
1486
+ openaiErrorDataSchema
1487
+ ]);
1488
+
1489
+ // src/embedding/openai-embedding-model.ts
1490
+ import {
1491
+ TooManyEmbeddingValuesForCallError
1492
+ } from "@zenning/provider";
1493
+ import {
1494
+ combineHeaders as combineHeaders3,
1495
+ createJsonResponseHandler as createJsonResponseHandler3,
1496
+ parseProviderOptions as parseProviderOptions3,
1497
+ postJsonToApi as postJsonToApi3
1498
+ } from "@ai-sdk/provider-utils";
1499
+ import { z as z7 } from "zod/v4";
1500
+
1501
+ // src/embedding/openai-embedding-options.ts
1502
+ import { z as z6 } from "zod/v4";
1503
+ var openaiEmbeddingProviderOptions = z6.object({
1504
+ /**
1505
+ The number of dimensions the resulting output embeddings should have.
1506
+ Only supported in text-embedding-3 and later models.
1507
+ */
1508
+ dimensions: z6.number().optional(),
1509
+ /**
1510
+ A unique identifier representing your end-user, which can help OpenAI to
1511
+ monitor and detect abuse. Learn more.
1512
+ */
1513
+ user: z6.string().optional()
1514
+ });
1515
+
1516
+ // src/embedding/openai-embedding-model.ts
1517
+ var OpenAIEmbeddingModel = class {
1518
+ constructor(modelId, config) {
1519
+ this.specificationVersion = "v2";
1520
+ this.maxEmbeddingsPerCall = 2048;
1521
+ this.supportsParallelCalls = true;
1522
+ this.modelId = modelId;
1523
+ this.config = config;
1524
+ }
1525
+ get provider() {
1526
+ return this.config.provider;
1527
+ }
1528
+ async doEmbed({
1529
+ values,
1530
+ headers,
1531
+ abortSignal,
1532
+ providerOptions
1533
+ }) {
1534
+ var _a;
1535
+ if (values.length > this.maxEmbeddingsPerCall) {
1536
+ throw new TooManyEmbeddingValuesForCallError({
1537
+ provider: this.provider,
1538
+ modelId: this.modelId,
1539
+ maxEmbeddingsPerCall: this.maxEmbeddingsPerCall,
1540
+ values
1541
+ });
1542
+ }
1543
+ const openaiOptions = (_a = await parseProviderOptions3({
1544
+ provider: "openai",
1545
+ providerOptions,
1546
+ schema: openaiEmbeddingProviderOptions
1547
+ })) != null ? _a : {};
1548
+ const {
1549
+ responseHeaders,
1550
+ value: response,
1551
+ rawValue
1552
+ } = await postJsonToApi3({
1553
+ url: this.config.url({
1554
+ path: "/embeddings",
1555
+ modelId: this.modelId
1556
+ }),
1557
+ headers: combineHeaders3(this.config.headers(), headers),
1558
+ body: {
1559
+ model: this.modelId,
1560
+ input: values,
1561
+ encoding_format: "float",
1562
+ dimensions: openaiOptions.dimensions,
1563
+ user: openaiOptions.user
1564
+ },
1565
+ failedResponseHandler: openaiFailedResponseHandler,
1566
+ successfulResponseHandler: createJsonResponseHandler3(
1567
+ openaiTextEmbeddingResponseSchema
1568
+ ),
1569
+ abortSignal,
1570
+ fetch: this.config.fetch
1571
+ });
1572
+ return {
1573
+ embeddings: response.data.map((item) => item.embedding),
1574
+ usage: response.usage ? { tokens: response.usage.prompt_tokens } : void 0,
1575
+ response: { headers: responseHeaders, body: rawValue }
1576
+ };
1577
+ }
1578
+ };
1579
+ var openaiTextEmbeddingResponseSchema = z7.object({
1580
+ data: z7.array(z7.object({ embedding: z7.array(z7.number()) })),
1581
+ usage: z7.object({ prompt_tokens: z7.number() }).nullish()
1582
+ });
1583
+
1584
+ // src/image/openai-image-model.ts
1585
+ import {
1586
+ combineHeaders as combineHeaders4,
1587
+ createJsonResponseHandler as createJsonResponseHandler4,
1588
+ postJsonToApi as postJsonToApi4
1589
+ } from "@ai-sdk/provider-utils";
1590
+ import { z as z8 } from "zod/v4";
1591
+
1592
+ // src/image/openai-image-options.ts
1593
+ var modelMaxImagesPerCall = {
1594
+ "dall-e-3": 1,
1595
+ "dall-e-2": 10,
1596
+ "gpt-image-1": 10
1597
+ };
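+ // gpt-image-1 has a fixed base64 response format, so response_format is only set for the other models.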
1598
+ var hasDefaultResponseFormat = /* @__PURE__ */ new Set(["gpt-image-1"]);
1599
+
1600
+ // src/image/openai-image-model.ts
1601
+ var OpenAIImageModel = class {
1602
+ constructor(modelId, config) {
1603
+ this.modelId = modelId;
1604
+ this.config = config;
1605
+ this.specificationVersion = "v2";
1606
+ }
1607
+ get maxImagesPerCall() {
1608
+ var _a;
1609
+ return (_a = modelMaxImagesPerCall[this.modelId]) != null ? _a : 1;
1610
+ }
1611
+ get provider() {
1612
+ return this.config.provider;
1613
+ }
1614
+ async doGenerate({
1615
+ prompt,
1616
+ n,
1617
+ size,
1618
+ aspectRatio,
1619
+ seed,
1620
+ providerOptions,
1621
+ headers,
1622
+ abortSignal
1623
+ }) {
1624
+ var _a, _b, _c, _d;
1625
+ const warnings = [];
1626
+ if (aspectRatio != null) {
1627
+ warnings.push({
1628
+ type: "unsupported-setting",
1629
+ setting: "aspectRatio",
1630
+ details: "This model does not support aspect ratio. Use `size` instead."
1631
+ });
1632
+ }
1633
+ if (seed != null) {
1634
+ warnings.push({ type: "unsupported-setting", setting: "seed" });
1635
+ }
1636
+ const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
1637
+ const { value: response, responseHeaders } = await postJsonToApi4({
1638
+ url: this.config.url({
1639
+ path: "/images/generations",
1640
+ modelId: this.modelId
1641
+ }),
1642
+ headers: combineHeaders4(this.config.headers(), headers),
1643
+ body: {
1644
+ model: this.modelId,
1645
+ prompt,
1646
+ n,
1647
+ size,
1648
+ ...(_d = providerOptions.openai) != null ? _d : {},
1649
+ ...!hasDefaultResponseFormat.has(this.modelId) ? { response_format: "b64_json" } : {}
1650
+ },
1651
+ failedResponseHandler: openaiFailedResponseHandler,
1652
+ successfulResponseHandler: createJsonResponseHandler4(
1653
+ openaiImageResponseSchema
1654
+ ),
1655
+ abortSignal,
1656
+ fetch: this.config.fetch
1657
+ });
1658
+ return {
1659
+ images: response.data.map((item) => item.b64_json),
1660
+ warnings,
1661
+ response: {
1662
+ timestamp: currentDate,
1663
+ modelId: this.modelId,
1664
+ headers: responseHeaders
1665
+ },
1666
+ providerMetadata: {
1667
+ openai: {
1668
+ images: response.data.map(
1669
+ (item) => item.revised_prompt ? {
1670
+ revisedPrompt: item.revised_prompt
1671
+ } : null
1672
+ )
1673
+ }
1674
+ }
1675
+ };
1676
+ }
1677
+ };
1678
+ var openaiImageResponseSchema = z8.object({
1679
+ data: z8.array(
1680
+ z8.object({ b64_json: z8.string(), revised_prompt: z8.string().optional() })
1681
+ )
1682
+ });
1683
+
1684
+ // src/transcription/openai-transcription-model.ts
1685
+ import {
1686
+ combineHeaders as combineHeaders5,
1687
+ convertBase64ToUint8Array,
1688
+ createJsonResponseHandler as createJsonResponseHandler5,
1689
+ mediaTypeToExtension,
1690
+ parseProviderOptions as parseProviderOptions4,
1691
+ postFormDataToApi
1692
+ } from "@ai-sdk/provider-utils";
1693
+ import { z as z10 } from "zod/v4";
1694
+
1695
+ // src/transcription/openai-transcription-options.ts
1696
+ import { z as z9 } from "zod/v4";
1697
+ var openAITranscriptionProviderOptions = z9.object({
1698
+ /**
1699
+ * Additional information to include in the transcription response.
1700
+ */
1701
+ include: z9.array(z9.string()).optional(),
1702
+ /**
1703
+ * The language of the input audio in ISO-639-1 format.
1704
+ */
1705
+ language: z9.string().optional(),
1706
+ /**
1707
+ * An optional text to guide the model's style or continue a previous audio segment.
1708
+ */
1709
+ prompt: z9.string().optional(),
1710
+ /**
1711
+ * The sampling temperature, between 0 and 1.
1712
+ * @default 0
1713
+ */
1714
+ temperature: z9.number().min(0).max(1).default(0).optional(),
1715
+ /**
1716
+ * The timestamp granularities to populate for this transcription.
1717
+ * @default ['segment']
1718
+ */
1719
+ timestampGranularities: z9.array(z9.enum(["word", "segment"])).default(["segment"]).optional()
1720
+ });
1721
+
1722
+ // src/transcription/openai-transcription-model.ts
1723
+ var languageMap = {
1724
+ afrikaans: "af",
1725
+ arabic: "ar",
1726
+ armenian: "hy",
1727
+ azerbaijani: "az",
1728
+ belarusian: "be",
1729
+ bosnian: "bs",
1730
+ bulgarian: "bg",
1731
+ catalan: "ca",
1732
+ chinese: "zh",
1733
+ croatian: "hr",
1734
+ czech: "cs",
1735
+ danish: "da",
1736
+ dutch: "nl",
1737
+ english: "en",
1738
+ estonian: "et",
1739
+ finnish: "fi",
1740
+ french: "fr",
1741
+ galician: "gl",
1742
+ german: "de",
1743
+ greek: "el",
1744
+ hebrew: "he",
1745
+ hindi: "hi",
1746
+ hungarian: "hu",
1747
+ icelandic: "is",
1748
+ indonesian: "id",
1749
+ italian: "it",
1750
+ japanese: "ja",
1751
+ kannada: "kn",
1752
+ kazakh: "kk",
1753
+ korean: "ko",
1754
+ latvian: "lv",
1755
+ lithuanian: "lt",
1756
+ macedonian: "mk",
1757
+ malay: "ms",
1758
+ marathi: "mr",
1759
+ maori: "mi",
1760
+ nepali: "ne",
1761
+ norwegian: "no",
1762
+ persian: "fa",
1763
+ polish: "pl",
1764
+ portuguese: "pt",
1765
+ romanian: "ro",
1766
+ russian: "ru",
1767
+ serbian: "sr",
1768
+ slovak: "sk",
1769
+ slovenian: "sl",
1770
+ spanish: "es",
1771
+ swahili: "sw",
1772
+ swedish: "sv",
1773
+ tagalog: "tl",
1774
+ tamil: "ta",
1775
+ thai: "th",
1776
+ turkish: "tr",
1777
+ ukrainian: "uk",
1778
+ urdu: "ur",
1779
+ vietnamese: "vi",
1780
+ welsh: "cy"
1781
+ };
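
The verbose_json transcription response reports the detected language as a full English name; `doGenerate` below uses this map to normalize it to an ISO-639-1 code, and names that are not in the map leave `language` undefined.

languageMap["english"]; // "en"
languageMap["welsh"];   // "cy"
// an unrecognized name is not looked up, so the returned `language` stays undefined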
1782
+ var OpenAITranscriptionModel = class {
1783
+ constructor(modelId, config) {
1784
+ this.modelId = modelId;
1785
+ this.config = config;
1786
+ this.specificationVersion = "v2";
1787
+ }
1788
+ get provider() {
1789
+ return this.config.provider;
1790
+ }
1791
+ async getArgs({
1792
+ audio,
1793
+ mediaType,
1794
+ providerOptions
1795
+ }) {
1796
+ const warnings = [];
1797
+ const openAIOptions = await parseProviderOptions4({
1798
+ provider: "openai",
1799
+ providerOptions,
1800
+ schema: openAITranscriptionProviderOptions
1801
+ });
1802
+ const formData = new FormData();
1803
+ const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([convertBase64ToUint8Array(audio)]);
1804
+ formData.append("model", this.modelId);
1805
+ const fileExtension = mediaTypeToExtension(mediaType);
1806
+ formData.append(
1807
+ "file",
1808
+ new File([blob], "audio", { type: mediaType }),
1809
+ `audio.${fileExtension}`
1810
+ );
1811
+ if (openAIOptions) {
1812
+ const transcriptionModelOptions = {
1813
+ include: openAIOptions.include,
1814
+ language: openAIOptions.language,
1815
+ prompt: openAIOptions.prompt,
1816
+ // https://platform.openai.com/docs/api-reference/audio/createTranscription#audio_createtranscription-response_format
1817
+ // prefer verbose_json to get segments for models that support it
1818
+ response_format: [
1819
+ "gpt-4o-transcribe",
1820
+ "gpt-4o-mini-transcribe"
1821
+ ].includes(this.modelId) ? "json" : "verbose_json",
1822
+ temperature: openAIOptions.temperature,
1823
+ timestamp_granularities: openAIOptions.timestampGranularities
1824
+ };
1825
+ for (const [key, value] of Object.entries(transcriptionModelOptions)) {
1826
+ if (value != null) {
1827
+ if (Array.isArray(value)) {
1828
+ for (const item of value) {
1829
+ formData.append(`${key}[]`, String(item));
1830
+ }
1831
+ } else {
1832
+ formData.append(key, String(value));
1833
+ }
1834
+ }
1835
+ }
1836
+ }
1837
+ return {
1838
+ formData,
1839
+ warnings
1840
+ };
1841
+ }
1842
+ async doGenerate(options) {
1843
+ var _a, _b, _c, _d, _e, _f, _g, _h;
1844
+ const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
1845
+ const { formData, warnings } = await this.getArgs(options);
1846
+ const {
1847
+ value: response,
1848
+ responseHeaders,
1849
+ rawValue: rawResponse
1850
+ } = await postFormDataToApi({
1851
+ url: this.config.url({
1852
+ path: "/audio/transcriptions",
1853
+ modelId: this.modelId
1854
+ }),
1855
+ headers: combineHeaders5(this.config.headers(), options.headers),
1856
+ formData,
1857
+ failedResponseHandler: openaiFailedResponseHandler,
1858
+ successfulResponseHandler: createJsonResponseHandler5(
1859
+ openaiTranscriptionResponseSchema
1860
+ ),
1861
+ abortSignal: options.abortSignal,
1862
+ fetch: this.config.fetch
1863
+ });
1864
+ const language = response.language != null && response.language in languageMap ? languageMap[response.language] : void 0;
1865
+ return {
1866
+ text: response.text,
1867
+ segments: (_g = (_f = (_d = response.segments) == null ? void 0 : _d.map((segment) => ({
1868
+ text: segment.text,
1869
+ startSecond: segment.start,
1870
+ endSecond: segment.end
1871
+ }))) != null ? _f : (_e = response.words) == null ? void 0 : _e.map((word) => ({
1872
+ text: word.word,
1873
+ startSecond: word.start,
1874
+ endSecond: word.end
1875
+ }))) != null ? _g : [],
1876
+ language,
1877
+ durationInSeconds: (_h = response.duration) != null ? _h : void 0,
1878
+ warnings,
1879
+ response: {
1880
+ timestamp: currentDate,
1881
+ modelId: this.modelId,
1882
+ headers: responseHeaders,
1883
+ body: rawResponse
1884
+ }
1885
+ };
1886
+ }
1887
+ };
1888
+ var openaiTranscriptionResponseSchema = z10.object({
1889
+ text: z10.string(),
1890
+ language: z10.string().nullish(),
1891
+ duration: z10.number().nullish(),
1892
+ words: z10.array(
1893
+ z10.object({
1894
+ word: z10.string(),
1895
+ start: z10.number(),
1896
+ end: z10.number()
1897
+ })
1898
+ ).nullish(),
1899
+ segments: z10.array(
1900
+ z10.object({
1901
+ id: z10.number(),
1902
+ seek: z10.number(),
1903
+ start: z10.number(),
1904
+ end: z10.number(),
1905
+ text: z10.string(),
1906
+ tokens: z10.array(z10.number()),
1907
+ temperature: z10.number(),
1908
+ avg_logprob: z10.number(),
1909
+ compression_ratio: z10.number(),
1910
+ no_speech_prob: z10.number()
1911
+ })
1912
+ ).nullish()
1913
+ });
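
A small sketch of the fallback applied in `doGenerate` above: segment timestamps are preferred, word timestamps are used only when no segments are returned, and the result is otherwise an empty array. `response` is assumed to be a value already parsed by `openaiTranscriptionResponseSchema`.

// Equivalent, unbundled form of the segment mapping above.
function toSegments(response) {
  return (
    response.segments?.map(s => ({ text: s.text, startSecond: s.start, endSecond: s.end })) ??
    response.words?.map(w => ({ text: w.word, startSecond: w.start, endSecond: w.end })) ??
    []
  );
}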
1914
+
1915
+ // src/speech/openai-speech-model.ts
1916
+ import {
1917
+ combineHeaders as combineHeaders6,
1918
+ createBinaryResponseHandler,
1919
+ parseProviderOptions as parseProviderOptions5,
1920
+ postJsonToApi as postJsonToApi5
1921
+ } from "@ai-sdk/provider-utils";
1922
+ import { z as z11 } from "zod/v4";
1923
+ var OpenAIProviderOptionsSchema = z11.object({
1924
+ instructions: z11.string().nullish(),
1925
+ speed: z11.number().min(0.25).max(4).default(1).nullish()
1926
+ });
1927
+ var OpenAISpeechModel = class {
1928
+ constructor(modelId, config) {
1929
+ this.modelId = modelId;
1930
+ this.config = config;
1931
+ this.specificationVersion = "v2";
1932
+ }
1933
+ get provider() {
1934
+ return this.config.provider;
1935
+ }
1936
+ async getArgs({
1937
+ text,
1938
+ voice = "alloy",
1939
+ outputFormat = "mp3",
1940
+ speed,
1941
+ instructions,
1942
+ language,
1943
+ providerOptions
1944
+ }) {
1945
+ const warnings = [];
1946
+ const openAIOptions = await parseProviderOptions5({
1947
+ provider: "openai",
1948
+ providerOptions,
1949
+ schema: OpenAIProviderOptionsSchema
1950
+ });
1951
+ const requestBody = {
1952
+ model: this.modelId,
1953
+ input: text,
1954
+ voice,
1955
+ response_format: "mp3",
1956
+ speed,
1957
+ instructions
1958
+ };
1959
+ if (outputFormat) {
1960
+ if (["mp3", "opus", "aac", "flac", "wav", "pcm"].includes(outputFormat)) {
1961
+ requestBody.response_format = outputFormat;
1962
+ } else {
1963
+ warnings.push({
1964
+ type: "unsupported-setting",
1965
+ setting: "outputFormat",
1966
+ details: `Unsupported output format: ${outputFormat}. Using mp3 instead.`
1967
+ });
1968
+ }
1969
+ }
1970
+ if (openAIOptions) {
1971
+ const speechModelOptions = {};
1972
+ for (const key in speechModelOptions) {
1973
+ const value = speechModelOptions[key];
1974
+ if (value !== void 0) {
1975
+ requestBody[key] = value;
1976
+ }
1977
+ }
1978
+ }
1979
+ if (language) {
1980
+ warnings.push({
1981
+ type: "unsupported-setting",
1982
+ setting: "language",
1983
+ details: `OpenAI speech models do not support language selection. Language parameter "${language}" was ignored.`
1984
+ });
1985
+ }
1986
+ return {
1987
+ requestBody,
1988
+ warnings
1989
+ };
1990
+ }
1991
+ async doGenerate(options) {
1992
+ var _a, _b, _c;
1993
+ const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
1994
+ const { requestBody, warnings } = await this.getArgs(options);
1995
+ const {
1996
+ value: audio,
1997
+ responseHeaders,
1998
+ rawValue: rawResponse
1999
+ } = await postJsonToApi5({
2000
+ url: this.config.url({
2001
+ path: "/audio/speech",
2002
+ modelId: this.modelId
2003
+ }),
2004
+ headers: combineHeaders6(this.config.headers(), options.headers),
2005
+ body: requestBody,
2006
+ failedResponseHandler: openaiFailedResponseHandler,
2007
+ successfulResponseHandler: createBinaryResponseHandler(),
2008
+ abortSignal: options.abortSignal,
2009
+ fetch: this.config.fetch
2010
+ });
2011
+ return {
2012
+ audio,
2013
+ warnings,
2014
+ request: {
2015
+ body: JSON.stringify(requestBody)
2016
+ },
2017
+ response: {
2018
+ timestamp: currentDate,
2019
+ modelId: this.modelId,
2020
+ headers: responseHeaders,
2021
+ body: rawResponse
2022
+ }
2023
+ };
2024
+ }
2025
+ };
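
An illustrative call against the speech model's `getArgs` (the `speechModel` instance and values are placeholders): unsupported output formats fall back to "mp3" with an `unsupported-setting` warning. Note also that the `speechModelOptions` loop above iterates an empty object literal, so as written it copies nothing from the parsed provider options into the request body.

const { requestBody, warnings } = await speechModel.getArgs({
  text: "Hello from the speech model",
  outputFormat: "ogg",                          // not in the allow-list
  providerOptions: { openai: { speed: 1.25 } }  // parsed, but not applied by the empty loop
});
// requestBody.response_format === "mp3"
// warnings[0] => { type: "unsupported-setting", setting: "outputFormat",
//                  details: "Unsupported output format: ogg. Using mp3 instead." }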
2026
+
2027
+ // src/responses/openai-responses-language-model.ts
2028
+ import {
2029
+ APICallError
2030
+ } from "@zenning/provider";
2031
+ import {
2032
+ combineHeaders as combineHeaders7,
2033
+ createEventSourceResponseHandler as createEventSourceResponseHandler3,
2034
+ createJsonResponseHandler as createJsonResponseHandler6,
2035
+ generateId as generateId2,
2036
+ parseProviderOptions as parseProviderOptions7,
2037
+ postJsonToApi as postJsonToApi6
2038
+ } from "@ai-sdk/provider-utils";
2039
+ import { z as z18 } from "zod/v4";
2040
+
2041
+ // src/responses/convert-to-openai-responses-input.ts
2042
+ import {
2043
+ UnsupportedFunctionalityError as UnsupportedFunctionalityError4
2044
+ } from "@zenning/provider";
2045
+ import { convertToBase64 as convertToBase642, parseProviderOptions as parseProviderOptions6 } from "@ai-sdk/provider-utils";
2046
+ import { z as z12 } from "zod/v4";
2047
+ function isFileId(data, prefixes) {
2048
+ if (!prefixes) return false;
2049
+ return prefixes.some((prefix) => data.startsWith(prefix));
2050
+ }
2051
+ async function convertToOpenAIResponsesInput({
2052
+ prompt,
2053
+ systemMessageMode,
2054
+ fileIdPrefixes,
2055
+ store
2056
+ }) {
2057
+ var _a, _b, _c, _d, _e, _f;
2058
+ const input = [];
2059
+ const warnings = [];
2060
+ for (const { role, content } of prompt) {
2061
+ switch (role) {
2062
+ case "system": {
2063
+ switch (systemMessageMode) {
2064
+ case "system": {
2065
+ input.push({ role: "system", content });
2066
+ break;
2067
+ }
2068
+ case "developer": {
2069
+ input.push({ role: "developer", content });
2070
+ break;
2071
+ }
2072
+ case "remove": {
2073
+ warnings.push({
2074
+ type: "other",
2075
+ message: "system messages are removed for this model"
2076
+ });
2077
+ break;
2078
+ }
2079
+ default: {
2080
+ const _exhaustiveCheck = systemMessageMode;
2081
+ throw new Error(
2082
+ `Unsupported system message mode: ${_exhaustiveCheck}`
2083
+ );
2084
+ }
2085
+ }
2086
+ break;
2087
+ }
2088
+ case "user": {
2089
+ input.push({
2090
+ role: "user",
2091
+ content: content.map((part, index) => {
2092
+ var _a2, _b2, _c2;
2093
+ switch (part.type) {
2094
+ case "text": {
2095
+ return { type: "input_text", text: part.text };
2096
+ }
2097
+ case "file": {
2098
+ if (part.mediaType.startsWith("image/")) {
2099
+ const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
2100
+ return {
2101
+ type: "input_image",
2102
+ ...part.data instanceof URL ? { image_url: part.data.toString() } : typeof part.data === "string" && isFileId(part.data, fileIdPrefixes) ? { file_id: part.data } : {
2103
+ image_url: `data:${mediaType};base64,${convertToBase642(part.data)}`
2104
+ },
2105
+ detail: (_b2 = (_a2 = part.providerOptions) == null ? void 0 : _a2.openai) == null ? void 0 : _b2.imageDetail
2106
+ };
2107
+ } else if (part.mediaType === "application/pdf") {
2108
+ if (part.data instanceof URL) {
2109
+ return {
2110
+ type: "input_file",
2111
+ file_url: part.data.toString()
2112
+ };
2113
+ }
2114
+ return {
2115
+ type: "input_file",
2116
+ ...typeof part.data === "string" && isFileId(part.data, fileIdPrefixes) ? { file_id: part.data } : {
2117
+ filename: (_c2 = part.filename) != null ? _c2 : `part-${index}.pdf`,
2118
+ file_data: `data:application/pdf;base64,${convertToBase642(part.data)}`
2119
+ }
2120
+ };
2121
+ } else {
2122
+ throw new UnsupportedFunctionalityError4({
2123
+ functionality: `file part media type ${part.mediaType}`
2124
+ });
2125
+ }
2126
+ }
2127
+ }
2128
+ })
2129
+ });
2130
+ break;
2131
+ }
2132
+ case "assistant": {
2133
+ const reasoningMessages = {};
2134
+ const toolCallParts = {};
2135
+ for (const part of content) {
2136
+ switch (part.type) {
2137
+ case "text": {
2138
+ input.push({
2139
+ role: "assistant",
2140
+ content: [{ type: "output_text", text: part.text }],
2141
+ id: (_c = (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.itemId) != null ? _c : void 0
2142
+ });
2143
+ break;
2144
+ }
2145
+ case "tool-call": {
2146
+ toolCallParts[part.toolCallId] = part;
2147
+ if (part.providerExecuted) {
2148
+ break;
2149
+ }
2150
+ input.push({
2151
+ type: "function_call",
2152
+ call_id: part.toolCallId,
2153
+ name: part.toolName,
2154
+ arguments: JSON.stringify(part.input),
2155
+ id: (_f = (_e = (_d = part.providerOptions) == null ? void 0 : _d.openai) == null ? void 0 : _e.itemId) != null ? _f : void 0
2156
+ });
2157
+ break;
2158
+ }
2159
+ // assistant tool result parts are from provider-executed tools:
2160
+ case "tool-result": {
2161
+ if (store) {
2162
+ input.push({ type: "item_reference", id: part.toolCallId });
2163
+ } else {
2164
+ warnings.push({
2165
+ type: "other",
2166
+ message: `Results for OpenAI tool ${part.toolName} are not sent to the API when store is false`
2167
+ });
2168
+ }
2169
+ break;
2170
+ }
2171
+ case "reasoning": {
2172
+ const providerOptions = await parseProviderOptions6({
2173
+ provider: "openai",
2174
+ providerOptions: part.providerOptions,
2175
+ schema: openaiResponsesReasoningProviderOptionsSchema
2176
+ });
2177
+ const reasoningId = providerOptions == null ? void 0 : providerOptions.itemId;
2178
+ if (reasoningId != null) {
2179
+ const existingReasoningMessage = reasoningMessages[reasoningId];
2180
+ const summaryParts = [];
2181
+ if (part.text.length > 0) {
2182
+ summaryParts.push({ type: "summary_text", text: part.text });
2183
+ } else if (existingReasoningMessage !== void 0) {
2184
+ warnings.push({
2185
+ type: "other",
2186
+ message: `Cannot append empty reasoning part to existing reasoning sequence. Skipping reasoning part: ${JSON.stringify(part)}.`
2187
+ });
2188
+ }
2189
+ if (existingReasoningMessage === void 0) {
2190
+ reasoningMessages[reasoningId] = {
2191
+ type: "reasoning",
2192
+ id: reasoningId,
2193
+ encrypted_content: providerOptions == null ? void 0 : providerOptions.reasoningEncryptedContent,
2194
+ summary: summaryParts
2195
+ };
2196
+ input.push(reasoningMessages[reasoningId]);
2197
+ } else {
2198
+ existingReasoningMessage.summary.push(...summaryParts);
2199
+ }
2200
+ } else {
2201
+ warnings.push({
2202
+ type: "other",
2203
+ message: `Non-OpenAI reasoning parts are not supported. Skipping reasoning part: ${JSON.stringify(part)}.`
2204
+ });
2205
+ }
2206
+ break;
2207
+ }
2208
+ }
2209
+ }
2210
+ break;
2211
+ }
2212
+ case "tool": {
2213
+ for (const part of content) {
2214
+ const output = part.output;
2215
+ let contentValue;
2216
+ switch (output.type) {
2217
+ case "text":
2218
+ case "error-text":
2219
+ contentValue = output.value;
2220
+ break;
2221
+ case "content":
2222
+ case "json":
2223
+ case "error-json":
2224
+ contentValue = JSON.stringify(output.value);
2225
+ break;
2226
+ }
2227
+ input.push({
2228
+ type: "function_call_output",
2229
+ call_id: part.toolCallId,
2230
+ output: contentValue
2231
+ });
2232
+ }
2233
+ break;
2234
+ }
2235
+ default: {
2236
+ const _exhaustiveCheck = role;
2237
+ throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
2238
+ }
2239
+ }
2240
+ }
2241
+ return { input, warnings };
2242
+ }
2243
+ var openaiResponsesReasoningProviderOptionsSchema = z12.object({
2244
+ itemId: z12.string().nullish(),
2245
+ reasoningEncryptedContent: z12.string().nullish()
2246
+ });
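
For reference, this is the shape of an assistant reasoning part that `convertToOpenAIResponsesInput` merges (values are illustrative): parts sharing the same `openai.itemId` are folded into a single "reasoning" input item, with each non-empty `text` appended as an additional `summary_text` entry.

// Illustrative reasoning part; itemId is a placeholder.
const reasoningPart = {
  type: "reasoning",
  text: "Compared two retrieval strategies before answering.",
  providerOptions: {
    openai: {
      itemId: "rs_123",
      reasoningEncryptedContent: null
    }
  }
};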
2247
+
2248
+ // src/responses/map-openai-responses-finish-reason.ts
2249
+ function mapOpenAIResponseFinishReason({
2250
+ finishReason,
2251
+ hasFunctionCall
2252
+ }) {
2253
+ switch (finishReason) {
2254
+ case void 0:
2255
+ case null:
2256
+ return hasFunctionCall ? "tool-calls" : "stop";
2257
+ case "max_output_tokens":
2258
+ return "length";
2259
+ case "content_filter":
2260
+ return "content-filter";
2261
+ default:
2262
+ return hasFunctionCall ? "tool-calls" : "unknown";
2263
+ }
2264
+ }
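
A few concrete mappings of the function above:

mapOpenAIResponseFinishReason({ finishReason: undefined, hasFunctionCall: true });            // "tool-calls"
mapOpenAIResponseFinishReason({ finishReason: "max_output_tokens", hasFunctionCall: false }); // "length"
mapOpenAIResponseFinishReason({ finishReason: "content_filter", hasFunctionCall: false });    // "content-filter"
mapOpenAIResponseFinishReason({ finishReason: "anything_else", hasFunctionCall: false });     // "unknown"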
2265
+
2266
+ // src/responses/openai-responses-prepare-tools.ts
2267
+ import {
2268
+ UnsupportedFunctionalityError as UnsupportedFunctionalityError5
2269
+ } from "@zenning/provider";
2270
+
2271
+ // src/tool/code-interpreter.ts
2272
+ import { createProviderDefinedToolFactoryWithOutputSchema } from "@ai-sdk/provider-utils";
2273
+ import { z as z13 } from "zod/v4";
2274
+ var codeInterpreterInputSchema = z13.object({
2275
+ code: z13.string().nullish(),
2276
+ containerId: z13.string()
2277
+ });
2278
+ var codeInterpreterOutputSchema = z13.object({
2279
+ outputs: z13.array(
2280
+ z13.discriminatedUnion("type", [
2281
+ z13.object({ type: z13.literal("logs"), logs: z13.string() }),
2282
+ z13.object({ type: z13.literal("image"), url: z13.string() })
2283
+ ])
2284
+ ).nullish()
2285
+ });
2286
+ var codeInterpreterArgsSchema = z13.object({
2287
+ container: z13.union([
2288
+ z13.string(),
2289
+ z13.object({
2290
+ fileIds: z13.array(z13.string()).optional()
2291
+ })
2292
+ ]).optional()
2293
+ });
2294
+ var codeInterpreterToolFactory = createProviderDefinedToolFactoryWithOutputSchema({
2295
+ id: "openai.code_interpreter",
2296
+ name: "code_interpreter",
2297
+ inputSchema: codeInterpreterInputSchema,
2298
+ outputSchema: codeInterpreterOutputSchema
2299
+ });
2300
+
2301
+ // src/tool/file-search.ts
2302
+ import { createProviderDefinedToolFactoryWithOutputSchema as createProviderDefinedToolFactoryWithOutputSchema2 } from "@ai-sdk/provider-utils";
2303
+ import { z as z14 } from "zod/v4";
2304
+ var comparisonFilterSchema = z14.object({
2305
+ key: z14.string(),
2306
+ type: z14.enum(["eq", "ne", "gt", "gte", "lt", "lte"]),
2307
+ value: z14.union([z14.string(), z14.number(), z14.boolean()])
2308
+ });
2309
+ var compoundFilterSchema = z14.object({
2310
+ type: z14.enum(["and", "or"]),
2311
+ filters: z14.array(
2312
+ z14.union([comparisonFilterSchema, z14.lazy(() => compoundFilterSchema)])
2313
+ )
2314
+ });
2315
+ var fileSearchArgsSchema = z14.object({
2316
+ vectorStoreIds: z14.array(z14.string()),
2317
+ maxNumResults: z14.number().optional(),
2318
+ ranking: z14.object({
2319
+ ranker: z14.string().optional(),
2320
+ scoreThreshold: z14.number().optional()
2321
+ }).optional(),
2322
+ filters: z14.union([comparisonFilterSchema, compoundFilterSchema]).optional()
2323
+ });
2324
+ var fileSearchOutputSchema = z14.object({
2325
+ queries: z14.array(z14.string()),
2326
+ results: z14.array(
2327
+ z14.object({
2328
+ attributes: z14.record(z14.string(), z14.unknown()),
2329
+ fileId: z14.string(),
2330
+ filename: z14.string(),
2331
+ score: z14.number(),
2332
+ text: z14.string()
2333
+ })
2334
+ ).nullable()
2335
+ });
2336
+ var fileSearch = createProviderDefinedToolFactoryWithOutputSchema2({
2337
+ id: "openai.file_search",
2338
+ name: "file_search",
2339
+ inputSchema: z14.object({}),
2340
+ outputSchema: fileSearchOutputSchema
2341
+ });
2342
+
2343
+ // src/tool/web-search.ts
2344
+ import { createProviderDefinedToolFactory } from "@ai-sdk/provider-utils";
2345
+ import { z as z15 } from "zod/v4";
2346
+ var webSearchArgsSchema = z15.object({
2347
+ filters: z15.object({
2348
+ allowedDomains: z15.array(z15.string()).optional()
2349
+ }).optional(),
2350
+ searchContextSize: z15.enum(["low", "medium", "high"]).optional(),
2351
+ userLocation: z15.object({
2352
+ type: z15.literal("approximate"),
2353
+ country: z15.string().optional(),
2354
+ city: z15.string().optional(),
2355
+ region: z15.string().optional(),
2356
+ timezone: z15.string().optional()
2357
+ }).optional()
2358
+ });
2359
+ var webSearchToolFactory = createProviderDefinedToolFactory({
2360
+ id: "openai.web_search",
2361
+ name: "web_search",
2362
+ inputSchema: z15.object({
2363
+ action: z15.discriminatedUnion("type", [
2364
+ z15.object({
2365
+ type: z15.literal("search"),
2366
+ query: z15.string().nullish()
2367
+ }),
2368
+ z15.object({
2369
+ type: z15.literal("open_page"),
2370
+ url: z15.string()
2371
+ }),
2372
+ z15.object({
2373
+ type: z15.literal("find"),
2374
+ url: z15.string(),
2375
+ pattern: z15.string()
2376
+ })
2377
+ ]).nullish()
2378
+ })
2379
+ });
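
Illustrative args accepted by `webSearchArgsSchema` (placeholder values); `prepareResponsesTools` further down forwards them as `{ type: "web_search", filters: { allowed_domains }, search_context_size, user_location }`.

const webSearchArgs = {
  filters: { allowedDomains: ["example.com"] },
  searchContextSize: "medium",
  userLocation: { type: "approximate", country: "US", city: "Minneapolis", region: "Minnesota" }
};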
2380
+
2381
+ // src/tool/web-search-preview.ts
2382
+ import { createProviderDefinedToolFactory as createProviderDefinedToolFactory2 } from "@ai-sdk/provider-utils";
2383
+ import { z as z16 } from "zod/v4";
2384
+ var webSearchPreviewArgsSchema = z16.object({
2385
+ /**
2386
+ * Search context size to use for the web search.
2387
+ * - high: Most comprehensive context, highest cost, slower response
2388
+ * - medium: Balanced context, cost, and latency (default)
2389
+ * - low: Least context, lowest cost, fastest response
2390
+ */
2391
+ searchContextSize: z16.enum(["low", "medium", "high"]).optional(),
2392
+ /**
2393
+ * User location information to provide geographically relevant search results.
2394
+ */
2395
+ userLocation: z16.object({
2396
+ /**
2397
+ * Type of location (always 'approximate')
2398
+ */
2399
+ type: z16.literal("approximate"),
2400
+ /**
2401
+ * Two-letter ISO country code (e.g., 'US', 'GB')
2402
+ */
2403
+ country: z16.string().optional(),
2404
+ /**
2405
+ * City name (free text, e.g., 'Minneapolis')
2406
+ */
2407
+ city: z16.string().optional(),
2408
+ /**
2409
+ * Region name (free text, e.g., 'Minnesota')
2410
+ */
2411
+ region: z16.string().optional(),
2412
+ /**
2413
+ * IANA timezone (e.g., 'America/Chicago')
2414
+ */
2415
+ timezone: z16.string().optional()
2416
+ }).optional()
2417
+ });
2418
+ var webSearchPreview = createProviderDefinedToolFactory2({
2419
+ id: "openai.web_search_preview",
2420
+ name: "web_search_preview",
2421
+ inputSchema: z16.object({
2422
+ action: z16.discriminatedUnion("type", [
2423
+ z16.object({
2424
+ type: z16.literal("search"),
2425
+ query: z16.string().nullish()
2426
+ }),
2427
+ z16.object({
2428
+ type: z16.literal("open_page"),
2429
+ url: z16.string()
2430
+ }),
2431
+ z16.object({
2432
+ type: z16.literal("find"),
2433
+ url: z16.string(),
2434
+ pattern: z16.string()
2435
+ })
2436
+ ]).nullish()
2437
+ })
2438
+ });
2439
+
2440
+ // src/tool/image-generation.ts
2441
+ import { createProviderDefinedToolFactoryWithOutputSchema as createProviderDefinedToolFactoryWithOutputSchema3 } from "@ai-sdk/provider-utils";
2442
+ import { z as z17 } from "zod/v4";
2443
+ var imageGenerationArgsSchema = z17.object({
2444
+ background: z17.enum(["auto", "opaque", "transparent"]).optional(),
2445
+ inputFidelity: z17.enum(["low", "high"]).optional(),
2446
+ inputImageMask: z17.object({
2447
+ fileId: z17.string().optional(),
2448
+ imageUrl: z17.string().optional()
2449
+ }).optional(),
2450
+ model: z17.string().optional(),
2451
+ moderation: z17.enum(["auto"]).optional(),
2452
+ outputCompression: z17.number().int().min(0).max(100).optional(),
2453
+ outputFormat: z17.enum(["png", "jpeg", "webp"]).optional(),
2454
+ quality: z17.enum(["auto", "low", "medium", "high"]).optional(),
2455
+ size: z17.enum(["1024x1024", "1024x1536", "1536x1024", "auto"]).optional()
2456
+ }).strict();
2457
+ var imageGenerationOutputSchema = z17.object({
2458
+ result: z17.string()
2459
+ });
2460
+ var imageGenerationToolFactory = createProviderDefinedToolFactoryWithOutputSchema3({
2461
+ id: "openai.image_generation",
2462
+ name: "image_generation",
2463
+ inputSchema: z17.object({}),
2464
+ outputSchema: imageGenerationOutputSchema
2465
+ });
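
Illustrative args for the `image_generation` tool; the schema is `.strict()`, so only the keys defined above are accepted (values here are placeholders).

const imageGenerationArgs = {
  background: "transparent",
  quality: "high",
  size: "1024x1024",
  outputFormat: "png",
  outputCompression: 80
};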
2466
+
2467
+ // src/responses/openai-responses-prepare-tools.ts
2468
+ function prepareResponsesTools({
2469
+ tools,
2470
+ toolChoice,
2471
+ strictJsonSchema
2472
+ }) {
2473
+ tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
2474
+ const toolWarnings = [];
2475
+ if (tools == null) {
2476
+ return { tools: void 0, toolChoice: void 0, toolWarnings };
2477
+ }
2478
+ const openaiTools = [];
2479
+ for (const tool of tools) {
2480
+ switch (tool.type) {
2481
+ case "function":
2482
+ openaiTools.push({
2483
+ type: "function",
2484
+ name: tool.name,
2485
+ description: tool.description,
2486
+ parameters: tool.inputSchema,
2487
+ strict: strictJsonSchema
2488
+ });
2489
+ break;
2490
+ case "provider-defined": {
2491
+ switch (tool.id) {
2492
+ case "openai.file_search": {
2493
+ const args = fileSearchArgsSchema.parse(tool.args);
2494
+ openaiTools.push({
2495
+ type: "file_search",
2496
+ vector_store_ids: args.vectorStoreIds,
2497
+ max_num_results: args.maxNumResults,
2498
+ ranking_options: args.ranking ? {
2499
+ ranker: args.ranking.ranker,
2500
+ score_threshold: args.ranking.scoreThreshold
2501
+ } : void 0,
2502
+ filters: args.filters
2503
+ });
2504
+ break;
2505
+ }
2506
+ case "openai.web_search_preview": {
2507
+ const args = webSearchPreviewArgsSchema.parse(tool.args);
2508
+ openaiTools.push({
2509
+ type: "web_search_preview",
2510
+ search_context_size: args.searchContextSize,
2511
+ user_location: args.userLocation
2512
+ });
2513
+ break;
2514
+ }
2515
+ case "openai.web_search": {
2516
+ const args = webSearchArgsSchema.parse(tool.args);
2517
+ openaiTools.push({
2518
+ type: "web_search",
2519
+ filters: args.filters != null ? { allowed_domains: args.filters.allowedDomains } : void 0,
2520
+ search_context_size: args.searchContextSize,
2521
+ user_location: args.userLocation
2522
+ });
2523
+ break;
2524
+ }
2525
+ case "openai.code_interpreter": {
2526
+ const args = codeInterpreterArgsSchema.parse(tool.args);
2527
+ openaiTools.push({
2528
+ type: "code_interpreter",
2529
+ container: args.container == null ? { type: "auto", file_ids: void 0 } : typeof args.container === "string" ? args.container : { type: "auto", file_ids: args.container.fileIds }
2530
+ });
2531
+ break;
2532
+ }
2533
+ case "openai.image_generation": {
2534
+ const args = imageGenerationArgsSchema.parse(tool.args);
2535
+ openaiTools.push({
2536
+ type: "image_generation",
2537
+ background: args.background,
2538
+ input_fidelity: args.inputFidelity,
2539
+ input_image_mask: args.inputImageMask ? {
2540
+ file_id: args.inputImageMask.fileId,
2541
+ image_url: args.inputImageMask.imageUrl
2542
+ } : void 0,
2543
+ model: args.model,
2544
+ size: args.size,
2545
+ quality: args.quality,
2546
+ moderation: args.moderation,
2547
+ output_format: args.outputFormat,
2548
+ output_compression: args.outputCompression
2549
+ });
2550
+ break;
2551
+ }
2552
+ }
2553
+ break;
2554
+ }
2555
+ default:
2556
+ toolWarnings.push({ type: "unsupported-tool", tool });
2557
+ break;
2558
+ }
2559
+ }
2560
+ if (toolChoice == null) {
2561
+ return { tools: openaiTools, toolChoice: void 0, toolWarnings };
2562
+ }
2563
+ const type = toolChoice.type;
2564
+ switch (type) {
2565
+ case "auto":
2566
+ case "none":
2567
+ case "required":
2568
+ return { tools: openaiTools, toolChoice: type, toolWarnings };
2569
+ case "tool":
2570
+ return {
2571
+ tools: openaiTools,
2572
+ toolChoice: toolChoice.toolName === "code_interpreter" || toolChoice.toolName === "file_search" || toolChoice.toolName === "image_generation" || toolChoice.toolName === "web_search_preview" || toolChoice.toolName === "web_search" ? { type: toolChoice.toolName } : { type: "function", name: toolChoice.toolName },
2573
+ toolWarnings
2574
+ };
2575
+ default: {
2576
+ const _exhaustiveCheck = type;
2577
+ throw new UnsupportedFunctionalityError5({
2578
+ functionality: `tool choice type: ${_exhaustiveCheck}`
2579
+ });
2580
+ }
2581
+ }
2582
+ }
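
A sketch of the conversion performed by `prepareResponsesTools`, using a hypothetical `get_weather` function tool (the tool name and JSON schema are made up for illustration).

const { tools, toolChoice, toolWarnings } = prepareResponsesTools({
  tools: [{
    type: "function",
    name: "get_weather",
    description: "Look up the current weather",
    inputSchema: { type: "object", properties: { city: { type: "string" } } }
  }],
  toolChoice: { type: "tool", toolName: "get_weather" },
  strictJsonSchema: false
});
// tools[0]   => { type: "function", name: "get_weather", description: "...", parameters: {...}, strict: false }
// toolChoice => { type: "function", name: "get_weather" }
//               (built-in tool names such as "web_search" map to { type: toolName } instead)
// toolWarnings is empty here; unsupported tool types would be reported there.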
2583
+
2584
+ // src/responses/openai-responses-language-model.ts
2585
+ var webSearchCallItem = z18.object({
2586
+ type: z18.literal("web_search_call"),
2587
+ id: z18.string(),
2588
+ status: z18.string(),
2589
+ action: z18.discriminatedUnion("type", [
2590
+ z18.object({
2591
+ type: z18.literal("search"),
2592
+ query: z18.string().nullish()
2593
+ }),
2594
+ z18.object({
2595
+ type: z18.literal("open_page"),
2596
+ url: z18.string()
2597
+ }),
2598
+ z18.object({
2599
+ type: z18.literal("find"),
2600
+ url: z18.string(),
2601
+ pattern: z18.string()
2602
+ })
2603
+ ]).nullish()
2604
+ });
2605
+ var fileSearchCallItem = z18.object({
2606
+ type: z18.literal("file_search_call"),
2607
+ id: z18.string(),
2608
+ queries: z18.array(z18.string()),
2609
+ results: z18.array(
2610
+ z18.object({
2611
+ attributes: z18.record(z18.string(), z18.unknown()),
2612
+ file_id: z18.string(),
2613
+ filename: z18.string(),
2614
+ score: z18.number(),
2615
+ text: z18.string()
2616
+ })
2617
+ ).nullish()
2618
+ });
2619
+ var codeInterpreterCallItem = z18.object({
2620
+ type: z18.literal("code_interpreter_call"),
2621
+ id: z18.string(),
2622
+ code: z18.string().nullable(),
2623
+ container_id: z18.string(),
2624
+ outputs: z18.array(
2625
+ z18.discriminatedUnion("type", [
2626
+ z18.object({ type: z18.literal("logs"), logs: z18.string() }),
2627
+ z18.object({ type: z18.literal("image"), url: z18.string() })
2628
+ ])
2629
+ ).nullable()
2630
+ });
2631
+ var imageGenerationCallItem = z18.object({
2632
+ type: z18.literal("image_generation_call"),
2633
+ id: z18.string(),
2634
+ result: z18.string()
2635
+ });
2636
+ var TOP_LOGPROBS_MAX = 20;
2637
+ var LOGPROBS_SCHEMA = z18.array(
2638
+ z18.object({
2639
+ token: z18.string(),
2640
+ logprob: z18.number(),
2641
+ top_logprobs: z18.array(
2642
+ z18.object({
2643
+ token: z18.string(),
2644
+ logprob: z18.number()
2645
+ })
2646
+ )
2647
+ })
2648
+ );
2649
+ var OpenAIResponsesLanguageModel = class {
2650
+ constructor(modelId, config) {
2651
+ this.specificationVersion = "v2";
2652
+ this.supportedUrls = {
2653
+ "image/*": [/^https?:\/\/.*$/],
2654
+ "application/pdf": [/^https?:\/\/.*$/]
2655
+ };
2656
+ this.modelId = modelId;
2657
+ this.config = config;
2658
+ }
2659
+ get provider() {
2660
+ return this.config.provider;
2661
+ }
2662
+ async getArgs({
2663
+ maxOutputTokens,
2664
+ temperature,
2665
+ stopSequences,
2666
+ topP,
2667
+ topK,
2668
+ presencePenalty,
2669
+ frequencyPenalty,
2670
+ seed,
2671
+ prompt,
2672
+ providerOptions,
2673
+ tools,
2674
+ toolChoice,
2675
+ responseFormat
2676
+ }) {
2677
+ var _a, _b, _c, _d;
2678
+ const warnings = [];
2679
+ const modelConfig = getResponsesModelConfig(this.modelId);
2680
+ if (topK != null) {
2681
+ warnings.push({ type: "unsupported-setting", setting: "topK" });
2682
+ }
2683
+ if (seed != null) {
2684
+ warnings.push({ type: "unsupported-setting", setting: "seed" });
2685
+ }
2686
+ if (presencePenalty != null) {
2687
+ warnings.push({
2688
+ type: "unsupported-setting",
2689
+ setting: "presencePenalty"
2690
+ });
2691
+ }
2692
+ if (frequencyPenalty != null) {
2693
+ warnings.push({
2694
+ type: "unsupported-setting",
2695
+ setting: "frequencyPenalty"
2696
+ });
2697
+ }
2698
+ if (stopSequences != null) {
2699
+ warnings.push({ type: "unsupported-setting", setting: "stopSequences" });
2700
+ }
2701
+ const openaiOptions = await parseProviderOptions7({
2702
+ provider: "openai",
2703
+ providerOptions,
2704
+ schema: openaiResponsesProviderOptionsSchema
2705
+ });
2706
+ const { input, warnings: inputWarnings } = await convertToOpenAIResponsesInput({
2707
+ prompt,
2708
+ systemMessageMode: modelConfig.systemMessageMode,
2709
+ fileIdPrefixes: this.config.fileIdPrefixes,
2710
+ store: (_a = openaiOptions == null ? void 0 : openaiOptions.store) != null ? _a : true
2711
+ });
2712
+ warnings.push(...inputWarnings);
2713
+ const strictJsonSchema = (_b = openaiOptions == null ? void 0 : openaiOptions.strictJsonSchema) != null ? _b : false;
2714
+ let include = openaiOptions == null ? void 0 : openaiOptions.include;
2715
+ function addInclude(key) {
2716
+ include = include != null ? [...include, key] : [key];
2717
+ }
2718
+ function hasOpenAITool(id) {
2719
+ return (tools == null ? void 0 : tools.find(
2720
+ (tool) => tool.type === "provider-defined" && tool.id === id
2721
+ )) != null;
2722
+ }
2723
+ const topLogprobs = typeof (openaiOptions == null ? void 0 : openaiOptions.logprobs) === "number" ? openaiOptions == null ? void 0 : openaiOptions.logprobs : (openaiOptions == null ? void 0 : openaiOptions.logprobs) === true ? TOP_LOGPROBS_MAX : void 0;
2724
+ if (topLogprobs) {
2725
+ addInclude("message.output_text.logprobs");
2726
+ }
2727
+ const webSearchToolName = (_c = tools == null ? void 0 : tools.find(
2728
+ (tool) => tool.type === "provider-defined" && (tool.id === "openai.web_search" || tool.id === "openai.web_search_preview")
2729
+ )) == null ? void 0 : _c.name;
2730
+ if (webSearchToolName) {
2731
+ addInclude("web_search_call.action.sources");
2732
+ }
2733
+ if (hasOpenAITool("openai.code_interpreter")) {
2734
+ addInclude("code_interpreter_call.outputs");
2735
+ }
2736
+ const baseArgs = {
2737
+ model: this.modelId,
2738
+ input,
2739
+ temperature,
2740
+ top_p: topP,
2741
+ max_output_tokens: maxOutputTokens,
2742
+ ...((responseFormat == null ? void 0 : responseFormat.type) === "json" || (openaiOptions == null ? void 0 : openaiOptions.textVerbosity)) && {
2743
+ text: {
2744
+ ...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
2745
+ format: responseFormat.schema != null ? {
2746
+ type: "json_schema",
2747
+ strict: strictJsonSchema,
2748
+ name: (_d = responseFormat.name) != null ? _d : "response",
2749
+ description: responseFormat.description,
2750
+ schema: responseFormat.schema
2751
+ } : { type: "json_object" }
2752
+ },
2753
+ ...(openaiOptions == null ? void 0 : openaiOptions.textVerbosity) && {
2754
+ verbosity: openaiOptions.textVerbosity
2755
+ }
2756
+ }
2757
+ },
2758
+ // provider options:
2759
+ max_tool_calls: openaiOptions == null ? void 0 : openaiOptions.maxToolCalls,
2760
+ metadata: openaiOptions == null ? void 0 : openaiOptions.metadata,
2761
+ parallel_tool_calls: openaiOptions == null ? void 0 : openaiOptions.parallelToolCalls,
2762
+ previous_response_id: openaiOptions == null ? void 0 : openaiOptions.previousResponseId,
2763
+ store: openaiOptions == null ? void 0 : openaiOptions.store,
2764
+ user: openaiOptions == null ? void 0 : openaiOptions.user,
2765
+ instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
2766
+ service_tier: openaiOptions == null ? void 0 : openaiOptions.serviceTier,
2767
+ include,
2768
+ prompt_cache_key: openaiOptions == null ? void 0 : openaiOptions.promptCacheKey,
2769
+ safety_identifier: openaiOptions == null ? void 0 : openaiOptions.safetyIdentifier,
2770
+ top_logprobs: topLogprobs,
2771
+ // model-specific settings:
2772
+ ...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
2773
+ reasoning: {
2774
+ ...(openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
2775
+ effort: openaiOptions.reasoningEffort
2776
+ },
2777
+ ...(openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null && {
2778
+ summary: openaiOptions.reasoningSummary
2779
+ }
2780
+ }
2781
+ },
2782
+ ...modelConfig.requiredAutoTruncation && {
2783
+ truncation: "auto"
2784
+ }
2785
+ };
2786
+ if (modelConfig.isReasoningModel) {
2787
+ if (baseArgs.temperature != null) {
2788
+ baseArgs.temperature = void 0;
2789
+ warnings.push({
2790
+ type: "unsupported-setting",
2791
+ setting: "temperature",
2792
+ details: "temperature is not supported for reasoning models"
2793
+ });
2794
+ }
2795
+ if (baseArgs.top_p != null) {
2796
+ baseArgs.top_p = void 0;
2797
+ warnings.push({
2798
+ type: "unsupported-setting",
2799
+ setting: "topP",
2800
+ details: "topP is not supported for reasoning models"
2801
+ });
2802
+ }
2803
+ } else {
2804
+ if ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null) {
2805
+ warnings.push({
2806
+ type: "unsupported-setting",
2807
+ setting: "reasoningEffort",
2808
+ details: "reasoningEffort is not supported for non-reasoning models"
2809
+ });
2810
+ }
2811
+ if ((openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) {
2812
+ warnings.push({
2813
+ type: "unsupported-setting",
2814
+ setting: "reasoningSummary",
2815
+ details: "reasoningSummary is not supported for non-reasoning models"
2816
+ });
2817
+ }
2818
+ }
2819
+ if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "flex" && !modelConfig.supportsFlexProcessing) {
2820
+ warnings.push({
2821
+ type: "unsupported-setting",
2822
+ setting: "serviceTier",
2823
+ details: "flex processing is only available for o3, o4-mini, and gpt-5 models"
2824
+ });
2825
+ delete baseArgs.service_tier;
2826
+ }
2827
+ if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "priority" && !modelConfig.supportsPriorityProcessing) {
2828
+ warnings.push({
2829
+ type: "unsupported-setting",
2830
+ setting: "serviceTier",
2831
+ details: "priority processing is only available for supported models (gpt-4, gpt-5, gpt-5-mini, o3, o4-mini) and requires Enterprise access. gpt-5-nano is not supported"
2832
+ });
2833
+ delete baseArgs.service_tier;
2834
+ }
2835
+ const {
2836
+ tools: openaiTools,
2837
+ toolChoice: openaiToolChoice,
2838
+ toolWarnings
2839
+ } = prepareResponsesTools({
2840
+ tools,
2841
+ toolChoice,
2842
+ strictJsonSchema
2843
+ });
2844
+ return {
2845
+ webSearchToolName,
2846
+ args: {
2847
+ ...baseArgs,
2848
+ tools: openaiTools,
2849
+ tool_choice: openaiToolChoice
2850
+ },
2851
+ warnings: [...warnings, ...toolWarnings]
2852
+ };
2853
+ }
2854
+ async doGenerate(options) {
2855
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u;
2856
+ const {
2857
+ args: body,
2858
+ warnings,
2859
+ webSearchToolName
2860
+ } = await this.getArgs(options);
2861
+ const url = this.config.url({
2862
+ path: "/responses",
2863
+ modelId: this.modelId
2864
+ });
2865
+ const {
2866
+ responseHeaders,
2867
+ value: response,
2868
+ rawValue: rawResponse
2869
+ } = await postJsonToApi6({
2870
+ url,
2871
+ headers: combineHeaders7(this.config.headers(), options.headers),
2872
+ body,
2873
+ failedResponseHandler: openaiFailedResponseHandler,
2874
+ successfulResponseHandler: createJsonResponseHandler6(
2875
+ z18.object({
2876
+ id: z18.string(),
2877
+ created_at: z18.number(),
2878
+ error: z18.object({
2879
+ code: z18.string(),
2880
+ message: z18.string()
2881
+ }).nullish(),
2882
+ model: z18.string(),
2883
+ output: z18.array(
2884
+ z18.discriminatedUnion("type", [
2885
+ z18.object({
2886
+ type: z18.literal("message"),
2887
+ role: z18.literal("assistant"),
2888
+ id: z18.string(),
2889
+ content: z18.array(
2890
+ z18.object({
2891
+ type: z18.literal("output_text"),
2892
+ text: z18.string(),
2893
+ logprobs: LOGPROBS_SCHEMA.nullish(),
2894
+ annotations: z18.array(
2895
+ z18.discriminatedUnion("type", [
2896
+ z18.object({
2897
+ type: z18.literal("url_citation"),
2898
+ start_index: z18.number(),
2899
+ end_index: z18.number(),
2900
+ url: z18.string(),
2901
+ title: z18.string()
2902
+ }),
2903
+ z18.object({
2904
+ type: z18.literal("file_citation"),
2905
+ file_id: z18.string(),
2906
+ filename: z18.string().nullish(),
2907
+ index: z18.number().nullish(),
2908
+ start_index: z18.number().nullish(),
2909
+ end_index: z18.number().nullish(),
2910
+ quote: z18.string().nullish()
2911
+ }),
2912
+ z18.object({
2913
+ type: z18.literal("container_file_citation")
2914
+ })
2915
+ ])
2916
+ )
2917
+ })
2918
+ )
2919
+ }),
2920
+ webSearchCallItem,
2921
+ fileSearchCallItem,
2922
+ codeInterpreterCallItem,
2923
+ imageGenerationCallItem,
2924
+ z18.object({
2925
+ type: z18.literal("function_call"),
2926
+ call_id: z18.string(),
2927
+ name: z18.string(),
2928
+ arguments: z18.string(),
2929
+ id: z18.string()
2930
+ }),
2931
+ z18.object({
2932
+ type: z18.literal("computer_call"),
2933
+ id: z18.string(),
2934
+ status: z18.string().optional()
2935
+ }),
2936
+ z18.object({
2937
+ type: z18.literal("reasoning"),
2938
+ id: z18.string(),
2939
+ encrypted_content: z18.string().nullish(),
2940
+ summary: z18.array(
2941
+ z18.object({
2942
+ type: z18.literal("summary_text"),
2943
+ text: z18.string()
2944
+ })
2945
+ )
2946
+ })
2947
+ ])
2948
+ ),
2949
+ service_tier: z18.string().nullish(),
2950
+ incomplete_details: z18.object({ reason: z18.string() }).nullable(),
2951
+ usage: usageSchema2
2952
+ })
2953
+ ),
2954
+ abortSignal: options.abortSignal,
2955
+ fetch: this.config.fetch
2956
+ });
2957
+ if (response.error) {
2958
+ throw new APICallError({
2959
+ message: response.error.message,
2960
+ url,
2961
+ requestBodyValues: body,
2962
+ statusCode: 400,
2963
+ responseHeaders,
2964
+ responseBody: rawResponse,
2965
+ isRetryable: false
2966
+ });
2967
+ }
2968
+ const content = [];
2969
+ const logprobs = [];
2970
+ let hasFunctionCall = false;
2971
+ for (const part of response.output) {
2972
+ switch (part.type) {
2973
+ case "reasoning": {
2974
+ if (part.summary.length === 0) {
2975
+ part.summary.push({ type: "summary_text", text: "" });
2976
+ }
2977
+ for (const summary of part.summary) {
2978
+ content.push({
2979
+ type: "reasoning",
2980
+ text: summary.text,
2981
+ providerMetadata: {
2982
+ openai: {
2983
+ itemId: part.id,
2984
+ reasoningEncryptedContent: (_a = part.encrypted_content) != null ? _a : null
2985
+ }
2986
+ }
2987
+ });
2988
+ }
2989
+ break;
2990
+ }
2991
+ case "image_generation_call": {
2992
+ content.push({
2993
+ type: "tool-call",
2994
+ toolCallId: part.id,
2995
+ toolName: "image_generation",
2996
+ input: "{}",
2997
+ providerExecuted: true
2998
+ });
2999
+ content.push({
3000
+ type: "tool-result",
3001
+ toolCallId: part.id,
3002
+ toolName: "image_generation",
3003
+ result: {
3004
+ result: part.result
3005
+ },
3006
+ providerExecuted: true
3007
+ });
3008
+ break;
3009
+ }
3010
+ case "message": {
3011
+ for (const contentPart of part.content) {
3012
+ if (((_c = (_b = options.providerOptions) == null ? void 0 : _b.openai) == null ? void 0 : _c.logprobs) && contentPart.logprobs) {
3013
+ logprobs.push(contentPart.logprobs);
3014
+ }
3015
+ content.push({
3016
+ type: "text",
3017
+ text: contentPart.text,
3018
+ providerMetadata: {
3019
+ openai: {
3020
+ itemId: part.id
3021
+ }
3022
+ }
3023
+ });
3024
+ for (const annotation of contentPart.annotations) {
3025
+ if (annotation.type === "url_citation") {
3026
+ content.push({
3027
+ type: "source",
3028
+ sourceType: "url",
3029
+ id: (_f = (_e = (_d = this.config).generateId) == null ? void 0 : _e.call(_d)) != null ? _f : generateId2(),
3030
+ url: annotation.url,
3031
+ title: annotation.title
3032
+ });
3033
+ } else if (annotation.type === "file_citation") {
3034
+ content.push({
3035
+ type: "source",
3036
+ sourceType: "document",
3037
+ id: (_i = (_h = (_g = this.config).generateId) == null ? void 0 : _h.call(_g)) != null ? _i : generateId2(),
3038
+ mediaType: "text/plain",
3039
+ title: (_k = (_j = annotation.quote) != null ? _j : annotation.filename) != null ? _k : "Document",
3040
+ filename: (_l = annotation.filename) != null ? _l : annotation.file_id,
3041
+ fileId: annotation.file_id,
3042
+ startIndex: (_m = annotation.start_index) != null ? _m : void 0,
3043
+ endIndex: (_n = annotation.end_index) != null ? _n : void 0
3044
+ });
3045
+ }
3046
+ }
3047
+ }
3048
+ break;
3049
+ }
3050
+ case "function_call": {
3051
+ hasFunctionCall = true;
3052
+ content.push({
3053
+ type: "tool-call",
3054
+ toolCallId: part.call_id,
3055
+ toolName: part.name,
3056
+ input: part.arguments,
3057
+ providerMetadata: {
3058
+ openai: {
3059
+ itemId: part.id
3060
+ }
3061
+ }
3062
+ });
3063
+ break;
3064
+ }
3065
+ case "web_search_call": {
3066
+ content.push({
3067
+ type: "tool-call",
3068
+ toolCallId: part.id,
3069
+ toolName: webSearchToolName != null ? webSearchToolName : "web_search",
3070
+ input: JSON.stringify({ action: part.action }),
3071
+ providerExecuted: true
3072
+ });
3073
+ content.push({
3074
+ type: "tool-result",
3075
+ toolCallId: part.id,
3076
+ toolName: webSearchToolName != null ? webSearchToolName : "web_search",
3077
+ result: { status: part.status },
3078
+ providerExecuted: true
3079
+ });
3080
+ break;
3081
+ }
3082
+ case "computer_call": {
3083
+ content.push({
3084
+ type: "tool-call",
3085
+ toolCallId: part.id,
3086
+ toolName: "computer_use",
3087
+ input: "",
3088
+ providerExecuted: true
3089
+ });
3090
+ content.push({
3091
+ type: "tool-result",
3092
+ toolCallId: part.id,
3093
+ toolName: "computer_use",
3094
+ result: {
3095
+ type: "computer_use_tool_result",
3096
+ status: part.status || "completed"
3097
+ },
3098
+ providerExecuted: true
3099
+ });
3100
+ break;
3101
+ }
3102
+ case "file_search_call": {
3103
+ content.push({
3104
+ type: "tool-call",
3105
+ toolCallId: part.id,
3106
+ toolName: "file_search",
3107
+ input: "{}",
3108
+ providerExecuted: true
3109
+ });
3110
+ content.push({
3111
+ type: "tool-result",
3112
+ toolCallId: part.id,
3113
+ toolName: "file_search",
3114
+ result: {
3115
+ queries: part.queries,
3116
+ results: (_p = (_o = part.results) == null ? void 0 : _o.map((result) => ({
3117
+ attributes: result.attributes,
3118
+ fileId: result.file_id,
3119
+ filename: result.filename,
3120
+ score: result.score,
3121
+ text: result.text
3122
+ }))) != null ? _p : null
3123
+ },
3124
+ providerExecuted: true
3125
+ });
3126
+ break;
3127
+ }
3128
+ case "code_interpreter_call": {
3129
+ content.push({
3130
+ type: "tool-call",
3131
+ toolCallId: part.id,
3132
+ toolName: "code_interpreter",
3133
+ input: JSON.stringify({
3134
+ code: part.code,
3135
+ containerId: part.container_id
3136
+ }),
3137
+ providerExecuted: true
3138
+ });
3139
+ content.push({
3140
+ type: "tool-result",
3141
+ toolCallId: part.id,
3142
+ toolName: "code_interpreter",
3143
+ result: {
3144
+ outputs: part.outputs
3145
+ },
3146
+ providerExecuted: true
3147
+ });
3148
+ break;
3149
+ }
3150
+ }
3151
+ }
3152
+ const providerMetadata = {
3153
+ openai: { responseId: response.id }
3154
+ };
3155
+ if (logprobs.length > 0) {
3156
+ providerMetadata.openai.logprobs = logprobs;
3157
+ }
3158
+ if (typeof response.service_tier === "string") {
3159
+ providerMetadata.openai.serviceTier = response.service_tier;
3160
+ }
3161
+ return {
3162
+ content,
3163
+ finishReason: mapOpenAIResponseFinishReason({
3164
+ finishReason: (_q = response.incomplete_details) == null ? void 0 : _q.reason,
3165
+ hasFunctionCall
3166
+ }),
3167
+ usage: {
3168
+ inputTokens: response.usage.input_tokens,
3169
+ outputTokens: response.usage.output_tokens,
3170
+ totalTokens: response.usage.input_tokens + response.usage.output_tokens,
3171
+ reasoningTokens: (_s = (_r = response.usage.output_tokens_details) == null ? void 0 : _r.reasoning_tokens) != null ? _s : void 0,
3172
+ cachedInputTokens: (_u = (_t = response.usage.input_tokens_details) == null ? void 0 : _t.cached_tokens) != null ? _u : void 0
3173
+ },
3174
+ request: { body },
3175
+ response: {
3176
+ id: response.id,
3177
+ timestamp: new Date(response.created_at * 1e3),
3178
+ modelId: response.model,
3179
+ headers: responseHeaders,
3180
+ body: rawResponse
3181
+ },
3182
+ providerMetadata,
3183
+ warnings
3184
+ };
3185
+ }
3186
+ async doStream(options) {
3187
+ const {
3188
+ args: body,
3189
+ warnings,
3190
+ webSearchToolName
3191
+ } = await this.getArgs(options);
3192
+ const { responseHeaders, value: response } = await postJsonToApi6({
3193
+ url: this.config.url({
3194
+ path: "/responses",
3195
+ modelId: this.modelId
3196
+ }),
3197
+ headers: combineHeaders7(this.config.headers(), options.headers),
3198
+ body: {
3199
+ ...body,
3200
+ stream: true
3201
+ },
3202
+ failedResponseHandler: openaiFailedResponseHandler,
3203
+ successfulResponseHandler: createEventSourceResponseHandler3(
3204
+ openaiResponsesChunkSchema
3205
+ ),
3206
+ abortSignal: options.abortSignal,
3207
+ fetch: this.config.fetch
3208
+ });
3209
+ const self = this;
3210
+ let finishReason = "unknown";
3211
+ const usage = {
3212
+ inputTokens: void 0,
3213
+ outputTokens: void 0,
3214
+ totalTokens: void 0
3215
+ };
3216
+ const logprobs = [];
3217
+ let responseId = null;
3218
+ const ongoingToolCalls = {};
3219
+ let hasFunctionCall = false;
3220
+ const activeReasoning = {};
3221
+ let serviceTier;
3222
+ return {
3223
+ stream: response.pipeThrough(
3224
+ new TransformStream({
3225
+ start(controller) {
3226
+ controller.enqueue({ type: "stream-start", warnings });
3227
+ },
3228
+ transform(chunk, controller) {
3229
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y;
3230
+ if (options.includeRawChunks) {
3231
+ controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
3232
+ }
3233
+ if (!chunk.success) {
3234
+ finishReason = "error";
3235
+ controller.enqueue({ type: "error", error: chunk.error });
3236
+ return;
3237
+ }
3238
+ const value = chunk.value;
3239
+ if (isResponseOutputItemAddedChunk(value)) {
3240
+ if (value.item.type === "function_call") {
3241
+ ongoingToolCalls[value.output_index] = {
3242
+ toolName: value.item.name,
3243
+ toolCallId: value.item.call_id
3244
+ };
3245
+ controller.enqueue({
3246
+ type: "tool-input-start",
3247
+ id: value.item.call_id,
3248
+ toolName: value.item.name
3249
+ });
3250
+ } else if (value.item.type === "web_search_call") {
3251
+ ongoingToolCalls[value.output_index] = {
3252
+ toolName: webSearchToolName != null ? webSearchToolName : "web_search",
3253
+ toolCallId: value.item.id
3254
+ };
3255
+ controller.enqueue({
3256
+ type: "tool-input-start",
3257
+ id: value.item.id,
3258
+ toolName: webSearchToolName != null ? webSearchToolName : "web_search"
3259
+ });
3260
+ } else if (value.item.type === "computer_call") {
3261
+ ongoingToolCalls[value.output_index] = {
3262
+ toolName: "computer_use",
3263
+ toolCallId: value.item.id
3264
+ };
3265
+ controller.enqueue({
3266
+ type: "tool-input-start",
3267
+ id: value.item.id,
3268
+ toolName: "computer_use"
3269
+ });
3270
+ } else if (value.item.type === "file_search_call") {
3271
+ controller.enqueue({
3272
+ type: "tool-call",
3273
+ toolCallId: value.item.id,
3274
+ toolName: "file_search",
3275
+ input: "{}",
3276
+ providerExecuted: true
3277
+ });
3278
+ } else if (value.item.type === "image_generation_call") {
3279
+ controller.enqueue({
3280
+ type: "tool-call",
3281
+ toolCallId: value.item.id,
3282
+ toolName: "image_generation",
3283
+ input: "{}",
3284
+ providerExecuted: true
3285
+ });
3286
+ } else if (value.item.type === "message") {
3287
+ controller.enqueue({
3288
+ type: "text-start",
3289
+ id: value.item.id,
3290
+ providerMetadata: {
3291
+ openai: {
3292
+ itemId: value.item.id
3293
+ }
3294
+ }
3295
+ });
3296
+ } else if (isResponseOutputItemAddedReasoningChunk(value)) {
3297
+ activeReasoning[value.item.id] = {
3298
+ encryptedContent: value.item.encrypted_content,
3299
+ summaryParts: [0]
3300
+ };
3301
+ controller.enqueue({
3302
+ type: "reasoning-start",
3303
+ id: `${value.item.id}:0`,
3304
+ providerMetadata: {
3305
+ openai: {
3306
+ itemId: value.item.id,
3307
+ reasoningEncryptedContent: (_a = value.item.encrypted_content) != null ? _a : null
3308
+ }
3309
+ }
3310
+ });
3311
+ }
3312
+ } else if (isResponseOutputItemDoneChunk(value)) {
3313
+ if (value.item.type === "function_call") {
3314
+ ongoingToolCalls[value.output_index] = void 0;
3315
+ hasFunctionCall = true;
3316
+ controller.enqueue({
3317
+ type: "tool-input-end",
3318
+ id: value.item.call_id
3319
+ });
3320
+ controller.enqueue({
3321
+ type: "tool-call",
3322
+ toolCallId: value.item.call_id,
3323
+ toolName: value.item.name,
3324
+ input: value.item.arguments,
3325
+ providerMetadata: {
3326
+ openai: {
3327
+ itemId: value.item.id
3328
+ }
3329
+ }
3330
+ });
3331
+ } else if (value.item.type === "web_search_call") {
3332
+ ongoingToolCalls[value.output_index] = void 0;
3333
+ controller.enqueue({
3334
+ type: "tool-input-end",
3335
+ id: value.item.id
3336
+ });
3337
+ controller.enqueue({
3338
+ type: "tool-call",
3339
+ toolCallId: value.item.id,
3340
+ toolName: "web_search",
3341
+ input: JSON.stringify({ action: value.item.action }),
3342
+ providerExecuted: true
3343
+ });
3344
+ controller.enqueue({
3345
+ type: "tool-result",
3346
+ toolCallId: value.item.id,
3347
+ toolName: "web_search",
3348
+ result: { status: value.item.status },
3349
+ providerExecuted: true
3350
+ });
3351
+ } else if (value.item.type === "computer_call") {
3352
+ ongoingToolCalls[value.output_index] = void 0;
3353
+ controller.enqueue({
3354
+ type: "tool-input-end",
3355
+ id: value.item.id
3356
+ });
3357
+ controller.enqueue({
3358
+ type: "tool-call",
3359
+ toolCallId: value.item.id,
3360
+ toolName: "computer_use",
3361
+ input: "",
3362
+ providerExecuted: true
3363
+ });
3364
+ controller.enqueue({
3365
+ type: "tool-result",
3366
+ toolCallId: value.item.id,
3367
+ toolName: "computer_use",
3368
+ result: {
3369
+ type: "computer_use_tool_result",
3370
+ status: value.item.status || "completed"
3371
+ },
3372
+ providerExecuted: true
3373
+ });
3374
+ } else if (value.item.type === "file_search_call") {
3375
+ ongoingToolCalls[value.output_index] = void 0;
3376
+ controller.enqueue({
3377
+ type: "tool-result",
3378
+ toolCallId: value.item.id,
3379
+ toolName: "file_search",
3380
+ result: {
3381
+ queries: value.item.queries,
3382
+ results: (_c = (_b = value.item.results) == null ? void 0 : _b.map((result) => ({
3383
+ attributes: result.attributes,
3384
+ fileId: result.file_id,
3385
+ filename: result.filename,
3386
+ score: result.score,
3387
+ text: result.text
3388
+ }))) != null ? _c : null
3389
+ },
3390
+ providerExecuted: true
3391
+ });
3392
+ } else if (value.item.type === "code_interpreter_call") {
3393
+ controller.enqueue({
3394
+ type: "tool-call",
3395
+ toolCallId: value.item.id,
3396
+ toolName: "code_interpreter",
3397
+ input: JSON.stringify({
3398
+ code: value.item.code,
3399
+ containerId: value.item.container_id
3400
+ }),
3401
+ providerExecuted: true
3402
+ });
3403
+ controller.enqueue({
3404
+ type: "tool-result",
3405
+ toolCallId: value.item.id,
3406
+ toolName: "code_interpreter",
3407
+ result: {
3408
+ outputs: value.item.outputs
3409
+ },
3410
+ providerExecuted: true
3411
+ });
3412
+ } else if (value.item.type === "image_generation_call") {
3413
+ controller.enqueue({
3414
+ type: "tool-result",
3415
+ toolCallId: value.item.id,
3416
+ toolName: "image_generation",
3417
+ result: {
3418
+ result: value.item.result
3419
+ },
3420
+ providerExecuted: true
3421
+ });
3422
+ } else if (value.item.type === "message") {
3423
+ controller.enqueue({
3424
+ type: "text-end",
3425
+ id: value.item.id
3426
+ });
3427
+ } else if (isResponseOutputItemDoneReasoningChunk(value)) {
3428
+ const activeReasoningPart = activeReasoning[value.item.id];
3429
+ for (const summaryIndex of activeReasoningPart.summaryParts) {
3430
+ controller.enqueue({
3431
+ type: "reasoning-end",
3432
+ id: `${value.item.id}:${summaryIndex}`,
3433
+ providerMetadata: {
3434
+ openai: {
3435
+ itemId: value.item.id,
3436
+ reasoningEncryptedContent: (_d = value.item.encrypted_content) != null ? _d : null
3437
+ }
3438
+ }
3439
+ });
3440
+ }
3441
+ delete activeReasoning[value.item.id];
3442
+ }
3443
+ } else if (isResponseFunctionCallArgumentsDeltaChunk(value)) {
3444
+ const toolCall = ongoingToolCalls[value.output_index];
3445
+ if (toolCall != null) {
3446
+ controller.enqueue({
3447
+ type: "tool-input-delta",
3448
+ id: toolCall.toolCallId,
3449
+ delta: value.delta
3450
+ });
3451
+ }
3452
+ } else if (isResponseCreatedChunk(value)) {
3453
+ responseId = value.response.id;
3454
+ controller.enqueue({
3455
+ type: "response-metadata",
3456
+ id: value.response.id,
3457
+ timestamp: new Date(value.response.created_at * 1e3),
3458
+ modelId: value.response.model
3459
+ });
3460
+ } else if (isTextDeltaChunk(value)) {
3461
+ controller.enqueue({
3462
+ type: "text-delta",
3463
+ id: value.item_id,
3464
+ delta: value.delta
3465
+ });
3466
+ if (((_f = (_e = options.providerOptions) == null ? void 0 : _e.openai) == null ? void 0 : _f.logprobs) && value.logprobs) {
3467
+ logprobs.push(value.logprobs);
3468
+ }
3469
+ } else if (isResponseReasoningSummaryPartAddedChunk(value)) {
3470
+ if (value.summary_index > 0) {
3471
+ (_g = activeReasoning[value.item_id]) == null ? void 0 : _g.summaryParts.push(
3472
+ value.summary_index
3473
+ );
3474
+ controller.enqueue({
3475
+ type: "reasoning-start",
3476
+ id: `${value.item_id}:${value.summary_index}`,
3477
+ providerMetadata: {
3478
+ openai: {
3479
+ itemId: value.item_id,
3480
+ reasoningEncryptedContent: (_i = (_h = activeReasoning[value.item_id]) == null ? void 0 : _h.encryptedContent) != null ? _i : null
3481
+ }
3482
+ }
3483
+ });
3484
+ }
3485
+ } else if (isResponseReasoningSummaryTextDeltaChunk(value)) {
3486
+ controller.enqueue({
3487
+ type: "reasoning-delta",
3488
+ id: `${value.item_id}:${value.summary_index}`,
3489
+ delta: value.delta,
3490
+ providerMetadata: {
3491
+ openai: {
3492
+ itemId: value.item_id
3493
+ }
3494
+ }
3495
+ });
3496
+ } else if (isResponseFinishedChunk(value)) {
3497
+ finishReason = mapOpenAIResponseFinishReason({
3498
+ finishReason: (_j = value.response.incomplete_details) == null ? void 0 : _j.reason,
3499
+ hasFunctionCall
3500
+ });
3501
+ usage.inputTokens = value.response.usage.input_tokens;
3502
+ usage.outputTokens = value.response.usage.output_tokens;
3503
+ usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens;
3504
+ usage.reasoningTokens = (_l = (_k = value.response.usage.output_tokens_details) == null ? void 0 : _k.reasoning_tokens) != null ? _l : void 0;
3505
+ usage.cachedInputTokens = (_n = (_m = value.response.usage.input_tokens_details) == null ? void 0 : _m.cached_tokens) != null ? _n : void 0;
3506
+ if (typeof value.response.service_tier === "string") {
3507
+ serviceTier = value.response.service_tier;
3508
+ }
3509
+ } else if (isResponseAnnotationAddedChunk(value)) {
3510
+ if (value.annotation.type === "url_citation") {
3511
+ controller.enqueue({
3512
+ type: "source",
3513
+ sourceType: "url",
3514
+ id: (_q = (_p = (_o = self.config).generateId) == null ? void 0 : _p.call(_o)) != null ? _q : generateId2(),
3515
+ url: value.annotation.url,
3516
+ title: value.annotation.title
3517
+ });
3518
+ } else if (value.annotation.type === "file_citation") {
3519
+ controller.enqueue({
3520
+ type: "source",
3521
+ sourceType: "document",
3522
+ id: (_t = (_s = (_r = self.config).generateId) == null ? void 0 : _s.call(_r)) != null ? _t : generateId2(),
3523
+ mediaType: "text/plain",
3524
+ title: (_v = (_u = value.annotation.quote) != null ? _u : value.annotation.filename) != null ? _v : "Document",
3525
+ filename: (_w = value.annotation.filename) != null ? _w : value.annotation.file_id,
3526
+ fileId: value.annotation.file_id,
3527
+ startIndex: (_x = value.annotation.start_index) != null ? _x : void 0,
3528
+ endIndex: (_y = value.annotation.end_index) != null ? _y : void 0
3529
+ });
3530
+ }
3531
+ } else if (isErrorChunk(value)) {
3532
+ controller.enqueue({ type: "error", error: value });
3533
+ }
3534
+ },
3535
+ flush(controller) {
3536
+ const providerMetadata = {
3537
+ openai: {
3538
+ responseId
3539
+ }
3540
+ };
3541
+ if (logprobs.length > 0) {
3542
+ providerMetadata.openai.logprobs = logprobs;
3543
+ }
3544
+ if (serviceTier !== void 0) {
3545
+ providerMetadata.openai.serviceTier = serviceTier;
3546
+ }
3547
+ controller.enqueue({
3548
+ type: "finish",
3549
+ finishReason,
3550
+ usage,
3551
+ providerMetadata
3552
+ });
3553
+ }
3554
+ })
3555
+ ),
3556
+ request: { body },
3557
+ response: { headers: responseHeaders }
3558
+ };
3559
+ }
3560
+ };
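The flush handler above always closes the stream with a single finish part carrying the accumulated usage and OpenAI-specific metadata. As a minimal sketch of what a consumer sees at the end of a stream (all values below are illustrative assumptions; logprobs and serviceTier only appear when they were actually collected):

const exampleFinishPart = {
  type: "finish",
  finishReason: "stop", // via mapOpenAIResponseFinishReason
  usage: {
    inputTokens: 12,
    outputTokens: 34,
    totalTokens: 46
  },
  providerMetadata: {
    openai: {
      responseId: "resp_abc123", // captured from the response.created chunk
      logprobs: [],              // only when logprobs were requested and received
      serviceTier: "flex"        // only when the response reports a service tier
    }
  }
};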
3561
+ var usageSchema2 = z18.object({
3562
+ input_tokens: z18.number(),
3563
+ input_tokens_details: z18.object({ cached_tokens: z18.number().nullish() }).nullish(),
3564
+ output_tokens: z18.number(),
3565
+ output_tokens_details: z18.object({ reasoning_tokens: z18.number().nullish() }).nullish()
3566
+ });
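The response.completed handler earlier copies these usage fields onto the SDK usage object: input and output tokens are summed into totalTokens, and reasoning/cached tokens are pulled from the nested detail objects. A self-contained sketch of that mapping, with an assumed helper name and inlined types:

function toSdkUsage(u: {
  input_tokens: number;
  output_tokens: number;
  input_tokens_details?: { cached_tokens?: number | null } | null;
  output_tokens_details?: { reasoning_tokens?: number | null } | null;
}) {
  return {
    inputTokens: u.input_tokens,
    outputTokens: u.output_tokens,
    totalTokens: u.input_tokens + u.output_tokens,
    reasoningTokens: u.output_tokens_details?.reasoning_tokens ?? undefined,
    cachedInputTokens: u.input_tokens_details?.cached_tokens ?? undefined
  };
}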
3567
+ var textDeltaChunkSchema = z18.object({
3568
+ type: z18.literal("response.output_text.delta"),
3569
+ item_id: z18.string(),
3570
+ delta: z18.string(),
3571
+ logprobs: LOGPROBS_SCHEMA.nullish()
3572
+ });
3573
+ var errorChunkSchema = z18.object({
3574
+ type: z18.literal("error"),
3575
+ code: z18.string(),
3576
+ message: z18.string(),
3577
+ param: z18.string().nullish(),
3578
+ sequence_number: z18.number()
3579
+ });
3580
+ var responseFinishedChunkSchema = z18.object({
3581
+ type: z18.enum(["response.completed", "response.incomplete"]),
3582
+ response: z18.object({
3583
+ incomplete_details: z18.object({ reason: z18.string() }).nullish(),
3584
+ usage: usageSchema2,
3585
+ service_tier: z18.string().nullish()
3586
+ })
3587
+ });
3588
+ var responseCreatedChunkSchema = z18.object({
3589
+ type: z18.literal("response.created"),
3590
+ response: z18.object({
3591
+ id: z18.string(),
3592
+ created_at: z18.number(),
3593
+ model: z18.string(),
3594
+ service_tier: z18.string().nullish()
3595
+ })
3596
+ });
3597
+ var responseOutputItemAddedSchema = z18.object({
3598
+ type: z18.literal("response.output_item.added"),
3599
+ output_index: z18.number(),
3600
+ item: z18.discriminatedUnion("type", [
3601
+ z18.object({
3602
+ type: z18.literal("message"),
3603
+ id: z18.string()
3604
+ }),
3605
+ z18.object({
3606
+ type: z18.literal("reasoning"),
3607
+ id: z18.string(),
3608
+ encrypted_content: z18.string().nullish()
3609
+ }),
3610
+ z18.object({
3611
+ type: z18.literal("function_call"),
3612
+ id: z18.string(),
3613
+ call_id: z18.string(),
3614
+ name: z18.string(),
3615
+ arguments: z18.string()
3616
+ }),
3617
+ z18.object({
3618
+ type: z18.literal("web_search_call"),
3619
+ id: z18.string(),
3620
+ status: z18.string(),
3621
+ action: z18.object({
3622
+ type: z18.literal("search"),
3623
+ query: z18.string().optional()
3624
+ }).nullish()
3625
+ }),
3626
+ z18.object({
3627
+ type: z18.literal("computer_call"),
3628
+ id: z18.string(),
3629
+ status: z18.string()
3630
+ }),
3631
+ z18.object({
3632
+ type: z18.literal("file_search_call"),
3633
+ id: z18.string()
3634
+ }),
3635
+ z18.object({
3636
+ type: z18.literal("image_generation_call"),
3637
+ id: z18.string()
3638
+ })
3639
+ ])
3640
+ });
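For reference, an illustrative response.output_item.added payload that satisfies the function_call branch of this schema (the IDs and tool name are made-up values; the arguments string is empty here because it streams in afterwards via response.function_call_arguments.delta):

const exampleOutputItemAdded = {
  type: "response.output_item.added",
  output_index: 0,
  item: {
    type: "function_call",
    id: "fc_abc123",
    call_id: "call_abc123",
    name: "get_weather",
    arguments: ""
  }
};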
3641
+ var responseOutputItemDoneSchema = z18.object({
3642
+ type: z18.literal("response.output_item.done"),
3643
+ output_index: z18.number(),
3644
+ item: z18.discriminatedUnion("type", [
3645
+ z18.object({
3646
+ type: z18.literal("message"),
3647
+ id: z18.string()
3648
+ }),
3649
+ z18.object({
3650
+ type: z18.literal("reasoning"),
3651
+ id: z18.string(),
3652
+ encrypted_content: z18.string().nullish()
3653
+ }),
3654
+ z18.object({
3655
+ type: z18.literal("function_call"),
3656
+ id: z18.string(),
3657
+ call_id: z18.string(),
3658
+ name: z18.string(),
3659
+ arguments: z18.string(),
3660
+ status: z18.literal("completed")
3661
+ }),
3662
+ codeInterpreterCallItem,
3663
+ imageGenerationCallItem,
3664
+ webSearchCallItem,
3665
+ fileSearchCallItem,
3666
+ z18.object({
3667
+ type: z18.literal("computer_call"),
3668
+ id: z18.string(),
3669
+ status: z18.literal("completed")
3670
+ })
3671
+ ])
3672
+ });
3673
+ var responseFunctionCallArgumentsDeltaSchema = z18.object({
3674
+ type: z18.literal("response.function_call_arguments.delta"),
3675
+ item_id: z18.string(),
3676
+ output_index: z18.number(),
3677
+ delta: z18.string()
3678
+ });
3679
+ var responseAnnotationAddedSchema = z18.object({
3680
+ type: z18.literal("response.output_text.annotation.added"),
3681
+ annotation: z18.discriminatedUnion("type", [
3682
+ z18.object({
3683
+ type: z18.literal("url_citation"),
3684
+ url: z18.string(),
3685
+ title: z18.string()
3686
+ }),
3687
+ z18.object({
3688
+ type: z18.literal("file_citation"),
3689
+ file_id: z18.string(),
3690
+ filename: z18.string().nullish(),
3691
+ index: z18.number().nullish(),
3692
+ start_index: z18.number().nullish(),
3693
+ end_index: z18.number().nullish(),
3694
+ quote: z18.string().nullish()
3695
+ })
3696
+ ])
3697
+ });
3698
+ var responseReasoningSummaryPartAddedSchema = z18.object({
3699
+ type: z18.literal("response.reasoning_summary_part.added"),
3700
+ item_id: z18.string(),
3701
+ summary_index: z18.number()
3702
+ });
3703
+ var responseReasoningSummaryTextDeltaSchema = z18.object({
3704
+ type: z18.literal("response.reasoning_summary_text.delta"),
3705
+ item_id: z18.string(),
3706
+ summary_index: z18.number(),
3707
+ delta: z18.string()
3708
+ });
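Both summary events carry an item_id plus a summary_index, and the streaming handlers above join the two into a single reasoning-part id so that each summary part gets its own reasoning-start/-delta/-end lifecycle. A sketch of the id construction (variable name is illustrative):

const reasoningPartId = `${chunk.item_id}:${chunk.summary_index}`; // e.g. "rs_123:1"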
3709
+ var openaiResponsesChunkSchema = z18.union([
3710
+ textDeltaChunkSchema,
3711
+ responseFinishedChunkSchema,
3712
+ responseCreatedChunkSchema,
3713
+ responseOutputItemAddedSchema,
3714
+ responseOutputItemDoneSchema,
3715
+ responseFunctionCallArgumentsDeltaSchema,
3716
+ responseAnnotationAddedSchema,
3717
+ responseReasoningSummaryPartAddedSchema,
3718
+ responseReasoningSummaryTextDeltaSchema,
3719
+ errorChunkSchema,
3720
+ z18.object({ type: z18.string() }).loose()
3721
+ // fallback for unknown chunks
3722
+ ]);
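The trailing loose object keeps this union forward-compatible: chunk types the SDK does not model still validate and simply fall through the handlers unprocessed. A small, self-contained sketch of that behavior (the schema and payload below are illustrative, not the bundle's exports):

import { z } from "zod/v4";

const chunkSchema = z.union([
  z.object({
    type: z.literal("response.output_text.delta"),
    item_id: z.string(),
    delta: z.string()
  }),
  z.object({ type: z.string() }).loose() // fallback for unknown chunks
]);

const parsed = chunkSchema.safeParse(
  JSON.parse('{"type":"response.some_future.event","detail":{"x":1}}')
);
// parsed.success === true: the unknown event survives validation and can be ignored.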
3723
+ function isTextDeltaChunk(chunk) {
3724
+ return chunk.type === "response.output_text.delta";
3725
+ }
3726
+ function isResponseOutputItemDoneChunk(chunk) {
3727
+ return chunk.type === "response.output_item.done";
3728
+ }
3729
+ function isResponseOutputItemDoneReasoningChunk(chunk) {
3730
+ return isResponseOutputItemDoneChunk(chunk) && chunk.item.type === "reasoning";
3731
+ }
3732
+ function isResponseFinishedChunk(chunk) {
3733
+ return chunk.type === "response.completed" || chunk.type === "response.incomplete";
3734
+ }
3735
+ function isResponseCreatedChunk(chunk) {
3736
+ return chunk.type === "response.created";
3737
+ }
3738
+ function isResponseFunctionCallArgumentsDeltaChunk(chunk) {
3739
+ return chunk.type === "response.function_call_arguments.delta";
3740
+ }
3741
+ function isResponseOutputItemAddedChunk(chunk) {
3742
+ return chunk.type === "response.output_item.added";
3743
+ }
3744
+ function isResponseOutputItemAddedReasoningChunk(chunk) {
3745
+ return isResponseOutputItemAddedChunk(chunk) && chunk.item.type === "reasoning";
3746
+ }
3747
+ function isResponseAnnotationAddedChunk(chunk) {
3748
+ return chunk.type === "response.output_text.annotation.added";
3749
+ }
3750
+ function isResponseReasoningSummaryPartAddedChunk(chunk) {
3751
+ return chunk.type === "response.reasoning_summary_part.added";
3752
+ }
3753
+ function isResponseReasoningSummaryTextDeltaChunk(chunk) {
3754
+ return chunk.type === "response.reasoning_summary_text.delta";
3755
+ }
3756
+ function isErrorChunk(chunk) {
3757
+ return chunk.type === "error";
3758
+ }
3759
+ function getResponsesModelConfig(modelId) {
3760
+ const supportsFlexProcessing2 = modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
3761
+ const supportsPriorityProcessing2 = modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
3762
+ const defaults = {
3763
+ requiredAutoTruncation: false,
3764
+ systemMessageMode: "system",
3765
+ supportsFlexProcessing: supportsFlexProcessing2,
3766
+ supportsPriorityProcessing: supportsPriorityProcessing2
3767
+ };
3768
+ if (modelId.startsWith("gpt-5-chat")) {
3769
+ return {
3770
+ ...defaults,
3771
+ isReasoningModel: false
3772
+ };
3773
+ }
3774
+ if (modelId.startsWith("o") || modelId.startsWith("gpt-5") || modelId.startsWith("codex-") || modelId.startsWith("computer-use")) {
3775
+ if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {
3776
+ return {
3777
+ ...defaults,
3778
+ isReasoningModel: true,
3779
+ systemMessageMode: "remove"
3780
+ };
3781
+ }
3782
+ return {
3783
+ ...defaults,
3784
+ isReasoningModel: true,
3785
+ systemMessageMode: "developer"
3786
+ };
3787
+ }
3788
+ return {
3789
+ ...defaults,
3790
+ isReasoningModel: false
3791
+ };
3792
+ }
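To make the branching above concrete, here is how a few model IDs trace through the function (the IDs are illustrative inputs; the results follow directly from the prefix checks):

getResponsesModelConfig("gpt-5-chat-latest");
// => { isReasoningModel: false, systemMessageMode: "system",
//      supportsFlexProcessing: false, supportsPriorityProcessing: false, requiredAutoTruncation: false }

getResponsesModelConfig("o1-mini");
// => { isReasoningModel: true, systemMessageMode: "remove",
//      supportsFlexProcessing: false, supportsPriorityProcessing: false, requiredAutoTruncation: false }

getResponsesModelConfig("o3-mini");
// => { isReasoningModel: true, systemMessageMode: "developer",
//      supportsFlexProcessing: true, supportsPriorityProcessing: true, requiredAutoTruncation: false }

getResponsesModelConfig("gpt-4o");
// => { isReasoningModel: false, systemMessageMode: "system",
//      supportsFlexProcessing: false, supportsPriorityProcessing: true, requiredAutoTruncation: false }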
3793
+ var openaiResponsesProviderOptionsSchema = z18.object({
3794
+ include: z18.array(
3795
+ z18.enum([
3796
+ "reasoning.encrypted_content",
3797
+ "file_search_call.results",
3798
+ "message.output_text.logprobs"
3799
+ ])
3800
+ ).nullish(),
3801
+ instructions: z18.string().nullish(),
3802
+ /**
3803
+ * Return the log probabilities of the tokens.
3804
+ *
3805
+ * Setting to true will return the log probabilities of the tokens that
3806
+ * were generated.
3807
+ *
3808
+ * Setting to a number will return the log probabilities of the top n
3809
+ * tokens that were generated.
3810
+ *
3811
+ * @see https://platform.openai.com/docs/api-reference/responses/create
3812
+ * @see https://cookbook.openai.com/examples/using_logprobs
3813
+ */
3814
+ logprobs: z18.union([z18.boolean(), z18.number().min(1).max(TOP_LOGPROBS_MAX)]).optional(),
3815
+ /**
3816
+ * The maximum number of total calls to built-in tools that can be processed in a response.
3817
+ * This maximum number applies across all built-in tool calls, not per individual tool.
3818
+ * Any further attempts to call a tool by the model will be ignored.
3819
+ */
3820
+ maxToolCalls: z18.number().nullish(),
3821
+ metadata: z18.any().nullish(),
3822
+ parallelToolCalls: z18.boolean().nullish(),
3823
+ previousResponseId: z18.string().nullish(),
3824
+ promptCacheKey: z18.string().nullish(),
3825
+ reasoningEffort: z18.string().nullish(),
3826
+ reasoningSummary: z18.string().nullish(),
3827
+ safetyIdentifier: z18.string().nullish(),
3828
+ serviceTier: z18.enum(["auto", "flex", "priority"]).nullish(),
3829
+ store: z18.boolean().nullish(),
3830
+ strictJsonSchema: z18.boolean().nullish(),
3831
+ textVerbosity: z18.enum(["low", "medium", "high"]).nullish(),
3832
+ user: z18.string().nullish()
3833
+ });
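An illustrative providerOptions.openai object that this schema accepts (every value below is an assumption chosen for the example; logprobs may be a boolean or a top-N count between 1 and TOP_LOGPROBS_MAX):

const exampleResponsesProviderOptions = {
  logprobs: 5,
  maxToolCalls: 3,
  parallelToolCalls: false,
  reasoningEffort: "low",
  reasoningSummary: "auto",
  serviceTier: "flex",
  store: false,
  strictJsonSchema: true,
  textVerbosity: "low",
  user: "user-1234"
};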
3834
+ export {
3835
+ OpenAIChatLanguageModel,
3836
+ OpenAICompletionLanguageModel,
3837
+ OpenAIEmbeddingModel,
3838
+ OpenAIImageModel,
3839
+ OpenAIResponsesLanguageModel,
3840
+ OpenAISpeechModel,
3841
+ OpenAITranscriptionModel,
3842
+ hasDefaultResponseFormat,
3843
+ modelMaxImagesPerCall,
3844
+ openAITranscriptionProviderOptions,
3845
+ openaiCompletionProviderOptions,
3846
+ openaiEmbeddingProviderOptions,
3847
+ openaiProviderOptions
3848
+ };
3849
+ //# sourceMappingURL=index.mjs.map