@zenning/openai 1.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,2768 @@
1
+ "use strict";
2
+ var __defProp = Object.defineProperty;
3
+ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
4
+ var __getOwnPropNames = Object.getOwnPropertyNames;
5
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
6
+ var __export = (target, all) => {
7
+ for (var name in all)
8
+ __defProp(target, name, { get: all[name], enumerable: true });
9
+ };
10
+ var __copyProps = (to, from, except, desc) => {
11
+ if (from && typeof from === "object" || typeof from === "function") {
12
+ for (let key of __getOwnPropNames(from))
13
+ if (!__hasOwnProp.call(to, key) && key !== except)
14
+ __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
15
+ }
16
+ return to;
17
+ };
18
+ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
19
+
20
+ // src/internal/index.ts
21
+ var internal_exports = {};
22
+ __export(internal_exports, {
23
+ OpenAIChatLanguageModel: () => OpenAIChatLanguageModel,
24
+ OpenAICompletionLanguageModel: () => OpenAICompletionLanguageModel,
25
+ OpenAIEmbeddingModel: () => OpenAIEmbeddingModel,
26
+ OpenAIImageModel: () => OpenAIImageModel,
27
+ OpenAIResponsesLanguageModel: () => OpenAIResponsesLanguageModel,
28
+ OpenAISpeechModel: () => OpenAISpeechModel,
29
+ OpenAITranscriptionModel: () => OpenAITranscriptionModel,
30
+ hasDefaultResponseFormat: () => hasDefaultResponseFormat,
31
+ modelMaxImagesPerCall: () => modelMaxImagesPerCall
32
+ });
33
+ module.exports = __toCommonJS(internal_exports);
34
+
35
+ // src/openai-chat-language-model.ts
36
+ var import_provider3 = require("@ai-sdk/provider");
37
+ var import_provider_utils3 = require("@ai-sdk/provider-utils");
38
+ var import_zod2 = require("zod");
39
+
40
+ // src/convert-to-openai-chat-messages.ts
41
+ var import_provider = require("@ai-sdk/provider");
42
+ var import_provider_utils = require("@ai-sdk/provider-utils");
43
+ function convertToOpenAIChatMessages({
44
+ prompt,
45
+ useLegacyFunctionCalling = false,
46
+ systemMessageMode = "system"
47
+ }) {
48
+ const messages = [];
49
+ const warnings = [];
50
+ for (const { role, content } of prompt) {
51
+ switch (role) {
52
+ case "system": {
53
+ switch (systemMessageMode) {
54
+ case "system": {
55
+ messages.push({ role: "system", content });
56
+ break;
57
+ }
58
+ case "developer": {
59
+ messages.push({ role: "developer", content });
60
+ break;
61
+ }
62
+ case "remove": {
63
+ warnings.push({
64
+ type: "other",
65
+ message: "system messages are removed for this model"
66
+ });
67
+ break;
68
+ }
69
+ default: {
70
+ const _exhaustiveCheck = systemMessageMode;
71
+ throw new Error(
72
+ `Unsupported system message mode: ${_exhaustiveCheck}`
73
+ );
74
+ }
75
+ }
76
+ break;
77
+ }
78
+ case "user": {
79
+ if (content.length === 1 && content[0].type === "text") {
80
+ messages.push({ role: "user", content: content[0].text });
81
+ break;
82
+ }
83
+ messages.push({
84
+ role: "user",
85
+ content: content.map((part, index) => {
86
+ var _a, _b, _c, _d;
87
+ switch (part.type) {
88
+ case "text": {
89
+ return { type: "text", text: part.text };
90
+ }
91
+ case "image": {
92
+ return {
93
+ type: "image_url",
94
+ image_url: {
95
+ url: part.image instanceof URL ? part.image.toString() : `data:${(_a = part.mimeType) != null ? _a : "image/jpeg"};base64,${(0, import_provider_utils.convertUint8ArrayToBase64)(part.image)}`,
96
+ // OpenAI specific extension: image detail
97
+ detail: (_c = (_b = part.providerMetadata) == null ? void 0 : _b.openai) == null ? void 0 : _c.imageDetail
98
+ }
99
+ };
100
+ }
101
+ case "file": {
102
+ if (part.data instanceof URL) {
103
+ throw new import_provider.UnsupportedFunctionalityError({
104
+ functionality: "'File content parts with URL data' functionality not supported."
105
+ });
106
+ }
107
+ switch (part.mimeType) {
108
+ case "audio/wav": {
109
+ return {
110
+ type: "input_audio",
111
+ input_audio: { data: part.data, format: "wav" }
112
+ };
113
+ }
114
+ case "audio/mp3":
115
+ case "audio/mpeg": {
116
+ return {
117
+ type: "input_audio",
118
+ input_audio: { data: part.data, format: "mp3" }
119
+ };
120
+ }
121
+ case "application/pdf": {
122
+ return {
123
+ type: "file",
124
+ file: {
125
+ filename: (_d = part.filename) != null ? _d : `part-${index}.pdf`,
126
+ file_data: `data:application/pdf;base64,${part.data}`
127
+ }
128
+ };
129
+ }
130
+ default: {
131
+ throw new import_provider.UnsupportedFunctionalityError({
132
+ functionality: `File content part type ${part.mimeType} in user messages`
133
+ });
134
+ }
135
+ }
136
+ }
137
+ }
138
+ })
139
+ });
140
+ break;
141
+ }
142
+ case "assistant": {
143
+ let text = "";
144
+ const toolCalls = [];
145
+ for (const part of content) {
146
+ switch (part.type) {
147
+ case "text": {
148
+ text += part.text;
149
+ break;
150
+ }
151
+ case "tool-call": {
152
+ toolCalls.push({
153
+ id: part.toolCallId,
154
+ type: "function",
155
+ function: {
156
+ name: part.toolName,
157
+ arguments: JSON.stringify(part.args)
158
+ }
159
+ });
160
+ break;
161
+ }
162
+ }
163
+ }
164
+ if (useLegacyFunctionCalling) {
165
+ if (toolCalls.length > 1) {
166
+ throw new import_provider.UnsupportedFunctionalityError({
167
+ functionality: "useLegacyFunctionCalling with multiple tool calls in one message"
168
+ });
169
+ }
170
+ messages.push({
171
+ role: "assistant",
172
+ content: text,
173
+ function_call: toolCalls.length > 0 ? toolCalls[0].function : void 0
174
+ });
175
+ } else {
176
+ messages.push({
177
+ role: "assistant",
178
+ content: text,
179
+ tool_calls: toolCalls.length > 0 ? toolCalls : void 0
180
+ });
181
+ }
182
+ break;
183
+ }
184
+ case "tool": {
185
+ for (const toolResponse of content) {
186
+ if (useLegacyFunctionCalling) {
187
+ messages.push({
188
+ role: "function",
189
+ name: toolResponse.toolName,
190
+ content: JSON.stringify(toolResponse.result)
191
+ });
192
+ } else {
193
+ messages.push({
194
+ role: "tool",
195
+ tool_call_id: toolResponse.toolCallId,
196
+ content: JSON.stringify(toolResponse.result)
197
+ });
198
+ }
199
+ }
200
+ break;
201
+ }
202
+ default: {
203
+ const _exhaustiveCheck = role;
204
+ throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
205
+ }
206
+ }
207
+ }
208
+ return { messages, warnings };
209
+ }
210
+
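+ // Usage sketch (illustrative, not part of the published bundle): the
+ // converter returns OpenAI wire-format messages plus any warnings it
+ // collected, flattening single-text user content to a plain string:
+ //
+ //   const { messages, warnings } = convertToOpenAIChatMessages({
+ //     prompt: [
+ //       { role: "system", content: "You are terse." },
+ //       { role: "user", content: [{ type: "text", text: "Hi" }] }
+ //     ]
+ //   });
+ //   // messages -> [{ role: "system", content: "You are terse." },
+ //   //              { role: "user", content: "Hi" }]
+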
211
+ // src/map-openai-chat-logprobs.ts
212
+ function mapOpenAIChatLogProbsOutput(logprobs) {
213
+ var _a, _b;
214
+ return (_b = (_a = logprobs == null ? void 0 : logprobs.content) == null ? void 0 : _a.map(({ token, logprob, top_logprobs }) => ({
215
+ token,
216
+ logprob,
217
+ topLogprobs: top_logprobs ? top_logprobs.map(({ token: token2, logprob: logprob2 }) => ({
218
+ token: token2,
219
+ logprob: logprob2
220
+ })) : []
221
+ }))) != null ? _b : void 0;
222
+ }
223
+
224
+ // src/map-openai-finish-reason.ts
225
+ function mapOpenAIFinishReason(finishReason) {
226
+ switch (finishReason) {
227
+ case "stop":
228
+ return "stop";
229
+ case "length":
230
+ return "length";
231
+ case "content_filter":
232
+ return "content-filter";
233
+ case "function_call":
234
+ case "tool_calls":
235
+ return "tool-calls";
236
+ default:
237
+ return "unknown";
238
+ }
239
+ }
240
+
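+ // Mapping sketch (illustrative): both the legacy "function_call" and the
+ // current "tool_calls" finish reasons collapse to "tool-calls":
+ //
+ //   mapOpenAIFinishReason("tool_calls");     // -> "tool-calls"
+ //   mapOpenAIFinishReason("content_filter"); // -> "content-filter"
+ //   mapOpenAIFinishReason(undefined);        // -> "unknown"
+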
241
+ // src/openai-error.ts
242
+ var import_zod = require("zod");
243
+ var import_provider_utils2 = require("@ai-sdk/provider-utils");
244
+ var openaiErrorDataSchema = import_zod.z.object({
245
+ error: import_zod.z.object({
246
+ message: import_zod.z.string(),
247
+ // The additional information below is handled loosely to support
248
+ // OpenAI-compatible providers that have slightly different error
249
+ // responses:
250
+ type: import_zod.z.string().nullish(),
251
+ param: import_zod.z.any().nullish(),
252
+ code: import_zod.z.union([import_zod.z.string(), import_zod.z.number()]).nullish()
253
+ })
254
+ });
255
+ var openaiFailedResponseHandler = (0, import_provider_utils2.createJsonErrorResponseHandler)({
256
+ errorSchema: openaiErrorDataSchema,
257
+ errorToMessage: (data) => data.error.message
258
+ });
259
+
260
+ // src/get-response-metadata.ts
261
+ function getResponseMetadata({
262
+ id,
263
+ model,
264
+ created
265
+ }) {
266
+ return {
267
+ id: id != null ? id : void 0,
268
+ modelId: model != null ? model : void 0,
269
+ timestamp: created != null ? new Date(created * 1e3) : void 0
270
+ };
271
+ }
272
+
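+ // Note (illustrative): OpenAI reports `created` in epoch seconds, so the
+ // helper multiplies by 1000 (1e3) to build a JavaScript Date:
+ //
+ //   getResponseMetadata({ id: "chatcmpl-1", model: "gpt-4o", created: 1700000000 });
+ //   // -> { id: "chatcmpl-1", modelId: "gpt-4o",
+ //   //      timestamp: 2023-11-14T22:13:20.000Z }
+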
273
+ // src/openai-prepare-tools.ts
274
+ var import_provider2 = require("@ai-sdk/provider");
275
+ function prepareTools({
276
+ mode,
277
+ useLegacyFunctionCalling = false,
278
+ structuredOutputs
279
+ }) {
280
+ var _a;
281
+ const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
282
+ const toolWarnings = [];
283
+ if (tools == null) {
284
+ return { tools: void 0, tool_choice: void 0, toolWarnings };
285
+ }
286
+ const toolChoice = mode.toolChoice;
287
+ if (useLegacyFunctionCalling) {
288
+ const openaiFunctions = [];
289
+ for (const tool of tools) {
290
+ if (tool.type === "provider-defined") {
291
+ toolWarnings.push({ type: "unsupported-tool", tool });
292
+ } else {
293
+ openaiFunctions.push({
294
+ name: tool.name,
295
+ description: tool.description,
296
+ parameters: tool.parameters
297
+ });
298
+ }
299
+ }
300
+ if (toolChoice == null) {
301
+ return {
302
+ functions: openaiFunctions,
303
+ function_call: void 0,
304
+ toolWarnings
305
+ };
306
+ }
307
+ const type2 = toolChoice.type;
308
+ switch (type2) {
309
+ case "auto":
310
+ case "none":
311
+ case void 0:
312
+ return {
313
+ functions: openaiFunctions,
314
+ function_call: void 0,
315
+ toolWarnings
316
+ };
317
+ case "required":
318
+ throw new import_provider2.UnsupportedFunctionalityError({
319
+ functionality: "useLegacyFunctionCalling and toolChoice: required"
320
+ });
321
+ default:
322
+ return {
323
+ functions: openaiFunctions,
324
+ function_call: { name: toolChoice.toolName },
325
+ toolWarnings
326
+ };
327
+ }
328
+ }
329
+ const openaiTools = [];
330
+ for (const tool of tools) {
331
+ if (tool.type === "provider-defined") {
332
+ toolWarnings.push({ type: "unsupported-tool", tool });
333
+ } else {
334
+ openaiTools.push({
335
+ type: "function",
336
+ function: {
337
+ name: tool.name,
338
+ description: tool.description,
339
+ parameters: tool.parameters,
340
+ strict: structuredOutputs ? true : void 0
341
+ }
342
+ });
343
+ }
344
+ }
345
+ if (toolChoice == null) {
346
+ return { tools: openaiTools, tool_choice: void 0, toolWarnings };
347
+ }
348
+ const type = toolChoice.type;
349
+ switch (type) {
350
+ case "auto":
351
+ case "none":
352
+ case "required":
353
+ return { tools: openaiTools, tool_choice: type, toolWarnings };
354
+ case "tool":
355
+ return {
356
+ tools: openaiTools,
357
+ tool_choice: {
358
+ type: "function",
359
+ function: {
360
+ name: toolChoice.toolName
361
+ }
362
+ },
363
+ toolWarnings
364
+ };
365
+ default: {
366
+ const _exhaustiveCheck = type;
367
+ throw new import_provider2.UnsupportedFunctionalityError({
368
+ functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`
369
+ });
370
+ }
371
+ }
372
+ }
373
+
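+ // Output sketch (illustrative; assumes one plain function tool and no
+ // toolChoice): with structuredOutputs enabled, each tool is emitted in
+ // strict mode:
+ //
+ //   prepareTools({
+ //     mode: { tools: [{ type: "function", name: "getWeather",
+ //                       description: "Weather lookup", parameters: {} }] },
+ //     structuredOutputs: true
+ //   });
+ //   // -> { tools: [{ type: "function",
+ //   //               function: { name: "getWeather", description: "Weather lookup",
+ //   //                           parameters: {}, strict: true } }],
+ //   //      tool_choice: undefined, toolWarnings: [] }
+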
374
+ // src/openai-chat-language-model.ts
375
+ var OpenAIChatLanguageModel = class {
376
+ constructor(modelId, settings, config) {
377
+ this.specificationVersion = "v1";
378
+ this.modelId = modelId;
379
+ this.settings = settings;
380
+ this.config = config;
381
+ }
382
+ get supportsStructuredOutputs() {
383
+ var _a;
384
+ return (_a = this.settings.structuredOutputs) != null ? _a : isReasoningModel(this.modelId);
385
+ }
386
+ get defaultObjectGenerationMode() {
387
+ if (isAudioModel(this.modelId)) {
388
+ return "tool";
389
+ }
390
+ return this.supportsStructuredOutputs ? "json" : "tool";
391
+ }
392
+ get provider() {
393
+ return this.config.provider;
394
+ }
395
+ get supportsImageUrls() {
396
+ return !this.settings.downloadImages;
397
+ }
398
+ getArgs({
399
+ mode,
400
+ prompt,
401
+ maxTokens,
402
+ temperature,
403
+ topP,
404
+ topK,
405
+ frequencyPenalty,
406
+ presencePenalty,
407
+ stopSequences,
408
+ responseFormat,
409
+ seed,
410
+ providerMetadata
411
+ }) {
412
+ var _a, _b, _c, _d, _e, _f, _g, _h;
413
+ const type = mode.type;
414
+ const warnings = [];
415
+ if (topK != null) {
416
+ warnings.push({
417
+ type: "unsupported-setting",
418
+ setting: "topK"
419
+ });
420
+ }
421
+ if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !this.supportsStructuredOutputs) {
422
+ warnings.push({
423
+ type: "unsupported-setting",
424
+ setting: "responseFormat",
425
+ details: "JSON response format schema is only supported with structuredOutputs"
426
+ });
427
+ }
428
+ const useLegacyFunctionCalling = this.settings.useLegacyFunctionCalling;
429
+ if (useLegacyFunctionCalling && this.settings.parallelToolCalls === true) {
430
+ throw new import_provider3.UnsupportedFunctionalityError({
431
+ functionality: "useLegacyFunctionCalling with parallelToolCalls"
432
+ });
433
+ }
434
+ if (useLegacyFunctionCalling && this.supportsStructuredOutputs) {
435
+ throw new import_provider3.UnsupportedFunctionalityError({
436
+ functionality: "structuredOutputs with useLegacyFunctionCalling"
437
+ });
438
+ }
439
+ const { messages, warnings: messageWarnings } = convertToOpenAIChatMessages(
440
+ {
441
+ prompt,
442
+ useLegacyFunctionCalling,
443
+ systemMessageMode: getSystemMessageMode(this.modelId)
444
+ }
445
+ );
446
+ warnings.push(...messageWarnings);
447
+ const baseArgs = {
448
+ // model id:
449
+ model: this.modelId,
450
+ // model specific settings:
451
+ logit_bias: this.settings.logitBias,
452
+ logprobs: this.settings.logprobs === true || typeof this.settings.logprobs === "number" ? true : void 0,
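+ // Note: a numeric `logprobs` setting doubles as the top_logprobs count,
+ // while a boolean `true` enables logprobs with zero alternatives.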
453
+ top_logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
454
+ user: this.settings.user,
455
+ parallel_tool_calls: this.settings.parallelToolCalls,
456
+ // standardized settings:
457
+ max_tokens: maxTokens,
458
+ temperature,
459
+ top_p: topP,
460
+ frequency_penalty: frequencyPenalty,
461
+ presence_penalty: presencePenalty,
462
+ response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? this.supportsStructuredOutputs && responseFormat.schema != null ? {
463
+ type: "json_schema",
464
+ json_schema: {
465
+ schema: responseFormat.schema,
466
+ strict: true,
467
+ name: (_a = responseFormat.name) != null ? _a : "response",
468
+ description: responseFormat.description
469
+ }
470
+ } : { type: "json_object" } : void 0,
471
+ stop: stopSequences,
472
+ seed,
473
+ // openai specific settings:
474
+ // TODO remove in next major version; we auto-map maxTokens now
475
+ max_completion_tokens: (_b = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _b.maxCompletionTokens,
476
+ store: (_c = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _c.store,
477
+ metadata: (_d = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _d.metadata,
478
+ prediction: (_e = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _e.prediction,
479
+ reasoning_effort: (_g = (_f = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _f.reasoningEffort) != null ? _g : this.settings.reasoningEffort,
480
+ // messages:
481
+ messages
482
+ };
483
+ if (isReasoningModel(this.modelId)) {
484
+ if (baseArgs.temperature != null) {
485
+ baseArgs.temperature = void 0;
486
+ warnings.push({
487
+ type: "unsupported-setting",
488
+ setting: "temperature",
489
+ details: "temperature is not supported for reasoning models"
490
+ });
491
+ }
492
+ if (baseArgs.top_p != null) {
493
+ baseArgs.top_p = void 0;
494
+ warnings.push({
495
+ type: "unsupported-setting",
496
+ setting: "topP",
497
+ details: "topP is not supported for reasoning models"
498
+ });
499
+ }
500
+ if (baseArgs.frequency_penalty != null) {
501
+ baseArgs.frequency_penalty = void 0;
502
+ warnings.push({
503
+ type: "unsupported-setting",
504
+ setting: "frequencyPenalty",
505
+ details: "frequencyPenalty is not supported for reasoning models"
506
+ });
507
+ }
508
+ if (baseArgs.presence_penalty != null) {
509
+ baseArgs.presence_penalty = void 0;
510
+ warnings.push({
511
+ type: "unsupported-setting",
512
+ setting: "presencePenalty",
513
+ details: "presencePenalty is not supported for reasoning models"
514
+ });
515
+ }
516
+ if (baseArgs.logit_bias != null) {
517
+ baseArgs.logit_bias = void 0;
518
+ warnings.push({
519
+ type: "other",
520
+ message: "logitBias is not supported for reasoning models"
521
+ });
522
+ }
523
+ if (baseArgs.logprobs != null) {
524
+ baseArgs.logprobs = void 0;
525
+ warnings.push({
526
+ type: "other",
527
+ message: "logprobs is not supported for reasoning models"
528
+ });
529
+ }
530
+ if (baseArgs.top_logprobs != null) {
531
+ baseArgs.top_logprobs = void 0;
532
+ warnings.push({
533
+ type: "other",
534
+ message: "topLogprobs is not supported for reasoning models"
535
+ });
536
+ }
537
+ if (baseArgs.max_tokens != null) {
538
+ if (baseArgs.max_completion_tokens == null) {
539
+ baseArgs.max_completion_tokens = baseArgs.max_tokens;
540
+ }
541
+ baseArgs.max_tokens = void 0;
542
+ }
543
+ } else if (this.modelId.startsWith("gpt-4o-search-preview") || this.modelId.startsWith("gpt-4o-mini-search-preview")) {
544
+ if (baseArgs.temperature != null) {
545
+ baseArgs.temperature = void 0;
546
+ warnings.push({
547
+ type: "unsupported-setting",
548
+ setting: "temperature",
549
+ details: "temperature is not supported for the search preview models and has been removed."
550
+ });
551
+ }
552
+ }
553
+ switch (type) {
554
+ case "regular": {
555
+ const { tools, tool_choice, functions, function_call, toolWarnings } = prepareTools({
556
+ mode,
557
+ useLegacyFunctionCalling,
558
+ structuredOutputs: this.supportsStructuredOutputs
559
+ });
560
+ return {
561
+ args: {
562
+ ...baseArgs,
563
+ tools,
564
+ tool_choice,
565
+ functions,
566
+ function_call
567
+ },
568
+ warnings: [...warnings, ...toolWarnings]
569
+ };
570
+ }
571
+ case "object-json": {
572
+ return {
573
+ args: {
574
+ ...baseArgs,
575
+ response_format: this.supportsStructuredOutputs && mode.schema != null ? {
576
+ type: "json_schema",
577
+ json_schema: {
578
+ schema: mode.schema,
579
+ strict: true,
580
+ name: (_h = mode.name) != null ? _h : "response",
581
+ description: mode.description
582
+ }
583
+ } : { type: "json_object" }
584
+ },
585
+ warnings
586
+ };
587
+ }
588
+ case "object-tool": {
589
+ return {
590
+ args: useLegacyFunctionCalling ? {
591
+ ...baseArgs,
592
+ function_call: {
593
+ name: mode.tool.name
594
+ },
595
+ functions: [
596
+ {
597
+ name: mode.tool.name,
598
+ description: mode.tool.description,
599
+ parameters: mode.tool.parameters
600
+ }
601
+ ]
602
+ } : {
603
+ ...baseArgs,
604
+ tool_choice: {
605
+ type: "function",
606
+ function: { name: mode.tool.name }
607
+ },
608
+ tools: [
609
+ {
610
+ type: "function",
611
+ function: {
612
+ name: mode.tool.name,
613
+ description: mode.tool.description,
614
+ parameters: mode.tool.parameters,
615
+ strict: this.supportsStructuredOutputs ? true : void 0
616
+ }
617
+ }
618
+ ]
619
+ },
620
+ warnings
621
+ };
622
+ }
623
+ default: {
624
+ const _exhaustiveCheck = type;
625
+ throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
626
+ }
627
+ }
628
+ }
629
+ async doGenerate(options) {
630
+ var _a, _b, _c, _d, _e, _f, _g, _h;
631
+ const { args: body, warnings } = this.getArgs(options);
632
+ const {
633
+ responseHeaders,
634
+ value: response,
635
+ rawValue: rawResponse
636
+ } = await (0, import_provider_utils3.postJsonToApi)({
637
+ url: this.config.url({
638
+ path: "/chat/completions",
639
+ modelId: this.modelId
640
+ }),
641
+ headers: (0, import_provider_utils3.combineHeaders)(this.config.headers(), options.headers),
642
+ body,
643
+ failedResponseHandler: openaiFailedResponseHandler,
644
+ successfulResponseHandler: (0, import_provider_utils3.createJsonResponseHandler)(
645
+ openaiChatResponseSchema
646
+ ),
647
+ abortSignal: options.abortSignal,
648
+ fetch: this.config.fetch
649
+ });
650
+ const { messages: rawPrompt, ...rawSettings } = body;
651
+ const choice = response.choices[0];
652
+ const completionTokenDetails = (_a = response.usage) == null ? void 0 : _a.completion_tokens_details;
653
+ const promptTokenDetails = (_b = response.usage) == null ? void 0 : _b.prompt_tokens_details;
654
+ const providerMetadata = { openai: {} };
655
+ if ((completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null) {
656
+ providerMetadata.openai.reasoningTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens;
657
+ }
658
+ if ((completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens) != null) {
659
+ providerMetadata.openai.acceptedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens;
660
+ }
661
+ if ((completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens) != null) {
662
+ providerMetadata.openai.rejectedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens;
663
+ }
664
+ if ((promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null) {
665
+ providerMetadata.openai.cachedPromptTokens = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens;
666
+ }
667
+ return {
668
+ text: (_c = choice.message.content) != null ? _c : void 0,
669
+ toolCalls: this.settings.useLegacyFunctionCalling && choice.message.function_call ? [
670
+ {
671
+ toolCallType: "function",
672
+ toolCallId: (0, import_provider_utils3.generateId)(),
673
+ toolName: choice.message.function_call.name,
674
+ args: choice.message.function_call.arguments
675
+ }
676
+ ] : (_d = choice.message.tool_calls) == null ? void 0 : _d.map((toolCall) => {
677
+ var _a2;
678
+ return {
679
+ toolCallType: "function",
680
+ toolCallId: (_a2 = toolCall.id) != null ? _a2 : (0, import_provider_utils3.generateId)(),
681
+ toolName: toolCall.function.name,
682
+ args: toolCall.function.arguments
683
+ };
684
+ }),
685
+ finishReason: mapOpenAIFinishReason(choice.finish_reason),
686
+ usage: {
687
+ promptTokens: (_f = (_e = response.usage) == null ? void 0 : _e.prompt_tokens) != null ? _f : NaN,
688
+ completionTokens: (_h = (_g = response.usage) == null ? void 0 : _g.completion_tokens) != null ? _h : NaN
689
+ },
690
+ rawCall: { rawPrompt, rawSettings },
691
+ rawResponse: { headers: responseHeaders, body: rawResponse },
692
+ request: { body: JSON.stringify(body) },
693
+ response: getResponseMetadata(response),
694
+ warnings,
695
+ logprobs: mapOpenAIChatLogProbsOutput(choice.logprobs),
696
+ providerMetadata
697
+ };
698
+ }
699
+ async doStream(options) {
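+ // simulateStreaming: perform a regular generate call, then replay the
+ // result as a synthetic stream (response metadata, one text delta, any
+ // tool calls, then the finish event).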
700
+ if (this.settings.simulateStreaming) {
701
+ const result = await this.doGenerate(options);
702
+ const simulatedStream = new ReadableStream({
703
+ start(controller) {
704
+ controller.enqueue({ type: "response-metadata", ...result.response });
705
+ if (result.text) {
706
+ controller.enqueue({
707
+ type: "text-delta",
708
+ textDelta: result.text
709
+ });
710
+ }
711
+ if (result.toolCalls) {
712
+ for (const toolCall of result.toolCalls) {
713
+ controller.enqueue({
714
+ type: "tool-call-delta",
715
+ toolCallType: "function",
716
+ toolCallId: toolCall.toolCallId,
717
+ toolName: toolCall.toolName,
718
+ argsTextDelta: toolCall.args
719
+ });
720
+ controller.enqueue({
721
+ type: "tool-call",
722
+ ...toolCall
723
+ });
724
+ }
725
+ }
726
+ controller.enqueue({
727
+ type: "finish",
728
+ finishReason: result.finishReason,
729
+ usage: result.usage,
730
+ logprobs: result.logprobs,
731
+ providerMetadata: result.providerMetadata
732
+ });
733
+ controller.close();
734
+ }
735
+ });
736
+ return {
737
+ stream: simulatedStream,
738
+ rawCall: result.rawCall,
739
+ rawResponse: result.rawResponse,
740
+ warnings: result.warnings
741
+ };
742
+ }
743
+ const { args, warnings } = this.getArgs(options);
744
+ const body = {
745
+ ...args,
746
+ stream: true,
747
+ // only include stream_options when in strict compatibility mode:
748
+ stream_options: this.config.compatibility === "strict" ? { include_usage: true } : void 0
749
+ };
750
+ const { responseHeaders, value: response } = await (0, import_provider_utils3.postJsonToApi)({
751
+ url: this.config.url({
752
+ path: "/chat/completions",
753
+ modelId: this.modelId
754
+ }),
755
+ headers: (0, import_provider_utils3.combineHeaders)(this.config.headers(), options.headers),
756
+ body,
757
+ failedResponseHandler: openaiFailedResponseHandler,
758
+ successfulResponseHandler: (0, import_provider_utils3.createEventSourceResponseHandler)(
759
+ openaiChatChunkSchema
760
+ ),
761
+ abortSignal: options.abortSignal,
762
+ fetch: this.config.fetch
763
+ });
764
+ const { messages: rawPrompt, ...rawSettings } = args;
765
+ const toolCalls = [];
766
+ let finishReason = "unknown";
767
+ let usage = {
768
+ promptTokens: void 0,
769
+ completionTokens: void 0
770
+ };
771
+ let logprobs;
772
+ let isFirstChunk = true;
773
+ const { useLegacyFunctionCalling } = this.settings;
774
+ const providerMetadata = { openai: {} };
775
+ return {
776
+ stream: response.pipeThrough(
777
+ new TransformStream({
778
+ transform(chunk, controller) {
779
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
780
+ if (!chunk.success) {
781
+ finishReason = "error";
782
+ controller.enqueue({ type: "error", error: chunk.error });
783
+ return;
784
+ }
785
+ const value = chunk.value;
786
+ if ("error" in value) {
787
+ finishReason = "error";
788
+ controller.enqueue({ type: "error", error: value.error });
789
+ return;
790
+ }
791
+ if (isFirstChunk) {
792
+ isFirstChunk = false;
793
+ controller.enqueue({
794
+ type: "response-metadata",
795
+ ...getResponseMetadata(value)
796
+ });
797
+ }
798
+ if (value.usage != null) {
799
+ const {
800
+ prompt_tokens,
801
+ completion_tokens,
802
+ prompt_tokens_details,
803
+ completion_tokens_details
804
+ } = value.usage;
805
+ usage = {
806
+ promptTokens: prompt_tokens != null ? prompt_tokens : void 0,
807
+ completionTokens: completion_tokens != null ? completion_tokens : void 0
808
+ };
809
+ if ((completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens) != null) {
810
+ providerMetadata.openai.reasoningTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens;
811
+ }
812
+ if ((completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens) != null) {
813
+ providerMetadata.openai.acceptedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens;
814
+ }
815
+ if ((completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens) != null) {
816
+ providerMetadata.openai.rejectedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens;
817
+ }
818
+ if ((prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens) != null) {
819
+ providerMetadata.openai.cachedPromptTokens = prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens;
820
+ }
821
+ }
822
+ const choice = value.choices[0];
823
+ if ((choice == null ? void 0 : choice.finish_reason) != null) {
824
+ finishReason = mapOpenAIFinishReason(choice.finish_reason);
825
+ }
826
+ if ((choice == null ? void 0 : choice.delta) == null) {
827
+ return;
828
+ }
829
+ const delta = choice.delta;
830
+ if (delta.content != null) {
831
+ controller.enqueue({
832
+ type: "text-delta",
833
+ textDelta: delta.content
834
+ });
835
+ }
836
+ const mappedLogprobs = mapOpenAIChatLogProbsOutput(
837
+ choice == null ? void 0 : choice.logprobs
838
+ );
839
+ if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
840
+ if (logprobs === void 0) logprobs = [];
841
+ logprobs.push(...mappedLogprobs);
842
+ }
843
+ const mappedToolCalls = useLegacyFunctionCalling && delta.function_call != null ? [
844
+ {
845
+ type: "function",
846
+ id: (0, import_provider_utils3.generateId)(),
847
+ function: delta.function_call,
848
+ index: 0
849
+ }
850
+ ] : delta.tool_calls;
851
+ if (mappedToolCalls != null) {
852
+ for (const toolCallDelta of mappedToolCalls) {
853
+ const index = toolCallDelta.index;
854
+ if (toolCalls[index] == null) {
855
+ if (toolCallDelta.type !== "function") {
856
+ throw new import_provider3.InvalidResponseDataError({
857
+ data: toolCallDelta,
858
+ message: `Expected 'function' type.`
859
+ });
860
+ }
861
+ if (toolCallDelta.id == null) {
862
+ throw new import_provider3.InvalidResponseDataError({
863
+ data: toolCallDelta,
864
+ message: `Expected 'id' to be a string.`
865
+ });
866
+ }
867
+ if (((_a = toolCallDelta.function) == null ? void 0 : _a.name) == null) {
868
+ throw new import_provider3.InvalidResponseDataError({
869
+ data: toolCallDelta,
870
+ message: `Expected 'function.name' to be a string.`
871
+ });
872
+ }
873
+ toolCalls[index] = {
874
+ id: toolCallDelta.id,
875
+ type: "function",
876
+ function: {
877
+ name: toolCallDelta.function.name,
878
+ arguments: (_b = toolCallDelta.function.arguments) != null ? _b : ""
879
+ },
880
+ hasFinished: false
881
+ };
882
+ const toolCall2 = toolCalls[index];
883
+ if (((_c = toolCall2.function) == null ? void 0 : _c.name) != null && ((_d = toolCall2.function) == null ? void 0 : _d.arguments) != null) {
884
+ if (toolCall2.function.arguments.length > 0) {
885
+ controller.enqueue({
886
+ type: "tool-call-delta",
887
+ toolCallType: "function",
888
+ toolCallId: toolCall2.id,
889
+ toolName: toolCall2.function.name,
890
+ argsTextDelta: toolCall2.function.arguments
891
+ });
892
+ }
893
+ if ((0, import_provider_utils3.isParsableJson)(toolCall2.function.arguments)) {
894
+ controller.enqueue({
895
+ type: "tool-call",
896
+ toolCallType: "function",
897
+ toolCallId: (_e = toolCall2.id) != null ? _e : (0, import_provider_utils3.generateId)(),
898
+ toolName: toolCall2.function.name,
899
+ args: toolCall2.function.arguments
900
+ });
901
+ toolCall2.hasFinished = true;
902
+ }
903
+ }
904
+ continue;
905
+ }
906
+ const toolCall = toolCalls[index];
907
+ if (toolCall.hasFinished) {
908
+ continue;
909
+ }
910
+ if (((_f = toolCallDelta.function) == null ? void 0 : _f.arguments) != null) {
911
+ toolCall.function.arguments += (_h = (_g = toolCallDelta.function) == null ? void 0 : _g.arguments) != null ? _h : "";
912
+ }
913
+ controller.enqueue({
914
+ type: "tool-call-delta",
915
+ toolCallType: "function",
916
+ toolCallId: toolCall.id,
917
+ toolName: toolCall.function.name,
918
+ argsTextDelta: (_i = toolCallDelta.function.arguments) != null ? _i : ""
919
+ });
920
+ if (((_j = toolCall.function) == null ? void 0 : _j.name) != null && ((_k = toolCall.function) == null ? void 0 : _k.arguments) != null && (0, import_provider_utils3.isParsableJson)(toolCall.function.arguments)) {
921
+ controller.enqueue({
922
+ type: "tool-call",
923
+ toolCallType: "function",
924
+ toolCallId: (_l = toolCall.id) != null ? _l : (0, import_provider_utils3.generateId)(),
925
+ toolName: toolCall.function.name,
926
+ args: toolCall.function.arguments
927
+ });
928
+ toolCall.hasFinished = true;
929
+ }
930
+ }
931
+ }
932
+ },
933
+ flush(controller) {
934
+ var _a, _b;
935
+ controller.enqueue({
936
+ type: "finish",
937
+ finishReason,
938
+ logprobs,
939
+ usage: {
940
+ promptTokens: (_a = usage.promptTokens) != null ? _a : NaN,
941
+ completionTokens: (_b = usage.completionTokens) != null ? _b : NaN
942
+ },
943
+ ...providerMetadata != null ? { providerMetadata } : {}
944
+ });
945
+ }
946
+ })
947
+ ),
948
+ rawCall: { rawPrompt, rawSettings },
949
+ rawResponse: { headers: responseHeaders },
950
+ request: { body: JSON.stringify(body) },
951
+ warnings
952
+ };
953
+ }
954
+ };
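+ // Construction sketch (illustrative; the config shape is inferred from the
+ // calls above, not from package documentation):
+ //
+ //   const model = new OpenAIChatLanguageModel("gpt-4o", {}, {
+ //     provider: "openai.chat",
+ //     compatibility: "strict", // enables stream_options.include_usage
+ //     headers: () => ({ Authorization: `Bearer ${process.env.OPENAI_API_KEY}` }),
+ //     url: ({ path }) => `https://api.openai.com/v1${path}`
+ //   });
+ //   // model.doGenerate(...) / model.doStream(...) then post to
+ //   // /chat/completions with the arguments assembled in getArgs().
+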
955
+ var openaiTokenUsageSchema = import_zod2.z.object({
956
+ prompt_tokens: import_zod2.z.number().nullish(),
957
+ completion_tokens: import_zod2.z.number().nullish(),
958
+ prompt_tokens_details: import_zod2.z.object({
959
+ cached_tokens: import_zod2.z.number().nullish()
960
+ }).nullish(),
961
+ completion_tokens_details: import_zod2.z.object({
962
+ reasoning_tokens: import_zod2.z.number().nullish(),
963
+ accepted_prediction_tokens: import_zod2.z.number().nullish(),
964
+ rejected_prediction_tokens: import_zod2.z.number().nullish()
965
+ }).nullish()
966
+ }).nullish();
967
+ var openaiChatResponseSchema = import_zod2.z.object({
968
+ id: import_zod2.z.string().nullish(),
969
+ created: import_zod2.z.number().nullish(),
970
+ model: import_zod2.z.string().nullish(),
971
+ choices: import_zod2.z.array(
972
+ import_zod2.z.object({
973
+ message: import_zod2.z.object({
974
+ role: import_zod2.z.literal("assistant").nullish(),
975
+ content: import_zod2.z.string().nullish(),
976
+ function_call: import_zod2.z.object({
977
+ arguments: import_zod2.z.string(),
978
+ name: import_zod2.z.string()
979
+ }).nullish(),
980
+ tool_calls: import_zod2.z.array(
981
+ import_zod2.z.object({
982
+ id: import_zod2.z.string().nullish(),
983
+ type: import_zod2.z.literal("function"),
984
+ function: import_zod2.z.object({
985
+ name: import_zod2.z.string(),
986
+ arguments: import_zod2.z.string()
987
+ })
988
+ })
989
+ ).nullish()
990
+ }),
991
+ index: import_zod2.z.number(),
992
+ logprobs: import_zod2.z.object({
993
+ content: import_zod2.z.array(
994
+ import_zod2.z.object({
995
+ token: import_zod2.z.string(),
996
+ logprob: import_zod2.z.number(),
997
+ top_logprobs: import_zod2.z.array(
998
+ import_zod2.z.object({
999
+ token: import_zod2.z.string(),
1000
+ logprob: import_zod2.z.number()
1001
+ })
1002
+ )
1003
+ })
1004
+ ).nullable()
1005
+ }).nullish(),
1006
+ finish_reason: import_zod2.z.string().nullish()
1007
+ })
1008
+ ),
1009
+ usage: openaiTokenUsageSchema
1010
+ });
1011
+ var openaiChatChunkSchema = import_zod2.z.union([
1012
+ import_zod2.z.object({
1013
+ id: import_zod2.z.string().nullish(),
1014
+ created: import_zod2.z.number().nullish(),
1015
+ model: import_zod2.z.string().nullish(),
1016
+ choices: import_zod2.z.array(
1017
+ import_zod2.z.object({
1018
+ delta: import_zod2.z.object({
1019
+ role: import_zod2.z.enum(["assistant"]).nullish(),
1020
+ content: import_zod2.z.string().nullish(),
1021
+ function_call: import_zod2.z.object({
1022
+ name: import_zod2.z.string().optional(),
1023
+ arguments: import_zod2.z.string().optional()
1024
+ }).nullish(),
1025
+ tool_calls: import_zod2.z.array(
1026
+ import_zod2.z.object({
1027
+ index: import_zod2.z.number(),
1028
+ id: import_zod2.z.string().nullish(),
1029
+ type: import_zod2.z.literal("function").nullish(),
1030
+ function: import_zod2.z.object({
1031
+ name: import_zod2.z.string().nullish(),
1032
+ arguments: import_zod2.z.string().nullish()
1033
+ })
1034
+ })
1035
+ ).nullish()
1036
+ }).nullish(),
1037
+ logprobs: import_zod2.z.object({
1038
+ content: import_zod2.z.array(
1039
+ import_zod2.z.object({
1040
+ token: import_zod2.z.string(),
1041
+ logprob: import_zod2.z.number(),
1042
+ top_logprobs: import_zod2.z.array(
1043
+ import_zod2.z.object({
1044
+ token: import_zod2.z.string(),
1045
+ logprob: import_zod2.z.number()
1046
+ })
1047
+ )
1048
+ })
1049
+ ).nullable()
1050
+ }).nullish(),
1051
+ finish_reason: import_zod2.z.string().nullish(),
1052
+ index: import_zod2.z.number()
1053
+ })
1054
+ ),
1055
+ usage: openaiTokenUsageSchema
1056
+ }),
1057
+ openaiErrorDataSchema
1058
+ ]);
1059
+ function isReasoningModel(modelId) {
1060
+ return modelId.startsWith("o");
1061
+ }
1062
+ function isAudioModel(modelId) {
1063
+ return modelId.startsWith("gpt-4o-audio-preview");
1064
+ }
1065
+ function getSystemMessageMode(modelId) {
1066
+ var _a, _b;
1067
+ if (!isReasoningModel(modelId)) {
1068
+ return "system";
1069
+ }
1070
+ return (_b = (_a = reasoningModels[modelId]) == null ? void 0 : _a.systemMessageMode) != null ? _b : "developer";
1071
+ }
1072
+ var reasoningModels = {
1073
+ "o1-mini": {
1074
+ systemMessageMode: "remove"
1075
+ },
1076
+ "o1-mini-2024-09-12": {
1077
+ systemMessageMode: "remove"
1078
+ },
1079
+ "o1-preview": {
1080
+ systemMessageMode: "remove"
1081
+ },
1082
+ "o1-preview-2024-09-12": {
1083
+ systemMessageMode: "remove"
1084
+ },
1085
+ o3: {
1086
+ systemMessageMode: "developer"
1087
+ },
1088
+ "o3-2025-04-16": {
1089
+ systemMessageMode: "developer"
1090
+ },
1091
+ "o3-mini": {
1092
+ systemMessageMode: "developer"
1093
+ },
1094
+ "o3-mini-2025-01-31": {
1095
+ systemMessageMode: "developer"
1096
+ },
1097
+ "o4-mini": {
1098
+ systemMessageMode: "developer"
1099
+ },
1100
+ "o4-mini-2025-04-16": {
1101
+ systemMessageMode: "developer"
1102
+ }
1103
+ };
1104
+
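+ // Behavior sketch (illustrative): ids starting with "o" are treated as
+ // reasoning models; reasoning models not listed above fall back to the
+ // "developer" system message mode:
+ //
+ //   getSystemMessageMode("gpt-4o");  // -> "system"    (not a reasoning model)
+ //   getSystemMessageMode("o1-mini"); // -> "remove"    (explicit entry above)
+ //   getSystemMessageMode("o4-mini"); // -> "developer" (explicit entry above)
+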
1105
+ // src/openai-completion-language-model.ts
1106
+ var import_provider5 = require("@ai-sdk/provider");
1107
+ var import_provider_utils4 = require("@ai-sdk/provider-utils");
1108
+ var import_zod3 = require("zod");
1109
+
1110
+ // src/convert-to-openai-completion-prompt.ts
1111
+ var import_provider4 = require("@ai-sdk/provider");
1112
+ function convertToOpenAICompletionPrompt({
1113
+ prompt,
1114
+ inputFormat,
1115
+ user = "user",
1116
+ assistant = "assistant"
1117
+ }) {
1118
+ if (inputFormat === "prompt" && prompt.length === 1 && prompt[0].role === "user" && prompt[0].content.length === 1 && prompt[0].content[0].type === "text") {
1119
+ return { prompt: prompt[0].content[0].text };
1120
+ }
1121
+ let text = "";
1122
+ if (prompt[0].role === "system") {
1123
+ text += `${prompt[0].content}
1124
+
1125
+ `;
1126
+ prompt = prompt.slice(1);
1127
+ }
1128
+ for (const { role, content } of prompt) {
1129
+ switch (role) {
1130
+ case "system": {
1131
+ throw new import_provider4.InvalidPromptError({
1132
+ message: "Unexpected system message in prompt: ${content}",
1133
+ prompt
1134
+ });
1135
+ }
1136
+ case "user": {
1137
+ const userMessage = content.map((part) => {
1138
+ switch (part.type) {
1139
+ case "text": {
1140
+ return part.text;
1141
+ }
1142
+ case "image": {
1143
+ throw new import_provider4.UnsupportedFunctionalityError({
1144
+ functionality: "images"
1145
+ });
1146
+ }
1147
+ }
1148
+ }).join("");
1149
+ text += `${user}:
1150
+ ${userMessage}
1151
+
1152
+ `;
1153
+ break;
1154
+ }
1155
+ case "assistant": {
1156
+ const assistantMessage = content.map((part) => {
1157
+ switch (part.type) {
1158
+ case "text": {
1159
+ return part.text;
1160
+ }
1161
+ case "tool-call": {
1162
+ throw new import_provider4.UnsupportedFunctionalityError({
1163
+ functionality: "tool-call messages"
1164
+ });
1165
+ }
1166
+ }
1167
+ }).join("");
1168
+ text += `${assistant}:
1169
+ ${assistantMessage}
1170
+
1171
+ `;
1172
+ break;
1173
+ }
1174
+ case "tool": {
1175
+ throw new import_provider4.UnsupportedFunctionalityError({
1176
+ functionality: "tool messages"
1177
+ });
1178
+ }
1179
+ default: {
1180
+ const _exhaustiveCheck = role;
1181
+ throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
1182
+ }
1183
+ }
1184
+ }
1185
+ text += `${assistant}:
1186
+ `;
1187
+ return {
1188
+ prompt: text,
1189
+ stopSequences: [`
1190
+ ${user}:`]
1191
+ };
1192
+ }
1193
+
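+ // Output sketch (illustrative): chat-style prompts are flattened into a
+ // "user:/assistant:" transcript with a trailing assistant cue, plus a stop
+ // sequence that ends generation at the next user turn:
+ //
+ //   convertToOpenAICompletionPrompt({
+ //     inputFormat: "messages",
+ //     prompt: [{ role: "user", content: [{ type: "text", text: "Hi" }] }]
+ //   });
+ //   // -> { prompt: "user:\nHi\n\nassistant:\n", stopSequences: ["\nuser:"] }
+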
1194
+ // src/map-openai-completion-logprobs.ts
1195
+ function mapOpenAICompletionLogProbs(logprobs) {
1196
+ return logprobs == null ? void 0 : logprobs.tokens.map((token, index) => ({
1197
+ token,
1198
+ logprob: logprobs.token_logprobs[index],
1199
+ topLogprobs: logprobs.top_logprobs ? Object.entries(logprobs.top_logprobs[index]).map(
1200
+ ([token2, logprob]) => ({
1201
+ token: token2,
1202
+ logprob
1203
+ })
1204
+ ) : []
1205
+ }));
1206
+ }
1207
+
1208
+ // src/openai-completion-language-model.ts
1209
+ var OpenAICompletionLanguageModel = class {
1210
+ constructor(modelId, settings, config) {
1211
+ this.specificationVersion = "v1";
1212
+ this.defaultObjectGenerationMode = void 0;
1213
+ this.modelId = modelId;
1214
+ this.settings = settings;
1215
+ this.config = config;
1216
+ }
1217
+ get provider() {
1218
+ return this.config.provider;
1219
+ }
1220
+ getArgs({
1221
+ mode,
1222
+ inputFormat,
1223
+ prompt,
1224
+ maxTokens,
1225
+ temperature,
1226
+ topP,
1227
+ topK,
1228
+ frequencyPenalty,
1229
+ presencePenalty,
1230
+ stopSequences: userStopSequences,
1231
+ responseFormat,
1232
+ seed
1233
+ }) {
1234
+ var _a;
1235
+ const type = mode.type;
1236
+ const warnings = [];
1237
+ if (topK != null) {
1238
+ warnings.push({
1239
+ type: "unsupported-setting",
1240
+ setting: "topK"
1241
+ });
1242
+ }
1243
+ if (responseFormat != null && responseFormat.type !== "text") {
1244
+ warnings.push({
1245
+ type: "unsupported-setting",
1246
+ setting: "responseFormat",
1247
+ details: "JSON response format is not supported."
1248
+ });
1249
+ }
1250
+ const { prompt: completionPrompt, stopSequences } = convertToOpenAICompletionPrompt({ prompt, inputFormat });
1251
+ const stop = [...stopSequences != null ? stopSequences : [], ...userStopSequences != null ? userStopSequences : []];
1252
+ const baseArgs = {
1253
+ // model id:
1254
+ model: this.modelId,
1255
+ // model specific settings:
1256
+ echo: this.settings.echo,
1257
+ logit_bias: this.settings.logitBias,
1258
+ logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
1259
+ suffix: this.settings.suffix,
1260
+ user: this.settings.user,
1261
+ // standardized settings:
1262
+ max_tokens: maxTokens,
1263
+ temperature,
1264
+ top_p: topP,
1265
+ frequency_penalty: frequencyPenalty,
1266
+ presence_penalty: presencePenalty,
1267
+ seed,
1268
+ // prompt:
1269
+ prompt: completionPrompt,
1270
+ // stop sequences:
1271
+ stop: stop.length > 0 ? stop : void 0
1272
+ };
1273
+ switch (type) {
1274
+ case "regular": {
1275
+ if ((_a = mode.tools) == null ? void 0 : _a.length) {
1276
+ throw new import_provider5.UnsupportedFunctionalityError({
1277
+ functionality: "tools"
1278
+ });
1279
+ }
1280
+ if (mode.toolChoice) {
1281
+ throw new import_provider5.UnsupportedFunctionalityError({
1282
+ functionality: "toolChoice"
1283
+ });
1284
+ }
1285
+ return { args: baseArgs, warnings };
1286
+ }
1287
+ case "object-json": {
1288
+ throw new import_provider5.UnsupportedFunctionalityError({
1289
+ functionality: "object-json mode"
1290
+ });
1291
+ }
1292
+ case "object-tool": {
1293
+ throw new import_provider5.UnsupportedFunctionalityError({
1294
+ functionality: "object-tool mode"
1295
+ });
1296
+ }
1297
+ default: {
1298
+ const _exhaustiveCheck = type;
1299
+ throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
1300
+ }
1301
+ }
1302
+ }
1303
+ async doGenerate(options) {
1304
+ const { args, warnings } = this.getArgs(options);
1305
+ const {
1306
+ responseHeaders,
1307
+ value: response,
1308
+ rawValue: rawResponse
1309
+ } = await (0, import_provider_utils4.postJsonToApi)({
1310
+ url: this.config.url({
1311
+ path: "/completions",
1312
+ modelId: this.modelId
1313
+ }),
1314
+ headers: (0, import_provider_utils4.combineHeaders)(this.config.headers(), options.headers),
1315
+ body: args,
1316
+ failedResponseHandler: openaiFailedResponseHandler,
1317
+ successfulResponseHandler: (0, import_provider_utils4.createJsonResponseHandler)(
1318
+ openaiCompletionResponseSchema
1319
+ ),
1320
+ abortSignal: options.abortSignal,
1321
+ fetch: this.config.fetch
1322
+ });
1323
+ const { prompt: rawPrompt, ...rawSettings } = args;
1324
+ const choice = response.choices[0];
1325
+ return {
1326
+ text: choice.text,
1327
+ usage: {
1328
+ promptTokens: response.usage.prompt_tokens,
1329
+ completionTokens: response.usage.completion_tokens
1330
+ },
1331
+ finishReason: mapOpenAIFinishReason(choice.finish_reason),
1332
+ logprobs: mapOpenAICompletionLogProbs(choice.logprobs),
1333
+ rawCall: { rawPrompt, rawSettings },
1334
+ rawResponse: { headers: responseHeaders, body: rawResponse },
1335
+ response: getResponseMetadata(response),
1336
+ warnings,
1337
+ request: { body: JSON.stringify(args) }
1338
+ };
1339
+ }
1340
+ async doStream(options) {
1341
+ const { args, warnings } = this.getArgs(options);
1342
+ const body = {
1343
+ ...args,
1344
+ stream: true,
1345
+ // only include stream_options when in strict compatibility mode:
1346
+ stream_options: this.config.compatibility === "strict" ? { include_usage: true } : void 0
1347
+ };
1348
+ const { responseHeaders, value: response } = await (0, import_provider_utils4.postJsonToApi)({
1349
+ url: this.config.url({
1350
+ path: "/completions",
1351
+ modelId: this.modelId
1352
+ }),
1353
+ headers: (0, import_provider_utils4.combineHeaders)(this.config.headers(), options.headers),
1354
+ body,
1355
+ failedResponseHandler: openaiFailedResponseHandler,
1356
+ successfulResponseHandler: (0, import_provider_utils4.createEventSourceResponseHandler)(
1357
+ openaiCompletionChunkSchema
1358
+ ),
1359
+ abortSignal: options.abortSignal,
1360
+ fetch: this.config.fetch
1361
+ });
1362
+ const { prompt: rawPrompt, ...rawSettings } = args;
1363
+ let finishReason = "unknown";
1364
+ let usage = {
1365
+ promptTokens: Number.NaN,
1366
+ completionTokens: Number.NaN
1367
+ };
1368
+ let logprobs;
1369
+ let isFirstChunk = true;
1370
+ return {
1371
+ stream: response.pipeThrough(
1372
+ new TransformStream({
1373
+ transform(chunk, controller) {
1374
+ if (!chunk.success) {
1375
+ finishReason = "error";
1376
+ controller.enqueue({ type: "error", error: chunk.error });
1377
+ return;
1378
+ }
1379
+ const value = chunk.value;
1380
+ if ("error" in value) {
1381
+ finishReason = "error";
1382
+ controller.enqueue({ type: "error", error: value.error });
1383
+ return;
1384
+ }
1385
+ if (isFirstChunk) {
1386
+ isFirstChunk = false;
1387
+ controller.enqueue({
1388
+ type: "response-metadata",
1389
+ ...getResponseMetadata(value)
1390
+ });
1391
+ }
1392
+ if (value.usage != null) {
1393
+ usage = {
1394
+ promptTokens: value.usage.prompt_tokens,
1395
+ completionTokens: value.usage.completion_tokens
1396
+ };
1397
+ }
1398
+ const choice = value.choices[0];
1399
+ if ((choice == null ? void 0 : choice.finish_reason) != null) {
1400
+ finishReason = mapOpenAIFinishReason(choice.finish_reason);
1401
+ }
1402
+ if ((choice == null ? void 0 : choice.text) != null) {
1403
+ controller.enqueue({
1404
+ type: "text-delta",
1405
+ textDelta: choice.text
1406
+ });
1407
+ }
1408
+ const mappedLogprobs = mapOpenAICompletionLogProbs(
1409
+ choice == null ? void 0 : choice.logprobs
1410
+ );
1411
+ if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
1412
+ if (logprobs === void 0) logprobs = [];
1413
+ logprobs.push(...mappedLogprobs);
1414
+ }
1415
+ },
1416
+ flush(controller) {
1417
+ controller.enqueue({
1418
+ type: "finish",
1419
+ finishReason,
1420
+ logprobs,
1421
+ usage
1422
+ });
1423
+ }
1424
+ })
1425
+ ),
1426
+ rawCall: { rawPrompt, rawSettings },
1427
+ rawResponse: { headers: responseHeaders },
1428
+ warnings,
1429
+ request: { body: JSON.stringify(body) }
1430
+ };
1431
+ }
1432
+ };
1433
+ var openaiCompletionResponseSchema = import_zod3.z.object({
1434
+ id: import_zod3.z.string().nullish(),
1435
+ created: import_zod3.z.number().nullish(),
1436
+ model: import_zod3.z.string().nullish(),
1437
+ choices: import_zod3.z.array(
1438
+ import_zod3.z.object({
1439
+ text: import_zod3.z.string(),
1440
+ finish_reason: import_zod3.z.string(),
1441
+ logprobs: import_zod3.z.object({
1442
+ tokens: import_zod3.z.array(import_zod3.z.string()),
1443
+ token_logprobs: import_zod3.z.array(import_zod3.z.number()),
1444
+ top_logprobs: import_zod3.z.array(import_zod3.z.record(import_zod3.z.string(), import_zod3.z.number())).nullable()
1445
+ }).nullish()
1446
+ })
1447
+ ),
1448
+ usage: import_zod3.z.object({
1449
+ prompt_tokens: import_zod3.z.number(),
1450
+ completion_tokens: import_zod3.z.number()
1451
+ })
1452
+ });
1453
+ var openaiCompletionChunkSchema = import_zod3.z.union([
1454
+ import_zod3.z.object({
1455
+ id: import_zod3.z.string().nullish(),
1456
+ created: import_zod3.z.number().nullish(),
1457
+ model: import_zod3.z.string().nullish(),
1458
+ choices: import_zod3.z.array(
1459
+ import_zod3.z.object({
1460
+ text: import_zod3.z.string(),
1461
+ finish_reason: import_zod3.z.string().nullish(),
1462
+ index: import_zod3.z.number(),
1463
+ logprobs: import_zod3.z.object({
1464
+ tokens: import_zod3.z.array(import_zod3.z.string()),
1465
+ token_logprobs: import_zod3.z.array(import_zod3.z.number()),
1466
+ top_logprobs: import_zod3.z.array(import_zod3.z.record(import_zod3.z.string(), import_zod3.z.number())).nullable()
1467
+ }).nullish()
1468
+ })
1469
+ ),
1470
+ usage: import_zod3.z.object({
1471
+ prompt_tokens: import_zod3.z.number(),
1472
+ completion_tokens: import_zod3.z.number()
1473
+ }).nullish()
1474
+ }),
1475
+ openaiErrorDataSchema
1476
+ ]);
1477
+
1478
+ // src/openai-embedding-model.ts
1479
+ var import_provider6 = require("@ai-sdk/provider");
1480
+ var import_provider_utils5 = require("@ai-sdk/provider-utils");
1481
+ var import_zod4 = require("zod");
1482
+ var OpenAIEmbeddingModel = class {
1483
+ constructor(modelId, settings, config) {
1484
+ this.specificationVersion = "v1";
1485
+ this.modelId = modelId;
1486
+ this.settings = settings;
1487
+ this.config = config;
1488
+ }
1489
+ get provider() {
1490
+ return this.config.provider;
1491
+ }
1492
+ get maxEmbeddingsPerCall() {
1493
+ var _a;
1494
+ return (_a = this.settings.maxEmbeddingsPerCall) != null ? _a : 2048;
1495
+ }
1496
+ get supportsParallelCalls() {
1497
+ var _a;
1498
+ return (_a = this.settings.supportsParallelCalls) != null ? _a : true;
1499
+ }
1500
+ async doEmbed({
1501
+ values,
1502
+ headers,
1503
+ abortSignal
1504
+ }) {
1505
+ if (values.length > this.maxEmbeddingsPerCall) {
1506
+ throw new import_provider6.TooManyEmbeddingValuesForCallError({
1507
+ provider: this.provider,
1508
+ modelId: this.modelId,
1509
+ maxEmbeddingsPerCall: this.maxEmbeddingsPerCall,
1510
+ values
1511
+ });
1512
+ }
1513
+ const { responseHeaders, value: response } = await (0, import_provider_utils5.postJsonToApi)({
1514
+ url: this.config.url({
1515
+ path: "/embeddings",
1516
+ modelId: this.modelId
1517
+ }),
1518
+ headers: (0, import_provider_utils5.combineHeaders)(this.config.headers(), headers),
1519
+ body: {
1520
+ model: this.modelId,
1521
+ input: values,
1522
+ encoding_format: "float",
1523
+ dimensions: this.settings.dimensions,
1524
+ user: this.settings.user
1525
+ },
1526
+ failedResponseHandler: openaiFailedResponseHandler,
1527
+ successfulResponseHandler: (0, import_provider_utils5.createJsonResponseHandler)(
1528
+ openaiTextEmbeddingResponseSchema
1529
+ ),
1530
+ abortSignal,
1531
+ fetch: this.config.fetch
1532
+ });
1533
+ return {
1534
+ embeddings: response.data.map((item) => item.embedding),
1535
+ usage: response.usage ? { tokens: response.usage.prompt_tokens } : void 0,
1536
+ rawResponse: { headers: responseHeaders }
1537
+ };
1538
+ }
1539
+ };
1540
+ var openaiTextEmbeddingResponseSchema = import_zod4.z.object({
1541
+ data: import_zod4.z.array(import_zod4.z.object({ embedding: import_zod4.z.array(import_zod4.z.number()) })),
1542
+ usage: import_zod4.z.object({ prompt_tokens: import_zod4.z.number() }).nullish()
1543
+ });
1544
+
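+ // Call sketch (illustrative; `embeddingModel` is an assumed instance):
+ // doEmbed sends up to maxEmbeddingsPerCall (default 2048) values in a
+ // single POST /embeddings request:
+ //
+ //   const { embeddings } = await embeddingModel.doEmbed({ values: ["a", "b"] });
+ //   // embeddings -> [[0.01, ...], [0.02, ...]]  (response.data[i].embedding)
+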
1545
+ // src/openai-image-model.ts
1546
+ var import_provider_utils6 = require("@ai-sdk/provider-utils");
1547
+ var import_zod5 = require("zod");
1548
+
1549
+ // src/openai-image-settings.ts
1550
+ var modelMaxImagesPerCall = {
1551
+ "dall-e-3": 1,
1552
+ "dall-e-2": 10,
1553
+ "gpt-image-1": 10
1554
+ };
1555
+ var hasDefaultResponseFormat = /* @__PURE__ */ new Set(["gpt-image-1"]);
1556
+
1557
+ // src/openai-image-model.ts
1558
+ var OpenAIImageModel = class {
1559
+ constructor(modelId, settings, config) {
1560
+ this.modelId = modelId;
1561
+ this.settings = settings;
1562
+ this.config = config;
1563
+ this.specificationVersion = "v1";
1564
+ }
1565
+ get maxImagesPerCall() {
1566
+ var _a, _b;
1567
+ return (_b = (_a = this.settings.maxImagesPerCall) != null ? _a : modelMaxImagesPerCall[this.modelId]) != null ? _b : 1;
1568
+ }
1569
+ get provider() {
1570
+ return this.config.provider;
1571
+ }
1572
+ async doGenerate({
1573
+ prompt,
1574
+ n,
1575
+ size,
1576
+ aspectRatio,
1577
+ seed,
1578
+ providerOptions,
1579
+ headers,
1580
+ abortSignal
1581
+ }) {
1582
+ var _a, _b, _c, _d;
1583
+ const warnings = [];
1584
+ if (aspectRatio != null) {
1585
+ warnings.push({
1586
+ type: "unsupported-setting",
1587
+ setting: "aspectRatio",
1588
+ details: "This model does not support aspect ratio. Use `size` instead."
1589
+ });
1590
+ }
1591
+ if (seed != null) {
1592
+ warnings.push({ type: "unsupported-setting", setting: "seed" });
1593
+ }
1594
+ const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
1595
+ const { value: response, responseHeaders } = await (0, import_provider_utils6.postJsonToApi)({
1596
+ url: this.config.url({
1597
+ path: "/images/generations",
1598
+ modelId: this.modelId
1599
+ }),
1600
+ headers: (0, import_provider_utils6.combineHeaders)(this.config.headers(), headers),
1601
+ body: {
1602
+ model: this.modelId,
1603
+ prompt,
1604
+ n,
1605
+ size,
1606
+ ...(_d = providerOptions.openai) != null ? _d : {},
1607
+ ...!hasDefaultResponseFormat.has(this.modelId) ? { response_format: "b64_json" } : {}
1608
+ },
1609
+ failedResponseHandler: openaiFailedResponseHandler,
1610
+ successfulResponseHandler: (0, import_provider_utils6.createJsonResponseHandler)(
1611
+ openaiImageResponseSchema
1612
+ ),
1613
+ abortSignal,
1614
+ fetch: this.config.fetch
1615
+ });
1616
+ return {
1617
+ images: response.data.map((item) => item.b64_json),
1618
+ warnings,
1619
+ response: {
1620
+ timestamp: currentDate,
1621
+ modelId: this.modelId,
1622
+ headers: responseHeaders
1623
+ }
1624
+ };
1625
+ }
1626
+ };
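+ // Call sketch (illustrative; `imageModel` is an assumed instance): models
+ // without a default response format are forced to return base64 payloads
+ // via response_format: "b64_json":
+ //
+ //   const { images } = await imageModel.doGenerate({
+ //     prompt: "a lighthouse at dusk", n: 1, size: "1024x1024",
+ //     providerOptions: {}
+ //   });
+ //   // images -> ["iVBORw0KGgo..."]  (response.data[i].b64_json)
+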
1627
+ var openaiImageResponseSchema = import_zod5.z.object({
1628
+ data: import_zod5.z.array(import_zod5.z.object({ b64_json: import_zod5.z.string() }))
1629
+ });
1630
+
1631
+ // src/openai-transcription-model.ts
1632
+ var import_provider_utils7 = require("@ai-sdk/provider-utils");
1633
+ var import_zod6 = require("zod");
1634
+ var openAIProviderOptionsSchema = import_zod6.z.object({
1635
+ include: import_zod6.z.array(import_zod6.z.string()).nullish(),
1636
+ language: import_zod6.z.string().nullish(),
1637
+ prompt: import_zod6.z.string().nullish(),
1638
+ temperature: import_zod6.z.number().min(0).max(1).nullish().default(0),
1639
+ timestampGranularities: import_zod6.z.array(import_zod6.z.enum(["word", "segment"])).nullish().default(["segment"])
1640
+ });
1641
+ var languageMap = {
1642
+ afrikaans: "af",
1643
+ arabic: "ar",
1644
+ armenian: "hy",
1645
+ azerbaijani: "az",
1646
+ belarusian: "be",
1647
+ bosnian: "bs",
1648
+ bulgarian: "bg",
1649
+ catalan: "ca",
1650
+ chinese: "zh",
1651
+ croatian: "hr",
1652
+ czech: "cs",
1653
+ danish: "da",
1654
+ dutch: "nl",
1655
+ english: "en",
1656
+ estonian: "et",
1657
+ finnish: "fi",
1658
+ french: "fr",
1659
+ galician: "gl",
1660
+ german: "de",
1661
+ greek: "el",
1662
+ hebrew: "he",
1663
+ hindi: "hi",
1664
+ hungarian: "hu",
1665
+ icelandic: "is",
1666
+ indonesian: "id",
1667
+ italian: "it",
1668
+ japanese: "ja",
1669
+ kannada: "kn",
1670
+ kazakh: "kk",
1671
+ korean: "ko",
1672
+ latvian: "lv",
1673
+ lithuanian: "lt",
1674
+ macedonian: "mk",
1675
+ malay: "ms",
1676
+ marathi: "mr",
1677
+ maori: "mi",
1678
+ nepali: "ne",
1679
+ norwegian: "no",
1680
+ persian: "fa",
1681
+ polish: "pl",
1682
+ portuguese: "pt",
1683
+ romanian: "ro",
1684
+ russian: "ru",
1685
+ serbian: "sr",
1686
+ slovak: "sk",
1687
+ slovenian: "sl",
1688
+ spanish: "es",
1689
+ swahili: "sw",
1690
+ swedish: "sv",
1691
+ tagalog: "tl",
1692
+ tamil: "ta",
1693
+ thai: "th",
1694
+ turkish: "tr",
1695
+ ukrainian: "uk",
1696
+ urdu: "ur",
1697
+ vietnamese: "vi",
1698
+ welsh: "cy"
1699
+ };
1700
+ var OpenAITranscriptionModel = class {
1701
+ constructor(modelId, config) {
1702
+ this.modelId = modelId;
1703
+ this.config = config;
1704
+ this.specificationVersion = "v1";
1705
+ }
1706
+ get provider() {
1707
+ return this.config.provider;
1708
+ }
1709
+ getArgs({
1710
+ audio,
1711
+ mediaType,
1712
+ providerOptions
1713
+ }) {
1714
+ var _a, _b, _c, _d, _e;
1715
+ const warnings = [];
1716
+ const openAIOptions = (0, import_provider_utils7.parseProviderOptions)({
1717
+ provider: "openai",
1718
+ providerOptions,
1719
+ schema: openAIProviderOptionsSchema
1720
+ });
1721
+ const formData = new FormData();
1722
+ const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([(0, import_provider_utils7.convertBase64ToUint8Array)(audio)]);
1723
+ formData.append("model", this.modelId);
1724
+ formData.append("file", new File([blob], "audio", { type: mediaType }));
1725
+ if (openAIOptions) {
1726
+ const transcriptionModelOptions = {
1727
+ include: (_a = openAIOptions.include) != null ? _a : void 0,
1728
+ language: (_b = openAIOptions.language) != null ? _b : void 0,
1729
+ prompt: (_c = openAIOptions.prompt) != null ? _c : void 0,
1730
+ temperature: (_d = openAIOptions.temperature) != null ? _d : void 0,
1731
+ timestamp_granularities: (_e = openAIOptions.timestampGranularities) != null ? _e : void 0
1732
+ };
1733
+ for (const key in transcriptionModelOptions) {
1734
+ const value = transcriptionModelOptions[key];
1735
+ if (value !== void 0) {
1736
+ formData.append(key, String(value));
1737
+ }
1738
+ }
1739
+ }
1740
+ return {
1741
+ formData,
1742
+ warnings
1743
+ };
1744
+ }
1745
+ async doGenerate(options) {
1746
+ var _a, _b, _c, _d, _e, _f;
1747
+ const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
1748
+ const { formData, warnings } = this.getArgs(options);
1749
+ const {
1750
+ value: response,
1751
+ responseHeaders,
1752
+ rawValue: rawResponse
1753
+ } = await (0, import_provider_utils7.postFormDataToApi)({
1754
+ url: this.config.url({
1755
+ path: "/audio/transcriptions",
1756
+ modelId: this.modelId
1757
+ }),
1758
+ headers: (0, import_provider_utils7.combineHeaders)(this.config.headers(), options.headers),
1759
+ formData,
1760
+ failedResponseHandler: openaiFailedResponseHandler,
1761
+ successfulResponseHandler: (0, import_provider_utils7.createJsonResponseHandler)(
1762
+ openaiTranscriptionResponseSchema
1763
+ ),
1764
+ abortSignal: options.abortSignal,
1765
+ fetch: this.config.fetch
1766
+ });
1767
+ const language = response.language != null && response.language in languageMap ? languageMap[response.language] : void 0;
1768
+ return {
1769
+ text: response.text,
1770
+ segments: (_e = (_d = response.words) == null ? void 0 : _d.map((word) => ({
1771
+ text: word.word,
1772
+ startSecond: word.start,
1773
+ endSecond: word.end
1774
+ }))) != null ? _e : [],
1775
+ language,
1776
+ durationInSeconds: (_f = response.duration) != null ? _f : void 0,
1777
+ warnings,
1778
+ response: {
1779
+ timestamp: currentDate,
1780
+ modelId: this.modelId,
1781
+ headers: responseHeaders,
1782
+ body: rawResponse
1783
+ }
1784
+ };
1785
+ }
1786
+ };
1787
+ var openaiTranscriptionResponseSchema = import_zod6.z.object({
1788
+ text: import_zod6.z.string(),
1789
+ language: import_zod6.z.string().nullish(),
1790
+ duration: import_zod6.z.number().nullish(),
1791
+ words: import_zod6.z.array(
1792
+ import_zod6.z.object({
1793
+ word: import_zod6.z.string(),
1794
+ start: import_zod6.z.number(),
1795
+ end: import_zod6.z.number()
1796
+ })
1797
+ ).nullish()
1798
+ });
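+ // The result's segments are derived from word-level timestamps when the API
+ // includes response.words; otherwise segments falls back to an empty array.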
1799
+
1800
+ // src/openai-speech-model.ts
1801
+ var import_provider_utils8 = require("@ai-sdk/provider-utils");
1802
+ var import_zod7 = require("zod");
1803
+ var OpenAIProviderOptionsSchema = import_zod7.z.object({
1804
+ instructions: import_zod7.z.string().nullish(),
1805
+ speed: import_zod7.z.number().min(0.25).max(4).default(1).nullish()
1806
+ });
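+ // instructions and speed may also arrive via provider options; the schema
+ // validates speed against the 0.25-4 range before getArgs applies it below.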
1807
+ var OpenAISpeechModel = class {
1808
+ constructor(modelId, config) {
1809
+ this.modelId = modelId;
1810
+ this.config = config;
1811
+ this.specificationVersion = "v1";
1812
+ }
1813
+ get provider() {
1814
+ return this.config.provider;
1815
+ }
1816
+ getArgs({
1817
+ text,
1818
+ voice = "alloy",
1819
+ outputFormat = "mp3",
1820
+ speed,
1821
+ instructions,
1822
+ providerOptions
1823
+ }) {
1824
+ const warnings = [];
1825
+ const openAIOptions = (0, import_provider_utils8.parseProviderOptions)({
1826
+ provider: "openai",
1827
+ providerOptions,
1828
+ schema: OpenAIProviderOptionsSchema
1829
+ });
1830
+ const requestBody = {
1831
+ model: this.modelId,
1832
+ input: text,
1833
+ voice,
1834
+ response_format: "mp3",
1835
+ speed,
1836
+ instructions
1837
+ };
1838
+ if (outputFormat) {
1839
+ if (["mp3", "opus", "aac", "flac", "wav", "pcm"].includes(outputFormat)) {
1840
+ requestBody.response_format = outputFormat;
1841
+ } else {
1842
+ warnings.push({
1843
+ type: "unsupported-setting",
1844
+ setting: "outputFormat",
1845
+ details: `Unsupported output format: ${outputFormat}. Using mp3 instead.`
1846
+ });
1847
+ }
1848
+ }
1849
+ if (openAIOptions) {
1850
+ // mirror the parsed provider options so the copy loop below applies them;
+ // the original object literal was empty, which made the loop a no-op and
+ // silently dropped instructions/speed passed as provider options
+ const speechModelOptions = {
+ instructions: openAIOptions.instructions != null ? openAIOptions.instructions : void 0,
+ speed: openAIOptions.speed != null ? openAIOptions.speed : void 0
+ };
1851
+ for (const key in speechModelOptions) {
1852
+ const value = speechModelOptions[key];
1853
+ if (value !== void 0) {
1854
+ requestBody[key] = value;
1855
+ }
1856
+ }
1857
+ }
1858
+ return {
1859
+ requestBody,
1860
+ warnings
1861
+ };
1862
+ }
1863
+ async doGenerate(options) {
1864
+ var _a, _b, _c;
1865
+ const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
1866
+ const { requestBody, warnings } = this.getArgs(options);
1867
+ const {
1868
+ value: audio,
1869
+ responseHeaders,
1870
+ rawValue: rawResponse
1871
+ } = await (0, import_provider_utils8.postJsonToApi)({
1872
+ url: this.config.url({
1873
+ path: "/audio/speech",
1874
+ modelId: this.modelId
1875
+ }),
1876
+ headers: (0, import_provider_utils8.combineHeaders)(this.config.headers(), options.headers),
1877
+ body: requestBody,
1878
+ failedResponseHandler: openaiFailedResponseHandler,
1879
+ successfulResponseHandler: (0, import_provider_utils8.createBinaryResponseHandler)(),
1880
+ abortSignal: options.abortSignal,
1881
+ fetch: this.config.fetch
1882
+ });
1883
+ return {
1884
+ audio,
1885
+ warnings,
1886
+ request: {
1887
+ body: JSON.stringify(requestBody)
1888
+ },
1889
+ response: {
1890
+ timestamp: currentDate,
1891
+ modelId: this.modelId,
1892
+ headers: responseHeaders,
1893
+ body: rawResponse
1894
+ }
1895
+ };
1896
+ }
1897
+ };
1898
+
1899
+ // src/responses/openai-responses-language-model.ts
1900
+ var import_provider_utils10 = require("@ai-sdk/provider-utils");
1901
+ var import_zod8 = require("zod");
1902
+
1903
+ // src/responses/convert-to-openai-responses-messages.ts
1904
+ var import_provider7 = require("@ai-sdk/provider");
1905
+ var import_provider_utils9 = require("@ai-sdk/provider-utils");
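+ // Converts AI SDK prompt messages into Responses API input items: user
+ // content becomes input_text / input_image / input_file parts (PDF only),
+ // assistant tool calls become function_call items, and tool results become
+ // function_call_output items.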
1906
+ function convertToOpenAIResponsesMessages({
1907
+ prompt,
1908
+ systemMessageMode
1909
+ }) {
1910
+ const messages = [];
1911
+ const warnings = [];
1912
+ for (const { role, content } of prompt) {
1913
+ switch (role) {
1914
+ case "system": {
1915
+ switch (systemMessageMode) {
1916
+ case "system": {
1917
+ messages.push({ role: "system", content });
1918
+ break;
1919
+ }
1920
+ case "developer": {
1921
+ messages.push({ role: "developer", content });
1922
+ break;
1923
+ }
1924
+ case "remove": {
1925
+ warnings.push({
1926
+ type: "other",
1927
+ message: "system messages are removed for this model"
1928
+ });
1929
+ break;
1930
+ }
1931
+ default: {
1932
+ const _exhaustiveCheck = systemMessageMode;
1933
+ throw new Error(
1934
+ `Unsupported system message mode: ${_exhaustiveCheck}`
1935
+ );
1936
+ }
1937
+ }
1938
+ break;
1939
+ }
1940
+ case "user": {
1941
+ messages.push({
1942
+ role: "user",
1943
+ content: content.map((part, index) => {
1944
+ var _a, _b, _c, _d;
1945
+ switch (part.type) {
1946
+ case "text": {
1947
+ return { type: "input_text", text: part.text };
1948
+ }
1949
+ case "image": {
1950
+ return {
1951
+ type: "input_image",
1952
+ image_url: part.image instanceof URL ? part.image.toString() : `data:${(_a = part.mimeType) != null ? _a : "image/jpeg"};base64,${(0, import_provider_utils9.convertUint8ArrayToBase64)(part.image)}`,
1953
+ // OpenAI specific extension: image detail
1954
+ detail: (_c = (_b = part.providerMetadata) == null ? void 0 : _b.openai) == null ? void 0 : _c.imageDetail
1955
+ };
1956
+ }
1957
+ case "file": {
1958
+ if (part.data instanceof URL) {
1959
+ throw new import_provider7.UnsupportedFunctionalityError({
1960
+ functionality: "File URLs in user messages"
1961
+ });
1962
+ }
1963
+ switch (part.mimeType) {
1964
+ case "application/pdf": {
1965
+ return {
1966
+ type: "input_file",
1967
+ filename: (_d = part.filename) != null ? _d : `part-${index}.pdf`,
1968
+ file_data: `data:application/pdf;base64,${part.data}`
1969
+ };
1970
+ }
1971
+ default: {
1972
+ throw new import_provider7.UnsupportedFunctionalityError({
1973
+ functionality: "Only PDF files are supported in user messages"
1974
+ });
1975
+ }
1976
+ }
1977
+ }
1978
+ }
1979
+ })
1980
+ });
1981
+ break;
1982
+ }
1983
+ case "assistant": {
1984
+ for (const part of content) {
1985
+ switch (part.type) {
1986
+ case "text": {
1987
+ messages.push({
1988
+ role: "assistant",
1989
+ content: [{ type: "output_text", text: part.text }]
1990
+ });
1991
+ break;
1992
+ }
1993
+ case "tool-call": {
1994
+ messages.push({
1995
+ type: "function_call",
1996
+ call_id: part.toolCallId,
1997
+ name: part.toolName,
1998
+ arguments: JSON.stringify(part.args)
1999
+ });
2000
+ break;
2001
+ }
2002
+ }
2003
+ }
2004
+ break;
2005
+ }
2006
+ case "tool": {
2007
+ for (const part of content) {
2008
+ messages.push({
2009
+ type: "function_call_output",
2010
+ call_id: part.toolCallId,
2011
+ output: JSON.stringify(part.result)
2012
+ });
2013
+ }
2014
+ break;
2015
+ }
2016
+ default: {
2017
+ const _exhaustiveCheck = role;
2018
+ throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
2019
+ }
2020
+ }
2021
+ }
2022
+ return { messages, warnings };
2023
+ }
2024
+
2025
+ // src/responses/map-openai-responses-finish-reason.ts
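+ // Maps the Responses API's incomplete_details.reason onto AI SDK finish
+ // reasons: absent -> "stop" (or "tool-calls" when tool calls were emitted),
+ // "max_output_tokens" -> "length", "content_filter" -> "content-filter",
+ // anything else -> "unknown" (or "tool-calls").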
2026
+ function mapOpenAIResponseFinishReason({
2027
+ finishReason,
2028
+ hasToolCalls
2029
+ }) {
2030
+ switch (finishReason) {
2031
+ case void 0:
2032
+ case null:
2033
+ return hasToolCalls ? "tool-calls" : "stop";
2034
+ case "max_output_tokens":
2035
+ return "length";
2036
+ case "content_filter":
2037
+ return "content-filter";
2038
+ default:
2039
+ return hasToolCalls ? "tool-calls" : "unknown";
2040
+ }
2041
+ }
2042
+
2043
+ // src/responses/openai-responses-prepare-tools.ts
2044
+ var import_provider8 = require("@ai-sdk/provider");
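+ // Translates AI SDK tool definitions into the Responses API tool format:
+ // function tools keep their JSON-schema parameters (optionally strict),
+ // provider-defined tools map to the built-in file_search,
+ // web_search_preview and code_interpreter tools, and anything else is
+ // surfaced as an "unsupported-tool" warning.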
2045
+ function prepareResponsesTools({
2046
+ mode,
2047
+ strict
2048
+ }) {
2049
+ var _a;
2050
+ const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
2051
+ const toolWarnings = [];
2052
+ if (tools == null) {
2053
+ return { tools: void 0, tool_choice: void 0, toolWarnings };
2054
+ }
2055
+ const toolChoice = mode.toolChoice;
2056
+ const openaiTools = [];
2057
+ for (const tool of tools) {
2058
+ switch (tool.type) {
2059
+ case "function":
2060
+ openaiTools.push({
2061
+ type: "function",
2062
+ name: tool.name,
2063
+ description: tool.description,
2064
+ parameters: tool.parameters,
2065
+ strict: strict ? true : void 0
2066
+ });
2067
+ break;
2068
+ case "provider-defined":
2069
+ switch (tool.id) {
2070
+ case "openai.file_search":
2071
+ openaiTools.push({
2072
+ type: "file_search",
2073
+ vector_store_ids: tool.args.vectorStoreIds,
2074
+ max_num_results: tool.args.maxNumResults,
2075
+ ranking: tool.args.ranking,
2076
+ filters: tool.args.filters
2077
+ });
2078
+ break;
2079
+ case "openai.web_search_preview":
2080
+ openaiTools.push({
2081
+ type: "web_search_preview",
2082
+ search_context_size: tool.args.searchContextSize,
2083
+ user_location: tool.args.userLocation
2084
+ });
2085
+ break;
2086
+ case "openai.code_interpreter":
2087
+ openaiTools.push({
2088
+ type: "code_interpreter",
2089
+ container: tool.args.container
2090
+ });
2091
+ break;
2092
+ default:
2093
+ toolWarnings.push({ type: "unsupported-tool", tool });
2094
+ break;
2095
+ }
2096
+ break;
2097
+ default:
2098
+ toolWarnings.push({ type: "unsupported-tool", tool });
2099
+ break;
2100
+ }
2101
+ }
2102
+ if (toolChoice == null) {
2103
+ return { tools: openaiTools, tool_choice: void 0, toolWarnings };
2104
+ }
2105
+ const type = toolChoice.type;
2106
+ switch (type) {
2107
+ case "auto":
2108
+ case "none":
2109
+ case "required":
2110
+ return { tools: openaiTools, tool_choice: type, toolWarnings };
2111
+ case "tool": {
2112
+ if (toolChoice.toolName === "web_search_preview") {
2113
+ return {
2114
+ tools: openaiTools,
2115
+ tool_choice: {
2116
+ type: "web_search_preview"
2117
+ },
2118
+ toolWarnings
2119
+ };
2120
+ }
2121
+ if (toolChoice.toolName === "code_interpreter") {
2122
+ return {
2123
+ tools: openaiTools,
2124
+ tool_choice: {
2125
+ type: "code_interpreter"
2126
+ },
2127
+ toolWarnings
2128
+ };
2129
+ }
2130
+ return {
2131
+ tools: openaiTools,
2132
+ tool_choice: {
2133
+ type: "function",
2134
+ name: toolChoice.toolName
2135
+ },
2136
+ toolWarnings
2137
+ };
2138
+ }
2139
+ default: {
2140
+ const _exhaustiveCheck = type;
2141
+ throw new import_provider8.UnsupportedFunctionalityError({
2142
+ functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`
2143
+ });
2144
+ }
2145
+ }
2146
+ }
2147
+
2148
+ // src/responses/openai-responses-language-model.ts
2149
+ var OpenAIResponsesLanguageModel = class {
2150
+ constructor(modelId, config) {
2151
+ this.specificationVersion = "v1";
2152
+ this.defaultObjectGenerationMode = "json";
2153
+ this.supportsStructuredOutputs = true;
2154
+ this.modelId = modelId;
2155
+ this.config = config;
2156
+ }
2157
+ get provider() {
2158
+ return this.config.provider;
2159
+ }
2160
+ getArgs({
2161
+ mode,
2162
+ maxTokens,
2163
+ temperature,
2164
+ stopSequences,
2165
+ topP,
2166
+ topK,
2167
+ presencePenalty,
2168
+ frequencyPenalty,
2169
+ seed,
2170
+ prompt,
2171
+ providerMetadata,
2172
+ responseFormat
2173
+ }) {
2174
+ var _a, _b, _c;
2175
+ const warnings = [];
2176
+ const modelConfig = getResponsesModelConfig(this.modelId);
2177
+ const type = mode.type;
2178
+ if (topK != null) {
2179
+ warnings.push({
2180
+ type: "unsupported-setting",
2181
+ setting: "topK"
2182
+ });
2183
+ }
2184
+ if (seed != null) {
2185
+ warnings.push({
2186
+ type: "unsupported-setting",
2187
+ setting: "seed"
2188
+ });
2189
+ }
2190
+ if (presencePenalty != null) {
2191
+ warnings.push({
2192
+ type: "unsupported-setting",
2193
+ setting: "presencePenalty"
2194
+ });
2195
+ }
2196
+ if (frequencyPenalty != null) {
2197
+ warnings.push({
2198
+ type: "unsupported-setting",
2199
+ setting: "frequencyPenalty"
2200
+ });
2201
+ }
2202
+ if (stopSequences != null) {
2203
+ warnings.push({
2204
+ type: "unsupported-setting",
2205
+ setting: "stopSequences"
2206
+ });
2207
+ }
2208
+ const { messages, warnings: messageWarnings } = convertToOpenAIResponsesMessages({
2209
+ prompt,
2210
+ systemMessageMode: modelConfig.systemMessageMode
2211
+ });
2212
+ warnings.push(...messageWarnings);
2214
+ const openaiOptions = (0, import_provider_utils10.parseProviderOptions)({
2215
+ provider: "openai",
2216
+ providerOptions: providerMetadata,
2217
+ schema: openaiResponsesProviderOptionsSchema
2218
+ });
2219
+ const isStrict = (_a = openaiOptions == null ? void 0 : openaiOptions.strictSchemas) != null ? _a : true;
2221
+ const baseArgs = {
2222
+ model: this.modelId,
2223
+ input: messages,
2224
+ temperature: (openaiOptions == null ? void 0 : openaiOptions.forceNoTemperature) ? void 0 : temperature,
2225
+ top_p: topP,
2226
+ max_output_tokens: maxTokens,
2227
+ ...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
2228
+ text: {
2229
+ format: responseFormat.schema != null ? {
2230
+ type: "json_schema",
2231
+ strict: isStrict,
2232
+ name: (_b = responseFormat.name) != null ? _b : "response",
2233
+ description: responseFormat.description,
2234
+ schema: responseFormat.schema
2235
+ } : { type: "json_object" }
2236
+ }
2237
+ },
2238
+ // provider options:
2239
+ metadata: openaiOptions == null ? void 0 : openaiOptions.metadata,
2240
+ parallel_tool_calls: openaiOptions == null ? void 0 : openaiOptions.parallelToolCalls,
2241
+ previous_response_id: openaiOptions == null ? void 0 : openaiOptions.previousResponseId,
2242
+ store: openaiOptions == null ? void 0 : openaiOptions.store,
2243
+ user: openaiOptions == null ? void 0 : openaiOptions.user,
2244
+ instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
2245
+ // model-specific settings:
2246
+ ...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
2247
+ reasoning: {
2248
+ ...(openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
2249
+ effort: openaiOptions.reasoningEffort
2250
+ },
2251
+ ...(openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null && {
2252
+ summary: openaiOptions.reasoningSummary
2253
+ }
2254
+ }
2255
+ },
2256
+ ...modelConfig.requiredAutoTruncation && {
2257
+ truncation: "auto"
2258
+ }
2259
+ };
2261
+ if (modelConfig.isReasoningModel) {
2262
+ if (baseArgs.temperature != null) {
2263
+ baseArgs.temperature = void 0;
2264
+ warnings.push({
2265
+ type: "unsupported-setting",
2266
+ setting: "temperature",
2267
+ details: "temperature is not supported for reasoning models"
2268
+ });
2269
+ }
2270
+ if (baseArgs.top_p != null) {
2271
+ baseArgs.top_p = void 0;
2272
+ warnings.push({
2273
+ type: "unsupported-setting",
2274
+ setting: "topP",
2275
+ details: "topP is not supported for reasoning models"
2276
+ });
2277
+ }
2278
+ }
2279
+ switch (type) {
2280
+ case "regular": {
2281
+ const { tools, tool_choice, toolWarnings } = prepareResponsesTools({
2282
+ mode,
2283
+ strict: isStrict
2284
+ // TODO support provider options on tools
2285
+ });
2286
+ return {
2287
+ args: {
2288
+ ...baseArgs,
2289
+ tools,
2290
+ tool_choice
2291
+ },
2292
+ warnings: [...warnings, ...toolWarnings]
2293
+ };
2294
+ }
2295
+ case "object-json": {
2296
+ return {
2297
+ args: {
2298
+ ...baseArgs,
2299
+ text: {
2300
+ format: mode.schema != null ? {
2301
+ type: "json_schema",
2302
+ strict: isStrict,
2303
+ name: (_c = mode.name) != null ? _c : "response",
2304
+ description: mode.description,
2305
+ schema: mode.schema
2306
+ } : { type: "json_object" }
2307
+ }
2308
+ },
2309
+ warnings
2310
+ };
2311
+ }
2312
+ case "object-tool": {
2313
+ return {
2314
+ args: {
2315
+ ...baseArgs,
2316
+ tool_choice: { type: "function", name: mode.tool.name },
2317
+ tools: [
2318
+ {
2319
+ type: "function",
2320
+ name: mode.tool.name,
2321
+ description: mode.tool.description,
2322
+ parameters: mode.tool.parameters,
2323
+ strict: isStrict
2324
+ }
2325
+ ]
2326
+ },
2327
+ warnings
2328
+ };
2329
+ }
2330
+ default: {
2331
+ const _exhaustiveCheck = type;
2332
+ throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
2333
+ }
2334
+ }
2335
+ }
2336
+ async doGenerate(options) {
2337
+ var _a, _b, _c, _d, _e, _f, _g;
2338
+ const { args: body, warnings } = this.getArgs(options);
2339
+ const {
2340
+ responseHeaders,
2341
+ value: response,
2342
+ rawValue: rawResponse
2343
+ } = await (0, import_provider_utils10.postJsonToApi)({
2344
+ url: this.config.url({
2345
+ path: "/responses",
2346
+ modelId: this.modelId
2347
+ }),
2348
+ headers: (0, import_provider_utils10.combineHeaders)(this.config.headers(), options.headers),
2349
+ body,
2350
+ failedResponseHandler: openaiFailedResponseHandler,
2351
+ successfulResponseHandler: (0, import_provider_utils10.createJsonResponseHandler)(
2352
+ import_zod8.z.object({
2353
+ id: import_zod8.z.string(),
2354
+ created_at: import_zod8.z.number(),
2355
+ model: import_zod8.z.string(),
2356
+ output: import_zod8.z.array(
2357
+ import_zod8.z.discriminatedUnion("type", [
2358
+ import_zod8.z.object({
2359
+ type: import_zod8.z.literal("message"),
2360
+ role: import_zod8.z.literal("assistant"),
2361
+ content: import_zod8.z.array(
2362
+ import_zod8.z.object({
2363
+ type: import_zod8.z.literal("output_text"),
2364
+ text: import_zod8.z.string(),
2365
+ annotations: import_zod8.z.array(
2366
+ import_zod8.z.object({
2367
+ type: import_zod8.z.literal("url_citation"),
2368
+ start_index: import_zod8.z.number(),
2369
+ end_index: import_zod8.z.number(),
2370
+ url: import_zod8.z.string(),
2371
+ title: import_zod8.z.string()
2372
+ })
2373
+ )
2374
+ })
2375
+ )
2376
+ }),
2377
+ import_zod8.z.object({
2378
+ type: import_zod8.z.literal("function_call"),
2379
+ call_id: import_zod8.z.string(),
2380
+ name: import_zod8.z.string(),
2381
+ arguments: import_zod8.z.string()
2382
+ }),
2383
+ import_zod8.z.object({
2384
+ type: import_zod8.z.literal("web_search_call")
2385
+ }),
2386
+ import_zod8.z.object({
2387
+ type: import_zod8.z.literal("computer_call")
2388
+ }),
2389
+ import_zod8.z.object({
2390
+ type: import_zod8.z.literal("reasoning"),
2391
+ summary: import_zod8.z.array(
2392
+ import_zod8.z.object({
2393
+ type: import_zod8.z.literal("summary_text"),
2394
+ text: import_zod8.z.string()
2395
+ })
2396
+ )
2397
+ })
2398
+ ])
2399
+ ),
2400
+ incomplete_details: import_zod8.z.object({ reason: import_zod8.z.string() }).nullable(),
2401
+ usage: usageSchema
2402
+ })
2403
+ ),
2404
+ abortSignal: options.abortSignal,
2405
+ fetch: this.config.fetch
2406
+ });
2407
+ const outputTextElements = response.output.filter((output) => output.type === "message").flatMap((output) => output.content).filter((content) => content.type === "output_text");
2408
+ const toolCalls = response.output.filter((output) => output.type === "function_call").map((output) => ({
2409
+ toolCallType: "function",
2410
+ toolCallId: output.call_id,
2411
+ toolName: output.name,
2412
+ args: output.arguments
2413
+ }));
2414
+ const reasoningSummary = (_b = (_a = response.output.find((item) => item.type === "reasoning")) == null ? void 0 : _a.summary) != null ? _b : null;
2415
+ return {
2416
+ text: outputTextElements.map((content) => content.text).join("\n"),
2417
+ sources: outputTextElements.flatMap(
2418
+ (content) => content.annotations.map((annotation) => {
2419
+ var _a2, _b2, _c2;
2420
+ return {
2421
+ sourceType: "url",
2422
+ id: (_c2 = (_b2 = (_a2 = this.config).generateId) == null ? void 0 : _b2.call(_a2)) != null ? _c2 : (0, import_provider_utils10.generateId)(),
2423
+ url: annotation.url,
2424
+ title: annotation.title
2425
+ };
2426
+ })
2427
+ ),
2428
+ finishReason: mapOpenAIResponseFinishReason({
2429
+ finishReason: (_c = response.incomplete_details) == null ? void 0 : _c.reason,
2430
+ hasToolCalls: toolCalls.length > 0
2431
+ }),
2432
+ toolCalls: toolCalls.length > 0 ? toolCalls : void 0,
2433
+ reasoning: reasoningSummary ? reasoningSummary.map((summary) => ({
2434
+ type: "text",
2435
+ text: summary.text
2436
+ })) : void 0,
2437
+ usage: {
2438
+ promptTokens: response.usage.input_tokens,
2439
+ completionTokens: response.usage.output_tokens
2440
+ },
2441
+ rawCall: {
2442
+ rawPrompt: void 0,
2443
+ rawSettings: {}
2444
+ },
2445
+ rawResponse: {
2446
+ headers: responseHeaders,
2447
+ body: rawResponse
2448
+ },
2449
+ request: {
2450
+ body: JSON.stringify(body)
2451
+ },
2452
+ response: {
2453
+ id: response.id,
2454
+ timestamp: new Date(response.created_at * 1e3),
2455
+ modelId: response.model
2456
+ },
2457
+ providerMetadata: {
2458
+ openai: {
2459
+ responseId: response.id,
2460
+ cachedPromptTokens: (_e = (_d = response.usage.input_tokens_details) == null ? void 0 : _d.cached_tokens) != null ? _e : null,
2461
+ reasoningTokens: (_g = (_f = response.usage.output_tokens_details) == null ? void 0 : _f.reasoning_tokens) != null ? _g : null
2462
+ }
2463
+ },
2464
+ warnings
2465
+ };
2466
+ }
2467
+ async doStream(options) {
2468
+ const { args: body, warnings } = this.getArgs(options);
2469
+ const { responseHeaders, value: response } = await (0, import_provider_utils10.postJsonToApi)({
2470
+ url: this.config.url({
2471
+ path: "/responses",
2472
+ modelId: this.modelId
2473
+ }),
2474
+ headers: (0, import_provider_utils10.combineHeaders)(this.config.headers(), options.headers),
2475
+ body: {
2476
+ ...body,
2477
+ stream: true
2478
+ },
2479
+ failedResponseHandler: openaiFailedResponseHandler,
2480
+ successfulResponseHandler: (0, import_provider_utils10.createEventSourceResponseHandler)(
2481
+ openaiResponsesChunkSchema
2482
+ ),
2483
+ abortSignal: options.abortSignal,
2484
+ fetch: this.config.fetch
2485
+ });
2486
+ const self = this;
2487
+ let finishReason = "unknown";
2488
+ let promptTokens = NaN;
2489
+ let completionTokens = NaN;
2490
+ let cachedPromptTokens = null;
2491
+ let reasoningTokens = null;
2492
+ let responseId = null;
2493
+ const ongoingToolCalls = {};
2494
+ let hasToolCalls = false;
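+ // The transform below converts Responses SSE chunks into AI SDK stream
+ // parts: output_item.added / function_call_arguments.delta become
+ // tool-call-delta parts, output_item.done completes a tool call,
+ // output_text.delta becomes text-delta, reasoning summary deltas become
+ // reasoning parts, url_citation annotations become source parts, and
+ // response.completed/incomplete captures usage for the final "finish" part
+ // emitted in flush().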
2495
+ return {
2496
+ stream: response.pipeThrough(
2497
+ new TransformStream({
2498
+ transform(chunk, controller) {
2499
+ var _a, _b, _c, _d, _e, _f, _g, _h;
2500
+ if (!chunk.success) {
2501
+ finishReason = "error";
2502
+ controller.enqueue({ type: "error", error: chunk.error });
2503
+ return;
2504
+ }
2505
+ const value = chunk.value;
2506
+ if (isResponseOutputItemAddedChunk(value)) {
2507
+ if (value.item.type === "function_call") {
2508
+ ongoingToolCalls[value.output_index] = {
2509
+ toolName: value.item.name,
2510
+ toolCallId: value.item.call_id
2511
+ };
2512
+ controller.enqueue({
2513
+ type: "tool-call-delta",
2514
+ toolCallType: "function",
2515
+ toolCallId: value.item.call_id,
2516
+ toolName: value.item.name,
2517
+ argsTextDelta: value.item.arguments
2518
+ });
2519
+ }
2520
+ } else if (isResponseFunctionCallArgumentsDeltaChunk(value)) {
2521
+ const toolCall = ongoingToolCalls[value.output_index];
2522
+ if (toolCall != null) {
2523
+ controller.enqueue({
2524
+ type: "tool-call-delta",
2525
+ toolCallType: "function",
2526
+ toolCallId: toolCall.toolCallId,
2527
+ toolName: toolCall.toolName,
2528
+ argsTextDelta: value.delta
2529
+ });
2530
+ }
2531
+ } else if (isResponseCreatedChunk(value)) {
2532
+ responseId = value.response.id;
2533
+ controller.enqueue({
2534
+ type: "response-metadata",
2535
+ id: value.response.id,
2536
+ timestamp: new Date(value.response.created_at * 1e3),
2537
+ modelId: value.response.model
2538
+ });
2539
+ } else if (isTextDeltaChunk(value)) {
2540
+ controller.enqueue({
2541
+ type: "text-delta",
2542
+ textDelta: value.delta
2543
+ });
2544
+ } else if (isResponseReasoningSummaryTextDeltaChunk(value)) {
2545
+ controller.enqueue({
2546
+ type: "reasoning",
2547
+ textDelta: value.delta
2548
+ });
2549
+ } else if (isResponseOutputItemDoneChunk(value) && value.item.type === "function_call") {
2550
+ ongoingToolCalls[value.output_index] = void 0;
2551
+ hasToolCalls = true;
2552
+ controller.enqueue({
2553
+ type: "tool-call",
2554
+ toolCallType: "function",
2555
+ toolCallId: value.item.call_id,
2556
+ toolName: value.item.name,
2557
+ args: value.item.arguments
2558
+ });
2559
+ } else if (isResponseFinishedChunk(value)) {
2560
+ finishReason = mapOpenAIResponseFinishReason({
2561
+ finishReason: (_a = value.response.incomplete_details) == null ? void 0 : _a.reason,
2562
+ hasToolCalls
2563
+ });
2564
+ promptTokens = value.response.usage.input_tokens;
2565
+ completionTokens = value.response.usage.output_tokens;
2566
+ cachedPromptTokens = (_c = (_b = value.response.usage.input_tokens_details) == null ? void 0 : _b.cached_tokens) != null ? _c : cachedPromptTokens;
2567
+ reasoningTokens = (_e = (_d = value.response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : reasoningTokens;
2568
+ } else if (isResponseAnnotationAddedChunk(value)) {
2569
+ controller.enqueue({
2570
+ type: "source",
2571
+ source: {
2572
+ sourceType: "url",
2573
+ id: (_h = (_g = (_f = self.config).generateId) == null ? void 0 : _g.call(_f)) != null ? _h : (0, import_provider_utils10.generateId)(),
2574
+ url: value.annotation.url,
2575
+ title: value.annotation.title
2576
+ }
2577
+ });
2578
+ }
2579
+ },
2580
+ flush(controller) {
2581
+ controller.enqueue({
2582
+ type: "finish",
2583
+ finishReason,
2584
+ usage: { promptTokens, completionTokens },
2585
+ ...(cachedPromptTokens != null || reasoningTokens != null) && {
2586
+ providerMetadata: {
2587
+ openai: {
2588
+ responseId,
2589
+ cachedPromptTokens,
2590
+ reasoningTokens
2591
+ }
2592
+ }
2593
+ }
2594
+ });
2595
+ }
2596
+ })
2597
+ ),
2598
+ rawCall: {
2599
+ rawPrompt: void 0,
2600
+ rawSettings: {}
2601
+ },
2602
+ rawResponse: { headers: responseHeaders },
2603
+ request: { body: JSON.stringify(body) },
2604
+ warnings
2605
+ };
2606
+ }
2607
+ };
2608
+ var usageSchema = import_zod8.z.object({
2609
+ input_tokens: import_zod8.z.number(),
2610
+ input_tokens_details: import_zod8.z.object({ cached_tokens: import_zod8.z.number().nullish() }).nullish(),
2611
+ output_tokens: import_zod8.z.number(),
2612
+ output_tokens_details: import_zod8.z.object({ reasoning_tokens: import_zod8.z.number().nullish() }).nullish()
2613
+ });
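+ // Token detail objects are nullish-tolerant so responses that omit
+ // cached_tokens or reasoning_tokens still parse.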
2614
+ var textDeltaChunkSchema = import_zod8.z.object({
2615
+ type: import_zod8.z.literal("response.output_text.delta"),
2616
+ delta: import_zod8.z.string()
2617
+ });
2618
+ var responseFinishedChunkSchema = import_zod8.z.object({
2619
+ type: import_zod8.z.enum(["response.completed", "response.incomplete"]),
2620
+ response: import_zod8.z.object({
2621
+ incomplete_details: import_zod8.z.object({ reason: import_zod8.z.string() }).nullish(),
2622
+ usage: usageSchema
2623
+ })
2624
+ });
2625
+ var responseCreatedChunkSchema = import_zod8.z.object({
2626
+ type: import_zod8.z.literal("response.created"),
2627
+ response: import_zod8.z.object({
2628
+ id: import_zod8.z.string(),
2629
+ created_at: import_zod8.z.number(),
2630
+ model: import_zod8.z.string()
2631
+ })
2632
+ });
2633
+ var responseOutputItemDoneSchema = import_zod8.z.object({
2634
+ type: import_zod8.z.literal("response.output_item.done"),
2635
+ output_index: import_zod8.z.number(),
2636
+ item: import_zod8.z.discriminatedUnion("type", [
2637
+ import_zod8.z.object({
2638
+ type: import_zod8.z.literal("message")
2639
+ }),
2640
+ import_zod8.z.object({
2641
+ type: import_zod8.z.literal("function_call"),
2642
+ id: import_zod8.z.string(),
2643
+ call_id: import_zod8.z.string(),
2644
+ name: import_zod8.z.string(),
2645
+ arguments: import_zod8.z.string(),
2646
+ status: import_zod8.z.literal("completed")
2647
+ })
2648
+ ])
2649
+ });
2650
+ var responseFunctionCallArgumentsDeltaSchema = import_zod8.z.object({
2651
+ type: import_zod8.z.literal("response.function_call_arguments.delta"),
2652
+ item_id: import_zod8.z.string(),
2653
+ output_index: import_zod8.z.number(),
2654
+ delta: import_zod8.z.string()
2655
+ });
2656
+ var responseOutputItemAddedSchema = import_zod8.z.object({
2657
+ type: import_zod8.z.literal("response.output_item.added"),
2658
+ output_index: import_zod8.z.number(),
2659
+ item: import_zod8.z.discriminatedUnion("type", [
2660
+ import_zod8.z.object({
2661
+ type: import_zod8.z.literal("message")
2662
+ }),
2663
+ import_zod8.z.object({
2664
+ type: import_zod8.z.literal("function_call"),
2665
+ id: import_zod8.z.string(),
2666
+ call_id: import_zod8.z.string(),
2667
+ name: import_zod8.z.string(),
2668
+ arguments: import_zod8.z.string()
2669
+ })
2670
+ ])
2671
+ });
2672
+ var responseAnnotationAddedSchema = import_zod8.z.object({
2673
+ type: import_zod8.z.literal("response.output_text.annotation.added"),
2674
+ annotation: import_zod8.z.object({
2675
+ type: import_zod8.z.literal("url_citation"),
2676
+ url: import_zod8.z.string(),
2677
+ title: import_zod8.z.string()
2678
+ })
2679
+ });
2680
+ var responseReasoningSummaryTextDeltaSchema = import_zod8.z.object({
2681
+ type: import_zod8.z.literal("response.reasoning_summary_text.delta"),
2682
+ item_id: import_zod8.z.string(),
2683
+ output_index: import_zod8.z.number(),
2684
+ summary_index: import_zod8.z.number(),
2685
+ delta: import_zod8.z.string()
2686
+ });
2687
+ var openaiResponsesChunkSchema = import_zod8.z.union([
2688
+ textDeltaChunkSchema,
2689
+ responseFinishedChunkSchema,
2690
+ responseCreatedChunkSchema,
2691
+ responseOutputItemDoneSchema,
2692
+ responseFunctionCallArgumentsDeltaSchema,
2693
+ responseOutputItemAddedSchema,
2694
+ responseAnnotationAddedSchema,
2695
+ responseReasoningSummaryTextDeltaSchema,
2696
+ import_zod8.z.object({ type: import_zod8.z.string() }).passthrough()
2697
+ // fallback for unknown chunks
2698
+ ]);
2699
+ function isTextDeltaChunk(chunk) {
2700
+ return chunk.type === "response.output_text.delta";
2701
+ }
2702
+ function isResponseOutputItemDoneChunk(chunk) {
2703
+ return chunk.type === "response.output_item.done";
2704
+ }
2705
+ function isResponseFinishedChunk(chunk) {
2706
+ return chunk.type === "response.completed" || chunk.type === "response.incomplete";
2707
+ }
2708
+ function isResponseCreatedChunk(chunk) {
2709
+ return chunk.type === "response.created";
2710
+ }
2711
+ function isResponseFunctionCallArgumentsDeltaChunk(chunk) {
2712
+ return chunk.type === "response.function_call_arguments.delta";
2713
+ }
2714
+ function isResponseOutputItemAddedChunk(chunk) {
2715
+ return chunk.type === "response.output_item.added";
2716
+ }
2717
+ function isResponseAnnotationAddedChunk(chunk) {
2718
+ return chunk.type === "response.output_text.annotation.added";
2719
+ }
2720
+ function isResponseReasoningSummaryTextDeltaChunk(chunk) {
2721
+ return chunk.type === "response.reasoning_summary_text.delta";
2722
+ }
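+ // Model-family heuristics: ids starting with "o" are treated as reasoning
+ // models (temperature/topP are stripped with warnings, reasoning options
+ // allowed). o1-mini and o1-preview get systemMessageMode "remove" (system
+ // prompts are dropped with a warning, as those models reject them); other
+ // o-series ids use the "developer" role instead of "system".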
2723
+ function getResponsesModelConfig(modelId) {
2724
+ if (modelId.startsWith("o")) {
2725
+ if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {
2726
+ return {
2727
+ isReasoningModel: true,
2728
+ systemMessageMode: "remove",
2729
+ requiredAutoTruncation: false
2730
+ };
2731
+ }
2732
+ return {
2733
+ isReasoningModel: true,
2734
+ systemMessageMode: "developer",
2735
+ requiredAutoTruncation: false
2736
+ };
2737
+ }
2738
+ return {
2739
+ isReasoningModel: false,
2740
+ systemMessageMode: "system",
2741
+ requiredAutoTruncation: false
2742
+ };
2743
+ }
2744
+ var openaiResponsesProviderOptionsSchema = import_zod8.z.object({
2745
+ metadata: import_zod8.z.any().nullish(),
2746
+ parallelToolCalls: import_zod8.z.boolean().nullish(),
2747
+ previousResponseId: import_zod8.z.string().nullish(),
2748
+ forceNoTemperature: import_zod8.z.boolean().nullish(),
2749
+ store: import_zod8.z.boolean().nullish(),
2750
+ user: import_zod8.z.string().nullish(),
2751
+ reasoningEffort: import_zod8.z.string().nullish(),
2752
+ strictSchemas: import_zod8.z.boolean().nullish(),
2753
+ instructions: import_zod8.z.string().nullish(),
2754
+ reasoningSummary: import_zod8.z.string().nullish()
2755
+ });
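+ // A minimal usage sketch (assumptions: the consuming app uses the AI SDK's
+ // generateText and an openai.responses(...) factory; the model id and values
+ // are hypothetical). Options are passed under the "openai" key and parsed by
+ // the schema above:
+ //
+ //   const result = await generateText({
+ //     model: openai.responses("o3-mini"),
+ //     prompt: "Summarize this repository",
+ //     providerOptions: {
+ //       openai: { reasoningEffort: "low", store: false, user: "user-123" },
+ //     },
+ //   });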
2756
+ // Annotate the CommonJS export names for ESM import in node:
2757
+ 0 && (module.exports = {
2758
+ OpenAIChatLanguageModel,
2759
+ OpenAICompletionLanguageModel,
2760
+ OpenAIEmbeddingModel,
2761
+ OpenAIImageModel,
2762
+ OpenAIResponsesLanguageModel,
2763
+ OpenAISpeechModel,
2764
+ OpenAITranscriptionModel,
2765
+ hasDefaultResponseFormat,
2766
+ modelMaxImagesPerCall
2767
+ });
2768
+ //# sourceMappingURL=index.js.map