@zenning/openai 1.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js ADDED
@@ -0,0 +1,2952 @@
1
+ "use strict";
2
+ var __defProp = Object.defineProperty;
3
+ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
4
+ var __getOwnPropNames = Object.getOwnPropertyNames;
5
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
6
+ var __export = (target, all) => {
7
+ for (var name in all)
8
+ __defProp(target, name, { get: all[name], enumerable: true });
9
+ };
10
+ var __copyProps = (to, from, except, desc) => {
11
+ if (from && typeof from === "object" || typeof from === "function") {
12
+ for (let key of __getOwnPropNames(from))
13
+ if (!__hasOwnProp.call(to, key) && key !== except)
14
+ __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
15
+ }
16
+ return to;
17
+ };
18
+ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
19
+
20
+ // src/index.ts
21
+ var src_exports = {};
22
+ __export(src_exports, {
23
+ createOpenAI: () => createOpenAI,
24
+ openai: () => openai
25
+ });
26
+ module.exports = __toCommonJS(src_exports);
27
+
28
+ // src/openai-provider.ts
29
+ var import_provider_utils11 = require("@ai-sdk/provider-utils");
30
+
31
+ // src/openai-chat-language-model.ts
32
+ var import_provider3 = require("@ai-sdk/provider");
33
+ var import_provider_utils3 = require("@ai-sdk/provider-utils");
34
+ var import_zod2 = require("zod");
35
+
36
+ // src/convert-to-openai-chat-messages.ts
37
+ var import_provider = require("@ai-sdk/provider");
38
+ var import_provider_utils = require("@ai-sdk/provider-utils");
39
+ function convertToOpenAIChatMessages({
40
+ prompt,
41
+ useLegacyFunctionCalling = false,
42
+ systemMessageMode = "system"
43
+ }) {
44
+ const messages = [];
45
+ const warnings = [];
46
+ for (const { role, content } of prompt) {
47
+ switch (role) {
48
+ case "system": {
49
+ switch (systemMessageMode) {
50
+ case "system": {
51
+ messages.push({ role: "system", content });
52
+ break;
53
+ }
54
+ case "developer": {
55
+ messages.push({ role: "developer", content });
56
+ break;
57
+ }
58
+ case "remove": {
59
+ warnings.push({
60
+ type: "other",
61
+ message: "system messages are removed for this model"
62
+ });
63
+ break;
64
+ }
65
+ default: {
66
+ const _exhaustiveCheck = systemMessageMode;
67
+ throw new Error(
68
+ `Unsupported system message mode: ${_exhaustiveCheck}`
69
+ );
70
+ }
71
+ }
72
+ break;
73
+ }
74
+ case "user": {
75
+ if (content.length === 1 && content[0].type === "text") {
76
+ messages.push({ role: "user", content: content[0].text });
77
+ break;
78
+ }
79
+ messages.push({
80
+ role: "user",
81
+ content: content.map((part, index) => {
82
+ var _a, _b, _c, _d;
83
+ switch (part.type) {
84
+ case "text": {
85
+ return { type: "text", text: part.text };
86
+ }
87
+ case "image": {
88
+ return {
89
+ type: "image_url",
90
+ image_url: {
91
+ url: part.image instanceof URL ? part.image.toString() : `data:${(_a = part.mimeType) != null ? _a : "image/jpeg"};base64,${(0, import_provider_utils.convertUint8ArrayToBase64)(part.image)}`,
92
+ // OpenAI specific extension: image detail
93
+ detail: (_c = (_b = part.providerMetadata) == null ? void 0 : _b.openai) == null ? void 0 : _c.imageDetail
94
+ }
95
+ };
96
+ }
97
+ case "file": {
98
+ if (part.data instanceof URL) {
99
+ throw new import_provider.UnsupportedFunctionalityError({
100
+ functionality: "'File content parts with URL data' functionality not supported."
101
+ });
102
+ }
103
+ switch (part.mimeType) {
104
+ case "audio/wav": {
105
+ return {
106
+ type: "input_audio",
107
+ input_audio: { data: part.data, format: "wav" }
108
+ };
109
+ }
110
+ case "audio/mp3":
111
+ case "audio/mpeg": {
112
+ return {
113
+ type: "input_audio",
114
+ input_audio: { data: part.data, format: "mp3" }
115
+ };
116
+ }
117
+ case "application/pdf": {
118
+ return {
119
+ type: "file",
120
+ file: {
121
+ filename: (_d = part.filename) != null ? _d : `part-${index}.pdf`,
122
+ file_data: `data:application/pdf;base64,${part.data}`
123
+ }
124
+ };
125
+ }
126
+ default: {
127
+ throw new import_provider.UnsupportedFunctionalityError({
128
+ functionality: `File content part type ${part.mimeType} in user messages`
129
+ });
130
+ }
131
+ }
132
+ }
133
+ }
134
+ })
135
+ });
136
+ break;
137
+ }
138
+ case "assistant": {
139
+ let text = "";
140
+ const toolCalls = [];
141
+ for (const part of content) {
142
+ switch (part.type) {
143
+ case "text": {
144
+ text += part.text;
145
+ break;
146
+ }
147
+ case "tool-call": {
148
+ toolCalls.push({
149
+ id: part.toolCallId,
150
+ type: "function",
151
+ function: {
152
+ name: part.toolName,
153
+ arguments: JSON.stringify(part.args)
154
+ }
155
+ });
156
+ break;
157
+ }
158
+ }
159
+ }
160
+ if (useLegacyFunctionCalling) {
161
+ if (toolCalls.length > 1) {
162
+ throw new import_provider.UnsupportedFunctionalityError({
163
+ functionality: "useLegacyFunctionCalling with multiple tool calls in one message"
164
+ });
165
+ }
166
+ messages.push({
167
+ role: "assistant",
168
+ content: text,
169
+ function_call: toolCalls.length > 0 ? toolCalls[0].function : void 0
170
+ });
171
+ } else {
172
+ messages.push({
173
+ role: "assistant",
174
+ content: text,
175
+ tool_calls: toolCalls.length > 0 ? toolCalls : void 0
176
+ });
177
+ }
178
+ break;
179
+ }
180
+ case "tool": {
181
+ for (const toolResponse of content) {
182
+ if (useLegacyFunctionCalling) {
183
+ messages.push({
184
+ role: "function",
185
+ name: toolResponse.toolName,
186
+ content: JSON.stringify(toolResponse.result)
187
+ });
188
+ } else {
189
+ messages.push({
190
+ role: "tool",
191
+ tool_call_id: toolResponse.toolCallId,
192
+ content: JSON.stringify(toolResponse.result)
193
+ });
194
+ }
195
+ }
196
+ break;
197
+ }
198
+ default: {
199
+ const _exhaustiveCheck = role;
200
+ throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
201
+ }
202
+ }
203
+ }
204
+ return { messages, warnings };
205
+ }
206
+
207
+ // src/map-openai-chat-logprobs.ts
208
+ function mapOpenAIChatLogProbsOutput(logprobs) {
209
+ var _a, _b;
210
+ return (_b = (_a = logprobs == null ? void 0 : logprobs.content) == null ? void 0 : _a.map(({ token, logprob, top_logprobs }) => ({
211
+ token,
212
+ logprob,
213
+ topLogprobs: top_logprobs ? top_logprobs.map(({ token: token2, logprob: logprob2 }) => ({
214
+ token: token2,
215
+ logprob: logprob2
216
+ })) : []
217
+ }))) != null ? _b : void 0;
218
+ }
219
+
220
+ // src/map-openai-finish-reason.ts
221
+ function mapOpenAIFinishReason(finishReason) {
222
+ switch (finishReason) {
223
+ case "stop":
224
+ return "stop";
225
+ case "length":
226
+ return "length";
227
+ case "content_filter":
228
+ return "content-filter";
229
+ case "function_call":
230
+ case "tool_calls":
231
+ return "tool-calls";
232
+ default:
233
+ return "unknown";
234
+ }
235
+ }
236
+
237
+ // src/openai-error.ts
238
+ var import_zod = require("zod");
239
+ var import_provider_utils2 = require("@ai-sdk/provider-utils");
240
+ var openaiErrorDataSchema = import_zod.z.object({
241
+ error: import_zod.z.object({
242
+ message: import_zod.z.string(),
243
+ // The additional information below is handled loosely to support
244
+ // OpenAI-compatible providers that have slightly different error
245
+ // responses:
246
+ type: import_zod.z.string().nullish(),
247
+ param: import_zod.z.any().nullish(),
248
+ code: import_zod.z.union([import_zod.z.string(), import_zod.z.number()]).nullish()
249
+ })
250
+ });
251
+ var openaiFailedResponseHandler = (0, import_provider_utils2.createJsonErrorResponseHandler)({
252
+ errorSchema: openaiErrorDataSchema,
253
+ errorToMessage: (data) => data.error.message
254
+ });
255
+
256
+ // src/get-response-metadata.ts
257
+ function getResponseMetadata({
258
+ id,
259
+ model,
260
+ created
261
+ }) {
262
+ return {
263
+ id: id != null ? id : void 0,
264
+ modelId: model != null ? model : void 0,
265
+ timestamp: created != null ? new Date(created * 1e3) : void 0
266
+ };
267
+ }
268
+
269
+ // src/openai-prepare-tools.ts
270
+ var import_provider2 = require("@ai-sdk/provider");
271
+ function prepareTools({
272
+ mode,
273
+ useLegacyFunctionCalling = false,
274
+ structuredOutputs
275
+ }) {
276
+ var _a;
277
+ const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
278
+ const toolWarnings = [];
279
+ if (tools == null) {
280
+ return { tools: void 0, tool_choice: void 0, toolWarnings };
281
+ }
282
+ const toolChoice = mode.toolChoice;
283
+ if (useLegacyFunctionCalling) {
284
+ const openaiFunctions = [];
285
+ for (const tool of tools) {
286
+ if (tool.type === "provider-defined") {
287
+ toolWarnings.push({ type: "unsupported-tool", tool });
288
+ } else {
289
+ openaiFunctions.push({
290
+ name: tool.name,
291
+ description: tool.description,
292
+ parameters: tool.parameters
293
+ });
294
+ }
295
+ }
296
+ if (toolChoice == null) {
297
+ return {
298
+ functions: openaiFunctions,
299
+ function_call: void 0,
300
+ toolWarnings
301
+ };
302
+ }
303
+ const type2 = toolChoice.type;
304
+ switch (type2) {
305
+ case "auto":
306
+ case "none":
307
+ case void 0:
308
+ return {
309
+ functions: openaiFunctions,
310
+ function_call: void 0,
311
+ toolWarnings
312
+ };
313
+ case "required":
314
+ throw new import_provider2.UnsupportedFunctionalityError({
315
+ functionality: "useLegacyFunctionCalling and toolChoice: required"
316
+ });
317
+ default:
318
+ return {
319
+ functions: openaiFunctions,
320
+ function_call: { name: toolChoice.toolName },
321
+ toolWarnings
322
+ };
323
+ }
324
+ }
325
+ const openaiTools2 = [];
326
+ for (const tool of tools) {
327
+ if (tool.type === "provider-defined") {
328
+ toolWarnings.push({ type: "unsupported-tool", tool });
329
+ } else {
330
+ openaiTools2.push({
331
+ type: "function",
332
+ function: {
333
+ name: tool.name,
334
+ description: tool.description,
335
+ parameters: tool.parameters,
336
+ strict: structuredOutputs ? true : void 0
337
+ }
338
+ });
339
+ }
340
+ }
341
+ if (toolChoice == null) {
342
+ return { tools: openaiTools2, tool_choice: void 0, toolWarnings };
343
+ }
344
+ const type = toolChoice.type;
345
+ switch (type) {
346
+ case "auto":
347
+ case "none":
348
+ case "required":
349
+ return { tools: openaiTools2, tool_choice: type, toolWarnings };
350
+ case "tool":
351
+ return {
352
+ tools: openaiTools2,
353
+ tool_choice: {
354
+ type: "function",
355
+ function: {
356
+ name: toolChoice.toolName
357
+ }
358
+ },
359
+ toolWarnings
360
+ };
361
+ default: {
362
+ const _exhaustiveCheck = type;
363
+ throw new import_provider2.UnsupportedFunctionalityError({
364
+ functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`
365
+ });
366
+ }
367
+ }
368
+ }
369
+
370
+ // src/openai-chat-language-model.ts
371
+ var OpenAIChatLanguageModel = class {
372
+ constructor(modelId, settings, config) {
373
+ this.specificationVersion = "v1";
374
+ this.modelId = modelId;
375
+ this.settings = settings;
376
+ this.config = config;
377
+ }
378
+ get supportsStructuredOutputs() {
379
+ var _a;
380
+ return (_a = this.settings.structuredOutputs) != null ? _a : isReasoningModel(this.modelId);
381
+ }
382
+ get defaultObjectGenerationMode() {
383
+ if (isAudioModel(this.modelId)) {
384
+ return "tool";
385
+ }
386
+ return this.supportsStructuredOutputs ? "json" : "tool";
387
+ }
388
+ get provider() {
389
+ return this.config.provider;
390
+ }
391
+ get supportsImageUrls() {
392
+ return !this.settings.downloadImages;
393
+ }
394
+ getArgs({
395
+ mode,
396
+ prompt,
397
+ maxTokens,
398
+ temperature,
399
+ topP,
400
+ topK,
401
+ frequencyPenalty,
402
+ presencePenalty,
403
+ stopSequences,
404
+ responseFormat,
405
+ seed,
406
+ providerMetadata
407
+ }) {
408
+ var _a, _b, _c, _d, _e, _f, _g, _h;
409
+ const type = mode.type;
410
+ const warnings = [];
411
+ if (topK != null) {
412
+ warnings.push({
413
+ type: "unsupported-setting",
414
+ setting: "topK"
415
+ });
416
+ }
417
+ if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !this.supportsStructuredOutputs) {
418
+ warnings.push({
419
+ type: "unsupported-setting",
420
+ setting: "responseFormat",
421
+ details: "JSON response format schema is only supported with structuredOutputs"
422
+ });
423
+ }
424
+ const useLegacyFunctionCalling = this.settings.useLegacyFunctionCalling;
425
+ if (useLegacyFunctionCalling && this.settings.parallelToolCalls === true) {
426
+ throw new import_provider3.UnsupportedFunctionalityError({
427
+ functionality: "useLegacyFunctionCalling with parallelToolCalls"
428
+ });
429
+ }
430
+ if (useLegacyFunctionCalling && this.supportsStructuredOutputs) {
431
+ throw new import_provider3.UnsupportedFunctionalityError({
432
+ functionality: "structuredOutputs with useLegacyFunctionCalling"
433
+ });
434
+ }
435
+ const { messages, warnings: messageWarnings } = convertToOpenAIChatMessages(
436
+ {
437
+ prompt,
438
+ useLegacyFunctionCalling,
439
+ systemMessageMode: getSystemMessageMode(this.modelId)
440
+ }
441
+ );
442
+ warnings.push(...messageWarnings);
443
+ const baseArgs = {
444
+ // model id:
445
+ model: this.modelId,
446
+ // model specific settings:
447
+ logit_bias: this.settings.logitBias,
448
+ logprobs: this.settings.logprobs === true || typeof this.settings.logprobs === "number" ? true : void 0,
449
+ top_logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
450
+ user: this.settings.user,
451
+ parallel_tool_calls: this.settings.parallelToolCalls,
452
+ // standardized settings:
453
+ max_tokens: maxTokens,
454
+ temperature,
455
+ top_p: topP,
456
+ frequency_penalty: frequencyPenalty,
457
+ presence_penalty: presencePenalty,
458
+ response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? this.supportsStructuredOutputs && responseFormat.schema != null ? {
459
+ type: "json_schema",
460
+ json_schema: {
461
+ schema: responseFormat.schema,
462
+ strict: true,
463
+ name: (_a = responseFormat.name) != null ? _a : "response",
464
+ description: responseFormat.description
465
+ }
466
+ } : { type: "json_object" } : void 0,
467
+ stop: stopSequences,
468
+ seed,
469
+ // openai specific settings:
470
+ // TODO remove in next major version; we auto-map maxTokens now
471
+ max_completion_tokens: (_b = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _b.maxCompletionTokens,
472
+ store: (_c = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _c.store,
473
+ metadata: (_d = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _d.metadata,
474
+ prediction: (_e = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _e.prediction,
475
+ reasoning_effort: (_g = (_f = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _f.reasoningEffort) != null ? _g : this.settings.reasoningEffort,
476
+ // messages:
477
+ messages
478
+ };
479
+ if (isReasoningModel(this.modelId)) {
480
+ if (baseArgs.temperature != null) {
481
+ baseArgs.temperature = void 0;
482
+ warnings.push({
483
+ type: "unsupported-setting",
484
+ setting: "temperature",
485
+ details: "temperature is not supported for reasoning models"
486
+ });
487
+ }
488
+ if (baseArgs.top_p != null) {
489
+ baseArgs.top_p = void 0;
490
+ warnings.push({
491
+ type: "unsupported-setting",
492
+ setting: "topP",
493
+ details: "topP is not supported for reasoning models"
494
+ });
495
+ }
496
+ if (baseArgs.frequency_penalty != null) {
497
+ baseArgs.frequency_penalty = void 0;
498
+ warnings.push({
499
+ type: "unsupported-setting",
500
+ setting: "frequencyPenalty",
501
+ details: "frequencyPenalty is not supported for reasoning models"
502
+ });
503
+ }
504
+ if (baseArgs.presence_penalty != null) {
505
+ baseArgs.presence_penalty = void 0;
506
+ warnings.push({
507
+ type: "unsupported-setting",
508
+ setting: "presencePenalty",
509
+ details: "presencePenalty is not supported for reasoning models"
510
+ });
511
+ }
512
+ if (baseArgs.logit_bias != null) {
513
+ baseArgs.logit_bias = void 0;
514
+ warnings.push({
515
+ type: "other",
516
+ message: "logitBias is not supported for reasoning models"
517
+ });
518
+ }
519
+ if (baseArgs.logprobs != null) {
520
+ baseArgs.logprobs = void 0;
521
+ warnings.push({
522
+ type: "other",
523
+ message: "logprobs is not supported for reasoning models"
524
+ });
525
+ }
526
+ if (baseArgs.top_logprobs != null) {
527
+ baseArgs.top_logprobs = void 0;
528
+ warnings.push({
529
+ type: "other",
530
+ message: "topLogprobs is not supported for reasoning models"
531
+ });
532
+ }
533
+ if (baseArgs.max_tokens != null) {
534
+ if (baseArgs.max_completion_tokens == null) {
535
+ baseArgs.max_completion_tokens = baseArgs.max_tokens;
536
+ }
537
+ baseArgs.max_tokens = void 0;
538
+ }
539
+ } else if (this.modelId.startsWith("gpt-4o-search-preview") || this.modelId.startsWith("gpt-4o-mini-search-preview")) {
540
+ if (baseArgs.temperature != null) {
541
+ baseArgs.temperature = void 0;
542
+ warnings.push({
543
+ type: "unsupported-setting",
544
+ setting: "temperature",
545
+ details: "temperature is not supported for the search preview models and has been removed."
546
+ });
547
+ }
548
+ }
549
+ switch (type) {
550
+ case "regular": {
551
+ const { tools, tool_choice, functions, function_call, toolWarnings } = prepareTools({
552
+ mode,
553
+ useLegacyFunctionCalling,
554
+ structuredOutputs: this.supportsStructuredOutputs
555
+ });
556
+ return {
557
+ args: {
558
+ ...baseArgs,
559
+ tools,
560
+ tool_choice,
561
+ functions,
562
+ function_call
563
+ },
564
+ warnings: [...warnings, ...toolWarnings]
565
+ };
566
+ }
567
+ case "object-json": {
568
+ return {
569
+ args: {
570
+ ...baseArgs,
571
+ response_format: this.supportsStructuredOutputs && mode.schema != null ? {
572
+ type: "json_schema",
573
+ json_schema: {
574
+ schema: mode.schema,
575
+ strict: true,
576
+ name: (_h = mode.name) != null ? _h : "response",
577
+ description: mode.description
578
+ }
579
+ } : { type: "json_object" }
580
+ },
581
+ warnings
582
+ };
583
+ }
584
+ case "object-tool": {
585
+ return {
586
+ args: useLegacyFunctionCalling ? {
587
+ ...baseArgs,
588
+ function_call: {
589
+ name: mode.tool.name
590
+ },
591
+ functions: [
592
+ {
593
+ name: mode.tool.name,
594
+ description: mode.tool.description,
595
+ parameters: mode.tool.parameters
596
+ }
597
+ ]
598
+ } : {
599
+ ...baseArgs,
600
+ tool_choice: {
601
+ type: "function",
602
+ function: { name: mode.tool.name }
603
+ },
604
+ tools: [
605
+ {
606
+ type: "function",
607
+ function: {
608
+ name: mode.tool.name,
609
+ description: mode.tool.description,
610
+ parameters: mode.tool.parameters,
611
+ strict: this.supportsStructuredOutputs ? true : void 0
612
+ }
613
+ }
614
+ ]
615
+ },
616
+ warnings
617
+ };
618
+ }
619
+ default: {
620
+ const _exhaustiveCheck = type;
621
+ throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
622
+ }
623
+ }
624
+ }
625
+ async doGenerate(options) {
626
+ var _a, _b, _c, _d, _e, _f, _g, _h;
627
+ const { args: body, warnings } = this.getArgs(options);
628
+ const {
629
+ responseHeaders,
630
+ value: response,
631
+ rawValue: rawResponse
632
+ } = await (0, import_provider_utils3.postJsonToApi)({
633
+ url: this.config.url({
634
+ path: "/chat/completions",
635
+ modelId: this.modelId
636
+ }),
637
+ headers: (0, import_provider_utils3.combineHeaders)(this.config.headers(), options.headers),
638
+ body,
639
+ failedResponseHandler: openaiFailedResponseHandler,
640
+ successfulResponseHandler: (0, import_provider_utils3.createJsonResponseHandler)(
641
+ openaiChatResponseSchema
642
+ ),
643
+ abortSignal: options.abortSignal,
644
+ fetch: this.config.fetch
645
+ });
646
+ const { messages: rawPrompt, ...rawSettings } = body;
647
+ const choice = response.choices[0];
648
+ const completionTokenDetails = (_a = response.usage) == null ? void 0 : _a.completion_tokens_details;
649
+ const promptTokenDetails = (_b = response.usage) == null ? void 0 : _b.prompt_tokens_details;
650
+ const providerMetadata = { openai: {} };
651
+ if ((completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null) {
652
+ providerMetadata.openai.reasoningTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens;
653
+ }
654
+ if ((completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens) != null) {
655
+ providerMetadata.openai.acceptedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens;
656
+ }
657
+ if ((completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens) != null) {
658
+ providerMetadata.openai.rejectedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens;
659
+ }
660
+ if ((promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null) {
661
+ providerMetadata.openai.cachedPromptTokens = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens;
662
+ }
663
+ return {
664
+ text: (_c = choice.message.content) != null ? _c : void 0,
665
+ toolCalls: this.settings.useLegacyFunctionCalling && choice.message.function_call ? [
666
+ {
667
+ toolCallType: "function",
668
+ toolCallId: (0, import_provider_utils3.generateId)(),
669
+ toolName: choice.message.function_call.name,
670
+ args: choice.message.function_call.arguments
671
+ }
672
+ ] : (_d = choice.message.tool_calls) == null ? void 0 : _d.map((toolCall) => {
673
+ var _a2;
674
+ return {
675
+ toolCallType: "function",
676
+ toolCallId: (_a2 = toolCall.id) != null ? _a2 : (0, import_provider_utils3.generateId)(),
677
+ toolName: toolCall.function.name,
678
+ args: toolCall.function.arguments
679
+ };
680
+ }),
681
+ finishReason: mapOpenAIFinishReason(choice.finish_reason),
682
+ usage: {
683
+ promptTokens: (_f = (_e = response.usage) == null ? void 0 : _e.prompt_tokens) != null ? _f : NaN,
684
+ completionTokens: (_h = (_g = response.usage) == null ? void 0 : _g.completion_tokens) != null ? _h : NaN
685
+ },
686
+ rawCall: { rawPrompt, rawSettings },
687
+ rawResponse: { headers: responseHeaders, body: rawResponse },
688
+ request: { body: JSON.stringify(body) },
689
+ response: getResponseMetadata(response),
690
+ warnings,
691
+ logprobs: mapOpenAIChatLogProbsOutput(choice.logprobs),
692
+ providerMetadata
693
+ };
694
+ }
695
+ async doStream(options) {
696
+ if (this.settings.simulateStreaming) {
697
+ const result = await this.doGenerate(options);
698
+ const simulatedStream = new ReadableStream({
699
+ start(controller) {
700
+ controller.enqueue({ type: "response-metadata", ...result.response });
701
+ if (result.text) {
702
+ controller.enqueue({
703
+ type: "text-delta",
704
+ textDelta: result.text
705
+ });
706
+ }
707
+ if (result.toolCalls) {
708
+ for (const toolCall of result.toolCalls) {
709
+ controller.enqueue({
710
+ type: "tool-call-delta",
711
+ toolCallType: "function",
712
+ toolCallId: toolCall.toolCallId,
713
+ toolName: toolCall.toolName,
714
+ argsTextDelta: toolCall.args
715
+ });
716
+ controller.enqueue({
717
+ type: "tool-call",
718
+ ...toolCall
719
+ });
720
+ }
721
+ }
722
+ controller.enqueue({
723
+ type: "finish",
724
+ finishReason: result.finishReason,
725
+ usage: result.usage,
726
+ logprobs: result.logprobs,
727
+ providerMetadata: result.providerMetadata
728
+ });
729
+ controller.close();
730
+ }
731
+ });
732
+ return {
733
+ stream: simulatedStream,
734
+ rawCall: result.rawCall,
735
+ rawResponse: result.rawResponse,
736
+ warnings: result.warnings
737
+ };
738
+ }
739
+ const { args, warnings } = this.getArgs(options);
740
+ const body = {
741
+ ...args,
742
+ stream: true,
743
+ // only include stream_options when in strict compatibility mode:
744
+ stream_options: this.config.compatibility === "strict" ? { include_usage: true } : void 0
745
+ };
746
+ const { responseHeaders, value: response } = await (0, import_provider_utils3.postJsonToApi)({
747
+ url: this.config.url({
748
+ path: "/chat/completions",
749
+ modelId: this.modelId
750
+ }),
751
+ headers: (0, import_provider_utils3.combineHeaders)(this.config.headers(), options.headers),
752
+ body,
753
+ failedResponseHandler: openaiFailedResponseHandler,
754
+ successfulResponseHandler: (0, import_provider_utils3.createEventSourceResponseHandler)(
755
+ openaiChatChunkSchema
756
+ ),
757
+ abortSignal: options.abortSignal,
758
+ fetch: this.config.fetch
759
+ });
760
+ const { messages: rawPrompt, ...rawSettings } = args;
761
+ const toolCalls = [];
762
+ let finishReason = "unknown";
763
+ let usage = {
764
+ promptTokens: void 0,
765
+ completionTokens: void 0
766
+ };
767
+ let logprobs;
768
+ let isFirstChunk = true;
769
+ const { useLegacyFunctionCalling } = this.settings;
770
+ const providerMetadata = { openai: {} };
771
+ return {
772
+ stream: response.pipeThrough(
773
+ new TransformStream({
774
+ transform(chunk, controller) {
775
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
776
+ if (!chunk.success) {
777
+ finishReason = "error";
778
+ controller.enqueue({ type: "error", error: chunk.error });
779
+ return;
780
+ }
781
+ const value = chunk.value;
782
+ if ("error" in value) {
783
+ finishReason = "error";
784
+ controller.enqueue({ type: "error", error: value.error });
785
+ return;
786
+ }
787
+ if (isFirstChunk) {
788
+ isFirstChunk = false;
789
+ controller.enqueue({
790
+ type: "response-metadata",
791
+ ...getResponseMetadata(value)
792
+ });
793
+ }
794
+ if (value.usage != null) {
795
+ const {
796
+ prompt_tokens,
797
+ completion_tokens,
798
+ prompt_tokens_details,
799
+ completion_tokens_details
800
+ } = value.usage;
801
+ usage = {
802
+ promptTokens: prompt_tokens != null ? prompt_tokens : void 0,
803
+ completionTokens: completion_tokens != null ? completion_tokens : void 0
804
+ };
805
+ if ((completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens) != null) {
806
+ providerMetadata.openai.reasoningTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens;
807
+ }
808
+ if ((completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens) != null) {
809
+ providerMetadata.openai.acceptedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens;
810
+ }
811
+ if ((completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens) != null) {
812
+ providerMetadata.openai.rejectedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens;
813
+ }
814
+ if ((prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens) != null) {
815
+ providerMetadata.openai.cachedPromptTokens = prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens;
816
+ }
817
+ }
818
+ const choice = value.choices[0];
819
+ if ((choice == null ? void 0 : choice.finish_reason) != null) {
820
+ finishReason = mapOpenAIFinishReason(choice.finish_reason);
821
+ }
822
+ if ((choice == null ? void 0 : choice.delta) == null) {
823
+ return;
824
+ }
825
+ const delta = choice.delta;
826
+ if (delta.content != null) {
827
+ controller.enqueue({
828
+ type: "text-delta",
829
+ textDelta: delta.content
830
+ });
831
+ }
832
+ const mappedLogprobs = mapOpenAIChatLogProbsOutput(
833
+ choice == null ? void 0 : choice.logprobs
834
+ );
835
+ if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
836
+ if (logprobs === void 0) logprobs = [];
837
+ logprobs.push(...mappedLogprobs);
838
+ }
839
+ const mappedToolCalls = useLegacyFunctionCalling && delta.function_call != null ? [
840
+ {
841
+ type: "function",
842
+ id: (0, import_provider_utils3.generateId)(),
843
+ function: delta.function_call,
844
+ index: 0
845
+ }
846
+ ] : delta.tool_calls;
847
+ if (mappedToolCalls != null) {
848
+ for (const toolCallDelta of mappedToolCalls) {
849
+ const index = toolCallDelta.index;
850
+ if (toolCalls[index] == null) {
851
+ if (toolCallDelta.type !== "function") {
852
+ throw new import_provider3.InvalidResponseDataError({
853
+ data: toolCallDelta,
854
+ message: `Expected 'function' type.`
855
+ });
856
+ }
857
+ if (toolCallDelta.id == null) {
858
+ throw new import_provider3.InvalidResponseDataError({
859
+ data: toolCallDelta,
860
+ message: `Expected 'id' to be a string.`
861
+ });
862
+ }
863
+ if (((_a = toolCallDelta.function) == null ? void 0 : _a.name) == null) {
864
+ throw new import_provider3.InvalidResponseDataError({
865
+ data: toolCallDelta,
866
+ message: `Expected 'function.name' to be a string.`
867
+ });
868
+ }
869
+ toolCalls[index] = {
870
+ id: toolCallDelta.id,
871
+ type: "function",
872
+ function: {
873
+ name: toolCallDelta.function.name,
874
+ arguments: (_b = toolCallDelta.function.arguments) != null ? _b : ""
875
+ },
876
+ hasFinished: false
877
+ };
878
+ const toolCall2 = toolCalls[index];
879
+ if (((_c = toolCall2.function) == null ? void 0 : _c.name) != null && ((_d = toolCall2.function) == null ? void 0 : _d.arguments) != null) {
880
+ if (toolCall2.function.arguments.length > 0) {
881
+ controller.enqueue({
882
+ type: "tool-call-delta",
883
+ toolCallType: "function",
884
+ toolCallId: toolCall2.id,
885
+ toolName: toolCall2.function.name,
886
+ argsTextDelta: toolCall2.function.arguments
887
+ });
888
+ }
889
+ if ((0, import_provider_utils3.isParsableJson)(toolCall2.function.arguments)) {
890
+ controller.enqueue({
891
+ type: "tool-call",
892
+ toolCallType: "function",
893
+ toolCallId: (_e = toolCall2.id) != null ? _e : (0, import_provider_utils3.generateId)(),
894
+ toolName: toolCall2.function.name,
895
+ args: toolCall2.function.arguments
896
+ });
897
+ toolCall2.hasFinished = true;
898
+ }
899
+ }
900
+ continue;
901
+ }
902
+ const toolCall = toolCalls[index];
903
+ if (toolCall.hasFinished) {
904
+ continue;
905
+ }
906
+ if (((_f = toolCallDelta.function) == null ? void 0 : _f.arguments) != null) {
907
+ toolCall.function.arguments += (_h = (_g = toolCallDelta.function) == null ? void 0 : _g.arguments) != null ? _h : "";
908
+ }
909
+ controller.enqueue({
910
+ type: "tool-call-delta",
911
+ toolCallType: "function",
912
+ toolCallId: toolCall.id,
913
+ toolName: toolCall.function.name,
914
+ argsTextDelta: (_i = toolCallDelta.function.arguments) != null ? _i : ""
915
+ });
916
+ if (((_j = toolCall.function) == null ? void 0 : _j.name) != null && ((_k = toolCall.function) == null ? void 0 : _k.arguments) != null && (0, import_provider_utils3.isParsableJson)(toolCall.function.arguments)) {
917
+ controller.enqueue({
918
+ type: "tool-call",
919
+ toolCallType: "function",
920
+ toolCallId: (_l = toolCall.id) != null ? _l : (0, import_provider_utils3.generateId)(),
921
+ toolName: toolCall.function.name,
922
+ args: toolCall.function.arguments
923
+ });
924
+ toolCall.hasFinished = true;
925
+ }
926
+ }
927
+ }
928
+ },
929
+ flush(controller) {
930
+ var _a, _b;
931
+ controller.enqueue({
932
+ type: "finish",
933
+ finishReason,
934
+ logprobs,
935
+ usage: {
936
+ promptTokens: (_a = usage.promptTokens) != null ? _a : NaN,
937
+ completionTokens: (_b = usage.completionTokens) != null ? _b : NaN
938
+ },
939
+ ...providerMetadata != null ? { providerMetadata } : {}
940
+ });
941
+ }
942
+ })
943
+ ),
944
+ rawCall: { rawPrompt, rawSettings },
945
+ rawResponse: { headers: responseHeaders },
946
+ request: { body: JSON.stringify(body) },
947
+ warnings
948
+ };
949
+ }
950
+ };
951
// Token usage payload shared by the chat response and chunk schemas.
// Every field is nullish because OpenAI-compatible servers may omit any of them.
var openaiTokenUsageSchema = (() => {
  const { z } = import_zod2;
  const promptDetails = z.object({
    cached_tokens: z.number().nullish()
  }).nullish();
  const completionDetails = z.object({
    reasoning_tokens: z.number().nullish(),
    accepted_prediction_tokens: z.number().nullish(),
    rejected_prediction_tokens: z.number().nullish()
  }).nullish();
  return z.object({
    prompt_tokens: z.number().nullish(),
    completion_tokens: z.number().nullish(),
    prompt_tokens_details: promptDetails,
    completion_tokens_details: completionDetails
  }).nullish();
})();
963
// Shape of a successful non-streaming chat completions response.
var openaiChatResponseSchema = (() => {
  const { z } = import_zod2;
  const toolCall = z.object({
    id: z.string().nullish(),
    type: z.literal("function"),
    function: z.object({
      name: z.string(),
      arguments: z.string()
    })
  });
  const message = z.object({
    role: z.literal("assistant").nullish(),
    content: z.string().nullish(),
    function_call: z.object({
      arguments: z.string(),
      name: z.string()
    }).nullish(),
    tool_calls: z.array(toolCall).nullish()
  });
  const logprobs = z.object({
    content: z.array(
      z.object({
        token: z.string(),
        logprob: z.number(),
        top_logprobs: z.array(
          z.object({
            token: z.string(),
            logprob: z.number()
          })
        )
      })
    ).nullable()
  }).nullish();
  return z.object({
    id: z.string().nullish(),
    created: z.number().nullish(),
    model: z.string().nullish(),
    choices: z.array(
      z.object({
        message,
        index: z.number(),
        logprobs,
        finish_reason: z.string().nullish()
      })
    ),
    usage: openaiTokenUsageSchema
  });
})();
1007
// Shape of a streamed chat completions chunk. Error payloads arrive on the
// same SSE stream, hence the union with the provider error schema.
var openaiChatChunkSchema = (() => {
  const { z } = import_zod2;
  const deltaToolCall = z.object({
    index: z.number(),
    id: z.string().nullish(),
    type: z.literal("function").nullish(),
    function: z.object({
      name: z.string().nullish(),
      arguments: z.string().nullish()
    })
  });
  const delta = z.object({
    role: z.enum(["assistant"]).nullish(),
    content: z.string().nullish(),
    function_call: z.object({
      name: z.string().optional(),
      arguments: z.string().optional()
    }).nullish(),
    tool_calls: z.array(deltaToolCall).nullish()
  }).nullish();
  const logprobs = z.object({
    content: z.array(
      z.object({
        token: z.string(),
        logprob: z.number(),
        top_logprobs: z.array(
          z.object({
            token: z.string(),
            logprob: z.number()
          })
        )
      })
    ).nullable()
  }).nullish();
  const chunk = z.object({
    id: z.string().nullish(),
    created: z.number().nullish(),
    model: z.string().nullish(),
    choices: z.array(
      z.object({
        delta,
        logprobs,
        finish_reason: z.string().nullish(),
        index: z.number()
      })
    ),
    usage: openaiTokenUsageSchema
  });
  return z.union([chunk, openaiErrorDataSchema]);
})();
1055
// A model id beginning with "o" (o1, o3, o4-mini, ...) is treated as a
// reasoning model. NOTE(review): this prefix test would also match any
// future non-reasoning id starting with "o" — confirm against model list.
function isReasoningModel(modelId) {
  return modelId.charAt(0) === "o";
}
1058
// True for the gpt-4o audio preview model family (any dated suffix).
function isAudioModel(modelId) {
  const prefix = "gpt-4o-audio-preview";
  return modelId.slice(0, prefix.length) === prefix;
}
1061
// Decides how system messages are delivered for a model: plain "system"
// for non-reasoning models; otherwise the per-model override from
// `reasoningModels`, defaulting to "developer".
function getSystemMessageMode(modelId) {
  if (!isReasoningModel(modelId)) {
    return "system";
  }
  const entry = reasoningModels[modelId];
  if (entry && entry.systemMessageMode != null) {
    return entry.systemMessageMode;
  }
  return "developer";
}
1068
// Per-model system-message handling for reasoning models: the o1 preview
// family silently drops system messages, newer o3/o4 models take them as
// "developer" messages.
var reasoningModels = Object.fromEntries(
  [
    ["o1-mini", "remove"],
    ["o1-mini-2024-09-12", "remove"],
    ["o1-preview", "remove"],
    ["o1-preview-2024-09-12", "remove"],
    ["o3", "developer"],
    ["o3-2025-04-16", "developer"],
    ["o3-mini", "developer"],
    ["o3-mini-2025-01-31", "developer"],
    ["o4-mini", "developer"],
    ["o4-mini-2025-04-16", "developer"]
  ].map(([id, systemMessageMode]) => [id, { systemMessageMode }])
);
1100
+
1101
+ // src/openai-completion-language-model.ts
1102
+ var import_provider5 = require("@ai-sdk/provider");
1103
+ var import_provider_utils4 = require("@ai-sdk/provider-utils");
1104
+ var import_zod3 = require("zod");
1105
+
1106
+ // src/convert-to-openai-completion-prompt.ts
1107
+ var import_provider4 = require("@ai-sdk/provider");
1108
// Converts an AI SDK prompt into a single text-completion prompt string
// using "user:"/"assistant:" chat markers.
//
// @param prompt      AI SDK language-model prompt (array of messages).
// @param inputFormat "prompt" passes a lone single-text user message
//                    through unchanged; otherwise a chat transcript is built.
// @param user        label for user turns (default "user").
// @param assistant   label for assistant turns (default "assistant").
// @returns { prompt, stopSequences? } — stopSequences cuts generation at
//          the next simulated user turn.
// @throws InvalidPromptError for a non-leading system message;
//         UnsupportedFunctionalityError for images and tool content.
function convertToOpenAICompletionPrompt({
  prompt,
  inputFormat,
  user = "user",
  assistant = "assistant"
}) {
  // Fast path: a bare single-text user prompt is passed through unchanged.
  if (inputFormat === "prompt" && prompt.length === 1 && prompt[0].role === "user" && prompt[0].content.length === 1 && prompt[0].content[0].type === "text") {
    return { prompt: prompt[0].content[0].text };
  }
  let text = "";
  // A leading system message becomes a preamble before the transcript.
  if (prompt[0].role === "system") {
    text += `${prompt[0].content}\n\n`;
    prompt = prompt.slice(1);
  }
  for (const { role, content } of prompt) {
    switch (role) {
      case "system": {
        // FIX: the message was a double-quoted string, so "${content}"
        // appeared literally in the error instead of being interpolated.
        throw new import_provider4.InvalidPromptError({
          message: `Unexpected system message in prompt: ${content}`,
          prompt
        });
      }
      case "user": {
        const userMessage = content.map((part) => {
          switch (part.type) {
            case "text": {
              return part.text;
            }
            case "image": {
              throw new import_provider4.UnsupportedFunctionalityError({
                functionality: "images"
              });
            }
          }
        }).join("");
        text += `${user}:\n${userMessage}\n\n`;
        break;
      }
      case "assistant": {
        const assistantMessage = content.map((part) => {
          switch (part.type) {
            case "text": {
              return part.text;
            }
            case "tool-call": {
              throw new import_provider4.UnsupportedFunctionalityError({
                functionality: "tool-call messages"
              });
            }
          }
        }).join("");
        text += `${assistant}:\n${assistantMessage}\n\n`;
        break;
      }
      case "tool": {
        throw new import_provider4.UnsupportedFunctionalityError({
          functionality: "tool messages"
        });
      }
      default: {
        const _exhaustiveCheck = role;
        throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
      }
    }
  }
  // Leave the transcript open on an assistant turn for the model to fill.
  text += `${assistant}:\n`;
  return {
    prompt: text,
    stopSequences: [`\n${user}:`]
  };
}
1189
+
1190
+ // src/map-openai-completion-logprobs.ts
1191
// Maps completions-API logprobs into the AI SDK logprob format.
// Returns undefined when the API sent no logprobs object.
function mapOpenAICompletionLogProbs(logprobs) {
  if (logprobs == null) {
    return void 0;
  }
  return logprobs.tokens.map((token, i) => ({
    token,
    logprob: logprobs.token_logprobs[i],
    topLogprobs: logprobs.top_logprobs ? Object.entries(logprobs.top_logprobs[i]).map(([topToken, topLogprob]) => ({
      token: topToken,
      logprob: topLogprob
    })) : []
  }));
}
1203
+
1204
+ // src/openai-completion-language-model.ts
1205
// OpenAI text-completion language model (legacy /completions endpoint),
// implementing the LanguageModelV1 specification. Tools and object
// generation are not supported by this API.
var OpenAICompletionLanguageModel = class {
  constructor(modelId, settings, config) {
    this.specificationVersion = "v1";
    // The completions API has no JSON/tool mode, so no default object mode.
    this.defaultObjectGenerationMode = void 0;
    this.modelId = modelId;
    this.settings = settings;
    this.config = config;
  }
  get provider() {
    return this.config.provider;
  }
  // Builds the /completions request body from standardized call options.
  // Returns { args, warnings }; throws UnsupportedFunctionalityError for
  // tools, toolChoice, and the object-generation modes.
  getArgs({
    mode,
    inputFormat,
    prompt,
    maxTokens,
    temperature,
    topP,
    topK,
    frequencyPenalty,
    presencePenalty,
    stopSequences: userStopSequences,
    responseFormat,
    seed
  }) {
    var _a;
    const type = mode.type;
    const warnings = [];
    if (topK != null) {
      warnings.push({
        type: "unsupported-setting",
        setting: "topK"
      });
    }
    if (responseFormat != null && responseFormat.type !== "text") {
      warnings.push({
        type: "unsupported-setting",
        setting: "responseFormat",
        details: "JSON response format is not supported."
      });
    }
    const { prompt: completionPrompt, stopSequences } = convertToOpenAICompletionPrompt({ prompt, inputFormat });
    // Chat-marker stops from the prompt conversion are combined with any
    // user-provided stop sequences.
    const stop = [...stopSequences != null ? stopSequences : [], ...userStopSequences != null ? userStopSequences : []];
    const baseArgs = {
      // model id:
      model: this.modelId,
      // model specific settings:
      echo: this.settings.echo,
      logit_bias: this.settings.logitBias,
      // `logprobs: true` maps to 0 top-logprobs; numbers pass through;
      // false/undefined omit the parameter entirely.
      logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
      suffix: this.settings.suffix,
      user: this.settings.user,
      // standardized settings:
      max_tokens: maxTokens,
      temperature,
      top_p: topP,
      frequency_penalty: frequencyPenalty,
      presence_penalty: presencePenalty,
      seed,
      // prompt:
      prompt: completionPrompt,
      // stop sequences:
      stop: stop.length > 0 ? stop : void 0
    };
    switch (type) {
      case "regular": {
        // The legacy completions API cannot call tools.
        if ((_a = mode.tools) == null ? void 0 : _a.length) {
          throw new import_provider5.UnsupportedFunctionalityError({
            functionality: "tools"
          });
        }
        if (mode.toolChoice) {
          throw new import_provider5.UnsupportedFunctionalityError({
            functionality: "toolChoice"
          });
        }
        return { args: baseArgs, warnings };
      }
      case "object-json": {
        throw new import_provider5.UnsupportedFunctionalityError({
          functionality: "object-json mode"
        });
      }
      case "object-tool": {
        throw new import_provider5.UnsupportedFunctionalityError({
          functionality: "object-tool mode"
        });
      }
      default: {
        const _exhaustiveCheck = type;
        throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
      }
    }
  }
  // Non-streaming completion call. Returns text, usage, finish reason,
  // logprobs, and raw request/response metadata.
  async doGenerate(options) {
    const { args, warnings } = this.getArgs(options);
    const {
      responseHeaders,
      value: response,
      rawValue: rawResponse
    } = await (0, import_provider_utils4.postJsonToApi)({
      url: this.config.url({
        path: "/completions",
        modelId: this.modelId
      }),
      headers: (0, import_provider_utils4.combineHeaders)(this.config.headers(), options.headers),
      body: args,
      failedResponseHandler: openaiFailedResponseHandler,
      successfulResponseHandler: (0, import_provider_utils4.createJsonResponseHandler)(
        openaiCompletionResponseSchema
      ),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
    const { prompt: rawPrompt, ...rawSettings } = args;
    // Only the first choice is surfaced.
    const choice = response.choices[0];
    return {
      text: choice.text,
      usage: {
        promptTokens: response.usage.prompt_tokens,
        completionTokens: response.usage.completion_tokens
      },
      finishReason: mapOpenAIFinishReason(choice.finish_reason),
      logprobs: mapOpenAICompletionLogProbs(choice.logprobs),
      rawCall: { rawPrompt, rawSettings },
      rawResponse: { headers: responseHeaders, body: rawResponse },
      response: getResponseMetadata(response),
      warnings,
      request: { body: JSON.stringify(args) }
    };
  }
  // Streaming completion call: transforms SSE chunks into AI SDK stream
  // parts (response-metadata, text deltas, logprobs, final finish event).
  async doStream(options) {
    const { args, warnings } = this.getArgs(options);
    const body = {
      ...args,
      stream: true,
      // only include stream_options when in strict compatibility mode:
      stream_options: this.config.compatibility === "strict" ? { include_usage: true } : void 0
    };
    const { responseHeaders, value: response } = await (0, import_provider_utils4.postJsonToApi)({
      url: this.config.url({
        path: "/completions",
        modelId: this.modelId
      }),
      headers: (0, import_provider_utils4.combineHeaders)(this.config.headers(), options.headers),
      body,
      failedResponseHandler: openaiFailedResponseHandler,
      successfulResponseHandler: (0, import_provider_utils4.createEventSourceResponseHandler)(
        openaiCompletionChunkSchema
      ),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
    const { prompt: rawPrompt, ...rawSettings } = args;
    // Accumulated across chunks; emitted once in flush().
    let finishReason = "unknown";
    let usage = {
      promptTokens: Number.NaN,
      completionTokens: Number.NaN
    };
    let logprobs;
    let isFirstChunk = true;
    return {
      stream: response.pipeThrough(
        new TransformStream({
          transform(chunk, controller) {
            // Schema-parse failures and in-band error payloads both end
            // the stream with finishReason "error".
            if (!chunk.success) {
              finishReason = "error";
              controller.enqueue({ type: "error", error: chunk.error });
              return;
            }
            const value = chunk.value;
            if ("error" in value) {
              finishReason = "error";
              controller.enqueue({ type: "error", error: value.error });
              return;
            }
            // Emit response metadata (id/model/timestamp) exactly once.
            if (isFirstChunk) {
              isFirstChunk = false;
              controller.enqueue({
                type: "response-metadata",
                ...getResponseMetadata(value)
              });
            }
            if (value.usage != null) {
              usage = {
                promptTokens: value.usage.prompt_tokens,
                completionTokens: value.usage.completion_tokens
              };
            }
            const choice = value.choices[0];
            if ((choice == null ? void 0 : choice.finish_reason) != null) {
              finishReason = mapOpenAIFinishReason(choice.finish_reason);
            }
            if ((choice == null ? void 0 : choice.text) != null) {
              controller.enqueue({
                type: "text-delta",
                textDelta: choice.text
              });
            }
            const mappedLogprobs = mapOpenAICompletionLogProbs(
              choice == null ? void 0 : choice.logprobs
            );
            if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
              if (logprobs === void 0) logprobs = [];
              logprobs.push(...mappedLogprobs);
            }
          },
          // Emit the accumulated finish event when the SSE stream closes.
          flush(controller) {
            controller.enqueue({
              type: "finish",
              finishReason,
              logprobs,
              usage
            });
          }
        })
      ),
      rawCall: { rawPrompt, rawSettings },
      rawResponse: { headers: responseHeaders },
      warnings,
      request: { body: JSON.stringify(body) }
    };
  }
};
1429
// Shape of a successful non-streaming /completions response.
var openaiCompletionResponseSchema = (() => {
  const { z } = import_zod3;
  const logprobs = z.object({
    tokens: z.array(z.string()),
    token_logprobs: z.array(z.number()),
    top_logprobs: z.array(z.record(z.string(), z.number())).nullable()
  }).nullish();
  return z.object({
    id: z.string().nullish(),
    created: z.number().nullish(),
    model: z.string().nullish(),
    choices: z.array(
      z.object({
        text: z.string(),
        finish_reason: z.string(),
        logprobs
      })
    ),
    usage: z.object({
      prompt_tokens: z.number(),
      completion_tokens: z.number()
    })
  });
})();
1449
// Shape of a streamed /completions chunk; error payloads share the SSE
// stream, hence the union with the provider error schema.
var openaiCompletionChunkSchema = (() => {
  const { z } = import_zod3;
  const logprobs = z.object({
    tokens: z.array(z.string()),
    token_logprobs: z.array(z.number()),
    top_logprobs: z.array(z.record(z.string(), z.number())).nullable()
  }).nullish();
  const chunk = z.object({
    id: z.string().nullish(),
    created: z.number().nullish(),
    model: z.string().nullish(),
    choices: z.array(
      z.object({
        text: z.string(),
        finish_reason: z.string().nullish(),
        index: z.number(),
        logprobs
      })
    ),
    usage: z.object({
      prompt_tokens: z.number(),
      completion_tokens: z.number()
    }).nullish()
  });
  return z.union([chunk, openaiErrorDataSchema]);
})();
1473
+
1474
+ // src/openai-embedding-model.ts
1475
+ var import_provider6 = require("@ai-sdk/provider");
1476
+ var import_provider_utils5 = require("@ai-sdk/provider-utils");
1477
+ var import_zod4 = require("zod");
1478
// OpenAI embedding model (/embeddings endpoint), EmbeddingModelV1 spec.
var OpenAIEmbeddingModel = class {
  constructor(modelId, settings, config) {
    this.specificationVersion = "v1";
    this.modelId = modelId;
    this.settings = settings;
    this.config = config;
  }
  get provider() {
    return this.config.provider;
  }
  // Batch limit per request; defaults to 2048 when not configured.
  get maxEmbeddingsPerCall() {
    var _a;
    return (_a = this.settings.maxEmbeddingsPerCall) != null ? _a : 2048;
  }
  get supportsParallelCalls() {
    var _a;
    return (_a = this.settings.supportsParallelCalls) != null ? _a : true;
  }
  // Embeds `values` in one request. Returns embeddings in input order plus
  // optional token usage; throws TooManyEmbeddingValuesForCallError when
  // the batch exceeds maxEmbeddingsPerCall.
  async doEmbed({
    values,
    headers,
    abortSignal
  }) {
    if (values.length > this.maxEmbeddingsPerCall) {
      throw new import_provider6.TooManyEmbeddingValuesForCallError({
        provider: this.provider,
        modelId: this.modelId,
        maxEmbeddingsPerCall: this.maxEmbeddingsPerCall,
        values
      });
    }
    const { responseHeaders, value: response } = await (0, import_provider_utils5.postJsonToApi)({
      url: this.config.url({
        path: "/embeddings",
        modelId: this.modelId
      }),
      headers: (0, import_provider_utils5.combineHeaders)(this.config.headers(), headers),
      body: {
        model: this.modelId,
        input: values,
        // floats (not base64) so the SDK can return number arrays directly
        encoding_format: "float",
        dimensions: this.settings.dimensions,
        user: this.settings.user
      },
      failedResponseHandler: openaiFailedResponseHandler,
      successfulResponseHandler: (0, import_provider_utils5.createJsonResponseHandler)(
        openaiTextEmbeddingResponseSchema
      ),
      abortSignal,
      fetch: this.config.fetch
    });
    return {
      embeddings: response.data.map((item) => item.embedding),
      usage: response.usage ? { tokens: response.usage.prompt_tokens } : void 0,
      rawResponse: { headers: responseHeaders }
    };
  }
};
1536
// /embeddings response: embedding vectors plus optional token usage.
var openaiTextEmbeddingResponseSchema = (() => {
  const { z } = import_zod4;
  return z.object({
    data: z.array(z.object({ embedding: z.array(z.number()) })),
    usage: z.object({ prompt_tokens: z.number() }).nullish()
  });
})();
1540
+
1541
+ // src/openai-image-model.ts
1542
+ var import_provider_utils6 = require("@ai-sdk/provider-utils");
1543
+ var import_zod5 = require("zod");
1544
+
1545
+ // src/openai-image-settings.ts
1546
// Per-model image batch limits; models absent from this table fall back
// to 1 (see OpenAIImageModel.maxImagesPerCall).
var modelMaxImagesPerCall = Object.fromEntries([
  ["dall-e-3", 1],
  ["dall-e-2", 10],
  ["gpt-image-1", 10]
]);
// Models that always return base64 and take no explicit response_format.
var hasDefaultResponseFormat = new Set().add("gpt-image-1");
1552
+
1553
+ // src/openai-image-model.ts
1554
// OpenAI image generation model (/images/generations), ImageModelV1 spec.
var OpenAIImageModel = class {
  constructor(modelId, settings, config) {
    this.modelId = modelId;
    this.settings = settings;
    this.config = config;
    this.specificationVersion = "v1";
  }
  // Per-call image limit: explicit setting, then the per-model table,
  // then a conservative default of 1.
  get maxImagesPerCall() {
    var _a, _b;
    return (_b = (_a = this.settings.maxImagesPerCall) != null ? _a : modelMaxImagesPerCall[this.modelId]) != null ? _b : 1;
  }
  get provider() {
    return this.config.provider;
  }
  // Generates images and returns them as base64 strings, with warnings
  // for settings this API does not support (aspectRatio, seed).
  async doGenerate({
    prompt,
    n,
    size,
    aspectRatio,
    seed,
    providerOptions,
    headers,
    abortSignal
  }) {
    var _a, _b, _c, _d;
    const warnings = [];
    if (aspectRatio != null) {
      warnings.push({
        type: "unsupported-setting",
        setting: "aspectRatio",
        details: "This model does not support aspect ratio. Use `size` instead."
      });
    }
    if (seed != null) {
      warnings.push({ type: "unsupported-setting", setting: "seed" });
    }
    // _internal.currentDate is a test seam for deterministic timestamps.
    const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
    const { value: response, responseHeaders } = await (0, import_provider_utils6.postJsonToApi)({
      url: this.config.url({
        path: "/images/generations",
        modelId: this.modelId
      }),
      headers: (0, import_provider_utils6.combineHeaders)(this.config.headers(), headers),
      body: {
        model: this.modelId,
        prompt,
        n,
        size,
        ...(_d = providerOptions.openai) != null ? _d : {},
        // request base64 unless the model always returns it (gpt-image-1)
        ...!hasDefaultResponseFormat.has(this.modelId) ? { response_format: "b64_json" } : {}
      },
      failedResponseHandler: openaiFailedResponseHandler,
      successfulResponseHandler: (0, import_provider_utils6.createJsonResponseHandler)(
        openaiImageResponseSchema
      ),
      abortSignal,
      fetch: this.config.fetch
    });
    return {
      images: response.data.map((item) => item.b64_json),
      warnings,
      response: {
        timestamp: currentDate,
        modelId: this.modelId,
        headers: responseHeaders
      }
    };
  }
};
1623
// Image generation response: one base64 payload per generated image.
var openaiImageResponseSchema = (() => {
  const { z } = import_zod5;
  return z.object({
    data: z.array(z.object({ b64_json: z.string() }))
  });
})();
1626
+
1627
+ // src/openai-transcription-model.ts
1628
+ var import_provider_utils7 = require("@ai-sdk/provider-utils");
1629
+ var import_zod6 = require("zod");
1630
// Options accepted under `providerOptions.openai` for transcription calls.
// Defaults: temperature 0, segment-level timestamp granularity.
var openAIProviderOptionsSchema = (() => {
  const { z } = import_zod6;
  return z.object({
    include: z.array(z.string()).nullish(),
    language: z.string().nullish(),
    prompt: z.string().nullish(),
    temperature: z.number().min(0).max(1).nullish().default(0),
    timestampGranularities: z.array(z.enum(["word", "segment"])).nullish().default(["segment"])
  });
})();
1637
// Whisper reports the detected language as a lowercase English name; the
// AI SDK surfaces ISO-639-1 codes, so map between the two representations.
var languageMap = Object.fromEntries([
  ["afrikaans", "af"],
  ["arabic", "ar"],
  ["armenian", "hy"],
  ["azerbaijani", "az"],
  ["belarusian", "be"],
  ["bosnian", "bs"],
  ["bulgarian", "bg"],
  ["catalan", "ca"],
  ["chinese", "zh"],
  ["croatian", "hr"],
  ["czech", "cs"],
  ["danish", "da"],
  ["dutch", "nl"],
  ["english", "en"],
  ["estonian", "et"],
  ["finnish", "fi"],
  ["french", "fr"],
  ["galician", "gl"],
  ["german", "de"],
  ["greek", "el"],
  ["hebrew", "he"],
  ["hindi", "hi"],
  ["hungarian", "hu"],
  ["icelandic", "is"],
  ["indonesian", "id"],
  ["italian", "it"],
  ["japanese", "ja"],
  ["kannada", "kn"],
  ["kazakh", "kk"],
  ["korean", "ko"],
  ["latvian", "lv"],
  ["lithuanian", "lt"],
  ["macedonian", "mk"],
  ["malay", "ms"],
  ["marathi", "mr"],
  ["maori", "mi"],
  ["nepali", "ne"],
  ["norwegian", "no"],
  ["persian", "fa"],
  ["polish", "pl"],
  ["portuguese", "pt"],
  ["romanian", "ro"],
  ["russian", "ru"],
  ["serbian", "sr"],
  ["slovak", "sk"],
  ["slovenian", "sl"],
  ["spanish", "es"],
  ["swahili", "sw"],
  ["swedish", "sv"],
  ["tagalog", "tl"],
  ["tamil", "ta"],
  ["thai", "th"],
  ["turkish", "tr"],
  ["ukrainian", "uk"],
  ["urdu", "ur"],
  ["vietnamese", "vi"],
  ["welsh", "cy"]
]);
1696
// OpenAI audio transcription model (/audio/transcriptions), v1 spec.
var OpenAITranscriptionModel = class {
  constructor(modelId, config) {
    this.modelId = modelId;
    this.config = config;
    this.specificationVersion = "v1";
  }
  get provider() {
    return this.config.provider;
  }
  // Builds the multipart form body: the audio file plus any parsed
  // provider options (include, language, prompt, temperature,
  // timestamp granularities).
  getArgs({
    audio,
    mediaType,
    providerOptions
  }) {
    var _a, _b, _c, _d, _e;
    const warnings = [];
    const openAIOptions = (0, import_provider_utils7.parseProviderOptions)({
      provider: "openai",
      providerOptions,
      schema: openAIProviderOptionsSchema
    });
    const formData = new FormData();
    // Audio may arrive as raw bytes or a base64 string; normalize to a Blob.
    const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([(0, import_provider_utils7.convertBase64ToUint8Array)(audio)]);
    formData.append("model", this.modelId);
    formData.append("file", new File([blob], "audio", { type: mediaType }));
    if (openAIOptions) {
      // Null options are dropped so only explicitly-set fields are sent.
      const transcriptionModelOptions = {
        include: (_a = openAIOptions.include) != null ? _a : void 0,
        language: (_b = openAIOptions.language) != null ? _b : void 0,
        prompt: (_c = openAIOptions.prompt) != null ? _c : void 0,
        temperature: (_d = openAIOptions.temperature) != null ? _d : void 0,
        timestamp_granularities: (_e = openAIOptions.timestampGranularities) != null ? _e : void 0
      };
      for (const key in transcriptionModelOptions) {
        const value = transcriptionModelOptions[key];
        if (value !== void 0) {
          // NOTE(review): array values are serialized via String(...) as a
          // comma-joined string — confirm the API accepts that encoding.
          formData.append(key, String(value));
        }
      }
    }
    return {
      formData,
      warnings
    };
  }
  // Transcribes the audio; maps word timestamps to AI SDK segments and the
  // detected language name to an ISO-639-1 code.
  async doGenerate(options) {
    var _a, _b, _c, _d, _e, _f;
    // _internal.currentDate is a test seam for deterministic timestamps.
    const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
    const { formData, warnings } = this.getArgs(options);
    const {
      value: response,
      responseHeaders,
      rawValue: rawResponse
    } = await (0, import_provider_utils7.postFormDataToApi)({
      url: this.config.url({
        path: "/audio/transcriptions",
        modelId: this.modelId
      }),
      headers: (0, import_provider_utils7.combineHeaders)(this.config.headers(), options.headers),
      formData,
      failedResponseHandler: openaiFailedResponseHandler,
      successfulResponseHandler: (0, import_provider_utils7.createJsonResponseHandler)(
        openaiTranscriptionResponseSchema
      ),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
    // Unrecognized language names yield undefined rather than a bad code.
    const language = response.language != null && response.language in languageMap ? languageMap[response.language] : void 0;
    return {
      text: response.text,
      segments: (_e = (_d = response.words) == null ? void 0 : _d.map((word) => ({
        text: word.word,
        startSecond: word.start,
        endSecond: word.end
      }))) != null ? _e : [],
      language,
      durationInSeconds: (_f = response.duration) != null ? _f : void 0,
      warnings,
      response: {
        timestamp: currentDate,
        modelId: this.modelId,
        headers: responseHeaders,
        body: rawResponse
      }
    };
  }
};
1783
// /audio/transcriptions response: text plus optional language, duration,
// and word-level timestamps.
var openaiTranscriptionResponseSchema = (() => {
  const { z } = import_zod6;
  const word = z.object({
    word: z.string(),
    start: z.number(),
    end: z.number()
  });
  return z.object({
    text: z.string(),
    language: z.string().nullish(),
    duration: z.number().nullish(),
    words: z.array(word).nullish()
  });
})();
1795
+
1796
+ // src/responses/openai-responses-language-model.ts
1797
+ var import_provider_utils9 = require("@ai-sdk/provider-utils");
1798
+ var import_zod7 = require("zod");
1799
+
1800
+ // src/responses/convert-to-openai-responses-messages.ts
1801
+ var import_provider7 = require("@ai-sdk/provider");
1802
+ var import_provider_utils8 = require("@ai-sdk/provider-utils");
1803
// Converts an AI SDK prompt into the OpenAI Responses API input format.
// Returns { messages, warnings }. System messages are emitted with the
// "system" or "developer" role, or dropped with a warning, depending on
// systemMessageMode.
function convertToOpenAIResponsesMessages({
  prompt,
  systemMessageMode
}) {
  const messages = [];
  const warnings = [];
  for (const { role, content } of prompt) {
    switch (role) {
      case "system": {
        switch (systemMessageMode) {
          case "system": {
            messages.push({ role: "system", content });
            break;
          }
          case "developer": {
            messages.push({ role: "developer", content });
            break;
          }
          case "remove": {
            warnings.push({
              type: "other",
              message: "system messages are removed for this model"
            });
            break;
          }
          default: {
            const _exhaustiveCheck = systemMessageMode;
            throw new Error(
              `Unsupported system message mode: ${_exhaustiveCheck}`
            );
          }
        }
        break;
      }
      case "user": {
        messages.push({
          role: "user",
          content: content.map((part, index) => {
            var _a, _b, _c, _d;
            switch (part.type) {
              case "text": {
                return { type: "input_text", text: part.text };
              }
              case "image": {
                return {
                  type: "input_image",
                  // URLs pass through; raw bytes are inlined as a data URL
                  // (mime type defaults to image/jpeg when absent).
                  image_url: part.image instanceof URL ? part.image.toString() : `data:${(_a = part.mimeType) != null ? _a : "image/jpeg"};base64,${(0, import_provider_utils8.convertUint8ArrayToBase64)(part.image)}`,
                  // OpenAI specific extension: image detail
                  detail: (_c = (_b = part.providerMetadata) == null ? void 0 : _b.openai) == null ? void 0 : _c.imageDetail
                };
              }
              case "file": {
                if (part.data instanceof URL) {
                  throw new import_provider7.UnsupportedFunctionalityError({
                    functionality: "File URLs in user messages"
                  });
                }
                switch (part.mimeType) {
                  case "application/pdf": {
                    return {
                      type: "input_file",
                      // synthesize a filename from the part index if missing
                      filename: (_d = part.filename) != null ? _d : `part-${index}.pdf`,
                      // NOTE(review): assumes part.data is already base64 —
                      // confirm against the caller's file handling.
                      file_data: `data:application/pdf;base64,${part.data}`
                    };
                  }
                  default: {
                    throw new import_provider7.UnsupportedFunctionalityError({
                      functionality: "Only PDF files are supported in user messages"
                    });
                  }
                }
              }
            }
          })
        });
        break;
      }
      case "assistant": {
        // Assistant content fans out: text parts become output messages,
        // tool calls become standalone function_call items.
        for (const part of content) {
          switch (part.type) {
            case "text": {
              messages.push({
                role: "assistant",
                content: [{ type: "output_text", text: part.text }]
              });
              break;
            }
            case "tool-call": {
              messages.push({
                type: "function_call",
                call_id: part.toolCallId,
                name: part.toolName,
                arguments: JSON.stringify(part.args)
              });
              break;
            }
          }
        }
        break;
      }
      case "tool": {
        for (const part of content) {
          messages.push({
            type: "function_call_output",
            call_id: part.toolCallId,
            output: JSON.stringify(part.result)
          });
        }
        break;
      }
      default: {
        const _exhaustiveCheck = role;
        throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
      }
    }
  }
  return { messages, warnings };
}
1921
+
1922
+ // src/responses/map-openai-responses-finish-reason.ts
1923
// Maps the Responses API incomplete-reason onto an AI SDK finish reason.
// A missing reason means normal completion ("stop"), promoted to
// "tool-calls" when the response contained tool calls.
function mapOpenAIResponseFinishReason({
  finishReason,
  hasToolCalls
}) {
  if (finishReason == null) {
    return hasToolCalls ? "tool-calls" : "stop";
  }
  if (finishReason === "max_output_tokens") {
    return "length";
  }
  if (finishReason === "content_filter") {
    return "content-filter";
  }
  return hasToolCalls ? "tool-calls" : "unknown";
}
1939
+
1940
+ // src/responses/openai-responses-prepare-tools.ts
1941
+ var import_provider8 = require("@ai-sdk/provider");
1942
/**
 * Translates SDK tool definitions and the tool-choice setting into the
 * OpenAI Responses API `tools` / `tool_choice` request fields.
 *
 * Function tools are passed through (with `strict` applied when requested);
 * the provider-defined tools openai.file_search, openai.web_search_preview
 * and openai.code_interpreter are mapped to their wire formats. Anything
 * else produces an "unsupported-tool" warning instead of an error.
 *
 * Returns `{ tools, tool_choice, toolWarnings }`; `tools`/`tool_choice`
 * are `undefined` when no tools were configured.
 */
function prepareResponsesTools({
  mode,
  strict
}) {
  const toolWarnings = [];
  const definedTools = mode.tools != null && mode.tools.length ? mode.tools : void 0;
  // No tools configured: nothing to send.
  if (definedTools == null) {
    return { tools: void 0, tool_choice: void 0, toolWarnings };
  }
  const mappedTools = [];
  for (const tool of definedTools) {
    if (tool.type === "function") {
      mappedTools.push({
        type: "function",
        name: tool.name,
        description: tool.description,
        parameters: tool.parameters,
        // `strict` is only ever sent as `true`; otherwise omitted.
        strict: strict ? true : void 0
      });
    } else if (tool.type === "provider-defined" && tool.id === "openai.file_search") {
      mappedTools.push({
        type: "file_search",
        vector_store_ids: tool.args.vectorStoreIds,
        max_num_results: tool.args.maxNumResults,
        ranking: tool.args.ranking,
        filters: tool.args.filters
      });
    } else if (tool.type === "provider-defined" && tool.id === "openai.web_search_preview") {
      mappedTools.push({
        type: "web_search_preview",
        search_context_size: tool.args.searchContextSize,
        user_location: tool.args.userLocation
      });
    } else if (tool.type === "provider-defined" && tool.id === "openai.code_interpreter") {
      mappedTools.push({
        type: "code_interpreter",
        container: tool.args.container
      });
    } else {
      // Unknown tool type or unknown provider-defined tool id.
      toolWarnings.push({ type: "unsupported-tool", tool });
    }
  }
  const toolChoice = mode.toolChoice;
  if (toolChoice == null) {
    return { tools: mappedTools, tool_choice: void 0, toolWarnings };
  }
  const choiceType = toolChoice.type;
  if (choiceType === "auto" || choiceType === "none" || choiceType === "required") {
    return { tools: mappedTools, tool_choice: choiceType, toolWarnings };
  }
  if (choiceType === "tool") {
    const chosenName = toolChoice.toolName;
    // Built-in tools are selected by bare type, not by function name.
    if (chosenName === "web_search_preview" || chosenName === "code_interpreter") {
      return {
        tools: mappedTools,
        tool_choice: { type: chosenName },
        toolWarnings
      };
    }
    return {
      tools: mappedTools,
      tool_choice: { type: "function", name: chosenName },
      toolWarnings
    };
  }
  throw new import_provider8.UnsupportedFunctionalityError({
    functionality: `Unsupported tool choice type: ${choiceType}`
  });
}
2044
+
2045
+ // src/responses/openai-responses-language-model.ts
2046
// Language model for the OpenAI Responses API (/responses endpoint).
// Fix: removed three leftover debug `console.log` statements in getArgs that
// dumped provider metadata, parsed options, and the full request body to the
// console on every call.
var OpenAIResponsesLanguageModel = class {
  // `config` supplies provider name, url(), headers(), fetch and
  // optionally generateId (used for source ids).
  constructor(modelId, config) {
    this.specificationVersion = "v1";
    this.defaultObjectGenerationMode = "json";
    this.supportsStructuredOutputs = true;
    this.modelId = modelId;
    this.config = config;
  }
  get provider() {
    return this.config.provider;
  }
  // Builds the /responses request body plus warnings for settings the
  // Responses API does not support (topK, seed, penalties, stopSequences,
  // and temperature/topP on reasoning models).
  getArgs({
    mode,
    maxTokens,
    temperature,
    stopSequences,
    topP,
    topK,
    presencePenalty,
    frequencyPenalty,
    seed,
    prompt,
    providerMetadata,
    responseFormat
  }) {
    var _a, _b, _c;
    const warnings = [];
    const modelConfig = getResponsesModelConfig(this.modelId);
    const type = mode.type;
    if (topK != null) {
      warnings.push({
        type: "unsupported-setting",
        setting: "topK"
      });
    }
    if (seed != null) {
      warnings.push({
        type: "unsupported-setting",
        setting: "seed"
      });
    }
    if (presencePenalty != null) {
      warnings.push({
        type: "unsupported-setting",
        setting: "presencePenalty"
      });
    }
    if (frequencyPenalty != null) {
      warnings.push({
        type: "unsupported-setting",
        setting: "frequencyPenalty"
      });
    }
    if (stopSequences != null) {
      warnings.push({
        type: "unsupported-setting",
        setting: "stopSequences"
      });
    }
    const { messages, warnings: messageWarnings } = convertToOpenAIResponsesMessages({
      prompt,
      systemMessageMode: modelConfig.systemMessageMode
    });
    warnings.push(...messageWarnings);
    const openaiOptions = (0, import_provider_utils9.parseProviderOptions)({
      provider: "openai",
      providerOptions: providerMetadata,
      schema: openaiResponsesProviderOptionsSchema
    });
    // Strict JSON schemas are the default unless explicitly disabled.
    const isStrict = (_a = openaiOptions == null ? void 0 : openaiOptions.strictSchemas) != null ? _a : true;
    const baseArgs = {
      model: this.modelId,
      input: messages,
      temperature: (openaiOptions == null ? void 0 : openaiOptions.forceNoTemperature) ? void 0 : temperature,
      top_p: topP,
      max_output_tokens: maxTokens,
      ...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
        text: {
          format: responseFormat.schema != null ? {
            type: "json_schema",
            strict: isStrict,
            name: (_b = responseFormat.name) != null ? _b : "response",
            description: responseFormat.description,
            schema: responseFormat.schema
          } : { type: "json_object" }
        }
      },
      // provider options:
      metadata: openaiOptions == null ? void 0 : openaiOptions.metadata,
      parallel_tool_calls: openaiOptions == null ? void 0 : openaiOptions.parallelToolCalls,
      previous_response_id: openaiOptions == null ? void 0 : openaiOptions.previousResponseId,
      store: openaiOptions == null ? void 0 : openaiOptions.store,
      user: openaiOptions == null ? void 0 : openaiOptions.user,
      instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
      // model-specific settings:
      ...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
        reasoning: {
          ...(openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
            effort: openaiOptions.reasoningEffort
          },
          ...(openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null && {
            summary: openaiOptions.reasoningSummary
          }
        }
      },
      ...modelConfig.requiredAutoTruncation && {
        truncation: "auto"
      }
    };
    if (modelConfig.isReasoningModel) {
      // Reasoning models reject sampling controls; strip and warn.
      if (baseArgs.temperature != null) {
        baseArgs.temperature = void 0;
        warnings.push({
          type: "unsupported-setting",
          setting: "temperature",
          details: "temperature is not supported for reasoning models"
        });
      }
      if (baseArgs.top_p != null) {
        baseArgs.top_p = void 0;
        warnings.push({
          type: "unsupported-setting",
          setting: "topP",
          details: "topP is not supported for reasoning models"
        });
      }
    }
    switch (type) {
      case "regular": {
        const { tools, tool_choice, toolWarnings } = prepareResponsesTools({
          mode,
          strict: isStrict
          // TODO support provider options on tools
        });
        return {
          args: {
            ...baseArgs,
            tools,
            tool_choice
          },
          warnings: [...warnings, ...toolWarnings]
        };
      }
      case "object-json": {
        return {
          args: {
            ...baseArgs,
            text: {
              format: mode.schema != null ? {
                type: "json_schema",
                strict: isStrict,
                name: (_c = mode.name) != null ? _c : "response",
                description: mode.description,
                schema: mode.schema
              } : { type: "json_object" }
            }
          },
          warnings
        };
      }
      case "object-tool": {
        return {
          args: {
            ...baseArgs,
            tool_choice: { type: "function", name: mode.tool.name },
            tools: [
              {
                type: "function",
                name: mode.tool.name,
                description: mode.tool.description,
                parameters: mode.tool.parameters,
                strict: isStrict
              }
            ]
          },
          warnings
        };
      }
      default: {
        const _exhaustiveCheck = type;
        throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
      }
    }
  }
  // Non-streaming generation: POSTs to /responses, validates the payload
  // with zod, and maps output items to text / sources / tool calls /
  // reasoning summaries.
  async doGenerate(options) {
    var _a, _b, _c, _d, _e, _f, _g;
    const { args: body, warnings } = this.getArgs(options);
    const {
      responseHeaders,
      value: response,
      rawValue: rawResponse
    } = await (0, import_provider_utils9.postJsonToApi)({
      url: this.config.url({
        path: "/responses",
        modelId: this.modelId
      }),
      headers: (0, import_provider_utils9.combineHeaders)(this.config.headers(), options.headers),
      body,
      failedResponseHandler: openaiFailedResponseHandler,
      successfulResponseHandler: (0, import_provider_utils9.createJsonResponseHandler)(
        import_zod7.z.object({
          id: import_zod7.z.string(),
          created_at: import_zod7.z.number(),
          model: import_zod7.z.string(),
          output: import_zod7.z.array(
            import_zod7.z.discriminatedUnion("type", [
              import_zod7.z.object({
                type: import_zod7.z.literal("message"),
                role: import_zod7.z.literal("assistant"),
                content: import_zod7.z.array(
                  import_zod7.z.object({
                    type: import_zod7.z.literal("output_text"),
                    text: import_zod7.z.string(),
                    annotations: import_zod7.z.array(
                      import_zod7.z.object({
                        type: import_zod7.z.literal("url_citation"),
                        start_index: import_zod7.z.number(),
                        end_index: import_zod7.z.number(),
                        url: import_zod7.z.string(),
                        title: import_zod7.z.string()
                      })
                    )
                  })
                )
              }),
              import_zod7.z.object({
                type: import_zod7.z.literal("function_call"),
                call_id: import_zod7.z.string(),
                name: import_zod7.z.string(),
                arguments: import_zod7.z.string()
              }),
              import_zod7.z.object({
                type: import_zod7.z.literal("web_search_call")
              }),
              import_zod7.z.object({
                type: import_zod7.z.literal("computer_call")
              }),
              import_zod7.z.object({
                type: import_zod7.z.literal("reasoning"),
                summary: import_zod7.z.array(
                  import_zod7.z.object({
                    type: import_zod7.z.literal("summary_text"),
                    text: import_zod7.z.string()
                  })
                )
              })
            ])
          ),
          incomplete_details: import_zod7.z.object({ reason: import_zod7.z.string() }).nullable(),
          usage: usageSchema
        })
      ),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
    const outputTextElements = response.output.filter((output) => output.type === "message").flatMap((output) => output.content).filter((content) => content.type === "output_text");
    const toolCalls = response.output.filter((output) => output.type === "function_call").map((output) => ({
      toolCallType: "function",
      toolCallId: output.call_id,
      toolName: output.name,
      args: output.arguments
    }));
    const reasoningSummary = (_b = (_a = response.output.find((item) => item.type === "reasoning")) == null ? void 0 : _a.summary) != null ? _b : null;
    return {
      text: outputTextElements.map((content) => content.text).join("\n"),
      sources: outputTextElements.flatMap(
        (content) => content.annotations.map((annotation) => {
          var _a2, _b2, _c2;
          return {
            sourceType: "url",
            id: (_c2 = (_b2 = (_a2 = this.config).generateId) == null ? void 0 : _b2.call(_a2)) != null ? _c2 : (0, import_provider_utils9.generateId)(),
            url: annotation.url,
            title: annotation.title
          };
        })
      ),
      finishReason: mapOpenAIResponseFinishReason({
        finishReason: (_c = response.incomplete_details) == null ? void 0 : _c.reason,
        hasToolCalls: toolCalls.length > 0
      }),
      toolCalls: toolCalls.length > 0 ? toolCalls : void 0,
      reasoning: reasoningSummary ? reasoningSummary.map((summary) => ({
        type: "text",
        text: summary.text
      })) : void 0,
      usage: {
        promptTokens: response.usage.input_tokens,
        completionTokens: response.usage.output_tokens
      },
      rawCall: {
        rawPrompt: void 0,
        rawSettings: {}
      },
      rawResponse: {
        headers: responseHeaders,
        body: rawResponse
      },
      request: {
        body: JSON.stringify(body)
      },
      response: {
        id: response.id,
        timestamp: new Date(response.created_at * 1e3),
        modelId: response.model
      },
      providerMetadata: {
        openai: {
          responseId: response.id,
          cachedPromptTokens: (_e = (_d = response.usage.input_tokens_details) == null ? void 0 : _d.cached_tokens) != null ? _e : null,
          reasoningTokens: (_g = (_f = response.usage.output_tokens_details) == null ? void 0 : _f.reasoning_tokens) != null ? _g : null
        }
      },
      warnings
    };
  }
  // Streaming generation: POSTs with stream:true and converts server-sent
  // Responses events into SDK stream parts via a TransformStream.
  async doStream(options) {
    const { args: body, warnings } = this.getArgs(options);
    const { responseHeaders, value: response } = await (0, import_provider_utils9.postJsonToApi)({
      url: this.config.url({
        path: "/responses",
        modelId: this.modelId
      }),
      headers: (0, import_provider_utils9.combineHeaders)(this.config.headers(), options.headers),
      body: {
        ...body,
        stream: true
      },
      failedResponseHandler: openaiFailedResponseHandler,
      successfulResponseHandler: (0, import_provider_utils9.createEventSourceResponseHandler)(
        openaiResponsesChunkSchema
      ),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
    const self = this;
    let finishReason = "unknown";
    let promptTokens = NaN;
    let completionTokens = NaN;
    let cachedPromptTokens = null;
    let reasoningTokens = null;
    let responseId = null;
    // Tool calls in flight, keyed by output_index (name/id arrive on
    // output_item.added, argument text streams in separately).
    const ongoingToolCalls = {};
    let hasToolCalls = false;
    return {
      stream: response.pipeThrough(
        new TransformStream({
          transform(chunk, controller) {
            var _a, _b, _c, _d, _e, _f, _g, _h;
            if (!chunk.success) {
              finishReason = "error";
              controller.enqueue({ type: "error", error: chunk.error });
              return;
            }
            const value = chunk.value;
            if (isResponseOutputItemAddedChunk(value)) {
              if (value.item.type === "function_call") {
                ongoingToolCalls[value.output_index] = {
                  toolName: value.item.name,
                  toolCallId: value.item.call_id
                };
                controller.enqueue({
                  type: "tool-call-delta",
                  toolCallType: "function",
                  toolCallId: value.item.call_id,
                  toolName: value.item.name,
                  argsTextDelta: value.item.arguments
                });
              }
            } else if (isResponseFunctionCallArgumentsDeltaChunk(value)) {
              const toolCall = ongoingToolCalls[value.output_index];
              if (toolCall != null) {
                controller.enqueue({
                  type: "tool-call-delta",
                  toolCallType: "function",
                  toolCallId: toolCall.toolCallId,
                  toolName: toolCall.toolName,
                  argsTextDelta: value.delta
                });
              }
            } else if (isResponseCreatedChunk(value)) {
              responseId = value.response.id;
              controller.enqueue({
                type: "response-metadata",
                id: value.response.id,
                timestamp: new Date(value.response.created_at * 1e3),
                modelId: value.response.model
              });
            } else if (isTextDeltaChunk(value)) {
              controller.enqueue({
                type: "text-delta",
                textDelta: value.delta
              });
            } else if (isResponseReasoningSummaryTextDeltaChunk(value)) {
              controller.enqueue({
                type: "reasoning",
                textDelta: value.delta
              });
            } else if (isResponseOutputItemDoneChunk(value) && value.item.type === "function_call") {
              ongoingToolCalls[value.output_index] = void 0;
              hasToolCalls = true;
              controller.enqueue({
                type: "tool-call",
                toolCallType: "function",
                toolCallId: value.item.call_id,
                toolName: value.item.name,
                args: value.item.arguments
              });
            } else if (isResponseFinishedChunk(value)) {
              finishReason = mapOpenAIResponseFinishReason({
                finishReason: (_a = value.response.incomplete_details) == null ? void 0 : _a.reason,
                hasToolCalls
              });
              promptTokens = value.response.usage.input_tokens;
              completionTokens = value.response.usage.output_tokens;
              cachedPromptTokens = (_c = (_b = value.response.usage.input_tokens_details) == null ? void 0 : _b.cached_tokens) != null ? _c : cachedPromptTokens;
              reasoningTokens = (_e = (_d = value.response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : reasoningTokens;
            } else if (isResponseAnnotationAddedChunk(value)) {
              controller.enqueue({
                type: "source",
                source: {
                  sourceType: "url",
                  id: (_h = (_g = (_f = self.config).generateId) == null ? void 0 : _g.call(_f)) != null ? _h : (0, import_provider_utils9.generateId)(),
                  url: value.annotation.url,
                  title: value.annotation.title
                }
              });
            }
          },
          flush(controller) {
            controller.enqueue({
              type: "finish",
              finishReason,
              usage: { promptTokens, completionTokens },
              ...(cachedPromptTokens != null || reasoningTokens != null) && {
                providerMetadata: {
                  openai: {
                    responseId,
                    cachedPromptTokens,
                    reasoningTokens
                  }
                }
              }
            });
          }
        })
      ),
      rawCall: {
        rawPrompt: void 0,
        rawSettings: {}
      },
      rawResponse: { headers: responseHeaders },
      request: { body: JSON.stringify(body) },
      warnings
    };
  }
};
2505
// Token usage reported by the Responses API; the `*_details` objects are
// optional and their counters may themselves be null/undefined.
var usageSchema = import_zod7.z.object({
  input_tokens: import_zod7.z.number(),
  input_tokens_details: import_zod7.z.object({ cached_tokens: import_zod7.z.number().nullish() }).nullish(),
  output_tokens: import_zod7.z.number(),
  output_tokens_details: import_zod7.z.object({ reasoning_tokens: import_zod7.z.number().nullish() }).nullish()
});
// Incremental assistant text output.
var textDeltaChunkSchema = import_zod7.z.object({
  type: import_zod7.z.literal("response.output_text.delta"),
  delta: import_zod7.z.string()
});
// Terminal stream event carrying final usage; `incomplete_details.reason`
// is present when the response stopped early.
var responseFinishedChunkSchema = import_zod7.z.object({
  type: import_zod7.z.enum(["response.completed", "response.incomplete"]),
  response: import_zod7.z.object({
    incomplete_details: import_zod7.z.object({ reason: import_zod7.z.string() }).nullish(),
    usage: usageSchema
  })
});
// First event of a stream; carries response id, model, and creation time.
var responseCreatedChunkSchema = import_zod7.z.object({
  type: import_zod7.z.literal("response.created"),
  response: import_zod7.z.object({
    id: import_zod7.z.string(),
    created_at: import_zod7.z.number(),
    model: import_zod7.z.string()
  })
});
// An output item has finished; function_call items include the complete
// arguments string.
var responseOutputItemDoneSchema = import_zod7.z.object({
  type: import_zod7.z.literal("response.output_item.done"),
  output_index: import_zod7.z.number(),
  item: import_zod7.z.discriminatedUnion("type", [
    import_zod7.z.object({
      type: import_zod7.z.literal("message")
    }),
    import_zod7.z.object({
      type: import_zod7.z.literal("function_call"),
      id: import_zod7.z.string(),
      call_id: import_zod7.z.string(),
      name: import_zod7.z.string(),
      arguments: import_zod7.z.string(),
      status: import_zod7.z.literal("completed")
    })
  ])
});
// Incremental tool-call argument text, correlated by output_index.
var responseFunctionCallArgumentsDeltaSchema = import_zod7.z.object({
  type: import_zod7.z.literal("response.function_call_arguments.delta"),
  item_id: import_zod7.z.string(),
  output_index: import_zod7.z.number(),
  delta: import_zod7.z.string()
});
// A new output item has started; function_call items announce the tool
// name and call id used for subsequent argument deltas.
var responseOutputItemAddedSchema = import_zod7.z.object({
  type: import_zod7.z.literal("response.output_item.added"),
  output_index: import_zod7.z.number(),
  item: import_zod7.z.discriminatedUnion("type", [
    import_zod7.z.object({
      type: import_zod7.z.literal("message")
    }),
    import_zod7.z.object({
      type: import_zod7.z.literal("function_call"),
      id: import_zod7.z.string(),
      call_id: import_zod7.z.string(),
      name: import_zod7.z.string(),
      arguments: import_zod7.z.string()
    })
  ])
});
// A URL citation attached to streamed output text.
var responseAnnotationAddedSchema = import_zod7.z.object({
  type: import_zod7.z.literal("response.output_text.annotation.added"),
  annotation: import_zod7.z.object({
    type: import_zod7.z.literal("url_citation"),
    url: import_zod7.z.string(),
    title: import_zod7.z.string()
  })
});
// Incremental reasoning-summary text.
var responseReasoningSummaryTextDeltaSchema = import_zod7.z.object({
  type: import_zod7.z.literal("response.reasoning_summary_text.delta"),
  item_id: import_zod7.z.string(),
  output_index: import_zod7.z.number(),
  summary_index: import_zod7.z.number(),
  delta: import_zod7.z.string()
});
// Union of all recognized stream events; the trailing passthrough object
// accepts unknown event types so new server events do not break parsing.
var openaiResponsesChunkSchema = import_zod7.z.union([
  textDeltaChunkSchema,
  responseFinishedChunkSchema,
  responseCreatedChunkSchema,
  responseOutputItemDoneSchema,
  responseFunctionCallArgumentsDeltaSchema,
  responseOutputItemAddedSchema,
  responseAnnotationAddedSchema,
  responseReasoningSummaryTextDeltaSchema,
  import_zod7.z.object({ type: import_zod7.z.string() }).passthrough()
  // fallback for unknown chunks
]);
2596
// True for incremental assistant text output events.
function isTextDeltaChunk(chunk) {
  const { type } = chunk;
  return type === "response.output_text.delta";
}
2599
// True when an output item (message / function_call) has finished.
function isResponseOutputItemDoneChunk(chunk) {
  const { type } = chunk;
  return type === "response.output_item.done";
}
2602
// True for terminal stream events (completed normally or stopped early).
function isResponseFinishedChunk(chunk) {
  const { type } = chunk;
  return type === "response.completed" || type === "response.incomplete";
}
2605
// True for the initial event that carries response metadata.
function isResponseCreatedChunk(chunk) {
  const { type } = chunk;
  return type === "response.created";
}
2608
// True for incremental tool-call argument text events.
function isResponseFunctionCallArgumentsDeltaChunk(chunk) {
  const { type } = chunk;
  return type === "response.function_call_arguments.delta";
}
2611
// True when a new output item starts streaming.
function isResponseOutputItemAddedChunk(chunk) {
  const { type } = chunk;
  return type === "response.output_item.added";
}
2614
// True when a URL citation annotation is attached to output text.
function isResponseAnnotationAddedChunk(chunk) {
  const { type } = chunk;
  return type === "response.output_text.annotation.added";
}
2617
// True for incremental reasoning-summary text events.
function isResponseReasoningSummaryTextDeltaChunk(chunk) {
  const { type } = chunk;
  return type === "response.reasoning_summary_text.delta";
}
2620
/**
 * Per-model behavior flags for the Responses API, keyed off the model id
 * prefix. Models starting with "o" are treated as reasoning models; the
 * legacy o1-mini / o1-preview variants cannot accept system messages at
 * all, while the rest accept them under the "developer" role.
 */
function getResponsesModelConfig(modelId) {
  if (!modelId.startsWith("o")) {
    // Non-reasoning models: regular system messages, no auto-truncation.
    return {
      isReasoningModel: false,
      systemMessageMode: "system",
      requiredAutoTruncation: false
    };
  }
  const isLegacyO1 = modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview");
  return {
    isReasoningModel: true,
    systemMessageMode: isLegacyO1 ? "remove" : "developer",
    requiredAutoTruncation: false
  };
}
2641
// Options accepted under `providerOptions.openai` for the Responses API.
// All fields are optional; see OpenAIResponsesLanguageModel.getArgs for how
// each one maps onto the request body (e.g. previousResponseId ->
// previous_response_id, strictSchemas -> json_schema strict flag).
var openaiResponsesProviderOptionsSchema = import_zod7.z.object({
  metadata: import_zod7.z.any().nullish(),
  parallelToolCalls: import_zod7.z.boolean().nullish(),
  previousResponseId: import_zod7.z.string().nullish(),
  forceNoTemperature: import_zod7.z.boolean().nullish(),
  store: import_zod7.z.boolean().nullish(),
  user: import_zod7.z.string().nullish(),
  reasoningEffort: import_zod7.z.string().nullish(),
  strictSchemas: import_zod7.z.boolean().nullish(),
  instructions: import_zod7.z.string().nullish(),
  reasoningSummary: import_zod7.z.string().nullish()
});
2653
+
2654
+ // src/openai-tools.ts
2655
+ var import_zod8 = require("zod");
2656
// The web_search_preview tool takes no client-side parameters.
var WebSearchPreviewParameters = import_zod8.z.object({});
2657
/**
 * Creates the provider-defined web_search_preview tool descriptor.
 * Both options are optional and are forwarded verbatim as tool args.
 */
function webSearchPreviewTool(options = {}) {
  const args = {
    searchContextSize: options.searchContextSize,
    userLocation: options.userLocation
  };
  return {
    type: "provider-defined",
    id: "openai.web_search_preview",
    args,
    parameters: WebSearchPreviewParameters
  };
}
2671
// Client-side parameter schema for the code_interpreter tool. `container`
// is either a container id string or a config object; a null `containerId`
// is normalized to undefined by the transform.
var CodeInterpreterParameters = import_zod8.z.object({
  container: import_zod8.z.union([
    import_zod8.z.string(),
    import_zod8.z.object({
      containerId: import_zod8.z.union([import_zod8.z.string(), import_zod8.z.null()]).transform((val) => val != null ? val : void 0),
      type: import_zod8.z.enum(["auto", "file", "code_interpreter"]),
      files: import_zod8.z.array(import_zod8.z.string())
    })
  ])
});
2681
/**
 * Creates the provider-defined code_interpreter tool descriptor.
 * `container` selects or configures the execution container.
 */
function codeInterpreterTool(options) {
  const { container } = options;
  return {
    type: "provider-defined",
    id: "openai.code_interpreter",
    args: { container },
    parameters: CodeInterpreterParameters
  };
}
2693
// Single attribute comparison used by file_search filters.
var comparisonFilterSchema = import_zod8.z.object({
  key: import_zod8.z.string(),
  type: import_zod8.z.enum(["eq", "ne", "gt", "gte", "lt", "lte"]),
  value: import_zod8.z.union([import_zod8.z.string(), import_zod8.z.number(), import_zod8.z.boolean()])
});
// and/or combination of filters; recursive via z.lazy.
var compoundFilterSchema = import_zod8.z.object({
  type: import_zod8.z.enum(["and", "or"]),
  filters: import_zod8.z.array(
    import_zod8.z.union([comparisonFilterSchema, import_zod8.z.lazy(() => compoundFilterSchema)])
  )
});
// A filter is either a single comparison or a compound of filters.
var filtersSchema = import_zod8.z.union([comparisonFilterSchema, compoundFilterSchema]);
// Arguments accepted by the file_search tool factory.
// NOTE(review): not referenced by fileSearchTool in the visible code —
// verify whether it is used elsewhere or was meant to validate its args.
var fileSearchArgsSchema = import_zod8.z.object({
  /**
   * List of vector store IDs to search through. If not provided, searches all available vector stores.
   */
  vectorStoreIds: import_zod8.z.array(import_zod8.z.string()).optional(),
  /**
   * Maximum number of search results to return. Defaults to 10.
   */
  maxNumResults: import_zod8.z.number().optional(),
  /**
   * Ranking options for the search.
   */
  ranking: import_zod8.z.object({
    ranker: import_zod8.z.enum(["auto", "default-2024-08-21"]).optional()
  }).optional(),
  /**
   * A filter to apply based on file attributes.
   */
  filters: filtersSchema.optional()
});
2725
/**
 * Creates the provider-defined file_search tool descriptor. All options
 * are optional and are forwarded verbatim as tool args.
 */
function fileSearchTool(options = {}) {
  return {
    type: "provider-defined",
    id: "openai.file_search",
    args: {
      vectorStoreIds: options.vectorStoreIds,
      maxNumResults: options.maxNumResults,
      ranking: options.ranking,
      filters: options.filters
    },
    parameters: import_zod8.z.object({})
  };
}
2743
// Provider-defined tool factories exposed to consumers (openai.tools.*).
var openaiTools = {
  webSearchPreview: webSearchPreviewTool,
  codeInterpreter: codeInterpreterTool,
  fileSearch: fileSearchTool
};
2748
+
2749
+ // src/openai-speech-model.ts
2750
+ var import_provider_utils10 = require("@ai-sdk/provider-utils");
2751
+ var import_zod9 = require("zod");
2752
// Options accepted under `providerOptions.openai` for speech generation:
// `instructions` (voice-style guidance) and `speed` (0.25-4, default 1).
var OpenAIProviderOptionsSchema = import_zod9.z.object({
  instructions: import_zod9.z.string().nullish(),
  speed: import_zod9.z.number().min(0.25).max(4).default(1).nullish()
});
2756
// Speech model for the OpenAI /audio/speech endpoint.
// Fix: getArgs previously looped over a freshly created EMPTY object
// (`speechModelOptions`) instead of the parsed provider options, so values
// supplied via `providerOptions.openai` were silently discarded. The loop
// now applies the parsed options as fallbacks (explicit call settings win).
var OpenAISpeechModel = class {
  // `config` supplies provider name, url(), headers(), fetch, and an
  // optional _internal.currentDate hook for testing.
  constructor(modelId, config) {
    this.modelId = modelId;
    this.config = config;
    this.specificationVersion = "v1";
  }
  get provider() {
    return this.config.provider;
  }
  // Builds the /audio/speech request body plus warnings for unsupported
  // output formats (falls back to mp3).
  getArgs({
    text,
    voice = "alloy",
    outputFormat = "mp3",
    speed,
    instructions,
    providerOptions
  }) {
    const warnings = [];
    const openAIOptions = (0, import_provider_utils10.parseProviderOptions)({
      provider: "openai",
      providerOptions,
      schema: OpenAIProviderOptionsSchema
    });
    const requestBody = {
      model: this.modelId,
      input: text,
      voice,
      response_format: "mp3",
      speed,
      instructions
    };
    if (outputFormat) {
      if (["mp3", "opus", "aac", "flac", "wav", "pcm"].includes(outputFormat)) {
        requestBody.response_format = outputFormat;
      } else {
        warnings.push({
          type: "unsupported-setting",
          setting: "outputFormat",
          details: `Unsupported output format: ${outputFormat}. Using mp3 instead.`
        });
      }
    }
    if (openAIOptions) {
      // Apply parsed provider options (instructions, speed) only where the
      // direct call options left the field unset; skip null/undefined so
      // nullish schema results don't inject nulls into the request body.
      for (const key in openAIOptions) {
        const value = openAIOptions[key];
        if (value != null && requestBody[key] === void 0) {
          requestBody[key] = value;
        }
      }
    }
    return {
      requestBody,
      warnings
    };
  }
  // POSTs the request and returns the binary audio plus request/response
  // metadata for observability.
  async doGenerate(options) {
    var _a, _b, _c;
    const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
    const { requestBody, warnings } = this.getArgs(options);
    const {
      value: audio,
      responseHeaders,
      rawValue: rawResponse
    } = await (0, import_provider_utils10.postJsonToApi)({
      url: this.config.url({
        path: "/audio/speech",
        modelId: this.modelId
      }),
      headers: (0, import_provider_utils10.combineHeaders)(this.config.headers(), options.headers),
      body: requestBody,
      failedResponseHandler: openaiFailedResponseHandler,
      successfulResponseHandler: (0, import_provider_utils10.createBinaryResponseHandler)(),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
    return {
      audio,
      warnings,
      request: {
        body: JSON.stringify(requestBody)
      },
      response: {
        timestamp: currentDate,
        modelId: this.modelId,
        headers: responseHeaders,
        body: rawResponse
      }
    };
  }
};
2847
+
2848
// src/openai-provider.ts
/**
 * Creates an OpenAI provider instance.
 *
 * @param {object} [options] - Provider configuration.
 * @param {string} [options.baseURL] - API base URL; trailing slash is stripped.
 *   Defaults to "https://api.openai.com/v1".
 * @param {string} [options.apiKey] - API key; falls back to the
 *   OPENAI_API_KEY environment variable via loadApiKey.
 * @param {string} [options.name] - Provider name used in model metadata
 *   (e.g. "openai.chat"); defaults to "openai".
 * @param {"strict"|"compatible"} [options.compatibility] - "strict" for the
 *   official OpenAI API, "compatible" (default) for OpenAI-compatible servers.
 * @param {string} [options.organization] - Sent as the OpenAI-Organization header.
 * @param {string} [options.project] - Sent as the OpenAI-Project header.
 * @param {object} [options.headers] - Extra headers merged into every request.
 * @param {Function} [options.fetch] - Custom fetch implementation.
 * @returns A callable provider: `provider(modelId, settings)` returns a
 *   language model; named factories (chat, completion, responses, embedding,
 *   image, transcription, speech) are attached as properties.
 */
function createOpenAI(options = {}) {
  const strippedBaseURL = (0, import_provider_utils11.withoutTrailingSlash)(options.baseURL);
  const baseURL = strippedBaseURL != null ? strippedBaseURL : "https://api.openai.com/v1";
  const compatibility = options.compatibility != null ? options.compatibility : "compatible";
  const providerName = options.name != null ? options.name : "openai";
  // Headers are computed per-request so a late-set OPENAI_API_KEY is picked up.
  const getHeaders = () => ({
    Authorization: `Bearer ${(0, import_provider_utils11.loadApiKey)({
      apiKey: options.apiKey,
      environmentVariableName: "OPENAI_API_KEY",
      description: "OpenAI"
    })}`,
    "OpenAI-Organization": options.organization,
    "OpenAI-Project": options.project,
    ...options.headers
  });
  const createChatModel = (modelId, settings = {}) => new OpenAIChatLanguageModel(modelId, settings, {
    provider: `${providerName}.chat`,
    url: ({ path }) => `${baseURL}${path}`,
    headers: getHeaders,
    compatibility,
    fetch: options.fetch
  });
  const createCompletionModel = (modelId, settings = {}) => new OpenAICompletionLanguageModel(modelId, settings, {
    provider: `${providerName}.completion`,
    url: ({ path }) => `${baseURL}${path}`,
    headers: getHeaders,
    compatibility,
    fetch: options.fetch
  });
  const createEmbeddingModel = (modelId, settings = {}) => new OpenAIEmbeddingModel(modelId, settings, {
    provider: `${providerName}.embedding`,
    url: ({ path }) => `${baseURL}${path}`,
    headers: getHeaders,
    fetch: options.fetch
  });
  const createImageModel = (modelId, settings = {}) => new OpenAIImageModel(modelId, settings, {
    provider: `${providerName}.image`,
    url: ({ path }) => `${baseURL}${path}`,
    headers: getHeaders,
    fetch: options.fetch
  });
  const createTranscriptionModel = (modelId) => new OpenAITranscriptionModel(modelId, {
    provider: `${providerName}.transcription`,
    url: ({ path }) => `${baseURL}${path}`,
    headers: getHeaders,
    fetch: options.fetch
  });
  const createSpeechModel = (modelId) => new OpenAISpeechModel(modelId, {
    provider: `${providerName}.speech`,
    url: ({ path }) => `${baseURL}${path}`,
    headers: getHeaders,
    fetch: options.fetch
  });
  // "gpt-3.5-turbo-instruct" is the one completion-only model; everything
  // else routes to the chat API.
  const createLanguageModel = (modelId, settings) => {
    if (modelId === "gpt-3.5-turbo-instruct") {
      return createCompletionModel(
        modelId,
        settings
      );
    }
    return createChatModel(modelId, settings);
  };
  const createResponsesModel = (modelId) => {
    return new OpenAIResponsesLanguageModel(modelId, {
      provider: `${providerName}.responses`,
      url: ({ path }) => `${baseURL}${path}`,
      headers: getHeaders,
      fetch: options.fetch
    });
  };
  const provider = function(modelId, settings) {
    // FIX: this guard previously sat inside the `createLanguageModel` arrow
    // function. Arrows have no own `new.target` — it lexically resolved to
    // createOpenAI's `new.target`, which is undefined on normal calls, so
    // `new provider(...)` was never detected. The check belongs here, in the
    // only ordinary function a caller can actually invoke with `new`.
    if (new.target) {
      throw new Error(
        "The OpenAI model function cannot be called with the new keyword."
      );
    }
    return createLanguageModel(modelId, settings);
  };
  provider.languageModel = createLanguageModel;
  provider.chat = createChatModel;
  provider.completion = createCompletionModel;
  provider.responses = createResponsesModel;
  provider.embedding = createEmbeddingModel;
  provider.textEmbedding = createEmbeddingModel;
  provider.textEmbeddingModel = createEmbeddingModel;
  provider.image = createImageModel;
  provider.imageModel = createImageModel;
  provider.transcription = createTranscriptionModel;
  provider.transcriptionModel = createTranscriptionModel;
  provider.speech = createSpeechModel;
  provider.speechModel = createSpeechModel;
  provider.tools = openaiTools;
  return provider;
}
2943
// Default provider instance for the official OpenAI API. "strict"
// compatibility enables OpenAI-specific request fields that generic
// OpenAI-compatible servers may reject.
var openai = createOpenAI({
  compatibility: "strict"
  // strict for OpenAI API
});
// Annotate the CommonJS export names for ESM import in node:
// NOTE: the `0 && (...)` expression is dead code at runtime, but Node's
// cjs-module-lexer scans it textually to expose named ESM exports
// (`import { openai } from ...`). Do not reformat or remove it.
0 && (module.exports = {
  createOpenAI,
  openai
});
//# sourceMappingURL=index.js.map