@ai-sdk/openai 2.0.0-canary.2 → 2.0.0-canary.20

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
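The headline change for consumers in this range: chat-model settings (logitBias, logprobs, user, parallelToolCalls, reasoningEffort, maxCompletionTokens, store, metadata, prediction, structuredOutputs) move from the model constructor into per-call provider options, validated by the new openaiProviderOptions zod schema, while the legacy useLegacyFunctionCalling and simulateStreaming settings are removed. A minimal before/after sketch, assuming the AI SDK v5 canary generateText API (the call shape is an assumption, not part of this diff):

```ts
// Sketch only: assumes the AI SDK v5 canary APIs (createOpenAI, generateText).
import { createOpenAI } from '@ai-sdk/openai';
import { generateText } from 'ai';

const openai = createOpenAI({ apiKey: process.env.OPENAI_API_KEY });

// Before (canary.2): options were constructor settings on the model:
//   openai('o3-mini', { reasoningEffort: 'low', user: 'user-123' })
// After (canary.20): the model takes no settings object; options travel
// per call under providerOptions.openai and are parsed against the new
// openaiProviderOptions schema shown in the diff below.
const { text } = await generateText({
  model: openai('o3-mini'),
  prompt: 'Summarize this diff.',
  providerOptions: {
    openai: { reasoningEffort: 'low', user: 'user-123' },
  },
});
```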
package/dist/index.js CHANGED
@@ -26,19 +26,18 @@ __export(src_exports, {
  module.exports = __toCommonJS(src_exports);

  // src/openai-provider.ts
- var import_provider_utils9 = require("@ai-sdk/provider-utils");
+ var import_provider_utils10 = require("@ai-sdk/provider-utils");

  // src/openai-chat-language-model.ts
  var import_provider3 = require("@ai-sdk/provider");
  var import_provider_utils3 = require("@ai-sdk/provider-utils");
- var import_zod2 = require("zod");
+ var import_zod3 = require("zod");

  // src/convert-to-openai-chat-messages.ts
  var import_provider = require("@ai-sdk/provider");
  var import_provider_utils = require("@ai-sdk/provider-utils");
  function convertToOpenAIChatMessages({
  prompt,
- useLegacyFunctionCalling = false,
  systemMessageMode = "system"
  }) {
  const messages = [];
@@ -79,55 +78,71 @@ function convertToOpenAIChatMessages({
  messages.push({
  role: "user",
  content: content.map((part, index) => {
- var _a, _b, _c, _d;
+ var _a, _b, _c;
  switch (part.type) {
  case "text": {
  return { type: "text", text: part.text };
  }
- case "image": {
- return {
- type: "image_url",
- image_url: {
- url: part.image instanceof URL ? part.image.toString() : `data:${(_a = part.mimeType) != null ? _a : "image/jpeg"};base64,${(0, import_provider_utils.convertUint8ArrayToBase64)(part.image)}`,
- // OpenAI specific extension: image detail
- detail: (_c = (_b = part.providerOptions) == null ? void 0 : _b.openai) == null ? void 0 : _c.imageDetail
- }
- };
- }
  case "file": {
- if (part.data instanceof URL) {
- throw new import_provider.UnsupportedFunctionalityError({
- functionality: "'File content parts with URL data' functionality not supported."
- });
- }
- switch (part.mimeType) {
- case "audio/wav": {
- return {
- type: "input_audio",
- input_audio: { data: part.data, format: "wav" }
- };
- }
- case "audio/mp3":
- case "audio/mpeg": {
- return {
- type: "input_audio",
- input_audio: { data: part.data, format: "mp3" }
- };
+ if (part.mediaType.startsWith("image/")) {
+ const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
+ return {
+ type: "image_url",
+ image_url: {
+ url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${(0, import_provider_utils.convertToBase64)(part.data)}`,
+ // OpenAI specific extension: image detail
+ detail: (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.imageDetail
+ }
+ };
+ } else if (part.mediaType.startsWith("audio/")) {
+ if (part.data instanceof URL) {
+ throw new import_provider.UnsupportedFunctionalityError({
+ functionality: "audio file parts with URLs"
+ });
  }
- case "application/pdf": {
- return {
- type: "file",
- file: {
- filename: (_d = part.filename) != null ? _d : `part-${index}.pdf`,
- file_data: `data:application/pdf;base64,${part.data}`
- }
- };
+ switch (part.mediaType) {
+ case "audio/wav": {
+ return {
+ type: "input_audio",
+ input_audio: {
+ data: (0, import_provider_utils.convertToBase64)(part.data),
+ format: "wav"
+ }
+ };
+ }
+ case "audio/mp3":
+ case "audio/mpeg": {
+ return {
+ type: "input_audio",
+ input_audio: {
+ data: (0, import_provider_utils.convertToBase64)(part.data),
+ format: "mp3"
+ }
+ };
+ }
+ default: {
+ throw new import_provider.UnsupportedFunctionalityError({
+ functionality: `audio content parts with media type ${part.mediaType}`
+ });
+ }
  }
- default: {
+ } else if (part.mediaType === "application/pdf") {
+ if (part.data instanceof URL) {
  throw new import_provider.UnsupportedFunctionalityError({
- functionality: `File content part type ${part.mimeType} in user messages`
+ functionality: "PDF file parts with URLs"
  });
  }
+ return {
+ type: "file",
+ file: {
+ filename: (_c = part.filename) != null ? _c : `part-${index}.pdf`,
+ file_data: `data:application/pdf;base64,${part.data}`
+ }
+ };
+ } else {
+ throw new import_provider.UnsupportedFunctionalityError({
+ functionality: `file part media type ${part.mediaType}`
+ });
  }
  }
  }
@@ -157,41 +172,20 @@ function convertToOpenAIChatMessages({
  }
  }
  }
- if (useLegacyFunctionCalling) {
- if (toolCalls.length > 1) {
- throw new import_provider.UnsupportedFunctionalityError({
- functionality: "useLegacyFunctionCalling with multiple tool calls in one message"
- });
- }
- messages.push({
- role: "assistant",
- content: text,
- function_call: toolCalls.length > 0 ? toolCalls[0].function : void 0
- });
- } else {
- messages.push({
- role: "assistant",
- content: text,
- tool_calls: toolCalls.length > 0 ? toolCalls : void 0
- });
- }
+ messages.push({
+ role: "assistant",
+ content: text,
+ tool_calls: toolCalls.length > 0 ? toolCalls : void 0
+ });
  break;
  }
  case "tool": {
  for (const toolResponse of content) {
- if (useLegacyFunctionCalling) {
- messages.push({
- role: "function",
- name: toolResponse.toolName,
- content: JSON.stringify(toolResponse.result)
- });
- } else {
- messages.push({
- role: "tool",
- tool_call_id: toolResponse.toolCallId,
- content: JSON.stringify(toolResponse.result)
- });
- }
+ messages.push({
+ role: "tool",
+ tool_call_id: toolResponse.toolCallId,
+ content: JSON.stringify(toolResponse.result)
+ });
  }
  break;
  }
@@ -204,17 +198,17 @@ function convertToOpenAIChatMessages({
  return { messages, warnings };
  }

- // src/map-openai-chat-logprobs.ts
- function mapOpenAIChatLogProbsOutput(logprobs) {
- var _a, _b;
- return (_b = (_a = logprobs == null ? void 0 : logprobs.content) == null ? void 0 : _a.map(({ token, logprob, top_logprobs }) => ({
- token,
- logprob,
- topLogprobs: top_logprobs ? top_logprobs.map(({ token: token2, logprob: logprob2 }) => ({
- token: token2,
- logprob: logprob2
- })) : []
- }))) != null ? _b : void 0;
+ // src/get-response-metadata.ts
+ function getResponseMetadata({
+ id,
+ model,
+ created
+ }) {
+ return {
+ id: id != null ? id : void 0,
+ modelId: model != null ? model : void 0,
+ timestamp: created != null ? new Date(created * 1e3) : void 0
+ };
  }

  // src/map-openai-finish-reason.ts
@@ -234,18 +228,75 @@ function mapOpenAIFinishReason(finishReason) {
  }
  }

- // src/openai-error.ts
+ // src/openai-chat-options.ts
  var import_zod = require("zod");
+ var openaiProviderOptions = import_zod.z.object({
+ /**
+ * Modify the likelihood of specified tokens appearing in the completion.
+ *
+ * Accepts a JSON object that maps tokens (specified by their token ID in
+ * the GPT tokenizer) to an associated bias value from -100 to 100.
+ */
+ logitBias: import_zod.z.record(import_zod.z.coerce.number(), import_zod.z.number()).optional(),
+ /**
+ * Return the log probabilities of the tokens.
+ *
+ * Setting to true will return the log probabilities of the tokens that
+ * were generated.
+ *
+ * Setting to a number will return the log probabilities of the top n
+ * tokens that were generated.
+ */
+ logprobs: import_zod.z.union([import_zod.z.boolean(), import_zod.z.number()]).optional(),
+ /**
+ * Whether to enable parallel function calling during tool use. Default to true.
+ */
+ parallelToolCalls: import_zod.z.boolean().optional(),
+ /**
+ * A unique identifier representing your end-user, which can help OpenAI to
+ * monitor and detect abuse.
+ */
+ user: import_zod.z.string().optional(),
+ /**
+ * Reasoning effort for reasoning models. Defaults to `medium`.
+ */
+ reasoningEffort: import_zod.z.enum(["low", "medium", "high"]).optional(),
+ /**
+ * Maximum number of completion tokens to generate. Useful for reasoning models.
+ */
+ maxCompletionTokens: import_zod.z.number().optional(),
+ /**
+ * Whether to enable persistence in responses API.
+ */
+ store: import_zod.z.boolean().optional(),
+ /**
+ * Metadata to associate with the request.
+ */
+ metadata: import_zod.z.record(import_zod.z.string()).optional(),
+ /**
+ * Parameters for prediction mode.
+ */
+ prediction: import_zod.z.record(import_zod.z.any()).optional(),
+ /**
+ * Whether to use structured outputs.
+ *
+ * @default true
+ */
+ structuredOutputs: import_zod.z.boolean().optional()
+ });
+
+ // src/openai-error.ts
+ var import_zod2 = require("zod");
  var import_provider_utils2 = require("@ai-sdk/provider-utils");
- var openaiErrorDataSchema = import_zod.z.object({
- error: import_zod.z.object({
- message: import_zod.z.string(),
+ var openaiErrorDataSchema = import_zod2.z.object({
+ error: import_zod2.z.object({
+ message: import_zod2.z.string(),
  // The additional information below is handled loosely to support
  // OpenAI-compatible providers that have slightly different error
  // responses:
- type: import_zod.z.string().nullish(),
- param: import_zod.z.any().nullish(),
- code: import_zod.z.union([import_zod.z.string(), import_zod.z.number()]).nullish()
+ type: import_zod2.z.string().nullish(),
+ param: import_zod2.z.any().nullish(),
+ code: import_zod2.z.union([import_zod2.z.string(), import_zod2.z.number()]).nullish()
  })
  });
  var openaiFailedResponseHandler = (0, import_provider_utils2.createJsonErrorResponseHandler)({
@@ -253,25 +304,11 @@ var openaiFailedResponseHandler = (0, import_provider_utils2.createJsonErrorResp
  errorToMessage: (data) => data.error.message
  });

- // src/get-response-metadata.ts
- function getResponseMetadata({
- id,
- model,
- created
- }) {
- return {
- id: id != null ? id : void 0,
- modelId: model != null ? model : void 0,
- timestamp: created != null ? new Date(created * 1e3) : void 0
- };
- }
-
  // src/openai-prepare-tools.ts
  var import_provider2 = require("@ai-sdk/provider");
  function prepareTools({
  tools,
  toolChoice,
- useLegacyFunctionCalling = false,
  structuredOutputs
  }) {
  tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
@@ -279,48 +316,6 @@ function prepareTools({
  if (tools == null) {
  return { tools: void 0, toolChoice: void 0, toolWarnings };
  }
- if (useLegacyFunctionCalling) {
- const openaiFunctions = [];
- for (const tool of tools) {
- if (tool.type === "provider-defined") {
- toolWarnings.push({ type: "unsupported-tool", tool });
- } else {
- openaiFunctions.push({
- name: tool.name,
- description: tool.description,
- parameters: tool.parameters
- });
- }
- }
- if (toolChoice == null) {
- return {
- functions: openaiFunctions,
- function_call: void 0,
- toolWarnings
- };
- }
- const type2 = toolChoice.type;
- switch (type2) {
- case "auto":
- case "none":
- case void 0:
- return {
- functions: openaiFunctions,
- function_call: void 0,
- toolWarnings
- };
- case "required":
- throw new import_provider2.UnsupportedFunctionalityError({
- functionality: "useLegacyFunctionCalling and toolChoice: required"
- });
- default:
- return {
- functions: openaiFunctions,
- function_call: { name: toolChoice.toolName },
- toolWarnings
- };
- }
- }
  const openaiTools2 = [];
  for (const tool of tools) {
  if (tool.type === "provider-defined") {
@@ -360,7 +355,7 @@ function prepareTools({
  default: {
  const _exhaustiveCheck = type;
  throw new import_provider2.UnsupportedFunctionalityError({
- functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`
+ functionality: `tool choice type: ${_exhaustiveCheck}`
  });
  }
  }
@@ -368,31 +363,20 @@ function prepareTools({

  // src/openai-chat-language-model.ts
  var OpenAIChatLanguageModel = class {
- constructor(modelId, settings, config) {
+ constructor(modelId, config) {
  this.specificationVersion = "v2";
+ this.supportedUrls = {
+ "image/*": [/^https?:\/\/.*$/]
+ };
  this.modelId = modelId;
- this.settings = settings;
  this.config = config;
  }
- get supportsStructuredOutputs() {
- var _a;
- return (_a = this.settings.structuredOutputs) != null ? _a : isReasoningModel(this.modelId);
- }
- get defaultObjectGenerationMode() {
- if (isAudioModel(this.modelId)) {
- return "tool";
- }
- return this.supportsStructuredOutputs ? "json" : "tool";
- }
  get provider() {
  return this.config.provider;
  }
- get supportsImageUrls() {
- return !this.settings.downloadImages;
- }
- getArgs({
+ async getArgs({
  prompt,
- maxTokens,
+ maxOutputTokens,
  temperature,
  topP,
  topK,
@@ -405,36 +389,30 @@ var OpenAIChatLanguageModel = class {
  toolChoice,
  providerOptions
  }) {
- var _a, _b, _c, _d, _e, _f, _g;
+ var _a, _b, _c;
  const warnings = [];
+ const openaiOptions = (_a = await (0, import_provider_utils3.parseProviderOptions)({
+ provider: "openai",
+ providerOptions,
+ schema: openaiProviderOptions
+ })) != null ? _a : {};
+ const structuredOutputs = (_b = openaiOptions.structuredOutputs) != null ? _b : true;
  if (topK != null) {
  warnings.push({
  type: "unsupported-setting",
  setting: "topK"
  });
  }
- if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !this.supportsStructuredOutputs) {
+ if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !structuredOutputs) {
  warnings.push({
  type: "unsupported-setting",
  setting: "responseFormat",
  details: "JSON response format schema is only supported with structuredOutputs"
  });
  }
- const useLegacyFunctionCalling = this.settings.useLegacyFunctionCalling;
- if (useLegacyFunctionCalling && this.settings.parallelToolCalls === true) {
- throw new import_provider3.UnsupportedFunctionalityError({
- functionality: "useLegacyFunctionCalling with parallelToolCalls"
- });
- }
- if (useLegacyFunctionCalling && this.supportsStructuredOutputs) {
- throw new import_provider3.UnsupportedFunctionalityError({
- functionality: "structuredOutputs with useLegacyFunctionCalling"
- });
- }
  const { messages, warnings: messageWarnings } = convertToOpenAIChatMessages(
  {
  prompt,
- useLegacyFunctionCalling,
  systemMessageMode: getSystemMessageMode(this.modelId)
  }
  );
@@ -443,36 +421,38 @@ var OpenAIChatLanguageModel = class {
  // model id:
  model: this.modelId,
  // model specific settings:
- logit_bias: this.settings.logitBias,
- logprobs: this.settings.logprobs === true || typeof this.settings.logprobs === "number" ? true : void 0,
- top_logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
- user: this.settings.user,
- parallel_tool_calls: this.settings.parallelToolCalls,
+ logit_bias: openaiOptions.logitBias,
+ logprobs: openaiOptions.logprobs === true || typeof openaiOptions.logprobs === "number" ? true : void 0,
+ top_logprobs: typeof openaiOptions.logprobs === "number" ? openaiOptions.logprobs : typeof openaiOptions.logprobs === "boolean" ? openaiOptions.logprobs ? 0 : void 0 : void 0,
+ user: openaiOptions.user,
+ parallel_tool_calls: openaiOptions.parallelToolCalls,
  // standardized settings:
- max_tokens: maxTokens,
+ max_tokens: maxOutputTokens,
  temperature,
  top_p: topP,
  frequency_penalty: frequencyPenalty,
  presence_penalty: presencePenalty,
- // TODO improve below:
- response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? this.supportsStructuredOutputs && responseFormat.schema != null ? {
- type: "json_schema",
- json_schema: {
- schema: responseFormat.schema,
- strict: true,
- name: (_a = responseFormat.name) != null ? _a : "response",
- description: responseFormat.description
- }
- } : { type: "json_object" } : void 0,
+ response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? (
+ // TODO convert into provider option
+ structuredOutputs && responseFormat.schema != null ? {
+ type: "json_schema",
+ json_schema: {
+ schema: responseFormat.schema,
+ strict: true,
+ name: (_c = responseFormat.name) != null ? _c : "response",
+ description: responseFormat.description
+ }
+ } : { type: "json_object" }
+ ) : void 0,
  stop: stopSequences,
  seed,
  // openai specific settings:
- // TODO remove in next major version; we auto-map maxTokens now
- max_completion_tokens: (_b = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _b.maxCompletionTokens,
- store: (_c = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _c.store,
- metadata: (_d = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _d.metadata,
- prediction: (_e = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _e.prediction,
- reasoning_effort: (_g = (_f = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _f.reasoningEffort) != null ? _g : this.settings.reasoningEffort,
+ // TODO remove in next major version; we auto-map maxOutputTokens now
+ max_completion_tokens: openaiOptions.maxCompletionTokens,
+ store: openaiOptions.store,
+ metadata: openaiOptions.metadata,
+ prediction: openaiOptions.prediction,
+ reasoning_effort: openaiOptions.reasoningEffort,
  // messages:
  messages
  };
@@ -536,33 +516,37 @@ var OpenAIChatLanguageModel = class {
  }
  baseArgs.max_tokens = void 0;
  }
+ } else if (this.modelId.startsWith("gpt-4o-search-preview") || this.modelId.startsWith("gpt-4o-mini-search-preview")) {
+ if (baseArgs.temperature != null) {
+ baseArgs.temperature = void 0;
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "temperature",
+ details: "temperature is not supported for the search preview models and has been removed."
+ });
+ }
  }
  const {
  tools: openaiTools2,
  toolChoice: openaiToolChoice,
- functions,
- function_call,
  toolWarnings
  } = prepareTools({
  tools,
  toolChoice,
- useLegacyFunctionCalling,
- structuredOutputs: this.supportsStructuredOutputs
+ structuredOutputs
  });
  return {
  args: {
  ...baseArgs,
  tools: openaiTools2,
- tool_choice: openaiToolChoice,
- functions,
- function_call
+ tool_choice: openaiToolChoice
  },
  warnings: [...warnings, ...toolWarnings]
  };
  }
  async doGenerate(options) {
- var _a, _b, _c, _d, _e, _f, _g, _h;
- const { args: body, warnings } = this.getArgs(options);
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
+ const { args: body, warnings } = await this.getArgs(options);
  const {
  responseHeaders,
  value: response,
@@ -581,105 +565,61 @@ var OpenAIChatLanguageModel = class {
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const { messages: rawPrompt, ...rawSettings } = body;
  const choice = response.choices[0];
- const completionTokenDetails = (_a = response.usage) == null ? void 0 : _a.completion_tokens_details;
- const promptTokenDetails = (_b = response.usage) == null ? void 0 : _b.prompt_tokens_details;
- const providerMetadata = { openai: {} };
- if ((completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null) {
- providerMetadata.openai.reasoningTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens;
+ const content = [];
+ const text = choice.message.content;
+ if (text != null && text.length > 0) {
+ content.push({ type: "text", text });
  }
+ for (const toolCall of (_a = choice.message.tool_calls) != null ? _a : []) {
+ content.push({
+ type: "tool-call",
+ toolCallType: "function",
+ toolCallId: (_b = toolCall.id) != null ? _b : (0, import_provider_utils3.generateId)(),
+ toolName: toolCall.function.name,
+ args: toolCall.function.arguments
+ });
+ }
+ const completionTokenDetails = (_c = response.usage) == null ? void 0 : _c.completion_tokens_details;
+ const promptTokenDetails = (_d = response.usage) == null ? void 0 : _d.prompt_tokens_details;
+ const providerMetadata = { openai: {} };
  if ((completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens) != null) {
  providerMetadata.openai.acceptedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens;
  }
  if ((completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens) != null) {
  providerMetadata.openai.rejectedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens;
  }
- if ((promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null) {
- providerMetadata.openai.cachedPromptTokens = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens;
+ if (((_e = choice.logprobs) == null ? void 0 : _e.content) != null) {
+ providerMetadata.openai.logprobs = choice.logprobs.content;
  }
  return {
- text: (_c = choice.message.content) != null ? _c : void 0,
- toolCalls: this.settings.useLegacyFunctionCalling && choice.message.function_call ? [
- {
- toolCallType: "function",
- toolCallId: (0, import_provider_utils3.generateId)(),
- toolName: choice.message.function_call.name,
- args: choice.message.function_call.arguments
- }
- ] : (_d = choice.message.tool_calls) == null ? void 0 : _d.map((toolCall) => {
- var _a2;
- return {
- toolCallType: "function",
- toolCallId: (_a2 = toolCall.id) != null ? _a2 : (0, import_provider_utils3.generateId)(),
- toolName: toolCall.function.name,
- args: toolCall.function.arguments
- };
- }),
+ content,
  finishReason: mapOpenAIFinishReason(choice.finish_reason),
  usage: {
- promptTokens: (_f = (_e = response.usage) == null ? void 0 : _e.prompt_tokens) != null ? _f : NaN,
- completionTokens: (_h = (_g = response.usage) == null ? void 0 : _g.completion_tokens) != null ? _h : NaN
+ inputTokens: (_g = (_f = response.usage) == null ? void 0 : _f.prompt_tokens) != null ? _g : void 0,
+ outputTokens: (_i = (_h = response.usage) == null ? void 0 : _h.completion_tokens) != null ? _i : void 0,
+ totalTokens: (_k = (_j = response.usage) == null ? void 0 : _j.total_tokens) != null ? _k : void 0,
+ reasoningTokens: (_l = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null ? _l : void 0,
+ cachedInputTokens: (_m = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null ? _m : void 0
+ },
+ request: { body },
+ response: {
+ ...getResponseMetadata(response),
+ headers: responseHeaders,
+ body: rawResponse
  },
- rawCall: { rawPrompt, rawSettings },
- rawResponse: { headers: responseHeaders, body: rawResponse },
- request: { body: JSON.stringify(body) },
- response: getResponseMetadata(response),
  warnings,
- logprobs: mapOpenAIChatLogProbsOutput(choice.logprobs),
  providerMetadata
  };
  }
  async doStream(options) {
- if (this.settings.simulateStreaming) {
- const result = await this.doGenerate(options);
- const simulatedStream = new ReadableStream({
- start(controller) {
- controller.enqueue({ type: "response-metadata", ...result.response });
- if (result.text) {
- controller.enqueue({
- type: "text-delta",
- textDelta: result.text
- });
- }
- if (result.toolCalls) {
- for (const toolCall of result.toolCalls) {
- controller.enqueue({
- type: "tool-call-delta",
- toolCallType: "function",
- toolCallId: toolCall.toolCallId,
- toolName: toolCall.toolName,
- argsTextDelta: toolCall.args
- });
- controller.enqueue({
- type: "tool-call",
- ...toolCall
- });
- }
- }
- controller.enqueue({
- type: "finish",
- finishReason: result.finishReason,
- usage: result.usage,
- logprobs: result.logprobs,
- providerMetadata: result.providerMetadata
- });
- controller.close();
- }
- });
- return {
- stream: simulatedStream,
- rawCall: result.rawCall,
- rawResponse: result.rawResponse,
- warnings: result.warnings
- };
- }
- const { args, warnings } = this.getArgs(options);
+ const { args, warnings } = await this.getArgs(options);
  const body = {
  ...args,
  stream: true,
- // only include stream_options when in strict compatibility mode:
- stream_options: this.config.compatibility === "strict" ? { include_usage: true } : void 0
+ stream_options: {
+ include_usage: true
+ }
  };
  const { responseHeaders, value: response } = await (0, import_provider_utils3.postJsonToApi)({
  url: this.config.url({
@@ -695,22 +635,23 @@ var OpenAIChatLanguageModel = class {
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const { messages: rawPrompt, ...rawSettings } = args;
  const toolCalls = [];
  let finishReason = "unknown";
- let usage = {
- promptTokens: void 0,
- completionTokens: void 0
+ const usage = {
+ inputTokens: void 0,
+ outputTokens: void 0,
+ totalTokens: void 0
  };
- let logprobs;
  let isFirstChunk = true;
- const { useLegacyFunctionCalling } = this.settings;
  const providerMetadata = { openai: {} };
  return {
  stream: response.pipeThrough(
  new TransformStream({
+ start(controller) {
+ controller.enqueue({ type: "stream-start", warnings });
+ },
  transform(chunk, controller) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x;
  if (!chunk.success) {
  finishReason = "error";
  controller.enqueue({ type: "error", error: chunk.error });
@@ -730,60 +671,37 @@ var OpenAIChatLanguageModel = class {
  });
  }
  if (value.usage != null) {
- const {
- prompt_tokens,
- completion_tokens,
- prompt_tokens_details,
- completion_tokens_details
- } = value.usage;
- usage = {
- promptTokens: prompt_tokens != null ? prompt_tokens : void 0,
- completionTokens: completion_tokens != null ? completion_tokens : void 0
- };
- if ((completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens) != null) {
- providerMetadata.openai.reasoningTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens;
- }
- if ((completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens) != null) {
- providerMetadata.openai.acceptedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens;
+ usage.inputTokens = (_a = value.usage.prompt_tokens) != null ? _a : void 0;
+ usage.outputTokens = (_b = value.usage.completion_tokens) != null ? _b : void 0;
+ usage.totalTokens = (_c = value.usage.total_tokens) != null ? _c : void 0;
+ usage.reasoningTokens = (_e = (_d = value.usage.completion_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : void 0;
+ usage.cachedInputTokens = (_g = (_f = value.usage.prompt_tokens_details) == null ? void 0 : _f.cached_tokens) != null ? _g : void 0;
+ if (((_h = value.usage.completion_tokens_details) == null ? void 0 : _h.accepted_prediction_tokens) != null) {
+ providerMetadata.openai.acceptedPredictionTokens = (_i = value.usage.completion_tokens_details) == null ? void 0 : _i.accepted_prediction_tokens;
  }
- if ((completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens) != null) {
- providerMetadata.openai.rejectedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens;
- }
- if ((prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens) != null) {
- providerMetadata.openai.cachedPromptTokens = prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens;
+ if (((_j = value.usage.completion_tokens_details) == null ? void 0 : _j.rejected_prediction_tokens) != null) {
+ providerMetadata.openai.rejectedPredictionTokens = (_k = value.usage.completion_tokens_details) == null ? void 0 : _k.rejected_prediction_tokens;
  }
  }
  const choice = value.choices[0];
  if ((choice == null ? void 0 : choice.finish_reason) != null) {
  finishReason = mapOpenAIFinishReason(choice.finish_reason);
  }
+ if (((_l = choice == null ? void 0 : choice.logprobs) == null ? void 0 : _l.content) != null) {
+ providerMetadata.openai.logprobs = choice.logprobs.content;
+ }
  if ((choice == null ? void 0 : choice.delta) == null) {
  return;
  }
  const delta = choice.delta;
  if (delta.content != null) {
  controller.enqueue({
- type: "text-delta",
- textDelta: delta.content
+ type: "text",
+ text: delta.content
  });
  }
- const mappedLogprobs = mapOpenAIChatLogProbsOutput(
- choice == null ? void 0 : choice.logprobs
- );
- if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
- if (logprobs === void 0) logprobs = [];
- logprobs.push(...mappedLogprobs);
- }
- const mappedToolCalls = useLegacyFunctionCalling && delta.function_call != null ? [
- {
- type: "function",
- id: (0, import_provider_utils3.generateId)(),
- function: delta.function_call,
- index: 0
- }
- ] : delta.tool_calls;
- if (mappedToolCalls != null) {
- for (const toolCallDelta of mappedToolCalls) {
+ if (delta.tool_calls != null) {
+ for (const toolCallDelta of delta.tool_calls) {
  const index = toolCallDelta.index;
  if (toolCalls[index] == null) {
  if (toolCallDelta.type !== "function") {
@@ -798,7 +716,7 @@ var OpenAIChatLanguageModel = class {
  message: `Expected 'id' to be a string.`
  });
  }
- if (((_a = toolCallDelta.function) == null ? void 0 : _a.name) == null) {
+ if (((_m = toolCallDelta.function) == null ? void 0 : _m.name) == null) {
  throw new import_provider3.InvalidResponseDataError({
  data: toolCallDelta,
  message: `Expected 'function.name' to be a string.`
@@ -809,12 +727,12 @@ var OpenAIChatLanguageModel = class {
  type: "function",
  function: {
  name: toolCallDelta.function.name,
- arguments: (_b = toolCallDelta.function.arguments) != null ? _b : ""
+ arguments: (_n = toolCallDelta.function.arguments) != null ? _n : ""
  },
  hasFinished: false
  };
  const toolCall2 = toolCalls[index];
- if (((_c = toolCall2.function) == null ? void 0 : _c.name) != null && ((_d = toolCall2.function) == null ? void 0 : _d.arguments) != null) {
+ if (((_o = toolCall2.function) == null ? void 0 : _o.name) != null && ((_p = toolCall2.function) == null ? void 0 : _p.arguments) != null) {
  if (toolCall2.function.arguments.length > 0) {
  controller.enqueue({
  type: "tool-call-delta",
@@ -828,7 +746,7 @@ var OpenAIChatLanguageModel = class {
  controller.enqueue({
  type: "tool-call",
  toolCallType: "function",
- toolCallId: (_e = toolCall2.id) != null ? _e : (0, import_provider_utils3.generateId)(),
+ toolCallId: (_q = toolCall2.id) != null ? _q : (0, import_provider_utils3.generateId)(),
  toolName: toolCall2.function.name,
  args: toolCall2.function.arguments
  });
@@ -841,21 +759,21 @@ var OpenAIChatLanguageModel = class {
  if (toolCall.hasFinished) {
  continue;
  }
- if (((_f = toolCallDelta.function) == null ? void 0 : _f.arguments) != null) {
- toolCall.function.arguments += (_h = (_g = toolCallDelta.function) == null ? void 0 : _g.arguments) != null ? _h : "";
+ if (((_r = toolCallDelta.function) == null ? void 0 : _r.arguments) != null) {
+ toolCall.function.arguments += (_t = (_s = toolCallDelta.function) == null ? void 0 : _s.arguments) != null ? _t : "";
  }
  controller.enqueue({
  type: "tool-call-delta",
  toolCallType: "function",
  toolCallId: toolCall.id,
  toolName: toolCall.function.name,
- argsTextDelta: (_i = toolCallDelta.function.arguments) != null ? _i : ""
+ argsTextDelta: (_u = toolCallDelta.function.arguments) != null ? _u : ""
  });
- if (((_j = toolCall.function) == null ? void 0 : _j.name) != null && ((_k = toolCall.function) == null ? void 0 : _k.arguments) != null && (0, import_provider_utils3.isParsableJson)(toolCall.function.arguments)) {
+ if (((_v = toolCall.function) == null ? void 0 : _v.name) != null && ((_w = toolCall.function) == null ? void 0 : _w.arguments) != null && (0, import_provider_utils3.isParsableJson)(toolCall.function.arguments)) {
  controller.enqueue({
  type: "tool-call",
  toolCallType: "function",
- toolCallId: (_l = toolCall.id) != null ? _l : (0, import_provider_utils3.generateId)(),
+ toolCallId: (_x = toolCall.id) != null ? _x : (0, import_provider_utils3.generateId)(),
  toolName: toolCall.function.name,
  args: toolCall.function.arguments
  });
@@ -865,125 +783,111 @@ var OpenAIChatLanguageModel = class {
  }
  },
  flush(controller) {
- var _a, _b;
  controller.enqueue({
  type: "finish",
  finishReason,
- logprobs,
- usage: {
- promptTokens: (_a = usage.promptTokens) != null ? _a : NaN,
- completionTokens: (_b = usage.completionTokens) != null ? _b : NaN
- },
+ usage,
  ...providerMetadata != null ? { providerMetadata } : {}
  });
  }
  })
  ),
- rawCall: { rawPrompt, rawSettings },
- rawResponse: { headers: responseHeaders },
- request: { body: JSON.stringify(body) },
- warnings
+ request: { body },
+ response: { headers: responseHeaders }
  };
  }
  };
- var openaiTokenUsageSchema = import_zod2.z.object({
- prompt_tokens: import_zod2.z.number().nullish(),
- completion_tokens: import_zod2.z.number().nullish(),
- prompt_tokens_details: import_zod2.z.object({
- cached_tokens: import_zod2.z.number().nullish()
+ var openaiTokenUsageSchema = import_zod3.z.object({
+ prompt_tokens: import_zod3.z.number().nullish(),
+ completion_tokens: import_zod3.z.number().nullish(),
+ total_tokens: import_zod3.z.number().nullish(),
+ prompt_tokens_details: import_zod3.z.object({
+ cached_tokens: import_zod3.z.number().nullish()
  }).nullish(),
- completion_tokens_details: import_zod2.z.object({
- reasoning_tokens: import_zod2.z.number().nullish(),
- accepted_prediction_tokens: import_zod2.z.number().nullish(),
- rejected_prediction_tokens: import_zod2.z.number().nullish()
+ completion_tokens_details: import_zod3.z.object({
+ reasoning_tokens: import_zod3.z.number().nullish(),
+ accepted_prediction_tokens: import_zod3.z.number().nullish(),
+ rejected_prediction_tokens: import_zod3.z.number().nullish()
  }).nullish()
  }).nullish();
- var openaiChatResponseSchema = import_zod2.z.object({
- id: import_zod2.z.string().nullish(),
- created: import_zod2.z.number().nullish(),
- model: import_zod2.z.string().nullish(),
- choices: import_zod2.z.array(
- import_zod2.z.object({
- message: import_zod2.z.object({
- role: import_zod2.z.literal("assistant").nullish(),
- content: import_zod2.z.string().nullish(),
- function_call: import_zod2.z.object({
- arguments: import_zod2.z.string(),
- name: import_zod2.z.string()
- }).nullish(),
- tool_calls: import_zod2.z.array(
- import_zod2.z.object({
- id: import_zod2.z.string().nullish(),
- type: import_zod2.z.literal("function"),
- function: import_zod2.z.object({
- name: import_zod2.z.string(),
- arguments: import_zod2.z.string()
+ var openaiChatResponseSchema = import_zod3.z.object({
+ id: import_zod3.z.string().nullish(),
+ created: import_zod3.z.number().nullish(),
+ model: import_zod3.z.string().nullish(),
+ choices: import_zod3.z.array(
+ import_zod3.z.object({
+ message: import_zod3.z.object({
+ role: import_zod3.z.literal("assistant").nullish(),
+ content: import_zod3.z.string().nullish(),
+ tool_calls: import_zod3.z.array(
+ import_zod3.z.object({
+ id: import_zod3.z.string().nullish(),
+ type: import_zod3.z.literal("function"),
+ function: import_zod3.z.object({
+ name: import_zod3.z.string(),
+ arguments: import_zod3.z.string()
  })
  })
  ).nullish()
  }),
- index: import_zod2.z.number(),
- logprobs: import_zod2.z.object({
- content: import_zod2.z.array(
- import_zod2.z.object({
- token: import_zod2.z.string(),
- logprob: import_zod2.z.number(),
- top_logprobs: import_zod2.z.array(
- import_zod2.z.object({
- token: import_zod2.z.string(),
- logprob: import_zod2.z.number()
+ index: import_zod3.z.number(),
+ logprobs: import_zod3.z.object({
+ content: import_zod3.z.array(
+ import_zod3.z.object({
+ token: import_zod3.z.string(),
+ logprob: import_zod3.z.number(),
+ top_logprobs: import_zod3.z.array(
+ import_zod3.z.object({
+ token: import_zod3.z.string(),
+ logprob: import_zod3.z.number()
  })
  )
  })
- ).nullable()
+ ).nullish()
  }).nullish(),
- finish_reason: import_zod2.z.string().nullish()
+ finish_reason: import_zod3.z.string().nullish()
  })
  ),
  usage: openaiTokenUsageSchema
  });
- var openaiChatChunkSchema = import_zod2.z.union([
- import_zod2.z.object({
- id: import_zod2.z.string().nullish(),
- created: import_zod2.z.number().nullish(),
- model: import_zod2.z.string().nullish(),
- choices: import_zod2.z.array(
- import_zod2.z.object({
- delta: import_zod2.z.object({
- role: import_zod2.z.enum(["assistant"]).nullish(),
- content: import_zod2.z.string().nullish(),
- function_call: import_zod2.z.object({
- name: import_zod2.z.string().optional(),
- arguments: import_zod2.z.string().optional()
- }).nullish(),
- tool_calls: import_zod2.z.array(
- import_zod2.z.object({
- index: import_zod2.z.number(),
- id: import_zod2.z.string().nullish(),
- type: import_zod2.z.literal("function").optional(),
- function: import_zod2.z.object({
- name: import_zod2.z.string().nullish(),
- arguments: import_zod2.z.string().nullish()
+ var openaiChatChunkSchema = import_zod3.z.union([
+ import_zod3.z.object({
+ id: import_zod3.z.string().nullish(),
+ created: import_zod3.z.number().nullish(),
+ model: import_zod3.z.string().nullish(),
+ choices: import_zod3.z.array(
+ import_zod3.z.object({
+ delta: import_zod3.z.object({
+ role: import_zod3.z.enum(["assistant"]).nullish(),
+ content: import_zod3.z.string().nullish(),
+ tool_calls: import_zod3.z.array(
+ import_zod3.z.object({
+ index: import_zod3.z.number(),
+ id: import_zod3.z.string().nullish(),
+ type: import_zod3.z.literal("function").nullish(),
+ function: import_zod3.z.object({
+ name: import_zod3.z.string().nullish(),
+ arguments: import_zod3.z.string().nullish()
  })
  })
  ).nullish()
  }).nullish(),
- logprobs: import_zod2.z.object({
- content: import_zod2.z.array(
- import_zod2.z.object({
- token: import_zod2.z.string(),
- logprob: import_zod2.z.number(),
- top_logprobs: import_zod2.z.array(
- import_zod2.z.object({
- token: import_zod2.z.string(),
- logprob: import_zod2.z.number()
+ logprobs: import_zod3.z.object({
+ content: import_zod3.z.array(
+ import_zod3.z.object({
+ token: import_zod3.z.string(),
+ logprob: import_zod3.z.number(),
+ top_logprobs: import_zod3.z.array(
+ import_zod3.z.object({
+ token: import_zod3.z.string(),
+ logprob: import_zod3.z.number()
  })
  )
  })
- ).nullable()
+ ).nullish()
  }).nullish(),
- finish_reason: import_zod2.z.string().nullable().optional(),
- index: import_zod2.z.number()
+ finish_reason: import_zod3.z.string().nullish(),
+ index: import_zod3.z.number()
  })
  ),
  usage: openaiTokenUsageSchema
@@ -991,10 +895,7 @@ var openaiChatChunkSchema = import_zod2.z.union([
  openaiErrorDataSchema
  ]);
  function isReasoningModel(modelId) {
- return modelId === "o1" || modelId.startsWith("o1-") || modelId === "o3" || modelId.startsWith("o3-");
- }
- function isAudioModel(modelId) {
- return modelId.startsWith("gpt-4o-audio-preview");
+ return modelId.startsWith("o");
  }
  function getSystemMessageMode(modelId) {
  var _a, _b;
@@ -1016,29 +917,37 @@ var reasoningModels = {
  "o1-preview-2024-09-12": {
  systemMessageMode: "remove"
  },
+ o3: {
+ systemMessageMode: "developer"
+ },
+ "o3-2025-04-16": {
+ systemMessageMode: "developer"
+ },
  "o3-mini": {
  systemMessageMode: "developer"
  },
  "o3-mini-2025-01-31": {
  systemMessageMode: "developer"
+ },
+ "o4-mini": {
+ systemMessageMode: "developer"
+ },
+ "o4-mini-2025-04-16": {
+ systemMessageMode: "developer"
  }
  };

  // src/openai-completion-language-model.ts
  var import_provider_utils4 = require("@ai-sdk/provider-utils");
- var import_zod3 = require("zod");
+ var import_zod5 = require("zod");

  // src/convert-to-openai-completion-prompt.ts
  var import_provider4 = require("@ai-sdk/provider");
  function convertToOpenAICompletionPrompt({
  prompt,
- inputFormat,
  user = "user",
  assistant = "assistant"
  }) {
- if (inputFormat === "prompt" && prompt.length === 1 && prompt[0].role === "user" && prompt[0].content.length === 1 && prompt[0].content[0].type === "text") {
- return { prompt: prompt[0].content[0].text };
- }
  let text = "";
  if (prompt[0].role === "system") {
  text += `${prompt[0].content}
@@ -1060,13 +969,8 @@ function convertToOpenAICompletionPrompt({
  case "text": {
  return part.text;
  }
- case "image": {
- throw new import_provider4.UnsupportedFunctionalityError({
- functionality: "images"
- });
- }
  }
- }).join("");
+ }).filter(Boolean).join("");
  text += `${user}:
  ${userMessage}

@@ -1112,36 +1016,68 @@ ${user}:`]
  };
  }

- // src/map-openai-completion-logprobs.ts
- function mapOpenAICompletionLogProbs(logprobs) {
- return logprobs == null ? void 0 : logprobs.tokens.map((token, index) => ({
- token,
- logprob: logprobs.token_logprobs[index],
- topLogprobs: logprobs.top_logprobs ? Object.entries(logprobs.top_logprobs[index]).map(
- ([token2, logprob]) => ({
- token: token2,
- logprob
- })
- ) : []
- }));
- }
+ // src/openai-completion-options.ts
+ var import_zod4 = require("zod");
+ var openaiCompletionProviderOptions = import_zod4.z.object({
+ /**
+ Echo back the prompt in addition to the completion.
+ */
+ echo: import_zod4.z.boolean().optional(),
+ /**
+ Modify the likelihood of specified tokens appearing in the completion.
+
+ Accepts a JSON object that maps tokens (specified by their token ID in
+ the GPT tokenizer) to an associated bias value from -100 to 100. You
+ can use this tokenizer tool to convert text to token IDs. Mathematically,
+ the bias is added to the logits generated by the model prior to sampling.
+ The exact effect will vary per model, but values between -1 and 1 should
+ decrease or increase likelihood of selection; values like -100 or 100
+ should result in a ban or exclusive selection of the relevant token.
+
+ As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
+ token from being generated.
+ */
+ logitBias: import_zod4.z.record(import_zod4.z.string(), import_zod4.z.number()).optional(),
+ /**
+ The suffix that comes after a completion of inserted text.
+ */
+ suffix: import_zod4.z.string().optional(),
+ /**
+ A unique identifier representing your end-user, which can help OpenAI to
+ monitor and detect abuse. Learn more.
+ */
+ user: import_zod4.z.string().optional(),
+ /**
+ Return the log probabilities of the tokens. Including logprobs will increase
+ the response size and can slow down response times. However, it can
+ be useful to better understand how the model is behaving.
+ Setting to true will return the log probabilities of the tokens that
+ were generated.
+ Setting to a number will return the log probabilities of the top n
+ tokens that were generated.
+ */
+ logprobs: import_zod4.z.union([import_zod4.z.boolean(), import_zod4.z.number()]).optional()
+ });

  // src/openai-completion-language-model.ts
  var OpenAICompletionLanguageModel = class {
- constructor(modelId, settings, config) {
+ constructor(modelId, config) {
  this.specificationVersion = "v2";
- this.defaultObjectGenerationMode = void 0;
+ this.supportedUrls = {
+ // No URLs are supported for completion models.
+ };
  this.modelId = modelId;
- this.settings = settings;
  this.config = config;
  }
+ get providerOptionsName() {
+ return this.config.provider.split(".")[0].trim();
+ }
  get provider() {
  return this.config.provider;
  }
- getArgs({
- inputFormat,
+ async getArgs({
  prompt,
- maxTokens,
+ maxOutputTokens,
  temperature,
  topP,
  topK,
@@ -1151,9 +1087,22 @@ var OpenAICompletionLanguageModel = class {
  responseFormat,
  tools,
  toolChoice,
- seed
+ seed,
+ providerOptions
  }) {
  const warnings = [];
+ const openaiOptions = {
+ ...await (0, import_provider_utils4.parseProviderOptions)({
+ provider: "openai",
+ providerOptions,
+ schema: openaiCompletionProviderOptions
+ }),
+ ...await (0, import_provider_utils4.parseProviderOptions)({
+ provider: this.providerOptionsName,
+ providerOptions,
+ schema: openaiCompletionProviderOptions
+ })
+ };
  if (topK != null) {
  warnings.push({ type: "unsupported-setting", setting: "topK" });
  }
@@ -1170,20 +1119,20 @@ var OpenAICompletionLanguageModel = class {
  details: "JSON response format is not supported."
  });
  }
- const { prompt: completionPrompt, stopSequences } = convertToOpenAICompletionPrompt({ prompt, inputFormat });
+ const { prompt: completionPrompt, stopSequences } = convertToOpenAICompletionPrompt({ prompt });
  const stop = [...stopSequences != null ? stopSequences : [], ...userStopSequences != null ? userStopSequences : []];
  return {
  args: {
  // model id:
  model: this.modelId,
  // model specific settings:
- echo: this.settings.echo,
- logit_bias: this.settings.logitBias,
- logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
- suffix: this.settings.suffix,
- user: this.settings.user,
+ echo: openaiOptions.echo,
+ logit_bias: openaiOptions.logitBias,
+ logprobs: (openaiOptions == null ? void 0 : openaiOptions.logprobs) === true ? 0 : (openaiOptions == null ? void 0 : openaiOptions.logprobs) === false ? void 0 : openaiOptions == null ? void 0 : openaiOptions.logprobs,
+ suffix: openaiOptions.suffix,
+ user: openaiOptions.user,
  // standardized settings:
- max_tokens: maxTokens,
+ max_tokens: maxOutputTokens,
  temperature,
  top_p: topP,
  frequency_penalty: frequencyPenalty,
@@ -1198,7 +1147,8 @@ var OpenAICompletionLanguageModel = class {
  };
  }
  async doGenerate(options) {
- const { args, warnings } = this.getArgs(options);
+ var _a, _b, _c;
+ const { args, warnings } = await this.getArgs(options);
  const {
  responseHeaders,
  value: response,
@@ -1217,30 +1167,37 @@ var OpenAICompletionLanguageModel = class {
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const { prompt: rawPrompt, ...rawSettings } = args;
  const choice = response.choices[0];
+ const providerMetadata = { openai: {} };
+ if (choice.logprobs != null) {
+ providerMetadata.openai.logprobs = choice.logprobs;
+ }
  return {
- text: choice.text,
+ content: [{ type: "text", text: choice.text }],
  usage: {
- promptTokens: response.usage.prompt_tokens,
- completionTokens: response.usage.completion_tokens
+ inputTokens: (_a = response.usage) == null ? void 0 : _a.prompt_tokens,
+ outputTokens: (_b = response.usage) == null ? void 0 : _b.completion_tokens,
+ totalTokens: (_c = response.usage) == null ? void 0 : _c.total_tokens
  },
  finishReason: mapOpenAIFinishReason(choice.finish_reason),
- logprobs: mapOpenAICompletionLogProbs(choice.logprobs),
- rawCall: { rawPrompt, rawSettings },
- rawResponse: { headers: responseHeaders, body: rawResponse },
- response: getResponseMetadata(response),
- warnings,
- request: { body: JSON.stringify(args) }
+ request: { body: args },
+ response: {
+ ...getResponseMetadata(response),
+ headers: responseHeaders,
+ body: rawResponse
+ },
+ providerMetadata,
+ warnings
  };
  }
  async doStream(options) {
- const { args, warnings } = this.getArgs(options);
+ const { args, warnings } = await this.getArgs(options);
  const body = {
  ...args,
  stream: true,
- // only include stream_options when in strict compatibility mode:
- stream_options: this.config.compatibility === "strict" ? { include_usage: true } : void 0
+ stream_options: {
+ include_usage: true
+ }
  };
  const { responseHeaders, value: response } = await (0, import_provider_utils4.postJsonToApi)({
  url: this.config.url({
@@ -1256,17 +1213,20 @@ var OpenAICompletionLanguageModel = class {
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const { prompt: rawPrompt, ...rawSettings } = args;
  let finishReason = "unknown";
- let usage = {
- promptTokens: Number.NaN,
- completionTokens: Number.NaN
+ const providerMetadata = { openai: {} };
+ const usage = {
+ inputTokens: void 0,
+ outputTokens: void 0,
+ totalTokens: void 0
  };
- let logprobs;
  let isFirstChunk = true;
  return {
  stream: response.pipeThrough(
  new TransformStream({
+ start(controller) {
+ controller.enqueue({ type: "stream-start", warnings });
+ },
  transform(chunk, controller) {
  if (!chunk.success) {
  finishReason = "error";
@@ -1287,87 +1247,79 @@ var OpenAICompletionLanguageModel = class {
               });
             }
             if (value.usage != null) {
-              usage = {
-                promptTokens: value.usage.prompt_tokens,
-                completionTokens: value.usage.completion_tokens
-              };
+              usage.inputTokens = value.usage.prompt_tokens;
+              usage.outputTokens = value.usage.completion_tokens;
+              usage.totalTokens = value.usage.total_tokens;
             }
             const choice = value.choices[0];
             if ((choice == null ? void 0 : choice.finish_reason) != null) {
               finishReason = mapOpenAIFinishReason(choice.finish_reason);
             }
+            if ((choice == null ? void 0 : choice.logprobs) != null) {
+              providerMetadata.openai.logprobs = choice.logprobs;
+            }
             if ((choice == null ? void 0 : choice.text) != null) {
               controller.enqueue({
-                type: "text-delta",
-                textDelta: choice.text
+                type: "text",
+                text: choice.text
               });
             }
-            const mappedLogprobs = mapOpenAICompletionLogProbs(
-              choice == null ? void 0 : choice.logprobs
-            );
-            if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
-              if (logprobs === void 0) logprobs = [];
-              logprobs.push(...mappedLogprobs);
-            }
           },
           flush(controller) {
             controller.enqueue({
               type: "finish",
               finishReason,
-              logprobs,
+              providerMetadata,
               usage
             });
           }
         })
       ),
-      rawCall: { rawPrompt, rawSettings },
-      rawResponse: { headers: responseHeaders },
-      warnings,
-      request: { body: JSON.stringify(body) }
+      request: { body },
+      response: { headers: responseHeaders }
     };
   }
 };
-var openaiCompletionResponseSchema = import_zod3.z.object({
-  id: import_zod3.z.string().nullish(),
-  created: import_zod3.z.number().nullish(),
-  model: import_zod3.z.string().nullish(),
-  choices: import_zod3.z.array(
-    import_zod3.z.object({
-      text: import_zod3.z.string(),
-      finish_reason: import_zod3.z.string(),
-      logprobs: import_zod3.z.object({
-        tokens: import_zod3.z.array(import_zod3.z.string()),
-        token_logprobs: import_zod3.z.array(import_zod3.z.number()),
-        top_logprobs: import_zod3.z.array(import_zod3.z.record(import_zod3.z.string(), import_zod3.z.number())).nullable()
+var usageSchema = import_zod5.z.object({
+  prompt_tokens: import_zod5.z.number(),
+  completion_tokens: import_zod5.z.number(),
+  total_tokens: import_zod5.z.number()
+});
+var openaiCompletionResponseSchema = import_zod5.z.object({
+  id: import_zod5.z.string().nullish(),
+  created: import_zod5.z.number().nullish(),
+  model: import_zod5.z.string().nullish(),
+  choices: import_zod5.z.array(
+    import_zod5.z.object({
+      text: import_zod5.z.string(),
+      finish_reason: import_zod5.z.string(),
+      logprobs: import_zod5.z.object({
+        tokens: import_zod5.z.array(import_zod5.z.string()),
+        token_logprobs: import_zod5.z.array(import_zod5.z.number()),
+        top_logprobs: import_zod5.z.array(import_zod5.z.record(import_zod5.z.string(), import_zod5.z.number())).nullish()
       }).nullish()
     })
   ),
-  usage: import_zod3.z.object({
-    prompt_tokens: import_zod3.z.number(),
-    completion_tokens: import_zod3.z.number()
-  })
+  usage: usageSchema.nullish()
 });
-var openaiCompletionChunkSchema = import_zod3.z.union([
-  import_zod3.z.object({
-    id: import_zod3.z.string().nullish(),
-    created: import_zod3.z.number().nullish(),
-    model: import_zod3.z.string().nullish(),
-    choices: import_zod3.z.array(
-      import_zod3.z.object({
-        text: import_zod3.z.string(),
-        finish_reason: import_zod3.z.string().nullish(),
-        index: import_zod3.z.number(),
-        logprobs: import_zod3.z.object({
-          tokens: import_zod3.z.array(import_zod3.z.string()),
-          token_logprobs: import_zod3.z.array(import_zod3.z.number()),
-          top_logprobs: import_zod3.z.array(import_zod3.z.record(import_zod3.z.string(), import_zod3.z.number())).nullable()
+var openaiCompletionChunkSchema = import_zod5.z.union([
+  import_zod5.z.object({
+    id: import_zod5.z.string().nullish(),
+    created: import_zod5.z.number().nullish(),
+    model: import_zod5.z.string().nullish(),
+    choices: import_zod5.z.array(
+      import_zod5.z.object({
+        text: import_zod5.z.string(),
+        finish_reason: import_zod5.z.string().nullish(),
+        index: import_zod5.z.number(),
+        logprobs: import_zod5.z.object({
+          tokens: import_zod5.z.array(import_zod5.z.string()),
+          token_logprobs: import_zod5.z.array(import_zod5.z.number()),
+          top_logprobs: import_zod5.z.array(import_zod5.z.record(import_zod5.z.string(), import_zod5.z.number())).nullish()
         }).nullish()
       })
     ),
-    usage: import_zod3.z.object({
-      prompt_tokens: import_zod3.z.number(),
-      completion_tokens: import_zod3.z.number()
-    }).nullish()
+    usage: usageSchema.nullish()
  }),
  openaiErrorDataSchema
]);
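The completion model's stream now reports usage as inputTokens/outputTokens/totalTokens and surfaces raw logprobs under providerMetadata.openai instead of the finish part. A minimal sketch of observing both through streamText, assuming a matching `ai` canary release (exact result fields may differ between canaries):

import { streamText } from 'ai';
import { openai } from '@ai-sdk/openai';

const result = streamText({
  model: openai.completion('gpt-3.5-turbo-instruct'),
  prompt: 'Write a haiku about diffs.',
});

// stream the text as before
for await (const chunk of result.textStream) {
  process.stdout.write(chunk);
}

// usage now carries inputTokens/outputTokens/totalTokens;
// raw completion logprobs land in providerMetadata.openai.logprobs
console.log(await result.usage);
console.log((await result.providerMetadata)?.openai?.logprobs);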
@@ -1375,30 +1327,42 @@ var openaiCompletionChunkSchema = import_zod3.z.union([
 // src/openai-embedding-model.ts
 var import_provider5 = require("@ai-sdk/provider");
 var import_provider_utils5 = require("@ai-sdk/provider-utils");
-var import_zod4 = require("zod");
+var import_zod7 = require("zod");
+
+// src/openai-embedding-options.ts
+var import_zod6 = require("zod");
+var openaiEmbeddingProviderOptions = import_zod6.z.object({
+  /**
+  The number of dimensions the resulting output embeddings should have.
+  Only supported in text-embedding-3 and later models.
+  */
+  dimensions: import_zod6.z.number().optional(),
+  /**
+  A unique identifier representing your end-user, which can help OpenAI to
+  monitor and detect abuse. Learn more.
+  */
+  user: import_zod6.z.string().optional()
+});
+
+// src/openai-embedding-model.ts
 var OpenAIEmbeddingModel = class {
-  constructor(modelId, settings, config) {
-    this.specificationVersion = "v1";
+  constructor(modelId, config) {
+    this.specificationVersion = "v2";
+    this.maxEmbeddingsPerCall = 2048;
+    this.supportsParallelCalls = true;
     this.modelId = modelId;
-    this.settings = settings;
     this.config = config;
   }
   get provider() {
     return this.config.provider;
   }
-  get maxEmbeddingsPerCall() {
-    var _a;
-    return (_a = this.settings.maxEmbeddingsPerCall) != null ? _a : 2048;
-  }
-  get supportsParallelCalls() {
-    var _a;
-    return (_a = this.settings.supportsParallelCalls) != null ? _a : true;
-  }
   async doEmbed({
     values,
     headers,
-    abortSignal
+    abortSignal,
+    providerOptions
   }) {
+    var _a;
     if (values.length > this.maxEmbeddingsPerCall) {
       throw new import_provider5.TooManyEmbeddingValuesForCallError({
         provider: this.provider,
@@ -1407,7 +1371,16 @@ var OpenAIEmbeddingModel = class {
         values
       });
     }
-    const { responseHeaders, value: response } = await (0, import_provider_utils5.postJsonToApi)({
+    const openaiOptions = (_a = await (0, import_provider_utils5.parseProviderOptions)({
+      provider: "openai",
+      providerOptions,
+      schema: openaiEmbeddingProviderOptions
+    })) != null ? _a : {};
+    const {
+      responseHeaders,
+      value: response,
+      rawValue
+    } = await (0, import_provider_utils5.postJsonToApi)({
       url: this.config.url({
         path: "/embeddings",
         modelId: this.modelId
@@ -1417,8 +1390,8 @@ var OpenAIEmbeddingModel = class {
         model: this.modelId,
         input: values,
         encoding_format: "float",
-        dimensions: this.settings.dimensions,
-        user: this.settings.user
+        dimensions: openaiOptions.dimensions,
+        user: openaiOptions.user
       },
       failedResponseHandler: openaiFailedResponseHandler,
       successfulResponseHandler: (0, import_provider_utils5.createJsonResponseHandler)(
@@ -1430,36 +1403,37 @@ var OpenAIEmbeddingModel = class {
     return {
       embeddings: response.data.map((item) => item.embedding),
       usage: response.usage ? { tokens: response.usage.prompt_tokens } : void 0,
-      rawResponse: { headers: responseHeaders }
+      response: { headers: responseHeaders, body: rawValue }
     };
   }
 };
-var openaiTextEmbeddingResponseSchema = import_zod4.z.object({
-  data: import_zod4.z.array(import_zod4.z.object({ embedding: import_zod4.z.array(import_zod4.z.number()) })),
-  usage: import_zod4.z.object({ prompt_tokens: import_zod4.z.number() }).nullish()
+var openaiTextEmbeddingResponseSchema = import_zod7.z.object({
+  data: import_zod7.z.array(import_zod7.z.object({ embedding: import_zod7.z.array(import_zod7.z.number()) })),
+  usage: import_zod7.z.object({ prompt_tokens: import_zod7.z.number() }).nullish()
 });
 
1443
1416
  var import_provider_utils6 = require("@ai-sdk/provider-utils");
1444
- var import_zod5 = require("zod");
1417
+ var import_zod8 = require("zod");
1445
1418
 
1446
1419
  // src/openai-image-settings.ts
1447
1420
  var modelMaxImagesPerCall = {
1448
1421
  "dall-e-3": 1,
1449
- "dall-e-2": 10
1422
+ "dall-e-2": 10,
1423
+ "gpt-image-1": 10
1450
1424
  };
1425
+ var hasDefaultResponseFormat = /* @__PURE__ */ new Set(["gpt-image-1"]);
1451
1426
 
1452
1427
  // src/openai-image-model.ts
1453
1428
  var OpenAIImageModel = class {
1454
- constructor(modelId, settings, config) {
1429
+ constructor(modelId, config) {
1455
1430
  this.modelId = modelId;
1456
- this.settings = settings;
1457
1431
  this.config = config;
1458
- this.specificationVersion = "v1";
1432
+ this.specificationVersion = "v2";
1459
1433
  }
1460
1434
  get maxImagesPerCall() {
1461
- var _a, _b;
1462
- return (_b = (_a = this.settings.maxImagesPerCall) != null ? _a : modelMaxImagesPerCall[this.modelId]) != null ? _b : 1;
1435
+ var _a;
1436
+ return (_a = modelMaxImagesPerCall[this.modelId]) != null ? _a : 1;
1463
1437
  }
1464
1438
  get provider() {
1465
1439
  return this.config.provider;
@@ -1499,7 +1473,7 @@ var OpenAIImageModel = class {
         n,
         size,
         ...(_d = providerOptions.openai) != null ? _d : {},
-        response_format: "b64_json"
+        ...!hasDefaultResponseFormat.has(this.modelId) ? { response_format: "b64_json" } : {}
       },
       failedResponseHandler: openaiFailedResponseHandler,
       successfulResponseHandler: (0, import_provider_utils6.createJsonResponseHandler)(
@@ -1515,17 +1489,28 @@ var OpenAIImageModel = class {
         timestamp: currentDate,
         modelId: this.modelId,
         headers: responseHeaders
+      },
+      providerMetadata: {
+        openai: {
+          images: response.data.map(
+            (item) => item.revised_prompt ? {
+              revisedPrompt: item.revised_prompt
+            } : null
+          )
+        }
       }
     };
   }
 };
-var openaiImageResponseSchema = import_zod5.z.object({
-  data: import_zod5.z.array(import_zod5.z.object({ b64_json: import_zod5.z.string() }))
+var openaiImageResponseSchema = import_zod8.z.object({
+  data: import_zod8.z.array(
+    import_zod8.z.object({ b64_json: import_zod8.z.string(), revised_prompt: import_zod8.z.string().optional() })
+  )
 });
 
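The image model gains gpt-image-1 (10 images per call) and skips the response_format override for it, since that model always returns base64; revised prompts are exposed per image through provider metadata. A hedged sketch, assuming a matching `ai` canary that exports experimental_generateImage and a providerMetadata field on its result:

import { experimental_generateImage as generateImage } from 'ai';
import { openai } from '@ai-sdk/openai';

const { images, providerMetadata } = await generateImage({
  model: openai.image('gpt-image-1'),
  prompt: 'A watercolor lighthouse at dawn',
  n: 2,
});

// one entry per image, null when the API returned no revised_prompt
console.log(providerMetadata.openai?.images);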
 // src/openai-tools.ts
-var import_zod6 = require("zod");
-var WebSearchPreviewParameters = import_zod6.z.object({});
+var import_zod9 = require("zod");
+var WebSearchPreviewParameters = import_zod9.z.object({});
 function webSearchPreviewTool({
   searchContextSize,
   userLocation
@@ -1544,13 +1529,201 @@ var openaiTools = {
   webSearchPreview: webSearchPreviewTool
 };
 
+// src/openai-transcription-model.ts
+var import_provider_utils7 = require("@ai-sdk/provider-utils");
+var import_zod11 = require("zod");
+
+// src/openai-transcription-options.ts
+var import_zod10 = require("zod");
+var openAITranscriptionProviderOptions = import_zod10.z.object({
+  /**
+   * Additional information to include in the transcription response.
+   */
+  include: import_zod10.z.array(import_zod10.z.string()).optional(),
+  /**
+   * The language of the input audio in ISO-639-1 format.
+   */
+  language: import_zod10.z.string().optional(),
+  /**
+   * An optional text to guide the model's style or continue a previous audio segment.
+   */
+  prompt: import_zod10.z.string().optional(),
+  /**
+   * The sampling temperature, between 0 and 1.
+   * @default 0
+   */
+  temperature: import_zod10.z.number().min(0).max(1).default(0).optional(),
+  /**
+   * The timestamp granularities to populate for this transcription.
+   * @default ['segment']
+   */
+  timestampGranularities: import_zod10.z.array(import_zod10.z.enum(["word", "segment"])).default(["segment"]).optional()
+});
+
+// src/openai-transcription-model.ts
+var languageMap = {
+  afrikaans: "af",
+  arabic: "ar",
+  armenian: "hy",
+  azerbaijani: "az",
+  belarusian: "be",
+  bosnian: "bs",
+  bulgarian: "bg",
+  catalan: "ca",
+  chinese: "zh",
+  croatian: "hr",
+  czech: "cs",
+  danish: "da",
+  dutch: "nl",
+  english: "en",
+  estonian: "et",
+  finnish: "fi",
+  french: "fr",
+  galician: "gl",
+  german: "de",
+  greek: "el",
+  hebrew: "he",
+  hindi: "hi",
+  hungarian: "hu",
+  icelandic: "is",
+  indonesian: "id",
+  italian: "it",
+  japanese: "ja",
+  kannada: "kn",
+  kazakh: "kk",
+  korean: "ko",
+  latvian: "lv",
+  lithuanian: "lt",
+  macedonian: "mk",
+  malay: "ms",
+  marathi: "mr",
+  maori: "mi",
+  nepali: "ne",
+  norwegian: "no",
+  persian: "fa",
+  polish: "pl",
+  portuguese: "pt",
+  romanian: "ro",
+  russian: "ru",
+  serbian: "sr",
+  slovak: "sk",
+  slovenian: "sl",
+  spanish: "es",
+  swahili: "sw",
+  swedish: "sv",
+  tagalog: "tl",
+  tamil: "ta",
+  thai: "th",
+  turkish: "tr",
+  ukrainian: "uk",
+  urdu: "ur",
+  vietnamese: "vi",
+  welsh: "cy"
+};
+var OpenAITranscriptionModel = class {
+  constructor(modelId, config) {
+    this.modelId = modelId;
+    this.config = config;
+    this.specificationVersion = "v1";
+  }
+  get provider() {
+    return this.config.provider;
+  }
+  async getArgs({
+    audio,
+    mediaType,
+    providerOptions
+  }) {
+    const warnings = [];
+    const openAIOptions = await (0, import_provider_utils7.parseProviderOptions)({
+      provider: "openai",
+      providerOptions,
+      schema: openAITranscriptionProviderOptions
+    });
+    const formData = new FormData();
+    const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([(0, import_provider_utils7.convertBase64ToUint8Array)(audio)]);
+    formData.append("model", this.modelId);
+    formData.append("file", new File([blob], "audio", { type: mediaType }));
+    if (openAIOptions) {
+      const transcriptionModelOptions = {
+        include: openAIOptions.include,
+        language: openAIOptions.language,
+        prompt: openAIOptions.prompt,
+        temperature: openAIOptions.temperature,
+        timestamp_granularities: openAIOptions.timestampGranularities
+      };
+      for (const [key, value] of Object.entries(transcriptionModelOptions)) {
+        if (value != null) {
+          formData.append(key, String(value));
+        }
+      }
+    }
+    return {
+      formData,
+      warnings
+    };
+  }
+  async doGenerate(options) {
+    var _a, _b, _c, _d, _e, _f;
+    const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
+    const { formData, warnings } = await this.getArgs(options);
+    const {
+      value: response,
+      responseHeaders,
+      rawValue: rawResponse
+    } = await (0, import_provider_utils7.postFormDataToApi)({
+      url: this.config.url({
+        path: "/audio/transcriptions",
+        modelId: this.modelId
+      }),
+      headers: (0, import_provider_utils7.combineHeaders)(this.config.headers(), options.headers),
+      formData,
+      failedResponseHandler: openaiFailedResponseHandler,
+      successfulResponseHandler: (0, import_provider_utils7.createJsonResponseHandler)(
+        openaiTranscriptionResponseSchema
+      ),
+      abortSignal: options.abortSignal,
+      fetch: this.config.fetch
+    });
+    const language = response.language != null && response.language in languageMap ? languageMap[response.language] : void 0;
+    return {
+      text: response.text,
+      segments: (_e = (_d = response.words) == null ? void 0 : _d.map((word) => ({
+        text: word.word,
+        startSecond: word.start,
+        endSecond: word.end
+      }))) != null ? _e : [],
+      language,
+      durationInSeconds: (_f = response.duration) != null ? _f : void 0,
+      warnings,
+      response: {
+        timestamp: currentDate,
+        modelId: this.modelId,
+        headers: responseHeaders,
+        body: rawResponse
+      }
+    };
+  }
+};
+var openaiTranscriptionResponseSchema = import_zod11.z.object({
+  text: import_zod11.z.string(),
+  language: import_zod11.z.string().nullish(),
+  duration: import_zod11.z.number().nullish(),
+  words: import_zod11.z.array(
+    import_zod11.z.object({
+      word: import_zod11.z.string(),
+      start: import_zod11.z.number(),
+      end: import_zod11.z.number()
+    })
+  ).nullish()
+});
+
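The new transcription model posts multipart form data to /audio/transcriptions and normalizes spelled-out language names (as Whisper returns them) to ISO-639-1 codes via languageMap. A usage sketch, assuming a matching `ai` canary that exports experimental_transcribe:

import { experimental_transcribe as transcribe } from 'ai';
import { openai } from '@ai-sdk/openai';
import { readFile } from 'node:fs/promises';

const result = await transcribe({
  model: openai.transcription('whisper-1'),
  audio: await readFile('meeting.wav'),
  providerOptions: {
    openai: { language: 'en', timestampGranularities: ['word'] },
  },
});

console.log(result.text);
console.log(result.segments);          // word-level { text, startSecond, endSecond }
console.log(result.language);          // ISO-639-1 code, e.g. 'en'
console.log(result.durationInSeconds);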
 // src/responses/openai-responses-language-model.ts
 var import_provider_utils8 = require("@ai-sdk/provider-utils");
-var import_zod7 = require("zod");
+var import_zod12 = require("zod");
 
 // src/responses/convert-to-openai-responses-messages.ts
 var import_provider6 = require("@ai-sdk/provider");
-var import_provider_utils7 = require("@ai-sdk/provider-utils");
 function convertToOpenAIResponsesMessages({
   prompt,
   systemMessageMode
@@ -1589,38 +1762,35 @@ function convertToOpenAIResponsesMessages({
       messages.push({
         role: "user",
         content: content.map((part, index) => {
-          var _a, _b, _c, _d;
+          var _a, _b, _c;
           switch (part.type) {
             case "text": {
               return { type: "input_text", text: part.text };
             }
-            case "image": {
-              return {
-                type: "input_image",
-                image_url: part.image instanceof URL ? part.image.toString() : `data:${(_a = part.mimeType) != null ? _a : "image/jpeg"};base64,${(0, import_provider_utils7.convertUint8ArrayToBase64)(part.image)}`,
-                // OpenAI specific extension: image detail
-                detail: (_c = (_b = part.providerOptions) == null ? void 0 : _b.openai) == null ? void 0 : _c.imageDetail
-              };
-            }
             case "file": {
-              if (part.data instanceof URL) {
-                throw new import_provider6.UnsupportedFunctionalityError({
-                  functionality: "File URLs in user messages"
-                });
-              }
-              switch (part.mimeType) {
-                case "application/pdf": {
-                  return {
-                    type: "input_file",
-                    filename: (_d = part.filename) != null ? _d : `part-${index}.pdf`,
-                    file_data: `data:application/pdf;base64,${part.data}`
-                  };
-                }
-                default: {
+              if (part.mediaType.startsWith("image/")) {
+                const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
+                return {
+                  type: "input_image",
+                  image_url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${part.data}`,
+                  // OpenAI specific extension: image detail
+                  detail: (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.imageDetail
+                };
+              } else if (part.mediaType === "application/pdf") {
+                if (part.data instanceof URL) {
                   throw new import_provider6.UnsupportedFunctionalityError({
-                    functionality: "Only PDF files are supported in user messages"
+                    functionality: "PDF file parts with URLs"
                   });
                 }
+                return {
+                  type: "input_file",
+                  filename: (_c = part.filename) != null ? _c : `part-${index}.pdf`,
+                  file_data: `data:application/pdf;base64,${part.data}`
+                };
+              } else {
+                throw new import_provider6.UnsupportedFunctionalityError({
+                  functionality: `file part media type ${part.mediaType}`
+                });
               }
             }
           }
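The dedicated image part type is gone on the responses path as well: images and PDFs both arrive as file parts carrying a mediaType and data (a URL or base64/binary content), with image/* defaulting to JPEG and URL-backed PDFs rejected. A sketch of the unified shape, assuming a matching `ai` canary:

import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';
import { readFile } from 'node:fs/promises';

const { text } = await generateText({
  model: openai.responses('gpt-4o'),
  messages: [
    {
      role: 'user',
      content: [
        { type: 'text', text: 'Compare the chart with the report.' },
        // images may be passed as URLs (see supportedUrls below)
        { type: 'file', mediaType: 'image/png', data: new URL('https://example.com/chart.png') },
        // PDFs must be binary/base64 data; URL data would throw
        { type: 'file', mediaType: 'application/pdf', data: await readFile('report.pdf'), filename: 'report.pdf' },
      ],
    },
  ],
});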
@@ -1749,7 +1919,7 @@ function prepareResponsesTools({
     default: {
       const _exhaustiveCheck = type;
       throw new import_provider7.UnsupportedFunctionalityError({
-        functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`
+        functionality: `tool choice type: ${_exhaustiveCheck}`
       });
     }
   }
@@ -1759,15 +1929,17 @@ function prepareResponsesTools({
 var OpenAIResponsesLanguageModel = class {
   constructor(modelId, config) {
     this.specificationVersion = "v2";
-    this.defaultObjectGenerationMode = "json";
+    this.supportedUrls = {
+      "image/*": [/^https?:\/\/.*$/]
+    };
     this.modelId = modelId;
     this.config = config;
   }
   get provider() {
     return this.config.provider;
   }
-  getArgs({
-    maxTokens,
+  async getArgs({
+    maxOutputTokens,
     temperature,
     stopSequences,
     topP,
@@ -1810,7 +1982,7 @@ var OpenAIResponsesLanguageModel = class {
       systemMessageMode: modelConfig.systemMessageMode
     });
     warnings.push(...messageWarnings);
-    const openaiOptions = (0, import_provider_utils8.parseProviderOptions)({
+    const openaiOptions = await (0, import_provider_utils8.parseProviderOptions)({
       provider: "openai",
       providerOptions,
       schema: openaiResponsesProviderOptionsSchema
@@ -1821,7 +1993,7 @@ var OpenAIResponsesLanguageModel = class {
       input: messages,
       temperature,
       top_p: topP,
-      max_output_tokens: maxTokens,
+      max_output_tokens: maxOutputTokens,
       ...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
         text: {
           format: responseFormat.schema != null ? {
@@ -1841,8 +2013,15 @@ var OpenAIResponsesLanguageModel = class {
       user: openaiOptions == null ? void 0 : openaiOptions.user,
       instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
       // model-specific settings:
-      ...modelConfig.isReasoningModel && (openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
-        reasoning: { effort: openaiOptions == null ? void 0 : openaiOptions.reasoningEffort }
+      ...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
+        reasoning: {
+          ...(openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
+            effort: openaiOptions.reasoningEffort
+          },
+          ...(openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null && {
+            summary: openaiOptions.reasoningSummary
+          }
+        }
       },
       ...modelConfig.requiredAutoTruncation && {
         truncation: "auto"
@@ -1885,8 +2064,8 @@ var OpenAIResponsesLanguageModel = class {
     };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e;
-    const { args: body, warnings } = this.getArgs(options);
+    var _a, _b, _c, _d, _e, _f, _g, _h;
+    const { args: body, warnings } = await this.getArgs(options);
     const {
       responseHeaders,
       value: response,
@@ -1900,112 +2079,132 @@ var OpenAIResponsesLanguageModel = class {
       body,
       failedResponseHandler: openaiFailedResponseHandler,
       successfulResponseHandler: (0, import_provider_utils8.createJsonResponseHandler)(
-        import_zod7.z.object({
-          id: import_zod7.z.string(),
-          created_at: import_zod7.z.number(),
-          model: import_zod7.z.string(),
-          output: import_zod7.z.array(
-            import_zod7.z.discriminatedUnion("type", [
-              import_zod7.z.object({
-                type: import_zod7.z.literal("message"),
-                role: import_zod7.z.literal("assistant"),
-                content: import_zod7.z.array(
-                  import_zod7.z.object({
-                    type: import_zod7.z.literal("output_text"),
-                    text: import_zod7.z.string(),
-                    annotations: import_zod7.z.array(
-                      import_zod7.z.object({
-                        type: import_zod7.z.literal("url_citation"),
-                        start_index: import_zod7.z.number(),
-                        end_index: import_zod7.z.number(),
-                        url: import_zod7.z.string(),
-                        title: import_zod7.z.string()
+        import_zod12.z.object({
+          id: import_zod12.z.string(),
+          created_at: import_zod12.z.number(),
+          model: import_zod12.z.string(),
+          output: import_zod12.z.array(
+            import_zod12.z.discriminatedUnion("type", [
+              import_zod12.z.object({
+                type: import_zod12.z.literal("message"),
+                role: import_zod12.z.literal("assistant"),
+                content: import_zod12.z.array(
+                  import_zod12.z.object({
+                    type: import_zod12.z.literal("output_text"),
+                    text: import_zod12.z.string(),
+                    annotations: import_zod12.z.array(
+                      import_zod12.z.object({
+                        type: import_zod12.z.literal("url_citation"),
+                        start_index: import_zod12.z.number(),
+                        end_index: import_zod12.z.number(),
+                        url: import_zod12.z.string(),
+                        title: import_zod12.z.string()
                       })
                     )
                   })
                 )
               }),
-              import_zod7.z.object({
-                type: import_zod7.z.literal("function_call"),
-                call_id: import_zod7.z.string(),
-                name: import_zod7.z.string(),
-                arguments: import_zod7.z.string()
+              import_zod12.z.object({
+                type: import_zod12.z.literal("function_call"),
+                call_id: import_zod12.z.string(),
+                name: import_zod12.z.string(),
+                arguments: import_zod12.z.string()
               }),
-              import_zod7.z.object({
-                type: import_zod7.z.literal("web_search_call")
+              import_zod12.z.object({
+                type: import_zod12.z.literal("web_search_call")
               }),
-              import_zod7.z.object({
-                type: import_zod7.z.literal("computer_call")
+              import_zod12.z.object({
+                type: import_zod12.z.literal("computer_call")
               }),
-              import_zod7.z.object({
-                type: import_zod7.z.literal("reasoning")
+              import_zod12.z.object({
+                type: import_zod12.z.literal("reasoning"),
+                summary: import_zod12.z.array(
+                  import_zod12.z.object({
+                    type: import_zod12.z.literal("summary_text"),
+                    text: import_zod12.z.string()
+                  })
+                )
               })
             ])
           ),
-          incomplete_details: import_zod7.z.object({ reason: import_zod7.z.string() }).nullable(),
-          usage: usageSchema
+          incomplete_details: import_zod12.z.object({ reason: import_zod12.z.string() }).nullable(),
+          usage: usageSchema2
         })
       ),
       abortSignal: options.abortSignal,
       fetch: this.config.fetch
     });
-    const outputTextElements = response.output.filter((output) => output.type === "message").flatMap((output) => output.content).filter((content) => content.type === "output_text");
-    const toolCalls = response.output.filter((output) => output.type === "function_call").map((output) => ({
-      toolCallType: "function",
-      toolCallId: output.call_id,
-      toolName: output.name,
-      args: output.arguments
-    }));
+    const content = [];
+    for (const part of response.output) {
+      switch (part.type) {
+        case "reasoning": {
+          content.push({
+            type: "reasoning",
+            text: part.summary.map((summary) => summary.text).join()
+          });
+          break;
+        }
+        case "message": {
+          for (const contentPart of part.content) {
+            content.push({
+              type: "text",
+              text: contentPart.text
+            });
+            for (const annotation of contentPart.annotations) {
+              content.push({
+                type: "source",
+                sourceType: "url",
+                id: (_c = (_b = (_a = this.config).generateId) == null ? void 0 : _b.call(_a)) != null ? _c : (0, import_provider_utils8.generateId)(),
+                url: annotation.url,
+                title: annotation.title
+              });
+            }
+          }
+          break;
+        }
+        case "function_call": {
+          content.push({
+            type: "tool-call",
+            toolCallType: "function",
+            toolCallId: part.call_id,
+            toolName: part.name,
+            args: part.arguments
+          });
+          break;
+        }
+      }
+    }
     return {
-      text: outputTextElements.map((content) => content.text).join("\n"),
-      sources: outputTextElements.flatMap(
-        (content) => content.annotations.map((annotation) => {
-          var _a2, _b2, _c2;
-          return {
-            sourceType: "url",
-            id: (_c2 = (_b2 = (_a2 = this.config).generateId) == null ? void 0 : _b2.call(_a2)) != null ? _c2 : (0, import_provider_utils8.generateId)(),
-            url: annotation.url,
-            title: annotation.title
-          };
-        })
-      ),
+      content,
       finishReason: mapOpenAIResponseFinishReason({
-        finishReason: (_a = response.incomplete_details) == null ? void 0 : _a.reason,
-        hasToolCalls: toolCalls.length > 0
+        finishReason: (_d = response.incomplete_details) == null ? void 0 : _d.reason,
+        hasToolCalls: content.some((part) => part.type === "tool-call")
       }),
-      toolCalls: toolCalls.length > 0 ? toolCalls : void 0,
       usage: {
-        promptTokens: response.usage.input_tokens,
-        completionTokens: response.usage.output_tokens
-      },
-      rawCall: {
-        rawPrompt: void 0,
-        rawSettings: {}
-      },
-      rawResponse: {
-        headers: responseHeaders,
-        body: rawResponse
-      },
-      request: {
-        body: JSON.stringify(body)
+        inputTokens: response.usage.input_tokens,
+        outputTokens: response.usage.output_tokens,
+        totalTokens: response.usage.input_tokens + response.usage.output_tokens,
+        reasoningTokens: (_f = (_e = response.usage.output_tokens_details) == null ? void 0 : _e.reasoning_tokens) != null ? _f : void 0,
+        cachedInputTokens: (_h = (_g = response.usage.input_tokens_details) == null ? void 0 : _g.cached_tokens) != null ? _h : void 0
       },
+      request: { body },
       response: {
         id: response.id,
         timestamp: new Date(response.created_at * 1e3),
-        modelId: response.model
+        modelId: response.model,
+        headers: responseHeaders,
+        body: rawResponse
       },
       providerMetadata: {
         openai: {
-          responseId: response.id,
-          cachedPromptTokens: (_c = (_b = response.usage.input_tokens_details) == null ? void 0 : _b.cached_tokens) != null ? _c : null,
-          reasoningTokens: (_e = (_d = response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : null
+          responseId: response.id
         }
       },
       warnings
     };
   }
   async doStream(options) {
-    const { args: body, warnings } = this.getArgs(options);
+    const { args: body, warnings } = await this.getArgs(options);
     const { responseHeaders, value: response } = await (0, import_provider_utils8.postJsonToApi)({
       url: this.config.url({
         path: "/responses",
@@ -2025,16 +2224,20 @@ var OpenAIResponsesLanguageModel = class {
     });
     const self = this;
     let finishReason = "unknown";
-    let promptTokens = NaN;
-    let completionTokens = NaN;
-    let cachedPromptTokens = null;
-    let reasoningTokens = null;
+    const usage = {
+      inputTokens: void 0,
+      outputTokens: void 0,
+      totalTokens: void 0
+    };
     let responseId = null;
     const ongoingToolCalls = {};
     let hasToolCalls = false;
     return {
       stream: response.pipeThrough(
         new TransformStream({
+          start(controller) {
+            controller.enqueue({ type: "stream-start", warnings });
+          },
           transform(chunk, controller) {
             var _a, _b, _c, _d, _e, _f, _g, _h;
             if (!chunk.success) {
@@ -2078,8 +2281,13 @@ var OpenAIResponsesLanguageModel = class {
               });
             } else if (isTextDeltaChunk(value)) {
               controller.enqueue({
-                type: "text-delta",
-                textDelta: value.delta
+                type: "text",
+                text: value.delta
+              });
+            } else if (isResponseReasoningSummaryTextDeltaChunk(value)) {
+              controller.enqueue({
+                type: "reasoning",
+                text: value.delta
               });
             } else if (isResponseOutputItemDoneChunk(value) && value.item.type === "function_call") {
               ongoingToolCalls[value.output_index] = void 0;
@@ -2096,19 +2304,18 @@ var OpenAIResponsesLanguageModel = class {
                 finishReason: (_a = value.response.incomplete_details) == null ? void 0 : _a.reason,
                 hasToolCalls
               });
-              promptTokens = value.response.usage.input_tokens;
-              completionTokens = value.response.usage.output_tokens;
-              cachedPromptTokens = (_c = (_b = value.response.usage.input_tokens_details) == null ? void 0 : _b.cached_tokens) != null ? _c : cachedPromptTokens;
-              reasoningTokens = (_e = (_d = value.response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : reasoningTokens;
+              usage.inputTokens = value.response.usage.input_tokens;
+              usage.outputTokens = value.response.usage.output_tokens;
+              usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens;
+              usage.reasoningTokens = (_c = (_b = value.response.usage.output_tokens_details) == null ? void 0 : _b.reasoning_tokens) != null ? _c : void 0;
+              usage.cachedInputTokens = (_e = (_d = value.response.usage.input_tokens_details) == null ? void 0 : _d.cached_tokens) != null ? _e : void 0;
             } else if (isResponseAnnotationAddedChunk(value)) {
               controller.enqueue({
                 type: "source",
-                source: {
-                  sourceType: "url",
-                  id: (_h = (_g = (_f = self.config).generateId) == null ? void 0 : _g.call(_f)) != null ? _h : (0, import_provider_utils8.generateId)(),
-                  url: value.annotation.url,
-                  title: value.annotation.title
-                }
+                sourceType: "url",
+                id: (_h = (_g = (_f = self.config).generateId) == null ? void 0 : _g.call(_f)) != null ? _h : (0, import_provider_utils8.generateId)(),
+                url: value.annotation.url,
+                title: value.annotation.title
               });
             }
           },
@@ -2116,103 +2323,101 @@ var OpenAIResponsesLanguageModel = class {
             controller.enqueue({
               type: "finish",
               finishReason,
-              usage: { promptTokens, completionTokens },
-              ...(cachedPromptTokens != null || reasoningTokens != null) && {
-                providerMetadata: {
-                  openai: {
-                    responseId,
-                    cachedPromptTokens,
-                    reasoningTokens
-                  }
+              usage,
+              providerMetadata: {
+                openai: {
+                  responseId
                 }
               }
             });
           }
         })
       ),
-      rawCall: {
-        rawPrompt: void 0,
-        rawSettings: {}
-      },
-      rawResponse: { headers: responseHeaders },
-      request: { body: JSON.stringify(body) },
-      warnings
+      request: { body },
+      response: { headers: responseHeaders }
     };
   }
 };
-var usageSchema = import_zod7.z.object({
-  input_tokens: import_zod7.z.number(),
-  input_tokens_details: import_zod7.z.object({ cached_tokens: import_zod7.z.number().nullish() }).nullish(),
-  output_tokens: import_zod7.z.number(),
-  output_tokens_details: import_zod7.z.object({ reasoning_tokens: import_zod7.z.number().nullish() }).nullish()
+var usageSchema2 = import_zod12.z.object({
+  input_tokens: import_zod12.z.number(),
+  input_tokens_details: import_zod12.z.object({ cached_tokens: import_zod12.z.number().nullish() }).nullish(),
+  output_tokens: import_zod12.z.number(),
+  output_tokens_details: import_zod12.z.object({ reasoning_tokens: import_zod12.z.number().nullish() }).nullish()
 });
-var textDeltaChunkSchema = import_zod7.z.object({
-  type: import_zod7.z.literal("response.output_text.delta"),
-  delta: import_zod7.z.string()
+var textDeltaChunkSchema = import_zod12.z.object({
+  type: import_zod12.z.literal("response.output_text.delta"),
+  delta: import_zod12.z.string()
 });
-var responseFinishedChunkSchema = import_zod7.z.object({
-  type: import_zod7.z.enum(["response.completed", "response.incomplete"]),
-  response: import_zod7.z.object({
-    incomplete_details: import_zod7.z.object({ reason: import_zod7.z.string() }).nullish(),
-    usage: usageSchema
+var responseFinishedChunkSchema = import_zod12.z.object({
+  type: import_zod12.z.enum(["response.completed", "response.incomplete"]),
+  response: import_zod12.z.object({
+    incomplete_details: import_zod12.z.object({ reason: import_zod12.z.string() }).nullish(),
+    usage: usageSchema2
   })
 });
-var responseCreatedChunkSchema = import_zod7.z.object({
-  type: import_zod7.z.literal("response.created"),
-  response: import_zod7.z.object({
-    id: import_zod7.z.string(),
-    created_at: import_zod7.z.number(),
-    model: import_zod7.z.string()
+var responseCreatedChunkSchema = import_zod12.z.object({
+  type: import_zod12.z.literal("response.created"),
+  response: import_zod12.z.object({
+    id: import_zod12.z.string(),
+    created_at: import_zod12.z.number(),
+    model: import_zod12.z.string()
   })
 });
-var responseOutputItemDoneSchema = import_zod7.z.object({
-  type: import_zod7.z.literal("response.output_item.done"),
-  output_index: import_zod7.z.number(),
-  item: import_zod7.z.discriminatedUnion("type", [
-    import_zod7.z.object({
-      type: import_zod7.z.literal("message")
+var responseOutputItemDoneSchema = import_zod12.z.object({
+  type: import_zod12.z.literal("response.output_item.done"),
+  output_index: import_zod12.z.number(),
+  item: import_zod12.z.discriminatedUnion("type", [
+    import_zod12.z.object({
+      type: import_zod12.z.literal("message")
     }),
-    import_zod7.z.object({
-      type: import_zod7.z.literal("function_call"),
-      id: import_zod7.z.string(),
-      call_id: import_zod7.z.string(),
-      name: import_zod7.z.string(),
-      arguments: import_zod7.z.string(),
-      status: import_zod7.z.literal("completed")
+    import_zod12.z.object({
+      type: import_zod12.z.literal("function_call"),
+      id: import_zod12.z.string(),
+      call_id: import_zod12.z.string(),
+      name: import_zod12.z.string(),
+      arguments: import_zod12.z.string(),
+      status: import_zod12.z.literal("completed")
     })
   ])
 });
-var responseFunctionCallArgumentsDeltaSchema = import_zod7.z.object({
-  type: import_zod7.z.literal("response.function_call_arguments.delta"),
-  item_id: import_zod7.z.string(),
-  output_index: import_zod7.z.number(),
-  delta: import_zod7.z.string()
+var responseFunctionCallArgumentsDeltaSchema = import_zod12.z.object({
+  type: import_zod12.z.literal("response.function_call_arguments.delta"),
+  item_id: import_zod12.z.string(),
+  output_index: import_zod12.z.number(),
+  delta: import_zod12.z.string()
 });
-var responseOutputItemAddedSchema = import_zod7.z.object({
-  type: import_zod7.z.literal("response.output_item.added"),
-  output_index: import_zod7.z.number(),
-  item: import_zod7.z.discriminatedUnion("type", [
-    import_zod7.z.object({
-      type: import_zod7.z.literal("message")
+var responseOutputItemAddedSchema = import_zod12.z.object({
+  type: import_zod12.z.literal("response.output_item.added"),
+  output_index: import_zod12.z.number(),
+  item: import_zod12.z.discriminatedUnion("type", [
+    import_zod12.z.object({
+      type: import_zod12.z.literal("message")
    }),
-    import_zod7.z.object({
-      type: import_zod7.z.literal("function_call"),
-      id: import_zod7.z.string(),
-      call_id: import_zod7.z.string(),
-      name: import_zod7.z.string(),
-      arguments: import_zod7.z.string()
+    import_zod12.z.object({
+      type: import_zod12.z.literal("function_call"),
+      id: import_zod12.z.string(),
+      call_id: import_zod12.z.string(),
+      name: import_zod12.z.string(),
+      arguments: import_zod12.z.string()
     })
   ])
 });
-var responseAnnotationAddedSchema = import_zod7.z.object({
-  type: import_zod7.z.literal("response.output_text.annotation.added"),
-  annotation: import_zod7.z.object({
-    type: import_zod7.z.literal("url_citation"),
-    url: import_zod7.z.string(),
-    title: import_zod7.z.string()
+var responseAnnotationAddedSchema = import_zod12.z.object({
+  type: import_zod12.z.literal("response.output_text.annotation.added"),
+  annotation: import_zod12.z.object({
+    type: import_zod12.z.literal("url_citation"),
+    url: import_zod12.z.string(),
+    title: import_zod12.z.string()
   })
 });
-var openaiResponsesChunkSchema = import_zod7.z.union([
+var responseReasoningSummaryTextDeltaSchema = import_zod12.z.object({
+  type: import_zod12.z.literal("response.reasoning_summary_text.delta"),
+  item_id: import_zod12.z.string(),
+  output_index: import_zod12.z.number(),
+  summary_index: import_zod12.z.number(),
+  delta: import_zod12.z.string()
+});
+var openaiResponsesChunkSchema = import_zod12.z.union([
   textDeltaChunkSchema,
   responseFinishedChunkSchema,
   responseCreatedChunkSchema,
@@ -2220,7 +2425,8 @@ var openaiResponsesChunkSchema = import_zod7.z.union([
   responseFunctionCallArgumentsDeltaSchema,
   responseOutputItemAddedSchema,
   responseAnnotationAddedSchema,
-  import_zod7.z.object({ type: import_zod7.z.string() }).passthrough()
+  responseReasoningSummaryTextDeltaSchema,
+  import_zod12.z.object({ type: import_zod12.z.string() }).passthrough()
   // fallback for unknown chunks
 ]);
 function isTextDeltaChunk(chunk) {
@@ -2244,6 +2450,9 @@ function isResponseOutputItemAddedChunk(chunk) {
 function isResponseAnnotationAddedChunk(chunk) {
   return chunk.type === "response.output_text.annotation.added";
 }
+function isResponseReasoningSummaryTextDeltaChunk(chunk) {
+  return chunk.type === "response.reasoning_summary_text.delta";
+}
 function getResponsesModelConfig(modelId) {
   if (modelId.startsWith("o")) {
     if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {
@@ -2265,25 +2474,124 @@ function getResponsesModelConfig(modelId) {
     requiredAutoTruncation: false
   };
 }
-var openaiResponsesProviderOptionsSchema = import_zod7.z.object({
-  metadata: import_zod7.z.any().nullish(),
-  parallelToolCalls: import_zod7.z.boolean().nullish(),
-  previousResponseId: import_zod7.z.string().nullish(),
-  store: import_zod7.z.boolean().nullish(),
-  user: import_zod7.z.string().nullish(),
-  reasoningEffort: import_zod7.z.string().nullish(),
-  strictSchemas: import_zod7.z.boolean().nullish(),
-  instructions: import_zod7.z.string().nullish()
+var openaiResponsesProviderOptionsSchema = import_zod12.z.object({
+  metadata: import_zod12.z.any().nullish(),
+  parallelToolCalls: import_zod12.z.boolean().nullish(),
+  previousResponseId: import_zod12.z.string().nullish(),
+  store: import_zod12.z.boolean().nullish(),
+  user: import_zod12.z.string().nullish(),
+  reasoningEffort: import_zod12.z.string().nullish(),
+  strictSchemas: import_zod12.z.boolean().nullish(),
+  instructions: import_zod12.z.string().nullish(),
+  reasoningSummary: import_zod12.z.string().nullish()
 });
 
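reasoningSummary joins reasoningEffort in the responses provider options; for reasoning models both are folded into a reasoning request block, summaries come back as reasoning content parts, and response.reasoning_summary_text.delta chunks stream as reasoning deltas. A sketch, assuming a matching `ai` canary ('auto' and 'detailed' are the summary values OpenAI documents at the time of writing):

import { streamText } from 'ai';
import { openai } from '@ai-sdk/openai';

const result = streamText({
  model: openai.responses('o4-mini'),
  prompt: 'How many Rs are in "strawberry"?',
  providerOptions: {
    openai: { reasoningEffort: 'medium', reasoningSummary: 'auto' },
  },
});

for await (const part of result.fullStream) {
  if (part.type === 'reasoning') console.log('[reasoning]', part.text);
  else if (part.type === 'text') process.stdout.write(part.text);
}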
+// src/openai-speech-model.ts
+var import_provider_utils9 = require("@ai-sdk/provider-utils");
+var import_zod13 = require("zod");
+var OpenAIProviderOptionsSchema = import_zod13.z.object({
+  instructions: import_zod13.z.string().nullish(),
+  speed: import_zod13.z.number().min(0.25).max(4).default(1).nullish()
+});
+var OpenAISpeechModel = class {
+  constructor(modelId, config) {
+    this.modelId = modelId;
+    this.config = config;
+    this.specificationVersion = "v1";
+  }
+  get provider() {
+    return this.config.provider;
+  }
+  async getArgs({
+    text,
+    voice = "alloy",
+    outputFormat = "mp3",
+    speed,
+    instructions,
+    providerOptions
+  }) {
+    const warnings = [];
+    const openAIOptions = await (0, import_provider_utils9.parseProviderOptions)({
+      provider: "openai",
+      providerOptions,
+      schema: OpenAIProviderOptionsSchema
+    });
+    const requestBody = {
+      model: this.modelId,
+      input: text,
+      voice,
+      response_format: "mp3",
+      speed,
+      instructions
+    };
+    if (outputFormat) {
+      if (["mp3", "opus", "aac", "flac", "wav", "pcm"].includes(outputFormat)) {
+        requestBody.response_format = outputFormat;
+      } else {
+        warnings.push({
+          type: "unsupported-setting",
+          setting: "outputFormat",
+          details: `Unsupported output format: ${outputFormat}. Using mp3 instead.`
+        });
+      }
+    }
+    if (openAIOptions) {
+      const speechModelOptions = {};
+      for (const key in speechModelOptions) {
+        const value = speechModelOptions[key];
+        if (value !== void 0) {
+          requestBody[key] = value;
+        }
+      }
+    }
+    return {
+      requestBody,
+      warnings
+    };
+  }
+  async doGenerate(options) {
+    var _a, _b, _c;
+    const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
+    const { requestBody, warnings } = await this.getArgs(options);
+    const {
+      value: audio,
+      responseHeaders,
+      rawValue: rawResponse
+    } = await (0, import_provider_utils9.postJsonToApi)({
+      url: this.config.url({
+        path: "/audio/speech",
+        modelId: this.modelId
+      }),
+      headers: (0, import_provider_utils9.combineHeaders)(this.config.headers(), options.headers),
+      body: requestBody,
+      failedResponseHandler: openaiFailedResponseHandler,
+      successfulResponseHandler: (0, import_provider_utils9.createBinaryResponseHandler)(),
+      abortSignal: options.abortSignal,
+      fetch: this.config.fetch
+    });
+    return {
+      audio,
+      warnings,
+      request: {
+        body: JSON.stringify(requestBody)
+      },
+      response: {
+        timestamp: currentDate,
+        modelId: this.modelId,
+        headers: responseHeaders,
+        body: rawResponse
+      }
+    };
+  }
+};
+
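The speech model posts JSON to /audio/speech and reads the body with a binary response handler; out-of-range output formats degrade to mp3 with an unsupported-setting warning rather than an error. A sketch, assuming a matching `ai` canary that exports experimental_generateSpeech and returns the audio as a generated file object:

import { experimental_generateSpeech as generateSpeech } from 'ai';
import { openai } from '@ai-sdk/openai';
import { writeFile } from 'node:fs/promises';

const { audio, warnings } = await generateSpeech({
  model: openai.speech('tts-1'),
  text: 'Hello from the canary build!',
  voice: 'alloy',
  outputFormat: 'wav', // anything outside mp3/opus/aac/flac/wav/pcm warns and falls back to mp3
  speed: 1.25,
});

await writeFile('hello.wav', audio.uint8Array); // field name assumed from the canary API
console.log(warnings);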
 // src/openai-provider.ts
 function createOpenAI(options = {}) {
-  var _a, _b, _c;
-  const baseURL = (_a = (0, import_provider_utils9.withoutTrailingSlash)(options.baseURL)) != null ? _a : "https://api.openai.com/v1";
-  const compatibility = (_b = options.compatibility) != null ? _b : "compatible";
-  const providerName = (_c = options.name) != null ? _c : "openai";
+  var _a, _b;
+  const baseURL = (_a = (0, import_provider_utils10.withoutTrailingSlash)(options.baseURL)) != null ? _a : "https://api.openai.com/v1";
+  const providerName = (_b = options.name) != null ? _b : "openai";
   const getHeaders = () => ({
-    Authorization: `Bearer ${(0, import_provider_utils9.loadApiKey)({
+    Authorization: `Bearer ${(0, import_provider_utils10.loadApiKey)({
       apiKey: options.apiKey,
       environmentVariableName: "OPENAI_API_KEY",
       description: "OpenAI"
@@ -2292,45 +2600,52 @@ function createOpenAI(options = {}) {
     "OpenAI-Project": options.project,
     ...options.headers
   });
-  const createChatModel = (modelId, settings = {}) => new OpenAIChatLanguageModel(modelId, settings, {
+  const createChatModel = (modelId) => new OpenAIChatLanguageModel(modelId, {
     provider: `${providerName}.chat`,
     url: ({ path }) => `${baseURL}${path}`,
     headers: getHeaders,
-    compatibility,
     fetch: options.fetch
   });
-  const createCompletionModel = (modelId, settings = {}) => new OpenAICompletionLanguageModel(modelId, settings, {
+  const createCompletionModel = (modelId) => new OpenAICompletionLanguageModel(modelId, {
     provider: `${providerName}.completion`,
     url: ({ path }) => `${baseURL}${path}`,
     headers: getHeaders,
-    compatibility,
     fetch: options.fetch
   });
-  const createEmbeddingModel = (modelId, settings = {}) => new OpenAIEmbeddingModel(modelId, settings, {
+  const createEmbeddingModel = (modelId) => new OpenAIEmbeddingModel(modelId, {
     provider: `${providerName}.embedding`,
     url: ({ path }) => `${baseURL}${path}`,
     headers: getHeaders,
     fetch: options.fetch
   });
-  const createImageModel = (modelId, settings = {}) => new OpenAIImageModel(modelId, settings, {
+  const createImageModel = (modelId) => new OpenAIImageModel(modelId, {
     provider: `${providerName}.image`,
     url: ({ path }) => `${baseURL}${path}`,
     headers: getHeaders,
     fetch: options.fetch
   });
-  const createLanguageModel = (modelId, settings) => {
+  const createTranscriptionModel = (modelId) => new OpenAITranscriptionModel(modelId, {
+    provider: `${providerName}.transcription`,
+    url: ({ path }) => `${baseURL}${path}`,
+    headers: getHeaders,
+    fetch: options.fetch
+  });
+  const createSpeechModel = (modelId) => new OpenAISpeechModel(modelId, {
+    provider: `${providerName}.speech`,
+    url: ({ path }) => `${baseURL}${path}`,
+    headers: getHeaders,
+    fetch: options.fetch
+  });
+  const createLanguageModel = (modelId) => {
     if (new.target) {
       throw new Error(
         "The OpenAI model function cannot be called with the new keyword."
       );
     }
     if (modelId === "gpt-3.5-turbo-instruct") {
-      return createCompletionModel(
-        modelId,
-        settings
-      );
+      return createCompletionModel(modelId);
     }
-    return createChatModel(modelId, settings);
+    return createChatModel(modelId);
   };
   const createResponsesModel = (modelId) => {
     return new OpenAIResponsesLanguageModel(modelId, {
@@ -2340,8 +2655,8 @@ function createOpenAI(options = {}) {
       fetch: options.fetch
     });
   };
-  const provider = function(modelId, settings) {
-    return createLanguageModel(modelId, settings);
+  const provider = function(modelId) {
+    return createLanguageModel(modelId);
   };
   provider.languageModel = createLanguageModel;
   provider.chat = createChatModel;
@@ -2352,13 +2667,14 @@ function createOpenAI(options = {}) {
   provider.textEmbeddingModel = createEmbeddingModel;
   provider.image = createImageModel;
   provider.imageModel = createImageModel;
+  provider.transcription = createTranscriptionModel;
+  provider.transcriptionModel = createTranscriptionModel;
+  provider.speech = createSpeechModel;
+  provider.speechModel = createSpeechModel;
   provider.tools = openaiTools;
   return provider;
 }
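With transcription and speech registered and the per-model settings objects removed, the provider surface narrows to model ids plus providerOptions; the compatibility flag disappears because stream_options is now sent unconditionally. A sketch of the resulting factory surface, assuming a matching canary:

import { createOpenAI, openai } from '@ai-sdk/openai';

const custom = createOpenAI({ name: 'my-openai' }); // no `compatibility` option anymore

const chat = custom('gpt-4o');                // same as custom.chat('gpt-4o')
const responses = custom.responses('gpt-4o');
const embedder = custom.textEmbeddingModel('text-embedding-3-small');
const transcriber = openai.transcription('whisper-1'); // newly registered
const speaker = openai.speech('tts-1');                // newly registered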
-var openai = createOpenAI({
-  compatibility: "strict"
-  // strict for OpenAI API
-});
+var openai = createOpenAI();
 // Annotate the CommonJS export names for ESM import in node:
 0 && (module.exports = {
   createOpenAI,