@ai-sdk/openai 2.0.0-canary.2 → 2.0.0-canary.20

This diff shows the changes between two publicly released versions of the package, as published to the supported public registries. It is provided for informational purposes only.
@@ -25,21 +25,27 @@ __export(internal_exports, {
  OpenAIEmbeddingModel: () => OpenAIEmbeddingModel,
  OpenAIImageModel: () => OpenAIImageModel,
  OpenAIResponsesLanguageModel: () => OpenAIResponsesLanguageModel,
- modelMaxImagesPerCall: () => modelMaxImagesPerCall
+ OpenAISpeechModel: () => OpenAISpeechModel,
+ OpenAITranscriptionModel: () => OpenAITranscriptionModel,
+ hasDefaultResponseFormat: () => hasDefaultResponseFormat,
+ modelMaxImagesPerCall: () => modelMaxImagesPerCall,
+ openAITranscriptionProviderOptions: () => openAITranscriptionProviderOptions,
+ openaiCompletionProviderOptions: () => openaiCompletionProviderOptions,
+ openaiEmbeddingProviderOptions: () => openaiEmbeddingProviderOptions,
+ openaiProviderOptions: () => openaiProviderOptions
  });
  module.exports = __toCommonJS(internal_exports);

  // src/openai-chat-language-model.ts
  var import_provider3 = require("@ai-sdk/provider");
  var import_provider_utils3 = require("@ai-sdk/provider-utils");
- var import_zod2 = require("zod");
+ var import_zod3 = require("zod");

  // src/convert-to-openai-chat-messages.ts
  var import_provider = require("@ai-sdk/provider");
  var import_provider_utils = require("@ai-sdk/provider-utils");
  function convertToOpenAIChatMessages({
  prompt,
- useLegacyFunctionCalling = false,
  systemMessageMode = "system"
  }) {
  const messages = [];
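The new export list above is the quickest summary of this release: speech and transcription models join the internal surface, along with zod-based provider-option schemas for the chat, completion, embedding, and transcription models. A hedged import sketch (the `/internal` subpath and the stability of these names between canary releases are assumptions):

```ts
// Assumes the package keeps exposing these names on its internal entry point.
import {
  OpenAISpeechModel,
  OpenAITranscriptionModel,
  openaiProviderOptions,
} from "@ai-sdk/openai/internal";
```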
@@ -80,55 +86,71 @@ function convertToOpenAIChatMessages({
  messages.push({
  role: "user",
  content: content.map((part, index) => {
- var _a, _b, _c, _d;
+ var _a, _b, _c;
  switch (part.type) {
  case "text": {
  return { type: "text", text: part.text };
  }
- case "image": {
- return {
- type: "image_url",
- image_url: {
- url: part.image instanceof URL ? part.image.toString() : `data:${(_a = part.mimeType) != null ? _a : "image/jpeg"};base64,${(0, import_provider_utils.convertUint8ArrayToBase64)(part.image)}`,
- // OpenAI specific extension: image detail
- detail: (_c = (_b = part.providerOptions) == null ? void 0 : _b.openai) == null ? void 0 : _c.imageDetail
- }
- };
- }
  case "file": {
- if (part.data instanceof URL) {
- throw new import_provider.UnsupportedFunctionalityError({
- functionality: "'File content parts with URL data' functionality not supported."
- });
- }
- switch (part.mimeType) {
- case "audio/wav": {
- return {
- type: "input_audio",
- input_audio: { data: part.data, format: "wav" }
- };
- }
- case "audio/mp3":
- case "audio/mpeg": {
- return {
- type: "input_audio",
- input_audio: { data: part.data, format: "mp3" }
- };
+ if (part.mediaType.startsWith("image/")) {
+ const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
+ return {
+ type: "image_url",
+ image_url: {
+ url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${(0, import_provider_utils.convertToBase64)(part.data)}`,
+ // OpenAI specific extension: image detail
+ detail: (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.imageDetail
+ }
+ };
+ } else if (part.mediaType.startsWith("audio/")) {
+ if (part.data instanceof URL) {
+ throw new import_provider.UnsupportedFunctionalityError({
+ functionality: "audio file parts with URLs"
+ });
  }
- case "application/pdf": {
- return {
- type: "file",
- file: {
- filename: (_d = part.filename) != null ? _d : `part-${index}.pdf`,
- file_data: `data:application/pdf;base64,${part.data}`
- }
- };
+ switch (part.mediaType) {
+ case "audio/wav": {
+ return {
+ type: "input_audio",
+ input_audio: {
+ data: (0, import_provider_utils.convertToBase64)(part.data),
+ format: "wav"
+ }
+ };
+ }
+ case "audio/mp3":
+ case "audio/mpeg": {
+ return {
+ type: "input_audio",
+ input_audio: {
+ data: (0, import_provider_utils.convertToBase64)(part.data),
+ format: "mp3"
+ }
+ };
+ }
+ default: {
+ throw new import_provider.UnsupportedFunctionalityError({
+ functionality: `audio content parts with media type ${part.mediaType}`
+ });
+ }
  }
- default: {
+ } else if (part.mediaType === "application/pdf") {
+ if (part.data instanceof URL) {
  throw new import_provider.UnsupportedFunctionalityError({
- functionality: `File content part type ${part.mimeType} in user messages`
+ functionality: "PDF file parts with URLs"
  });
  }
+ return {
+ type: "file",
+ file: {
+ filename: (_c = part.filename) != null ? _c : `part-${index}.pdf`,
+ file_data: `data:application/pdf;base64,${part.data}`
+ }
+ };
+ } else {
+ throw new import_provider.UnsupportedFunctionalityError({
+ functionality: `file part media type ${part.mediaType}`
+ });
  }
  }
  }
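This hunk replaces the dedicated `image` content part with a single `file` part dispatched on `mediaType`. A standalone sketch of the routing (a reimplementation for illustration, not the package's export):

```ts
type FilePart = { mediaType: string; data: URL | Uint8Array | string };

// Mirrors the branch order above: images (with image/* falling back to
// image/jpeg), then audio (wav and mp3/mpeg only), then PDF; anything else
// is rejected as unsupported.
function routeFilePart(part: FilePart): "image_url" | "input_audio" | "file" {
  if (part.mediaType.startsWith("image/")) return "image_url";
  if (part.mediaType.startsWith("audio/")) return "input_audio";
  if (part.mediaType === "application/pdf") return "file";
  throw new Error(`file part media type ${part.mediaType}`);
}
```

Note that URL data is only accepted for images; audio and PDF parts backed by URLs still throw.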
@@ -158,41 +180,20 @@ function convertToOpenAIChatMessages({
  }
  }
  }
- if (useLegacyFunctionCalling) {
- if (toolCalls.length > 1) {
- throw new import_provider.UnsupportedFunctionalityError({
- functionality: "useLegacyFunctionCalling with multiple tool calls in one message"
- });
- }
- messages.push({
- role: "assistant",
- content: text,
- function_call: toolCalls.length > 0 ? toolCalls[0].function : void 0
- });
- } else {
- messages.push({
- role: "assistant",
- content: text,
- tool_calls: toolCalls.length > 0 ? toolCalls : void 0
- });
- }
+ messages.push({
+ role: "assistant",
+ content: text,
+ tool_calls: toolCalls.length > 0 ? toolCalls : void 0
+ });
  break;
  }
  case "tool": {
  for (const toolResponse of content) {
- if (useLegacyFunctionCalling) {
- messages.push({
- role: "function",
- name: toolResponse.toolName,
- content: JSON.stringify(toolResponse.result)
- });
- } else {
- messages.push({
- role: "tool",
- tool_call_id: toolResponse.toolCallId,
- content: JSON.stringify(toolResponse.result)
- });
- }
+ messages.push({
+ role: "tool",
+ tool_call_id: toolResponse.toolCallId,
+ content: JSON.stringify(toolResponse.result)
+ });
  }
  break;
  }
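With the legacy function-calling path removed, assistant tool use and tool results are always encoded with the `tool_calls` array and the `tool` role. A sketch of the wire shape now produced (the id is hypothetical):

```ts
const assistantMessage = {
  role: "assistant",
  content: "",
  tool_calls: [
    {
      id: "call_123", // hypothetical id
      type: "function",
      function: { name: "getWeather", arguments: '{"city":"Berlin"}' },
    },
  ],
};

const toolMessage = {
  role: "tool",
  tool_call_id: "call_123",
  content: JSON.stringify({ temperature: 21 }),
};
```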
@@ -205,17 +206,17 @@ function convertToOpenAIChatMessages({
  return { messages, warnings };
  }

- // src/map-openai-chat-logprobs.ts
- function mapOpenAIChatLogProbsOutput(logprobs) {
- var _a, _b;
- return (_b = (_a = logprobs == null ? void 0 : logprobs.content) == null ? void 0 : _a.map(({ token, logprob, top_logprobs }) => ({
- token,
- logprob,
- topLogprobs: top_logprobs ? top_logprobs.map(({ token: token2, logprob: logprob2 }) => ({
- token: token2,
- logprob: logprob2
- })) : []
- }))) != null ? _b : void 0;
+ // src/get-response-metadata.ts
+ function getResponseMetadata({
+ id,
+ model,
+ created
+ }) {
+ return {
+ id: id != null ? id : void 0,
+ modelId: model != null ? model : void 0,
+ timestamp: created != null ? new Date(created * 1e3) : void 0
+ };
  }

  // src/map-openai-finish-reason.ts
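`getResponseMetadata` moves up in the bundle (the old chat logprobs mapper is deleted). The only non-obvious detail is the timestamp conversion, sketched standalone:

```ts
// OpenAI reports `created` in Unix seconds; the JS Date constructor takes
// milliseconds, hence the * 1000 (written as 1e3 in the minified output).
function toTimestamp(created: number | null | undefined): Date | undefined {
  return created != null ? new Date(created * 1000) : undefined;
}
```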
@@ -235,18 +236,75 @@ function mapOpenAIFinishReason(finishReason) {
  }
  }

- // src/openai-error.ts
+ // src/openai-chat-options.ts
  var import_zod = require("zod");
+ var openaiProviderOptions = import_zod.z.object({
+ /**
+ * Modify the likelihood of specified tokens appearing in the completion.
+ *
+ * Accepts a JSON object that maps tokens (specified by their token ID in
+ * the GPT tokenizer) to an associated bias value from -100 to 100.
+ */
+ logitBias: import_zod.z.record(import_zod.z.coerce.number(), import_zod.z.number()).optional(),
+ /**
+ * Return the log probabilities of the tokens.
+ *
+ * Setting to true will return the log probabilities of the tokens that
+ * were generated.
+ *
+ * Setting to a number will return the log probabilities of the top n
+ * tokens that were generated.
+ */
+ logprobs: import_zod.z.union([import_zod.z.boolean(), import_zod.z.number()]).optional(),
+ /**
+ * Whether to enable parallel function calling during tool use. Default to true.
+ */
+ parallelToolCalls: import_zod.z.boolean().optional(),
+ /**
+ * A unique identifier representing your end-user, which can help OpenAI to
+ * monitor and detect abuse.
+ */
+ user: import_zod.z.string().optional(),
+ /**
+ * Reasoning effort for reasoning models. Defaults to `medium`.
+ */
+ reasoningEffort: import_zod.z.enum(["low", "medium", "high"]).optional(),
+ /**
+ * Maximum number of completion tokens to generate. Useful for reasoning models.
+ */
+ maxCompletionTokens: import_zod.z.number().optional(),
+ /**
+ * Whether to enable persistence in responses API.
+ */
+ store: import_zod.z.boolean().optional(),
+ /**
+ * Metadata to associate with the request.
+ */
+ metadata: import_zod.z.record(import_zod.z.string()).optional(),
+ /**
+ * Parameters for prediction mode.
+ */
+ prediction: import_zod.z.record(import_zod.z.any()).optional(),
+ /**
+ * Whether to use structured outputs.
+ *
+ * @default true
+ */
+ structuredOutputs: import_zod.z.boolean().optional()
+ });
+
+ // src/openai-error.ts
+ var import_zod2 = require("zod");
  var import_provider_utils2 = require("@ai-sdk/provider-utils");
- var openaiErrorDataSchema = import_zod.z.object({
- error: import_zod.z.object({
- message: import_zod.z.string(),
+ var openaiErrorDataSchema = import_zod2.z.object({
+ error: import_zod2.z.object({
+ message: import_zod2.z.string(),
  // The additional information below is handled loosely to support
  // OpenAI-compatible providers that have slightly different error
  // responses:
- type: import_zod.z.string().nullish(),
- param: import_zod.z.any().nullish(),
- code: import_zod.z.union([import_zod.z.string(), import_zod.z.number()]).nullish()
+ type: import_zod2.z.string().nullish(),
+ param: import_zod2.z.any().nullish(),
+ code: import_zod2.z.union([import_zod2.z.string(), import_zod2.z.number()]).nullish()
  })
  });
  var openaiFailedResponseHandler = (0, import_provider_utils2.createJsonErrorResponseHandler)({
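Settings that previously lived on the model constructor (`logitBias`, `user`, `reasoningEffort`, ...) are now per-call provider options validated against this zod schema. A hedged usage sketch (`generateText` is from the `ai` package; canary option names may still shift):

```ts
import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

const { text } = await generateText({
  model: openai("o3-mini"),
  prompt: "Summarize the changelog.",
  providerOptions: {
    openai: {
      // keys mirror the openaiProviderOptions schema above
      reasoningEffort: "low",
      maxCompletionTokens: 1024,
    },
  },
});
```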
@@ -254,25 +312,11 @@ var openaiFailedResponseHandler = (0, import_provider_utils2.createJsonErrorResp
  errorToMessage: (data) => data.error.message
  });

- // src/get-response-metadata.ts
- function getResponseMetadata({
- id,
- model,
- created
- }) {
- return {
- id: id != null ? id : void 0,
- modelId: model != null ? model : void 0,
- timestamp: created != null ? new Date(created * 1e3) : void 0
- };
- }
-
  // src/openai-prepare-tools.ts
  var import_provider2 = require("@ai-sdk/provider");
  function prepareTools({
  tools,
  toolChoice,
- useLegacyFunctionCalling = false,
  structuredOutputs
  }) {
  tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
@@ -280,48 +324,6 @@ function prepareTools({
  if (tools == null) {
  return { tools: void 0, toolChoice: void 0, toolWarnings };
  }
- if (useLegacyFunctionCalling) {
- const openaiFunctions = [];
- for (const tool of tools) {
- if (tool.type === "provider-defined") {
- toolWarnings.push({ type: "unsupported-tool", tool });
- } else {
- openaiFunctions.push({
- name: tool.name,
- description: tool.description,
- parameters: tool.parameters
- });
- }
- }
- if (toolChoice == null) {
- return {
- functions: openaiFunctions,
- function_call: void 0,
- toolWarnings
- };
- }
- const type2 = toolChoice.type;
- switch (type2) {
- case "auto":
- case "none":
- case void 0:
- return {
- functions: openaiFunctions,
- function_call: void 0,
- toolWarnings
- };
- case "required":
- throw new import_provider2.UnsupportedFunctionalityError({
- functionality: "useLegacyFunctionCalling and toolChoice: required"
- });
- default:
- return {
- functions: openaiFunctions,
- function_call: { name: toolChoice.toolName },
- toolWarnings
- };
- }
- }
  const openaiTools = [];
  for (const tool of tools) {
  if (tool.type === "provider-defined") {
@@ -361,7 +363,7 @@ function prepareTools({
  default: {
  const _exhaustiveCheck = type;
  throw new import_provider2.UnsupportedFunctionalityError({
- functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`
+ functionality: `tool choice type: ${_exhaustiveCheck}`
  });
  }
  }
@@ -369,31 +371,20 @@

  // src/openai-chat-language-model.ts
  var OpenAIChatLanguageModel = class {
- constructor(modelId, settings, config) {
+ constructor(modelId, config) {
  this.specificationVersion = "v2";
+ this.supportedUrls = {
+ "image/*": [/^https?:\/\/.*$/]
+ };
  this.modelId = modelId;
- this.settings = settings;
  this.config = config;
  }
- get supportsStructuredOutputs() {
- var _a;
- return (_a = this.settings.structuredOutputs) != null ? _a : isReasoningModel(this.modelId);
- }
- get defaultObjectGenerationMode() {
- if (isAudioModel(this.modelId)) {
- return "tool";
- }
- return this.supportsStructuredOutputs ? "json" : "tool";
- }
  get provider() {
  return this.config.provider;
  }
- get supportsImageUrls() {
- return !this.settings.downloadImages;
- }
- getArgs({
+ async getArgs({
  prompt,
- maxTokens,
+ maxOutputTokens,
  temperature,
  topP,
  topK,
@@ -406,36 +397,30 @@ var OpenAIChatLanguageModel = class {
  toolChoice,
  providerOptions
  }) {
- var _a, _b, _c, _d, _e, _f, _g;
+ var _a, _b, _c;
  const warnings = [];
+ const openaiOptions = (_a = await (0, import_provider_utils3.parseProviderOptions)({
+ provider: "openai",
+ providerOptions,
+ schema: openaiProviderOptions
+ })) != null ? _a : {};
+ const structuredOutputs = (_b = openaiOptions.structuredOutputs) != null ? _b : true;
  if (topK != null) {
  warnings.push({
  type: "unsupported-setting",
  setting: "topK"
  });
  }
- if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !this.supportsStructuredOutputs) {
+ if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !structuredOutputs) {
  warnings.push({
  type: "unsupported-setting",
  setting: "responseFormat",
  details: "JSON response format schema is only supported with structuredOutputs"
  });
  }
- const useLegacyFunctionCalling = this.settings.useLegacyFunctionCalling;
- if (useLegacyFunctionCalling && this.settings.parallelToolCalls === true) {
- throw new import_provider3.UnsupportedFunctionalityError({
- functionality: "useLegacyFunctionCalling with parallelToolCalls"
- });
- }
- if (useLegacyFunctionCalling && this.supportsStructuredOutputs) {
- throw new import_provider3.UnsupportedFunctionalityError({
- functionality: "structuredOutputs with useLegacyFunctionCalling"
- });
- }
  const { messages, warnings: messageWarnings } = convertToOpenAIChatMessages(
  {
  prompt,
- useLegacyFunctionCalling,
  systemMessageMode: getSystemMessageMode(this.modelId)
  }
  );
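`getArgs` is now async because the provider options are parsed (and zod-validated) per call; note that `structuredOutputs` defaults to `true` when unset. A minimal sketch of that resolution with plain zod:

```ts
import { z } from "zod";

const schema = z.object({ structuredOutputs: z.boolean().optional() });

function resolveOptions(providerOptions: unknown) {
  const parsed = schema.parse(providerOptions ?? {});
  // Matches `(_b = openaiOptions.structuredOutputs) != null ? _b : true` above.
  return { structuredOutputs: parsed.structuredOutputs ?? true };
}
```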
@@ -444,36 +429,38 @@
  // model id:
  model: this.modelId,
  // model specific settings:
- logit_bias: this.settings.logitBias,
- logprobs: this.settings.logprobs === true || typeof this.settings.logprobs === "number" ? true : void 0,
- top_logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
- user: this.settings.user,
- parallel_tool_calls: this.settings.parallelToolCalls,
+ logit_bias: openaiOptions.logitBias,
+ logprobs: openaiOptions.logprobs === true || typeof openaiOptions.logprobs === "number" ? true : void 0,
+ top_logprobs: typeof openaiOptions.logprobs === "number" ? openaiOptions.logprobs : typeof openaiOptions.logprobs === "boolean" ? openaiOptions.logprobs ? 0 : void 0 : void 0,
+ user: openaiOptions.user,
+ parallel_tool_calls: openaiOptions.parallelToolCalls,
  // standardized settings:
- max_tokens: maxTokens,
+ max_tokens: maxOutputTokens,
  temperature,
  top_p: topP,
  frequency_penalty: frequencyPenalty,
  presence_penalty: presencePenalty,
- // TODO improve below:
- response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? this.supportsStructuredOutputs && responseFormat.schema != null ? {
- type: "json_schema",
- json_schema: {
- schema: responseFormat.schema,
- strict: true,
- name: (_a = responseFormat.name) != null ? _a : "response",
- description: responseFormat.description
- }
- } : { type: "json_object" } : void 0,
+ response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? (
+ // TODO convert into provider option
+ structuredOutputs && responseFormat.schema != null ? {
+ type: "json_schema",
+ json_schema: {
+ schema: responseFormat.schema,
+ strict: true,
+ name: (_c = responseFormat.name) != null ? _c : "response",
+ description: responseFormat.description
+ }
+ } : { type: "json_object" }
+ ) : void 0,
  stop: stopSequences,
  seed,
  // openai specific settings:
- // TODO remove in next major version; we auto-map maxTokens now
- max_completion_tokens: (_b = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _b.maxCompletionTokens,
- store: (_c = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _c.store,
- metadata: (_d = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _d.metadata,
- prediction: (_e = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _e.prediction,
- reasoning_effort: (_g = (_f = providerOptions == null ? void 0 : providerOptions.openai) == null ? void 0 : _f.reasoningEffort) != null ? _g : this.settings.reasoningEffort,
+ // TODO remove in next major version; we auto-map maxOutputTokens now
+ max_completion_tokens: openaiOptions.maxCompletionTokens,
+ store: openaiOptions.store,
+ metadata: openaiOptions.metadata,
+ prediction: openaiOptions.prediction,
+ reasoning_effort: openaiOptions.reasoningEffort,
  // messages:
  messages
  };
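The `response_format` branch is unchanged in substance but now keys off the per-call `structuredOutputs` flag. A sketch of the decision taken when `responseFormat.type === "json"`:

```ts
function toResponseFormat(structuredOutputs: boolean, schema?: object, name?: string) {
  return structuredOutputs && schema != null
    ? {
        type: "json_schema",
        json_schema: { schema, strict: true, name: name ?? "response" },
      }
    : { type: "json_object" };
}
```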
@@ -537,33 +524,37 @@
  }
  baseArgs.max_tokens = void 0;
  }
+ } else if (this.modelId.startsWith("gpt-4o-search-preview") || this.modelId.startsWith("gpt-4o-mini-search-preview")) {
+ if (baseArgs.temperature != null) {
+ baseArgs.temperature = void 0;
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "temperature",
+ details: "temperature is not supported for the search preview models and has been removed."
+ });
+ }
  }
  const {
  tools: openaiTools,
  toolChoice: openaiToolChoice,
- functions,
- function_call,
  toolWarnings
  } = prepareTools({
  tools,
  toolChoice,
- useLegacyFunctionCalling,
- structuredOutputs: this.supportsStructuredOutputs
+ structuredOutputs
  });
  return {
  args: {
  ...baseArgs,
  tools: openaiTools,
- tool_choice: openaiToolChoice,
- functions,
- function_call
+ tool_choice: openaiToolChoice
  },
  warnings: [...warnings, ...toolWarnings]
  };
  }
  async doGenerate(options) {
- var _a, _b, _c, _d, _e, _f, _g, _h;
- const { args: body, warnings } = this.getArgs(options);
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
+ const { args: body, warnings } = await this.getArgs(options);
  const {
  responseHeaders,
  value: response,
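New in this hunk: the `gpt-4o-search-preview` models reject `temperature`, so the provider drops it and surfaces an unsupported-setting warning instead of letting the API call fail. A standalone sketch of the guard:

```ts
function stripTemperature(modelId: string, args: { temperature?: number }): boolean {
  const isSearchPreview =
    modelId.startsWith("gpt-4o-search-preview") ||
    modelId.startsWith("gpt-4o-mini-search-preview");
  if (isSearchPreview && args.temperature != null) {
    delete args.temperature; // removed before the request is sent
    return true; // caller records the "unsupported-setting" warning
  }
  return false;
}
```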
@@ -582,105 +573,61 @@
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const { messages: rawPrompt, ...rawSettings } = body;
  const choice = response.choices[0];
- const completionTokenDetails = (_a = response.usage) == null ? void 0 : _a.completion_tokens_details;
- const promptTokenDetails = (_b = response.usage) == null ? void 0 : _b.prompt_tokens_details;
- const providerMetadata = { openai: {} };
- if ((completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null) {
- providerMetadata.openai.reasoningTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens;
+ const content = [];
+ const text = choice.message.content;
+ if (text != null && text.length > 0) {
+ content.push({ type: "text", text });
+ }
+ for (const toolCall of (_a = choice.message.tool_calls) != null ? _a : []) {
+ content.push({
+ type: "tool-call",
+ toolCallType: "function",
+ toolCallId: (_b = toolCall.id) != null ? _b : (0, import_provider_utils3.generateId)(),
+ toolName: toolCall.function.name,
+ args: toolCall.function.arguments
+ });
  }
+ const completionTokenDetails = (_c = response.usage) == null ? void 0 : _c.completion_tokens_details;
+ const promptTokenDetails = (_d = response.usage) == null ? void 0 : _d.prompt_tokens_details;
+ const providerMetadata = { openai: {} };
  if ((completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens) != null) {
  providerMetadata.openai.acceptedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens;
  }
  if ((completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens) != null) {
  providerMetadata.openai.rejectedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens;
  }
- if ((promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null) {
- providerMetadata.openai.cachedPromptTokens = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens;
+ if (((_e = choice.logprobs) == null ? void 0 : _e.content) != null) {
+ providerMetadata.openai.logprobs = choice.logprobs.content;
  }
  return {
- text: (_c = choice.message.content) != null ? _c : void 0,
- toolCalls: this.settings.useLegacyFunctionCalling && choice.message.function_call ? [
- {
- toolCallType: "function",
- toolCallId: (0, import_provider_utils3.generateId)(),
- toolName: choice.message.function_call.name,
- args: choice.message.function_call.arguments
- }
- ] : (_d = choice.message.tool_calls) == null ? void 0 : _d.map((toolCall) => {
- var _a2;
- return {
- toolCallType: "function",
- toolCallId: (_a2 = toolCall.id) != null ? _a2 : (0, import_provider_utils3.generateId)(),
- toolName: toolCall.function.name,
- args: toolCall.function.arguments
- };
- }),
+ content,
  finishReason: mapOpenAIFinishReason(choice.finish_reason),
  usage: {
- promptTokens: (_f = (_e = response.usage) == null ? void 0 : _e.prompt_tokens) != null ? _f : NaN,
- completionTokens: (_h = (_g = response.usage) == null ? void 0 : _g.completion_tokens) != null ? _h : NaN
+ inputTokens: (_g = (_f = response.usage) == null ? void 0 : _f.prompt_tokens) != null ? _g : void 0,
+ outputTokens: (_i = (_h = response.usage) == null ? void 0 : _h.completion_tokens) != null ? _i : void 0,
+ totalTokens: (_k = (_j = response.usage) == null ? void 0 : _j.total_tokens) != null ? _k : void 0,
+ reasoningTokens: (_l = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null ? _l : void 0,
+ cachedInputTokens: (_m = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null ? _m : void 0
+ },
+ request: { body },
+ response: {
+ ...getResponseMetadata(response),
+ headers: responseHeaders,
+ body: rawResponse
  },
- rawCall: { rawPrompt, rawSettings },
- rawResponse: { headers: responseHeaders, body: rawResponse },
- request: { body: JSON.stringify(body) },
- response: getResponseMetadata(response),
  warnings,
- logprobs: mapOpenAIChatLogProbsOutput(choice.logprobs),
  providerMetadata
  };
  }
  async doStream(options) {
- if (this.settings.simulateStreaming) {
- const result = await this.doGenerate(options);
- const simulatedStream = new ReadableStream({
- start(controller) {
- controller.enqueue({ type: "response-metadata", ...result.response });
- if (result.text) {
- controller.enqueue({
- type: "text-delta",
- textDelta: result.text
- });
- }
- if (result.toolCalls) {
- for (const toolCall of result.toolCalls) {
- controller.enqueue({
- type: "tool-call-delta",
- toolCallType: "function",
- toolCallId: toolCall.toolCallId,
- toolName: toolCall.toolName,
- argsTextDelta: toolCall.args
- });
- controller.enqueue({
- type: "tool-call",
- ...toolCall
- });
- }
- }
- controller.enqueue({
- type: "finish",
- finishReason: result.finishReason,
- usage: result.usage,
- logprobs: result.logprobs,
- providerMetadata: result.providerMetadata
- });
- controller.close();
- }
- });
- return {
- stream: simulatedStream,
- rawCall: result.rawCall,
- rawResponse: result.rawResponse,
- warnings: result.warnings
- };
- }
- const { args, warnings } = this.getArgs(options);
+ const { args, warnings } = await this.getArgs(options);
  const body = {
  ...args,
  stream: true,
- // only include stream_options when in strict compatibility mode:
- stream_options: this.config.compatibility === "strict" ? { include_usage: true } : void 0
+ stream_options: {
+ include_usage: true
+ }
  };
  const { responseHeaders, value: response } = await (0, import_provider_utils3.postJsonToApi)({
  url: this.config.url({
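`doGenerate` now returns a single ordered `content` array instead of separate `text`/`toolCalls` fields, and usage is reported with the richer v2 shape. A type-only sketch of the result assembled above:

```ts
type ContentPart =
  | { type: "text"; text: string }
  | {
      type: "tool-call";
      toolCallType: "function";
      toolCallId: string;
      toolName: string;
      args: string; // JSON-encoded function arguments
    };

type Usage = {
  inputTokens?: number;
  outputTokens?: number;
  totalTokens?: number;
  reasoningTokens?: number; // from completion_tokens_details
  cachedInputTokens?: number; // from prompt_tokens_details
};
```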
@@ -696,22 +643,23 @@
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const { messages: rawPrompt, ...rawSettings } = args;
  const toolCalls = [];
  let finishReason = "unknown";
- let usage = {
- promptTokens: void 0,
- completionTokens: void 0
+ const usage = {
+ inputTokens: void 0,
+ outputTokens: void 0,
+ totalTokens: void 0
  };
- let logprobs;
  let isFirstChunk = true;
- const { useLegacyFunctionCalling } = this.settings;
  const providerMetadata = { openai: {} };
  return {
  stream: response.pipeThrough(
  new TransformStream({
+ start(controller) {
+ controller.enqueue({ type: "stream-start", warnings });
+ },
  transform(chunk, controller) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x;
  if (!chunk.success) {
  finishReason = "error";
  controller.enqueue({ type: "error", error: chunk.error });
@@ -731,60 +679,37 @@
  });
  }
  if (value.usage != null) {
- const {
- prompt_tokens,
- completion_tokens,
- prompt_tokens_details,
- completion_tokens_details
- } = value.usage;
- usage = {
- promptTokens: prompt_tokens != null ? prompt_tokens : void 0,
- completionTokens: completion_tokens != null ? completion_tokens : void 0
- };
- if ((completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens) != null) {
- providerMetadata.openai.reasoningTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens;
- }
- if ((completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens) != null) {
- providerMetadata.openai.acceptedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens;
- }
- if ((completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens) != null) {
- providerMetadata.openai.rejectedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens;
+ usage.inputTokens = (_a = value.usage.prompt_tokens) != null ? _a : void 0;
+ usage.outputTokens = (_b = value.usage.completion_tokens) != null ? _b : void 0;
+ usage.totalTokens = (_c = value.usage.total_tokens) != null ? _c : void 0;
+ usage.reasoningTokens = (_e = (_d = value.usage.completion_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : void 0;
+ usage.cachedInputTokens = (_g = (_f = value.usage.prompt_tokens_details) == null ? void 0 : _f.cached_tokens) != null ? _g : void 0;
+ if (((_h = value.usage.completion_tokens_details) == null ? void 0 : _h.accepted_prediction_tokens) != null) {
+ providerMetadata.openai.acceptedPredictionTokens = (_i = value.usage.completion_tokens_details) == null ? void 0 : _i.accepted_prediction_tokens;
  }
- if ((prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens) != null) {
- providerMetadata.openai.cachedPromptTokens = prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens;
+ if (((_j = value.usage.completion_tokens_details) == null ? void 0 : _j.rejected_prediction_tokens) != null) {
+ providerMetadata.openai.rejectedPredictionTokens = (_k = value.usage.completion_tokens_details) == null ? void 0 : _k.rejected_prediction_tokens;
  }
  }
  const choice = value.choices[0];
  if ((choice == null ? void 0 : choice.finish_reason) != null) {
  finishReason = mapOpenAIFinishReason(choice.finish_reason);
  }
+ if (((_l = choice == null ? void 0 : choice.logprobs) == null ? void 0 : _l.content) != null) {
+ providerMetadata.openai.logprobs = choice.logprobs.content;
+ }
  if ((choice == null ? void 0 : choice.delta) == null) {
  return;
  }
  const delta = choice.delta;
  if (delta.content != null) {
  controller.enqueue({
- type: "text-delta",
- textDelta: delta.content
+ type: "text",
+ text: delta.content
  });
  }
- const mappedLogprobs = mapOpenAIChatLogProbsOutput(
- choice == null ? void 0 : choice.logprobs
- );
- if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
- if (logprobs === void 0) logprobs = [];
- logprobs.push(...mappedLogprobs);
- }
- const mappedToolCalls = useLegacyFunctionCalling && delta.function_call != null ? [
- {
- type: "function",
- id: (0, import_provider_utils3.generateId)(),
- function: delta.function_call,
- index: 0
- }
- ] : delta.tool_calls;
- if (mappedToolCalls != null) {
- for (const toolCallDelta of mappedToolCalls) {
+ if (delta.tool_calls != null) {
+ for (const toolCallDelta of delta.tool_calls) {
  const index = toolCallDelta.index;
  if (toolCalls[index] == null) {
  if (toolCallDelta.type !== "function") {
@@ -799,7 +724,7 @@
  message: `Expected 'id' to be a string.`
  });
  }
- if (((_a = toolCallDelta.function) == null ? void 0 : _a.name) == null) {
+ if (((_m = toolCallDelta.function) == null ? void 0 : _m.name) == null) {
  throw new import_provider3.InvalidResponseDataError({
  data: toolCallDelta,
  message: `Expected 'function.name' to be a string.`
@@ -810,12 +735,12 @@
  type: "function",
  function: {
  name: toolCallDelta.function.name,
- arguments: (_b = toolCallDelta.function.arguments) != null ? _b : ""
+ arguments: (_n = toolCallDelta.function.arguments) != null ? _n : ""
  },
  hasFinished: false
  };
  const toolCall2 = toolCalls[index];
- if (((_c = toolCall2.function) == null ? void 0 : _c.name) != null && ((_d = toolCall2.function) == null ? void 0 : _d.arguments) != null) {
+ if (((_o = toolCall2.function) == null ? void 0 : _o.name) != null && ((_p = toolCall2.function) == null ? void 0 : _p.arguments) != null) {
  if (toolCall2.function.arguments.length > 0) {
  controller.enqueue({
  type: "tool-call-delta",
@@ -829,7 +754,7 @@
  controller.enqueue({
  type: "tool-call",
  toolCallType: "function",
- toolCallId: (_e = toolCall2.id) != null ? _e : (0, import_provider_utils3.generateId)(),
+ toolCallId: (_q = toolCall2.id) != null ? _q : (0, import_provider_utils3.generateId)(),
  toolName: toolCall2.function.name,
  args: toolCall2.function.arguments
  });
@@ -842,21 +767,21 @@
  if (toolCall.hasFinished) {
  continue;
  }
- if (((_f = toolCallDelta.function) == null ? void 0 : _f.arguments) != null) {
- toolCall.function.arguments += (_h = (_g = toolCallDelta.function) == null ? void 0 : _g.arguments) != null ? _h : "";
+ if (((_r = toolCallDelta.function) == null ? void 0 : _r.arguments) != null) {
+ toolCall.function.arguments += (_t = (_s = toolCallDelta.function) == null ? void 0 : _s.arguments) != null ? _t : "";
  }
  controller.enqueue({
  type: "tool-call-delta",
  toolCallType: "function",
  toolCallId: toolCall.id,
  toolName: toolCall.function.name,
- argsTextDelta: (_i = toolCallDelta.function.arguments) != null ? _i : ""
+ argsTextDelta: (_u = toolCallDelta.function.arguments) != null ? _u : ""
  });
- if (((_j = toolCall.function) == null ? void 0 : _j.name) != null && ((_k = toolCall.function) == null ? void 0 : _k.arguments) != null && (0, import_provider_utils3.isParsableJson)(toolCall.function.arguments)) {
+ if (((_v = toolCall.function) == null ? void 0 : _v.name) != null && ((_w = toolCall.function) == null ? void 0 : _w.arguments) != null && (0, import_provider_utils3.isParsableJson)(toolCall.function.arguments)) {
  controller.enqueue({
  type: "tool-call",
  toolCallType: "function",
- toolCallId: (_l = toolCall.id) != null ? _l : (0, import_provider_utils3.generateId)(),
+ toolCallId: (_x = toolCall.id) != null ? _x : (0, import_provider_utils3.generateId)(),
  toolName: toolCall.function.name,
  args: toolCall.function.arguments
  });
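The streaming path accumulates tool-call argument deltas and emits the final `tool-call` event as soon as the accumulated text parses as JSON. A standalone sketch of that early-emit check (`isParsableJson` below is a local stand-in for the provider-utils helper):

```ts
function isParsableJson(text: string): boolean {
  try {
    JSON.parse(text);
    return true;
  } catch {
    return false;
  }
}

let args = "";
for (const delta of ['{"city":', '"Berlin"}']) {
  args += delta;
  if (isParsableJson(args)) {
    // a { type: "tool-call", ... } part would be enqueued here
  }
}
```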
@@ -866,125 +791,111 @@
  }
  },
  flush(controller) {
- var _a, _b;
  controller.enqueue({
  type: "finish",
  finishReason,
- logprobs,
- usage: {
- promptTokens: (_a = usage.promptTokens) != null ? _a : NaN,
- completionTokens: (_b = usage.completionTokens) != null ? _b : NaN
- },
+ usage,
  ...providerMetadata != null ? { providerMetadata } : {}
  });
  }
  })
  ),
- rawCall: { rawPrompt, rawSettings },
- rawResponse: { headers: responseHeaders },
- request: { body: JSON.stringify(body) },
- warnings
+ request: { body },
+ response: { headers: responseHeaders }
  };
  }
  };
- var openaiTokenUsageSchema = import_zod2.z.object({
- prompt_tokens: import_zod2.z.number().nullish(),
- completion_tokens: import_zod2.z.number().nullish(),
- prompt_tokens_details: import_zod2.z.object({
- cached_tokens: import_zod2.z.number().nullish()
+ var openaiTokenUsageSchema = import_zod3.z.object({
+ prompt_tokens: import_zod3.z.number().nullish(),
+ completion_tokens: import_zod3.z.number().nullish(),
+ total_tokens: import_zod3.z.number().nullish(),
+ prompt_tokens_details: import_zod3.z.object({
+ cached_tokens: import_zod3.z.number().nullish()
  }).nullish(),
- completion_tokens_details: import_zod2.z.object({
- reasoning_tokens: import_zod2.z.number().nullish(),
- accepted_prediction_tokens: import_zod2.z.number().nullish(),
- rejected_prediction_tokens: import_zod2.z.number().nullish()
+ completion_tokens_details: import_zod3.z.object({
+ reasoning_tokens: import_zod3.z.number().nullish(),
+ accepted_prediction_tokens: import_zod3.z.number().nullish(),
+ rejected_prediction_tokens: import_zod3.z.number().nullish()
  }).nullish()
  }).nullish();
- var openaiChatResponseSchema = import_zod2.z.object({
- id: import_zod2.z.string().nullish(),
- created: import_zod2.z.number().nullish(),
- model: import_zod2.z.string().nullish(),
- choices: import_zod2.z.array(
- import_zod2.z.object({
- message: import_zod2.z.object({
- role: import_zod2.z.literal("assistant").nullish(),
- content: import_zod2.z.string().nullish(),
- function_call: import_zod2.z.object({
- arguments: import_zod2.z.string(),
- name: import_zod2.z.string()
- }).nullish(),
- tool_calls: import_zod2.z.array(
- import_zod2.z.object({
- id: import_zod2.z.string().nullish(),
- type: import_zod2.z.literal("function"),
- function: import_zod2.z.object({
- name: import_zod2.z.string(),
- arguments: import_zod2.z.string()
+ var openaiChatResponseSchema = import_zod3.z.object({
+ id: import_zod3.z.string().nullish(),
+ created: import_zod3.z.number().nullish(),
+ model: import_zod3.z.string().nullish(),
+ choices: import_zod3.z.array(
+ import_zod3.z.object({
+ message: import_zod3.z.object({
+ role: import_zod3.z.literal("assistant").nullish(),
+ content: import_zod3.z.string().nullish(),
+ tool_calls: import_zod3.z.array(
+ import_zod3.z.object({
+ id: import_zod3.z.string().nullish(),
+ type: import_zod3.z.literal("function"),
+ function: import_zod3.z.object({
+ name: import_zod3.z.string(),
+ arguments: import_zod3.z.string()
  })
  })
  ).nullish()
  }),
- index: import_zod2.z.number(),
- logprobs: import_zod2.z.object({
- content: import_zod2.z.array(
- import_zod2.z.object({
- token: import_zod2.z.string(),
- logprob: import_zod2.z.number(),
- top_logprobs: import_zod2.z.array(
- import_zod2.z.object({
- token: import_zod2.z.string(),
- logprob: import_zod2.z.number()
+ index: import_zod3.z.number(),
+ logprobs: import_zod3.z.object({
+ content: import_zod3.z.array(
+ import_zod3.z.object({
+ token: import_zod3.z.string(),
+ logprob: import_zod3.z.number(),
+ top_logprobs: import_zod3.z.array(
+ import_zod3.z.object({
+ token: import_zod3.z.string(),
+ logprob: import_zod3.z.number()
  })
  )
  })
- ).nullable()
+ ).nullish()
  }).nullish(),
- finish_reason: import_zod2.z.string().nullish()
+ finish_reason: import_zod3.z.string().nullish()
  })
  ),
  usage: openaiTokenUsageSchema
  });
- var openaiChatChunkSchema = import_zod2.z.union([
- import_zod2.z.object({
- id: import_zod2.z.string().nullish(),
- created: import_zod2.z.number().nullish(),
- model: import_zod2.z.string().nullish(),
- choices: import_zod2.z.array(
- import_zod2.z.object({
- delta: import_zod2.z.object({
- role: import_zod2.z.enum(["assistant"]).nullish(),
- content: import_zod2.z.string().nullish(),
- function_call: import_zod2.z.object({
- name: import_zod2.z.string().optional(),
- arguments: import_zod2.z.string().optional()
- }).nullish(),
- tool_calls: import_zod2.z.array(
- import_zod2.z.object({
- index: import_zod2.z.number(),
- id: import_zod2.z.string().nullish(),
- type: import_zod2.z.literal("function").optional(),
- function: import_zod2.z.object({
- name: import_zod2.z.string().nullish(),
- arguments: import_zod2.z.string().nullish()
+ var openaiChatChunkSchema = import_zod3.z.union([
+ import_zod3.z.object({
+ id: import_zod3.z.string().nullish(),
+ created: import_zod3.z.number().nullish(),
+ model: import_zod3.z.string().nullish(),
+ choices: import_zod3.z.array(
+ import_zod3.z.object({
+ delta: import_zod3.z.object({
+ role: import_zod3.z.enum(["assistant"]).nullish(),
+ content: import_zod3.z.string().nullish(),
+ tool_calls: import_zod3.z.array(
+ import_zod3.z.object({
+ index: import_zod3.z.number(),
+ id: import_zod3.z.string().nullish(),
+ type: import_zod3.z.literal("function").nullish(),
+ function: import_zod3.z.object({
+ name: import_zod3.z.string().nullish(),
+ arguments: import_zod3.z.string().nullish()
  })
  })
  ).nullish()
  }).nullish(),
- logprobs: import_zod2.z.object({
- content: import_zod2.z.array(
- import_zod2.z.object({
- token: import_zod2.z.string(),
- logprob: import_zod2.z.number(),
- top_logprobs: import_zod2.z.array(
- import_zod2.z.object({
- token: import_zod2.z.string(),
- logprob: import_zod2.z.number()
+ logprobs: import_zod3.z.object({
+ content: import_zod3.z.array(
+ import_zod3.z.object({
+ token: import_zod3.z.string(),
+ logprob: import_zod3.z.number(),
+ top_logprobs: import_zod3.z.array(
+ import_zod3.z.object({
+ token: import_zod3.z.string(),
+ logprob: import_zod3.z.number()
  })
  )
  })
- ).nullable()
+ ).nullish()
  }).nullish(),
- finish_reason: import_zod2.z.string().nullable().optional(),
- index: import_zod2.z.number()
+ finish_reason: import_zod3.z.string().nullish(),
+ index: import_zod3.z.number()
  })
  ),
  usage: openaiTokenUsageSchema
@@ -992,10 +903,7 @@ var openaiChatChunkSchema = import_zod2.z.union([
  openaiErrorDataSchema
  ]);
  function isReasoningModel(modelId) {
- return modelId === "o1" || modelId.startsWith("o1-") || modelId === "o3" || modelId.startsWith("o3-");
- }
- function isAudioModel(modelId) {
- return modelId.startsWith("gpt-4o-audio-preview");
+ return modelId.startsWith("o");
  }
  function getSystemMessageMode(modelId) {
  var _a, _b;
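`isReasoningModel` collapses the explicit o1/o3 checks into a single prefix test, which also covers o4-mini and future o-series ids (and, notably, any other model id that happens to start with "o"):

```ts
const isReasoningModel = (modelId: string) => modelId.startsWith("o");
```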
@@ -1017,29 +925,37 @@ var reasoningModels = {
  "o1-preview-2024-09-12": {
  systemMessageMode: "remove"
  },
+ o3: {
+ systemMessageMode: "developer"
+ },
+ "o3-2025-04-16": {
+ systemMessageMode: "developer"
+ },
  "o3-mini": {
  systemMessageMode: "developer"
  },
  "o3-mini-2025-01-31": {
  systemMessageMode: "developer"
+ },
+ "o4-mini": {
+ systemMessageMode: "developer"
+ },
+ "o4-mini-2025-04-16": {
+ systemMessageMode: "developer"
  }
  };

  // src/openai-completion-language-model.ts
  var import_provider_utils4 = require("@ai-sdk/provider-utils");
- var import_zod3 = require("zod");
+ var import_zod5 = require("zod");

  // src/convert-to-openai-completion-prompt.ts
  var import_provider4 = require("@ai-sdk/provider");
  function convertToOpenAICompletionPrompt({
  prompt,
- inputFormat,
  user = "user",
  assistant = "assistant"
  }) {
- if (inputFormat === "prompt" && prompt.length === 1 && prompt[0].role === "user" && prompt[0].content.length === 1 && prompt[0].content[0].type === "text") {
- return { prompt: prompt[0].content[0].text };
- }
  let text = "";
  if (prompt[0].role === "system") {
  text += `${prompt[0].content}
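The table gains o3 and o4-mini entries, all mapped to the `developer` system-message mode. A hedged sketch of the lookup (the fallback to "developer" for unlisted reasoning models comes from surrounding code not fully shown in this hunk, so treat it as an assumption):

```ts
type Mode = "system" | "developer" | "remove";

const isReasoningModel = (modelId: string) => modelId.startsWith("o");

const modes: Record<string, Mode> = {
  "o1-preview-2024-09-12": "remove",
  o3: "developer",
  "o4-mini": "developer",
};

function getSystemMessageMode(modelId: string): Mode {
  if (!isReasoningModel(modelId)) return "system";
  return modes[modelId] ?? "developer"; // assumed fallback
}
```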
@@ -1061,13 +977,8 @@ function convertToOpenAICompletionPrompt({
  case "text": {
  return part.text;
  }
- case "image": {
- throw new import_provider4.UnsupportedFunctionalityError({
- functionality: "images"
- });
- }
  }
- }).join("");
+ }).filter(Boolean).join("");
  text += `${user}:
  ${userMessage}

@@ -1113,36 +1024,68 @@
  };
  }

- // src/map-openai-completion-logprobs.ts
- function mapOpenAICompletionLogProbs(logprobs) {
- return logprobs == null ? void 0 : logprobs.tokens.map((token, index) => ({
- token,
- logprob: logprobs.token_logprobs[index],
- topLogprobs: logprobs.top_logprobs ? Object.entries(logprobs.top_logprobs[index]).map(
- ([token2, logprob]) => ({
- token: token2,
- logprob
- })
- ) : []
- }));
- }
+ // src/openai-completion-options.ts
+ var import_zod4 = require("zod");
+ var openaiCompletionProviderOptions = import_zod4.z.object({
+ /**
+ Echo back the prompt in addition to the completion.
+ */
+ echo: import_zod4.z.boolean().optional(),
+ /**
+ Modify the likelihood of specified tokens appearing in the completion.
+
+ Accepts a JSON object that maps tokens (specified by their token ID in
+ the GPT tokenizer) to an associated bias value from -100 to 100. You
+ can use this tokenizer tool to convert text to token IDs. Mathematically,
+ the bias is added to the logits generated by the model prior to sampling.
+ The exact effect will vary per model, but values between -1 and 1 should
+ decrease or increase likelihood of selection; values like -100 or 100
+ should result in a ban or exclusive selection of the relevant token.
+
+ As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
+ token from being generated.
+ */
+ logitBias: import_zod4.z.record(import_zod4.z.string(), import_zod4.z.number()).optional(),
+ /**
+ The suffix that comes after a completion of inserted text.
+ */
+ suffix: import_zod4.z.string().optional(),
+ /**
+ A unique identifier representing your end-user, which can help OpenAI to
+ monitor and detect abuse. Learn more.
+ */
+ user: import_zod4.z.string().optional(),
+ /**
+ Return the log probabilities of the tokens. Including logprobs will increase
+ the response size and can slow down response times. However, it can
+ be useful to better understand how the model is behaving.
+ Setting to true will return the log probabilities of the tokens that
+ were generated.
+ Setting to a number will return the log probabilities of the top n
+ tokens that were generated.
+ */
+ logprobs: import_zod4.z.union([import_zod4.z.boolean(), import_zod4.z.number()]).optional()
+ });

  // src/openai-completion-language-model.ts
  var OpenAICompletionLanguageModel = class {
- constructor(modelId, settings, config) {
+ constructor(modelId, config) {
  this.specificationVersion = "v2";
- this.defaultObjectGenerationMode = void 0;
+ this.supportedUrls = {
+ // No URLs are supported for completion models.
+ };
  this.modelId = modelId;
- this.settings = settings;
  this.config = config;
  }
+ get providerOptionsName() {
+ return this.config.provider.split(".")[0].trim();
+ }
  get provider() {
  return this.config.provider;
  }
- getArgs({
- inputFormat,
+ async getArgs({
  prompt,
- maxTokens,
+ maxOutputTokens,
  temperature,
  topP,
  topK,
1152
1095
  responseFormat,
1153
1096
  tools,
1154
1097
  toolChoice,
1155
- seed
1098
+ seed,
1099
+ providerOptions
1156
1100
  }) {
1157
1101
  const warnings = [];
1102
+ const openaiOptions = {
1103
+ ...await (0, import_provider_utils4.parseProviderOptions)({
1104
+ provider: "openai",
1105
+ providerOptions,
1106
+ schema: openaiCompletionProviderOptions
1107
+ }),
1108
+ ...await (0, import_provider_utils4.parseProviderOptions)({
1109
+ provider: this.providerOptionsName,
1110
+ providerOptions,
1111
+ schema: openaiCompletionProviderOptions
1112
+ })
1113
+ };
1158
1114
  if (topK != null) {
1159
1115
  warnings.push({ type: "unsupported-setting", setting: "topK" });
1160
1116
  }
@@ -1171,20 +1127,20 @@ var OpenAICompletionLanguageModel = class {
  details: "JSON response format is not supported."
  });
  }
- const { prompt: completionPrompt, stopSequences } = convertToOpenAICompletionPrompt({ prompt, inputFormat });
+ const { prompt: completionPrompt, stopSequences } = convertToOpenAICompletionPrompt({ prompt });
  const stop = [...stopSequences != null ? stopSequences : [], ...userStopSequences != null ? userStopSequences : []];
  return {
  args: {
  // model id:
  model: this.modelId,
  // model specific settings:
- echo: this.settings.echo,
- logit_bias: this.settings.logitBias,
- logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
- suffix: this.settings.suffix,
- user: this.settings.user,
+ echo: openaiOptions.echo,
+ logit_bias: openaiOptions.logitBias,
+ logprobs: (openaiOptions == null ? void 0 : openaiOptions.logprobs) === true ? 0 : (openaiOptions == null ? void 0 : openaiOptions.logprobs) === false ? void 0 : openaiOptions == null ? void 0 : openaiOptions.logprobs,
+ suffix: openaiOptions.suffix,
+ user: openaiOptions.user,
  // standardized settings:
- max_tokens: maxTokens,
+ max_tokens: maxOutputTokens,
  temperature,
  top_p: topP,
  frequency_penalty: frequencyPenalty,
@@ -1199,7 +1155,8 @@ var OpenAICompletionLanguageModel = class {
  };
  }
  async doGenerate(options) {
- const { args, warnings } = this.getArgs(options);
+ var _a, _b, _c;
+ const { args, warnings } = await this.getArgs(options);
  const {
  responseHeaders,
  value: response,
1218
1175
  abortSignal: options.abortSignal,
1219
1176
  fetch: this.config.fetch
1220
1177
  });
1221
- const { prompt: rawPrompt, ...rawSettings } = args;
1222
1178
  const choice = response.choices[0];
1179
+ const providerMetadata = { openai: {} };
1180
+ if (choice.logprobs != null) {
1181
+ providerMetadata.openai.logprobs = choice.logprobs;
1182
+ }
1223
1183
  return {
1224
- text: choice.text,
1184
+ content: [{ type: "text", text: choice.text }],
1225
1185
  usage: {
1226
- promptTokens: response.usage.prompt_tokens,
1227
- completionTokens: response.usage.completion_tokens
1186
+ inputTokens: (_a = response.usage) == null ? void 0 : _a.prompt_tokens,
1187
+ outputTokens: (_b = response.usage) == null ? void 0 : _b.completion_tokens,
1188
+ totalTokens: (_c = response.usage) == null ? void 0 : _c.total_tokens
1228
1189
  },
1229
1190
  finishReason: mapOpenAIFinishReason(choice.finish_reason),
1230
- logprobs: mapOpenAICompletionLogProbs(choice.logprobs),
1231
- rawCall: { rawPrompt, rawSettings },
1232
- rawResponse: { headers: responseHeaders, body: rawResponse },
1233
- response: getResponseMetadata(response),
1234
- warnings,
1235
- request: { body: JSON.stringify(args) }
1191
+ request: { body: args },
1192
+ response: {
1193
+ ...getResponseMetadata(response),
1194
+ headers: responseHeaders,
1195
+ body: rawResponse
1196
+ },
1197
+ providerMetadata,
1198
+ warnings
1236
1199
  };
1237
1200
  }
1238
1201
  async doStream(options) {
1239
- const { args, warnings } = this.getArgs(options);
1202
+ const { args, warnings } = await this.getArgs(options);
1240
1203
  const body = {
1241
1204
  ...args,
1242
1205
  stream: true,
1243
- // only include stream_options when in strict compatibility mode:
1244
- stream_options: this.config.compatibility === "strict" ? { include_usage: true } : void 0
1206
+ stream_options: {
1207
+ include_usage: true
1208
+ }
1245
1209
  };
1246
1210
  const { responseHeaders, value: response } = await (0, import_provider_utils4.postJsonToApi)({
1247
1211
  url: this.config.url({
@@ -1257,17 +1221,20 @@ var OpenAICompletionLanguageModel = class {
1257
1221
  abortSignal: options.abortSignal,
1258
1222
  fetch: this.config.fetch
1259
1223
  });
1260
- const { prompt: rawPrompt, ...rawSettings } = args;
1261
1224
  let finishReason = "unknown";
1262
- let usage = {
1263
- promptTokens: Number.NaN,
1264
- completionTokens: Number.NaN
1225
+ const providerMetadata = { openai: {} };
1226
+ const usage = {
1227
+ inputTokens: void 0,
1228
+ outputTokens: void 0,
1229
+ totalTokens: void 0
1265
1230
  };
1266
- let logprobs;
1267
1231
  let isFirstChunk = true;
1268
1232
  return {
1269
1233
  stream: response.pipeThrough(
1270
1234
  new TransformStream({
1235
+ start(controller) {
1236
+ controller.enqueue({ type: "stream-start", warnings });
1237
+ },
1271
1238
  transform(chunk, controller) {
1272
1239
  if (!chunk.success) {
1273
1240
  finishReason = "error";
@@ -1288,87 +1255,79 @@ var OpenAICompletionLanguageModel = class {
  });
  }
  if (value.usage != null) {
- usage = {
- promptTokens: value.usage.prompt_tokens,
- completionTokens: value.usage.completion_tokens
- };
+ usage.inputTokens = value.usage.prompt_tokens;
+ usage.outputTokens = value.usage.completion_tokens;
+ usage.totalTokens = value.usage.total_tokens;
  }
  const choice = value.choices[0];
  if ((choice == null ? void 0 : choice.finish_reason) != null) {
  finishReason = mapOpenAIFinishReason(choice.finish_reason);
  }
+ if ((choice == null ? void 0 : choice.logprobs) != null) {
+ providerMetadata.openai.logprobs = choice.logprobs;
+ }
  if ((choice == null ? void 0 : choice.text) != null) {
  controller.enqueue({
- type: "text-delta",
- textDelta: choice.text
+ type: "text",
+ text: choice.text
  });
  }
- const mappedLogprobs = mapOpenAICompletionLogProbs(
- choice == null ? void 0 : choice.logprobs
- );
- if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
- if (logprobs === void 0) logprobs = [];
- logprobs.push(...mappedLogprobs);
- }
  },
  flush(controller) {
  controller.enqueue({
  type: "finish",
  finishReason,
- logprobs,
+ providerMetadata,
  usage
  });
  }
  })
  ),
- rawCall: { rawPrompt, rawSettings },
- rawResponse: { headers: responseHeaders },
- warnings,
- request: { body: JSON.stringify(body) }
+ request: { body },
+ response: { headers: responseHeaders }
  };
  }
  };
- var openaiCompletionResponseSchema = import_zod3.z.object({
- id: import_zod3.z.string().nullish(),
- created: import_zod3.z.number().nullish(),
- model: import_zod3.z.string().nullish(),
- choices: import_zod3.z.array(
- import_zod3.z.object({
- text: import_zod3.z.string(),
- finish_reason: import_zod3.z.string(),
- logprobs: import_zod3.z.object({
- tokens: import_zod3.z.array(import_zod3.z.string()),
- token_logprobs: import_zod3.z.array(import_zod3.z.number()),
- top_logprobs: import_zod3.z.array(import_zod3.z.record(import_zod3.z.string(), import_zod3.z.number())).nullable()
+ var usageSchema = import_zod5.z.object({
+ prompt_tokens: import_zod5.z.number(),
+ completion_tokens: import_zod5.z.number(),
+ total_tokens: import_zod5.z.number()
+ });
+ var openaiCompletionResponseSchema = import_zod5.z.object({
+ id: import_zod5.z.string().nullish(),
+ created: import_zod5.z.number().nullish(),
+ model: import_zod5.z.string().nullish(),
+ choices: import_zod5.z.array(
+ import_zod5.z.object({
+ text: import_zod5.z.string(),
+ finish_reason: import_zod5.z.string(),
+ logprobs: import_zod5.z.object({
+ tokens: import_zod5.z.array(import_zod5.z.string()),
+ token_logprobs: import_zod5.z.array(import_zod5.z.number()),
+ top_logprobs: import_zod5.z.array(import_zod5.z.record(import_zod5.z.string(), import_zod5.z.number())).nullish()
  }).nullish()
  })
  ),
- usage: import_zod3.z.object({
- prompt_tokens: import_zod3.z.number(),
- completion_tokens: import_zod3.z.number()
- })
+ usage: usageSchema.nullish()
  });
- var openaiCompletionChunkSchema = import_zod3.z.union([
- import_zod3.z.object({
- id: import_zod3.z.string().nullish(),
- created: import_zod3.z.number().nullish(),
- model: import_zod3.z.string().nullish(),
- choices: import_zod3.z.array(
- import_zod3.z.object({
- text: import_zod3.z.string(),
- finish_reason: import_zod3.z.string().nullish(),
- index: import_zod3.z.number(),
- logprobs: import_zod3.z.object({
- tokens: import_zod3.z.array(import_zod3.z.string()),
- token_logprobs: import_zod3.z.array(import_zod3.z.number()),
- top_logprobs: import_zod3.z.array(import_zod3.z.record(import_zod3.z.string(), import_zod3.z.number())).nullable()
+ var openaiCompletionChunkSchema = import_zod5.z.union([
+ import_zod5.z.object({
+ id: import_zod5.z.string().nullish(),
+ created: import_zod5.z.number().nullish(),
+ model: import_zod5.z.string().nullish(),
+ choices: import_zod5.z.array(
+ import_zod5.z.object({
+ text: import_zod5.z.string(),
+ finish_reason: import_zod5.z.string().nullish(),
+ index: import_zod5.z.number(),
+ logprobs: import_zod5.z.object({
+ tokens: import_zod5.z.array(import_zod5.z.string()),
+ token_logprobs: import_zod5.z.array(import_zod5.z.number()),
+ top_logprobs: import_zod5.z.array(import_zod5.z.record(import_zod5.z.string(), import_zod5.z.number())).nullish()
  }).nullish()
  })
  ),
- usage: import_zod3.z.object({
- prompt_tokens: import_zod3.z.number(),
- completion_tokens: import_zod3.z.number()
- }).nullish()
+ usage: usageSchema.nullish()
  }),
  openaiErrorDataSchema
  ]);
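
Note: under the v2 specification the completion model now opens its stream with a `stream-start` part carrying warnings, renames `text-delta`/`textDelta` parts to `text`, reports usage as `inputTokens`/`outputTokens`/`totalTokens`, and moves logprobs into `providerMetadata.openai`. A minimal consumption sketch in TypeScript, assuming a constructed `model` instance and valid call `options` (neither shown in this diff):

// Reading v2 stream parts from doStream().
const { stream } = await model.doStream(options);
const reader = stream.getReader();
while (true) {
  const { done, value: part } = await reader.read();
  if (done) break;
  if (part.type === "stream-start") {
    console.log(part.warnings); // warnings now arrive as the first part
  } else if (part.type === "text") {
    process.stdout.write(part.text); // was { type: "text-delta", textDelta }
  } else if (part.type === "finish") {
    const { inputTokens, outputTokens, totalTokens } = part.usage;
    console.log(inputTokens, outputTokens, totalTokens);
    console.log(part.providerMetadata?.openai.logprobs); // logprobs moved here
  }
}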
@@ -1376,30 +1335,42 @@ var openaiCompletionChunkSchema = import_zod3.z.union([
  // src/openai-embedding-model.ts
  var import_provider5 = require("@ai-sdk/provider");
  var import_provider_utils5 = require("@ai-sdk/provider-utils");
- var import_zod4 = require("zod");
+ var import_zod7 = require("zod");
+
+ // src/openai-embedding-options.ts
+ var import_zod6 = require("zod");
+ var openaiEmbeddingProviderOptions = import_zod6.z.object({
+ /**
+ The number of dimensions the resulting output embeddings should have.
+ Only supported in text-embedding-3 and later models.
+ */
+ dimensions: import_zod6.z.number().optional(),
+ /**
+ A unique identifier representing your end-user, which can help OpenAI to
+ monitor and detect abuse. Learn more.
+ */
+ user: import_zod6.z.string().optional()
+ });
+
+ // src/openai-embedding-model.ts
  var OpenAIEmbeddingModel = class {
- constructor(modelId, settings, config) {
- this.specificationVersion = "v1";
+ constructor(modelId, config) {
+ this.specificationVersion = "v2";
+ this.maxEmbeddingsPerCall = 2048;
+ this.supportsParallelCalls = true;
  this.modelId = modelId;
- this.settings = settings;
  this.config = config;
  }
  get provider() {
  return this.config.provider;
  }
- get maxEmbeddingsPerCall() {
- var _a;
- return (_a = this.settings.maxEmbeddingsPerCall) != null ? _a : 2048;
- }
- get supportsParallelCalls() {
- var _a;
- return (_a = this.settings.supportsParallelCalls) != null ? _a : true;
- }
  async doEmbed({
  values,
  headers,
- abortSignal
+ abortSignal,
+ providerOptions
  }) {
+ var _a;
  if (values.length > this.maxEmbeddingsPerCall) {
  throw new import_provider5.TooManyEmbeddingValuesForCallError({
  provider: this.provider,
@@ -1408,7 +1379,16 @@ var OpenAIEmbeddingModel = class {
  values
  });
  }
- const { responseHeaders, value: response } = await (0, import_provider_utils5.postJsonToApi)({
+ const openaiOptions = (_a = await (0, import_provider_utils5.parseProviderOptions)({
+ provider: "openai",
+ providerOptions,
+ schema: openaiEmbeddingProviderOptions
+ })) != null ? _a : {};
+ const {
+ responseHeaders,
+ value: response,
+ rawValue
+ } = await (0, import_provider_utils5.postJsonToApi)({
  url: this.config.url({
  path: "/embeddings",
  modelId: this.modelId
@@ -1418,8 +1398,8 @@ var OpenAIEmbeddingModel = class {
  model: this.modelId,
  input: values,
  encoding_format: "float",
- dimensions: this.settings.dimensions,
- user: this.settings.user
+ dimensions: openaiOptions.dimensions,
+ user: openaiOptions.user
  },
  failedResponseHandler: openaiFailedResponseHandler,
  successfulResponseHandler: (0, import_provider_utils5.createJsonResponseHandler)(
@@ -1431,36 +1411,37 @@ var OpenAIEmbeddingModel = class {
  return {
  embeddings: response.data.map((item) => item.embedding),
  usage: response.usage ? { tokens: response.usage.prompt_tokens } : void 0,
- rawResponse: { headers: responseHeaders }
+ response: { headers: responseHeaders, body: rawValue }
  };
  }
  };
- var openaiTextEmbeddingResponseSchema = import_zod4.z.object({
- data: import_zod4.z.array(import_zod4.z.object({ embedding: import_zod4.z.array(import_zod4.z.number()) })),
- usage: import_zod4.z.object({ prompt_tokens: import_zod4.z.number() }).nullish()
+ var openaiTextEmbeddingResponseSchema = import_zod7.z.object({
+ data: import_zod7.z.array(import_zod7.z.object({ embedding: import_zod7.z.array(import_zod7.z.number()) })),
+ usage: import_zod7.z.object({ prompt_tokens: import_zod7.z.number() }).nullish()
  });
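
Note: the embedding model drops its `settings` constructor argument; `dimensions` and `user` are now per-call provider options validated by `openaiEmbeddingProviderOptions`, and `maxEmbeddingsPerCall`/`supportsParallelCalls` become fixed instance fields. A hedged usage sketch (the `embed` helper from the `ai` package and the model id are illustrative, not part of this diff):

import { openai } from "@ai-sdk/openai";
import { embed } from "ai";

const { embedding } = await embed({
  model: openai.embedding("text-embedding-3-small"),
  value: "sunny day at the beach",
  providerOptions: {
    // parsed against openaiEmbeddingProviderOptions before the request is built
    openai: { dimensions: 512, user: "user-1234" },
  },
});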

  // src/openai-image-model.ts
  var import_provider_utils6 = require("@ai-sdk/provider-utils");
- var import_zod5 = require("zod");
+ var import_zod8 = require("zod");

  // src/openai-image-settings.ts
  var modelMaxImagesPerCall = {
  "dall-e-3": 1,
- "dall-e-2": 10
+ "dall-e-2": 10,
+ "gpt-image-1": 10
  };
+ var hasDefaultResponseFormat = /* @__PURE__ */ new Set(["gpt-image-1"]);

  // src/openai-image-model.ts
  var OpenAIImageModel = class {
- constructor(modelId, settings, config) {
+ constructor(modelId, config) {
  this.modelId = modelId;
- this.settings = settings;
  this.config = config;
- this.specificationVersion = "v1";
+ this.specificationVersion = "v2";
  }
  get maxImagesPerCall() {
- var _a, _b;
- return (_b = (_a = this.settings.maxImagesPerCall) != null ? _a : modelMaxImagesPerCall[this.modelId]) != null ? _b : 1;
+ var _a;
+ return (_a = modelMaxImagesPerCall[this.modelId]) != null ? _a : 1;
  }
  get provider() {
  return this.config.provider;
@@ -1500,7 +1481,7 @@ var OpenAIImageModel = class {
  n,
  size,
  ...(_d = providerOptions.openai) != null ? _d : {},
- response_format: "b64_json"
+ ...!hasDefaultResponseFormat.has(this.modelId) ? { response_format: "b64_json" } : {}
  },
  failedResponseHandler: openaiFailedResponseHandler,
  successfulResponseHandler: (0, import_provider_utils6.createJsonResponseHandler)(
@@ -1516,21 +1497,319 @@ var OpenAIImageModel = class {
  timestamp: currentDate,
  modelId: this.modelId,
  headers: responseHeaders
+ },
+ providerMetadata: {
+ openai: {
+ images: response.data.map(
+ (item) => item.revised_prompt ? {
+ revisedPrompt: item.revised_prompt
+ } : null
+ )
+ }
  }
  };
  }
  };
- var openaiImageResponseSchema = import_zod5.z.object({
- data: import_zod5.z.array(import_zod5.z.object({ b64_json: import_zod5.z.string() }))
+ var openaiImageResponseSchema = import_zod8.z.object({
+ data: import_zod8.z.array(
+ import_zod8.z.object({ b64_json: import_zod8.z.string(), revised_prompt: import_zod8.z.string().optional() })
+ )
  });
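
Note: `gpt-image-1` is added to `modelMaxImagesPerCall` (10 per call) and to `hasDefaultResponseFormat`, so `response_format: "b64_json"` is only sent for models outside that set; when the API returns `revised_prompt`, it is surfaced per image under `providerMetadata.openai.images`. A hedged sketch of reading that metadata, assuming an already constructed `imageModel` instance and minimal call options:

// Entries in providerMetadata.openai.images are { revisedPrompt } or null.
const result = await imageModel.doGenerate({
  prompt: "A watercolor fox",
  n: 1,
  size: "1024x1024",
  aspectRatio: undefined,
  seed: undefined,
  providerOptions: {},
});
console.log(result.providerMetadata?.openai?.images);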
 
- // src/responses/openai-responses-language-model.ts
+ // src/openai-transcription-model.ts
+ var import_provider_utils7 = require("@ai-sdk/provider-utils");
+ var import_zod10 = require("zod");
+
+ // src/openai-transcription-options.ts
+ var import_zod9 = require("zod");
+ var openAITranscriptionProviderOptions = import_zod9.z.object({
+ /**
+ * Additional information to include in the transcription response.
+ */
+ include: import_zod9.z.array(import_zod9.z.string()).optional(),
+ /**
+ * The language of the input audio in ISO-639-1 format.
+ */
+ language: import_zod9.z.string().optional(),
+ /**
+ * An optional text to guide the model's style or continue a previous audio segment.
+ */
+ prompt: import_zod9.z.string().optional(),
+ /**
+ * The sampling temperature, between 0 and 1.
+ * @default 0
+ */
+ temperature: import_zod9.z.number().min(0).max(1).default(0).optional(),
+ /**
+ * The timestamp granularities to populate for this transcription.
+ * @default ['segment']
+ */
+ timestampGranularities: import_zod9.z.array(import_zod9.z.enum(["word", "segment"])).default(["segment"]).optional()
+ });
+
+ // src/openai-transcription-model.ts
+ var languageMap = {
+ afrikaans: "af",
+ arabic: "ar",
+ armenian: "hy",
+ azerbaijani: "az",
+ belarusian: "be",
+ bosnian: "bs",
+ bulgarian: "bg",
+ catalan: "ca",
+ chinese: "zh",
+ croatian: "hr",
+ czech: "cs",
+ danish: "da",
+ dutch: "nl",
+ english: "en",
+ estonian: "et",
+ finnish: "fi",
+ french: "fr",
+ galician: "gl",
+ german: "de",
+ greek: "el",
+ hebrew: "he",
+ hindi: "hi",
+ hungarian: "hu",
+ icelandic: "is",
+ indonesian: "id",
+ italian: "it",
+ japanese: "ja",
+ kannada: "kn",
+ kazakh: "kk",
+ korean: "ko",
+ latvian: "lv",
+ lithuanian: "lt",
+ macedonian: "mk",
+ malay: "ms",
+ marathi: "mr",
+ maori: "mi",
+ nepali: "ne",
+ norwegian: "no",
+ persian: "fa",
+ polish: "pl",
+ portuguese: "pt",
+ romanian: "ro",
+ russian: "ru",
+ serbian: "sr",
+ slovak: "sk",
+ slovenian: "sl",
+ spanish: "es",
+ swahili: "sw",
+ swedish: "sv",
+ tagalog: "tl",
+ tamil: "ta",
+ thai: "th",
+ turkish: "tr",
+ ukrainian: "uk",
+ urdu: "ur",
+ vietnamese: "vi",
+ welsh: "cy"
+ };
+ var OpenAITranscriptionModel = class {
+ constructor(modelId, config) {
+ this.modelId = modelId;
+ this.config = config;
+ this.specificationVersion = "v1";
+ }
+ get provider() {
+ return this.config.provider;
+ }
+ async getArgs({
+ audio,
+ mediaType,
+ providerOptions
+ }) {
+ const warnings = [];
+ const openAIOptions = await (0, import_provider_utils7.parseProviderOptions)({
+ provider: "openai",
+ providerOptions,
+ schema: openAITranscriptionProviderOptions
+ });
+ const formData = new FormData();
+ const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([(0, import_provider_utils7.convertBase64ToUint8Array)(audio)]);
+ formData.append("model", this.modelId);
+ formData.append("file", new File([blob], "audio", { type: mediaType }));
+ if (openAIOptions) {
+ const transcriptionModelOptions = {
+ include: openAIOptions.include,
+ language: openAIOptions.language,
+ prompt: openAIOptions.prompt,
+ temperature: openAIOptions.temperature,
+ timestamp_granularities: openAIOptions.timestampGranularities
+ };
+ for (const [key, value] of Object.entries(transcriptionModelOptions)) {
+ if (value != null) {
+ formData.append(key, String(value));
+ }
+ }
+ }
+ return {
+ formData,
+ warnings
+ };
+ }
+ async doGenerate(options) {
+ var _a, _b, _c, _d, _e, _f;
+ const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
+ const { formData, warnings } = await this.getArgs(options);
+ const {
+ value: response,
+ responseHeaders,
+ rawValue: rawResponse
+ } = await (0, import_provider_utils7.postFormDataToApi)({
+ url: this.config.url({
+ path: "/audio/transcriptions",
+ modelId: this.modelId
+ }),
+ headers: (0, import_provider_utils7.combineHeaders)(this.config.headers(), options.headers),
+ formData,
+ failedResponseHandler: openaiFailedResponseHandler,
+ successfulResponseHandler: (0, import_provider_utils7.createJsonResponseHandler)(
+ openaiTranscriptionResponseSchema
+ ),
+ abortSignal: options.abortSignal,
+ fetch: this.config.fetch
+ });
+ const language = response.language != null && response.language in languageMap ? languageMap[response.language] : void 0;
+ return {
+ text: response.text,
+ segments: (_e = (_d = response.words) == null ? void 0 : _d.map((word) => ({
+ text: word.word,
+ startSecond: word.start,
+ endSecond: word.end
+ }))) != null ? _e : [],
+ language,
+ durationInSeconds: (_f = response.duration) != null ? _f : void 0,
+ warnings,
+ response: {
+ timestamp: currentDate,
+ modelId: this.modelId,
+ headers: responseHeaders,
+ body: rawResponse
+ }
+ };
+ }
+ };
+ var openaiTranscriptionResponseSchema = import_zod10.z.object({
+ text: import_zod10.z.string(),
+ language: import_zod10.z.string().nullish(),
+ duration: import_zod10.z.number().nullish(),
+ words: import_zod10.z.array(
+ import_zod10.z.object({
+ word: import_zod10.z.string(),
+ start: import_zod10.z.number(),
+ end: import_zod10.z.number()
+ })
+ ).nullish()
+ });
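
Note: the new `OpenAITranscriptionModel` posts multipart form data to `/audio/transcriptions`, maps a spelled-out response language (e.g. "english") to its ISO-639-1 code via `languageMap`, and converts `words` into `segments` with `startSecond`/`endSecond`. A hedged usage sketch; the `experimental_transcribe` helper and `openai.transcription` factory are assumptions about the surrounding SDK, not shown in this diff:

import { openai } from "@ai-sdk/openai";
import { experimental_transcribe as transcribe } from "ai";
import { readFile } from "node:fs/promises";

const result = await transcribe({
  model: openai.transcription("whisper-1"),
  audio: await readFile("audio.mp3"),
  providerOptions: {
    openai: {
      language: "en",                   // appended to the form data above
      temperature: 0,
      timestampGranularities: ["word"], // requests word-level timestamps
    },
  },
});
console.log(result.text, result.language, result.durationInSeconds);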
+
+ // src/openai-speech-model.ts
  var import_provider_utils8 = require("@ai-sdk/provider-utils");
- var import_zod6 = require("zod");
+ var import_zod11 = require("zod");
+ var OpenAIProviderOptionsSchema = import_zod11.z.object({
+ instructions: import_zod11.z.string().nullish(),
+ speed: import_zod11.z.number().min(0.25).max(4).default(1).nullish()
+ });
+ var OpenAISpeechModel = class {
+ constructor(modelId, config) {
+ this.modelId = modelId;
+ this.config = config;
+ this.specificationVersion = "v1";
+ }
+ get provider() {
+ return this.config.provider;
+ }
+ async getArgs({
+ text,
+ voice = "alloy",
+ outputFormat = "mp3",
+ speed,
+ instructions,
+ providerOptions
+ }) {
+ const warnings = [];
+ const openAIOptions = await (0, import_provider_utils8.parseProviderOptions)({
+ provider: "openai",
+ providerOptions,
+ schema: OpenAIProviderOptionsSchema
+ });
+ const requestBody = {
+ model: this.modelId,
+ input: text,
+ voice,
+ response_format: "mp3",
+ speed,
+ instructions
+ };
+ if (outputFormat) {
+ if (["mp3", "opus", "aac", "flac", "wav", "pcm"].includes(outputFormat)) {
+ requestBody.response_format = outputFormat;
+ } else {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "outputFormat",
+ details: `Unsupported output format: ${outputFormat}. Using mp3 instead.`
+ });
+ }
+ }
+ if (openAIOptions) {
+ const speechModelOptions = {};
+ for (const key in speechModelOptions) {
+ const value = speechModelOptions[key];
+ if (value !== void 0) {
+ requestBody[key] = value;
+ }
+ }
+ }
+ return {
+ requestBody,
+ warnings
+ };
+ }
+ async doGenerate(options) {
+ var _a, _b, _c;
+ const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
+ const { requestBody, warnings } = await this.getArgs(options);
+ const {
+ value: audio,
+ responseHeaders,
+ rawValue: rawResponse
+ } = await (0, import_provider_utils8.postJsonToApi)({
+ url: this.config.url({
+ path: "/audio/speech",
+ modelId: this.modelId
+ }),
+ headers: (0, import_provider_utils8.combineHeaders)(this.config.headers(), options.headers),
+ body: requestBody,
+ failedResponseHandler: openaiFailedResponseHandler,
+ successfulResponseHandler: (0, import_provider_utils8.createBinaryResponseHandler)(),
+ abortSignal: options.abortSignal,
+ fetch: this.config.fetch
+ });
+ return {
+ audio,
+ warnings,
+ request: {
+ body: JSON.stringify(requestBody)
+ },
+ response: {
+ timestamp: currentDate,
+ modelId: this.modelId,
+ headers: responseHeaders,
+ body: rawResponse
+ }
+ };
+ }
+ };
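
Note: the new `OpenAISpeechModel` posts to `/audio/speech` and returns binary audio; `voice` defaults to "alloy", and any `outputFormat` outside mp3/opus/aac/flac/wav/pcm produces an `unsupported-setting` warning and falls back to mp3. A hedged usage sketch; the `experimental_generateSpeech` helper name is an assumption, not shown in this diff:

import { openai } from "@ai-sdk/openai";
import { experimental_generateSpeech as generateSpeech } from "ai";

const { audio, warnings } = await generateSpeech({
  model: openai.speech("tts-1"),
  text: "Hello from the speech model!",
  voice: "alloy",      // default used by getArgs above
  outputFormat: "wav", // accepted: mp3, opus, aac, flac, wav, pcm
  speed: 1.25,         // forwarded into the request body above
});
console.log(warnings);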
+
+ // src/responses/openai-responses-language-model.ts
+ var import_provider_utils9 = require("@ai-sdk/provider-utils");
+ var import_zod12 = require("zod");

  // src/responses/convert-to-openai-responses-messages.ts
  var import_provider6 = require("@ai-sdk/provider");
- var import_provider_utils7 = require("@ai-sdk/provider-utils");
  function convertToOpenAIResponsesMessages({
  prompt,
  systemMessageMode
@@ -1569,38 +1848,35 @@ function convertToOpenAIResponsesMessages({
  messages.push({
  role: "user",
  content: content.map((part, index) => {
- var _a, _b, _c, _d;
+ var _a, _b, _c;
  switch (part.type) {
  case "text": {
  return { type: "input_text", text: part.text };
  }
- case "image": {
- return {
- type: "input_image",
- image_url: part.image instanceof URL ? part.image.toString() : `data:${(_a = part.mimeType) != null ? _a : "image/jpeg"};base64,${(0, import_provider_utils7.convertUint8ArrayToBase64)(part.image)}`,
- // OpenAI specific extension: image detail
- detail: (_c = (_b = part.providerOptions) == null ? void 0 : _b.openai) == null ? void 0 : _c.imageDetail
- };
- }
  case "file": {
- if (part.data instanceof URL) {
- throw new import_provider6.UnsupportedFunctionalityError({
- functionality: "File URLs in user messages"
- });
- }
- switch (part.mimeType) {
- case "application/pdf": {
- return {
- type: "input_file",
- filename: (_d = part.filename) != null ? _d : `part-${index}.pdf`,
- file_data: `data:application/pdf;base64,${part.data}`
- };
- }
- default: {
+ if (part.mediaType.startsWith("image/")) {
+ const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
+ return {
+ type: "input_image",
+ image_url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${part.data}`,
+ // OpenAI specific extension: image detail
+ detail: (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.imageDetail
+ };
+ } else if (part.mediaType === "application/pdf") {
+ if (part.data instanceof URL) {
  throw new import_provider6.UnsupportedFunctionalityError({
- functionality: "Only PDF files are supported in user messages"
+ functionality: "PDF file parts with URLs"
  });
  }
+ return {
+ type: "input_file",
+ filename: (_c = part.filename) != null ? _c : `part-${index}.pdf`,
+ file_data: `data:application/pdf;base64,${part.data}`
+ };
+ } else {
+ throw new import_provider6.UnsupportedFunctionalityError({
+ functionality: `file part media type ${part.mediaType}`
+ });
  }
  }
  }
@@ -1729,7 +2005,7 @@ function prepareResponsesTools({
  default: {
  const _exhaustiveCheck = type;
  throw new import_provider7.UnsupportedFunctionalityError({
- functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`
+ functionality: `tool choice type: ${_exhaustiveCheck}`
  });
  }
  }
@@ -1739,15 +2015,17 @@ function prepareResponsesTools({
  var OpenAIResponsesLanguageModel = class {
  constructor(modelId, config) {
  this.specificationVersion = "v2";
- this.defaultObjectGenerationMode = "json";
+ this.supportedUrls = {
+ "image/*": [/^https?:\/\/.*$/]
+ };
  this.modelId = modelId;
  this.config = config;
  }
  get provider() {
  return this.config.provider;
  }
- getArgs({
- maxTokens,
+ async getArgs({
+ maxOutputTokens,
  temperature,
  stopSequences,
  topP,
@@ -1790,7 +2068,7 @@ var OpenAIResponsesLanguageModel = class {
  systemMessageMode: modelConfig.systemMessageMode
  });
  warnings.push(...messageWarnings);
- const openaiOptions = (0, import_provider_utils8.parseProviderOptions)({
+ const openaiOptions = await (0, import_provider_utils9.parseProviderOptions)({
  provider: "openai",
  providerOptions,
  schema: openaiResponsesProviderOptionsSchema
@@ -1801,7 +2079,7 @@ var OpenAIResponsesLanguageModel = class {
  input: messages,
  temperature,
  top_p: topP,
- max_output_tokens: maxTokens,
+ max_output_tokens: maxOutputTokens,
  ...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
  text: {
  format: responseFormat.schema != null ? {
@@ -1821,8 +2099,15 @@ var OpenAIResponsesLanguageModel = class {
  user: openaiOptions == null ? void 0 : openaiOptions.user,
  instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
  // model-specific settings:
- ...modelConfig.isReasoningModel && (openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
- reasoning: { effort: openaiOptions == null ? void 0 : openaiOptions.reasoningEffort }
+ ...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
+ reasoning: {
+ ...(openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
+ effort: openaiOptions.reasoningEffort
+ },
+ ...(openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null && {
+ summary: openaiOptions.reasoningSummary
+ }
+ }
  },
  ...modelConfig.requiredAutoTruncation && {
  truncation: "auto"
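
Note: the request builder now emits a `reasoning` object whenever either `reasoningEffort` or `reasoningSummary` is set on a reasoning model; `reasoningSummary` is a new entry in `openaiResponsesProviderOptionsSchema` (see the schema change near the end of this diff). A hedged sketch of a call that exercises both options (the `generateText` helper and the option values are illustrative):

import { openai } from "@ai-sdk/openai";
import { generateText } from "ai";

const result = await generateText({
  model: openai.responses("o3-mini"),
  prompt: "Prove there are infinitely many primes.",
  providerOptions: {
    openai: {
      reasoningEffort: "medium",
      reasoningSummary: "auto", // new option; value is illustrative
    },
  },
});
// The /responses request body then includes:
// { reasoning: { effort: "medium", summary: "auto" } }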
@@ -1865,139 +2150,159 @@ var OpenAIResponsesLanguageModel = class {
  };
  }
  async doGenerate(options) {
- var _a, _b, _c, _d, _e;
- const { args: body, warnings } = this.getArgs(options);
+ var _a, _b, _c, _d, _e, _f, _g, _h;
+ const { args: body, warnings } = await this.getArgs(options);
  const {
  responseHeaders,
  value: response,
  rawValue: rawResponse
- } = await (0, import_provider_utils8.postJsonToApi)({
+ } = await (0, import_provider_utils9.postJsonToApi)({
  url: this.config.url({
  path: "/responses",
  modelId: this.modelId
  }),
- headers: (0, import_provider_utils8.combineHeaders)(this.config.headers(), options.headers),
+ headers: (0, import_provider_utils9.combineHeaders)(this.config.headers(), options.headers),
  body,
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils8.createJsonResponseHandler)(
- import_zod6.z.object({
- id: import_zod6.z.string(),
- created_at: import_zod6.z.number(),
- model: import_zod6.z.string(),
- output: import_zod6.z.array(
- import_zod6.z.discriminatedUnion("type", [
- import_zod6.z.object({
- type: import_zod6.z.literal("message"),
- role: import_zod6.z.literal("assistant"),
- content: import_zod6.z.array(
- import_zod6.z.object({
- type: import_zod6.z.literal("output_text"),
- text: import_zod6.z.string(),
- annotations: import_zod6.z.array(
- import_zod6.z.object({
- type: import_zod6.z.literal("url_citation"),
- start_index: import_zod6.z.number(),
- end_index: import_zod6.z.number(),
- url: import_zod6.z.string(),
- title: import_zod6.z.string()
+ successfulResponseHandler: (0, import_provider_utils9.createJsonResponseHandler)(
+ import_zod12.z.object({
+ id: import_zod12.z.string(),
+ created_at: import_zod12.z.number(),
+ model: import_zod12.z.string(),
+ output: import_zod12.z.array(
+ import_zod12.z.discriminatedUnion("type", [
+ import_zod12.z.object({
+ type: import_zod12.z.literal("message"),
+ role: import_zod12.z.literal("assistant"),
+ content: import_zod12.z.array(
+ import_zod12.z.object({
+ type: import_zod12.z.literal("output_text"),
+ text: import_zod12.z.string(),
+ annotations: import_zod12.z.array(
+ import_zod12.z.object({
+ type: import_zod12.z.literal("url_citation"),
+ start_index: import_zod12.z.number(),
+ end_index: import_zod12.z.number(),
+ url: import_zod12.z.string(),
+ title: import_zod12.z.string()
  })
  )
  })
  )
  }),
- import_zod6.z.object({
- type: import_zod6.z.literal("function_call"),
- call_id: import_zod6.z.string(),
- name: import_zod6.z.string(),
- arguments: import_zod6.z.string()
+ import_zod12.z.object({
+ type: import_zod12.z.literal("function_call"),
+ call_id: import_zod12.z.string(),
+ name: import_zod12.z.string(),
+ arguments: import_zod12.z.string()
  }),
- import_zod6.z.object({
- type: import_zod6.z.literal("web_search_call")
+ import_zod12.z.object({
+ type: import_zod12.z.literal("web_search_call")
  }),
- import_zod6.z.object({
- type: import_zod6.z.literal("computer_call")
+ import_zod12.z.object({
+ type: import_zod12.z.literal("computer_call")
  }),
- import_zod6.z.object({
- type: import_zod6.z.literal("reasoning")
+ import_zod12.z.object({
+ type: import_zod12.z.literal("reasoning"),
+ summary: import_zod12.z.array(
+ import_zod12.z.object({
+ type: import_zod12.z.literal("summary_text"),
+ text: import_zod12.z.string()
+ })
+ )
  })
  ])
  ),
- incomplete_details: import_zod6.z.object({ reason: import_zod6.z.string() }).nullable(),
- usage: usageSchema
+ incomplete_details: import_zod12.z.object({ reason: import_zod12.z.string() }).nullable(),
+ usage: usageSchema2
  })
  ),
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const outputTextElements = response.output.filter((output) => output.type === "message").flatMap((output) => output.content).filter((content) => content.type === "output_text");
- const toolCalls = response.output.filter((output) => output.type === "function_call").map((output) => ({
- toolCallType: "function",
- toolCallId: output.call_id,
- toolName: output.name,
- args: output.arguments
- }));
+ const content = [];
+ for (const part of response.output) {
+ switch (part.type) {
+ case "reasoning": {
+ content.push({
+ type: "reasoning",
+ text: part.summary.map((summary) => summary.text).join()
+ });
+ break;
+ }
+ case "message": {
+ for (const contentPart of part.content) {
+ content.push({
+ type: "text",
+ text: contentPart.text
+ });
+ for (const annotation of contentPart.annotations) {
+ content.push({
+ type: "source",
+ sourceType: "url",
+ id: (_c = (_b = (_a = this.config).generateId) == null ? void 0 : _b.call(_a)) != null ? _c : (0, import_provider_utils9.generateId)(),
+ url: annotation.url,
+ title: annotation.title
+ });
+ }
+ }
+ break;
+ }
+ case "function_call": {
+ content.push({
+ type: "tool-call",
+ toolCallType: "function",
+ toolCallId: part.call_id,
+ toolName: part.name,
+ args: part.arguments
+ });
+ break;
+ }
+ }
+ }
  return {
- text: outputTextElements.map((content) => content.text).join("\n"),
- sources: outputTextElements.flatMap(
- (content) => content.annotations.map((annotation) => {
- var _a2, _b2, _c2;
- return {
- sourceType: "url",
- id: (_c2 = (_b2 = (_a2 = this.config).generateId) == null ? void 0 : _b2.call(_a2)) != null ? _c2 : (0, import_provider_utils8.generateId)(),
- url: annotation.url,
- title: annotation.title
- };
- })
- ),
+ content,
  finishReason: mapOpenAIResponseFinishReason({
- finishReason: (_a = response.incomplete_details) == null ? void 0 : _a.reason,
- hasToolCalls: toolCalls.length > 0
+ finishReason: (_d = response.incomplete_details) == null ? void 0 : _d.reason,
+ hasToolCalls: content.some((part) => part.type === "tool-call")
  }),
- toolCalls: toolCalls.length > 0 ? toolCalls : void 0,
  usage: {
- promptTokens: response.usage.input_tokens,
- completionTokens: response.usage.output_tokens
- },
- rawCall: {
- rawPrompt: void 0,
- rawSettings: {}
- },
- rawResponse: {
- headers: responseHeaders,
- body: rawResponse
- },
- request: {
- body: JSON.stringify(body)
+ inputTokens: response.usage.input_tokens,
+ outputTokens: response.usage.output_tokens,
+ totalTokens: response.usage.input_tokens + response.usage.output_tokens,
+ reasoningTokens: (_f = (_e = response.usage.output_tokens_details) == null ? void 0 : _e.reasoning_tokens) != null ? _f : void 0,
+ cachedInputTokens: (_h = (_g = response.usage.input_tokens_details) == null ? void 0 : _g.cached_tokens) != null ? _h : void 0
  },
+ request: { body },
  response: {
  id: response.id,
  timestamp: new Date(response.created_at * 1e3),
- modelId: response.model
+ modelId: response.model,
+ headers: responseHeaders,
+ body: rawResponse
  },
  providerMetadata: {
  openai: {
- responseId: response.id,
- cachedPromptTokens: (_c = (_b = response.usage.input_tokens_details) == null ? void 0 : _b.cached_tokens) != null ? _c : null,
- reasoningTokens: (_e = (_d = response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : null
+ responseId: response.id
  }
  },
  warnings
  };
  }
  async doStream(options) {
- const { args: body, warnings } = this.getArgs(options);
- const { responseHeaders, value: response } = await (0, import_provider_utils8.postJsonToApi)({
+ const { args: body, warnings } = await this.getArgs(options);
+ const { responseHeaders, value: response } = await (0, import_provider_utils9.postJsonToApi)({
  url: this.config.url({
  path: "/responses",
  modelId: this.modelId
  }),
- headers: (0, import_provider_utils8.combineHeaders)(this.config.headers(), options.headers),
+ headers: (0, import_provider_utils9.combineHeaders)(this.config.headers(), options.headers),
  body: {
  ...body,
  stream: true
  },
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils8.createEventSourceResponseHandler)(
+ successfulResponseHandler: (0, import_provider_utils9.createEventSourceResponseHandler)(
  openaiResponsesChunkSchema
  ),
  abortSignal: options.abortSignal,
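
Note: `doGenerate` now returns a single ordered `content` array of typed parts (`reasoning`, `text`, `source`, `tool-call`) instead of separate `text`/`sources`/`toolCalls` fields, and usage gains `totalTokens`, `reasoningTokens`, and `cachedInputTokens`. A minimal consumption sketch, assuming a constructed responses `model` and valid call `options` (neither shown in this diff):

const result = await model.doGenerate(options);
for (const part of result.content) {
  if (part.type === "reasoning") console.log("reasoning:", part.text);
  else if (part.type === "text") console.log("text:", part.text);
  else if (part.type === "source") console.log("source:", part.url, part.title);
  else if (part.type === "tool-call") console.log("tool:", part.toolName, part.args);
}
console.log(result.usage.totalTokens, result.usage.reasoningTokens);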
@@ -2005,16 +2310,20 @@ var OpenAIResponsesLanguageModel = class {
  });
  const self = this;
  let finishReason = "unknown";
- let promptTokens = NaN;
- let completionTokens = NaN;
- let cachedPromptTokens = null;
- let reasoningTokens = null;
+ const usage = {
+ inputTokens: void 0,
+ outputTokens: void 0,
+ totalTokens: void 0
+ };
  let responseId = null;
  const ongoingToolCalls = {};
  let hasToolCalls = false;
  return {
  stream: response.pipeThrough(
  new TransformStream({
+ start(controller) {
+ controller.enqueue({ type: "stream-start", warnings });
+ },
  transform(chunk, controller) {
  var _a, _b, _c, _d, _e, _f, _g, _h;
  if (!chunk.success) {
@@ -2058,8 +2367,13 @@ var OpenAIResponsesLanguageModel = class {
  });
  } else if (isTextDeltaChunk(value)) {
  controller.enqueue({
- type: "text-delta",
- textDelta: value.delta
+ type: "text",
+ text: value.delta
+ });
+ } else if (isResponseReasoningSummaryTextDeltaChunk(value)) {
+ controller.enqueue({
+ type: "reasoning",
+ text: value.delta
  });
  } else if (isResponseOutputItemDoneChunk(value) && value.item.type === "function_call") {
  ongoingToolCalls[value.output_index] = void 0;
@@ -2076,19 +2390,18 @@ var OpenAIResponsesLanguageModel = class {
  finishReason: (_a = value.response.incomplete_details) == null ? void 0 : _a.reason,
  hasToolCalls
  });
- promptTokens = value.response.usage.input_tokens;
- completionTokens = value.response.usage.output_tokens;
- cachedPromptTokens = (_c = (_b = value.response.usage.input_tokens_details) == null ? void 0 : _b.cached_tokens) != null ? _c : cachedPromptTokens;
- reasoningTokens = (_e = (_d = value.response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : reasoningTokens;
+ usage.inputTokens = value.response.usage.input_tokens;
+ usage.outputTokens = value.response.usage.output_tokens;
+ usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens;
+ usage.reasoningTokens = (_c = (_b = value.response.usage.output_tokens_details) == null ? void 0 : _b.reasoning_tokens) != null ? _c : void 0;
+ usage.cachedInputTokens = (_e = (_d = value.response.usage.input_tokens_details) == null ? void 0 : _d.cached_tokens) != null ? _e : void 0;
  } else if (isResponseAnnotationAddedChunk(value)) {
  controller.enqueue({
  type: "source",
- source: {
- sourceType: "url",
- id: (_h = (_g = (_f = self.config).generateId) == null ? void 0 : _g.call(_f)) != null ? _h : (0, import_provider_utils8.generateId)(),
- url: value.annotation.url,
- title: value.annotation.title
- }
+ sourceType: "url",
+ id: (_h = (_g = (_f = self.config).generateId) == null ? void 0 : _g.call(_f)) != null ? _h : (0, import_provider_utils9.generateId)(),
+ url: value.annotation.url,
+ title: value.annotation.title
  });
  }
  },
@@ -2096,103 +2409,101 @@ var OpenAIResponsesLanguageModel = class {
  controller.enqueue({
  type: "finish",
  finishReason,
- usage: { promptTokens, completionTokens },
- ...(cachedPromptTokens != null || reasoningTokens != null) && {
- providerMetadata: {
- openai: {
- responseId,
- cachedPromptTokens,
- reasoningTokens
- }
+ usage,
+ providerMetadata: {
+ openai: {
+ responseId
  }
  }
  });
  }
  })
  ),
- rawCall: {
- rawPrompt: void 0,
- rawSettings: {}
- },
- rawResponse: { headers: responseHeaders },
- request: { body: JSON.stringify(body) },
- warnings
+ request: { body },
+ response: { headers: responseHeaders }
  };
  }
  };
- var usageSchema = import_zod6.z.object({
- input_tokens: import_zod6.z.number(),
- input_tokens_details: import_zod6.z.object({ cached_tokens: import_zod6.z.number().nullish() }).nullish(),
- output_tokens: import_zod6.z.number(),
- output_tokens_details: import_zod6.z.object({ reasoning_tokens: import_zod6.z.number().nullish() }).nullish()
+ var usageSchema2 = import_zod12.z.object({
+ input_tokens: import_zod12.z.number(),
+ input_tokens_details: import_zod12.z.object({ cached_tokens: import_zod12.z.number().nullish() }).nullish(),
+ output_tokens: import_zod12.z.number(),
+ output_tokens_details: import_zod12.z.object({ reasoning_tokens: import_zod12.z.number().nullish() }).nullish()
  });
- var textDeltaChunkSchema = import_zod6.z.object({
- type: import_zod6.z.literal("response.output_text.delta"),
- delta: import_zod6.z.string()
+ var textDeltaChunkSchema = import_zod12.z.object({
+ type: import_zod12.z.literal("response.output_text.delta"),
+ delta: import_zod12.z.string()
  });
- var responseFinishedChunkSchema = import_zod6.z.object({
- type: import_zod6.z.enum(["response.completed", "response.incomplete"]),
- response: import_zod6.z.object({
- incomplete_details: import_zod6.z.object({ reason: import_zod6.z.string() }).nullish(),
- usage: usageSchema
+ var responseFinishedChunkSchema = import_zod12.z.object({
+ type: import_zod12.z.enum(["response.completed", "response.incomplete"]),
+ response: import_zod12.z.object({
+ incomplete_details: import_zod12.z.object({ reason: import_zod12.z.string() }).nullish(),
+ usage: usageSchema2
  })
  });
- var responseCreatedChunkSchema = import_zod6.z.object({
- type: import_zod6.z.literal("response.created"),
- response: import_zod6.z.object({
- id: import_zod6.z.string(),
- created_at: import_zod6.z.number(),
- model: import_zod6.z.string()
+ var responseCreatedChunkSchema = import_zod12.z.object({
+ type: import_zod12.z.literal("response.created"),
+ response: import_zod12.z.object({
+ id: import_zod12.z.string(),
+ created_at: import_zod12.z.number(),
+ model: import_zod12.z.string()
  })
  });
- var responseOutputItemDoneSchema = import_zod6.z.object({
- type: import_zod6.z.literal("response.output_item.done"),
- output_index: import_zod6.z.number(),
- item: import_zod6.z.discriminatedUnion("type", [
- import_zod6.z.object({
- type: import_zod6.z.literal("message")
+ var responseOutputItemDoneSchema = import_zod12.z.object({
+ type: import_zod12.z.literal("response.output_item.done"),
+ output_index: import_zod12.z.number(),
+ item: import_zod12.z.discriminatedUnion("type", [
+ import_zod12.z.object({
+ type: import_zod12.z.literal("message")
  }),
- import_zod6.z.object({
- type: import_zod6.z.literal("function_call"),
- id: import_zod6.z.string(),
- call_id: import_zod6.z.string(),
- name: import_zod6.z.string(),
- arguments: import_zod6.z.string(),
- status: import_zod6.z.literal("completed")
+ import_zod12.z.object({
+ type: import_zod12.z.literal("function_call"),
+ id: import_zod12.z.string(),
+ call_id: import_zod12.z.string(),
+ name: import_zod12.z.string(),
+ arguments: import_zod12.z.string(),
+ status: import_zod12.z.literal("completed")
  })
  ])
  });
- var responseFunctionCallArgumentsDeltaSchema = import_zod6.z.object({
- type: import_zod6.z.literal("response.function_call_arguments.delta"),
- item_id: import_zod6.z.string(),
- output_index: import_zod6.z.number(),
- delta: import_zod6.z.string()
+ var responseFunctionCallArgumentsDeltaSchema = import_zod12.z.object({
+ type: import_zod12.z.literal("response.function_call_arguments.delta"),
+ item_id: import_zod12.z.string(),
+ output_index: import_zod12.z.number(),
+ delta: import_zod12.z.string()
  });
- var responseOutputItemAddedSchema = import_zod6.z.object({
- type: import_zod6.z.literal("response.output_item.added"),
- output_index: import_zod6.z.number(),
- item: import_zod6.z.discriminatedUnion("type", [
- import_zod6.z.object({
- type: import_zod6.z.literal("message")
+ var responseOutputItemAddedSchema = import_zod12.z.object({
+ type: import_zod12.z.literal("response.output_item.added"),
+ output_index: import_zod12.z.number(),
+ item: import_zod12.z.discriminatedUnion("type", [
+ import_zod12.z.object({
+ type: import_zod12.z.literal("message")
  }),
- import_zod6.z.object({
- type: import_zod6.z.literal("function_call"),
- id: import_zod6.z.string(),
- call_id: import_zod6.z.string(),
- name: import_zod6.z.string(),
- arguments: import_zod6.z.string()
+ import_zod12.z.object({
+ type: import_zod12.z.literal("function_call"),
+ id: import_zod12.z.string(),
+ call_id: import_zod12.z.string(),
+ name: import_zod12.z.string(),
+ arguments: import_zod12.z.string()
  })
  ])
  });
- var responseAnnotationAddedSchema = import_zod6.z.object({
- type: import_zod6.z.literal("response.output_text.annotation.added"),
- annotation: import_zod6.z.object({
- type: import_zod6.z.literal("url_citation"),
- url: import_zod6.z.string(),
- title: import_zod6.z.string()
+ var responseAnnotationAddedSchema = import_zod12.z.object({
+ type: import_zod12.z.literal("response.output_text.annotation.added"),
+ annotation: import_zod12.z.object({
+ type: import_zod12.z.literal("url_citation"),
+ url: import_zod12.z.string(),
+ title: import_zod12.z.string()
  })
  });
- var openaiResponsesChunkSchema = import_zod6.z.union([
+ var responseReasoningSummaryTextDeltaSchema = import_zod12.z.object({
+ type: import_zod12.z.literal("response.reasoning_summary_text.delta"),
+ item_id: import_zod12.z.string(),
+ output_index: import_zod12.z.number(),
+ summary_index: import_zod12.z.number(),
+ delta: import_zod12.z.string()
+ });
+ var openaiResponsesChunkSchema = import_zod12.z.union([
  textDeltaChunkSchema,
  responseFinishedChunkSchema,
  responseCreatedChunkSchema,
@@ -2200,7 +2511,8 @@ var openaiResponsesChunkSchema = import_zod6.z.union([
  responseFunctionCallArgumentsDeltaSchema,
  responseOutputItemAddedSchema,
  responseAnnotationAddedSchema,
- import_zod6.z.object({ type: import_zod6.z.string() }).passthrough()
+ responseReasoningSummaryTextDeltaSchema,
+ import_zod12.z.object({ type: import_zod12.z.string() }).passthrough()
  // fallback for unknown chunks
  ]);
  function isTextDeltaChunk(chunk) {
@@ -2224,6 +2536,9 @@ function isResponseOutputItemAddedChunk(chunk) {
  function isResponseAnnotationAddedChunk(chunk) {
  return chunk.type === "response.output_text.annotation.added";
  }
+ function isResponseReasoningSummaryTextDeltaChunk(chunk) {
+ return chunk.type === "response.reasoning_summary_text.delta";
+ }
  function getResponsesModelConfig(modelId) {
  if (modelId.startsWith("o")) {
  if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {
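
Note: the chunk union now accepts `response.reasoning_summary_text.delta` events, which the stream transform forwards as `reasoning` parts. A sketch of the accepted chunk shape, with illustrative values:

// Validated by responseReasoningSummaryTextDeltaSchema:
const chunk = {
  type: "response.reasoning_summary_text.delta",
  item_id: "rs_123", // illustrative id
  output_index: 0,
  summary_index: 0,
  delta: "First, consider...",
};
// Forwarded downstream as: { type: "reasoning", text: chunk.delta }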
@@ -2245,15 +2560,16 @@ function getResponsesModelConfig(modelId) {
  requiredAutoTruncation: false
  };
  }
- var openaiResponsesProviderOptionsSchema = import_zod6.z.object({
- metadata: import_zod6.z.any().nullish(),
- parallelToolCalls: import_zod6.z.boolean().nullish(),
- previousResponseId: import_zod6.z.string().nullish(),
- store: import_zod6.z.boolean().nullish(),
- user: import_zod6.z.string().nullish(),
- reasoningEffort: import_zod6.z.string().nullish(),
- strictSchemas: import_zod6.z.boolean().nullish(),
- instructions: import_zod6.z.string().nullish()
+ var openaiResponsesProviderOptionsSchema = import_zod12.z.object({
+ metadata: import_zod12.z.any().nullish(),
+ parallelToolCalls: import_zod12.z.boolean().nullish(),
+ previousResponseId: import_zod12.z.string().nullish(),
+ store: import_zod12.z.boolean().nullish(),
+ user: import_zod12.z.string().nullish(),
+ reasoningEffort: import_zod12.z.string().nullish(),
+ strictSchemas: import_zod12.z.boolean().nullish(),
+ instructions: import_zod12.z.string().nullish(),
+ reasoningSummary: import_zod12.z.string().nullish()
  });
  // Annotate the CommonJS export names for ESM import in node:
  0 && (module.exports = {
@@ -2262,6 +2578,13 @@ var openaiResponsesProviderOptionsSchema = import_zod6.z.object({
  OpenAIEmbeddingModel,
  OpenAIImageModel,
  OpenAIResponsesLanguageModel,
- modelMaxImagesPerCall
+ OpenAISpeechModel,
+ OpenAITranscriptionModel,
+ hasDefaultResponseFormat,
+ modelMaxImagesPerCall,
+ openAITranscriptionProviderOptions,
+ openaiCompletionProviderOptions,
+ openaiEmbeddingProviderOptions,
+ openaiProviderOptions
  });
  //# sourceMappingURL=index.js.map