@ai-sdk/openai 2.0.0-canary.1 → 2.0.0-canary.10

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
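The most visible change in this range is that per-call OpenAI settings (logitBias, logprobs, user, reasoningEffort, maxCompletionTokens, store, metadata, prediction) are now validated with a zod `openaiProviderOptions` schema and read from `providerOptions.openai` via `parseProviderOptions`, while the `useLegacyFunctionCalling` path is removed. A minimal sketch of a call site, assuming the AI SDK v5 canary `generateText` API; the keys under `providerOptions.openai` come from the schema in the diff below, everything else is illustrative:

// Sketch only: assumes `generateText` from the "ai" v5 canary package.
// The provider option names match the `openaiProviderOptions` zod schema in this diff.
import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

const { text } = await generateText({
  model: openai("o3-mini"),
  prompt: "Summarize the changelog.",
  providerOptions: {
    openai: {
      reasoningEffort: "low",      // "low" | "medium" | "high"
      maxCompletionTokens: 1024,   // cap on completion tokens, useful for reasoning models
      store: false,                // opt out of response persistence
    },
  },
});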
package/dist/index.js CHANGED
@@ -26,19 +26,18 @@ __export(src_exports, {
  module.exports = __toCommonJS(src_exports);
 
  // src/openai-provider.ts
- var import_provider_utils9 = require("@ai-sdk/provider-utils");
+ var import_provider_utils10 = require("@ai-sdk/provider-utils");
 
  // src/openai-chat-language-model.ts
  var import_provider3 = require("@ai-sdk/provider");
  var import_provider_utils3 = require("@ai-sdk/provider-utils");
- var import_zod2 = require("zod");
+ var import_zod3 = require("zod");
 
  // src/convert-to-openai-chat-messages.ts
  var import_provider = require("@ai-sdk/provider");
  var import_provider_utils = require("@ai-sdk/provider-utils");
  function convertToOpenAIChatMessages({
  prompt,
- useLegacyFunctionCalling = false,
  systemMessageMode = "system"
  }) {
  const messages = [];
@@ -79,55 +78,71 @@ function convertToOpenAIChatMessages({
  messages.push({
  role: "user",
  content: content.map((part, index) => {
- var _a, _b, _c, _d;
+ var _a, _b, _c;
  switch (part.type) {
  case "text": {
  return { type: "text", text: part.text };
  }
- case "image": {
- return {
- type: "image_url",
- image_url: {
- url: part.image instanceof URL ? part.image.toString() : `data:${(_a = part.mimeType) != null ? _a : "image/jpeg"};base64,${(0, import_provider_utils.convertUint8ArrayToBase64)(part.image)}`,
- // OpenAI specific extension: image detail
- detail: (_c = (_b = part.providerMetadata) == null ? void 0 : _b.openai) == null ? void 0 : _c.imageDetail
- }
- };
- }
  case "file": {
- if (part.data instanceof URL) {
- throw new import_provider.UnsupportedFunctionalityError({
- functionality: "'File content parts with URL data' functionality not supported."
- });
- }
- switch (part.mimeType) {
- case "audio/wav": {
- return {
- type: "input_audio",
- input_audio: { data: part.data, format: "wav" }
- };
- }
- case "audio/mp3":
- case "audio/mpeg": {
- return {
- type: "input_audio",
- input_audio: { data: part.data, format: "mp3" }
- };
+ if (part.mediaType.startsWith("image/")) {
+ const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
+ return {
+ type: "image_url",
+ image_url: {
+ url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${(0, import_provider_utils.convertToBase64)(part.data)}`,
+ // OpenAI specific extension: image detail
+ detail: (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.imageDetail
+ }
+ };
+ } else if (part.mediaType.startsWith("audio/")) {
+ if (part.data instanceof URL) {
+ throw new import_provider.UnsupportedFunctionalityError({
+ functionality: "audio file parts with URLs"
+ });
  }
- case "application/pdf": {
- return {
- type: "file",
- file: {
- filename: (_d = part.filename) != null ? _d : `part-${index}.pdf`,
- file_data: `data:application/pdf;base64,${part.data}`
- }
- };
+ switch (part.mediaType) {
+ case "audio/wav": {
+ return {
+ type: "input_audio",
+ input_audio: {
+ data: (0, import_provider_utils.convertToBase64)(part.data),
+ format: "wav"
+ }
+ };
+ }
+ case "audio/mp3":
+ case "audio/mpeg": {
+ return {
+ type: "input_audio",
+ input_audio: {
+ data: (0, import_provider_utils.convertToBase64)(part.data),
+ format: "mp3"
+ }
+ };
+ }
+ default: {
+ throw new import_provider.UnsupportedFunctionalityError({
+ functionality: `audio content parts with media type ${part.mediaType}`
+ });
+ }
  }
- default: {
+ } else if (part.mediaType === "application/pdf") {
+ if (part.data instanceof URL) {
  throw new import_provider.UnsupportedFunctionalityError({
- functionality: `File content part type ${part.mimeType} in user messages`
+ functionality: "PDF file parts with URLs"
  });
  }
+ return {
+ type: "file",
+ file: {
+ filename: (_c = part.filename) != null ? _c : `part-${index}.pdf`,
+ file_data: `data:application/pdf;base64,${part.data}`
+ }
+ };
+ } else {
+ throw new import_provider.UnsupportedFunctionalityError({
+ functionality: `file part media type ${part.mediaType}`
+ });
  }
  }
  }
@@ -157,41 +172,20 @@ function convertToOpenAIChatMessages({
  }
  }
  }
- if (useLegacyFunctionCalling) {
- if (toolCalls.length > 1) {
- throw new import_provider.UnsupportedFunctionalityError({
- functionality: "useLegacyFunctionCalling with multiple tool calls in one message"
- });
- }
- messages.push({
- role: "assistant",
- content: text,
- function_call: toolCalls.length > 0 ? toolCalls[0].function : void 0
- });
- } else {
- messages.push({
- role: "assistant",
- content: text,
- tool_calls: toolCalls.length > 0 ? toolCalls : void 0
- });
- }
+ messages.push({
+ role: "assistant",
+ content: text,
+ tool_calls: toolCalls.length > 0 ? toolCalls : void 0
+ });
  break;
  }
  case "tool": {
  for (const toolResponse of content) {
- if (useLegacyFunctionCalling) {
- messages.push({
- role: "function",
- name: toolResponse.toolName,
- content: JSON.stringify(toolResponse.result)
- });
- } else {
- messages.push({
- role: "tool",
- tool_call_id: toolResponse.toolCallId,
- content: JSON.stringify(toolResponse.result)
- });
- }
+ messages.push({
+ role: "tool",
+ tool_call_id: toolResponse.toolCallId,
+ content: JSON.stringify(toolResponse.result)
+ });
  }
  break;
  }
@@ -204,6 +198,19 @@ function convertToOpenAIChatMessages({
  return { messages, warnings };
  }
 
+ // src/get-response-metadata.ts
+ function getResponseMetadata({
+ id,
+ model,
+ created
+ }) {
+ return {
+ id: id != null ? id : void 0,
+ modelId: model != null ? model : void 0,
+ timestamp: created != null ? new Date(created * 1e3) : void 0
+ };
+ }
+
  // src/map-openai-chat-logprobs.ts
  function mapOpenAIChatLogProbsOutput(logprobs) {
  var _a, _b;
@@ -234,18 +241,69 @@ function mapOpenAIFinishReason(finishReason) {
  }
  }
 
- // src/openai-error.ts
+ // src/openai-chat-options.ts
  var import_zod = require("zod");
+ var openaiProviderOptions = import_zod.z.object({
+ /**
+ * Modify the likelihood of specified tokens appearing in the completion.
+ *
+ * Accepts a JSON object that maps tokens (specified by their token ID in
+ * the GPT tokenizer) to an associated bias value from -100 to 100.
+ */
+ logitBias: import_zod.z.record(import_zod.z.coerce.number(), import_zod.z.number()).optional(),
+ /**
+ * Return the log probabilities of the tokens.
+ *
+ * Setting to true will return the log probabilities of the tokens that
+ * were generated.
+ *
+ * Setting to a number will return the log probabilities of the top n
+ * tokens that were generated.
+ */
+ logprobs: import_zod.z.union([import_zod.z.boolean(), import_zod.z.number()]).optional(),
+ /**
+ * Whether to enable parallel function calling during tool use. Default to true.
+ */
+ parallelToolCalls: import_zod.z.boolean().optional(),
+ /**
+ * A unique identifier representing your end-user, which can help OpenAI to
+ * monitor and detect abuse.
+ */
+ user: import_zod.z.string().optional(),
+ /**
+ * Reasoning effort for reasoning models. Defaults to `medium`.
+ */
+ reasoningEffort: import_zod.z.enum(["low", "medium", "high"]).optional(),
+ /**
+ * Maximum number of completion tokens to generate. Useful for reasoning models.
+ */
+ maxCompletionTokens: import_zod.z.number().optional(),
+ /**
+ * Whether to enable persistence in responses API.
+ */
+ store: import_zod.z.boolean().optional(),
+ /**
+ * Metadata to associate with the request.
+ */
+ metadata: import_zod.z.record(import_zod.z.string()).optional(),
+ /**
+ * Parameters for prediction mode.
+ */
+ prediction: import_zod.z.record(import_zod.z.any()).optional()
+ });
+
+ // src/openai-error.ts
+ var import_zod2 = require("zod");
  var import_provider_utils2 = require("@ai-sdk/provider-utils");
- var openaiErrorDataSchema = import_zod.z.object({
- error: import_zod.z.object({
- message: import_zod.z.string(),
+ var openaiErrorDataSchema = import_zod2.z.object({
+ error: import_zod2.z.object({
+ message: import_zod2.z.string(),
  // The additional information below is handled loosely to support
  // OpenAI-compatible providers that have slightly different error
  // responses:
- type: import_zod.z.string().nullish(),
- param: import_zod.z.any().nullish(),
- code: import_zod.z.union([import_zod.z.string(), import_zod.z.number()]).nullish()
+ type: import_zod2.z.string().nullish(),
+ param: import_zod2.z.any().nullish(),
+ code: import_zod2.z.union([import_zod2.z.string(), import_zod2.z.number()]).nullish()
  })
  });
  var openaiFailedResponseHandler = (0, import_provider_utils2.createJsonErrorResponseHandler)({
@@ -253,74 +311,17 @@ var openaiFailedResponseHandler = (0, import_provider_utils2.createJsonErrorResp
  errorToMessage: (data) => data.error.message
  });
 
- // src/get-response-metadata.ts
- function getResponseMetadata({
- id,
- model,
- created
- }) {
- return {
- id: id != null ? id : void 0,
- modelId: model != null ? model : void 0,
- timestamp: created != null ? new Date(created * 1e3) : void 0
- };
- }
-
  // src/openai-prepare-tools.ts
  var import_provider2 = require("@ai-sdk/provider");
  function prepareTools({
- mode,
- useLegacyFunctionCalling = false,
+ tools,
+ toolChoice,
  structuredOutputs
  }) {
- var _a;
- const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
+ tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
  const toolWarnings = [];
  if (tools == null) {
- return { tools: void 0, tool_choice: void 0, toolWarnings };
- }
- const toolChoice = mode.toolChoice;
- if (useLegacyFunctionCalling) {
- const openaiFunctions = [];
- for (const tool of tools) {
- if (tool.type === "provider-defined") {
- toolWarnings.push({ type: "unsupported-tool", tool });
- } else {
- openaiFunctions.push({
- name: tool.name,
- description: tool.description,
- parameters: tool.parameters
- });
- }
- }
- if (toolChoice == null) {
- return {
- functions: openaiFunctions,
- function_call: void 0,
- toolWarnings
- };
- }
- const type2 = toolChoice.type;
- switch (type2) {
- case "auto":
- case "none":
- case void 0:
- return {
- functions: openaiFunctions,
- function_call: void 0,
- toolWarnings
- };
- case "required":
- throw new import_provider2.UnsupportedFunctionalityError({
- functionality: "useLegacyFunctionCalling and toolChoice: required"
- });
- default:
- return {
- functions: openaiFunctions,
- function_call: { name: toolChoice.toolName },
- toolWarnings
- };
- }
+ return { tools: void 0, toolChoice: void 0, toolWarnings };
  }
  const openaiTools2 = [];
  for (const tool of tools) {
@@ -339,18 +340,18 @@ function prepareTools({
  }
  }
  if (toolChoice == null) {
- return { tools: openaiTools2, tool_choice: void 0, toolWarnings };
+ return { tools: openaiTools2, toolChoice: void 0, toolWarnings };
  }
  const type = toolChoice.type;
  switch (type) {
  case "auto":
  case "none":
  case "required":
- return { tools: openaiTools2, tool_choice: type, toolWarnings };
+ return { tools: openaiTools2, toolChoice: type, toolWarnings };
  case "tool":
  return {
  tools: openaiTools2,
- tool_choice: {
+ toolChoice: {
  type: "function",
  function: {
  name: toolChoice.toolName
@@ -361,7 +362,7 @@ function prepareTools({
  default: {
  const _exhaustiveCheck = type;
  throw new import_provider2.UnsupportedFunctionalityError({
- functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`
+ functionality: `tool choice type: ${_exhaustiveCheck}`
  });
  }
  }
@@ -375,26 +376,17 @@ var OpenAIChatLanguageModel = class {
  this.settings = settings;
  this.config = config;
  }
- get supportsStructuredOutputs() {
- var _a;
- return (_a = this.settings.structuredOutputs) != null ? _a : isReasoningModel(this.modelId);
- }
- get defaultObjectGenerationMode() {
- if (isAudioModel(this.modelId)) {
- return "tool";
- }
- return this.supportsStructuredOutputs ? "json" : "tool";
- }
  get provider() {
  return this.config.provider;
  }
- get supportsImageUrls() {
- return !this.settings.downloadImages;
+ async getSupportedUrls() {
+ return {
+ "image/*": [/^https?:\/\/.*$/]
+ };
  }
  getArgs({
- mode,
  prompt,
- maxTokens,
+ maxOutputTokens,
  temperature,
  topP,
  topK,
@@ -403,39 +395,33 @@ var OpenAIChatLanguageModel = class {
  stopSequences,
  responseFormat,
  seed,
- providerMetadata
+ tools,
+ toolChoice,
+ providerOptions
  }) {
- var _a, _b, _c, _d, _e, _f, _g, _h;
- const type = mode.type;
+ var _a, _b, _c;
  const warnings = [];
+ const openaiOptions = (_a = (0, import_provider_utils3.parseProviderOptions)({
+ provider: "openai",
+ providerOptions,
+ schema: openaiProviderOptions
+ })) != null ? _a : {};
  if (topK != null) {
  warnings.push({
  type: "unsupported-setting",
  setting: "topK"
  });
  }
- if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !this.supportsStructuredOutputs) {
+ if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !this.settings.structuredOutputs) {
  warnings.push({
  type: "unsupported-setting",
  setting: "responseFormat",
  details: "JSON response format schema is only supported with structuredOutputs"
  });
  }
- const useLegacyFunctionCalling = this.settings.useLegacyFunctionCalling;
- if (useLegacyFunctionCalling && this.settings.parallelToolCalls === true) {
- throw new import_provider3.UnsupportedFunctionalityError({
- functionality: "useLegacyFunctionCalling with parallelToolCalls"
- });
- }
- if (useLegacyFunctionCalling && this.supportsStructuredOutputs) {
- throw new import_provider3.UnsupportedFunctionalityError({
- functionality: "structuredOutputs with useLegacyFunctionCalling"
- });
- }
  const { messages, warnings: messageWarnings } = convertToOpenAIChatMessages(
  {
  prompt,
- useLegacyFunctionCalling,
  systemMessageMode: getSystemMessageMode(this.modelId)
  }
  );
@@ -444,35 +430,38 @@ var OpenAIChatLanguageModel = class {
  // model id:
  model: this.modelId,
  // model specific settings:
- logit_bias: this.settings.logitBias,
- logprobs: this.settings.logprobs === true || typeof this.settings.logprobs === "number" ? true : void 0,
- top_logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
- user: this.settings.user,
- parallel_tool_calls: this.settings.parallelToolCalls,
+ logit_bias: openaiOptions.logitBias,
+ logprobs: openaiOptions.logprobs === true || typeof openaiOptions.logprobs === "number" ? true : void 0,
+ top_logprobs: typeof openaiOptions.logprobs === "number" ? openaiOptions.logprobs : typeof openaiOptions.logprobs === "boolean" ? openaiOptions.logprobs ? 0 : void 0 : void 0,
+ user: openaiOptions.user,
+ parallel_tool_calls: openaiOptions.parallelToolCalls,
  // standardized settings:
- max_tokens: maxTokens,
+ max_tokens: maxOutputTokens,
  temperature,
  top_p: topP,
  frequency_penalty: frequencyPenalty,
  presence_penalty: presencePenalty,
- response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? this.supportsStructuredOutputs && responseFormat.schema != null ? {
- type: "json_schema",
- json_schema: {
- schema: responseFormat.schema,
- strict: true,
- name: (_a = responseFormat.name) != null ? _a : "response",
- description: responseFormat.description
- }
- } : { type: "json_object" } : void 0,
+ response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? (
+ // TODO convert into provider option
+ this.settings.structuredOutputs && responseFormat.schema != null ? {
+ type: "json_schema",
+ json_schema: {
+ schema: responseFormat.schema,
+ strict: true,
+ name: (_b = responseFormat.name) != null ? _b : "response",
+ description: responseFormat.description
+ }
+ } : { type: "json_object" }
+ ) : void 0,
  stop: stopSequences,
  seed,
  // openai specific settings:
- // TODO remove in next major version; we auto-map maxTokens now
- max_completion_tokens: (_b = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _b.maxCompletionTokens,
- store: (_c = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _c.store,
- metadata: (_d = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _d.metadata,
- prediction: (_e = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _e.prediction,
- reasoning_effort: (_g = (_f = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _f.reasoningEffort) != null ? _g : this.settings.reasoningEffort,
+ // TODO remove in next major version; we auto-map maxOutputTokens now
+ max_completion_tokens: openaiOptions.maxCompletionTokens,
+ store: openaiOptions.store,
+ metadata: openaiOptions.metadata,
+ prediction: openaiOptions.prediction,
+ reasoning_effort: openaiOptions.reasoningEffort,
  // messages:
  messages
  };
@@ -536,82 +525,33 @@ var OpenAIChatLanguageModel = class {
  }
  baseArgs.max_tokens = void 0;
  }
- }
- switch (type) {
- case "regular": {
- const { tools, tool_choice, functions, function_call, toolWarnings } = prepareTools({
- mode,
- useLegacyFunctionCalling,
- structuredOutputs: this.supportsStructuredOutputs
+ } else if (this.modelId.startsWith("gpt-4o-search-preview") || this.modelId.startsWith("gpt-4o-mini-search-preview")) {
+ if (baseArgs.temperature != null) {
+ baseArgs.temperature = void 0;
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "temperature",
+ details: "temperature is not supported for the search preview models and has been removed."
  });
- return {
- args: {
- ...baseArgs,
- tools,
- tool_choice,
- functions,
- function_call
- },
- warnings: [...warnings, ...toolWarnings]
- };
- }
- case "object-json": {
- return {
- args: {
- ...baseArgs,
- response_format: this.supportsStructuredOutputs && mode.schema != null ? {
- type: "json_schema",
- json_schema: {
- schema: mode.schema,
- strict: true,
- name: (_h = mode.name) != null ? _h : "response",
- description: mode.description
- }
- } : { type: "json_object" }
- },
- warnings
- };
- }
- case "object-tool": {
- return {
- args: useLegacyFunctionCalling ? {
- ...baseArgs,
- function_call: {
- name: mode.tool.name
- },
- functions: [
- {
- name: mode.tool.name,
- description: mode.tool.description,
- parameters: mode.tool.parameters
- }
- ]
- } : {
- ...baseArgs,
- tool_choice: {
- type: "function",
- function: { name: mode.tool.name }
- },
- tools: [
- {
- type: "function",
- function: {
- name: mode.tool.name,
- description: mode.tool.description,
- parameters: mode.tool.parameters,
- strict: this.supportsStructuredOutputs ? true : void 0
- }
- }
- ]
- },
- warnings
- };
- }
- default: {
- const _exhaustiveCheck = type;
- throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
  }
  }
+ const {
+ tools: openaiTools2,
+ toolChoice: openaiToolChoice,
+ toolWarnings
+ } = prepareTools({
+ tools,
+ toolChoice,
+ structuredOutputs: (_c = this.settings.structuredOutputs) != null ? _c : false
+ });
+ return {
+ args: {
+ ...baseArgs,
+ tools: openaiTools2,
+ tool_choice: openaiToolChoice
+ },
+ warnings: [...warnings, ...toolWarnings]
+ };
  }
  async doGenerate(options) {
  var _a, _b, _c, _d, _e, _f, _g, _h;
@@ -634,10 +574,23 @@ var OpenAIChatLanguageModel = class {
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const { messages: rawPrompt, ...rawSettings } = body;
  const choice = response.choices[0];
- const completionTokenDetails = (_a = response.usage) == null ? void 0 : _a.completion_tokens_details;
- const promptTokenDetails = (_b = response.usage) == null ? void 0 : _b.prompt_tokens_details;
+ const content = [];
+ const text = choice.message.content;
+ if (text != null && text.length > 0) {
+ content.push({ type: "text", text });
+ }
+ for (const toolCall of (_a = choice.message.tool_calls) != null ? _a : []) {
+ content.push({
+ type: "tool-call",
+ toolCallType: "function",
+ toolCallId: (_b = toolCall.id) != null ? _b : (0, import_provider_utils3.generateId)(),
+ toolName: toolCall.function.name,
+ args: toolCall.function.arguments
+ });
+ }
+ const completionTokenDetails = (_c = response.usage) == null ? void 0 : _c.completion_tokens_details;
+ const promptTokenDetails = (_d = response.usage) == null ? void 0 : _d.prompt_tokens_details;
  const providerMetadata = { openai: {} };
  if ((completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null) {
  providerMetadata.openai.reasoningTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens;
@@ -652,81 +605,24 @@ var OpenAIChatLanguageModel = class {
  providerMetadata.openai.cachedPromptTokens = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens;
  }
  return {
- text: (_c = choice.message.content) != null ? _c : void 0,
- toolCalls: this.settings.useLegacyFunctionCalling && choice.message.function_call ? [
- {
- toolCallType: "function",
- toolCallId: (0, import_provider_utils3.generateId)(),
- toolName: choice.message.function_call.name,
- args: choice.message.function_call.arguments
- }
- ] : (_d = choice.message.tool_calls) == null ? void 0 : _d.map((toolCall) => {
- var _a2;
- return {
- toolCallType: "function",
- toolCallId: (_a2 = toolCall.id) != null ? _a2 : (0, import_provider_utils3.generateId)(),
- toolName: toolCall.function.name,
- args: toolCall.function.arguments
- };
- }),
+ content,
  finishReason: mapOpenAIFinishReason(choice.finish_reason),
  usage: {
- promptTokens: (_f = (_e = response.usage) == null ? void 0 : _e.prompt_tokens) != null ? _f : NaN,
- completionTokens: (_h = (_g = response.usage) == null ? void 0 : _g.completion_tokens) != null ? _h : NaN
+ inputTokens: (_f = (_e = response.usage) == null ? void 0 : _e.prompt_tokens) != null ? _f : void 0,
+ outputTokens: (_h = (_g = response.usage) == null ? void 0 : _g.completion_tokens) != null ? _h : void 0
+ },
+ request: { body },
+ response: {
+ ...getResponseMetadata(response),
+ headers: responseHeaders,
+ body: rawResponse
  },
- rawCall: { rawPrompt, rawSettings },
- rawResponse: { headers: responseHeaders, body: rawResponse },
- request: { body: JSON.stringify(body) },
- response: getResponseMetadata(response),
  warnings,
  logprobs: mapOpenAIChatLogProbsOutput(choice.logprobs),
  providerMetadata
  };
  }
  async doStream(options) {
- if (this.settings.simulateStreaming) {
- const result = await this.doGenerate(options);
- const simulatedStream = new ReadableStream({
- start(controller) {
- controller.enqueue({ type: "response-metadata", ...result.response });
- if (result.text) {
- controller.enqueue({
- type: "text-delta",
- textDelta: result.text
- });
- }
- if (result.toolCalls) {
- for (const toolCall of result.toolCalls) {
- controller.enqueue({
- type: "tool-call-delta",
- toolCallType: "function",
- toolCallId: toolCall.toolCallId,
- toolName: toolCall.toolName,
- argsTextDelta: toolCall.args
- });
- controller.enqueue({
- type: "tool-call",
- ...toolCall
- });
- }
- }
- controller.enqueue({
- type: "finish",
- finishReason: result.finishReason,
- usage: result.usage,
- logprobs: result.logprobs,
- providerMetadata: result.providerMetadata
- });
- controller.close();
- }
- });
- return {
- stream: simulatedStream,
- rawCall: result.rawCall,
- rawResponse: result.rawResponse,
- warnings: result.warnings
- };
- }
  const { args, warnings } = this.getArgs(options);
  const body = {
  ...args,
@@ -751,17 +647,19 @@ var OpenAIChatLanguageModel = class {
  const { messages: rawPrompt, ...rawSettings } = args;
  const toolCalls = [];
  let finishReason = "unknown";
- let usage = {
- promptTokens: void 0,
- completionTokens: void 0
+ const usage = {
+ inputTokens: void 0,
+ outputTokens: void 0
  };
  let logprobs;
  let isFirstChunk = true;
- const { useLegacyFunctionCalling } = this.settings;
  const providerMetadata = { openai: {} };
  return {
  stream: response.pipeThrough(
  new TransformStream({
+ start(controller) {
+ controller.enqueue({ type: "stream-start", warnings });
+ },
  transform(chunk, controller) {
  var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
  if (!chunk.success) {
@@ -789,10 +687,8 @@ var OpenAIChatLanguageModel = class {
  prompt_tokens_details,
  completion_tokens_details
  } = value.usage;
- usage = {
- promptTokens: prompt_tokens != null ? prompt_tokens : void 0,
- completionTokens: completion_tokens != null ? completion_tokens : void 0
- };
+ usage.inputTokens = prompt_tokens != null ? prompt_tokens : void 0;
+ usage.outputTokens = completion_tokens != null ? completion_tokens : void 0;
  if ((completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens) != null) {
  providerMetadata.openai.reasoningTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens;
  }
@@ -816,8 +712,8 @@ var OpenAIChatLanguageModel = class {
  const delta = choice.delta;
  if (delta.content != null) {
  controller.enqueue({
- type: "text-delta",
- textDelta: delta.content
+ type: "text",
+ text: delta.content
  });
  }
  const mappedLogprobs = mapOpenAIChatLogProbsOutput(
@@ -827,16 +723,8 @@ var OpenAIChatLanguageModel = class {
  if (logprobs === void 0) logprobs = [];
  logprobs.push(...mappedLogprobs);
  }
- const mappedToolCalls = useLegacyFunctionCalling && delta.function_call != null ? [
- {
- type: "function",
- id: (0, import_provider_utils3.generateId)(),
- function: delta.function_call,
- index: 0
- }
- ] : delta.tool_calls;
- if (mappedToolCalls != null) {
- for (const toolCallDelta of mappedToolCalls) {
+ if (delta.tool_calls != null) {
+ for (const toolCallDelta of delta.tool_calls) {
  const index = toolCallDelta.index;
  if (toolCalls[index] == null) {
  if (toolCallDelta.type !== "function") {
@@ -918,125 +806,111 @@ var OpenAIChatLanguageModel = class {
  }
  },
  flush(controller) {
- var _a, _b;
  controller.enqueue({
  type: "finish",
  finishReason,
  logprobs,
- usage: {
- promptTokens: (_a = usage.promptTokens) != null ? _a : NaN,
- completionTokens: (_b = usage.completionTokens) != null ? _b : NaN
- },
+ usage,
  ...providerMetadata != null ? { providerMetadata } : {}
  });
  }
  })
  ),
- rawCall: { rawPrompt, rawSettings },
- rawResponse: { headers: responseHeaders },
- request: { body: JSON.stringify(body) },
- warnings
+ request: { body },
+ response: { headers: responseHeaders }
  };
  }
  };
- var openaiTokenUsageSchema = import_zod2.z.object({
- prompt_tokens: import_zod2.z.number().nullish(),
- completion_tokens: import_zod2.z.number().nullish(),
- prompt_tokens_details: import_zod2.z.object({
- cached_tokens: import_zod2.z.number().nullish()
+ var openaiTokenUsageSchema = import_zod3.z.object({
+ prompt_tokens: import_zod3.z.number().nullish(),
+ completion_tokens: import_zod3.z.number().nullish(),
+ prompt_tokens_details: import_zod3.z.object({
+ cached_tokens: import_zod3.z.number().nullish()
  }).nullish(),
- completion_tokens_details: import_zod2.z.object({
- reasoning_tokens: import_zod2.z.number().nullish(),
- accepted_prediction_tokens: import_zod2.z.number().nullish(),
- rejected_prediction_tokens: import_zod2.z.number().nullish()
+ completion_tokens_details: import_zod3.z.object({
+ reasoning_tokens: import_zod3.z.number().nullish(),
+ accepted_prediction_tokens: import_zod3.z.number().nullish(),
+ rejected_prediction_tokens: import_zod3.z.number().nullish()
  }).nullish()
  }).nullish();
- var openaiChatResponseSchema = import_zod2.z.object({
- id: import_zod2.z.string().nullish(),
- created: import_zod2.z.number().nullish(),
- model: import_zod2.z.string().nullish(),
- choices: import_zod2.z.array(
- import_zod2.z.object({
- message: import_zod2.z.object({
- role: import_zod2.z.literal("assistant").nullish(),
- content: import_zod2.z.string().nullish(),
- function_call: import_zod2.z.object({
- arguments: import_zod2.z.string(),
- name: import_zod2.z.string()
- }).nullish(),
- tool_calls: import_zod2.z.array(
- import_zod2.z.object({
- id: import_zod2.z.string().nullish(),
- type: import_zod2.z.literal("function"),
- function: import_zod2.z.object({
- name: import_zod2.z.string(),
- arguments: import_zod2.z.string()
+ var openaiChatResponseSchema = import_zod3.z.object({
+ id: import_zod3.z.string().nullish(),
+ created: import_zod3.z.number().nullish(),
+ model: import_zod3.z.string().nullish(),
+ choices: import_zod3.z.array(
+ import_zod3.z.object({
+ message: import_zod3.z.object({
+ role: import_zod3.z.literal("assistant").nullish(),
+ content: import_zod3.z.string().nullish(),
+ tool_calls: import_zod3.z.array(
+ import_zod3.z.object({
+ id: import_zod3.z.string().nullish(),
+ type: import_zod3.z.literal("function"),
+ function: import_zod3.z.object({
+ name: import_zod3.z.string(),
+ arguments: import_zod3.z.string()
  })
  })
  ).nullish()
  }),
- index: import_zod2.z.number(),
- logprobs: import_zod2.z.object({
- content: import_zod2.z.array(
- import_zod2.z.object({
- token: import_zod2.z.string(),
- logprob: import_zod2.z.number(),
- top_logprobs: import_zod2.z.array(
- import_zod2.z.object({
- token: import_zod2.z.string(),
- logprob: import_zod2.z.number()
+ index: import_zod3.z.number(),
+ logprobs: import_zod3.z.object({
+ content: import_zod3.z.array(
+ import_zod3.z.object({
+ token: import_zod3.z.string(),
+ logprob: import_zod3.z.number(),
+ top_logprobs: import_zod3.z.array(
+ import_zod3.z.object({
+ token: import_zod3.z.string(),
+ logprob: import_zod3.z.number()
  })
  )
  })
  ).nullable()
  }).nullish(),
- finish_reason: import_zod2.z.string().nullish()
+ finish_reason: import_zod3.z.string().nullish()
  })
  ),
  usage: openaiTokenUsageSchema
  });
- var openaiChatChunkSchema = import_zod2.z.union([
- import_zod2.z.object({
- id: import_zod2.z.string().nullish(),
- created: import_zod2.z.number().nullish(),
- model: import_zod2.z.string().nullish(),
- choices: import_zod2.z.array(
- import_zod2.z.object({
- delta: import_zod2.z.object({
- role: import_zod2.z.enum(["assistant"]).nullish(),
- content: import_zod2.z.string().nullish(),
- function_call: import_zod2.z.object({
- name: import_zod2.z.string().optional(),
- arguments: import_zod2.z.string().optional()
- }).nullish(),
- tool_calls: import_zod2.z.array(
- import_zod2.z.object({
- index: import_zod2.z.number(),
- id: import_zod2.z.string().nullish(),
- type: import_zod2.z.literal("function").optional(),
- function: import_zod2.z.object({
- name: import_zod2.z.string().nullish(),
- arguments: import_zod2.z.string().nullish()
+ var openaiChatChunkSchema = import_zod3.z.union([
+ import_zod3.z.object({
+ id: import_zod3.z.string().nullish(),
+ created: import_zod3.z.number().nullish(),
+ model: import_zod3.z.string().nullish(),
+ choices: import_zod3.z.array(
+ import_zod3.z.object({
+ delta: import_zod3.z.object({
+ role: import_zod3.z.enum(["assistant"]).nullish(),
+ content: import_zod3.z.string().nullish(),
+ tool_calls: import_zod3.z.array(
+ import_zod3.z.object({
+ index: import_zod3.z.number(),
+ id: import_zod3.z.string().nullish(),
+ type: import_zod3.z.literal("function").optional(),
+ function: import_zod3.z.object({
+ name: import_zod3.z.string().nullish(),
+ arguments: import_zod3.z.string().nullish()
  })
  })
  ).nullish()
  }).nullish(),
- logprobs: import_zod2.z.object({
- content: import_zod2.z.array(
- import_zod2.z.object({
- token: import_zod2.z.string(),
- logprob: import_zod2.z.number(),
- top_logprobs: import_zod2.z.array(
- import_zod2.z.object({
- token: import_zod2.z.string(),
- logprob: import_zod2.z.number()
+ logprobs: import_zod3.z.object({
+ content: import_zod3.z.array(
+ import_zod3.z.object({
+ token: import_zod3.z.string(),
+ logprob: import_zod3.z.number(),
+ top_logprobs: import_zod3.z.array(
+ import_zod3.z.object({
+ token: import_zod3.z.string(),
+ logprob: import_zod3.z.number()
  })
  )
  })
  ).nullable()
  }).nullish(),
- finish_reason: import_zod2.z.string().nullable().optional(),
- index: import_zod2.z.number()
+ finish_reason: import_zod3.z.string().nullable().optional(),
+ index: import_zod3.z.number()
  })
  ),
  usage: openaiTokenUsageSchema
@@ -1044,10 +918,7 @@ var openaiChatChunkSchema = import_zod2.z.union([
  openaiErrorDataSchema
  ]);
  function isReasoningModel(modelId) {
- return modelId === "o1" || modelId.startsWith("o1-") || modelId === "o3" || modelId.startsWith("o3-");
- }
- function isAudioModel(modelId) {
- return modelId.startsWith("gpt-4o-audio-preview");
+ return modelId.startsWith("o");
  }
  function getSystemMessageMode(modelId) {
  var _a, _b;
@@ -1078,9 +949,8 @@ var reasoningModels = {
  };
 
  // src/openai-completion-language-model.ts
- var import_provider5 = require("@ai-sdk/provider");
  var import_provider_utils4 = require("@ai-sdk/provider-utils");
- var import_zod3 = require("zod");
+ var import_zod4 = require("zod");
 
  // src/convert-to-openai-completion-prompt.ts
  var import_provider4 = require("@ai-sdk/provider");
@@ -1114,13 +984,8 @@ function convertToOpenAICompletionPrompt({
  case "text": {
  return part.text;
  }
- case "image": {
- throw new import_provider4.UnsupportedFunctionalityError({
- functionality: "images"
- });
- }
  }
- }).join("");
+ }).filter(Boolean).join("");
  text += `${user}:
 ${userMessage}
 
@@ -1184,7 +1049,6 @@ function mapOpenAICompletionLogProbs(logprobs) {
  var OpenAICompletionLanguageModel = class {
  constructor(modelId, settings, config) {
  this.specificationVersion = "v2";
- this.defaultObjectGenerationMode = void 0;
  this.modelId = modelId;
  this.settings = settings;
  this.config = config;
@@ -1192,11 +1056,15 @@ var OpenAICompletionLanguageModel = class {
  get provider() {
  return this.config.provider;
  }
+ async getSupportedUrls() {
+ return {
+ // no supported urls for completion models
+ };
+ }
  getArgs({
- mode,
  inputFormat,
  prompt,
- maxTokens,
+ maxOutputTokens,
  temperature,
  topP,
  topK,
@@ -1204,16 +1072,19 @@ var OpenAICompletionLanguageModel = class {
  presencePenalty,
  stopSequences: userStopSequences,
  responseFormat,
+ tools,
+ toolChoice,
  seed
  }) {
- var _a;
- const type = mode.type;
  const warnings = [];
  if (topK != null) {
- warnings.push({
- type: "unsupported-setting",
- setting: "topK"
- });
+ warnings.push({ type: "unsupported-setting", setting: "topK" });
+ }
+ if (tools == null ? void 0 : tools.length) {
+ warnings.push({ type: "unsupported-setting", setting: "tools" });
+ }
+ if (toolChoice != null) {
+ warnings.push({ type: "unsupported-setting", setting: "toolChoice" });
  }
  if (responseFormat != null && responseFormat.type !== "text") {
  warnings.push({
@@ -1224,56 +1095,30 @@ var OpenAICompletionLanguageModel = class {
  }
  const { prompt: completionPrompt, stopSequences } = convertToOpenAICompletionPrompt({ prompt, inputFormat });
  const stop = [...stopSequences != null ? stopSequences : [], ...userStopSequences != null ? userStopSequences : []];
- const baseArgs = {
- // model id:
- model: this.modelId,
- // model specific settings:
- echo: this.settings.echo,
- logit_bias: this.settings.logitBias,
- logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
- suffix: this.settings.suffix,
- user: this.settings.user,
- // standardized settings:
- max_tokens: maxTokens,
- temperature,
- top_p: topP,
- frequency_penalty: frequencyPenalty,
- presence_penalty: presencePenalty,
- seed,
- // prompt:
- prompt: completionPrompt,
- // stop sequences:
- stop: stop.length > 0 ? stop : void 0
+ return {
+ args: {
+ // model id:
+ model: this.modelId,
+ // model specific settings:
+ echo: this.settings.echo,
+ logit_bias: this.settings.logitBias,
+ logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
+ suffix: this.settings.suffix,
+ user: this.settings.user,
+ // standardized settings:
+ max_tokens: maxOutputTokens,
+ temperature,
+ top_p: topP,
+ frequency_penalty: frequencyPenalty,
+ presence_penalty: presencePenalty,
+ seed,
+ // prompt:
+ prompt: completionPrompt,
+ // stop sequences:
+ stop: stop.length > 0 ? stop : void 0
+ },
+ warnings
  };
- switch (type) {
- case "regular": {
- if ((_a = mode.tools) == null ? void 0 : _a.length) {
- throw new import_provider5.UnsupportedFunctionalityError({
- functionality: "tools"
- });
- }
- if (mode.toolChoice) {
- throw new import_provider5.UnsupportedFunctionalityError({
- functionality: "toolChoice"
- });
- }
- return { args: baseArgs, warnings };
- }
- case "object-json": {
- throw new import_provider5.UnsupportedFunctionalityError({
- functionality: "object-json mode"
- });
- }
- case "object-tool": {
- throw new import_provider5.UnsupportedFunctionalityError({
- functionality: "object-tool mode"
- });
- }
- default: {
- const _exhaustiveCheck = type;
- throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
- }
- }
  }
  async doGenerate(options) {
  const { args, warnings } = this.getArgs(options);
@@ -1295,21 +1140,22 @@ var OpenAICompletionLanguageModel = class {
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const { prompt: rawPrompt, ...rawSettings } = args;
  const choice = response.choices[0];
  return {
- text: choice.text,
+ content: [{ type: "text", text: choice.text }],
  usage: {
- promptTokens: response.usage.prompt_tokens,
- completionTokens: response.usage.completion_tokens
+ inputTokens: response.usage.prompt_tokens,
+ outputTokens: response.usage.completion_tokens
  },
  finishReason: mapOpenAIFinishReason(choice.finish_reason),
  logprobs: mapOpenAICompletionLogProbs(choice.logprobs),
- rawCall: { rawPrompt, rawSettings },
- rawResponse: { headers: responseHeaders, body: rawResponse },
- response: getResponseMetadata(response),
- warnings,
- request: { body: JSON.stringify(args) }
+ request: { body: args },
+ response: {
+ ...getResponseMetadata(response),
+ headers: responseHeaders,
+ body: rawResponse
+ },
+ warnings
  };
  }
  async doStream(options) {
@@ -1334,17 +1180,19 @@ var OpenAICompletionLanguageModel = class {
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const { prompt: rawPrompt, ...rawSettings } = args;
  let finishReason = "unknown";
- let usage = {
- promptTokens: Number.NaN,
- completionTokens: Number.NaN
+ const usage = {
+ inputTokens: void 0,
+ outputTokens: void 0
  };
  let logprobs;
  let isFirstChunk = true;
  return {
  stream: response.pipeThrough(
  new TransformStream({
+ start(controller) {
+ controller.enqueue({ type: "stream-start", warnings });
+ },
  transform(chunk, controller) {
  if (!chunk.success) {
  finishReason = "error";
@@ -1365,10 +1213,8 @@ var OpenAICompletionLanguageModel = class {
  });
  }
  if (value.usage != null) {
- usage = {
- promptTokens: value.usage.prompt_tokens,
- completionTokens: value.usage.completion_tokens
- };
+ usage.inputTokens = value.usage.prompt_tokens;
+ usage.outputTokens = value.usage.completion_tokens;
  }
  const choice = value.choices[0];
  if ((choice == null ? void 0 : choice.finish_reason) != null) {
@@ -1376,8 +1222,8 @@ var OpenAICompletionLanguageModel = class {
  }
  if ((choice == null ? void 0 : choice.text) != null) {
  controller.enqueue({
- type: "text-delta",
- textDelta: choice.text
+ type: "text",
+ text: choice.text
  });
  }
  const mappedLogprobs = mapOpenAICompletionLogProbs(
@@ -1398,65 +1244,80 @@ var OpenAICompletionLanguageModel = class {
  }
  })
  ),
- rawCall: { rawPrompt, rawSettings },
- rawResponse: { headers: responseHeaders },
- warnings,
- request: { body: JSON.stringify(body) }
+ request: { body },
+ response: { headers: responseHeaders }
  };
  }
  };
- var openaiCompletionResponseSchema = import_zod3.z.object({
- id: import_zod3.z.string().nullish(),
- created: import_zod3.z.number().nullish(),
- model: import_zod3.z.string().nullish(),
- choices: import_zod3.z.array(
- import_zod3.z.object({
- text: import_zod3.z.string(),
- finish_reason: import_zod3.z.string(),
- logprobs: import_zod3.z.object({
- tokens: import_zod3.z.array(import_zod3.z.string()),
- token_logprobs: import_zod3.z.array(import_zod3.z.number()),
- top_logprobs: import_zod3.z.array(import_zod3.z.record(import_zod3.z.string(), import_zod3.z.number())).nullable()
+ var openaiCompletionResponseSchema = import_zod4.z.object({
+ id: import_zod4.z.string().nullish(),
+ created: import_zod4.z.number().nullish(),
+ model: import_zod4.z.string().nullish(),
+ choices: import_zod4.z.array(
+ import_zod4.z.object({
+ text: import_zod4.z.string(),
+ finish_reason: import_zod4.z.string(),
+ logprobs: import_zod4.z.object({
+ tokens: import_zod4.z.array(import_zod4.z.string()),
+ token_logprobs: import_zod4.z.array(import_zod4.z.number()),
+ top_logprobs: import_zod4.z.array(import_zod4.z.record(import_zod4.z.string(), import_zod4.z.number())).nullable()
  }).nullish()
  })
  ),
- usage: import_zod3.z.object({
- prompt_tokens: import_zod3.z.number(),
- completion_tokens: import_zod3.z.number()
+ usage: import_zod4.z.object({
+ prompt_tokens: import_zod4.z.number(),
+ completion_tokens: import_zod4.z.number()
  })
  });
- var openaiCompletionChunkSchema = import_zod3.z.union([
- import_zod3.z.object({
- id: import_zod3.z.string().nullish(),
- created: import_zod3.z.number().nullish(),
- model: import_zod3.z.string().nullish(),
- choices: import_zod3.z.array(
- import_zod3.z.object({
- text: import_zod3.z.string(),
- finish_reason: import_zod3.z.string().nullish(),
- index: import_zod3.z.number(),
- logprobs: import_zod3.z.object({
- tokens: import_zod3.z.array(import_zod3.z.string()),
- token_logprobs: import_zod3.z.array(import_zod3.z.number()),
- top_logprobs: import_zod3.z.array(import_zod3.z.record(import_zod3.z.string(), import_zod3.z.number())).nullable()
+ var openaiCompletionChunkSchema = import_zod4.z.union([
+ import_zod4.z.object({
+ id: import_zod4.z.string().nullish(),
+ created: import_zod4.z.number().nullish(),
+ model: import_zod4.z.string().nullish(),
+ choices: import_zod4.z.array(
+ import_zod4.z.object({
+ text: import_zod4.z.string(),
+ finish_reason: import_zod4.z.string().nullish(),
+ index: import_zod4.z.number(),
+ logprobs: import_zod4.z.object({
+ tokens: import_zod4.z.array(import_zod4.z.string()),
+ token_logprobs: import_zod4.z.array(import_zod4.z.number()),
+ top_logprobs: import_zod4.z.array(import_zod4.z.record(import_zod4.z.string(), import_zod4.z.number())).nullable()
  }).nullish()
  })
  ),
- usage: import_zod3.z.object({
- prompt_tokens: import_zod3.z.number(),
- completion_tokens: import_zod3.z.number()
+ usage: import_zod4.z.object({
+ prompt_tokens: import_zod4.z.number(),
+ completion_tokens: import_zod4.z.number()
  }).nullish()
  }),
  openaiErrorDataSchema
  ]);
 
  // src/openai-embedding-model.ts
- var import_provider6 = require("@ai-sdk/provider");
+ var import_provider5 = require("@ai-sdk/provider");
  var import_provider_utils5 = require("@ai-sdk/provider-utils");
- var import_zod4 = require("zod");
+ var import_zod6 = require("zod");
+
+ // src/openai-embedding-options.ts
+ var import_zod5 = require("zod");
+ var openaiEmbeddingProviderOptions = import_zod5.z.object({
+ /**
+ The number of dimensions the resulting output embeddings should have.
+ Only supported in text-embedding-3 and later models.
+ */
+ dimensions: import_zod5.z.number().optional(),
+ /**
+ A unique identifier representing your end-user, which can help OpenAI to
+ monitor and detect abuse. Learn more.
+ */
+ user: import_zod5.z.string().optional()
+ });
+
+ // src/openai-embedding-model.ts
  var OpenAIEmbeddingModel = class {
  constructor(modelId, settings, config) {
- this.specificationVersion = "v1";
+ this.specificationVersion = "v2";
  this.modelId = modelId;
  this.settings = settings;
  this.config = config;
@@ -1475,17 +1336,28 @@ var OpenAIEmbeddingModel = class {
  async doEmbed({
  values,
  headers,
- abortSignal
+ abortSignal,
+ providerOptions
  }) {
+ var _a;
  if (values.length > this.maxEmbeddingsPerCall) {
- throw new import_provider6.TooManyEmbeddingValuesForCallError({
+ throw new import_provider5.TooManyEmbeddingValuesForCallError({
  provider: this.provider,
  modelId: this.modelId,
  maxEmbeddingsPerCall: this.maxEmbeddingsPerCall,
  values
  });
  }
- const { responseHeaders, value: response } = await (0, import_provider_utils5.postJsonToApi)({
+ const openaiOptions = (_a = (0, import_provider_utils5.parseProviderOptions)({
+ provider: "openai",
+ providerOptions,
+ schema: openaiEmbeddingProviderOptions
+ })) != null ? _a : {};
+ const {
+ responseHeaders,
+ value: response,
+ rawValue
+ } = await (0, import_provider_utils5.postJsonToApi)({
  url: this.config.url({
  path: "/embeddings",
  modelId: this.modelId
@@ -1495,8 +1367,8 @@ var OpenAIEmbeddingModel = class {
  model: this.modelId,
  input: values,
  encoding_format: "float",
- dimensions: this.settings.dimensions,
- user: this.settings.user
+ dimensions: openaiOptions.dimensions,
+ user: openaiOptions.user
  },
  failedResponseHandler: openaiFailedResponseHandler,
  successfulResponseHandler: (0, import_provider_utils5.createJsonResponseHandler)(
@@ -1508,18 +1380,18 @@ var OpenAIEmbeddingModel = class {
  return {
  embeddings: response.data.map((item) => item.embedding),
  usage: response.usage ? { tokens: response.usage.prompt_tokens } : void 0,
- rawResponse: { headers: responseHeaders }
+ response: { headers: responseHeaders, body: rawValue }
  };
  }
  };
- var openaiTextEmbeddingResponseSchema = import_zod4.z.object({
- data: import_zod4.z.array(import_zod4.z.object({ embedding: import_zod4.z.array(import_zod4.z.number()) })),
- usage: import_zod4.z.object({ prompt_tokens: import_zod4.z.number() }).nullish()
+ var openaiTextEmbeddingResponseSchema = import_zod6.z.object({
+ data: import_zod6.z.array(import_zod6.z.object({ embedding: import_zod6.z.array(import_zod6.z.number()) })),
+ usage: import_zod6.z.object({ prompt_tokens: import_zod6.z.number() }).nullish()
  });
 
  // src/openai-image-model.ts
  var import_provider_utils6 = require("@ai-sdk/provider-utils");
- var import_zod5 = require("zod");
+ var import_zod7 = require("zod");
 
  // src/openai-image-settings.ts
  var modelMaxImagesPerCall = {
@@ -1597,13 +1469,13 @@ var OpenAIImageModel = class {
  };
  }
  };
- var openaiImageResponseSchema = import_zod5.z.object({
- data: import_zod5.z.array(import_zod5.z.object({ b64_json: import_zod5.z.string() }))
+ var openaiImageResponseSchema = import_zod7.z.object({
+ data: import_zod7.z.array(import_zod7.z.object({ b64_json: import_zod7.z.string() }))
  });
 
  // src/openai-tools.ts
- var import_zod6 = require("zod");
- var WebSearchPreviewParameters = import_zod6.z.object({});
+ var import_zod8 = require("zod");
+ var WebSearchPreviewParameters = import_zod8.z.object({});
  function webSearchPreviewTool({
  searchContextSize,
  userLocation
@@ -1622,13 +1494,181 @@ var openaiTools = {
  webSearchPreview: webSearchPreviewTool
  };
 
+ // src/openai-transcription-model.ts
+ var import_provider_utils7 = require("@ai-sdk/provider-utils");
+ var import_zod9 = require("zod");
+ var openAIProviderOptionsSchema = import_zod9.z.object({
+ include: import_zod9.z.array(import_zod9.z.string()).nullish(),
+ language: import_zod9.z.string().nullish(),
+ prompt: import_zod9.z.string().nullish(),
+ temperature: import_zod9.z.number().min(0).max(1).nullish().default(0),
+ timestampGranularities: import_zod9.z.array(import_zod9.z.enum(["word", "segment"])).nullish().default(["segment"])
+ });
+ var languageMap = {
+ afrikaans: "af",
+ arabic: "ar",
+ armenian: "hy",
+ azerbaijani: "az",
+ belarusian: "be",
+ bosnian: "bs",
+ bulgarian: "bg",
+ catalan: "ca",
+ chinese: "zh",
+ croatian: "hr",
+ czech: "cs",
+ danish: "da",
+ dutch: "nl",
+ english: "en",
+ estonian: "et",
+ finnish: "fi",
+ french: "fr",
+ galician: "gl",
+ german: "de",
+ greek: "el",
+ hebrew: "he",
+ hindi: "hi",
+ hungarian: "hu",
+ icelandic: "is",
+ indonesian: "id",
+ italian: "it",
+ japanese: "ja",
+ kannada: "kn",
+ kazakh: "kk",
+ korean: "ko",
+ latvian: "lv",
+ lithuanian: "lt",
+ macedonian: "mk",
+ malay: "ms",
+ marathi: "mr",
+ maori: "mi",
+ nepali: "ne",
+ norwegian: "no",
+ persian: "fa",
+ polish: "pl",
+ portuguese: "pt",
+ romanian: "ro",
+ russian: "ru",
+ serbian: "sr",
+ slovak: "sk",
+ slovenian: "sl",
+ spanish: "es",
+ swahili: "sw",
+ swedish: "sv",
+ tagalog: "tl",
+ tamil: "ta",
+ thai: "th",
+ turkish: "tr",
+ ukrainian: "uk",
+ urdu: "ur",
+ vietnamese: "vi",
+ welsh: "cy"
+ };
+ var OpenAITranscriptionModel = class {
+ constructor(modelId, config) {
+ this.modelId = modelId;
+ this.config = config;
+ this.specificationVersion = "v1";
+ }
+ get provider() {
+ return this.config.provider;
+ }
+ getArgs({
+ audio,
+ mediaType,
+ providerOptions
+ }) {
+ var _a, _b, _c, _d, _e;
+ const warnings = [];
+ const openAIOptions = (0, import_provider_utils7.parseProviderOptions)({
+ provider: "openai",
+ providerOptions,
+ schema: openAIProviderOptionsSchema
+ });
+ const formData = new FormData();
+ const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([(0, import_provider_utils7.convertBase64ToUint8Array)(audio)]);
+ formData.append("model", this.modelId);
+ formData.append("file", new File([blob], "audio", { type: mediaType }));
+ if (openAIOptions) {
+ const transcriptionModelOptions = {
+ include: (_a = openAIOptions.include) != null ? _a : void 0,
+ language: (_b = openAIOptions.language) != null ? _b : void 0,
+ prompt: (_c = openAIOptions.prompt) != null ? _c : void 0,
+ temperature: (_d = openAIOptions.temperature) != null ? _d : void 0,
+ timestamp_granularities: (_e = openAIOptions.timestampGranularities) != null ? _e : void 0
+ };
+ for (const key in transcriptionModelOptions) {
+ const value = transcriptionModelOptions[key];
+ if (value !== void 0) {
+ formData.append(key, String(value));
+ }
+ }
+ }
+ return {
+ formData,
+ warnings
+ };
+ }
+ async doGenerate(options) {
+ var _a, _b, _c, _d, _e, _f;
+ const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
+ const { formData, warnings } = this.getArgs(options);
+ const {
+ value: response,
+ responseHeaders,
+ rawValue: rawResponse
+ } = await (0, import_provider_utils7.postFormDataToApi)({
+ url: this.config.url({
+ path: "/audio/transcriptions",
+ modelId: this.modelId
+ }),
+ headers: (0, import_provider_utils7.combineHeaders)(this.config.headers(), options.headers),
+ formData,
+ failedResponseHandler: openaiFailedResponseHandler,
+ successfulResponseHandler: (0, import_provider_utils7.createJsonResponseHandler)(
+ openaiTranscriptionResponseSchema
+ ),
+ abortSignal: options.abortSignal,
+ fetch: this.config.fetch
+ });
+ const language = response.language != null && response.language in languageMap ? languageMap[response.language] : void 0;
+ return {
+ text: response.text,
+ segments: (_e = (_d = response.words) == null ? void 0 : _d.map((word) => ({
+ text: word.word,
+ startSecond: word.start,
+ endSecond: word.end
+ }))) != null ? _e : [],
+ language,
+ durationInSeconds: (_f = response.duration) != null ? _f : void 0,
+ warnings,
+ response: {
+ timestamp: currentDate,
+ modelId: this.modelId,
+ headers: responseHeaders,
+ body: rawResponse
+ }
+ };
+ }
+ };
1653
+ var openaiTranscriptionResponseSchema = import_zod9.z.object({
1654
+ text: import_zod9.z.string(),
1655
+ language: import_zod9.z.string().nullish(),
1656
+ duration: import_zod9.z.number().nullish(),
1657
+ words: import_zod9.z.array(
1658
+ import_zod9.z.object({
1659
+ word: import_zod9.z.string(),
1660
+ start: import_zod9.z.number(),
1661
+ end: import_zod9.z.number()
1662
+ })
1663
+ ).nullish()
1664
+ });
1665
+
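Note: the transcription model added above posts multipart form data to /audio/transcriptions and maps `response.words` onto segments. A minimal usage sketch that calls the low-level model interface directly; the `whisper-1` model id and the input file path are illustrative assumptions:

import { readFile } from 'node:fs/promises';
import { createOpenAI } from '@ai-sdk/openai';

const openai = createOpenAI(); // reads OPENAI_API_KEY from the environment
const model = openai.transcription('whisper-1'); // model id is an assumption

const result = await model.doGenerate({
  audio: new Uint8Array(await readFile('./recording.mp3')), // hypothetical input file
  mediaType: 'audio/mpeg',
  // Keys match openAIProviderOptionsSchema above: include, language, prompt,
  // temperature, timestampGranularities.
  providerOptions: { openai: { language: 'en', timestampGranularities: ['word'] } },
});

console.log(result.text);
console.log(result.segments);          // [{ text, startSecond, endSecond }, ...]
console.log(result.durationInSeconds); // undefined if the API omits duration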
1625
1666
  // src/responses/openai-responses-language-model.ts
1626
1667
  var import_provider_utils8 = require("@ai-sdk/provider-utils");
1627
- var import_zod7 = require("zod");
1668
+ var import_zod10 = require("zod");
1628
1669
 
1629
1670
  // src/responses/convert-to-openai-responses-messages.ts
1630
- var import_provider7 = require("@ai-sdk/provider");
1631
- var import_provider_utils7 = require("@ai-sdk/provider-utils");
1671
+ var import_provider6 = require("@ai-sdk/provider");
1632
1672
  function convertToOpenAIResponsesMessages({
1633
1673
  prompt,
1634
1674
  systemMessageMode
@@ -1667,38 +1707,35 @@ function convertToOpenAIResponsesMessages({
1667
1707
  messages.push({
1668
1708
  role: "user",
1669
1709
  content: content.map((part, index) => {
1670
- var _a, _b, _c, _d;
1710
+ var _a, _b, _c;
1671
1711
  switch (part.type) {
1672
1712
  case "text": {
1673
1713
  return { type: "input_text", text: part.text };
1674
1714
  }
1675
- case "image": {
1676
- return {
1677
- type: "input_image",
1678
- image_url: part.image instanceof URL ? part.image.toString() : `data:${(_a = part.mimeType) != null ? _a : "image/jpeg"};base64,${(0, import_provider_utils7.convertUint8ArrayToBase64)(part.image)}`,
1679
- // OpenAI specific extension: image detail
1680
- detail: (_c = (_b = part.providerMetadata) == null ? void 0 : _b.openai) == null ? void 0 : _c.imageDetail
1681
- };
1682
- }
1683
1715
  case "file": {
1684
- if (part.data instanceof URL) {
1685
- throw new import_provider7.UnsupportedFunctionalityError({
1686
- functionality: "File URLs in user messages"
1687
- });
1688
- }
1689
- switch (part.mimeType) {
1690
- case "application/pdf": {
1691
- return {
1692
- type: "input_file",
1693
- filename: (_d = part.filename) != null ? _d : `part-${index}.pdf`,
1694
- file_data: `data:application/pdf;base64,${part.data}`
1695
- };
1696
- }
1697
- default: {
1698
- throw new import_provider7.UnsupportedFunctionalityError({
1699
- functionality: "Only PDF files are supported in user messages"
1716
+ if (part.mediaType.startsWith("image/")) {
1717
+ const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
1718
+ return {
1719
+ type: "input_image",
1720
+ image_url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${part.data}`,
1721
+ // OpenAI specific extension: image detail
1722
+ detail: (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.imageDetail
1723
+ };
1724
+ } else if (part.mediaType === "application/pdf") {
1725
+ if (part.data instanceof URL) {
1726
+ throw new import_provider6.UnsupportedFunctionalityError({
1727
+ functionality: "PDF file parts with URLs"
1700
1728
  });
1701
1729
  }
1730
+ return {
1731
+ type: "input_file",
1732
+ filename: (_c = part.filename) != null ? _c : `part-${index}.pdf`,
1733
+ file_data: `data:application/pdf;base64,${part.data}`
1734
+ };
1735
+ } else {
1736
+ throw new import_provider6.UnsupportedFunctionalityError({
1737
+ functionality: `file part media type ${part.mediaType}`
1738
+ });
1702
1739
  }
1703
1740
  }
1704
1741
  }
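Note: with the conversion above, user-message `file` parts carry a `mediaType`. Parts with an `image/*` media type may reference a URL directly (or pass base64 data), while `application/pdf` parts must be inlined as base64, since PDF URLs throw `UnsupportedFunctionalityError`. A prompt sketch using both; the URL and file path are illustrative:

import { readFile } from 'node:fs/promises';

const pdfBase64 = (await readFile('./report.pdf')).toString('base64'); // hypothetical file

const prompt = [
  {
    role: 'user',
    content: [
      { type: 'text', text: 'Summarize the attachment and describe the image.' },
      {
        type: 'file',
        mediaType: 'image/png',
        data: new URL('https://example.com/chart.png'), // forwarded as input_image
      },
      {
        type: 'file',
        mediaType: 'application/pdf',
        filename: 'report.pdf',
        data: pdfBase64, // base64 string; becomes an input_file data URL
      },
    ],
  },
];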
@@ -1767,18 +1804,17 @@ function mapOpenAIResponseFinishReason({
1767
1804
  }
1768
1805
 
1769
1806
  // src/responses/openai-responses-prepare-tools.ts
1770
- var import_provider8 = require("@ai-sdk/provider");
1807
+ var import_provider7 = require("@ai-sdk/provider");
1771
1808
  function prepareResponsesTools({
1772
- mode,
1809
+ tools,
1810
+ toolChoice,
1773
1811
  strict
1774
1812
  }) {
1775
- var _a;
1776
- const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
1813
+ tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
1777
1814
  const toolWarnings = [];
1778
1815
  if (tools == null) {
1779
- return { tools: void 0, tool_choice: void 0, toolWarnings };
1816
+ return { tools: void 0, toolChoice: void 0, toolWarnings };
1780
1817
  }
1781
- const toolChoice = mode.toolChoice;
1782
1818
  const openaiTools2 = [];
1783
1819
  for (const tool of tools) {
1784
1820
  switch (tool.type) {
@@ -1811,37 +1847,24 @@ function prepareResponsesTools({
1811
1847
  }
1812
1848
  }
1813
1849
  if (toolChoice == null) {
1814
- return { tools: openaiTools2, tool_choice: void 0, toolWarnings };
1850
+ return { tools: openaiTools2, toolChoice: void 0, toolWarnings };
1815
1851
  }
1816
1852
  const type = toolChoice.type;
1817
1853
  switch (type) {
1818
1854
  case "auto":
1819
1855
  case "none":
1820
1856
  case "required":
1821
- return { tools: openaiTools2, tool_choice: type, toolWarnings };
1822
- case "tool": {
1823
- if (toolChoice.toolName === "web_search_preview") {
1824
- return {
1825
- tools: openaiTools2,
1826
- tool_choice: {
1827
- type: "web_search_preview"
1828
- },
1829
- toolWarnings
1830
- };
1831
- }
1857
+ return { tools: openaiTools2, toolChoice: type, toolWarnings };
1858
+ case "tool":
1832
1859
  return {
1833
1860
  tools: openaiTools2,
1834
- tool_choice: {
1835
- type: "function",
1836
- name: toolChoice.toolName
1837
- },
1861
+ toolChoice: toolChoice.toolName === "web_search_preview" ? { type: "web_search_preview" } : { type: "function", name: toolChoice.toolName },
1838
1862
  toolWarnings
1839
1863
  };
1840
- }
1841
1864
  default: {
1842
1865
  const _exhaustiveCheck = type;
1843
- throw new import_provider8.UnsupportedFunctionalityError({
1844
- functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`
1866
+ throw new import_provider7.UnsupportedFunctionalityError({
1867
+ functionality: `tool choice type: ${_exhaustiveCheck}`
1845
1868
  });
1846
1869
  }
1847
1870
  }
@@ -1851,16 +1874,19 @@ function prepareResponsesTools({
1851
1874
  var OpenAIResponsesLanguageModel = class {
1852
1875
  constructor(modelId, config) {
1853
1876
  this.specificationVersion = "v2";
1854
- this.defaultObjectGenerationMode = "json";
1855
1877
  this.modelId = modelId;
1856
1878
  this.config = config;
1857
1879
  }
1880
+ async getSupportedUrls() {
1881
+ return {
1882
+ "image/*": [/^https?:\/\/.*$/]
1883
+ };
1884
+ }
1858
1885
  get provider() {
1859
1886
  return this.config.provider;
1860
1887
  }
1861
1888
  getArgs({
1862
- mode,
1863
- maxTokens,
1889
+ maxOutputTokens,
1864
1890
  temperature,
1865
1891
  stopSequences,
1866
1892
  topP,
@@ -1869,24 +1895,19 @@ var OpenAIResponsesLanguageModel = class {
1869
1895
  frequencyPenalty,
1870
1896
  seed,
1871
1897
  prompt,
1872
- providerMetadata,
1898
+ providerOptions,
1899
+ tools,
1900
+ toolChoice,
1873
1901
  responseFormat
1874
1902
  }) {
1875
- var _a, _b, _c;
1903
+ var _a, _b;
1876
1904
  const warnings = [];
1877
1905
  const modelConfig = getResponsesModelConfig(this.modelId);
1878
- const type = mode.type;
1879
1906
  if (topK != null) {
1880
- warnings.push({
1881
- type: "unsupported-setting",
1882
- setting: "topK"
1883
- });
1907
+ warnings.push({ type: "unsupported-setting", setting: "topK" });
1884
1908
  }
1885
1909
  if (seed != null) {
1886
- warnings.push({
1887
- type: "unsupported-setting",
1888
- setting: "seed"
1889
- });
1910
+ warnings.push({ type: "unsupported-setting", setting: "seed" });
1890
1911
  }
1891
1912
  if (presencePenalty != null) {
1892
1913
  warnings.push({
@@ -1901,10 +1922,7 @@ var OpenAIResponsesLanguageModel = class {
1901
1922
  });
1902
1923
  }
1903
1924
  if (stopSequences != null) {
1904
- warnings.push({
1905
- type: "unsupported-setting",
1906
- setting: "stopSequences"
1907
- });
1925
+ warnings.push({ type: "unsupported-setting", setting: "stopSequences" });
1908
1926
  }
1909
1927
  const { messages, warnings: messageWarnings } = convertToOpenAIResponsesMessages({
1910
1928
  prompt,
@@ -1913,7 +1931,7 @@ var OpenAIResponsesLanguageModel = class {
1913
1931
  warnings.push(...messageWarnings);
1914
1932
  const openaiOptions = (0, import_provider_utils8.parseProviderOptions)({
1915
1933
  provider: "openai",
1916
- providerOptions: providerMetadata,
1934
+ providerOptions,
1917
1935
  schema: openaiResponsesProviderOptionsSchema
1918
1936
  });
1919
1937
  const isStrict = (_a = openaiOptions == null ? void 0 : openaiOptions.strictSchemas) != null ? _a : true;
@@ -1922,7 +1940,7 @@ var OpenAIResponsesLanguageModel = class {
1922
1940
  input: messages,
1923
1941
  temperature,
1924
1942
  top_p: topP,
1925
- max_output_tokens: maxTokens,
1943
+ max_output_tokens: maxOutputTokens,
1926
1944
  ...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
1927
1945
  text: {
1928
1946
  format: responseFormat.schema != null ? {
@@ -1967,65 +1985,26 @@ var OpenAIResponsesLanguageModel = class {
1967
1985
  });
1968
1986
  }
1969
1987
  }
1970
- switch (type) {
1971
- case "regular": {
1972
- const { tools, tool_choice, toolWarnings } = prepareResponsesTools({
1973
- mode,
1974
- strict: isStrict
1975
- // TODO support provider options on tools
1976
- });
1977
- return {
1978
- args: {
1979
- ...baseArgs,
1980
- tools,
1981
- tool_choice
1982
- },
1983
- warnings: [...warnings, ...toolWarnings]
1984
- };
1985
- }
1986
- case "object-json": {
1987
- return {
1988
- args: {
1989
- ...baseArgs,
1990
- text: {
1991
- format: mode.schema != null ? {
1992
- type: "json_schema",
1993
- strict: isStrict,
1994
- name: (_c = mode.name) != null ? _c : "response",
1995
- description: mode.description,
1996
- schema: mode.schema
1997
- } : { type: "json_object" }
1998
- }
1999
- },
2000
- warnings
2001
- };
2002
- }
2003
- case "object-tool": {
2004
- return {
2005
- args: {
2006
- ...baseArgs,
2007
- tool_choice: { type: "function", name: mode.tool.name },
2008
- tools: [
2009
- {
2010
- type: "function",
2011
- name: mode.tool.name,
2012
- description: mode.tool.description,
2013
- parameters: mode.tool.parameters,
2014
- strict: isStrict
2015
- }
2016
- ]
2017
- },
2018
- warnings
2019
- };
2020
- }
2021
- default: {
2022
- const _exhaustiveCheck = type;
2023
- throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
2024
- }
2025
- }
1988
+ const {
1989
+ tools: openaiTools2,
1990
+ toolChoice: openaiToolChoice,
1991
+ toolWarnings
1992
+ } = prepareResponsesTools({
1993
+ tools,
1994
+ toolChoice,
1995
+ strict: isStrict
1996
+ });
1997
+ return {
1998
+ args: {
1999
+ ...baseArgs,
2000
+ tools: openaiTools2,
2001
+ tool_choice: openaiToolChoice
2002
+ },
2003
+ warnings: [...warnings, ...toolWarnings]
2004
+ };
2026
2005
  }
2027
2006
  async doGenerate(options) {
2028
- var _a, _b, _c, _d, _e;
2007
+ var _a, _b, _c, _d, _e, _f, _g, _h;
2029
2008
  const { args: body, warnings } = this.getArgs(options);
2030
2009
  const {
2031
2010
  responseHeaders,
@@ -2040,105 +2019,111 @@ var OpenAIResponsesLanguageModel = class {
2040
2019
  body,
2041
2020
  failedResponseHandler: openaiFailedResponseHandler,
2042
2021
  successfulResponseHandler: (0, import_provider_utils8.createJsonResponseHandler)(
2043
- import_zod7.z.object({
2044
- id: import_zod7.z.string(),
2045
- created_at: import_zod7.z.number(),
2046
- model: import_zod7.z.string(),
2047
- output: import_zod7.z.array(
2048
- import_zod7.z.discriminatedUnion("type", [
2049
- import_zod7.z.object({
2050
- type: import_zod7.z.literal("message"),
2051
- role: import_zod7.z.literal("assistant"),
2052
- content: import_zod7.z.array(
2053
- import_zod7.z.object({
2054
- type: import_zod7.z.literal("output_text"),
2055
- text: import_zod7.z.string(),
2056
- annotations: import_zod7.z.array(
2057
- import_zod7.z.object({
2058
- type: import_zod7.z.literal("url_citation"),
2059
- start_index: import_zod7.z.number(),
2060
- end_index: import_zod7.z.number(),
2061
- url: import_zod7.z.string(),
2062
- title: import_zod7.z.string()
2022
+ import_zod10.z.object({
2023
+ id: import_zod10.z.string(),
2024
+ created_at: import_zod10.z.number(),
2025
+ model: import_zod10.z.string(),
2026
+ output: import_zod10.z.array(
2027
+ import_zod10.z.discriminatedUnion("type", [
2028
+ import_zod10.z.object({
2029
+ type: import_zod10.z.literal("message"),
2030
+ role: import_zod10.z.literal("assistant"),
2031
+ content: import_zod10.z.array(
2032
+ import_zod10.z.object({
2033
+ type: import_zod10.z.literal("output_text"),
2034
+ text: import_zod10.z.string(),
2035
+ annotations: import_zod10.z.array(
2036
+ import_zod10.z.object({
2037
+ type: import_zod10.z.literal("url_citation"),
2038
+ start_index: import_zod10.z.number(),
2039
+ end_index: import_zod10.z.number(),
2040
+ url: import_zod10.z.string(),
2041
+ title: import_zod10.z.string()
2063
2042
  })
2064
2043
  )
2065
2044
  })
2066
2045
  )
2067
2046
  }),
2068
- import_zod7.z.object({
2069
- type: import_zod7.z.literal("function_call"),
2070
- call_id: import_zod7.z.string(),
2071
- name: import_zod7.z.string(),
2072
- arguments: import_zod7.z.string()
2047
+ import_zod10.z.object({
2048
+ type: import_zod10.z.literal("function_call"),
2049
+ call_id: import_zod10.z.string(),
2050
+ name: import_zod10.z.string(),
2051
+ arguments: import_zod10.z.string()
2073
2052
  }),
2074
- import_zod7.z.object({
2075
- type: import_zod7.z.literal("web_search_call")
2053
+ import_zod10.z.object({
2054
+ type: import_zod10.z.literal("web_search_call")
2076
2055
  }),
2077
- import_zod7.z.object({
2078
- type: import_zod7.z.literal("computer_call")
2056
+ import_zod10.z.object({
2057
+ type: import_zod10.z.literal("computer_call")
2079
2058
  }),
2080
- import_zod7.z.object({
2081
- type: import_zod7.z.literal("reasoning")
2059
+ import_zod10.z.object({
2060
+ type: import_zod10.z.literal("reasoning")
2082
2061
  })
2083
2062
  ])
2084
2063
  ),
2085
- incomplete_details: import_zod7.z.object({ reason: import_zod7.z.string() }).nullable(),
2064
+ incomplete_details: import_zod10.z.object({ reason: import_zod10.z.string() }).nullable(),
2086
2065
  usage: usageSchema
2087
2066
  })
2088
2067
  ),
2089
2068
  abortSignal: options.abortSignal,
2090
2069
  fetch: this.config.fetch
2091
2070
  });
2092
- const outputTextElements = response.output.filter((output) => output.type === "message").flatMap((output) => output.content).filter((content) => content.type === "output_text");
2093
- const toolCalls = response.output.filter((output) => output.type === "function_call").map((output) => ({
2094
- toolCallType: "function",
2095
- toolCallId: output.call_id,
2096
- toolName: output.name,
2097
- args: output.arguments
2098
- }));
2071
+ const content = [];
2072
+ for (const part of response.output) {
2073
+ switch (part.type) {
2074
+ case "message": {
2075
+ for (const contentPart of part.content) {
2076
+ content.push({
2077
+ type: "text",
2078
+ text: contentPart.text
2079
+ });
2080
+ for (const annotation of contentPart.annotations) {
2081
+ content.push({
2082
+ type: "source",
2083
+ sourceType: "url",
2084
+ id: (_c = (_b = (_a = this.config).generateId) == null ? void 0 : _b.call(_a)) != null ? _c : (0, import_provider_utils8.generateId)(),
2085
+ url: annotation.url,
2086
+ title: annotation.title
2087
+ });
2088
+ }
2089
+ }
2090
+ break;
2091
+ }
2092
+ case "function_call": {
2093
+ content.push({
2094
+ type: "tool-call",
2095
+ toolCallType: "function",
2096
+ toolCallId: part.call_id,
2097
+ toolName: part.name,
2098
+ args: part.arguments
2099
+ });
2100
+ break;
2101
+ }
2102
+ }
2103
+ }
2099
2104
  return {
2100
- text: outputTextElements.map((content) => content.text).join("\n"),
2101
- sources: outputTextElements.flatMap(
2102
- (content) => content.annotations.map((annotation) => {
2103
- var _a2, _b2, _c2;
2104
- return {
2105
- sourceType: "url",
2106
- id: (_c2 = (_b2 = (_a2 = this.config).generateId) == null ? void 0 : _b2.call(_a2)) != null ? _c2 : (0, import_provider_utils8.generateId)(),
2107
- url: annotation.url,
2108
- title: annotation.title
2109
- };
2110
- })
2111
- ),
2105
+ content,
2112
2106
  finishReason: mapOpenAIResponseFinishReason({
2113
- finishReason: (_a = response.incomplete_details) == null ? void 0 : _a.reason,
2114
- hasToolCalls: toolCalls.length > 0
2107
+ finishReason: (_d = response.incomplete_details) == null ? void 0 : _d.reason,
2108
+ hasToolCalls: content.some((part) => part.type === "tool-call")
2115
2109
  }),
2116
- toolCalls: toolCalls.length > 0 ? toolCalls : void 0,
2117
2110
  usage: {
2118
- promptTokens: response.usage.input_tokens,
2119
- completionTokens: response.usage.output_tokens
2120
- },
2121
- rawCall: {
2122
- rawPrompt: void 0,
2123
- rawSettings: {}
2124
- },
2125
- rawResponse: {
2126
- headers: responseHeaders,
2127
- body: rawResponse
2128
- },
2129
- request: {
2130
- body: JSON.stringify(body)
2111
+ inputTokens: response.usage.input_tokens,
2112
+ outputTokens: response.usage.output_tokens
2131
2113
  },
2114
+ request: { body },
2132
2115
  response: {
2133
2116
  id: response.id,
2134
2117
  timestamp: new Date(response.created_at * 1e3),
2135
- modelId: response.model
2118
+ modelId: response.model,
2119
+ headers: responseHeaders,
2120
+ body: rawResponse
2136
2121
  },
2137
2122
  providerMetadata: {
2138
2123
  openai: {
2139
2124
  responseId: response.id,
2140
- cachedPromptTokens: (_c = (_b = response.usage.input_tokens_details) == null ? void 0 : _b.cached_tokens) != null ? _c : null,
2141
- reasoningTokens: (_e = (_d = response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : null
2125
+ cachedPromptTokens: (_f = (_e = response.usage.input_tokens_details) == null ? void 0 : _e.cached_tokens) != null ? _f : null,
2126
+ reasoningTokens: (_h = (_g = response.usage.output_tokens_details) == null ? void 0 : _g.reasoning_tokens) != null ? _h : null
2142
2127
  }
2143
2128
  },
2144
2129
  warnings
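Note: in the v2 result shape above, `doGenerate` returns a single `content` array of typed parts instead of separate `text`, `sources`, and `toolCalls` fields, and usage is reported as `inputTokens`/`outputTokens`. A consumption sketch; the `responses()` factory is registered outside this excerpt and, together with the `gpt-4o-mini` model id, is an assumption:

import { createOpenAI } from '@ai-sdk/openai';

const openai = createOpenAI(); // reads OPENAI_API_KEY from the environment
const model = openai.responses('gpt-4o-mini'); // assumed factory and model id

const result = await model.doGenerate({
  prompt: [{ role: 'user', content: [{ type: 'text', text: 'What changed in the canary?' }] }],
  maxOutputTokens: 512,
});

for (const part of result.content) {
  if (part.type === 'text') process.stdout.write(part.text);
  if (part.type === 'tool-call') console.log('tool call:', part.toolName, part.args);
  if (part.type === 'source') console.log('citation:', part.url, part.title);
}
console.log(result.usage); // { inputTokens, outputTokens }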
@@ -2165,8 +2150,10 @@ var OpenAIResponsesLanguageModel = class {
2165
2150
  });
2166
2151
  const self = this;
2167
2152
  let finishReason = "unknown";
2168
- let promptTokens = NaN;
2169
- let completionTokens = NaN;
2153
+ const usage = {
2154
+ inputTokens: void 0,
2155
+ outputTokens: void 0
2156
+ };
2170
2157
  let cachedPromptTokens = null;
2171
2158
  let reasoningTokens = null;
2172
2159
  let responseId = null;
@@ -2175,6 +2162,9 @@ var OpenAIResponsesLanguageModel = class {
2175
2162
  return {
2176
2163
  stream: response.pipeThrough(
2177
2164
  new TransformStream({
2165
+ start(controller) {
2166
+ controller.enqueue({ type: "stream-start", warnings });
2167
+ },
2178
2168
  transform(chunk, controller) {
2179
2169
  var _a, _b, _c, _d, _e, _f, _g, _h;
2180
2170
  if (!chunk.success) {
@@ -2218,8 +2208,8 @@ var OpenAIResponsesLanguageModel = class {
2218
2208
  });
2219
2209
  } else if (isTextDeltaChunk(value)) {
2220
2210
  controller.enqueue({
2221
- type: "text-delta",
2222
- textDelta: value.delta
2211
+ type: "text",
2212
+ text: value.delta
2223
2213
  });
2224
2214
  } else if (isResponseOutputItemDoneChunk(value) && value.item.type === "function_call") {
2225
2215
  ongoingToolCalls[value.output_index] = void 0;
@@ -2236,19 +2226,17 @@ var OpenAIResponsesLanguageModel = class {
2236
2226
  finishReason: (_a = value.response.incomplete_details) == null ? void 0 : _a.reason,
2237
2227
  hasToolCalls
2238
2228
  });
2239
- promptTokens = value.response.usage.input_tokens;
2240
- completionTokens = value.response.usage.output_tokens;
2229
+ usage.inputTokens = value.response.usage.input_tokens;
2230
+ usage.outputTokens = value.response.usage.output_tokens;
2241
2231
  cachedPromptTokens = (_c = (_b = value.response.usage.input_tokens_details) == null ? void 0 : _b.cached_tokens) != null ? _c : cachedPromptTokens;
2242
2232
  reasoningTokens = (_e = (_d = value.response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : reasoningTokens;
2243
2233
  } else if (isResponseAnnotationAddedChunk(value)) {
2244
2234
  controller.enqueue({
2245
2235
  type: "source",
2246
- source: {
2247
- sourceType: "url",
2248
- id: (_h = (_g = (_f = self.config).generateId) == null ? void 0 : _g.call(_f)) != null ? _h : (0, import_provider_utils8.generateId)(),
2249
- url: value.annotation.url,
2250
- title: value.annotation.title
2251
- }
2236
+ sourceType: "url",
2237
+ id: (_h = (_g = (_f = self.config).generateId) == null ? void 0 : _g.call(_f)) != null ? _h : (0, import_provider_utils8.generateId)(),
2238
+ url: value.annotation.url,
2239
+ title: value.annotation.title
2252
2240
  });
2253
2241
  }
2254
2242
  },
@@ -2256,7 +2244,7 @@ var OpenAIResponsesLanguageModel = class {
2256
2244
  controller.enqueue({
2257
2245
  type: "finish",
2258
2246
  finishReason,
2259
- usage: { promptTokens, completionTokens },
2247
+ usage,
2260
2248
  ...(cachedPromptTokens != null || reasoningTokens != null) && {
2261
2249
  providerMetadata: {
2262
2250
  openai: {
@@ -2270,89 +2258,84 @@ var OpenAIResponsesLanguageModel = class {
2270
2258
  }
2271
2259
  })
2272
2260
  ),
2273
- rawCall: {
2274
- rawPrompt: void 0,
2275
- rawSettings: {}
2276
- },
2277
- rawResponse: { headers: responseHeaders },
2278
- request: { body: JSON.stringify(body) },
2279
- warnings
2261
+ request: { body },
2262
+ response: { headers: responseHeaders }
2280
2263
  };
2281
2264
  }
2282
2265
  };
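Note: the streaming path above now emits a `stream-start` part carrying warnings, plain `text` parts (replacing `text-delta`/`textDelta`), flattened `source` parts, and a `finish` part whose usage uses `inputTokens`/`outputTokens`. A sketch of reading the stream; setup is assumed as in the previous sketch:

import { createOpenAI } from '@ai-sdk/openai';

const openai = createOpenAI(); // reads OPENAI_API_KEY from the environment
const model = openai.responses('gpt-4o-mini'); // assumed factory and model id

const { stream } = await model.doStream({
  prompt: [{ role: 'user', content: [{ type: 'text', text: 'Stream a short answer.' }] }],
});

const reader = stream.getReader();
while (true) {
  const { done, value } = await reader.read();
  if (done) break;
  switch (value.type) {
    case 'stream-start': console.log('warnings:', value.warnings); break;
    case 'text': process.stdout.write(value.text); break;
    case 'source': console.log('\ncitation:', value.url); break;
    case 'finish': console.log('\n', value.finishReason, value.usage); break;
  }
}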
2283
- var usageSchema = import_zod7.z.object({
2284
- input_tokens: import_zod7.z.number(),
2285
- input_tokens_details: import_zod7.z.object({ cached_tokens: import_zod7.z.number().nullish() }).nullish(),
2286
- output_tokens: import_zod7.z.number(),
2287
- output_tokens_details: import_zod7.z.object({ reasoning_tokens: import_zod7.z.number().nullish() }).nullish()
2266
+ var usageSchema = import_zod10.z.object({
2267
+ input_tokens: import_zod10.z.number(),
2268
+ input_tokens_details: import_zod10.z.object({ cached_tokens: import_zod10.z.number().nullish() }).nullish(),
2269
+ output_tokens: import_zod10.z.number(),
2270
+ output_tokens_details: import_zod10.z.object({ reasoning_tokens: import_zod10.z.number().nullish() }).nullish()
2288
2271
  });
2289
- var textDeltaChunkSchema = import_zod7.z.object({
2290
- type: import_zod7.z.literal("response.output_text.delta"),
2291
- delta: import_zod7.z.string()
2272
+ var textDeltaChunkSchema = import_zod10.z.object({
2273
+ type: import_zod10.z.literal("response.output_text.delta"),
2274
+ delta: import_zod10.z.string()
2292
2275
  });
2293
- var responseFinishedChunkSchema = import_zod7.z.object({
2294
- type: import_zod7.z.enum(["response.completed", "response.incomplete"]),
2295
- response: import_zod7.z.object({
2296
- incomplete_details: import_zod7.z.object({ reason: import_zod7.z.string() }).nullish(),
2276
+ var responseFinishedChunkSchema = import_zod10.z.object({
2277
+ type: import_zod10.z.enum(["response.completed", "response.incomplete"]),
2278
+ response: import_zod10.z.object({
2279
+ incomplete_details: import_zod10.z.object({ reason: import_zod10.z.string() }).nullish(),
2297
2280
  usage: usageSchema
2298
2281
  })
2299
2282
  });
2300
- var responseCreatedChunkSchema = import_zod7.z.object({
2301
- type: import_zod7.z.literal("response.created"),
2302
- response: import_zod7.z.object({
2303
- id: import_zod7.z.string(),
2304
- created_at: import_zod7.z.number(),
2305
- model: import_zod7.z.string()
2283
+ var responseCreatedChunkSchema = import_zod10.z.object({
2284
+ type: import_zod10.z.literal("response.created"),
2285
+ response: import_zod10.z.object({
2286
+ id: import_zod10.z.string(),
2287
+ created_at: import_zod10.z.number(),
2288
+ model: import_zod10.z.string()
2306
2289
  })
2307
2290
  });
2308
- var responseOutputItemDoneSchema = import_zod7.z.object({
2309
- type: import_zod7.z.literal("response.output_item.done"),
2310
- output_index: import_zod7.z.number(),
2311
- item: import_zod7.z.discriminatedUnion("type", [
2312
- import_zod7.z.object({
2313
- type: import_zod7.z.literal("message")
2291
+ var responseOutputItemDoneSchema = import_zod10.z.object({
2292
+ type: import_zod10.z.literal("response.output_item.done"),
2293
+ output_index: import_zod10.z.number(),
2294
+ item: import_zod10.z.discriminatedUnion("type", [
2295
+ import_zod10.z.object({
2296
+ type: import_zod10.z.literal("message")
2314
2297
  }),
2315
- import_zod7.z.object({
2316
- type: import_zod7.z.literal("function_call"),
2317
- id: import_zod7.z.string(),
2318
- call_id: import_zod7.z.string(),
2319
- name: import_zod7.z.string(),
2320
- arguments: import_zod7.z.string(),
2321
- status: import_zod7.z.literal("completed")
2298
+ import_zod10.z.object({
2299
+ type: import_zod10.z.literal("function_call"),
2300
+ id: import_zod10.z.string(),
2301
+ call_id: import_zod10.z.string(),
2302
+ name: import_zod10.z.string(),
2303
+ arguments: import_zod10.z.string(),
2304
+ status: import_zod10.z.literal("completed")
2322
2305
  })
2323
2306
  ])
2324
2307
  });
2325
- var responseFunctionCallArgumentsDeltaSchema = import_zod7.z.object({
2326
- type: import_zod7.z.literal("response.function_call_arguments.delta"),
2327
- item_id: import_zod7.z.string(),
2328
- output_index: import_zod7.z.number(),
2329
- delta: import_zod7.z.string()
2308
+ var responseFunctionCallArgumentsDeltaSchema = import_zod10.z.object({
2309
+ type: import_zod10.z.literal("response.function_call_arguments.delta"),
2310
+ item_id: import_zod10.z.string(),
2311
+ output_index: import_zod10.z.number(),
2312
+ delta: import_zod10.z.string()
2330
2313
  });
2331
- var responseOutputItemAddedSchema = import_zod7.z.object({
2332
- type: import_zod7.z.literal("response.output_item.added"),
2333
- output_index: import_zod7.z.number(),
2334
- item: import_zod7.z.discriminatedUnion("type", [
2335
- import_zod7.z.object({
2336
- type: import_zod7.z.literal("message")
2314
+ var responseOutputItemAddedSchema = import_zod10.z.object({
2315
+ type: import_zod10.z.literal("response.output_item.added"),
2316
+ output_index: import_zod10.z.number(),
2317
+ item: import_zod10.z.discriminatedUnion("type", [
2318
+ import_zod10.z.object({
2319
+ type: import_zod10.z.literal("message")
2337
2320
  }),
2338
- import_zod7.z.object({
2339
- type: import_zod7.z.literal("function_call"),
2340
- id: import_zod7.z.string(),
2341
- call_id: import_zod7.z.string(),
2342
- name: import_zod7.z.string(),
2343
- arguments: import_zod7.z.string()
2321
+ import_zod10.z.object({
2322
+ type: import_zod10.z.literal("function_call"),
2323
+ id: import_zod10.z.string(),
2324
+ call_id: import_zod10.z.string(),
2325
+ name: import_zod10.z.string(),
2326
+ arguments: import_zod10.z.string()
2344
2327
  })
2345
2328
  ])
2346
2329
  });
2347
- var responseAnnotationAddedSchema = import_zod7.z.object({
2348
- type: import_zod7.z.literal("response.output_text.annotation.added"),
2349
- annotation: import_zod7.z.object({
2350
- type: import_zod7.z.literal("url_citation"),
2351
- url: import_zod7.z.string(),
2352
- title: import_zod7.z.string()
2330
+ var responseAnnotationAddedSchema = import_zod10.z.object({
2331
+ type: import_zod10.z.literal("response.output_text.annotation.added"),
2332
+ annotation: import_zod10.z.object({
2333
+ type: import_zod10.z.literal("url_citation"),
2334
+ url: import_zod10.z.string(),
2335
+ title: import_zod10.z.string()
2353
2336
  })
2354
2337
  });
2355
- var openaiResponsesChunkSchema = import_zod7.z.union([
2338
+ var openaiResponsesChunkSchema = import_zod10.z.union([
2356
2339
  textDeltaChunkSchema,
2357
2340
  responseFinishedChunkSchema,
2358
2341
  responseCreatedChunkSchema,
@@ -2360,7 +2343,7 @@ var openaiResponsesChunkSchema = import_zod7.z.union([
2360
2343
  responseFunctionCallArgumentsDeltaSchema,
2361
2344
  responseOutputItemAddedSchema,
2362
2345
  responseAnnotationAddedSchema,
2363
- import_zod7.z.object({ type: import_zod7.z.string() }).passthrough()
2346
+ import_zod10.z.object({ type: import_zod10.z.string() }).passthrough()
2364
2347
  // fallback for unknown chunks
2365
2348
  ]);
2366
2349
  function isTextDeltaChunk(chunk) {
@@ -2405,25 +2388,124 @@ function getResponsesModelConfig(modelId) {
2405
2388
  requiredAutoTruncation: false
2406
2389
  };
2407
2390
  }
2408
- var openaiResponsesProviderOptionsSchema = import_zod7.z.object({
2409
- metadata: import_zod7.z.any().nullish(),
2410
- parallelToolCalls: import_zod7.z.boolean().nullish(),
2411
- previousResponseId: import_zod7.z.string().nullish(),
2412
- store: import_zod7.z.boolean().nullish(),
2413
- user: import_zod7.z.string().nullish(),
2414
- reasoningEffort: import_zod7.z.string().nullish(),
2415
- strictSchemas: import_zod7.z.boolean().nullish(),
2416
- instructions: import_zod7.z.string().nullish()
2391
+ var openaiResponsesProviderOptionsSchema = import_zod10.z.object({
2392
+ metadata: import_zod10.z.any().nullish(),
2393
+ parallelToolCalls: import_zod10.z.boolean().nullish(),
2394
+ previousResponseId: import_zod10.z.string().nullish(),
2395
+ store: import_zod10.z.boolean().nullish(),
2396
+ user: import_zod10.z.string().nullish(),
2397
+ reasoningEffort: import_zod10.z.string().nullish(),
2398
+ strictSchemas: import_zod10.z.boolean().nullish(),
2399
+ instructions: import_zod10.z.string().nullish()
2400
+ });
2401
+
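Note: `openaiResponsesProviderOptionsSchema` above lists the `providerOptions.openai` keys the responses model accepts, now read from `providerOptions` rather than `providerMetadata`. A sketch with illustrative values; model setup is assumed as in the earlier responses sketches:

import { createOpenAI } from '@ai-sdk/openai';

const openai = createOpenAI(); // reads OPENAI_API_KEY from the environment
const model = openai.responses('gpt-4o-mini'); // assumed factory and model id

await model.doGenerate({
  prompt: [{ role: 'user', content: [{ type: 'text', text: 'Continue the previous answer.' }] }],
  providerOptions: {
    openai: {
      reasoningEffort: 'low',         // schema field; value is an assumption
      strictSchemas: false,
      previousResponseId: 'resp_123', // hypothetical id of an earlier response
      store: true,
      user: 'user-42',                // hypothetical end-user identifier
    },
  },
});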
2402
+ // src/openai-speech-model.ts
2403
+ var import_provider_utils9 = require("@ai-sdk/provider-utils");
2404
+ var import_zod11 = require("zod");
2405
+ var OpenAIProviderOptionsSchema = import_zod11.z.object({
2406
+ instructions: import_zod11.z.string().nullish(),
2407
+ speed: import_zod11.z.number().min(0.25).max(4).default(1).nullish()
2417
2408
  });
2409
+ var OpenAISpeechModel = class {
2410
+ constructor(modelId, config) {
2411
+ this.modelId = modelId;
2412
+ this.config = config;
2413
+ this.specificationVersion = "v1";
2414
+ }
2415
+ get provider() {
2416
+ return this.config.provider;
2417
+ }
2418
+ getArgs({
2419
+ text,
2420
+ voice = "alloy",
2421
+ outputFormat = "mp3",
2422
+ speed,
2423
+ instructions,
2424
+ providerOptions
2425
+ }) {
2426
+ const warnings = [];
2427
+ const openAIOptions = (0, import_provider_utils9.parseProviderOptions)({
2428
+ provider: "openai",
2429
+ providerOptions,
2430
+ schema: OpenAIProviderOptionsSchema
2431
+ });
2432
+ const requestBody = {
2433
+ model: this.modelId,
2434
+ input: text,
2435
+ voice,
2436
+ response_format: "mp3",
2437
+ speed,
2438
+ instructions
2439
+ };
2440
+ if (outputFormat) {
2441
+ if (["mp3", "opus", "aac", "flac", "wav", "pcm"].includes(outputFormat)) {
2442
+ requestBody.response_format = outputFormat;
2443
+ } else {
2444
+ warnings.push({
2445
+ type: "unsupported-setting",
2446
+ setting: "outputFormat",
2447
+ details: `Unsupported output format: ${outputFormat}. Using mp3 instead.`
2448
+ });
2449
+ }
2450
+ }
2451
+ if (openAIOptions) {
2452
+ const speechModelOptions = {};
2453
+ for (const key in speechModelOptions) {
2454
+ const value = speechModelOptions[key];
2455
+ if (value !== void 0) {
2456
+ requestBody[key] = value;
2457
+ }
2458
+ }
2459
+ }
2460
+ return {
2461
+ requestBody,
2462
+ warnings
2463
+ };
2464
+ }
2465
+ async doGenerate(options) {
2466
+ var _a, _b, _c;
2467
+ const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
2468
+ const { requestBody, warnings } = this.getArgs(options);
2469
+ const {
2470
+ value: audio,
2471
+ responseHeaders,
2472
+ rawValue: rawResponse
2473
+ } = await (0, import_provider_utils9.postJsonToApi)({
2474
+ url: this.config.url({
2475
+ path: "/audio/speech",
2476
+ modelId: this.modelId
2477
+ }),
2478
+ headers: (0, import_provider_utils9.combineHeaders)(this.config.headers(), options.headers),
2479
+ body: requestBody,
2480
+ failedResponseHandler: openaiFailedResponseHandler,
2481
+ successfulResponseHandler: (0, import_provider_utils9.createBinaryResponseHandler)(),
2482
+ abortSignal: options.abortSignal,
2483
+ fetch: this.config.fetch
2484
+ });
2485
+ return {
2486
+ audio,
2487
+ warnings,
2488
+ request: {
2489
+ body: JSON.stringify(requestBody)
2490
+ },
2491
+ response: {
2492
+ timestamp: currentDate,
2493
+ modelId: this.modelId,
2494
+ headers: responseHeaders,
2495
+ body: rawResponse
2496
+ }
2497
+ };
2498
+ }
2499
+ };
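Note: the speech model above posts JSON to /audio/speech and returns the binary audio via `createBinaryResponseHandler()`; unsupported `outputFormat` values fall back to mp3 with a warning. A minimal usage sketch; the `tts-1` model id and the output path are assumptions:

import { writeFile } from 'node:fs/promises';
import { createOpenAI } from '@ai-sdk/openai';

const openai = createOpenAI(); // reads OPENAI_API_KEY from the environment
const model = openai.speech('tts-1'); // model id is an assumption

const { audio, warnings } = await model.doGenerate({
  text: 'Hello from the canary build.',
  voice: 'alloy',      // default applied in getArgs above
  outputFormat: 'wav', // one of mp3 | opus | aac | flac | wav | pcm
  speed: 1.25,
});

await writeFile('./speech.wav', Buffer.from(audio)); // audio is the raw binary payload
console.log(warnings);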
2418
2500
 
2419
2501
  // src/openai-provider.ts
2420
2502
  function createOpenAI(options = {}) {
2421
2503
  var _a, _b, _c;
2422
- const baseURL = (_a = (0, import_provider_utils9.withoutTrailingSlash)(options.baseURL)) != null ? _a : "https://api.openai.com/v1";
2504
+ const baseURL = (_a = (0, import_provider_utils10.withoutTrailingSlash)(options.baseURL)) != null ? _a : "https://api.openai.com/v1";
2423
2505
  const compatibility = (_b = options.compatibility) != null ? _b : "compatible";
2424
2506
  const providerName = (_c = options.name) != null ? _c : "openai";
2425
2507
  const getHeaders = () => ({
2426
- Authorization: `Bearer ${(0, import_provider_utils9.loadApiKey)({
2508
+ Authorization: `Bearer ${(0, import_provider_utils10.loadApiKey)({
2427
2509
  apiKey: options.apiKey,
2428
2510
  environmentVariableName: "OPENAI_API_KEY",
2429
2511
  description: "OpenAI"
@@ -2458,6 +2540,18 @@ function createOpenAI(options = {}) {
2458
2540
  headers: getHeaders,
2459
2541
  fetch: options.fetch
2460
2542
  });
2543
+ const createTranscriptionModel = (modelId) => new OpenAITranscriptionModel(modelId, {
2544
+ provider: `${providerName}.transcription`,
2545
+ url: ({ path }) => `${baseURL}${path}`,
2546
+ headers: getHeaders,
2547
+ fetch: options.fetch
2548
+ });
2549
+ const createSpeechModel = (modelId) => new OpenAISpeechModel(modelId, {
2550
+ provider: `${providerName}.speech`,
2551
+ url: ({ path }) => `${baseURL}${path}`,
2552
+ headers: getHeaders,
2553
+ fetch: options.fetch
2554
+ });
2461
2555
  const createLanguageModel = (modelId, settings) => {
2462
2556
  if (new.target) {
2463
2557
  throw new Error(
@@ -2492,6 +2586,10 @@ function createOpenAI(options = {}) {
2492
2586
  provider.textEmbeddingModel = createEmbeddingModel;
2493
2587
  provider.image = createImageModel;
2494
2588
  provider.imageModel = createImageModel;
2589
+ provider.transcription = createTranscriptionModel;
2590
+ provider.transcriptionModel = createTranscriptionModel;
2591
+ provider.speech = createSpeechModel;
2592
+ provider.speechModel = createSpeechModel;
2495
2593
  provider.tools = openaiTools;
2496
2594
  return provider;
2497
2595
  }