@ai-sdk/openai 2.0.0-canary.1 → 2.0.0-canary.11

package/dist/index.js CHANGED
@@ -26,19 +26,18 @@ __export(src_exports, {
 module.exports = __toCommonJS(src_exports);

 // src/openai-provider.ts
-var import_provider_utils9 = require("@ai-sdk/provider-utils");
+var import_provider_utils10 = require("@ai-sdk/provider-utils");

 // src/openai-chat-language-model.ts
 var import_provider3 = require("@ai-sdk/provider");
 var import_provider_utils3 = require("@ai-sdk/provider-utils");
-var import_zod2 = require("zod");
+var import_zod3 = require("zod");

 // src/convert-to-openai-chat-messages.ts
 var import_provider = require("@ai-sdk/provider");
 var import_provider_utils = require("@ai-sdk/provider-utils");
 function convertToOpenAIChatMessages({
   prompt,
-  useLegacyFunctionCalling = false,
   systemMessageMode = "system"
 }) {
   const messages = [];
@@ -79,55 +78,71 @@ function convertToOpenAIChatMessages({
         messages.push({
           role: "user",
           content: content.map((part, index) => {
-            var _a, _b, _c, _d;
+            var _a, _b, _c;
             switch (part.type) {
               case "text": {
                 return { type: "text", text: part.text };
               }
-              case "image": {
-                return {
-                  type: "image_url",
-                  image_url: {
-                    url: part.image instanceof URL ? part.image.toString() : `data:${(_a = part.mimeType) != null ? _a : "image/jpeg"};base64,${(0, import_provider_utils.convertUint8ArrayToBase64)(part.image)}`,
-                    // OpenAI specific extension: image detail
-                    detail: (_c = (_b = part.providerMetadata) == null ? void 0 : _b.openai) == null ? void 0 : _c.imageDetail
-                  }
-                };
-              }
               case "file": {
-                if (part.data instanceof URL) {
-                  throw new import_provider.UnsupportedFunctionalityError({
-                    functionality: "'File content parts with URL data' functionality not supported."
-                  });
-                }
-                switch (part.mimeType) {
-                  case "audio/wav": {
-                    return {
-                      type: "input_audio",
-                      input_audio: { data: part.data, format: "wav" }
-                    };
-                  }
-                  case "audio/mp3":
-                  case "audio/mpeg": {
-                    return {
-                      type: "input_audio",
-                      input_audio: { data: part.data, format: "mp3" }
-                    };
+                if (part.mediaType.startsWith("image/")) {
+                  const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
+                  return {
+                    type: "image_url",
+                    image_url: {
+                      url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${(0, import_provider_utils.convertToBase64)(part.data)}`,
+                      // OpenAI specific extension: image detail
+                      detail: (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.imageDetail
+                    }
+                  };
+                } else if (part.mediaType.startsWith("audio/")) {
+                  if (part.data instanceof URL) {
+                    throw new import_provider.UnsupportedFunctionalityError({
+                      functionality: "audio file parts with URLs"
+                    });
                   }
-                  case "application/pdf": {
-                    return {
-                      type: "file",
-                      file: {
-                        filename: (_d = part.filename) != null ? _d : `part-${index}.pdf`,
-                        file_data: `data:application/pdf;base64,${part.data}`
-                      }
-                    };
+                  switch (part.mediaType) {
+                    case "audio/wav": {
+                      return {
+                        type: "input_audio",
+                        input_audio: {
+                          data: (0, import_provider_utils.convertToBase64)(part.data),
+                          format: "wav"
+                        }
+                      };
+                    }
+                    case "audio/mp3":
+                    case "audio/mpeg": {
+                      return {
+                        type: "input_audio",
+                        input_audio: {
+                          data: (0, import_provider_utils.convertToBase64)(part.data),
+                          format: "mp3"
+                        }
+                      };
+                    }
+                    default: {
+                      throw new import_provider.UnsupportedFunctionalityError({
+                        functionality: `audio content parts with media type ${part.mediaType}`
+                      });
+                    }
                   }
-                  default: {
+                } else if (part.mediaType === "application/pdf") {
+                  if (part.data instanceof URL) {
                     throw new import_provider.UnsupportedFunctionalityError({
-                      functionality: `File content part type ${part.mimeType} in user messages`
+                      functionality: "PDF file parts with URLs"
                     });
                   }
+                  return {
+                    type: "file",
+                    file: {
+                      filename: (_c = part.filename) != null ? _c : `part-${index}.pdf`,
+                      file_data: `data:application/pdf;base64,${part.data}`
+                    }
+                  };
+                } else {
+                  throw new import_provider.UnsupportedFunctionalityError({
+                    functionality: `file part media type ${part.mediaType}`
+                  });
                 }
               }
             }
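The `image` part type is gone in this range: images, audio, and PDFs now all arrive as `file` parts discriminated by `mediaType`. A minimal sketch of the prompt shape the new converter expects (names, URLs, and data here are illustrative, not taken from this diff):

```ts
// Hypothetical prompt shape after this change. The converter above routes
// image/* to image_url, audio/* to input_audio, and application/pdf to a
// file attachment.
const pdfBase64 = '...'; // base64 PDF data; URL data throws for PDFs and audio
const prompt = [
  {
    role: 'user' as const,
    content: [
      { type: 'text' as const, text: 'Describe the attached image and PDF.' },
      {
        // image parts may stay as URLs (see getSupportedUrls further down)
        type: 'file' as const,
        mediaType: 'image/png',
        data: new URL('https://example.com/photo.png'),
      },
      {
        type: 'file' as const,
        mediaType: 'application/pdf',
        data: pdfBase64,
        filename: 'report.pdf', // optional; defaults to part-<index>.pdf
      },
    ],
  },
];
```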
@@ -157,41 +172,20 @@ function convertToOpenAIChatMessages({
             }
           }
         }
-        if (useLegacyFunctionCalling) {
-          if (toolCalls.length > 1) {
-            throw new import_provider.UnsupportedFunctionalityError({
-              functionality: "useLegacyFunctionCalling with multiple tool calls in one message"
-            });
-          }
-          messages.push({
-            role: "assistant",
-            content: text,
-            function_call: toolCalls.length > 0 ? toolCalls[0].function : void 0
-          });
-        } else {
-          messages.push({
-            role: "assistant",
-            content: text,
-            tool_calls: toolCalls.length > 0 ? toolCalls : void 0
-          });
-        }
+        messages.push({
+          role: "assistant",
+          content: text,
+          tool_calls: toolCalls.length > 0 ? toolCalls : void 0
+        });
         break;
       }
       case "tool": {
         for (const toolResponse of content) {
-          if (useLegacyFunctionCalling) {
-            messages.push({
-              role: "function",
-              name: toolResponse.toolName,
-              content: JSON.stringify(toolResponse.result)
-            });
-          } else {
-            messages.push({
-              role: "tool",
-              tool_call_id: toolResponse.toolCallId,
-              content: JSON.stringify(toolResponse.result)
-            });
-          }
+          messages.push({
+            role: "tool",
+            tool_call_id: toolResponse.toolCallId,
+            content: JSON.stringify(toolResponse.result)
+          });
         }
         break;
       }
@@ -204,17 +198,17 @@ function convertToOpenAIChatMessages({
   return { messages, warnings };
 }

-// src/map-openai-chat-logprobs.ts
-function mapOpenAIChatLogProbsOutput(logprobs) {
-  var _a, _b;
-  return (_b = (_a = logprobs == null ? void 0 : logprobs.content) == null ? void 0 : _a.map(({ token, logprob, top_logprobs }) => ({
-    token,
-    logprob,
-    topLogprobs: top_logprobs ? top_logprobs.map(({ token: token2, logprob: logprob2 }) => ({
-      token: token2,
-      logprob: logprob2
-    })) : []
-  }))) != null ? _b : void 0;
+// src/get-response-metadata.ts
+function getResponseMetadata({
+  id,
+  model,
+  created
+}) {
+  return {
+    id: id != null ? id : void 0,
+    modelId: model != null ? model : void 0,
+    timestamp: created != null ? new Date(created * 1e3) : void 0
+  };
 }

 // src/map-openai-finish-reason.ts
@@ -234,18 +228,59 @@ function mapOpenAIFinishReason(finishReason) {
   }
 }

-// src/openai-error.ts
+// src/openai-chat-options.ts
 var import_zod = require("zod");
+var openaiProviderOptions = import_zod.z.object({
+  /**
+   * Modify the likelihood of specified tokens appearing in the completion.
+   *
+   * Accepts a JSON object that maps tokens (specified by their token ID in
+   * the GPT tokenizer) to an associated bias value from -100 to 100.
+   */
+  logitBias: import_zod.z.record(import_zod.z.coerce.number(), import_zod.z.number()).optional(),
+  /**
+   * Whether to enable parallel function calling during tool use. Default to true.
+   */
+  parallelToolCalls: import_zod.z.boolean().optional(),
+  /**
+   * A unique identifier representing your end-user, which can help OpenAI to
+   * monitor and detect abuse.
+   */
+  user: import_zod.z.string().optional(),
+  /**
+   * Reasoning effort for reasoning models. Defaults to `medium`.
+   */
+  reasoningEffort: import_zod.z.enum(["low", "medium", "high"]).optional(),
+  /**
+   * Maximum number of completion tokens to generate. Useful for reasoning models.
+   */
+  maxCompletionTokens: import_zod.z.number().optional(),
+  /**
+   * Whether to enable persistence in responses API.
+   */
+  store: import_zod.z.boolean().optional(),
+  /**
+   * Metadata to associate with the request.
+   */
+  metadata: import_zod.z.record(import_zod.z.string()).optional(),
+  /**
+   * Parameters for prediction mode.
+   */
+  prediction: import_zod.z.record(import_zod.z.any()).optional()
+});
+
+// src/openai-error.ts
+var import_zod2 = require("zod");
 var import_provider_utils2 = require("@ai-sdk/provider-utils");
-var openaiErrorDataSchema = import_zod.z.object({
-  error: import_zod.z.object({
-    message: import_zod.z.string(),
+var openaiErrorDataSchema = import_zod2.z.object({
+  error: import_zod2.z.object({
+    message: import_zod2.z.string(),
     // The additional information below is handled loosely to support
     // OpenAI-compatible providers that have slightly different error
     // responses:
-    type: import_zod.z.string().nullish(),
-    param: import_zod.z.any().nullish(),
-    code: import_zod.z.union([import_zod.z.string(), import_zod.z.number()]).nullish()
+    type: import_zod2.z.string().nullish(),
+    param: import_zod2.z.any().nullish(),
+    code: import_zod2.z.union([import_zod2.z.string(), import_zod2.z.number()]).nullish()
   })
 });
 var openaiFailedResponseHandler = (0, import_provider_utils2.createJsonErrorResponseHandler)({
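These options were previously constructor settings; they are now validated per call against the `openaiProviderOptions` schema above. A hedged usage sketch, assuming the AI SDK `generateText` wiring that this package plugs into:

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

// Assumed usage: per-call options travel under providerOptions.openai and
// are parsed with the zod schema above instead of being read from settings.
const result = await generateText({
  model: openai('o3-mini'),
  prompt: 'Summarize the attached notes.',
  providerOptions: {
    openai: {
      reasoningEffort: 'low',       // enum: low | medium | high
      maxCompletionTokens: 1024,    // mapped to max_completion_tokens
      user: 'user-1234',            // end-user identifier for abuse monitoring
      logitBias: { '50256': -100 }, // token id -> bias in [-100, 100]
    },
  },
});
```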
@@ -253,74 +288,17 @@ var openaiFailedResponseHandler = (0, import_provider_utils2.createJsonErrorResp
   errorToMessage: (data) => data.error.message
 });

-// src/get-response-metadata.ts
-function getResponseMetadata({
-  id,
-  model,
-  created
-}) {
-  return {
-    id: id != null ? id : void 0,
-    modelId: model != null ? model : void 0,
-    timestamp: created != null ? new Date(created * 1e3) : void 0
-  };
-}
-
 // src/openai-prepare-tools.ts
 var import_provider2 = require("@ai-sdk/provider");
 function prepareTools({
-  mode,
-  useLegacyFunctionCalling = false,
+  tools,
+  toolChoice,
   structuredOutputs
 }) {
-  var _a;
-  const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
+  tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
   const toolWarnings = [];
   if (tools == null) {
-    return { tools: void 0, tool_choice: void 0, toolWarnings };
-  }
-  const toolChoice = mode.toolChoice;
-  if (useLegacyFunctionCalling) {
-    const openaiFunctions = [];
-    for (const tool of tools) {
-      if (tool.type === "provider-defined") {
-        toolWarnings.push({ type: "unsupported-tool", tool });
-      } else {
-        openaiFunctions.push({
-          name: tool.name,
-          description: tool.description,
-          parameters: tool.parameters
-        });
-      }
-    }
-    if (toolChoice == null) {
-      return {
-        functions: openaiFunctions,
-        function_call: void 0,
-        toolWarnings
-      };
-    }
-    const type2 = toolChoice.type;
-    switch (type2) {
-      case "auto":
-      case "none":
-      case void 0:
-        return {
-          functions: openaiFunctions,
-          function_call: void 0,
-          toolWarnings
-        };
-      case "required":
-        throw new import_provider2.UnsupportedFunctionalityError({
-          functionality: "useLegacyFunctionCalling and toolChoice: required"
-        });
-      default:
-        return {
-          functions: openaiFunctions,
-          function_call: { name: toolChoice.toolName },
-          toolWarnings
-        };
-    }
+    return { tools: void 0, toolChoice: void 0, toolWarnings };
   }
   const openaiTools2 = [];
   for (const tool of tools) {
@@ -339,18 +317,18 @@ function prepareTools({
     }
   }
   if (toolChoice == null) {
-    return { tools: openaiTools2, tool_choice: void 0, toolWarnings };
+    return { tools: openaiTools2, toolChoice: void 0, toolWarnings };
   }
   const type = toolChoice.type;
   switch (type) {
     case "auto":
     case "none":
     case "required":
-      return { tools: openaiTools2, tool_choice: type, toolWarnings };
+      return { tools: openaiTools2, toolChoice: type, toolWarnings };
     case "tool":
      return {
        tools: openaiTools2,
-        tool_choice: {
+        toolChoice: {
          type: "function",
          function: {
            name: toolChoice.toolName
@@ -361,7 +339,7 @@ function prepareTools({
     default: {
       const _exhaustiveCheck = type;
       throw new import_provider2.UnsupportedFunctionalityError({
-        functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`
+        functionality: `tool choice type: ${_exhaustiveCheck}`
       });
     }
   }
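prepareTools now takes `tools`/`toolChoice` directly (no `mode`) and returns a camelCase `toolChoice`, which the chat model maps back to the wire-format `tool_choice` below. An illustrative sketch of the mapping; this function is internal, so the shapes here are inferred from the surrounding code, not guaranteed API:

```ts
// Hypothetical invocation, assuming a v2-spec function tool definition.
const { tools, toolChoice, toolWarnings } = prepareTools({
  tools: [
    {
      type: 'function',
      name: 'getWeather',
      description: 'Look up the weather for a city',
      parameters: { type: 'object', properties: { city: { type: 'string' } } },
    },
  ],
  toolChoice: { type: 'tool', toolName: 'getWeather' },
  structuredOutputs: false,
});
// tools      -> OpenAI function-tool objects (built in the loop elided above)
// toolChoice -> { type: 'function', function: { name: 'getWeather' } }
```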
@@ -375,26 +353,17 @@ var OpenAIChatLanguageModel = class {
     this.settings = settings;
     this.config = config;
   }
-  get supportsStructuredOutputs() {
-    var _a;
-    return (_a = this.settings.structuredOutputs) != null ? _a : isReasoningModel(this.modelId);
-  }
-  get defaultObjectGenerationMode() {
-    if (isAudioModel(this.modelId)) {
-      return "tool";
-    }
-    return this.supportsStructuredOutputs ? "json" : "tool";
-  }
   get provider() {
     return this.config.provider;
   }
-  get supportsImageUrls() {
-    return !this.settings.downloadImages;
+  async getSupportedUrls() {
+    return {
+      "image/*": [/^https?:\/\/.*$/]
+    };
   }
   getArgs({
-    mode,
     prompt,
-    maxTokens,
+    maxOutputTokens,
     temperature,
     topP,
     topK,
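getSupportedUrls replaces the old `supportsImageUrls` flag: the model now advertises, per media type, which URL patterns it can ingest natively. A sketch of how the calling side might use it (only the method itself appears in this diff; the consumer is assumed):

```ts
// Assumed consumer: the SDK core can check a file part's URL against these
// patterns to decide between passing the URL through and downloading first.
const supported = await model.getSupportedUrls();
// -> { 'image/*': [/^https?:\/\/.*$/] }
// An https image URL matches and is forwarded as image_url; a PDF URL has
// no matching pattern, so it would need to be fetched and inlined as data.
```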
@@ -403,39 +372,33 @@ var OpenAIChatLanguageModel = class {
     stopSequences,
     responseFormat,
     seed,
-    providerMetadata
+    tools,
+    toolChoice,
+    providerOptions
   }) {
-    var _a, _b, _c, _d, _e, _f, _g, _h;
-    const type = mode.type;
+    var _a, _b, _c;
     const warnings = [];
+    const openaiOptions = (_a = (0, import_provider_utils3.parseProviderOptions)({
+      provider: "openai",
+      providerOptions,
+      schema: openaiProviderOptions
+    })) != null ? _a : {};
     if (topK != null) {
       warnings.push({
         type: "unsupported-setting",
         setting: "topK"
       });
     }
-    if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !this.supportsStructuredOutputs) {
+    if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !this.settings.structuredOutputs) {
       warnings.push({
         type: "unsupported-setting",
         setting: "responseFormat",
         details: "JSON response format schema is only supported with structuredOutputs"
       });
     }
-    const useLegacyFunctionCalling = this.settings.useLegacyFunctionCalling;
-    if (useLegacyFunctionCalling && this.settings.parallelToolCalls === true) {
-      throw new import_provider3.UnsupportedFunctionalityError({
-        functionality: "useLegacyFunctionCalling with parallelToolCalls"
-      });
-    }
-    if (useLegacyFunctionCalling && this.supportsStructuredOutputs) {
-      throw new import_provider3.UnsupportedFunctionalityError({
-        functionality: "structuredOutputs with useLegacyFunctionCalling"
-      });
-    }
     const { messages, warnings: messageWarnings } = convertToOpenAIChatMessages(
       {
         prompt,
-        useLegacyFunctionCalling,
         systemMessageMode: getSystemMessageMode(this.modelId)
       }
     );
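Alongside the move to `providerOptions`, the v2 call options rename `maxTokens` to `maxOutputTokens`; it still maps to OpenAI's `max_tokens` (and is auto-promoted to `max_completion_tokens` for reasoning models further down). A hedged caller-side sketch:

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

// Assumed v5-style call: maxOutputTokens replaces maxTokens and becomes
// max_tokens on the wire (or max_completion_tokens for o-series models).
const { text } = await generateText({
  model: openai('gpt-4o'),
  prompt: 'Write a haiku about diffs.',
  maxOutputTokens: 128,
});
```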
@@ -444,35 +407,36 @@ var OpenAIChatLanguageModel = class {
       // model id:
       model: this.modelId,
       // model specific settings:
-      logit_bias: this.settings.logitBias,
-      logprobs: this.settings.logprobs === true || typeof this.settings.logprobs === "number" ? true : void 0,
-      top_logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
-      user: this.settings.user,
-      parallel_tool_calls: this.settings.parallelToolCalls,
+      logit_bias: openaiOptions.logitBias,
+      user: openaiOptions.user,
+      parallel_tool_calls: openaiOptions.parallelToolCalls,
       // standardized settings:
-      max_tokens: maxTokens,
+      max_tokens: maxOutputTokens,
       temperature,
       top_p: topP,
       frequency_penalty: frequencyPenalty,
       presence_penalty: presencePenalty,
-      response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? this.supportsStructuredOutputs && responseFormat.schema != null ? {
-        type: "json_schema",
-        json_schema: {
-          schema: responseFormat.schema,
-          strict: true,
-          name: (_a = responseFormat.name) != null ? _a : "response",
-          description: responseFormat.description
-        }
-      } : { type: "json_object" } : void 0,
+      response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? (
+        // TODO convert into provider option
+        this.settings.structuredOutputs && responseFormat.schema != null ? {
+          type: "json_schema",
+          json_schema: {
+            schema: responseFormat.schema,
+            strict: true,
+            name: (_b = responseFormat.name) != null ? _b : "response",
+            description: responseFormat.description
+          }
+        } : { type: "json_object" }
+      ) : void 0,
       stop: stopSequences,
       seed,
       // openai specific settings:
-      // TODO remove in next major version; we auto-map maxTokens now
-      max_completion_tokens: (_b = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _b.maxCompletionTokens,
-      store: (_c = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _c.store,
-      metadata: (_d = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _d.metadata,
-      prediction: (_e = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _e.prediction,
-      reasoning_effort: (_g = (_f = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _f.reasoningEffort) != null ? _g : this.settings.reasoningEffort,
+      // TODO remove in next major version; we auto-map maxOutputTokens now
+      max_completion_tokens: openaiOptions.maxCompletionTokens,
+      store: openaiOptions.store,
+      metadata: openaiOptions.metadata,
+      prediction: openaiOptions.prediction,
+      reasoning_effort: openaiOptions.reasoningEffort,
       // messages:
       messages
     };
@@ -516,102 +480,39 @@ var OpenAIChatLanguageModel = class {
         message: "logitBias is not supported for reasoning models"
       });
     }
-    if (baseArgs.logprobs != null) {
-      baseArgs.logprobs = void 0;
-      warnings.push({
-        type: "other",
-        message: "logprobs is not supported for reasoning models"
-      });
-    }
-    if (baseArgs.top_logprobs != null) {
-      baseArgs.top_logprobs = void 0;
-      warnings.push({
-        type: "other",
-        message: "topLogprobs is not supported for reasoning models"
-      });
-    }
     if (baseArgs.max_tokens != null) {
       if (baseArgs.max_completion_tokens == null) {
         baseArgs.max_completion_tokens = baseArgs.max_tokens;
       }
       baseArgs.max_tokens = void 0;
     }
-    }
-    switch (type) {
-      case "regular": {
-        const { tools, tool_choice, functions, function_call, toolWarnings } = prepareTools({
-          mode,
-          useLegacyFunctionCalling,
-          structuredOutputs: this.supportsStructuredOutputs
+    } else if (this.modelId.startsWith("gpt-4o-search-preview") || this.modelId.startsWith("gpt-4o-mini-search-preview")) {
+      if (baseArgs.temperature != null) {
+        baseArgs.temperature = void 0;
+        warnings.push({
+          type: "unsupported-setting",
+          setting: "temperature",
+          details: "temperature is not supported for the search preview models and has been removed."
         });
-        return {
-          args: {
-            ...baseArgs,
-            tools,
-            tool_choice,
-            functions,
-            function_call
-          },
-          warnings: [...warnings, ...toolWarnings]
-        };
-      }
-      case "object-json": {
-        return {
-          args: {
-            ...baseArgs,
-            response_format: this.supportsStructuredOutputs && mode.schema != null ? {
-              type: "json_schema",
-              json_schema: {
-                schema: mode.schema,
-                strict: true,
-                name: (_h = mode.name) != null ? _h : "response",
-                description: mode.description
-              }
-            } : { type: "json_object" }
-          },
-          warnings
-        };
-      }
-      case "object-tool": {
-        return {
-          args: useLegacyFunctionCalling ? {
-            ...baseArgs,
-            function_call: {
-              name: mode.tool.name
-            },
-            functions: [
-              {
-                name: mode.tool.name,
-                description: mode.tool.description,
-                parameters: mode.tool.parameters
-              }
-            ]
-          } : {
-            ...baseArgs,
-            tool_choice: {
-              type: "function",
-              function: { name: mode.tool.name }
-            },
-            tools: [
-              {
-                type: "function",
-                function: {
-                  name: mode.tool.name,
-                  description: mode.tool.description,
-                  parameters: mode.tool.parameters,
-                  strict: this.supportsStructuredOutputs ? true : void 0
-                }
-              }
-            ]
-          },
-          warnings
-        };
-      }
-      default: {
-        const _exhaustiveCheck = type;
-        throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
       }
     }
+    const {
+      tools: openaiTools2,
+      toolChoice: openaiToolChoice,
+      toolWarnings
+    } = prepareTools({
+      tools,
+      toolChoice,
+      structuredOutputs: (_c = this.settings.structuredOutputs) != null ? _c : false
+    });
+    return {
+      args: {
+        ...baseArgs,
+        tools: openaiTools2,
+        tool_choice: openaiToolChoice
+      },
+      warnings: [...warnings, ...toolWarnings]
+    };
   }
   async doGenerate(options) {
     var _a, _b, _c, _d, _e, _f, _g, _h;
@@ -634,10 +535,23 @@ var OpenAIChatLanguageModel = class {
       abortSignal: options.abortSignal,
       fetch: this.config.fetch
     });
-    const { messages: rawPrompt, ...rawSettings } = body;
     const choice = response.choices[0];
-    const completionTokenDetails = (_a = response.usage) == null ? void 0 : _a.completion_tokens_details;
-    const promptTokenDetails = (_b = response.usage) == null ? void 0 : _b.prompt_tokens_details;
+    const content = [];
+    const text = choice.message.content;
+    if (text != null && text.length > 0) {
+      content.push({ type: "text", text });
+    }
+    for (const toolCall of (_a = choice.message.tool_calls) != null ? _a : []) {
+      content.push({
+        type: "tool-call",
+        toolCallType: "function",
+        toolCallId: (_b = toolCall.id) != null ? _b : (0, import_provider_utils3.generateId)(),
+        toolName: toolCall.function.name,
+        args: toolCall.function.arguments
+      });
+    }
+    const completionTokenDetails = (_c = response.usage) == null ? void 0 : _c.completion_tokens_details;
+    const promptTokenDetails = (_d = response.usage) == null ? void 0 : _d.prompt_tokens_details;
     const providerMetadata = { openai: {} };
     if ((completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null) {
       providerMetadata.openai.reasoningTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens;
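doGenerate now assembles a single ordered `content` array instead of separate `text`/`toolCalls` result fields. Roughly, a response with one text part and one tool call would come back shaped like this (values illustrative):

```ts
// Sketch of the new v2 result shape built by the loop above.
const result = {
  content: [
    { type: 'text', text: 'Let me check the weather.' },
    {
      type: 'tool-call',
      toolCallType: 'function',
      toolCallId: 'call_abc123', // falls back to generateId() when missing
      toolName: 'getWeather',
      args: '{"city":"Berlin"}',
    },
  ],
  // ...plus finishReason, usage, request, response, warnings, providerMetadata
};
```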
@@ -652,81 +566,23 @@ var OpenAIChatLanguageModel = class {
       providerMetadata.openai.cachedPromptTokens = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens;
     }
     return {
-      text: (_c = choice.message.content) != null ? _c : void 0,
-      toolCalls: this.settings.useLegacyFunctionCalling && choice.message.function_call ? [
-        {
-          toolCallType: "function",
-          toolCallId: (0, import_provider_utils3.generateId)(),
-          toolName: choice.message.function_call.name,
-          args: choice.message.function_call.arguments
-        }
-      ] : (_d = choice.message.tool_calls) == null ? void 0 : _d.map((toolCall) => {
-        var _a2;
-        return {
-          toolCallType: "function",
-          toolCallId: (_a2 = toolCall.id) != null ? _a2 : (0, import_provider_utils3.generateId)(),
-          toolName: toolCall.function.name,
-          args: toolCall.function.arguments
-        };
-      }),
+      content,
       finishReason: mapOpenAIFinishReason(choice.finish_reason),
       usage: {
-        promptTokens: (_f = (_e = response.usage) == null ? void 0 : _e.prompt_tokens) != null ? _f : NaN,
-        completionTokens: (_h = (_g = response.usage) == null ? void 0 : _g.completion_tokens) != null ? _h : NaN
+        inputTokens: (_f = (_e = response.usage) == null ? void 0 : _e.prompt_tokens) != null ? _f : void 0,
+        outputTokens: (_h = (_g = response.usage) == null ? void 0 : _g.completion_tokens) != null ? _h : void 0
+      },
+      request: { body },
+      response: {
+        ...getResponseMetadata(response),
+        headers: responseHeaders,
+        body: rawResponse
       },
-      rawCall: { rawPrompt, rawSettings },
-      rawResponse: { headers: responseHeaders, body: rawResponse },
-      request: { body: JSON.stringify(body) },
-      response: getResponseMetadata(response),
       warnings,
-      logprobs: mapOpenAIChatLogProbsOutput(choice.logprobs),
       providerMetadata
     };
   }
   async doStream(options) {
-    if (this.settings.simulateStreaming) {
-      const result = await this.doGenerate(options);
-      const simulatedStream = new ReadableStream({
-        start(controller) {
-          controller.enqueue({ type: "response-metadata", ...result.response });
-          if (result.text) {
-            controller.enqueue({
-              type: "text-delta",
-              textDelta: result.text
-            });
-          }
-          if (result.toolCalls) {
-            for (const toolCall of result.toolCalls) {
-              controller.enqueue({
-                type: "tool-call-delta",
-                toolCallType: "function",
-                toolCallId: toolCall.toolCallId,
-                toolName: toolCall.toolName,
-                argsTextDelta: toolCall.args
-              });
-              controller.enqueue({
-                type: "tool-call",
-                ...toolCall
-              });
-            }
-          }
-          controller.enqueue({
-            type: "finish",
-            finishReason: result.finishReason,
-            usage: result.usage,
-            logprobs: result.logprobs,
-            providerMetadata: result.providerMetadata
-          });
-          controller.close();
-        }
-      });
-      return {
-        stream: simulatedStream,
-        rawCall: result.rawCall,
-        rawResponse: result.rawResponse,
-        warnings: result.warnings
-      };
-    }
     const { args, warnings } = this.getArgs(options);
     const body = {
       ...args,
@@ -751,17 +607,18 @@ var OpenAIChatLanguageModel = class {
     const { messages: rawPrompt, ...rawSettings } = args;
     const toolCalls = [];
     let finishReason = "unknown";
-    let usage = {
-      promptTokens: void 0,
-      completionTokens: void 0
+    const usage = {
+      inputTokens: void 0,
+      outputTokens: void 0
     };
-    let logprobs;
     let isFirstChunk = true;
-    const { useLegacyFunctionCalling } = this.settings;
     const providerMetadata = { openai: {} };
     return {
       stream: response.pipeThrough(
         new TransformStream({
+          start(controller) {
+            controller.enqueue({ type: "stream-start", warnings });
+          },
           transform(chunk, controller) {
             var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
             if (!chunk.success) {
@@ -789,10 +646,8 @@ var OpenAIChatLanguageModel = class {
               prompt_tokens_details,
               completion_tokens_details
             } = value.usage;
-            usage = {
-              promptTokens: prompt_tokens != null ? prompt_tokens : void 0,
-              completionTokens: completion_tokens != null ? completion_tokens : void 0
-            };
+            usage.inputTokens = prompt_tokens != null ? prompt_tokens : void 0;
+            usage.outputTokens = completion_tokens != null ? completion_tokens : void 0;
             if ((completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens) != null) {
               providerMetadata.openai.reasoningTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens;
             }
@@ -816,27 +671,12 @@ var OpenAIChatLanguageModel = class {
             const delta = choice.delta;
             if (delta.content != null) {
               controller.enqueue({
-                type: "text-delta",
-                textDelta: delta.content
+                type: "text",
+                text: delta.content
               });
             }
-            const mappedLogprobs = mapOpenAIChatLogProbsOutput(
-              choice == null ? void 0 : choice.logprobs
-            );
-            if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
-              if (logprobs === void 0) logprobs = [];
-              logprobs.push(...mappedLogprobs);
-            }
-            const mappedToolCalls = useLegacyFunctionCalling && delta.function_call != null ? [
-              {
-                type: "function",
-                id: (0, import_provider_utils3.generateId)(),
-                function: delta.function_call,
-                index: 0
-              }
-            ] : delta.tool_calls;
-            if (mappedToolCalls != null) {
-              for (const toolCallDelta of mappedToolCalls) {
+            if (delta.tool_calls != null) {
+              for (const toolCallDelta of delta.tool_calls) {
                 const index = toolCallDelta.index;
                 if (toolCalls[index] == null) {
                   if (toolCallDelta.type !== "function") {
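Two stream-protocol changes in the hunks above: the stream now opens with a `stream-start` part carrying the warnings, and text chunks are `{ type: "text", text }` rather than `{ type: "text-delta", textDelta }`. A hedged consumer sketch (options construction omitted; async iteration over the ReadableStream assumes a modern runtime):

```ts
// Assumed consumer of the renamed stream parts.
const { stream } = await model.doStream(options);
for await (const part of stream) {
  switch (part.type) {
    case 'stream-start': // new in this diff: warnings arrive first
      console.warn(part.warnings);
      break;
    case 'text': // was text-delta / textDelta
      process.stdout.write(part.text);
      break;
    case 'finish': // usage now { inputTokens, outputTokens }
      console.log(part.finishReason, part.usage);
      break;
  }
}
```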
@@ -918,125 +758,82 @@ var OpenAIChatLanguageModel = class {
           }
         },
         flush(controller) {
-          var _a, _b;
           controller.enqueue({
             type: "finish",
             finishReason,
-            logprobs,
-            usage: {
-              promptTokens: (_a = usage.promptTokens) != null ? _a : NaN,
-              completionTokens: (_b = usage.completionTokens) != null ? _b : NaN
-            },
+            usage,
             ...providerMetadata != null ? { providerMetadata } : {}
           });
         }
       })
     ),
-    rawCall: { rawPrompt, rawSettings },
-    rawResponse: { headers: responseHeaders },
-    request: { body: JSON.stringify(body) },
-    warnings
+    request: { body },
+    response: { headers: responseHeaders }
   };
 }
};
-var openaiTokenUsageSchema = import_zod2.z.object({
-  prompt_tokens: import_zod2.z.number().nullish(),
-  completion_tokens: import_zod2.z.number().nullish(),
-  prompt_tokens_details: import_zod2.z.object({
-    cached_tokens: import_zod2.z.number().nullish()
+var openaiTokenUsageSchema = import_zod3.z.object({
+  prompt_tokens: import_zod3.z.number().nullish(),
+  completion_tokens: import_zod3.z.number().nullish(),
+  prompt_tokens_details: import_zod3.z.object({
+    cached_tokens: import_zod3.z.number().nullish()
   }).nullish(),
-  completion_tokens_details: import_zod2.z.object({
-    reasoning_tokens: import_zod2.z.number().nullish(),
-    accepted_prediction_tokens: import_zod2.z.number().nullish(),
-    rejected_prediction_tokens: import_zod2.z.number().nullish()
+  completion_tokens_details: import_zod3.z.object({
+    reasoning_tokens: import_zod3.z.number().nullish(),
+    accepted_prediction_tokens: import_zod3.z.number().nullish(),
+    rejected_prediction_tokens: import_zod3.z.number().nullish()
   }).nullish()
 }).nullish();
-var openaiChatResponseSchema = import_zod2.z.object({
-  id: import_zod2.z.string().nullish(),
-  created: import_zod2.z.number().nullish(),
-  model: import_zod2.z.string().nullish(),
-  choices: import_zod2.z.array(
-    import_zod2.z.object({
-      message: import_zod2.z.object({
-        role: import_zod2.z.literal("assistant").nullish(),
-        content: import_zod2.z.string().nullish(),
-        function_call: import_zod2.z.object({
-          arguments: import_zod2.z.string(),
-          name: import_zod2.z.string()
-        }).nullish(),
-        tool_calls: import_zod2.z.array(
-          import_zod2.z.object({
-            id: import_zod2.z.string().nullish(),
-            type: import_zod2.z.literal("function"),
-            function: import_zod2.z.object({
-              name: import_zod2.z.string(),
-              arguments: import_zod2.z.string()
+var openaiChatResponseSchema = import_zod3.z.object({
+  id: import_zod3.z.string().nullish(),
+  created: import_zod3.z.number().nullish(),
+  model: import_zod3.z.string().nullish(),
+  choices: import_zod3.z.array(
+    import_zod3.z.object({
+      message: import_zod3.z.object({
+        role: import_zod3.z.literal("assistant").nullish(),
+        content: import_zod3.z.string().nullish(),
+        tool_calls: import_zod3.z.array(
+          import_zod3.z.object({
+            id: import_zod3.z.string().nullish(),
+            type: import_zod3.z.literal("function"),
+            function: import_zod3.z.object({
+              name: import_zod3.z.string(),
+              arguments: import_zod3.z.string()
            })
          })
        ).nullish()
      }),
-      index: import_zod2.z.number(),
-      logprobs: import_zod2.z.object({
-        content: import_zod2.z.array(
-          import_zod2.z.object({
-            token: import_zod2.z.string(),
-            logprob: import_zod2.z.number(),
-            top_logprobs: import_zod2.z.array(
-              import_zod2.z.object({
-                token: import_zod2.z.string(),
-                logprob: import_zod2.z.number()
-              })
-            )
-          })
-        ).nullable()
-      }).nullish(),
-      finish_reason: import_zod2.z.string().nullish()
+      index: import_zod3.z.number(),
+      finish_reason: import_zod3.z.string().nullish()
    })
  ),
  usage: openaiTokenUsageSchema
 });
-var openaiChatChunkSchema = import_zod2.z.union([
-  import_zod2.z.object({
-    id: import_zod2.z.string().nullish(),
-    created: import_zod2.z.number().nullish(),
-    model: import_zod2.z.string().nullish(),
-    choices: import_zod2.z.array(
-      import_zod2.z.object({
-        delta: import_zod2.z.object({
-          role: import_zod2.z.enum(["assistant"]).nullish(),
-          content: import_zod2.z.string().nullish(),
-          function_call: import_zod2.z.object({
-            name: import_zod2.z.string().optional(),
-            arguments: import_zod2.z.string().optional()
-          }).nullish(),
-          tool_calls: import_zod2.z.array(
-            import_zod2.z.object({
-              index: import_zod2.z.number(),
-              id: import_zod2.z.string().nullish(),
-              type: import_zod2.z.literal("function").optional(),
-              function: import_zod2.z.object({
-                name: import_zod2.z.string().nullish(),
-                arguments: import_zod2.z.string().nullish()
+var openaiChatChunkSchema = import_zod3.z.union([
+  import_zod3.z.object({
+    id: import_zod3.z.string().nullish(),
+    created: import_zod3.z.number().nullish(),
+    model: import_zod3.z.string().nullish(),
+    choices: import_zod3.z.array(
+      import_zod3.z.object({
+        delta: import_zod3.z.object({
+          role: import_zod3.z.enum(["assistant"]).nullish(),
+          content: import_zod3.z.string().nullish(),
+          tool_calls: import_zod3.z.array(
+            import_zod3.z.object({
+              index: import_zod3.z.number(),
+              id: import_zod3.z.string().nullish(),
+              type: import_zod3.z.literal("function").optional(),
+              function: import_zod3.z.object({
+                name: import_zod3.z.string().nullish(),
+                arguments: import_zod3.z.string().nullish()
              })
            })
          ).nullish()
        }).nullish(),
-        logprobs: import_zod2.z.object({
-          content: import_zod2.z.array(
-            import_zod2.z.object({
-              token: import_zod2.z.string(),
-              logprob: import_zod2.z.number(),
-              top_logprobs: import_zod2.z.array(
-                import_zod2.z.object({
-                  token: import_zod2.z.string(),
-                  logprob: import_zod2.z.number()
-                })
-              )
-            })
-          ).nullable()
-        }).nullish(),
-        finish_reason: import_zod2.z.string().nullable().optional(),
-        index: import_zod2.z.number()
+        finish_reason: import_zod3.z.string().nullable().optional(),
+        index: import_zod3.z.number()
      })
    ),
    usage: openaiTokenUsageSchema
@@ -1044,10 +841,7 @@ var openaiChatChunkSchema = import_zod3.z.union([
   openaiErrorDataSchema
 ]);
 function isReasoningModel(modelId) {
-  return modelId === "o1" || modelId.startsWith("o1-") || modelId === "o3" || modelId.startsWith("o3-");
-}
-function isAudioModel(modelId) {
-  return modelId.startsWith("gpt-4o-audio-preview");
+  return modelId.startsWith("o");
 }
 function getSystemMessageMode(modelId) {
   var _a, _b;
@@ -1078,9 +872,8 @@ var reasoningModels = {
 };

 // src/openai-completion-language-model.ts
-var import_provider5 = require("@ai-sdk/provider");
 var import_provider_utils4 = require("@ai-sdk/provider-utils");
-var import_zod3 = require("zod");
+var import_zod4 = require("zod");

 // src/convert-to-openai-completion-prompt.ts
 var import_provider4 = require("@ai-sdk/provider");
@@ -1114,13 +907,8 @@ function convertToOpenAICompletionPrompt({
           case "text": {
             return part.text;
           }
-          case "image": {
-            throw new import_provider4.UnsupportedFunctionalityError({
-              functionality: "images"
-            });
-          }
         }
-      }).join("");
+      }).filter(Boolean).join("");
       text += `${user}:
 ${userMessage}

@@ -1166,25 +954,10 @@ ${user}:`]
   };
 }

-// src/map-openai-completion-logprobs.ts
-function mapOpenAICompletionLogProbs(logprobs) {
-  return logprobs == null ? void 0 : logprobs.tokens.map((token, index) => ({
-    token,
-    logprob: logprobs.token_logprobs[index],
-    topLogprobs: logprobs.top_logprobs ? Object.entries(logprobs.top_logprobs[index]).map(
-      ([token2, logprob]) => ({
-        token: token2,
-        logprob
-      })
-    ) : []
-  }));
-}
-
 // src/openai-completion-language-model.ts
 var OpenAICompletionLanguageModel = class {
   constructor(modelId, settings, config) {
     this.specificationVersion = "v2";
-    this.defaultObjectGenerationMode = void 0;
     this.modelId = modelId;
     this.settings = settings;
     this.config = config;
@@ -1192,11 +965,15 @@ var OpenAICompletionLanguageModel = class {
   get provider() {
     return this.config.provider;
   }
+  async getSupportedUrls() {
+    return {
+      // no supported urls for completion models
+    };
+  }
   getArgs({
-    mode,
     inputFormat,
     prompt,
-    maxTokens,
+    maxOutputTokens,
     temperature,
     topP,
     topK,
@@ -1204,16 +981,19 @@ var OpenAICompletionLanguageModel = class {
     presencePenalty,
     stopSequences: userStopSequences,
     responseFormat,
+    tools,
+    toolChoice,
     seed
   }) {
-    var _a;
-    const type = mode.type;
     const warnings = [];
     if (topK != null) {
-      warnings.push({
-        type: "unsupported-setting",
-        setting: "topK"
-      });
+      warnings.push({ type: "unsupported-setting", setting: "topK" });
+    }
+    if (tools == null ? void 0 : tools.length) {
+      warnings.push({ type: "unsupported-setting", setting: "tools" });
+    }
+    if (toolChoice != null) {
+      warnings.push({ type: "unsupported-setting", setting: "toolChoice" });
     }
     if (responseFormat != null && responseFormat.type !== "text") {
       warnings.push({
@@ -1224,56 +1004,29 @@ var OpenAICompletionLanguageModel = class {
     }
     const { prompt: completionPrompt, stopSequences } = convertToOpenAICompletionPrompt({ prompt, inputFormat });
     const stop = [...stopSequences != null ? stopSequences : [], ...userStopSequences != null ? userStopSequences : []];
-    const baseArgs = {
-      // model id:
-      model: this.modelId,
-      // model specific settings:
-      echo: this.settings.echo,
-      logit_bias: this.settings.logitBias,
-      logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
-      suffix: this.settings.suffix,
-      user: this.settings.user,
-      // standardized settings:
-      max_tokens: maxTokens,
-      temperature,
-      top_p: topP,
-      frequency_penalty: frequencyPenalty,
-      presence_penalty: presencePenalty,
-      seed,
-      // prompt:
-      prompt: completionPrompt,
-      // stop sequences:
-      stop: stop.length > 0 ? stop : void 0
+    return {
+      args: {
+        // model id:
+        model: this.modelId,
+        // model specific settings:
+        echo: this.settings.echo,
+        logit_bias: this.settings.logitBias,
+        suffix: this.settings.suffix,
+        user: this.settings.user,
+        // standardized settings:
+        max_tokens: maxOutputTokens,
+        temperature,
+        top_p: topP,
+        frequency_penalty: frequencyPenalty,
+        presence_penalty: presencePenalty,
+        seed,
+        // prompt:
+        prompt: completionPrompt,
+        // stop sequences:
+        stop: stop.length > 0 ? stop : void 0
+      },
+      warnings
     };
-    switch (type) {
-      case "regular": {
-        if ((_a = mode.tools) == null ? void 0 : _a.length) {
-          throw new import_provider5.UnsupportedFunctionalityError({
-            functionality: "tools"
-          });
-        }
-        if (mode.toolChoice) {
-          throw new import_provider5.UnsupportedFunctionalityError({
-            functionality: "toolChoice"
-          });
-        }
-        return { args: baseArgs, warnings };
-      }
-      case "object-json": {
-        throw new import_provider5.UnsupportedFunctionalityError({
-          functionality: "object-json mode"
-        });
-      }
-      case "object-tool": {
-        throw new import_provider5.UnsupportedFunctionalityError({
-          functionality: "object-tool mode"
-        });
-      }
-      default: {
-        const _exhaustiveCheck = type;
-        throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
-      }
-    }
   }
   async doGenerate(options) {
     const { args, warnings } = this.getArgs(options);
@@ -1295,21 +1048,21 @@ var OpenAICompletionLanguageModel = class {
       abortSignal: options.abortSignal,
       fetch: this.config.fetch
     });
-    const { prompt: rawPrompt, ...rawSettings } = args;
     const choice = response.choices[0];
     return {
-      text: choice.text,
+      content: [{ type: "text", text: choice.text }],
       usage: {
-        promptTokens: response.usage.prompt_tokens,
-        completionTokens: response.usage.completion_tokens
+        inputTokens: response.usage.prompt_tokens,
+        outputTokens: response.usage.completion_tokens
       },
       finishReason: mapOpenAIFinishReason(choice.finish_reason),
-      logprobs: mapOpenAICompletionLogProbs(choice.logprobs),
-      rawCall: { rawPrompt, rawSettings },
-      rawResponse: { headers: responseHeaders, body: rawResponse },
-      response: getResponseMetadata(response),
-      warnings,
-      request: { body: JSON.stringify(args) }
+      request: { body: args },
+      response: {
+        ...getResponseMetadata(response),
+        headers: responseHeaders,
+        body: rawResponse
+      },
+      warnings
     };
   }
   async doStream(options) {
@@ -1334,17 +1087,18 @@ var OpenAICompletionLanguageModel = class {
       abortSignal: options.abortSignal,
       fetch: this.config.fetch
     });
-    const { prompt: rawPrompt, ...rawSettings } = args;
     let finishReason = "unknown";
-    let usage = {
-      promptTokens: Number.NaN,
-      completionTokens: Number.NaN
+    const usage = {
+      inputTokens: void 0,
+      outputTokens: void 0
     };
-    let logprobs;
     let isFirstChunk = true;
     return {
       stream: response.pipeThrough(
         new TransformStream({
+          start(controller) {
+            controller.enqueue({ type: "stream-start", warnings });
+          },
           transform(chunk, controller) {
             if (!chunk.success) {
               finishReason = "error";
@@ -1365,10 +1119,8 @@ var OpenAICompletionLanguageModel = class {
             });
           }
           if (value.usage != null) {
-            usage = {
-              promptTokens: value.usage.prompt_tokens,
-              completionTokens: value.usage.completion_tokens
-            };
+            usage.inputTokens = value.usage.prompt_tokens;
+            usage.outputTokens = value.usage.completion_tokens;
           }
           const choice = value.choices[0];
           if ((choice == null ? void 0 : choice.finish_reason) != null) {
@@ -1376,87 +1128,84 @@ var OpenAICompletionLanguageModel = class {
           }
           if ((choice == null ? void 0 : choice.text) != null) {
             controller.enqueue({
-              type: "text-delta",
-              textDelta: choice.text
+              type: "text",
+              text: choice.text
             });
           }
-          const mappedLogprobs = mapOpenAICompletionLogProbs(
-            choice == null ? void 0 : choice.logprobs
-          );
-          if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
-            if (logprobs === void 0) logprobs = [];
-            logprobs.push(...mappedLogprobs);
-          }
         },
         flush(controller) {
           controller.enqueue({
             type: "finish",
             finishReason,
-            logprobs,
             usage
           });
         }
       })
     ),
-    rawCall: { rawPrompt, rawSettings },
-    rawResponse: { headers: responseHeaders },
-    warnings,
-    request: { body: JSON.stringify(body) }
+    request: { body },
+    response: { headers: responseHeaders }
   };
 }
};
-var openaiCompletionResponseSchema = import_zod3.z.object({
-  id: import_zod3.z.string().nullish(),
-  created: import_zod3.z.number().nullish(),
-  model: import_zod3.z.string().nullish(),
-  choices: import_zod3.z.array(
-    import_zod3.z.object({
-      text: import_zod3.z.string(),
-      finish_reason: import_zod3.z.string(),
-      logprobs: import_zod3.z.object({
-        tokens: import_zod3.z.array(import_zod3.z.string()),
-        token_logprobs: import_zod3.z.array(import_zod3.z.number()),
-        top_logprobs: import_zod3.z.array(import_zod3.z.record(import_zod3.z.string(), import_zod3.z.number())).nullable()
-      }).nullish()
+var openaiCompletionResponseSchema = import_zod4.z.object({
+  id: import_zod4.z.string().nullish(),
+  created: import_zod4.z.number().nullish(),
+  model: import_zod4.z.string().nullish(),
+  choices: import_zod4.z.array(
+    import_zod4.z.object({
+      text: import_zod4.z.string(),
+      finish_reason: import_zod4.z.string()
    })
  ),
-  usage: import_zod3.z.object({
-    prompt_tokens: import_zod3.z.number(),
-    completion_tokens: import_zod3.z.number()
+  usage: import_zod4.z.object({
+    prompt_tokens: import_zod4.z.number(),
+    completion_tokens: import_zod4.z.number()
  })
 });
-var openaiCompletionChunkSchema = import_zod3.z.union([
-  import_zod3.z.object({
-    id: import_zod3.z.string().nullish(),
-    created: import_zod3.z.number().nullish(),
-    model: import_zod3.z.string().nullish(),
-    choices: import_zod3.z.array(
-      import_zod3.z.object({
-        text: import_zod3.z.string(),
-        finish_reason: import_zod3.z.string().nullish(),
-        index: import_zod3.z.number(),
-        logprobs: import_zod3.z.object({
-          tokens: import_zod3.z.array(import_zod3.z.string()),
-          token_logprobs: import_zod3.z.array(import_zod3.z.number()),
-          top_logprobs: import_zod3.z.array(import_zod3.z.record(import_zod3.z.string(), import_zod3.z.number())).nullable()
-        }).nullish()
+var openaiCompletionChunkSchema = import_zod4.z.union([
+  import_zod4.z.object({
+    id: import_zod4.z.string().nullish(),
+    created: import_zod4.z.number().nullish(),
+    model: import_zod4.z.string().nullish(),
+    choices: import_zod4.z.array(
+      import_zod4.z.object({
+        text: import_zod4.z.string(),
+        finish_reason: import_zod4.z.string().nullish(),
+        index: import_zod4.z.number()
      })
    ),
-    usage: import_zod3.z.object({
-      prompt_tokens: import_zod3.z.number(),
-      completion_tokens: import_zod3.z.number()
+    usage: import_zod4.z.object({
+      prompt_tokens: import_zod4.z.number(),
+      completion_tokens: import_zod4.z.number()
    }).nullish()
  }),
  openaiErrorDataSchema
 ]);

 // src/openai-embedding-model.ts
-var import_provider6 = require("@ai-sdk/provider");
+var import_provider5 = require("@ai-sdk/provider");
 var import_provider_utils5 = require("@ai-sdk/provider-utils");
-var import_zod4 = require("zod");
+var import_zod6 = require("zod");
+
+// src/openai-embedding-options.ts
+var import_zod5 = require("zod");
+var openaiEmbeddingProviderOptions = import_zod5.z.object({
+  /**
+  The number of dimensions the resulting output embeddings should have.
+  Only supported in text-embedding-3 and later models.
+  */
+  dimensions: import_zod5.z.number().optional(),
+  /**
+  A unique identifier representing your end-user, which can help OpenAI to
+  monitor and detect abuse. Learn more.
+  */
+  user: import_zod5.z.string().optional()
+});
+
+// src/openai-embedding-model.ts
 var OpenAIEmbeddingModel = class {
   constructor(modelId, settings, config) {
-    this.specificationVersion = "v1";
+    this.specificationVersion = "v2";
     this.modelId = modelId;
     this.settings = settings;
     this.config = config;
@@ -1475,17 +1224,28 @@ var OpenAIEmbeddingModel = class {
   async doEmbed({
     values,
     headers,
-    abortSignal
+    abortSignal,
+    providerOptions
   }) {
+    var _a;
     if (values.length > this.maxEmbeddingsPerCall) {
-      throw new import_provider6.TooManyEmbeddingValuesForCallError({
+      throw new import_provider5.TooManyEmbeddingValuesForCallError({
        provider: this.provider,
        modelId: this.modelId,
        maxEmbeddingsPerCall: this.maxEmbeddingsPerCall,
        values
      });
    }
-    const { responseHeaders, value: response } = await (0, import_provider_utils5.postJsonToApi)({
+    const openaiOptions = (_a = (0, import_provider_utils5.parseProviderOptions)({
+      provider: "openai",
+      providerOptions,
+      schema: openaiEmbeddingProviderOptions
+    })) != null ? _a : {};
+    const {
+      responseHeaders,
+      value: response,
+      rawValue
+    } = await (0, import_provider_utils5.postJsonToApi)({
      url: this.config.url({
        path: "/embeddings",
        modelId: this.modelId
@@ -1495,8 +1255,8 @@ var OpenAIEmbeddingModel = class {
       model: this.modelId,
       input: values,
       encoding_format: "float",
-      dimensions: this.settings.dimensions,
-      user: this.settings.user
+      dimensions: openaiOptions.dimensions,
+      user: openaiOptions.user
     },
     failedResponseHandler: openaiFailedResponseHandler,
     successfulResponseHandler: (0, import_provider_utils5.createJsonResponseHandler)(
@@ -1508,18 +1268,18 @@ var OpenAIEmbeddingModel = class {
     return {
       embeddings: response.data.map((item) => item.embedding),
       usage: response.usage ? { tokens: response.usage.prompt_tokens } : void 0,
-      rawResponse: { headers: responseHeaders }
+      response: { headers: responseHeaders, body: rawValue }
     };
   }
 };
-var openaiTextEmbeddingResponseSchema = import_zod4.z.object({
-  data: import_zod4.z.array(import_zod4.z.object({ embedding: import_zod4.z.array(import_zod4.z.number()) })),
-  usage: import_zod4.z.object({ prompt_tokens: import_zod4.z.number() }).nullish()
+var openaiTextEmbeddingResponseSchema = import_zod6.z.object({
+  data: import_zod6.z.array(import_zod6.z.object({ embedding: import_zod6.z.array(import_zod6.z.number()) })),
+  usage: import_zod6.z.object({ prompt_tokens: import_zod6.z.number() }).nullish()
 });

 // src/openai-image-model.ts
 var import_provider_utils6 = require("@ai-sdk/provider-utils");
-var import_zod5 = require("zod");
+var import_zod7 = require("zod");

 // src/openai-image-settings.ts
 var modelMaxImagesPerCall = {
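Embedding `dimensions` and `user` also moved from constructor settings to per-call provider options. A usage sketch, assuming the AI SDK `embed` helper on the caller side:

```ts
import { embed } from 'ai';
import { openai } from '@ai-sdk/openai';

// Assumed usage: values are validated against openaiEmbeddingProviderOptions
// above and sent as dimensions/user in the /embeddings request body.
const { embedding } = await embed({
  model: openai.embedding('text-embedding-3-small'),
  value: 'sunny day at the beach',
  providerOptions: {
    openai: { dimensions: 512, user: 'user-1234' },
  },
});
```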
@@ -1597,13 +1357,13 @@ var OpenAIImageModel = class {
1597
1357
  };
1598
1358
  }
1599
1359
  };
1600
- var openaiImageResponseSchema = import_zod5.z.object({
1601
- data: import_zod5.z.array(import_zod5.z.object({ b64_json: import_zod5.z.string() }))
1360
+ var openaiImageResponseSchema = import_zod7.z.object({
1361
+ data: import_zod7.z.array(import_zod7.z.object({ b64_json: import_zod7.z.string() }))
1602
1362
  });
1603
1363
 
1604
1364
  // src/openai-tools.ts
1605
- var import_zod6 = require("zod");
1606
- var WebSearchPreviewParameters = import_zod6.z.object({});
1365
+ var import_zod8 = require("zod");
1366
+ var WebSearchPreviewParameters = import_zod8.z.object({});
1607
1367
  function webSearchPreviewTool({
1608
1368
  searchContextSize,
1609
1369
  userLocation
@@ -1622,13 +1382,181 @@ var openaiTools = {
1622
1382
  webSearchPreview: webSearchPreviewTool
1623
1383
  };
1624
1384
 
1385
+ // src/openai-transcription-model.ts
+ var import_provider_utils7 = require("@ai-sdk/provider-utils");
+ var import_zod9 = require("zod");
+ var openAIProviderOptionsSchema = import_zod9.z.object({
+ include: import_zod9.z.array(import_zod9.z.string()).nullish(),
+ language: import_zod9.z.string().nullish(),
+ prompt: import_zod9.z.string().nullish(),
+ temperature: import_zod9.z.number().min(0).max(1).nullish().default(0),
+ timestampGranularities: import_zod9.z.array(import_zod9.z.enum(["word", "segment"])).nullish().default(["segment"])
+ });
+ var languageMap = {
+ afrikaans: "af",
+ arabic: "ar",
+ armenian: "hy",
+ azerbaijani: "az",
+ belarusian: "be",
+ bosnian: "bs",
+ bulgarian: "bg",
+ catalan: "ca",
+ chinese: "zh",
+ croatian: "hr",
+ czech: "cs",
+ danish: "da",
+ dutch: "nl",
+ english: "en",
+ estonian: "et",
+ finnish: "fi",
+ french: "fr",
+ galician: "gl",
+ german: "de",
+ greek: "el",
+ hebrew: "he",
+ hindi: "hi",
+ hungarian: "hu",
+ icelandic: "is",
+ indonesian: "id",
+ italian: "it",
+ japanese: "ja",
+ kannada: "kn",
+ kazakh: "kk",
+ korean: "ko",
+ latvian: "lv",
+ lithuanian: "lt",
+ macedonian: "mk",
+ malay: "ms",
+ marathi: "mr",
+ maori: "mi",
+ nepali: "ne",
+ norwegian: "no",
+ persian: "fa",
+ polish: "pl",
+ portuguese: "pt",
+ romanian: "ro",
+ russian: "ru",
+ serbian: "sr",
+ slovak: "sk",
+ slovenian: "sl",
+ spanish: "es",
+ swahili: "sw",
+ swedish: "sv",
+ tagalog: "tl",
+ tamil: "ta",
+ thai: "th",
+ turkish: "tr",
+ ukrainian: "uk",
+ urdu: "ur",
+ vietnamese: "vi",
+ welsh: "cy"
+ };
+ var OpenAITranscriptionModel = class {
+ constructor(modelId, config) {
+ this.modelId = modelId;
+ this.config = config;
+ this.specificationVersion = "v1";
+ }
+ get provider() {
+ return this.config.provider;
+ }
+ getArgs({
+ audio,
+ mediaType,
+ providerOptions
+ }) {
+ var _a, _b, _c, _d, _e;
+ const warnings = [];
+ const openAIOptions = (0, import_provider_utils7.parseProviderOptions)({
+ provider: "openai",
+ providerOptions,
+ schema: openAIProviderOptionsSchema
+ });
+ const formData = new FormData();
+ const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([(0, import_provider_utils7.convertBase64ToUint8Array)(audio)]);
+ formData.append("model", this.modelId);
+ formData.append("file", new File([blob], "audio", { type: mediaType }));
+ if (openAIOptions) {
+ const transcriptionModelOptions = {
+ include: (_a = openAIOptions.include) != null ? _a : void 0,
+ language: (_b = openAIOptions.language) != null ? _b : void 0,
+ prompt: (_c = openAIOptions.prompt) != null ? _c : void 0,
+ temperature: (_d = openAIOptions.temperature) != null ? _d : void 0,
+ timestamp_granularities: (_e = openAIOptions.timestampGranularities) != null ? _e : void 0
+ };
+ for (const key in transcriptionModelOptions) {
+ const value = transcriptionModelOptions[key];
+ if (value !== void 0) {
+ formData.append(key, String(value));
+ }
+ }
+ }
+ return {
+ formData,
+ warnings
+ };
+ }
+ async doGenerate(options) {
+ var _a, _b, _c, _d, _e, _f;
+ const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
+ const { formData, warnings } = this.getArgs(options);
+ const {
+ value: response,
+ responseHeaders,
+ rawValue: rawResponse
+ } = await (0, import_provider_utils7.postFormDataToApi)({
+ url: this.config.url({
+ path: "/audio/transcriptions",
+ modelId: this.modelId
+ }),
+ headers: (0, import_provider_utils7.combineHeaders)(this.config.headers(), options.headers),
+ formData,
+ failedResponseHandler: openaiFailedResponseHandler,
+ successfulResponseHandler: (0, import_provider_utils7.createJsonResponseHandler)(
+ openaiTranscriptionResponseSchema
+ ),
+ abortSignal: options.abortSignal,
+ fetch: this.config.fetch
+ });
+ const language = response.language != null && response.language in languageMap ? languageMap[response.language] : void 0;
+ return {
+ text: response.text,
+ segments: (_e = (_d = response.words) == null ? void 0 : _d.map((word) => ({
+ text: word.word,
+ startSecond: word.start,
+ endSecond: word.end
+ }))) != null ? _e : [],
+ language,
+ durationInSeconds: (_f = response.duration) != null ? _f : void 0,
+ warnings,
+ response: {
+ timestamp: currentDate,
+ modelId: this.modelId,
+ headers: responseHeaders,
+ body: rawResponse
+ }
+ };
+ }
+ };
+ var openaiTranscriptionResponseSchema = import_zod9.z.object({
+ text: import_zod9.z.string(),
+ language: import_zod9.z.string().nullish(),
+ duration: import_zod9.z.number().nullish(),
+ words: import_zod9.z.array(
+ import_zod9.z.object({
+ word: import_zod9.z.string(),
+ start: import_zod9.z.number(),
+ end: import_zod9.z.number()
+ })
+ ).nullish()
+ });
+
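Editor's note: the hunk above introduces a transcription model. Below is a minimal usage sketch in TypeScript, grounded in the getArgs/doGenerate signatures visible in this diff (audio as Uint8Array or base64 string, mediaType, and providerOptions validated by openAIProviderOptionsSchema); the model id, file name, and the top-level `transcription` accessor registered at the end of this diff are the only names assumed here, and the model id is a placeholder.

import { createOpenAI } from '@ai-sdk/openai';
import { readFileSync } from 'node:fs';

const openai = createOpenAI();
const model = openai.transcription('whisper-1'); // placeholder model id

const result = await model.doGenerate({
  audio: new Uint8Array(readFileSync('speech.wav')), // Uint8Array or base64 string, per getArgs
  mediaType: 'audio/wav',
  providerOptions: {
    openai: {
      language: 'en',                   // validated by openAIProviderOptionsSchema
      timestampGranularities: ['word'], // "word" | "segment"
    },
  },
});
// result.text, result.segments ({ text, startSecond, endSecond }),
// result.language (ISO 639-1 via languageMap), result.durationInSeconds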
  // src/responses/openai-responses-language-model.ts
  var import_provider_utils8 = require("@ai-sdk/provider-utils");
- var import_zod7 = require("zod");
+ var import_zod10 = require("zod");

  // src/responses/convert-to-openai-responses-messages.ts
- var import_provider7 = require("@ai-sdk/provider");
- var import_provider_utils7 = require("@ai-sdk/provider-utils");
+ var import_provider6 = require("@ai-sdk/provider");
  function convertToOpenAIResponsesMessages({
  prompt,
  systemMessageMode
@@ -1667,38 +1595,35 @@ function convertToOpenAIResponsesMessages({
  messages.push({
  role: "user",
  content: content.map((part, index) => {
- var _a, _b, _c, _d;
+ var _a, _b, _c;
  switch (part.type) {
  case "text": {
  return { type: "input_text", text: part.text };
  }
- case "image": {
- return {
- type: "input_image",
- image_url: part.image instanceof URL ? part.image.toString() : `data:${(_a = part.mimeType) != null ? _a : "image/jpeg"};base64,${(0, import_provider_utils7.convertUint8ArrayToBase64)(part.image)}`,
- // OpenAI specific extension: image detail
- detail: (_c = (_b = part.providerMetadata) == null ? void 0 : _b.openai) == null ? void 0 : _c.imageDetail
- };
- }
  case "file": {
- if (part.data instanceof URL) {
- throw new import_provider7.UnsupportedFunctionalityError({
- functionality: "File URLs in user messages"
- });
- }
- switch (part.mimeType) {
- case "application/pdf": {
- return {
- type: "input_file",
- filename: (_d = part.filename) != null ? _d : `part-${index}.pdf`,
- file_data: `data:application/pdf;base64,${part.data}`
- };
- }
- default: {
- throw new import_provider7.UnsupportedFunctionalityError({
- functionality: "Only PDF files are supported in user messages"
+ if (part.mediaType.startsWith("image/")) {
+ const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
+ return {
+ type: "input_image",
+ image_url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${part.data}`,
+ // OpenAI specific extension: image detail
+ detail: (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.imageDetail
+ };
+ } else if (part.mediaType === "application/pdf") {
+ if (part.data instanceof URL) {
+ throw new import_provider6.UnsupportedFunctionalityError({
+ functionality: "PDF file parts with URLs"
  });
  }
+ return {
+ type: "input_file",
+ filename: (_c = part.filename) != null ? _c : `part-${index}.pdf`,
+ file_data: `data:application/pdf;base64,${part.data}`
+ };
+ } else {
+ throw new import_provider6.UnsupportedFunctionalityError({
+ functionality: `file part media type ${part.mediaType}`
+ });
  }
  }
  }
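Editor's note: after this change, user content no longer has a dedicated image part type; file parts are routed by mediaType ("image/*" becomes input_image, "application/pdf" becomes input_file, anything else throws). A sketch of the mapping follows; the converter is module-internal, so this call is hypothetical, and the data values are placeholders.

// Hypothetical call; output shapes follow the branch logic in the hunk above.
const pdfBase64 = '...base64...'; // placeholder
const { messages } = convertToOpenAIResponsesMessages({
  prompt: [{
    role: 'user',
    content: [
      { type: 'file', mediaType: 'image/png', data: new URL('https://example.com/cat.png') },
      { type: 'file', mediaType: 'application/pdf', data: pdfBase64, filename: 'report.pdf' },
    ],
  }],
  systemMessageMode: 'system',
});
// messages[0].content[0] => { type: 'input_image', image_url: 'https://example.com/cat.png', detail: undefined }
// messages[0].content[1] => { type: 'input_file', filename: 'report.pdf',
//                             file_data: 'data:application/pdf;base64,...' }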
@@ -1767,18 +1692,17 @@ function mapOpenAIResponseFinishReason({
  }

  // src/responses/openai-responses-prepare-tools.ts
- var import_provider8 = require("@ai-sdk/provider");
+ var import_provider7 = require("@ai-sdk/provider");
  function prepareResponsesTools({
- mode,
+ tools,
+ toolChoice,
  strict
  }) {
- var _a;
- const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
+ tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
  const toolWarnings = [];
  if (tools == null) {
- return { tools: void 0, tool_choice: void 0, toolWarnings };
+ return { tools: void 0, toolChoice: void 0, toolWarnings };
  }
- const toolChoice = mode.toolChoice;
  const openaiTools2 = [];
  for (const tool of tools) {
  switch (tool.type) {
@@ -1811,37 +1735,24 @@ function prepareResponsesTools({
  }
  }
  if (toolChoice == null) {
- return { tools: openaiTools2, tool_choice: void 0, toolWarnings };
+ return { tools: openaiTools2, toolChoice: void 0, toolWarnings };
  }
  const type = toolChoice.type;
  switch (type) {
  case "auto":
  case "none":
  case "required":
- return { tools: openaiTools2, tool_choice: type, toolWarnings };
- case "tool": {
- if (toolChoice.toolName === "web_search_preview") {
- return {
- tools: openaiTools2,
- tool_choice: {
- type: "web_search_preview"
- },
- toolWarnings
- };
- }
+ return { tools: openaiTools2, toolChoice: type, toolWarnings };
+ case "tool":
  return {
  tools: openaiTools2,
- tool_choice: {
- type: "function",
- name: toolChoice.toolName
- },
+ toolChoice: toolChoice.toolName === "web_search_preview" ? { type: "web_search_preview" } : { type: "function", name: toolChoice.toolName },
  toolWarnings
  };
- }
  default: {
  const _exhaustiveCheck = type;
- throw new import_provider8.UnsupportedFunctionalityError({
- functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`
+ throw new import_provider7.UnsupportedFunctionalityError({
+ functionality: `tool choice type: ${_exhaustiveCheck}`
  });
  }
  }
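Editor's note: prepareResponsesTools now receives tools and toolChoice directly instead of a mode object, and returns a camelCase toolChoice. An illustrative call follows; the function is module-internal, and the function-tool input shape is an assumption (the per-tool switch body falls outside this hunk).

// Hypothetical call against the flattened signature above.
const { tools, toolChoice, toolWarnings } = prepareResponsesTools({
  tools: [{
    type: 'function', // assumed tool shape
    name: 'getWeather',
    description: 'Get the weather for a city',
    parameters: { type: 'object', properties: { city: { type: 'string' } } },
  }],
  toolChoice: { type: 'tool', toolName: 'getWeather' },
  strict: true,
});
// toolChoice => { type: 'function', name: 'getWeather' };
// with toolName === 'web_search_preview' it would be { type: 'web_search_preview' }.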
@@ -1851,16 +1762,19 @@ function prepareResponsesTools({
  var OpenAIResponsesLanguageModel = class {
  constructor(modelId, config) {
  this.specificationVersion = "v2";
- this.defaultObjectGenerationMode = "json";
  this.modelId = modelId;
  this.config = config;
  }
+ async getSupportedUrls() {
+ return {
+ "image/*": [/^https?:\/\/.*$/]
+ };
+ }
  get provider() {
  return this.config.provider;
  }
  getArgs({
- mode,
- maxTokens,
+ maxOutputTokens,
  temperature,
  stopSequences,
  topP,
@@ -1869,24 +1783,19 @@ var OpenAIResponsesLanguageModel = class {
  frequencyPenalty,
  seed,
  prompt,
- providerMetadata,
+ providerOptions,
+ tools,
+ toolChoice,
  responseFormat
  }) {
- var _a, _b, _c;
+ var _a, _b;
  const warnings = [];
  const modelConfig = getResponsesModelConfig(this.modelId);
- const type = mode.type;
  if (topK != null) {
- warnings.push({
- type: "unsupported-setting",
- setting: "topK"
- });
+ warnings.push({ type: "unsupported-setting", setting: "topK" });
  }
  if (seed != null) {
- warnings.push({
- type: "unsupported-setting",
- setting: "seed"
- });
+ warnings.push({ type: "unsupported-setting", setting: "seed" });
  }
  if (presencePenalty != null) {
  warnings.push({
@@ -1901,10 +1810,7 @@ var OpenAIResponsesLanguageModel = class {
  });
  }
  if (stopSequences != null) {
- warnings.push({
- type: "unsupported-setting",
- setting: "stopSequences"
- });
+ warnings.push({ type: "unsupported-setting", setting: "stopSequences" });
  }
  const { messages, warnings: messageWarnings } = convertToOpenAIResponsesMessages({
  prompt,
@@ -1913,7 +1819,7 @@
  warnings.push(...messageWarnings);
  const openaiOptions = (0, import_provider_utils8.parseProviderOptions)({
  provider: "openai",
- providerOptions: providerMetadata,
+ providerOptions,
  schema: openaiResponsesProviderOptionsSchema
  });
  const isStrict = (_a = openaiOptions == null ? void 0 : openaiOptions.strictSchemas) != null ? _a : true;
@@ -1922,7 +1828,7 @@ var OpenAIResponsesLanguageModel = class {
  input: messages,
  temperature,
  top_p: topP,
- max_output_tokens: maxTokens,
+ max_output_tokens: maxOutputTokens,
  ...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
  text: {
  format: responseFormat.schema != null ? {
@@ -1942,8 +1848,15 @@ var OpenAIResponsesLanguageModel = class {
  user: openaiOptions == null ? void 0 : openaiOptions.user,
  instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
  // model-specific settings:
- ...modelConfig.isReasoningModel && (openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
- reasoning: { effort: openaiOptions == null ? void 0 : openaiOptions.reasoningEffort }
+ ...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
+ reasoning: {
+ ...(openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
+ effort: openaiOptions.reasoningEffort
+ },
+ ...(openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null && {
+ summary: openaiOptions.reasoningSummary
+ }
+ }
  },
  ...modelConfig.requiredAutoTruncation && {
  truncation: "auto"
@@ -1967,65 +1880,26 @@ var OpenAIResponsesLanguageModel = class {
  });
  }
  }
- switch (type) {
- case "regular": {
- const { tools, tool_choice, toolWarnings } = prepareResponsesTools({
- mode,
- strict: isStrict
- // TODO support provider options on tools
- });
- return {
- args: {
- ...baseArgs,
- tools,
- tool_choice
- },
- warnings: [...warnings, ...toolWarnings]
- };
- }
- case "object-json": {
- return {
- args: {
- ...baseArgs,
- text: {
- format: mode.schema != null ? {
- type: "json_schema",
- strict: isStrict,
- name: (_c = mode.name) != null ? _c : "response",
- description: mode.description,
- schema: mode.schema
- } : { type: "json_object" }
- }
- },
- warnings
- };
- }
- case "object-tool": {
- return {
- args: {
- ...baseArgs,
- tool_choice: { type: "function", name: mode.tool.name },
- tools: [
- {
- type: "function",
- name: mode.tool.name,
- description: mode.tool.description,
- parameters: mode.tool.parameters,
- strict: isStrict
- }
- ]
- },
- warnings
- };
- }
- default: {
- const _exhaustiveCheck = type;
- throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
- }
- }
+ const {
+ tools: openaiTools2,
+ toolChoice: openaiToolChoice,
+ toolWarnings
+ } = prepareResponsesTools({
+ tools,
+ toolChoice,
+ strict: isStrict
+ });
+ return {
+ args: {
+ ...baseArgs,
+ tools: openaiTools2,
+ tool_choice: openaiToolChoice
+ },
+ warnings: [...warnings, ...toolWarnings]
+ };
  }
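Editor's note: getArgs now reads flattened call options (maxOutputTokens, providerOptions, tools, toolChoice) and forwards the new reasoningSummary option alongside reasoningEffort for reasoning models. A sketch of how those provider options would be supplied; generateText and the responses accessor are assumed from the package's public API (they are not shown in this diff), and the model id is a placeholder.

import { generateText } from 'ai';
import { createOpenAI } from '@ai-sdk/openai';

const openai = createOpenAI();
const { text } = await generateText({
  model: openai.responses('o3-mini'), // placeholder reasoning model id
  prompt: 'Explain the proof sketch.',
  providerOptions: {
    openai: {
      reasoningEffort: 'low',       // sent as reasoning.effort
      reasoningSummary: 'detailed', // new: sent as reasoning.summary
    },
  },
});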
  async doGenerate(options) {
- var _a, _b, _c, _d, _e;
+ var _a, _b, _c, _d, _e, _f, _g, _h;
  const { args: body, warnings } = this.getArgs(options);
  const {
  responseHeaders,
@@ -2040,105 +1914,125 @@ var OpenAIResponsesLanguageModel = class {
  body,
  failedResponseHandler: openaiFailedResponseHandler,
  successfulResponseHandler: (0, import_provider_utils8.createJsonResponseHandler)(
- import_zod7.z.object({
- id: import_zod7.z.string(),
- created_at: import_zod7.z.number(),
- model: import_zod7.z.string(),
- output: import_zod7.z.array(
- import_zod7.z.discriminatedUnion("type", [
- import_zod7.z.object({
- type: import_zod7.z.literal("message"),
- role: import_zod7.z.literal("assistant"),
- content: import_zod7.z.array(
- import_zod7.z.object({
- type: import_zod7.z.literal("output_text"),
- text: import_zod7.z.string(),
- annotations: import_zod7.z.array(
- import_zod7.z.object({
- type: import_zod7.z.literal("url_citation"),
- start_index: import_zod7.z.number(),
- end_index: import_zod7.z.number(),
- url: import_zod7.z.string(),
- title: import_zod7.z.string()
+ import_zod10.z.object({
+ id: import_zod10.z.string(),
+ created_at: import_zod10.z.number(),
+ model: import_zod10.z.string(),
+ output: import_zod10.z.array(
+ import_zod10.z.discriminatedUnion("type", [
+ import_zod10.z.object({
+ type: import_zod10.z.literal("message"),
+ role: import_zod10.z.literal("assistant"),
+ content: import_zod10.z.array(
+ import_zod10.z.object({
+ type: import_zod10.z.literal("output_text"),
+ text: import_zod10.z.string(),
+ annotations: import_zod10.z.array(
+ import_zod10.z.object({
+ type: import_zod10.z.literal("url_citation"),
+ start_index: import_zod10.z.number(),
+ end_index: import_zod10.z.number(),
+ url: import_zod10.z.string(),
+ title: import_zod10.z.string()
  })
  )
  })
  )
  }),
- import_zod7.z.object({
- type: import_zod7.z.literal("function_call"),
- call_id: import_zod7.z.string(),
- name: import_zod7.z.string(),
- arguments: import_zod7.z.string()
+ import_zod10.z.object({
+ type: import_zod10.z.literal("function_call"),
+ call_id: import_zod10.z.string(),
+ name: import_zod10.z.string(),
+ arguments: import_zod10.z.string()
  }),
- import_zod7.z.object({
- type: import_zod7.z.literal("web_search_call")
+ import_zod10.z.object({
+ type: import_zod10.z.literal("web_search_call")
  }),
- import_zod7.z.object({
- type: import_zod7.z.literal("computer_call")
+ import_zod10.z.object({
+ type: import_zod10.z.literal("computer_call")
  }),
- import_zod7.z.object({
- type: import_zod7.z.literal("reasoning")
+ import_zod10.z.object({
+ type: import_zod10.z.literal("reasoning"),
+ summary: import_zod10.z.array(
+ import_zod10.z.object({
+ type: import_zod10.z.literal("summary_text"),
+ text: import_zod10.z.string()
+ })
+ )
  })
  ])
  ),
- incomplete_details: import_zod7.z.object({ reason: import_zod7.z.string() }).nullable(),
+ incomplete_details: import_zod10.z.object({ reason: import_zod10.z.string() }).nullable(),
  usage: usageSchema
  })
  ),
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const outputTextElements = response.output.filter((output) => output.type === "message").flatMap((output) => output.content).filter((content) => content.type === "output_text");
- const toolCalls = response.output.filter((output) => output.type === "function_call").map((output) => ({
- toolCallType: "function",
- toolCallId: output.call_id,
- toolName: output.name,
- args: output.arguments
- }));
+ const content = [];
+ for (const part of response.output) {
+ switch (part.type) {
+ case "reasoning": {
+ content.push({
+ type: "reasoning",
+ reasoningType: "text",
+ text: part.summary.map((summary) => summary.text).join()
+ });
+ break;
+ }
+ case "message": {
+ for (const contentPart of part.content) {
+ content.push({
+ type: "text",
+ text: contentPart.text
+ });
+ for (const annotation of contentPart.annotations) {
+ content.push({
+ type: "source",
+ sourceType: "url",
+ id: (_c = (_b = (_a = this.config).generateId) == null ? void 0 : _b.call(_a)) != null ? _c : (0, import_provider_utils8.generateId)(),
+ url: annotation.url,
+ title: annotation.title
+ });
+ }
+ }
+ break;
+ }
+ case "function_call": {
+ content.push({
+ type: "tool-call",
+ toolCallType: "function",
+ toolCallId: part.call_id,
+ toolName: part.name,
+ args: part.arguments
+ });
+ break;
+ }
+ }
+ }
  return {
- text: outputTextElements.map((content) => content.text).join("\n"),
- sources: outputTextElements.flatMap(
- (content) => content.annotations.map((annotation) => {
- var _a2, _b2, _c2;
- return {
- sourceType: "url",
- id: (_c2 = (_b2 = (_a2 = this.config).generateId) == null ? void 0 : _b2.call(_a2)) != null ? _c2 : (0, import_provider_utils8.generateId)(),
- url: annotation.url,
- title: annotation.title
- };
- })
- ),
+ content,
  finishReason: mapOpenAIResponseFinishReason({
- finishReason: (_a = response.incomplete_details) == null ? void 0 : _a.reason,
- hasToolCalls: toolCalls.length > 0
+ finishReason: (_d = response.incomplete_details) == null ? void 0 : _d.reason,
+ hasToolCalls: content.some((part) => part.type === "tool-call")
  }),
- toolCalls: toolCalls.length > 0 ? toolCalls : void 0,
  usage: {
- promptTokens: response.usage.input_tokens,
- completionTokens: response.usage.output_tokens
- },
- rawCall: {
- rawPrompt: void 0,
- rawSettings: {}
- },
- rawResponse: {
- headers: responseHeaders,
- body: rawResponse
- },
- request: {
- body: JSON.stringify(body)
+ inputTokens: response.usage.input_tokens,
+ outputTokens: response.usage.output_tokens
  },
+ request: { body },
  response: {
  id: response.id,
  timestamp: new Date(response.created_at * 1e3),
- modelId: response.model
+ modelId: response.model,
+ headers: responseHeaders,
+ body: rawResponse
  },
  providerMetadata: {
  openai: {
  responseId: response.id,
- cachedPromptTokens: (_c = (_b = response.usage.input_tokens_details) == null ? void 0 : _b.cached_tokens) != null ? _c : null,
- reasoningTokens: (_e = (_d = response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : null
+ cachedPromptTokens: (_f = (_e = response.usage.input_tokens_details) == null ? void 0 : _e.cached_tokens) != null ? _f : null,
+ reasoningTokens: (_h = (_g = response.usage.output_tokens_details) == null ? void 0 : _g.reasoning_tokens) != null ? _h : null
  }
  },
  warnings
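Editor's note: doGenerate now returns one ordered content array instead of separate text, toolCalls, and sources fields, and usage switches from promptTokens/completionTokens to inputTokens/outputTokens. An illustrative content value for a response containing reasoning, text with one citation, and a tool call (all data made up):

const content = [
  { type: 'reasoning', reasoningType: 'text', text: 'Weighing both options...' },
  { type: 'text', text: 'It is sunny in Berlin.' },
  { type: 'source', sourceType: 'url', id: 'id-0', url: 'https://example.com/weather', title: 'Weather' },
  { type: 'tool-call', toolCallType: 'function', toolCallId: 'call_1',
    toolName: 'getWeather', args: '{"city":"Berlin"}' },
];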
@@ -2165,8 +2059,10 @@ var OpenAIResponsesLanguageModel = class {
  });
  const self = this;
  let finishReason = "unknown";
- let promptTokens = NaN;
- let completionTokens = NaN;
+ const usage = {
+ inputTokens: void 0,
+ outputTokens: void 0
+ };
  let cachedPromptTokens = null;
  let reasoningTokens = null;
  let responseId = null;
@@ -2175,6 +2071,9 @@ var OpenAIResponsesLanguageModel = class {
  return {
  stream: response.pipeThrough(
  new TransformStream({
+ start(controller) {
+ controller.enqueue({ type: "stream-start", warnings });
+ },
  transform(chunk, controller) {
  var _a, _b, _c, _d, _e, _f, _g, _h;
  if (!chunk.success) {
@@ -2218,8 +2117,14 @@ var OpenAIResponsesLanguageModel = class {
  });
  } else if (isTextDeltaChunk(value)) {
  controller.enqueue({
- type: "text-delta",
- textDelta: value.delta
+ type: "text",
+ text: value.delta
+ });
+ } else if (isResponseReasoningSummaryTextDeltaChunk(value)) {
+ controller.enqueue({
+ type: "reasoning",
+ reasoningType: "text",
+ text: value.delta
  });
  } else if (isResponseOutputItemDoneChunk(value) && value.item.type === "function_call") {
  ongoingToolCalls[value.output_index] = void 0;
@@ -2236,19 +2141,17 @@ var OpenAIResponsesLanguageModel = class {
  finishReason: (_a = value.response.incomplete_details) == null ? void 0 : _a.reason,
  hasToolCalls
  });
- promptTokens = value.response.usage.input_tokens;
- completionTokens = value.response.usage.output_tokens;
+ usage.inputTokens = value.response.usage.input_tokens;
+ usage.outputTokens = value.response.usage.output_tokens;
  cachedPromptTokens = (_c = (_b = value.response.usage.input_tokens_details) == null ? void 0 : _b.cached_tokens) != null ? _c : cachedPromptTokens;
  reasoningTokens = (_e = (_d = value.response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : reasoningTokens;
  } else if (isResponseAnnotationAddedChunk(value)) {
  controller.enqueue({
  type: "source",
- source: {
- sourceType: "url",
- id: (_h = (_g = (_f = self.config).generateId) == null ? void 0 : _g.call(_f)) != null ? _h : (0, import_provider_utils8.generateId)(),
- url: value.annotation.url,
- title: value.annotation.title
- }
+ sourceType: "url",
+ id: (_h = (_g = (_f = self.config).generateId) == null ? void 0 : _g.call(_f)) != null ? _h : (0, import_provider_utils8.generateId)(),
+ url: value.annotation.url,
+ title: value.annotation.title
  });
  }
  },
@@ -2256,7 +2159,7 @@ var OpenAIResponsesLanguageModel = class {
  controller.enqueue({
  type: "finish",
  finishReason,
- usage: { promptTokens, completionTokens },
+ usage,
  ...(cachedPromptTokens != null || reasoningTokens != null) && {
  providerMetadata: {
  openai: {
@@ -2270,89 +2173,91 @@ var OpenAIResponsesLanguageModel = class {
  }
  })
  ),
- rawCall: {
- rawPrompt: void 0,
- rawSettings: {}
- },
- rawResponse: { headers: responseHeaders },
- request: { body: JSON.stringify(body) },
- warnings
+ request: { body },
+ response: { headers: responseHeaders }
  };
  }
  };
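Editor's note: the stream now opens with a stream-start part carrying warnings, renames text-delta/textDelta to text, emits reasoning deltas for reasoning summaries, and flattens source parts. A consumption sketch for the parts emitted above; `stream` stands in for the doStream result's stream property:

for await (const part of stream) {
  switch (part.type) {
    case 'stream-start': console.warn(part.warnings); break;
    case 'reasoning':    process.stdout.write(part.text); break; // summary deltas
    case 'text':         process.stdout.write(part.text); break; // was text-delta
    case 'source':       console.log(part.url, part.title); break;
    case 'finish':       console.log(part.finishReason, part.usage); break; // usage.inputTokens/outputTokens
  }
}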
- var usageSchema = import_zod7.z.object({
- input_tokens: import_zod7.z.number(),
- input_tokens_details: import_zod7.z.object({ cached_tokens: import_zod7.z.number().nullish() }).nullish(),
- output_tokens: import_zod7.z.number(),
- output_tokens_details: import_zod7.z.object({ reasoning_tokens: import_zod7.z.number().nullish() }).nullish()
+ var usageSchema = import_zod10.z.object({
+ input_tokens: import_zod10.z.number(),
+ input_tokens_details: import_zod10.z.object({ cached_tokens: import_zod10.z.number().nullish() }).nullish(),
+ output_tokens: import_zod10.z.number(),
+ output_tokens_details: import_zod10.z.object({ reasoning_tokens: import_zod10.z.number().nullish() }).nullish()
  });
- var textDeltaChunkSchema = import_zod7.z.object({
- type: import_zod7.z.literal("response.output_text.delta"),
- delta: import_zod7.z.string()
+ var textDeltaChunkSchema = import_zod10.z.object({
+ type: import_zod10.z.literal("response.output_text.delta"),
+ delta: import_zod10.z.string()
  });
- var responseFinishedChunkSchema = import_zod7.z.object({
- type: import_zod7.z.enum(["response.completed", "response.incomplete"]),
- response: import_zod7.z.object({
- incomplete_details: import_zod7.z.object({ reason: import_zod7.z.string() }).nullish(),
+ var responseFinishedChunkSchema = import_zod10.z.object({
+ type: import_zod10.z.enum(["response.completed", "response.incomplete"]),
+ response: import_zod10.z.object({
+ incomplete_details: import_zod10.z.object({ reason: import_zod10.z.string() }).nullish(),
  usage: usageSchema
  })
  });
- var responseCreatedChunkSchema = import_zod7.z.object({
- type: import_zod7.z.literal("response.created"),
- response: import_zod7.z.object({
- id: import_zod7.z.string(),
- created_at: import_zod7.z.number(),
- model: import_zod7.z.string()
+ var responseCreatedChunkSchema = import_zod10.z.object({
+ type: import_zod10.z.literal("response.created"),
+ response: import_zod10.z.object({
+ id: import_zod10.z.string(),
+ created_at: import_zod10.z.number(),
+ model: import_zod10.z.string()
  })
  });
- var responseOutputItemDoneSchema = import_zod7.z.object({
- type: import_zod7.z.literal("response.output_item.done"),
- output_index: import_zod7.z.number(),
- item: import_zod7.z.discriminatedUnion("type", [
- import_zod7.z.object({
- type: import_zod7.z.literal("message")
+ var responseOutputItemDoneSchema = import_zod10.z.object({
+ type: import_zod10.z.literal("response.output_item.done"),
+ output_index: import_zod10.z.number(),
+ item: import_zod10.z.discriminatedUnion("type", [
+ import_zod10.z.object({
+ type: import_zod10.z.literal("message")
  }),
- import_zod7.z.object({
- type: import_zod7.z.literal("function_call"),
- id: import_zod7.z.string(),
- call_id: import_zod7.z.string(),
- name: import_zod7.z.string(),
- arguments: import_zod7.z.string(),
- status: import_zod7.z.literal("completed")
+ import_zod10.z.object({
+ type: import_zod10.z.literal("function_call"),
+ id: import_zod10.z.string(),
+ call_id: import_zod10.z.string(),
+ name: import_zod10.z.string(),
+ arguments: import_zod10.z.string(),
+ status: import_zod10.z.literal("completed")
  })
  ])
  });
- var responseFunctionCallArgumentsDeltaSchema = import_zod7.z.object({
- type: import_zod7.z.literal("response.function_call_arguments.delta"),
- item_id: import_zod7.z.string(),
- output_index: import_zod7.z.number(),
- delta: import_zod7.z.string()
+ var responseFunctionCallArgumentsDeltaSchema = import_zod10.z.object({
+ type: import_zod10.z.literal("response.function_call_arguments.delta"),
+ item_id: import_zod10.z.string(),
+ output_index: import_zod10.z.number(),
+ delta: import_zod10.z.string()
  });
- var responseOutputItemAddedSchema = import_zod7.z.object({
- type: import_zod7.z.literal("response.output_item.added"),
- output_index: import_zod7.z.number(),
- item: import_zod7.z.discriminatedUnion("type", [
- import_zod7.z.object({
- type: import_zod7.z.literal("message")
+ var responseOutputItemAddedSchema = import_zod10.z.object({
+ type: import_zod10.z.literal("response.output_item.added"),
+ output_index: import_zod10.z.number(),
+ item: import_zod10.z.discriminatedUnion("type", [
+ import_zod10.z.object({
+ type: import_zod10.z.literal("message")
  }),
- import_zod7.z.object({
- type: import_zod7.z.literal("function_call"),
- id: import_zod7.z.string(),
- call_id: import_zod7.z.string(),
- name: import_zod7.z.string(),
- arguments: import_zod7.z.string()
+ import_zod10.z.object({
+ type: import_zod10.z.literal("function_call"),
+ id: import_zod10.z.string(),
+ call_id: import_zod10.z.string(),
+ name: import_zod10.z.string(),
+ arguments: import_zod10.z.string()
  })
  ])
  });
- var responseAnnotationAddedSchema = import_zod7.z.object({
- type: import_zod7.z.literal("response.output_text.annotation.added"),
- annotation: import_zod7.z.object({
- type: import_zod7.z.literal("url_citation"),
- url: import_zod7.z.string(),
- title: import_zod7.z.string()
+ var responseAnnotationAddedSchema = import_zod10.z.object({
+ type: import_zod10.z.literal("response.output_text.annotation.added"),
+ annotation: import_zod10.z.object({
+ type: import_zod10.z.literal("url_citation"),
+ url: import_zod10.z.string(),
+ title: import_zod10.z.string()
  })
  });
- var openaiResponsesChunkSchema = import_zod7.z.union([
+ var responseReasoningSummaryTextDeltaSchema = import_zod10.z.object({
+ type: import_zod10.z.literal("response.reasoning_summary_text.delta"),
+ item_id: import_zod10.z.string(),
+ output_index: import_zod10.z.number(),
+ summary_index: import_zod10.z.number(),
+ delta: import_zod10.z.string()
+ });
+ var openaiResponsesChunkSchema = import_zod10.z.union([
  textDeltaChunkSchema,
  responseFinishedChunkSchema,
  responseCreatedChunkSchema,
@@ -2360,7 +2265,8 @@ var openaiResponsesChunkSchema = import_zod7.z.union([
  responseFunctionCallArgumentsDeltaSchema,
  responseOutputItemAddedSchema,
  responseAnnotationAddedSchema,
- import_zod7.z.object({ type: import_zod7.z.string() }).passthrough()
+ responseReasoningSummaryTextDeltaSchema,
+ import_zod10.z.object({ type: import_zod10.z.string() }).passthrough()
  // fallback for unknown chunks
  ]);
  function isTextDeltaChunk(chunk) {
@@ -2384,6 +2290,9 @@ function isResponseOutputItemAddedChunk(chunk) {
  function isResponseAnnotationAddedChunk(chunk) {
  return chunk.type === "response.output_text.annotation.added";
  }
+ function isResponseReasoningSummaryTextDeltaChunk(chunk) {
+ return chunk.type === "response.reasoning_summary_text.delta";
+ }
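Editor's note: the new schema and guard above let the stream recognize reasoning-summary deltas from the Responses API. An example chunk that responseReasoningSummaryTextDeltaSchema would accept (field values illustrative):

const chunk = {
  type: 'response.reasoning_summary_text.delta',
  item_id: 'rs_123',
  output_index: 0,
  summary_index: 0,
  delta: 'First, the model compares...',
};
// isResponseReasoningSummaryTextDeltaChunk(chunk) === true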
  function getResponsesModelConfig(modelId) {
  if (modelId.startsWith("o")) {
  if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {
@@ -2405,25 +2314,125 @@ function getResponsesModelConfig(modelId) {
  requiredAutoTruncation: false
  };
  }
- var openaiResponsesProviderOptionsSchema = import_zod7.z.object({
- metadata: import_zod7.z.any().nullish(),
- parallelToolCalls: import_zod7.z.boolean().nullish(),
- previousResponseId: import_zod7.z.string().nullish(),
- store: import_zod7.z.boolean().nullish(),
- user: import_zod7.z.string().nullish(),
- reasoningEffort: import_zod7.z.string().nullish(),
- strictSchemas: import_zod7.z.boolean().nullish(),
- instructions: import_zod7.z.string().nullish()
+ var openaiResponsesProviderOptionsSchema = import_zod10.z.object({
+ metadata: import_zod10.z.any().nullish(),
+ parallelToolCalls: import_zod10.z.boolean().nullish(),
+ previousResponseId: import_zod10.z.string().nullish(),
+ store: import_zod10.z.boolean().nullish(),
+ user: import_zod10.z.string().nullish(),
+ reasoningEffort: import_zod10.z.string().nullish(),
+ strictSchemas: import_zod10.z.boolean().nullish(),
+ instructions: import_zod10.z.string().nullish(),
+ reasoningSummary: import_zod10.z.string().nullish()
  });

2330
+ var import_provider_utils9 = require("@ai-sdk/provider-utils");
2331
+ var import_zod11 = require("zod");
2332
+ var OpenAIProviderOptionsSchema = import_zod11.z.object({
2333
+ instructions: import_zod11.z.string().nullish(),
2334
+ speed: import_zod11.z.number().min(0.25).max(4).default(1).nullish()
2335
+ });
2336
+ var OpenAISpeechModel = class {
2337
+ constructor(modelId, config) {
2338
+ this.modelId = modelId;
2339
+ this.config = config;
2340
+ this.specificationVersion = "v1";
2341
+ }
2342
+ get provider() {
2343
+ return this.config.provider;
2344
+ }
2345
+ getArgs({
2346
+ text,
2347
+ voice = "alloy",
2348
+ outputFormat = "mp3",
2349
+ speed,
2350
+ instructions,
2351
+ providerOptions
2352
+ }) {
2353
+ const warnings = [];
2354
+ const openAIOptions = (0, import_provider_utils9.parseProviderOptions)({
2355
+ provider: "openai",
2356
+ providerOptions,
2357
+ schema: OpenAIProviderOptionsSchema
2358
+ });
2359
+ const requestBody = {
2360
+ model: this.modelId,
2361
+ input: text,
2362
+ voice,
2363
+ response_format: "mp3",
2364
+ speed,
2365
+ instructions
2366
+ };
2367
+ if (outputFormat) {
2368
+ if (["mp3", "opus", "aac", "flac", "wav", "pcm"].includes(outputFormat)) {
2369
+ requestBody.response_format = outputFormat;
2370
+ } else {
2371
+ warnings.push({
2372
+ type: "unsupported-setting",
2373
+ setting: "outputFormat",
2374
+ details: `Unsupported output format: ${outputFormat}. Using mp3 instead.`
2375
+ });
2376
+ }
2377
+ }
2378
+ if (openAIOptions) {
2379
+ const speechModelOptions = {};
2380
+ for (const key in speechModelOptions) {
2381
+ const value = speechModelOptions[key];
2382
+ if (value !== void 0) {
2383
+ requestBody[key] = value;
2384
+ }
2385
+ }
2386
+ }
2387
+ return {
2388
+ requestBody,
2389
+ warnings
2390
+ };
2391
+ }
2392
+ async doGenerate(options) {
2393
+ var _a, _b, _c;
2394
+ const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
2395
+ const { requestBody, warnings } = this.getArgs(options);
2396
+ const {
2397
+ value: audio,
2398
+ responseHeaders,
2399
+ rawValue: rawResponse
2400
+ } = await (0, import_provider_utils9.postJsonToApi)({
2401
+ url: this.config.url({
2402
+ path: "/audio/speech",
2403
+ modelId: this.modelId
2404
+ }),
2405
+ headers: (0, import_provider_utils9.combineHeaders)(this.config.headers(), options.headers),
2406
+ body: requestBody,
2407
+ failedResponseHandler: openaiFailedResponseHandler,
2408
+ successfulResponseHandler: (0, import_provider_utils9.createBinaryResponseHandler)(),
2409
+ abortSignal: options.abortSignal,
2410
+ fetch: this.config.fetch
2411
+ });
2412
+ return {
2413
+ audio,
2414
+ warnings,
2415
+ request: {
2416
+ body: JSON.stringify(requestBody)
2417
+ },
2418
+ response: {
2419
+ timestamp: currentDate,
2420
+ modelId: this.modelId,
2421
+ headers: responseHeaders,
2422
+ body: rawResponse
2423
+ }
2424
+ };
2425
+ }
2426
+ };
2427
+
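Editor's note: the block above adds a speech (text-to-speech) model. A minimal usage sketch against the getArgs/doGenerate signatures shown; the model id is a placeholder. Note that outputFormat is validated against mp3/opus/aac/flac/wav/pcm and falls back to mp3 with a warning otherwise.

import { createOpenAI } from '@ai-sdk/openai';
import { writeFileSync } from 'node:fs';

const openai = createOpenAI();
const model = openai.speech('tts-1'); // placeholder model id

const { audio, warnings } = await model.doGenerate({
  text: 'Hello from the new speech model.',
  voice: 'alloy',       // default per getArgs
  outputFormat: 'wav',  // one of mp3/opus/aac/flac/wav/pcm
  speed: 1.25,          // 0.25 to 4 per OpenAIProviderOptionsSchema
});
writeFileSync('hello.wav', audio); // binary response body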
  // src/openai-provider.ts
  function createOpenAI(options = {}) {
  var _a, _b, _c;
- const baseURL = (_a = (0, import_provider_utils9.withoutTrailingSlash)(options.baseURL)) != null ? _a : "https://api.openai.com/v1";
+ const baseURL = (_a = (0, import_provider_utils10.withoutTrailingSlash)(options.baseURL)) != null ? _a : "https://api.openai.com/v1";
  const compatibility = (_b = options.compatibility) != null ? _b : "compatible";
  const providerName = (_c = options.name) != null ? _c : "openai";
  const getHeaders = () => ({
- Authorization: `Bearer ${(0, import_provider_utils9.loadApiKey)({
+ Authorization: `Bearer ${(0, import_provider_utils10.loadApiKey)({
  apiKey: options.apiKey,
  environmentVariableName: "OPENAI_API_KEY",
  description: "OpenAI"
@@ -2458,6 +2467,18 @@ function createOpenAI(options = {}) {
  headers: getHeaders,
  fetch: options.fetch
  });
+ const createTranscriptionModel = (modelId) => new OpenAITranscriptionModel(modelId, {
+ provider: `${providerName}.transcription`,
+ url: ({ path }) => `${baseURL}${path}`,
+ headers: getHeaders,
+ fetch: options.fetch
+ });
+ const createSpeechModel = (modelId) => new OpenAISpeechModel(modelId, {
+ provider: `${providerName}.speech`,
+ url: ({ path }) => `${baseURL}${path}`,
+ headers: getHeaders,
+ fetch: options.fetch
+ });
  const createLanguageModel = (modelId, settings) => {
  if (new.target) {
  throw new Error(
@@ -2492,6 +2513,10 @@ function createOpenAI(options = {}) {
  provider.textEmbeddingModel = createEmbeddingModel;
  provider.image = createImageModel;
  provider.imageModel = createImageModel;
+ provider.transcription = createTranscriptionModel;
+ provider.transcriptionModel = createTranscriptionModel;
+ provider.speech = createSpeechModel;
+ provider.speechModel = createSpeechModel;
  provider.tools = openaiTools;
  return provider;
  }
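Editor's note: net effect of the provider changes above: transcription and speech factories join the existing language, embedding, and image factories, each reachable under two names. Model ids below are placeholders.

import { createOpenAI } from '@ai-sdk/openai';

const openai = createOpenAI();
openai.transcription('whisper-1'); // same factory as openai.transcriptionModel(...)
openai.speech('tts-1');            // same factory as openai.speechModel(...)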