@ai-sdk/openai 2.0.0-canary.1 → 2.0.0-canary.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
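In brief, the changes visible in this range: per-request OpenAI chat settings (`logitBias`, `user`, `parallelToolCalls`, `reasoningEffort`, `maxCompletionTokens`, `store`, `metadata`, `prediction`) are now validated by the new `openaiProviderOptions` zod schema and read from `providerOptions.openai` instead of constructor settings and `providerMetadata`; embedding options (`dimensions`, `user`) move to the new `openaiEmbeddingProviderOptions` schema; `useLegacyFunctionCalling`, `simulateStreaming`, and logprobs support are removed; `maxTokens` becomes `maxOutputTokens` and usage is reported as `inputTokens`/`outputTokens`; and `OpenAISpeechModel` and `OpenAITranscriptionModel` exports are added. A minimal sketch of how a caller would pass the new chat options, assuming the `generateText` entry point from the `ai` package (which is outside this diff); only the `providerOptions.openai` shape is grounded in the `openaiProviderOptions` schema shown below:

  // Hypothetical usage sketch; generateText and the model id are assumptions.
  import { openai } from '@ai-sdk/openai';
  import { generateText } from 'ai';

  const { text } = await generateText({
    model: openai('o3-mini'),
    prompt: 'Summarize the changelog.',
    providerOptions: {
      // parsed against the openaiProviderOptions zod schema added in this range
      openai: {
        reasoningEffort: 'low',    // 'low' | 'medium' | 'high'
        maxCompletionTokens: 1024, // cap on generated tokens
        store: false,              // opt out of response persistence
      },
    },
  });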
@@ -25,21 +25,24 @@ __export(internal_exports, {
  OpenAIEmbeddingModel: () => OpenAIEmbeddingModel,
  OpenAIImageModel: () => OpenAIImageModel,
  OpenAIResponsesLanguageModel: () => OpenAIResponsesLanguageModel,
- modelMaxImagesPerCall: () => modelMaxImagesPerCall
+ OpenAISpeechModel: () => OpenAISpeechModel,
+ OpenAITranscriptionModel: () => OpenAITranscriptionModel,
+ modelMaxImagesPerCall: () => modelMaxImagesPerCall,
+ openaiEmbeddingProviderOptions: () => openaiEmbeddingProviderOptions,
+ openaiProviderOptions: () => openaiProviderOptions
  });
  module.exports = __toCommonJS(internal_exports);

  // src/openai-chat-language-model.ts
  var import_provider3 = require("@ai-sdk/provider");
  var import_provider_utils3 = require("@ai-sdk/provider-utils");
- var import_zod2 = require("zod");
+ var import_zod3 = require("zod");

  // src/convert-to-openai-chat-messages.ts
  var import_provider = require("@ai-sdk/provider");
  var import_provider_utils = require("@ai-sdk/provider-utils");
  function convertToOpenAIChatMessages({
  prompt,
- useLegacyFunctionCalling = false,
  systemMessageMode = "system"
  }) {
  const messages = [];
@@ -80,55 +83,71 @@ function convertToOpenAIChatMessages({
  messages.push({
  role: "user",
  content: content.map((part, index) => {
- var _a, _b, _c, _d;
+ var _a, _b, _c;
  switch (part.type) {
  case "text": {
  return { type: "text", text: part.text };
  }
- case "image": {
- return {
- type: "image_url",
- image_url: {
- url: part.image instanceof URL ? part.image.toString() : `data:${(_a = part.mimeType) != null ? _a : "image/jpeg"};base64,${(0, import_provider_utils.convertUint8ArrayToBase64)(part.image)}`,
- // OpenAI specific extension: image detail
- detail: (_c = (_b = part.providerMetadata) == null ? void 0 : _b.openai) == null ? void 0 : _c.imageDetail
- }
- };
- }
  case "file": {
- if (part.data instanceof URL) {
- throw new import_provider.UnsupportedFunctionalityError({
- functionality: "'File content parts with URL data' functionality not supported."
- });
- }
- switch (part.mimeType) {
- case "audio/wav": {
- return {
- type: "input_audio",
- input_audio: { data: part.data, format: "wav" }
- };
- }
- case "audio/mp3":
- case "audio/mpeg": {
- return {
- type: "input_audio",
- input_audio: { data: part.data, format: "mp3" }
- };
+ if (part.mediaType.startsWith("image/")) {
+ const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
+ return {
+ type: "image_url",
+ image_url: {
+ url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${(0, import_provider_utils.convertToBase64)(part.data)}`,
+ // OpenAI specific extension: image detail
+ detail: (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.imageDetail
+ }
+ };
+ } else if (part.mediaType.startsWith("audio/")) {
+ if (part.data instanceof URL) {
+ throw new import_provider.UnsupportedFunctionalityError({
+ functionality: "audio file parts with URLs"
+ });
  }
- case "application/pdf": {
- return {
- type: "file",
- file: {
- filename: (_d = part.filename) != null ? _d : `part-${index}.pdf`,
- file_data: `data:application/pdf;base64,${part.data}`
- }
- };
+ switch (part.mediaType) {
+ case "audio/wav": {
+ return {
+ type: "input_audio",
+ input_audio: {
+ data: (0, import_provider_utils.convertToBase64)(part.data),
+ format: "wav"
+ }
+ };
+ }
+ case "audio/mp3":
+ case "audio/mpeg": {
+ return {
+ type: "input_audio",
+ input_audio: {
+ data: (0, import_provider_utils.convertToBase64)(part.data),
+ format: "mp3"
+ }
+ };
+ }
+ default: {
+ throw new import_provider.UnsupportedFunctionalityError({
+ functionality: `audio content parts with media type ${part.mediaType}`
+ });
+ }
  }
- default: {
+ } else if (part.mediaType === "application/pdf") {
+ if (part.data instanceof URL) {
  throw new import_provider.UnsupportedFunctionalityError({
- functionality: `File content part type ${part.mimeType} in user messages`
+ functionality: "PDF file parts with URLs"
  });
  }
+ return {
+ type: "file",
+ file: {
+ filename: (_c = part.filename) != null ? _c : `part-${index}.pdf`,
+ file_data: `data:application/pdf;base64,${part.data}`
+ }
+ };
+ } else {
+ throw new import_provider.UnsupportedFunctionalityError({
+ functionality: `file part media type ${part.mediaType}`
+ });
  }
  }
  }
@@ -158,41 +177,20 @@ function convertToOpenAIChatMessages({
  }
  }
  }
- if (useLegacyFunctionCalling) {
- if (toolCalls.length > 1) {
- throw new import_provider.UnsupportedFunctionalityError({
- functionality: "useLegacyFunctionCalling with multiple tool calls in one message"
- });
- }
- messages.push({
- role: "assistant",
- content: text,
- function_call: toolCalls.length > 0 ? toolCalls[0].function : void 0
- });
- } else {
- messages.push({
- role: "assistant",
- content: text,
- tool_calls: toolCalls.length > 0 ? toolCalls : void 0
- });
- }
+ messages.push({
+ role: "assistant",
+ content: text,
+ tool_calls: toolCalls.length > 0 ? toolCalls : void 0
+ });
  break;
  }
  case "tool": {
  for (const toolResponse of content) {
- if (useLegacyFunctionCalling) {
- messages.push({
- role: "function",
- name: toolResponse.toolName,
- content: JSON.stringify(toolResponse.result)
- });
- } else {
- messages.push({
- role: "tool",
- tool_call_id: toolResponse.toolCallId,
- content: JSON.stringify(toolResponse.result)
- });
- }
+ messages.push({
+ role: "tool",
+ tool_call_id: toolResponse.toolCallId,
+ content: JSON.stringify(toolResponse.result)
+ });
  }
  break;
  }
@@ -205,17 +203,17 @@ function convertToOpenAIChatMessages({
  return { messages, warnings };
  }

- // src/map-openai-chat-logprobs.ts
- function mapOpenAIChatLogProbsOutput(logprobs) {
- var _a, _b;
- return (_b = (_a = logprobs == null ? void 0 : logprobs.content) == null ? void 0 : _a.map(({ token, logprob, top_logprobs }) => ({
- token,
- logprob,
- topLogprobs: top_logprobs ? top_logprobs.map(({ token: token2, logprob: logprob2 }) => ({
- token: token2,
- logprob: logprob2
- })) : []
- }))) != null ? _b : void 0;
+ // src/get-response-metadata.ts
+ function getResponseMetadata({
+ id,
+ model,
+ created
+ }) {
+ return {
+ id: id != null ? id : void 0,
+ modelId: model != null ? model : void 0,
+ timestamp: created != null ? new Date(created * 1e3) : void 0
+ };
  }

  // src/map-openai-finish-reason.ts
@@ -235,18 +233,59 @@ function mapOpenAIFinishReason(finishReason) {
  }
  }

- // src/openai-error.ts
+ // src/openai-chat-options.ts
  var import_zod = require("zod");
+ var openaiProviderOptions = import_zod.z.object({
+ /**
+ * Modify the likelihood of specified tokens appearing in the completion.
+ *
+ * Accepts a JSON object that maps tokens (specified by their token ID in
+ * the GPT tokenizer) to an associated bias value from -100 to 100.
+ */
+ logitBias: import_zod.z.record(import_zod.z.coerce.number(), import_zod.z.number()).optional(),
+ /**
+ * Whether to enable parallel function calling during tool use. Default to true.
+ */
+ parallelToolCalls: import_zod.z.boolean().optional(),
+ /**
+ * A unique identifier representing your end-user, which can help OpenAI to
+ * monitor and detect abuse.
+ */
+ user: import_zod.z.string().optional(),
+ /**
+ * Reasoning effort for reasoning models. Defaults to `medium`.
+ */
+ reasoningEffort: import_zod.z.enum(["low", "medium", "high"]).optional(),
+ /**
+ * Maximum number of completion tokens to generate. Useful for reasoning models.
+ */
+ maxCompletionTokens: import_zod.z.number().optional(),
+ /**
+ * Whether to enable persistence in responses API.
+ */
+ store: import_zod.z.boolean().optional(),
+ /**
+ * Metadata to associate with the request.
+ */
+ metadata: import_zod.z.record(import_zod.z.string()).optional(),
+ /**
+ * Parameters for prediction mode.
+ */
+ prediction: import_zod.z.record(import_zod.z.any()).optional()
+ });
+
+ // src/openai-error.ts
+ var import_zod2 = require("zod");
  var import_provider_utils2 = require("@ai-sdk/provider-utils");
- var openaiErrorDataSchema = import_zod.z.object({
- error: import_zod.z.object({
- message: import_zod.z.string(),
+ var openaiErrorDataSchema = import_zod2.z.object({
+ error: import_zod2.z.object({
+ message: import_zod2.z.string(),
  // The additional information below is handled loosely to support
  // OpenAI-compatible providers that have slightly different error
  // responses:
- type: import_zod.z.string().nullish(),
- param: import_zod.z.any().nullish(),
- code: import_zod.z.union([import_zod.z.string(), import_zod.z.number()]).nullish()
+ type: import_zod2.z.string().nullish(),
+ param: import_zod2.z.any().nullish(),
+ code: import_zod2.z.union([import_zod2.z.string(), import_zod2.z.number()]).nullish()
  })
  });
  var openaiFailedResponseHandler = (0, import_provider_utils2.createJsonErrorResponseHandler)({
@@ -254,74 +293,17 @@ var openaiFailedResponseHandler = (0, import_provider_utils2.createJsonErrorResp
  errorToMessage: (data) => data.error.message
  });

- // src/get-response-metadata.ts
- function getResponseMetadata({
- id,
- model,
- created
- }) {
- return {
- id: id != null ? id : void 0,
- modelId: model != null ? model : void 0,
- timestamp: created != null ? new Date(created * 1e3) : void 0
- };
- }
-
  // src/openai-prepare-tools.ts
  var import_provider2 = require("@ai-sdk/provider");
  function prepareTools({
- mode,
- useLegacyFunctionCalling = false,
+ tools,
+ toolChoice,
  structuredOutputs
  }) {
- var _a;
- const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
+ tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
  const toolWarnings = [];
  if (tools == null) {
- return { tools: void 0, tool_choice: void 0, toolWarnings };
- }
- const toolChoice = mode.toolChoice;
- if (useLegacyFunctionCalling) {
- const openaiFunctions = [];
- for (const tool of tools) {
- if (tool.type === "provider-defined") {
- toolWarnings.push({ type: "unsupported-tool", tool });
- } else {
- openaiFunctions.push({
- name: tool.name,
- description: tool.description,
- parameters: tool.parameters
- });
- }
- }
- if (toolChoice == null) {
- return {
- functions: openaiFunctions,
- function_call: void 0,
- toolWarnings
- };
- }
- const type2 = toolChoice.type;
- switch (type2) {
- case "auto":
- case "none":
- case void 0:
- return {
- functions: openaiFunctions,
- function_call: void 0,
- toolWarnings
- };
- case "required":
- throw new import_provider2.UnsupportedFunctionalityError({
- functionality: "useLegacyFunctionCalling and toolChoice: required"
- });
- default:
- return {
- functions: openaiFunctions,
- function_call: { name: toolChoice.toolName },
- toolWarnings
- };
- }
+ return { tools: void 0, toolChoice: void 0, toolWarnings };
  }
  const openaiTools = [];
  for (const tool of tools) {
@@ -340,18 +322,18 @@ function prepareTools({
  }
  }
  if (toolChoice == null) {
- return { tools: openaiTools, tool_choice: void 0, toolWarnings };
+ return { tools: openaiTools, toolChoice: void 0, toolWarnings };
  }
  const type = toolChoice.type;
  switch (type) {
  case "auto":
  case "none":
  case "required":
- return { tools: openaiTools, tool_choice: type, toolWarnings };
+ return { tools: openaiTools, toolChoice: type, toolWarnings };
  case "tool":
  return {
  tools: openaiTools,
- tool_choice: {
+ toolChoice: {
  type: "function",
  function: {
  name: toolChoice.toolName
@@ -362,7 +344,7 @@ function prepareTools({
  default: {
  const _exhaustiveCheck = type;
  throw new import_provider2.UnsupportedFunctionalityError({
- functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`
+ functionality: `tool choice type: ${_exhaustiveCheck}`
  });
  }
  }
@@ -376,26 +358,17 @@ var OpenAIChatLanguageModel = class {
  this.settings = settings;
  this.config = config;
  }
- get supportsStructuredOutputs() {
- var _a;
- return (_a = this.settings.structuredOutputs) != null ? _a : isReasoningModel(this.modelId);
- }
- get defaultObjectGenerationMode() {
- if (isAudioModel(this.modelId)) {
- return "tool";
- }
- return this.supportsStructuredOutputs ? "json" : "tool";
- }
  get provider() {
  return this.config.provider;
  }
- get supportsImageUrls() {
- return !this.settings.downloadImages;
+ async getSupportedUrls() {
+ return {
+ "image/*": [/^https?:\/\/.*$/]
+ };
  }
  getArgs({
- mode,
  prompt,
- maxTokens,
+ maxOutputTokens,
  temperature,
  topP,
  topK,
@@ -404,39 +377,33 @@ var OpenAIChatLanguageModel = class {
  stopSequences,
  responseFormat,
  seed,
- providerMetadata
+ tools,
+ toolChoice,
+ providerOptions
  }) {
- var _a, _b, _c, _d, _e, _f, _g, _h;
- const type = mode.type;
+ var _a, _b, _c;
  const warnings = [];
+ const openaiOptions = (_a = (0, import_provider_utils3.parseProviderOptions)({
+ provider: "openai",
+ providerOptions,
+ schema: openaiProviderOptions
+ })) != null ? _a : {};
  if (topK != null) {
  warnings.push({
  type: "unsupported-setting",
  setting: "topK"
  });
  }
- if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !this.supportsStructuredOutputs) {
+ if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !this.settings.structuredOutputs) {
  warnings.push({
  type: "unsupported-setting",
  setting: "responseFormat",
  details: "JSON response format schema is only supported with structuredOutputs"
  });
  }
- const useLegacyFunctionCalling = this.settings.useLegacyFunctionCalling;
- if (useLegacyFunctionCalling && this.settings.parallelToolCalls === true) {
- throw new import_provider3.UnsupportedFunctionalityError({
- functionality: "useLegacyFunctionCalling with parallelToolCalls"
- });
- }
- if (useLegacyFunctionCalling && this.supportsStructuredOutputs) {
- throw new import_provider3.UnsupportedFunctionalityError({
- functionality: "structuredOutputs with useLegacyFunctionCalling"
- });
- }
  const { messages, warnings: messageWarnings } = convertToOpenAIChatMessages(
  {
  prompt,
- useLegacyFunctionCalling,
  systemMessageMode: getSystemMessageMode(this.modelId)
  }
  );
@@ -445,35 +412,36 @@ var OpenAIChatLanguageModel = class {
  // model id:
  model: this.modelId,
  // model specific settings:
- logit_bias: this.settings.logitBias,
- logprobs: this.settings.logprobs === true || typeof this.settings.logprobs === "number" ? true : void 0,
- top_logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
- user: this.settings.user,
- parallel_tool_calls: this.settings.parallelToolCalls,
+ logit_bias: openaiOptions.logitBias,
+ user: openaiOptions.user,
+ parallel_tool_calls: openaiOptions.parallelToolCalls,
  // standardized settings:
- max_tokens: maxTokens,
+ max_tokens: maxOutputTokens,
  temperature,
  top_p: topP,
  frequency_penalty: frequencyPenalty,
  presence_penalty: presencePenalty,
- response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? this.supportsStructuredOutputs && responseFormat.schema != null ? {
- type: "json_schema",
- json_schema: {
- schema: responseFormat.schema,
- strict: true,
- name: (_a = responseFormat.name) != null ? _a : "response",
- description: responseFormat.description
- }
- } : { type: "json_object" } : void 0,
+ response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? (
+ // TODO convert into provider option
+ this.settings.structuredOutputs && responseFormat.schema != null ? {
+ type: "json_schema",
+ json_schema: {
+ schema: responseFormat.schema,
+ strict: true,
+ name: (_b = responseFormat.name) != null ? _b : "response",
+ description: responseFormat.description
+ }
+ } : { type: "json_object" }
+ ) : void 0,
  stop: stopSequences,
  seed,
  // openai specific settings:
- // TODO remove in next major version; we auto-map maxTokens now
- max_completion_tokens: (_b = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _b.maxCompletionTokens,
- store: (_c = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _c.store,
- metadata: (_d = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _d.metadata,
- prediction: (_e = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _e.prediction,
- reasoning_effort: (_g = (_f = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _f.reasoningEffort) != null ? _g : this.settings.reasoningEffort,
+ // TODO remove in next major version; we auto-map maxOutputTokens now
+ max_completion_tokens: openaiOptions.maxCompletionTokens,
+ store: openaiOptions.store,
+ metadata: openaiOptions.metadata,
+ prediction: openaiOptions.prediction,
+ reasoning_effort: openaiOptions.reasoningEffort,
  // messages:
  messages
  };
@@ -517,102 +485,39 @@ var OpenAIChatLanguageModel = class {
  message: "logitBias is not supported for reasoning models"
  });
  }
- if (baseArgs.logprobs != null) {
- baseArgs.logprobs = void 0;
- warnings.push({
- type: "other",
- message: "logprobs is not supported for reasoning models"
- });
- }
- if (baseArgs.top_logprobs != null) {
- baseArgs.top_logprobs = void 0;
- warnings.push({
- type: "other",
- message: "topLogprobs is not supported for reasoning models"
- });
- }
  if (baseArgs.max_tokens != null) {
  if (baseArgs.max_completion_tokens == null) {
  baseArgs.max_completion_tokens = baseArgs.max_tokens;
  }
  baseArgs.max_tokens = void 0;
  }
- }
- switch (type) {
- case "regular": {
- const { tools, tool_choice, functions, function_call, toolWarnings } = prepareTools({
- mode,
- useLegacyFunctionCalling,
- structuredOutputs: this.supportsStructuredOutputs
+ } else if (this.modelId.startsWith("gpt-4o-search-preview") || this.modelId.startsWith("gpt-4o-mini-search-preview")) {
+ if (baseArgs.temperature != null) {
+ baseArgs.temperature = void 0;
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "temperature",
+ details: "temperature is not supported for the search preview models and has been removed."
  });
- return {
- args: {
- ...baseArgs,
- tools,
- tool_choice,
- functions,
- function_call
- },
- warnings: [...warnings, ...toolWarnings]
- };
- }
- case "object-json": {
- return {
- args: {
- ...baseArgs,
- response_format: this.supportsStructuredOutputs && mode.schema != null ? {
- type: "json_schema",
- json_schema: {
- schema: mode.schema,
- strict: true,
- name: (_h = mode.name) != null ? _h : "response",
- description: mode.description
- }
- } : { type: "json_object" }
- },
- warnings
- };
- }
- case "object-tool": {
- return {
- args: useLegacyFunctionCalling ? {
- ...baseArgs,
- function_call: {
- name: mode.tool.name
- },
- functions: [
- {
- name: mode.tool.name,
- description: mode.tool.description,
- parameters: mode.tool.parameters
- }
- ]
- } : {
- ...baseArgs,
- tool_choice: {
- type: "function",
- function: { name: mode.tool.name }
- },
- tools: [
- {
- type: "function",
- function: {
- name: mode.tool.name,
- description: mode.tool.description,
- parameters: mode.tool.parameters,
- strict: this.supportsStructuredOutputs ? true : void 0
- }
- }
- ]
- },
- warnings
- };
- }
- default: {
- const _exhaustiveCheck = type;
- throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
  }
  }
+ const {
+ tools: openaiTools,
+ toolChoice: openaiToolChoice,
+ toolWarnings
+ } = prepareTools({
+ tools,
+ toolChoice,
+ structuredOutputs: (_c = this.settings.structuredOutputs) != null ? _c : false
+ });
+ return {
+ args: {
+ ...baseArgs,
+ tools: openaiTools,
+ tool_choice: openaiToolChoice
+ },
+ warnings: [...warnings, ...toolWarnings]
+ };
  }
  async doGenerate(options) {
  var _a, _b, _c, _d, _e, _f, _g, _h;
@@ -635,10 +540,23 @@ var OpenAIChatLanguageModel = class {
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const { messages: rawPrompt, ...rawSettings } = body;
  const choice = response.choices[0];
- const completionTokenDetails = (_a = response.usage) == null ? void 0 : _a.completion_tokens_details;
- const promptTokenDetails = (_b = response.usage) == null ? void 0 : _b.prompt_tokens_details;
+ const content = [];
+ const text = choice.message.content;
+ if (text != null && text.length > 0) {
+ content.push({ type: "text", text });
+ }
+ for (const toolCall of (_a = choice.message.tool_calls) != null ? _a : []) {
+ content.push({
+ type: "tool-call",
+ toolCallType: "function",
+ toolCallId: (_b = toolCall.id) != null ? _b : (0, import_provider_utils3.generateId)(),
+ toolName: toolCall.function.name,
+ args: toolCall.function.arguments
+ });
+ }
+ const completionTokenDetails = (_c = response.usage) == null ? void 0 : _c.completion_tokens_details;
+ const promptTokenDetails = (_d = response.usage) == null ? void 0 : _d.prompt_tokens_details;
  const providerMetadata = { openai: {} };
  if ((completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null) {
  providerMetadata.openai.reasoningTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens;
@@ -653,81 +571,23 @@ var OpenAIChatLanguageModel = class {
  providerMetadata.openai.cachedPromptTokens = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens;
  }
  return {
- text: (_c = choice.message.content) != null ? _c : void 0,
- toolCalls: this.settings.useLegacyFunctionCalling && choice.message.function_call ? [
- {
- toolCallType: "function",
- toolCallId: (0, import_provider_utils3.generateId)(),
- toolName: choice.message.function_call.name,
- args: choice.message.function_call.arguments
- }
- ] : (_d = choice.message.tool_calls) == null ? void 0 : _d.map((toolCall) => {
- var _a2;
- return {
- toolCallType: "function",
- toolCallId: (_a2 = toolCall.id) != null ? _a2 : (0, import_provider_utils3.generateId)(),
- toolName: toolCall.function.name,
- args: toolCall.function.arguments
- };
- }),
+ content,
  finishReason: mapOpenAIFinishReason(choice.finish_reason),
  usage: {
- promptTokens: (_f = (_e = response.usage) == null ? void 0 : _e.prompt_tokens) != null ? _f : NaN,
- completionTokens: (_h = (_g = response.usage) == null ? void 0 : _g.completion_tokens) != null ? _h : NaN
+ inputTokens: (_f = (_e = response.usage) == null ? void 0 : _e.prompt_tokens) != null ? _f : void 0,
+ outputTokens: (_h = (_g = response.usage) == null ? void 0 : _g.completion_tokens) != null ? _h : void 0
+ },
+ request: { body },
+ response: {
+ ...getResponseMetadata(response),
+ headers: responseHeaders,
+ body: rawResponse
  },
- rawCall: { rawPrompt, rawSettings },
- rawResponse: { headers: responseHeaders, body: rawResponse },
- request: { body: JSON.stringify(body) },
- response: getResponseMetadata(response),
  warnings,
- logprobs: mapOpenAIChatLogProbsOutput(choice.logprobs),
  providerMetadata
  };
  }
  async doStream(options) {
- if (this.settings.simulateStreaming) {
- const result = await this.doGenerate(options);
- const simulatedStream = new ReadableStream({
- start(controller) {
- controller.enqueue({ type: "response-metadata", ...result.response });
- if (result.text) {
- controller.enqueue({
- type: "text-delta",
- textDelta: result.text
- });
- }
- if (result.toolCalls) {
- for (const toolCall of result.toolCalls) {
- controller.enqueue({
- type: "tool-call-delta",
- toolCallType: "function",
- toolCallId: toolCall.toolCallId,
- toolName: toolCall.toolName,
- argsTextDelta: toolCall.args
- });
- controller.enqueue({
- type: "tool-call",
- ...toolCall
- });
- }
- }
- controller.enqueue({
- type: "finish",
- finishReason: result.finishReason,
- usage: result.usage,
- logprobs: result.logprobs,
- providerMetadata: result.providerMetadata
- });
- controller.close();
- }
- });
- return {
- stream: simulatedStream,
- rawCall: result.rawCall,
- rawResponse: result.rawResponse,
- warnings: result.warnings
- };
- }
  const { args, warnings } = this.getArgs(options);
  const body = {
  ...args,
@@ -752,17 +612,18 @@ var OpenAIChatLanguageModel = class {
  const { messages: rawPrompt, ...rawSettings } = args;
  const toolCalls = [];
  let finishReason = "unknown";
- let usage = {
- promptTokens: void 0,
- completionTokens: void 0
+ const usage = {
+ inputTokens: void 0,
+ outputTokens: void 0
  };
- let logprobs;
  let isFirstChunk = true;
- const { useLegacyFunctionCalling } = this.settings;
  const providerMetadata = { openai: {} };
  return {
  stream: response.pipeThrough(
  new TransformStream({
+ start(controller) {
+ controller.enqueue({ type: "stream-start", warnings });
+ },
  transform(chunk, controller) {
  var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
  if (!chunk.success) {
@@ -790,10 +651,8 @@ var OpenAIChatLanguageModel = class {
  prompt_tokens_details,
  completion_tokens_details
  } = value.usage;
- usage = {
- promptTokens: prompt_tokens != null ? prompt_tokens : void 0,
- completionTokens: completion_tokens != null ? completion_tokens : void 0
- };
+ usage.inputTokens = prompt_tokens != null ? prompt_tokens : void 0;
+ usage.outputTokens = completion_tokens != null ? completion_tokens : void 0;
  if ((completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens) != null) {
  providerMetadata.openai.reasoningTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens;
  }
@@ -817,27 +676,12 @@ var OpenAIChatLanguageModel = class {
  const delta = choice.delta;
  if (delta.content != null) {
  controller.enqueue({
- type: "text-delta",
- textDelta: delta.content
+ type: "text",
+ text: delta.content
  });
  }
- const mappedLogprobs = mapOpenAIChatLogProbsOutput(
- choice == null ? void 0 : choice.logprobs
- );
- if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
- if (logprobs === void 0) logprobs = [];
- logprobs.push(...mappedLogprobs);
- }
- const mappedToolCalls = useLegacyFunctionCalling && delta.function_call != null ? [
- {
- type: "function",
- id: (0, import_provider_utils3.generateId)(),
- function: delta.function_call,
- index: 0
- }
- ] : delta.tool_calls;
- if (mappedToolCalls != null) {
- for (const toolCallDelta of mappedToolCalls) {
+ if (delta.tool_calls != null) {
+ for (const toolCallDelta of delta.tool_calls) {
  const index = toolCallDelta.index;
  if (toolCalls[index] == null) {
  if (toolCallDelta.type !== "function") {
@@ -919,125 +763,82 @@ var OpenAIChatLanguageModel = class {
  }
  },
  flush(controller) {
- var _a, _b;
  controller.enqueue({
  type: "finish",
  finishReason,
- logprobs,
- usage: {
- promptTokens: (_a = usage.promptTokens) != null ? _a : NaN,
- completionTokens: (_b = usage.completionTokens) != null ? _b : NaN
- },
+ usage,
  ...providerMetadata != null ? { providerMetadata } : {}
  });
  }
  })
  ),
- rawCall: { rawPrompt, rawSettings },
- rawResponse: { headers: responseHeaders },
- request: { body: JSON.stringify(body) },
- warnings
+ request: { body },
+ response: { headers: responseHeaders }
  };
  }
  };
- var openaiTokenUsageSchema = import_zod2.z.object({
- prompt_tokens: import_zod2.z.number().nullish(),
- completion_tokens: import_zod2.z.number().nullish(),
- prompt_tokens_details: import_zod2.z.object({
- cached_tokens: import_zod2.z.number().nullish()
+ var openaiTokenUsageSchema = import_zod3.z.object({
+ prompt_tokens: import_zod3.z.number().nullish(),
+ completion_tokens: import_zod3.z.number().nullish(),
+ prompt_tokens_details: import_zod3.z.object({
+ cached_tokens: import_zod3.z.number().nullish()
  }).nullish(),
- completion_tokens_details: import_zod2.z.object({
- reasoning_tokens: import_zod2.z.number().nullish(),
- accepted_prediction_tokens: import_zod2.z.number().nullish(),
- rejected_prediction_tokens: import_zod2.z.number().nullish()
+ completion_tokens_details: import_zod3.z.object({
+ reasoning_tokens: import_zod3.z.number().nullish(),
+ accepted_prediction_tokens: import_zod3.z.number().nullish(),
+ rejected_prediction_tokens: import_zod3.z.number().nullish()
  }).nullish()
  }).nullish();
- var openaiChatResponseSchema = import_zod2.z.object({
- id: import_zod2.z.string().nullish(),
- created: import_zod2.z.number().nullish(),
- model: import_zod2.z.string().nullish(),
- choices: import_zod2.z.array(
- import_zod2.z.object({
- message: import_zod2.z.object({
- role: import_zod2.z.literal("assistant").nullish(),
- content: import_zod2.z.string().nullish(),
- function_call: import_zod2.z.object({
- arguments: import_zod2.z.string(),
- name: import_zod2.z.string()
- }).nullish(),
- tool_calls: import_zod2.z.array(
- import_zod2.z.object({
- id: import_zod2.z.string().nullish(),
- type: import_zod2.z.literal("function"),
- function: import_zod2.z.object({
- name: import_zod2.z.string(),
- arguments: import_zod2.z.string()
+ var openaiChatResponseSchema = import_zod3.z.object({
+ id: import_zod3.z.string().nullish(),
+ created: import_zod3.z.number().nullish(),
+ model: import_zod3.z.string().nullish(),
+ choices: import_zod3.z.array(
+ import_zod3.z.object({
+ message: import_zod3.z.object({
+ role: import_zod3.z.literal("assistant").nullish(),
+ content: import_zod3.z.string().nullish(),
+ tool_calls: import_zod3.z.array(
+ import_zod3.z.object({
+ id: import_zod3.z.string().nullish(),
+ type: import_zod3.z.literal("function"),
+ function: import_zod3.z.object({
+ name: import_zod3.z.string(),
+ arguments: import_zod3.z.string()
  })
  })
  ).nullish()
  }),
- index: import_zod2.z.number(),
- logprobs: import_zod2.z.object({
- content: import_zod2.z.array(
- import_zod2.z.object({
- token: import_zod2.z.string(),
- logprob: import_zod2.z.number(),
- top_logprobs: import_zod2.z.array(
- import_zod2.z.object({
- token: import_zod2.z.string(),
- logprob: import_zod2.z.number()
- })
- )
- })
- ).nullable()
- }).nullish(),
- finish_reason: import_zod2.z.string().nullish()
+ index: import_zod3.z.number(),
+ finish_reason: import_zod3.z.string().nullish()
  })
  ),
  usage: openaiTokenUsageSchema
  });
- var openaiChatChunkSchema = import_zod2.z.union([
- import_zod2.z.object({
- id: import_zod2.z.string().nullish(),
- created: import_zod2.z.number().nullish(),
- model: import_zod2.z.string().nullish(),
- choices: import_zod2.z.array(
- import_zod2.z.object({
- delta: import_zod2.z.object({
- role: import_zod2.z.enum(["assistant"]).nullish(),
- content: import_zod2.z.string().nullish(),
- function_call: import_zod2.z.object({
- name: import_zod2.z.string().optional(),
- arguments: import_zod2.z.string().optional()
- }).nullish(),
- tool_calls: import_zod2.z.array(
- import_zod2.z.object({
- index: import_zod2.z.number(),
- id: import_zod2.z.string().nullish(),
- type: import_zod2.z.literal("function").optional(),
- function: import_zod2.z.object({
- name: import_zod2.z.string().nullish(),
- arguments: import_zod2.z.string().nullish()
+ var openaiChatChunkSchema = import_zod3.z.union([
+ import_zod3.z.object({
+ id: import_zod3.z.string().nullish(),
+ created: import_zod3.z.number().nullish(),
+ model: import_zod3.z.string().nullish(),
+ choices: import_zod3.z.array(
+ import_zod3.z.object({
+ delta: import_zod3.z.object({
+ role: import_zod3.z.enum(["assistant"]).nullish(),
+ content: import_zod3.z.string().nullish(),
+ tool_calls: import_zod3.z.array(
+ import_zod3.z.object({
+ index: import_zod3.z.number(),
+ id: import_zod3.z.string().nullish(),
+ type: import_zod3.z.literal("function").optional(),
+ function: import_zod3.z.object({
+ name: import_zod3.z.string().nullish(),
+ arguments: import_zod3.z.string().nullish()
  })
  })
  ).nullish()
  }).nullish(),
- logprobs: import_zod2.z.object({
- content: import_zod2.z.array(
- import_zod2.z.object({
- token: import_zod2.z.string(),
- logprob: import_zod2.z.number(),
- top_logprobs: import_zod2.z.array(
- import_zod2.z.object({
- token: import_zod2.z.string(),
- logprob: import_zod2.z.number()
- })
- )
- })
- ).nullable()
- }).nullish(),
- finish_reason: import_zod2.z.string().nullable().optional(),
- index: import_zod2.z.number()
+ finish_reason: import_zod3.z.string().nullable().optional(),
+ index: import_zod3.z.number()
  })
  ),
  usage: openaiTokenUsageSchema
@@ -1045,10 +846,7 @@ var openaiChatChunkSchema = import_zod2.z.union([
  openaiErrorDataSchema
  ]);
  function isReasoningModel(modelId) {
- return modelId === "o1" || modelId.startsWith("o1-") || modelId === "o3" || modelId.startsWith("o3-");
- }
- function isAudioModel(modelId) {
- return modelId.startsWith("gpt-4o-audio-preview");
+ return modelId.startsWith("o");
  }
  function getSystemMessageMode(modelId) {
  var _a, _b;
@@ -1079,9 +877,8 @@ var reasoningModels = {
  };

  // src/openai-completion-language-model.ts
- var import_provider5 = require("@ai-sdk/provider");
  var import_provider_utils4 = require("@ai-sdk/provider-utils");
- var import_zod3 = require("zod");
+ var import_zod4 = require("zod");

  // src/convert-to-openai-completion-prompt.ts
  var import_provider4 = require("@ai-sdk/provider");
@@ -1115,13 +912,8 @@ function convertToOpenAICompletionPrompt({
  case "text": {
  return part.text;
  }
- case "image": {
- throw new import_provider4.UnsupportedFunctionalityError({
- functionality: "images"
- });
- }
  }
- }).join("");
+ }).filter(Boolean).join("");
  text += `${user}:
${userMessage}

@@ -1167,25 +959,10 @@ ${user}:`]
  };
  }

- // src/map-openai-completion-logprobs.ts
- function mapOpenAICompletionLogProbs(logprobs) {
- return logprobs == null ? void 0 : logprobs.tokens.map((token, index) => ({
- token,
- logprob: logprobs.token_logprobs[index],
- topLogprobs: logprobs.top_logprobs ? Object.entries(logprobs.top_logprobs[index]).map(
- ([token2, logprob]) => ({
- token: token2,
- logprob
- })
- ) : []
- }));
- }
-
  // src/openai-completion-language-model.ts
  var OpenAICompletionLanguageModel = class {
  constructor(modelId, settings, config) {
  this.specificationVersion = "v2";
- this.defaultObjectGenerationMode = void 0;
  this.modelId = modelId;
  this.settings = settings;
  this.config = config;
@@ -1193,11 +970,15 @@ var OpenAICompletionLanguageModel = class {
  get provider() {
  return this.config.provider;
  }
+ async getSupportedUrls() {
+ return {
+ // no supported urls for completion models
+ };
+ }
  getArgs({
- mode,
  inputFormat,
  prompt,
- maxTokens,
+ maxOutputTokens,
  temperature,
  topP,
  topK,
@@ -1205,16 +986,19 @@ var OpenAICompletionLanguageModel = class {
  presencePenalty,
  stopSequences: userStopSequences,
  responseFormat,
+ tools,
+ toolChoice,
  seed
  }) {
- var _a;
- const type = mode.type;
  const warnings = [];
  if (topK != null) {
- warnings.push({
- type: "unsupported-setting",
- setting: "topK"
- });
+ warnings.push({ type: "unsupported-setting", setting: "topK" });
+ }
+ if (tools == null ? void 0 : tools.length) {
+ warnings.push({ type: "unsupported-setting", setting: "tools" });
+ }
+ if (toolChoice != null) {
+ warnings.push({ type: "unsupported-setting", setting: "toolChoice" });
  }
  if (responseFormat != null && responseFormat.type !== "text") {
  warnings.push({
@@ -1225,56 +1009,29 @@ var OpenAICompletionLanguageModel = class {
  }
  }
  const { prompt: completionPrompt, stopSequences } = convertToOpenAICompletionPrompt({ prompt, inputFormat });
  const stop = [...stopSequences != null ? stopSequences : [], ...userStopSequences != null ? userStopSequences : []];
- const baseArgs = {
- // model id:
- model: this.modelId,
- // model specific settings:
- echo: this.settings.echo,
- logit_bias: this.settings.logitBias,
- logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
- suffix: this.settings.suffix,
- user: this.settings.user,
- // standardized settings:
- max_tokens: maxTokens,
- temperature,
- top_p: topP,
- frequency_penalty: frequencyPenalty,
- presence_penalty: presencePenalty,
- seed,
- // prompt:
- prompt: completionPrompt,
- // stop sequences:
- stop: stop.length > 0 ? stop : void 0
+ return {
+ args: {
+ // model id:
+ model: this.modelId,
+ // model specific settings:
+ echo: this.settings.echo,
+ logit_bias: this.settings.logitBias,
+ suffix: this.settings.suffix,
+ user: this.settings.user,
+ // standardized settings:
+ max_tokens: maxOutputTokens,
+ temperature,
+ top_p: topP,
+ frequency_penalty: frequencyPenalty,
+ presence_penalty: presencePenalty,
+ seed,
+ // prompt:
+ prompt: completionPrompt,
+ // stop sequences:
+ stop: stop.length > 0 ? stop : void 0
+ },
+ warnings
  };
- switch (type) {
- case "regular": {
- if ((_a = mode.tools) == null ? void 0 : _a.length) {
- throw new import_provider5.UnsupportedFunctionalityError({
- functionality: "tools"
- });
- }
- if (mode.toolChoice) {
- throw new import_provider5.UnsupportedFunctionalityError({
- functionality: "toolChoice"
- });
- }
- return { args: baseArgs, warnings };
- }
- case "object-json": {
- throw new import_provider5.UnsupportedFunctionalityError({
- functionality: "object-json mode"
- });
- }
- case "object-tool": {
- throw new import_provider5.UnsupportedFunctionalityError({
- functionality: "object-tool mode"
- });
- }
- default: {
- const _exhaustiveCheck = type;
- throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
- }
- }
  }
  async doGenerate(options) {
@@ -1296,21 +1053,21 @@ var OpenAICompletionLanguageModel = class {
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const { prompt: rawPrompt, ...rawSettings } = args;
  const choice = response.choices[0];
  return {
- text: choice.text,
+ content: [{ type: "text", text: choice.text }],
  usage: {
- promptTokens: response.usage.prompt_tokens,
- completionTokens: response.usage.completion_tokens
+ inputTokens: response.usage.prompt_tokens,
+ outputTokens: response.usage.completion_tokens
  },
  finishReason: mapOpenAIFinishReason(choice.finish_reason),
- logprobs: mapOpenAICompletionLogProbs(choice.logprobs),
- rawCall: { rawPrompt, rawSettings },
- rawResponse: { headers: responseHeaders, body: rawResponse },
- response: getResponseMetadata(response),
- warnings,
- request: { body: JSON.stringify(args) }
+ request: { body: args },
+ response: {
+ ...getResponseMetadata(response),
+ headers: responseHeaders,
+ body: rawResponse
+ },
+ warnings
  };
  }
  async doStream(options) {
@@ -1335,17 +1092,18 @@ var OpenAICompletionLanguageModel = class {
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const { prompt: rawPrompt, ...rawSettings } = args;
  let finishReason = "unknown";
- let usage = {
- promptTokens: Number.NaN,
- completionTokens: Number.NaN
+ const usage = {
+ inputTokens: void 0,
+ outputTokens: void 0
  };
- let logprobs;
  let isFirstChunk = true;
  return {
  stream: response.pipeThrough(
  new TransformStream({
+ start(controller) {
+ controller.enqueue({ type: "stream-start", warnings });
+ },
  transform(chunk, controller) {
  if (!chunk.success) {
  finishReason = "error";
@@ -1366,10 +1124,8 @@ var OpenAICompletionLanguageModel = class {
  });
  }
  if (value.usage != null) {
- usage = {
- promptTokens: value.usage.prompt_tokens,
- completionTokens: value.usage.completion_tokens
- };
+ usage.inputTokens = value.usage.prompt_tokens;
+ usage.outputTokens = value.usage.completion_tokens;
  }
  const choice = value.choices[0];
  if ((choice == null ? void 0 : choice.finish_reason) != null) {
@@ -1377,87 +1133,84 @@ var OpenAICompletionLanguageModel = class {
  }
  if ((choice == null ? void 0 : choice.text) != null) {
  controller.enqueue({
- type: "text-delta",
- textDelta: choice.text
+ type: "text",
+ text: choice.text
  });
  }
- const mappedLogprobs = mapOpenAICompletionLogProbs(
- choice == null ? void 0 : choice.logprobs
- );
- if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
- if (logprobs === void 0) logprobs = [];
- logprobs.push(...mappedLogprobs);
- }
  },
  flush(controller) {
  controller.enqueue({
  type: "finish",
  finishReason,
- logprobs,
  usage
  });
  }
  })
  ),
- rawCall: { rawPrompt, rawSettings },
- rawResponse: { headers: responseHeaders },
- warnings,
- request: { body: JSON.stringify(body) }
+ request: { body },
+ response: { headers: responseHeaders }
  };
  }
  };
- var openaiCompletionResponseSchema = import_zod3.z.object({
- id: import_zod3.z.string().nullish(),
- created: import_zod3.z.number().nullish(),
- model: import_zod3.z.string().nullish(),
- choices: import_zod3.z.array(
- import_zod3.z.object({
- text: import_zod3.z.string(),
- finish_reason: import_zod3.z.string(),
- logprobs: import_zod3.z.object({
- tokens: import_zod3.z.array(import_zod3.z.string()),
- token_logprobs: import_zod3.z.array(import_zod3.z.number()),
- top_logprobs: import_zod3.z.array(import_zod3.z.record(import_zod3.z.string(), import_zod3.z.number())).nullable()
- }).nullish()
+ var openaiCompletionResponseSchema = import_zod4.z.object({
+ id: import_zod4.z.string().nullish(),
+ created: import_zod4.z.number().nullish(),
+ model: import_zod4.z.string().nullish(),
+ choices: import_zod4.z.array(
+ import_zod4.z.object({
+ text: import_zod4.z.string(),
+ finish_reason: import_zod4.z.string()
  })
  ),
- usage: import_zod3.z.object({
- prompt_tokens: import_zod3.z.number(),
- completion_tokens: import_zod3.z.number()
+ usage: import_zod4.z.object({
+ prompt_tokens: import_zod4.z.number(),
+ completion_tokens: import_zod4.z.number()
  })
  });
- var openaiCompletionChunkSchema = import_zod3.z.union([
- import_zod3.z.object({
- id: import_zod3.z.string().nullish(),
- created: import_zod3.z.number().nullish(),
- model: import_zod3.z.string().nullish(),
- choices: import_zod3.z.array(
- import_zod3.z.object({
- text: import_zod3.z.string(),
- finish_reason: import_zod3.z.string().nullish(),
- index: import_zod3.z.number(),
- logprobs: import_zod3.z.object({
- tokens: import_zod3.z.array(import_zod3.z.string()),
- token_logprobs: import_zod3.z.array(import_zod3.z.number()),
- top_logprobs: import_zod3.z.array(import_zod3.z.record(import_zod3.z.string(), import_zod3.z.number())).nullable()
- }).nullish()
+ var openaiCompletionChunkSchema = import_zod4.z.union([
+ import_zod4.z.object({
+ id: import_zod4.z.string().nullish(),
+ created: import_zod4.z.number().nullish(),
+ model: import_zod4.z.string().nullish(),
+ choices: import_zod4.z.array(
+ import_zod4.z.object({
+ text: import_zod4.z.string(),
+ finish_reason: import_zod4.z.string().nullish(),
+ index: import_zod4.z.number()
  })
  ),
- usage: import_zod3.z.object({
- prompt_tokens: import_zod3.z.number(),
- completion_tokens: import_zod3.z.number()
+ usage: import_zod4.z.object({
+ prompt_tokens: import_zod4.z.number(),
+ completion_tokens: import_zod4.z.number()
  }).nullish()
  }),
  openaiErrorDataSchema
  ]);

  // src/openai-embedding-model.ts
- var import_provider6 = require("@ai-sdk/provider");
+ var import_provider5 = require("@ai-sdk/provider");
  var import_provider_utils5 = require("@ai-sdk/provider-utils");
- var import_zod4 = require("zod");
+ var import_zod6 = require("zod");
+
+ // src/openai-embedding-options.ts
+ var import_zod5 = require("zod");
+ var openaiEmbeddingProviderOptions = import_zod5.z.object({
+ /**
+ The number of dimensions the resulting output embeddings should have.
+ Only supported in text-embedding-3 and later models.
+ */
+ dimensions: import_zod5.z.number().optional(),
+ /**
+ A unique identifier representing your end-user, which can help OpenAI to
+ monitor and detect abuse. Learn more.
+ */
+ user: import_zod5.z.string().optional()
+ });
+
+ // src/openai-embedding-model.ts
  var OpenAIEmbeddingModel = class {
  constructor(modelId, settings, config) {
- this.specificationVersion = "v1";
+ this.specificationVersion = "v2";
  this.modelId = modelId;
  this.settings = settings;
  this.config = config;
@@ -1476,17 +1229,28 @@ var OpenAIEmbeddingModel = class {
  async doEmbed({
  values,
  headers,
- abortSignal
+ abortSignal,
+ providerOptions
  }) {
+ var _a;
  if (values.length > this.maxEmbeddingsPerCall) {
- throw new import_provider6.TooManyEmbeddingValuesForCallError({
+ throw new import_provider5.TooManyEmbeddingValuesForCallError({
  provider: this.provider,
  modelId: this.modelId,
  maxEmbeddingsPerCall: this.maxEmbeddingsPerCall,
  values
  });
  }
- const { responseHeaders, value: response } = await (0, import_provider_utils5.postJsonToApi)({
+ const openaiOptions = (_a = (0, import_provider_utils5.parseProviderOptions)({
+ provider: "openai",
+ providerOptions,
+ schema: openaiEmbeddingProviderOptions
+ })) != null ? _a : {};
+ const {
+ responseHeaders,
+ value: response,
+ rawValue
+ } = await (0, import_provider_utils5.postJsonToApi)({
  url: this.config.url({
  path: "/embeddings",
  modelId: this.modelId
@@ -1496,8 +1260,8 @@ var OpenAIEmbeddingModel = class {
  model: this.modelId,
  input: values,
  encoding_format: "float",
- dimensions: this.settings.dimensions,
- user: this.settings.user
+ dimensions: openaiOptions.dimensions,
+ user: openaiOptions.user
  },
  failedResponseHandler: openaiFailedResponseHandler,
  successfulResponseHandler: (0, import_provider_utils5.createJsonResponseHandler)(
@@ -1509,18 +1273,18 @@ var OpenAIEmbeddingModel = class {
  return {
  embeddings: response.data.map((item) => item.embedding),
  usage: response.usage ? { tokens: response.usage.prompt_tokens } : void 0,
- rawResponse: { headers: responseHeaders }
+ response: { headers: responseHeaders, body: rawValue }
  };
  }
  };
- var openaiTextEmbeddingResponseSchema = import_zod4.z.object({
- data: import_zod4.z.array(import_zod4.z.object({ embedding: import_zod4.z.array(import_zod4.z.number()) })),
- usage: import_zod4.z.object({ prompt_tokens: import_zod4.z.number() }).nullish()
+ var openaiTextEmbeddingResponseSchema = import_zod6.z.object({
+ data: import_zod6.z.array(import_zod6.z.object({ embedding: import_zod6.z.array(import_zod6.z.number()) })),
+ usage: import_zod6.z.object({ prompt_tokens: import_zod6.z.number() }).nullish()
  });

  // src/openai-image-model.ts
  var import_provider_utils6 = require("@ai-sdk/provider-utils");
- var import_zod5 = require("zod");
+ var import_zod7 = require("zod");

  // src/openai-image-settings.ts
  var modelMaxImagesPerCall = {
@@ -1598,17 +1362,284 @@ var OpenAIImageModel = class {
  };
  }
  };
- var openaiImageResponseSchema = import_zod5.z.object({
- data: import_zod5.z.array(import_zod5.z.object({ b64_json: import_zod5.z.string() }))
+ var openaiImageResponseSchema = import_zod7.z.object({
+ data: import_zod7.z.array(import_zod7.z.object({ b64_json: import_zod7.z.string() }))
  });
 
- // src/responses/openai-responses-language-model.ts
+ // src/openai-transcription-model.ts
+ var import_provider_utils7 = require("@ai-sdk/provider-utils");
+ var import_zod8 = require("zod");
+ var openAIProviderOptionsSchema = import_zod8.z.object({
+ include: import_zod8.z.array(import_zod8.z.string()).nullish(),
+ language: import_zod8.z.string().nullish(),
+ prompt: import_zod8.z.string().nullish(),
+ temperature: import_zod8.z.number().min(0).max(1).nullish().default(0),
+ timestampGranularities: import_zod8.z.array(import_zod8.z.enum(["word", "segment"])).nullish().default(["segment"])
+ });
+ var languageMap = {
+ afrikaans: "af",
+ arabic: "ar",
+ armenian: "hy",
+ azerbaijani: "az",
+ belarusian: "be",
+ bosnian: "bs",
+ bulgarian: "bg",
+ catalan: "ca",
+ chinese: "zh",
+ croatian: "hr",
+ czech: "cs",
+ danish: "da",
+ dutch: "nl",
+ english: "en",
+ estonian: "et",
+ finnish: "fi",
+ french: "fr",
+ galician: "gl",
+ german: "de",
+ greek: "el",
+ hebrew: "he",
+ hindi: "hi",
+ hungarian: "hu",
+ icelandic: "is",
+ indonesian: "id",
+ italian: "it",
+ japanese: "ja",
+ kannada: "kn",
+ kazakh: "kk",
+ korean: "ko",
+ latvian: "lv",
+ lithuanian: "lt",
+ macedonian: "mk",
+ malay: "ms",
+ marathi: "mr",
+ maori: "mi",
+ nepali: "ne",
+ norwegian: "no",
+ persian: "fa",
+ polish: "pl",
+ portuguese: "pt",
+ romanian: "ro",
+ russian: "ru",
+ serbian: "sr",
+ slovak: "sk",
+ slovenian: "sl",
+ spanish: "es",
+ swahili: "sw",
+ swedish: "sv",
+ tagalog: "tl",
+ tamil: "ta",
+ thai: "th",
+ turkish: "tr",
+ ukrainian: "uk",
+ urdu: "ur",
+ vietnamese: "vi",
+ welsh: "cy"
+ };
+ var OpenAITranscriptionModel = class {
+ constructor(modelId, config) {
+ this.modelId = modelId;
+ this.config = config;
+ this.specificationVersion = "v1";
+ }
+ get provider() {
+ return this.config.provider;
+ }
+ getArgs({
+ audio,
+ mediaType,
+ providerOptions
+ }) {
+ var _a, _b, _c, _d, _e;
+ const warnings = [];
+ const openAIOptions = (0, import_provider_utils7.parseProviderOptions)({
+ provider: "openai",
+ providerOptions,
+ schema: openAIProviderOptionsSchema
+ });
+ const formData = new FormData();
+ const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([(0, import_provider_utils7.convertBase64ToUint8Array)(audio)]);
+ formData.append("model", this.modelId);
+ formData.append("file", new File([blob], "audio", { type: mediaType }));
+ if (openAIOptions) {
+ const transcriptionModelOptions = {
+ include: (_a = openAIOptions.include) != null ? _a : void 0,
+ language: (_b = openAIOptions.language) != null ? _b : void 0,
+ prompt: (_c = openAIOptions.prompt) != null ? _c : void 0,
+ temperature: (_d = openAIOptions.temperature) != null ? _d : void 0,
+ timestamp_granularities: (_e = openAIOptions.timestampGranularities) != null ? _e : void 0
+ };
+ for (const key in transcriptionModelOptions) {
+ const value = transcriptionModelOptions[key];
+ if (value !== void 0) {
+ formData.append(key, String(value));
+ }
+ }
+ }
+ return {
+ formData,
+ warnings
+ };
+ }
+ async doGenerate(options) {
+ var _a, _b, _c, _d, _e, _f;
+ const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
+ const { formData, warnings } = this.getArgs(options);
+ const {
+ value: response,
+ responseHeaders,
+ rawValue: rawResponse
+ } = await (0, import_provider_utils7.postFormDataToApi)({
+ url: this.config.url({
+ path: "/audio/transcriptions",
+ modelId: this.modelId
+ }),
+ headers: (0, import_provider_utils7.combineHeaders)(this.config.headers(), options.headers),
+ formData,
+ failedResponseHandler: openaiFailedResponseHandler,
+ successfulResponseHandler: (0, import_provider_utils7.createJsonResponseHandler)(
+ openaiTranscriptionResponseSchema
+ ),
+ abortSignal: options.abortSignal,
+ fetch: this.config.fetch
+ });
+ const language = response.language != null && response.language in languageMap ? languageMap[response.language] : void 0;
+ return {
+ text: response.text,
+ segments: (_e = (_d = response.words) == null ? void 0 : _d.map((word) => ({
+ text: word.word,
+ startSecond: word.start,
+ endSecond: word.end
+ }))) != null ? _e : [],
+ language,
+ durationInSeconds: (_f = response.duration) != null ? _f : void 0,
+ warnings,
+ response: {
+ timestamp: currentDate,
+ modelId: this.modelId,
+ headers: responseHeaders,
+ body: rawResponse
+ }
+ };
+ }
+ };
+ var openaiTranscriptionResponseSchema = import_zod8.z.object({
+ text: import_zod8.z.string(),
+ language: import_zod8.z.string().nullish(),
+ duration: import_zod8.z.number().nullish(),
+ words: import_zod8.z.array(
+ import_zod8.z.object({
+ word: import_zod8.z.string(),
+ start: import_zod8.z.number(),
+ end: import_zod8.z.number()
+ })
+ ).nullish()
+ });
+
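The transcription model above is wired entirely through @ai-sdk/provider-utils: provider options are validated with openAIProviderOptionsSchema, the audio is posted as multipart form data, and the response is narrowed by openaiTranscriptionResponseSchema. A minimal usage sketch, assuming the internal entry point is exposed as @ai-sdk/openai/internal and hand-rolling the config object (the provider string, URL builder, and header callback here are illustrative assumptions, inferred from the calls in doGenerate above):

// Sketch only: the config values are assumptions, not part of this diff.
const { OpenAITranscriptionModel } = require("@ai-sdk/openai/internal");
const fs = require("node:fs");

const model = new OpenAITranscriptionModel("whisper-1", {
  provider: "openai.transcription",
  url: ({ path }) => `https://api.openai.com/v1${path}`,
  headers: () => ({ Authorization: `Bearer ${process.env.OPENAI_API_KEY}` }),
});

async function transcribe() {
  const result = await model.doGenerate({
    audio: new Uint8Array(fs.readFileSync("speech.wav")),
    mediaType: "audio/wav",
    providerOptions: {
      openai: {
        language: "en",
        timestampGranularities: ["word"], // forwarded as timestamp_granularities
      },
    },
  });
  console.log(result.text, result.segments, result.durationInSeconds);
}

transcribe();

Note that response.language comes back as a full language name; the languageMap table exists to translate it back to an ISO 639-1 code.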
+ // src/openai-speech-model.ts
  var import_provider_utils8 = require("@ai-sdk/provider-utils");
- var import_zod6 = require("zod");
+ var import_zod9 = require("zod");
+ var OpenAIProviderOptionsSchema = import_zod9.z.object({
+ instructions: import_zod9.z.string().nullish(),
+ speed: import_zod9.z.number().min(0.25).max(4).default(1).nullish()
+ });
+ var OpenAISpeechModel = class {
+ constructor(modelId, config) {
+ this.modelId = modelId;
+ this.config = config;
+ this.specificationVersion = "v1";
+ }
+ get provider() {
+ return this.config.provider;
+ }
+ getArgs({
+ text,
+ voice = "alloy",
+ outputFormat = "mp3",
+ speed,
+ instructions,
+ providerOptions
+ }) {
+ const warnings = [];
+ const openAIOptions = (0, import_provider_utils8.parseProviderOptions)({
+ provider: "openai",
+ providerOptions,
+ schema: OpenAIProviderOptionsSchema
+ });
+ const requestBody = {
+ model: this.modelId,
+ input: text,
+ voice,
+ response_format: "mp3",
+ speed,
+ instructions
+ };
+ if (outputFormat) {
+ if (["mp3", "opus", "aac", "flac", "wav", "pcm"].includes(outputFormat)) {
+ requestBody.response_format = outputFormat;
+ } else {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "outputFormat",
+ details: `Unsupported output format: ${outputFormat}. Using mp3 instead.`
+ });
+ }
+ }
+ if (openAIOptions) {
+ const speechModelOptions = {};
+ for (const key in speechModelOptions) {
+ const value = speechModelOptions[key];
+ if (value !== void 0) {
+ requestBody[key] = value;
+ }
+ }
+ }
+ return {
+ requestBody,
+ warnings
+ };
+ }
+ async doGenerate(options) {
+ var _a, _b, _c;
+ const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
+ const { requestBody, warnings } = this.getArgs(options);
+ const {
+ value: audio,
+ responseHeaders,
+ rawValue: rawResponse
+ } = await (0, import_provider_utils8.postJsonToApi)({
+ url: this.config.url({
+ path: "/audio/speech",
+ modelId: this.modelId
+ }),
+ headers: (0, import_provider_utils8.combineHeaders)(this.config.headers(), options.headers),
+ body: requestBody,
+ failedResponseHandler: openaiFailedResponseHandler,
+ successfulResponseHandler: (0, import_provider_utils8.createBinaryResponseHandler)(),
+ abortSignal: options.abortSignal,
+ fetch: this.config.fetch
+ });
+ return {
+ audio,
+ warnings,
+ request: {
+ body: JSON.stringify(requestBody)
+ },
+ response: {
+ timestamp: currentDate,
+ modelId: this.modelId,
+ headers: responseHeaders,
+ body: rawResponse
+ }
+ };
+ }
+ };
+
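OpenAISpeechModel mirrors the transcription model but posts JSON to /audio/speech and reads the reply with createBinaryResponseHandler. One quirk worth noting: getArgs parses provider options against OpenAIProviderOptionsSchema, but the speechModelOptions loop iterates an empty object, so instructions and speed only reach the request body via the direct call options. A sketch under the same assumptions as above:

// Sketch only: the config values are assumptions, not part of this diff.
const { OpenAISpeechModel } = require("@ai-sdk/openai/internal");

const speechModel = new OpenAISpeechModel("tts-1", {
  provider: "openai.speech",
  url: ({ path }) => `https://api.openai.com/v1${path}`,
  headers: () => ({ Authorization: `Bearer ${process.env.OPENAI_API_KEY}` }),
});

async function speak() {
  const { audio } = await speechModel.doGenerate({
    text: "Hello from the canary build.",
    voice: "alloy", // also the default in getArgs
    outputFormat: "wav", // unsupported formats warn and fall back to mp3
    speed: 1.25,
    instructions: "Speak calmly.",
  });
  // `audio` is the raw binary payload from createBinaryResponseHandler
  require("node:fs").writeFileSync("out.wav", Buffer.from(audio));
}

speak();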
+ // src/responses/openai-responses-language-model.ts
+ var import_provider_utils9 = require("@ai-sdk/provider-utils");
+ var import_zod10 = require("zod");
 
  // src/responses/convert-to-openai-responses-messages.ts
- var import_provider7 = require("@ai-sdk/provider");
- var import_provider_utils7 = require("@ai-sdk/provider-utils");
+ var import_provider6 = require("@ai-sdk/provider");
  function convertToOpenAIResponsesMessages({
  prompt,
  systemMessageMode
@@ -1647,38 +1678,35 @@ function convertToOpenAIResponsesMessages({
  messages.push({
  role: "user",
  content: content.map((part, index) => {
- var _a, _b, _c, _d;
+ var _a, _b, _c;
  switch (part.type) {
  case "text": {
  return { type: "input_text", text: part.text };
  }
- case "image": {
- return {
- type: "input_image",
- image_url: part.image instanceof URL ? part.image.toString() : `data:${(_a = part.mimeType) != null ? _a : "image/jpeg"};base64,${(0, import_provider_utils7.convertUint8ArrayToBase64)(part.image)}`,
- // OpenAI specific extension: image detail
- detail: (_c = (_b = part.providerMetadata) == null ? void 0 : _b.openai) == null ? void 0 : _c.imageDetail
- };
- }
  case "file": {
- if (part.data instanceof URL) {
- throw new import_provider7.UnsupportedFunctionalityError({
- functionality: "File URLs in user messages"
- });
- }
- switch (part.mimeType) {
- case "application/pdf": {
- return {
- type: "input_file",
- filename: (_d = part.filename) != null ? _d : `part-${index}.pdf`,
- file_data: `data:application/pdf;base64,${part.data}`
- };
- }
- default: {
- throw new import_provider7.UnsupportedFunctionalityError({
- functionality: "Only PDF files are supported in user messages"
+ if (part.mediaType.startsWith("image/")) {
+ const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
+ return {
+ type: "input_image",
+ image_url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${part.data}`,
+ // OpenAI specific extension: image detail
+ detail: (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.imageDetail
+ };
+ } else if (part.mediaType === "application/pdf") {
+ if (part.data instanceof URL) {
+ throw new import_provider6.UnsupportedFunctionalityError({
+ functionality: "PDF file parts with URLs"
  });
  }
+ return {
+ type: "input_file",
+ filename: (_c = part.filename) != null ? _c : `part-${index}.pdf`,
+ file_data: `data:application/pdf;base64,${part.data}`
+ };
+ } else {
+ throw new import_provider6.UnsupportedFunctionalityError({
+ functionality: `file part media type ${part.mediaType}`
+ });
  }
  }
  }
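User content for the Responses API now rides on a single file part type discriminated by mediaType, replacing the old dedicated image part. A sketch of the accepted shapes (data values are placeholders):

// Sketch: prompt content parts as the converter above expects them.
const content = [
  { type: "text", text: "Summarize the attachments." },
  {
    type: "file",
    mediaType: "image/png", // "image/*" is coerced to image/jpeg
    data: new URL("https://example.com/cat.png"), // URLs pass straight through
    providerOptions: { openai: { imageDetail: "low" } },
  },
  {
    type: "file",
    mediaType: "application/pdf",
    data: "JVBERi0xLjcK...", // base64 string; URL data for PDFs throws
    filename: "report.pdf",
  },
];

Any other media type now fails with an UnsupportedFunctionalityError naming the offending type.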
@@ -1747,18 +1775,17 @@ function mapOpenAIResponseFinishReason({
  }
 
  // src/responses/openai-responses-prepare-tools.ts
- var import_provider8 = require("@ai-sdk/provider");
+ var import_provider7 = require("@ai-sdk/provider");
  function prepareResponsesTools({
- mode,
+ tools,
+ toolChoice,
  strict
  }) {
- var _a;
- const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
+ tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
  const toolWarnings = [];
  if (tools == null) {
- return { tools: void 0, tool_choice: void 0, toolWarnings };
+ return { tools: void 0, toolChoice: void 0, toolWarnings };
  }
- const toolChoice = mode.toolChoice;
  const openaiTools = [];
  for (const tool of tools) {
  switch (tool.type) {
@@ -1791,37 +1818,24 @@ function prepareResponsesTools({
  }
  }
  if (toolChoice == null) {
- return { tools: openaiTools, tool_choice: void 0, toolWarnings };
+ return { tools: openaiTools, toolChoice: void 0, toolWarnings };
  }
  const type = toolChoice.type;
  switch (type) {
  case "auto":
  case "none":
  case "required":
- return { tools: openaiTools, tool_choice: type, toolWarnings };
- case "tool": {
- if (toolChoice.toolName === "web_search_preview") {
- return {
- tools: openaiTools,
- tool_choice: {
- type: "web_search_preview"
- },
- toolWarnings
- };
- }
+ return { tools: openaiTools, toolChoice: type, toolWarnings };
+ case "tool":
  return {
  tools: openaiTools,
- tool_choice: {
- type: "function",
- name: toolChoice.toolName
- },
+ toolChoice: toolChoice.toolName === "web_search_preview" ? { type: "web_search_preview" } : { type: "function", name: toolChoice.toolName },
  toolWarnings
  };
- }
  default: {
  const _exhaustiveCheck = type;
- throw new import_provider8.UnsupportedFunctionalityError({
- functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`
+ throw new import_provider7.UnsupportedFunctionalityError({
+ functionality: `tool choice type: ${_exhaustiveCheck}`
  });
  }
  }
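prepareResponsesTools now takes tools and toolChoice directly instead of unpacking a mode object, and returns a camelCase toolChoice. A sketch of the choice mapping (the function-tool input shape is an assumption based on the v2 tool types, since the per-tool switch body is elided by the hunk above):

// Sketch: illustrative input and the mapping performed above.
// { type: "auto" | "none" | "required" }            -> passed through as a string
// { type: "tool", toolName: "web_search_preview" }  -> { type: "web_search_preview" }
// { type: "tool", toolName: "getWeather" }          -> { type: "function", name: "getWeather" }
const { tools, toolChoice, toolWarnings } = prepareResponsesTools({
  tools: [{ type: "function", name: "getWeather", description: "Weather lookup", parameters: { type: "object", properties: {} } }],
  toolChoice: { type: "tool", toolName: "getWeather" },
  strict: true,
});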
@@ -1831,16 +1845,19 @@ function prepareResponsesTools({
  var OpenAIResponsesLanguageModel = class {
  constructor(modelId, config) {
  this.specificationVersion = "v2";
- this.defaultObjectGenerationMode = "json";
  this.modelId = modelId;
  this.config = config;
  }
+ async getSupportedUrls() {
+ return {
+ "image/*": [/^https?:\/\/.*$/]
+ };
+ }
  get provider() {
  return this.config.provider;
  }
  getArgs({
- mode,
- maxTokens,
+ maxOutputTokens,
  temperature,
  stopSequences,
  topP,
@@ -1849,24 +1866,19 @@ var OpenAIResponsesLanguageModel = class {
  frequencyPenalty,
  seed,
  prompt,
- providerMetadata,
+ providerOptions,
+ tools,
+ toolChoice,
  responseFormat
  }) {
- var _a, _b, _c;
+ var _a, _b;
  const warnings = [];
  const modelConfig = getResponsesModelConfig(this.modelId);
- const type = mode.type;
  if (topK != null) {
- warnings.push({
- type: "unsupported-setting",
- setting: "topK"
- });
+ warnings.push({ type: "unsupported-setting", setting: "topK" });
  }
  if (seed != null) {
- warnings.push({
- type: "unsupported-setting",
- setting: "seed"
- });
+ warnings.push({ type: "unsupported-setting", setting: "seed" });
  }
  if (presencePenalty != null) {
  warnings.push({
@@ -1881,19 +1893,16 @@
  });
  }
  if (stopSequences != null) {
- warnings.push({
- type: "unsupported-setting",
- setting: "stopSequences"
- });
+ warnings.push({ type: "unsupported-setting", setting: "stopSequences" });
  }
  const { messages, warnings: messageWarnings } = convertToOpenAIResponsesMessages({
  prompt,
  systemMessageMode: modelConfig.systemMessageMode
  });
  warnings.push(...messageWarnings);
- const openaiOptions = (0, import_provider_utils8.parseProviderOptions)({
+ const openaiOptions = (0, import_provider_utils9.parseProviderOptions)({
  provider: "openai",
- providerOptions: providerMetadata,
+ providerOptions,
  schema: openaiResponsesProviderOptionsSchema
  });
  const isStrict = (_a = openaiOptions == null ? void 0 : openaiOptions.strictSchemas) != null ? _a : true;
@@ -1902,7 +1911,7 @@ var OpenAIResponsesLanguageModel = class {
  input: messages,
  temperature,
  top_p: topP,
- max_output_tokens: maxTokens,
+ max_output_tokens: maxOutputTokens,
  ...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
  text: {
  format: responseFormat.schema != null ? {
@@ -1922,8 +1931,15 @@ var OpenAIResponsesLanguageModel = class {
  user: openaiOptions == null ? void 0 : openaiOptions.user,
  instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
  // model-specific settings:
- ...modelConfig.isReasoningModel && (openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
- reasoning: { effort: openaiOptions == null ? void 0 : openaiOptions.reasoningEffort }
+ ...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
+ reasoning: {
+ ...(openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
+ effort: openaiOptions.reasoningEffort
+ },
+ ...(openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null && {
+ summary: openaiOptions.reasoningSummary
+ }
+ }
  },
  ...modelConfig.requiredAutoTruncation && {
  truncation: "auto"
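Alongside reasoningEffort, reasoning models can now request a reasoning summary. A sketch of the provider options and the reasoning block getArgs builds from them (the values are examples; the schema only requires strings):

// Sketch: provider options as validated by openaiResponsesProviderOptionsSchema.
const providerOptions = {
  openai: {
    reasoningEffort: "low",
    reasoningSummary: "auto",
  },
};
// For a reasoning model (per getResponsesModelConfig), getArgs merges this into:
// { ..., reasoning: { effort: "low", summary: "auto" } }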
@@ -1947,178 +1963,159 @@ var OpenAIResponsesLanguageModel = class {
  });
  }
  }
- switch (type) {
- case "regular": {
- const { tools, tool_choice, toolWarnings } = prepareResponsesTools({
- mode,
- strict: isStrict
- // TODO support provider options on tools
- });
- return {
- args: {
- ...baseArgs,
- tools,
- tool_choice
- },
- warnings: [...warnings, ...toolWarnings]
- };
- }
- case "object-json": {
- return {
- args: {
- ...baseArgs,
- text: {
- format: mode.schema != null ? {
- type: "json_schema",
- strict: isStrict,
- name: (_c = mode.name) != null ? _c : "response",
- description: mode.description,
- schema: mode.schema
- } : { type: "json_object" }
- }
- },
- warnings
- };
- }
- case "object-tool": {
- return {
- args: {
- ...baseArgs,
- tool_choice: { type: "function", name: mode.tool.name },
- tools: [
- {
- type: "function",
- name: mode.tool.name,
- description: mode.tool.description,
- parameters: mode.tool.parameters,
- strict: isStrict
- }
- ]
- },
- warnings
- };
- }
- default: {
- const _exhaustiveCheck = type;
- throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
- }
- }
+ const {
+ tools: openaiTools,
+ toolChoice: openaiToolChoice,
+ toolWarnings
+ } = prepareResponsesTools({
+ tools,
+ toolChoice,
+ strict: isStrict
+ });
+ return {
+ args: {
+ ...baseArgs,
+ tools: openaiTools,
+ tool_choice: openaiToolChoice
+ },
+ warnings: [...warnings, ...toolWarnings]
+ };
  }
  async doGenerate(options) {
- var _a, _b, _c, _d, _e;
+ var _a, _b, _c, _d, _e, _f, _g, _h;
  const { args: body, warnings } = this.getArgs(options);
  const {
  responseHeaders,
  value: response,
  rawValue: rawResponse
- } = await (0, import_provider_utils8.postJsonToApi)({
+ } = await (0, import_provider_utils9.postJsonToApi)({
  url: this.config.url({
  path: "/responses",
  modelId: this.modelId
  }),
- headers: (0, import_provider_utils8.combineHeaders)(this.config.headers(), options.headers),
+ headers: (0, import_provider_utils9.combineHeaders)(this.config.headers(), options.headers),
  body,
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils8.createJsonResponseHandler)(
- import_zod6.z.object({
- id: import_zod6.z.string(),
- created_at: import_zod6.z.number(),
- model: import_zod6.z.string(),
- output: import_zod6.z.array(
- import_zod6.z.discriminatedUnion("type", [
- import_zod6.z.object({
- type: import_zod6.z.literal("message"),
- role: import_zod6.z.literal("assistant"),
- content: import_zod6.z.array(
- import_zod6.z.object({
- type: import_zod6.z.literal("output_text"),
- text: import_zod6.z.string(),
- annotations: import_zod6.z.array(
- import_zod6.z.object({
- type: import_zod6.z.literal("url_citation"),
- start_index: import_zod6.z.number(),
- end_index: import_zod6.z.number(),
- url: import_zod6.z.string(),
- title: import_zod6.z.string()
+ successfulResponseHandler: (0, import_provider_utils9.createJsonResponseHandler)(
+ import_zod10.z.object({
+ id: import_zod10.z.string(),
+ created_at: import_zod10.z.number(),
+ model: import_zod10.z.string(),
+ output: import_zod10.z.array(
+ import_zod10.z.discriminatedUnion("type", [
+ import_zod10.z.object({
+ type: import_zod10.z.literal("message"),
+ role: import_zod10.z.literal("assistant"),
+ content: import_zod10.z.array(
+ import_zod10.z.object({
+ type: import_zod10.z.literal("output_text"),
+ text: import_zod10.z.string(),
+ annotations: import_zod10.z.array(
+ import_zod10.z.object({
+ type: import_zod10.z.literal("url_citation"),
+ start_index: import_zod10.z.number(),
+ end_index: import_zod10.z.number(),
+ url: import_zod10.z.string(),
+ title: import_zod10.z.string()
  })
  )
  })
  )
  }),
- import_zod6.z.object({
- type: import_zod6.z.literal("function_call"),
- call_id: import_zod6.z.string(),
- name: import_zod6.z.string(),
- arguments: import_zod6.z.string()
+ import_zod10.z.object({
+ type: import_zod10.z.literal("function_call"),
+ call_id: import_zod10.z.string(),
+ name: import_zod10.z.string(),
+ arguments: import_zod10.z.string()
  }),
- import_zod6.z.object({
- type: import_zod6.z.literal("web_search_call")
+ import_zod10.z.object({
+ type: import_zod10.z.literal("web_search_call")
  }),
- import_zod6.z.object({
- type: import_zod6.z.literal("computer_call")
+ import_zod10.z.object({
+ type: import_zod10.z.literal("computer_call")
  }),
- import_zod6.z.object({
- type: import_zod6.z.literal("reasoning")
+ import_zod10.z.object({
+ type: import_zod10.z.literal("reasoning"),
+ summary: import_zod10.z.array(
+ import_zod10.z.object({
+ type: import_zod10.z.literal("summary_text"),
+ text: import_zod10.z.string()
+ })
+ )
  })
  ])
  ),
- incomplete_details: import_zod6.z.object({ reason: import_zod6.z.string() }).nullable(),
+ incomplete_details: import_zod10.z.object({ reason: import_zod10.z.string() }).nullable(),
  usage: usageSchema
  })
  ),
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const outputTextElements = response.output.filter((output) => output.type === "message").flatMap((output) => output.content).filter((content) => content.type === "output_text");
- const toolCalls = response.output.filter((output) => output.type === "function_call").map((output) => ({
- toolCallType: "function",
- toolCallId: output.call_id,
- toolName: output.name,
- args: output.arguments
- }));
+ const content = [];
+ for (const part of response.output) {
+ switch (part.type) {
+ case "reasoning": {
+ content.push({
+ type: "reasoning",
+ reasoningType: "text",
+ text: part.summary.map((summary) => summary.text).join()
+ });
+ break;
+ }
+ case "message": {
+ for (const contentPart of part.content) {
+ content.push({
+ type: "text",
+ text: contentPart.text
+ });
+ for (const annotation of contentPart.annotations) {
+ content.push({
+ type: "source",
+ sourceType: "url",
+ id: (_c = (_b = (_a = this.config).generateId) == null ? void 0 : _b.call(_a)) != null ? _c : (0, import_provider_utils9.generateId)(),
+ url: annotation.url,
+ title: annotation.title
+ });
+ }
+ }
+ break;
+ }
+ case "function_call": {
+ content.push({
+ type: "tool-call",
+ toolCallType: "function",
+ toolCallId: part.call_id,
+ toolName: part.name,
+ args: part.arguments
+ });
+ break;
+ }
+ }
+ }
  return {
- text: outputTextElements.map((content) => content.text).join("\n"),
- sources: outputTextElements.flatMap(
- (content) => content.annotations.map((annotation) => {
- var _a2, _b2, _c2;
- return {
- sourceType: "url",
- id: (_c2 = (_b2 = (_a2 = this.config).generateId) == null ? void 0 : _b2.call(_a2)) != null ? _c2 : (0, import_provider_utils8.generateId)(),
- url: annotation.url,
- title: annotation.title
- };
- })
- ),
+ content,
  finishReason: mapOpenAIResponseFinishReason({
- finishReason: (_a = response.incomplete_details) == null ? void 0 : _a.reason,
- hasToolCalls: toolCalls.length > 0
+ finishReason: (_d = response.incomplete_details) == null ? void 0 : _d.reason,
+ hasToolCalls: content.some((part) => part.type === "tool-call")
  }),
- toolCalls: toolCalls.length > 0 ? toolCalls : void 0,
  usage: {
- promptTokens: response.usage.input_tokens,
- completionTokens: response.usage.output_tokens
- },
- rawCall: {
- rawPrompt: void 0,
- rawSettings: {}
- },
- rawResponse: {
- headers: responseHeaders,
- body: rawResponse
- },
- request: {
- body: JSON.stringify(body)
+ inputTokens: response.usage.input_tokens,
+ outputTokens: response.usage.output_tokens
  },
+ request: { body },
  response: {
  id: response.id,
  timestamp: new Date(response.created_at * 1e3),
- modelId: response.model
+ modelId: response.model,
+ headers: responseHeaders,
+ body: rawResponse
  },
  providerMetadata: {
  openai: {
  responseId: response.id,
- cachedPromptTokens: (_c = (_b = response.usage.input_tokens_details) == null ? void 0 : _b.cached_tokens) != null ? _c : null,
- reasoningTokens: (_e = (_d = response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : null
+ cachedPromptTokens: (_f = (_e = response.usage.input_tokens_details) == null ? void 0 : _e.cached_tokens) != null ? _f : null,
+ reasoningTokens: (_h = (_g = response.usage.output_tokens_details) == null ? void 0 : _g.reasoning_tokens) != null ? _h : null
  }
  },
  warnings
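doGenerate now returns a single ordered content array in place of the v1 text/toolCalls/sources fields, and usage switches to inputTokens/outputTokens. A sketch of consuming the new result shape:

// Sketch: walking the content parts produced by the loop above.
const result = await model.doGenerate(options);
for (const part of result.content) {
  switch (part.type) {
    case "reasoning":
      console.log("reasoning summary:", part.text);
      break;
    case "text":
      console.log("text:", part.text);
      break;
    case "source":
      console.log("citation:", part.url, part.title);
      break;
    case "tool-call":
      console.log("tool call:", part.toolName, part.args);
      break;
  }
}
console.log(result.usage); // { inputTokens, outputTokens }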
@@ -2126,18 +2123,18 @@ var OpenAIResponsesLanguageModel = class {
  }
  async doStream(options) {
  const { args: body, warnings } = this.getArgs(options);
- const { responseHeaders, value: response } = await (0, import_provider_utils8.postJsonToApi)({
+ const { responseHeaders, value: response } = await (0, import_provider_utils9.postJsonToApi)({
  url: this.config.url({
  path: "/responses",
  modelId: this.modelId
  }),
- headers: (0, import_provider_utils8.combineHeaders)(this.config.headers(), options.headers),
+ headers: (0, import_provider_utils9.combineHeaders)(this.config.headers(), options.headers),
  body: {
  ...body,
  stream: true
  },
  failedResponseHandler: openaiFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils8.createEventSourceResponseHandler)(
+ successfulResponseHandler: (0, import_provider_utils9.createEventSourceResponseHandler)(
  openaiResponsesChunkSchema
  ),
  abortSignal: options.abortSignal,
@@ -2145,8 +2142,10 @@
  });
  const self = this;
  let finishReason = "unknown";
- let promptTokens = NaN;
- let completionTokens = NaN;
+ const usage = {
+ inputTokens: void 0,
+ outputTokens: void 0
+ };
  let cachedPromptTokens = null;
  let reasoningTokens = null;
  let responseId = null;
@@ -2155,6 +2154,9 @@
  return {
  stream: response.pipeThrough(
  new TransformStream({
+ start(controller) {
+ controller.enqueue({ type: "stream-start", warnings });
+ },
  transform(chunk, controller) {
  var _a, _b, _c, _d, _e, _f, _g, _h;
  if (!chunk.success) {
@@ -2198,8 +2200,14 @@
  });
  } else if (isTextDeltaChunk(value)) {
  controller.enqueue({
- type: "text-delta",
- textDelta: value.delta
+ type: "text",
+ text: value.delta
+ });
+ } else if (isResponseReasoningSummaryTextDeltaChunk(value)) {
+ controller.enqueue({
+ type: "reasoning",
+ reasoningType: "text",
+ text: value.delta
  });
  } else if (isResponseOutputItemDoneChunk(value) && value.item.type === "function_call") {
  ongoingToolCalls[value.output_index] = void 0;
@@ -2216,19 +2224,17 @@
  finishReason: (_a = value.response.incomplete_details) == null ? void 0 : _a.reason,
  hasToolCalls
  });
- promptTokens = value.response.usage.input_tokens;
- completionTokens = value.response.usage.output_tokens;
+ usage.inputTokens = value.response.usage.input_tokens;
+ usage.outputTokens = value.response.usage.output_tokens;
  cachedPromptTokens = (_c = (_b = value.response.usage.input_tokens_details) == null ? void 0 : _b.cached_tokens) != null ? _c : cachedPromptTokens;
  reasoningTokens = (_e = (_d = value.response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : reasoningTokens;
  } else if (isResponseAnnotationAddedChunk(value)) {
  controller.enqueue({
  type: "source",
- source: {
- sourceType: "url",
- id: (_h = (_g = (_f = self.config).generateId) == null ? void 0 : _g.call(_f)) != null ? _h : (0, import_provider_utils8.generateId)(),
- url: value.annotation.url,
- title: value.annotation.title
- }
+ sourceType: "url",
+ id: (_h = (_g = (_f = self.config).generateId) == null ? void 0 : _g.call(_f)) != null ? _h : (0, import_provider_utils9.generateId)(),
+ url: value.annotation.url,
+ title: value.annotation.title
  });
  }
  },
@@ -2236,7 +2242,7 @@
  controller.enqueue({
  type: "finish",
  finishReason,
- usage: { promptTokens, completionTokens },
+ usage,
  ...(cachedPromptTokens != null || reasoningTokens != null) && {
  providerMetadata: {
  openai: {
@@ -2250,89 +2256,91 @@
  }
  })
  ),
- rawCall: {
- rawPrompt: void 0,
- rawSettings: {}
- },
- rawResponse: { headers: responseHeaders },
- request: { body: JSON.stringify(body) },
- warnings
+ request: { body },
+ response: { headers: responseHeaders }
  };
  }
  };
- var usageSchema = import_zod6.z.object({
- input_tokens: import_zod6.z.number(),
- input_tokens_details: import_zod6.z.object({ cached_tokens: import_zod6.z.number().nullish() }).nullish(),
- output_tokens: import_zod6.z.number(),
- output_tokens_details: import_zod6.z.object({ reasoning_tokens: import_zod6.z.number().nullish() }).nullish()
+ var usageSchema = import_zod10.z.object({
+ input_tokens: import_zod10.z.number(),
+ input_tokens_details: import_zod10.z.object({ cached_tokens: import_zod10.z.number().nullish() }).nullish(),
+ output_tokens: import_zod10.z.number(),
+ output_tokens_details: import_zod10.z.object({ reasoning_tokens: import_zod10.z.number().nullish() }).nullish()
  });
- var textDeltaChunkSchema = import_zod6.z.object({
- type: import_zod6.z.literal("response.output_text.delta"),
- delta: import_zod6.z.string()
+ var textDeltaChunkSchema = import_zod10.z.object({
+ type: import_zod10.z.literal("response.output_text.delta"),
+ delta: import_zod10.z.string()
  });
- var responseFinishedChunkSchema = import_zod6.z.object({
- type: import_zod6.z.enum(["response.completed", "response.incomplete"]),
- response: import_zod6.z.object({
- incomplete_details: import_zod6.z.object({ reason: import_zod6.z.string() }).nullish(),
+ var responseFinishedChunkSchema = import_zod10.z.object({
+ type: import_zod10.z.enum(["response.completed", "response.incomplete"]),
+ response: import_zod10.z.object({
+ incomplete_details: import_zod10.z.object({ reason: import_zod10.z.string() }).nullish(),
  usage: usageSchema
  })
  });
- var responseCreatedChunkSchema = import_zod6.z.object({
- type: import_zod6.z.literal("response.created"),
- response: import_zod6.z.object({
- id: import_zod6.z.string(),
- created_at: import_zod6.z.number(),
- model: import_zod6.z.string()
+ var responseCreatedChunkSchema = import_zod10.z.object({
+ type: import_zod10.z.literal("response.created"),
+ response: import_zod10.z.object({
+ id: import_zod10.z.string(),
+ created_at: import_zod10.z.number(),
+ model: import_zod10.z.string()
  })
  });
- var responseOutputItemDoneSchema = import_zod6.z.object({
- type: import_zod6.z.literal("response.output_item.done"),
- output_index: import_zod6.z.number(),
- item: import_zod6.z.discriminatedUnion("type", [
- import_zod6.z.object({
- type: import_zod6.z.literal("message")
+ var responseOutputItemDoneSchema = import_zod10.z.object({
+ type: import_zod10.z.literal("response.output_item.done"),
+ output_index: import_zod10.z.number(),
+ item: import_zod10.z.discriminatedUnion("type", [
+ import_zod10.z.object({
+ type: import_zod10.z.literal("message")
  }),
- import_zod6.z.object({
- type: import_zod6.z.literal("function_call"),
- id: import_zod6.z.string(),
- call_id: import_zod6.z.string(),
- name: import_zod6.z.string(),
- arguments: import_zod6.z.string(),
- status: import_zod6.z.literal("completed")
+ import_zod10.z.object({
+ type: import_zod10.z.literal("function_call"),
+ id: import_zod10.z.string(),
+ call_id: import_zod10.z.string(),
+ name: import_zod10.z.string(),
+ arguments: import_zod10.z.string(),
+ status: import_zod10.z.literal("completed")
  })
  ])
  });
- var responseFunctionCallArgumentsDeltaSchema = import_zod6.z.object({
- type: import_zod6.z.literal("response.function_call_arguments.delta"),
- item_id: import_zod6.z.string(),
- output_index: import_zod6.z.number(),
- delta: import_zod6.z.string()
+ var responseFunctionCallArgumentsDeltaSchema = import_zod10.z.object({
+ type: import_zod10.z.literal("response.function_call_arguments.delta"),
+ item_id: import_zod10.z.string(),
+ output_index: import_zod10.z.number(),
+ delta: import_zod10.z.string()
  });
- var responseOutputItemAddedSchema = import_zod6.z.object({
- type: import_zod6.z.literal("response.output_item.added"),
- output_index: import_zod6.z.number(),
- item: import_zod6.z.discriminatedUnion("type", [
- import_zod6.z.object({
- type: import_zod6.z.literal("message")
+ var responseOutputItemAddedSchema = import_zod10.z.object({
+ type: import_zod10.z.literal("response.output_item.added"),
+ output_index: import_zod10.z.number(),
+ item: import_zod10.z.discriminatedUnion("type", [
+ import_zod10.z.object({
+ type: import_zod10.z.literal("message")
  }),
- import_zod6.z.object({
- type: import_zod6.z.literal("function_call"),
- id: import_zod6.z.string(),
- call_id: import_zod6.z.string(),
- name: import_zod6.z.string(),
- arguments: import_zod6.z.string()
+ import_zod10.z.object({
+ type: import_zod10.z.literal("function_call"),
+ id: import_zod10.z.string(),
+ call_id: import_zod10.z.string(),
+ name: import_zod10.z.string(),
+ arguments: import_zod10.z.string()
  })
  ])
  });
- var responseAnnotationAddedSchema = import_zod6.z.object({
- type: import_zod6.z.literal("response.output_text.annotation.added"),
- annotation: import_zod6.z.object({
- type: import_zod6.z.literal("url_citation"),
- url: import_zod6.z.string(),
- title: import_zod6.z.string()
+ var responseAnnotationAddedSchema = import_zod10.z.object({
+ type: import_zod10.z.literal("response.output_text.annotation.added"),
+ annotation: import_zod10.z.object({
+ type: import_zod10.z.literal("url_citation"),
+ url: import_zod10.z.string(),
+ title: import_zod10.z.string()
  })
  });
- var openaiResponsesChunkSchema = import_zod6.z.union([
+ var responseReasoningSummaryTextDeltaSchema = import_zod10.z.object({
+ type: import_zod10.z.literal("response.reasoning_summary_text.delta"),
+ item_id: import_zod10.z.string(),
+ output_index: import_zod10.z.number(),
+ summary_index: import_zod10.z.number(),
+ delta: import_zod10.z.string()
+ });
+ var openaiResponsesChunkSchema = import_zod10.z.union([
  textDeltaChunkSchema,
  responseFinishedChunkSchema,
  responseCreatedChunkSchema,
@@ -2340,7 +2348,8 @@ var openaiResponsesChunkSchema = import_zod6.z.union([
  responseFunctionCallArgumentsDeltaSchema,
  responseOutputItemAddedSchema,
  responseAnnotationAddedSchema,
- import_zod6.z.object({ type: import_zod6.z.string() }).passthrough()
+ responseReasoningSummaryTextDeltaSchema,
+ import_zod10.z.object({ type: import_zod10.z.string() }).passthrough()
  // fallback for unknown chunks
  ]);
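The chunk union above gains responseReasoningSummaryTextDeltaSchema, which the stream transform turns into reasoning parts. An example event in the shape the schema expects (field values illustrative):

// Sketch: an SSE chunk matching responseReasoningSummaryTextDeltaSchema.
const chunk = {
  type: "response.reasoning_summary_text.delta",
  item_id: "rs_123",
  output_index: 0,
  summary_index: 0,
  delta: "Considering the constraints...",
};
// doStream emits: { type: "reasoning", reasoningType: "text", text: chunk.delta }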
  function isTextDeltaChunk(chunk) {
@@ -2364,6 +2373,9 @@ function isResponseOutputItemAddedChunk(chunk) {
  function isResponseAnnotationAddedChunk(chunk) {
  return chunk.type === "response.output_text.annotation.added";
  }
+ function isResponseReasoningSummaryTextDeltaChunk(chunk) {
+ return chunk.type === "response.reasoning_summary_text.delta";
+ }
  function getResponsesModelConfig(modelId) {
  if (modelId.startsWith("o")) {
  if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {
@@ -2385,15 +2397,16 @@ function getResponsesModelConfig(modelId) {
  requiredAutoTruncation: false
  };
  }
- var openaiResponsesProviderOptionsSchema = import_zod6.z.object({
- metadata: import_zod6.z.any().nullish(),
- parallelToolCalls: import_zod6.z.boolean().nullish(),
- previousResponseId: import_zod6.z.string().nullish(),
- store: import_zod6.z.boolean().nullish(),
- user: import_zod6.z.string().nullish(),
- reasoningEffort: import_zod6.z.string().nullish(),
- strictSchemas: import_zod6.z.boolean().nullish(),
- instructions: import_zod6.z.string().nullish()
+ var openaiResponsesProviderOptionsSchema = import_zod10.z.object({
+ metadata: import_zod10.z.any().nullish(),
+ parallelToolCalls: import_zod10.z.boolean().nullish(),
+ previousResponseId: import_zod10.z.string().nullish(),
+ store: import_zod10.z.boolean().nullish(),
+ user: import_zod10.z.string().nullish(),
+ reasoningEffort: import_zod10.z.string().nullish(),
+ strictSchemas: import_zod10.z.boolean().nullish(),
+ instructions: import_zod10.z.string().nullish(),
+ reasoningSummary: import_zod10.z.string().nullish()
  });
  // Annotate the CommonJS export names for ESM import in node:
  0 && (module.exports = {
@@ -2402,6 +2415,10 @@ var openaiResponsesProviderOptionsSchema = import_zod6.z.object({
  OpenAIEmbeddingModel,
  OpenAIImageModel,
  OpenAIResponsesLanguageModel,
- modelMaxImagesPerCall
+ OpenAISpeechModel,
+ OpenAITranscriptionModel,
+ modelMaxImagesPerCall,
+ openaiEmbeddingProviderOptions,
+ openaiProviderOptions
  });
  //# sourceMappingURL=index.js.map