@ai-sdk/openai 1.3.22 → 2.0.0-alpha.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -28,21 +28,24 @@ __export(internal_exports, {
28
28
  OpenAISpeechModel: () => OpenAISpeechModel,
29
29
  OpenAITranscriptionModel: () => OpenAITranscriptionModel,
30
30
  hasDefaultResponseFormat: () => hasDefaultResponseFormat,
31
- modelMaxImagesPerCall: () => modelMaxImagesPerCall
31
+ modelMaxImagesPerCall: () => modelMaxImagesPerCall,
32
+ openAITranscriptionProviderOptions: () => openAITranscriptionProviderOptions,
33
+ openaiCompletionProviderOptions: () => openaiCompletionProviderOptions,
34
+ openaiEmbeddingProviderOptions: () => openaiEmbeddingProviderOptions,
35
+ openaiProviderOptions: () => openaiProviderOptions
32
36
  });
33
37
  module.exports = __toCommonJS(internal_exports);
34
38
 
35
39
  // src/openai-chat-language-model.ts
36
40
  var import_provider3 = require("@ai-sdk/provider");
37
41
  var import_provider_utils3 = require("@ai-sdk/provider-utils");
38
- var import_zod2 = require("zod");
42
+ var import_zod3 = require("zod");
39
43
 
40
44
  // src/convert-to-openai-chat-messages.ts
41
45
  var import_provider = require("@ai-sdk/provider");
42
46
  var import_provider_utils = require("@ai-sdk/provider-utils");
43
47
  function convertToOpenAIChatMessages({
44
48
  prompt,
45
- useLegacyFunctionCalling = false,
46
49
  systemMessageMode = "system"
47
50
  }) {
48
51
  const messages = [];
@@ -83,55 +86,71 @@ function convertToOpenAIChatMessages({
83
86
  messages.push({
84
87
  role: "user",
85
88
  content: content.map((part, index) => {
86
- var _a, _b, _c, _d;
89
+ var _a, _b, _c;
87
90
  switch (part.type) {
88
91
  case "text": {
89
92
  return { type: "text", text: part.text };
90
93
  }
91
- case "image": {
92
- return {
93
- type: "image_url",
94
- image_url: {
95
- url: part.image instanceof URL ? part.image.toString() : `data:${(_a = part.mimeType) != null ? _a : "image/jpeg"};base64,${(0, import_provider_utils.convertUint8ArrayToBase64)(part.image)}`,
96
- // OpenAI specific extension: image detail
97
- detail: (_c = (_b = part.providerMetadata) == null ? void 0 : _b.openai) == null ? void 0 : _c.imageDetail
98
- }
99
- };
100
- }
101
94
  case "file": {
102
- if (part.data instanceof URL) {
103
- throw new import_provider.UnsupportedFunctionalityError({
104
- functionality: "'File content parts with URL data' functionality not supported."
105
- });
106
- }
107
- switch (part.mimeType) {
108
- case "audio/wav": {
109
- return {
110
- type: "input_audio",
111
- input_audio: { data: part.data, format: "wav" }
112
- };
113
- }
114
- case "audio/mp3":
115
- case "audio/mpeg": {
116
- return {
117
- type: "input_audio",
118
- input_audio: { data: part.data, format: "mp3" }
119
- };
95
+ if (part.mediaType.startsWith("image/")) {
96
+ const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
97
+ return {
98
+ type: "image_url",
99
+ image_url: {
100
+ url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${(0, import_provider_utils.convertToBase64)(part.data)}`,
101
+ // OpenAI specific extension: image detail
102
+ detail: (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.imageDetail
103
+ }
104
+ };
105
+ } else if (part.mediaType.startsWith("audio/")) {
106
+ if (part.data instanceof URL) {
107
+ throw new import_provider.UnsupportedFunctionalityError({
108
+ functionality: "audio file parts with URLs"
109
+ });
120
110
  }
121
- case "application/pdf": {
122
- return {
123
- type: "file",
124
- file: {
125
- filename: (_d = part.filename) != null ? _d : `part-${index}.pdf`,
126
- file_data: `data:application/pdf;base64,${part.data}`
127
- }
128
- };
111
+ switch (part.mediaType) {
112
+ case "audio/wav": {
113
+ return {
114
+ type: "input_audio",
115
+ input_audio: {
116
+ data: (0, import_provider_utils.convertToBase64)(part.data),
117
+ format: "wav"
118
+ }
119
+ };
120
+ }
121
+ case "audio/mp3":
122
+ case "audio/mpeg": {
123
+ return {
124
+ type: "input_audio",
125
+ input_audio: {
126
+ data: (0, import_provider_utils.convertToBase64)(part.data),
127
+ format: "mp3"
128
+ }
129
+ };
130
+ }
131
+ default: {
132
+ throw new import_provider.UnsupportedFunctionalityError({
133
+ functionality: `audio content parts with media type ${part.mediaType}`
134
+ });
135
+ }
129
136
  }
130
- default: {
137
+ } else if (part.mediaType === "application/pdf") {
138
+ if (part.data instanceof URL) {
131
139
  throw new import_provider.UnsupportedFunctionalityError({
132
- functionality: `File content part type ${part.mimeType} in user messages`
140
+ functionality: "PDF file parts with URLs"
133
141
  });
134
142
  }
143
+ return {
144
+ type: "file",
145
+ file: {
146
+ filename: (_c = part.filename) != null ? _c : `part-${index}.pdf`,
147
+ file_data: `data:application/pdf;base64,${part.data}`
148
+ }
149
+ };
150
+ } else {
151
+ throw new import_provider.UnsupportedFunctionalityError({
152
+ functionality: `file part media type ${part.mediaType}`
153
+ });
135
154
  }
136
155
  }
137
156
  }
@@ -161,41 +180,20 @@ function convertToOpenAIChatMessages({
161
180
  }
162
181
  }
163
182
  }
164
- if (useLegacyFunctionCalling) {
165
- if (toolCalls.length > 1) {
166
- throw new import_provider.UnsupportedFunctionalityError({
167
- functionality: "useLegacyFunctionCalling with multiple tool calls in one message"
168
- });
169
- }
170
- messages.push({
171
- role: "assistant",
172
- content: text,
173
- function_call: toolCalls.length > 0 ? toolCalls[0].function : void 0
174
- });
175
- } else {
176
- messages.push({
177
- role: "assistant",
178
- content: text,
179
- tool_calls: toolCalls.length > 0 ? toolCalls : void 0
180
- });
181
- }
183
+ messages.push({
184
+ role: "assistant",
185
+ content: text,
186
+ tool_calls: toolCalls.length > 0 ? toolCalls : void 0
187
+ });
182
188
  break;
183
189
  }
184
190
  case "tool": {
185
191
  for (const toolResponse of content) {
186
- if (useLegacyFunctionCalling) {
187
- messages.push({
188
- role: "function",
189
- name: toolResponse.toolName,
190
- content: JSON.stringify(toolResponse.result)
191
- });
192
- } else {
193
- messages.push({
194
- role: "tool",
195
- tool_call_id: toolResponse.toolCallId,
196
- content: JSON.stringify(toolResponse.result)
197
- });
198
- }
192
+ messages.push({
193
+ role: "tool",
194
+ tool_call_id: toolResponse.toolCallId,
195
+ content: JSON.stringify(toolResponse.result)
196
+ });
199
197
  }
200
198
  break;
201
199
  }
@@ -208,17 +206,17 @@ function convertToOpenAIChatMessages({
208
206
  return { messages, warnings };
209
207
  }
210
208
 
211
- // src/map-openai-chat-logprobs.ts
212
- function mapOpenAIChatLogProbsOutput(logprobs) {
213
- var _a, _b;
214
- return (_b = (_a = logprobs == null ? void 0 : logprobs.content) == null ? void 0 : _a.map(({ token, logprob, top_logprobs }) => ({
215
- token,
216
- logprob,
217
- topLogprobs: top_logprobs ? top_logprobs.map(({ token: token2, logprob: logprob2 }) => ({
218
- token: token2,
219
- logprob: logprob2
220
- })) : []
221
- }))) != null ? _b : void 0;
209
+ // src/get-response-metadata.ts
210
+ function getResponseMetadata({
211
+ id,
212
+ model,
213
+ created
214
+ }) {
215
+ return {
216
+ id: id != null ? id : void 0,
217
+ modelId: model != null ? model : void 0,
218
+ timestamp: created != null ? new Date(created * 1e3) : void 0
219
+ };
222
220
  }
223
221
 
224
222
  // src/map-openai-finish-reason.ts
@@ -238,18 +236,75 @@ function mapOpenAIFinishReason(finishReason) {
238
236
  }
239
237
  }
240
238
 
241
- // src/openai-error.ts
239
+ // src/openai-chat-options.ts
242
240
  var import_zod = require("zod");
241
+ var openaiProviderOptions = import_zod.z.object({
242
+ /**
243
+ * Modify the likelihood of specified tokens appearing in the completion.
244
+ *
245
+ * Accepts a JSON object that maps tokens (specified by their token ID in
246
+ * the GPT tokenizer) to an associated bias value from -100 to 100.
247
+ */
248
+ logitBias: import_zod.z.record(import_zod.z.coerce.number(), import_zod.z.number()).optional(),
249
+ /**
250
+ * Return the log probabilities of the tokens.
251
+ *
252
+ * Setting to true will return the log probabilities of the tokens that
253
+ * were generated.
254
+ *
255
+ * Setting to a number will return the log probabilities of the top n
256
+ * tokens that were generated.
257
+ */
258
+ logprobs: import_zod.z.union([import_zod.z.boolean(), import_zod.z.number()]).optional(),
259
+ /**
260
+ * Whether to enable parallel function calling during tool use. Default to true.
261
+ */
262
+ parallelToolCalls: import_zod.z.boolean().optional(),
263
+ /**
264
+ * A unique identifier representing your end-user, which can help OpenAI to
265
+ * monitor and detect abuse.
266
+ */
267
+ user: import_zod.z.string().optional(),
268
+ /**
269
+ * Reasoning effort for reasoning models. Defaults to `medium`.
270
+ */
271
+ reasoningEffort: import_zod.z.enum(["low", "medium", "high"]).optional(),
272
+ /**
273
+ * Maximum number of completion tokens to generate. Useful for reasoning models.
274
+ */
275
+ maxCompletionTokens: import_zod.z.number().optional(),
276
+ /**
277
+ * Whether to enable persistence in responses API.
278
+ */
279
+ store: import_zod.z.boolean().optional(),
280
+ /**
281
+ * Metadata to associate with the request.
282
+ */
283
+ metadata: import_zod.z.record(import_zod.z.string()).optional(),
284
+ /**
285
+ * Parameters for prediction mode.
286
+ */
287
+ prediction: import_zod.z.record(import_zod.z.any()).optional(),
288
+ /**
289
+ * Whether to use structured outputs.
290
+ *
291
+ * @default true
292
+ */
293
+ structuredOutputs: import_zod.z.boolean().optional()
294
+ });
295
+
296
+ // src/openai-error.ts
297
+ var import_zod2 = require("zod");
243
298
  var import_provider_utils2 = require("@ai-sdk/provider-utils");
244
- var openaiErrorDataSchema = import_zod.z.object({
245
- error: import_zod.z.object({
246
- message: import_zod.z.string(),
299
+ var openaiErrorDataSchema = import_zod2.z.object({
300
+ error: import_zod2.z.object({
301
+ message: import_zod2.z.string(),
247
302
  // The additional information below is handled loosely to support
248
303
  // OpenAI-compatible providers that have slightly different error
249
304
  // responses:
250
- type: import_zod.z.string().nullish(),
251
- param: import_zod.z.any().nullish(),
252
- code: import_zod.z.union([import_zod.z.string(), import_zod.z.number()]).nullish()
305
+ type: import_zod2.z.string().nullish(),
306
+ param: import_zod2.z.any().nullish(),
307
+ code: import_zod2.z.union([import_zod2.z.string(), import_zod2.z.number()]).nullish()
253
308
  })
254
309
  });
255
310
  var openaiFailedResponseHandler = (0, import_provider_utils2.createJsonErrorResponseHandler)({
@@ -257,74 +312,17 @@ var openaiFailedResponseHandler = (0, import_provider_utils2.createJsonErrorResp
257
312
  errorToMessage: (data) => data.error.message
258
313
  });
259
314
 
260
- // src/get-response-metadata.ts
261
- function getResponseMetadata({
262
- id,
263
- model,
264
- created
265
- }) {
266
- return {
267
- id: id != null ? id : void 0,
268
- modelId: model != null ? model : void 0,
269
- timestamp: created != null ? new Date(created * 1e3) : void 0
270
- };
271
- }
272
-
273
315
  // src/openai-prepare-tools.ts
274
316
  var import_provider2 = require("@ai-sdk/provider");
275
317
  function prepareTools({
276
- mode,
277
- useLegacyFunctionCalling = false,
318
+ tools,
319
+ toolChoice,
278
320
  structuredOutputs
279
321
  }) {
280
- var _a;
281
- const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
322
+ tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
282
323
  const toolWarnings = [];
283
324
  if (tools == null) {
284
- return { tools: void 0, tool_choice: void 0, toolWarnings };
285
- }
286
- const toolChoice = mode.toolChoice;
287
- if (useLegacyFunctionCalling) {
288
- const openaiFunctions = [];
289
- for (const tool of tools) {
290
- if (tool.type === "provider-defined") {
291
- toolWarnings.push({ type: "unsupported-tool", tool });
292
- } else {
293
- openaiFunctions.push({
294
- name: tool.name,
295
- description: tool.description,
296
- parameters: tool.parameters
297
- });
298
- }
299
- }
300
- if (toolChoice == null) {
301
- return {
302
- functions: openaiFunctions,
303
- function_call: void 0,
304
- toolWarnings
305
- };
306
- }
307
- const type2 = toolChoice.type;
308
- switch (type2) {
309
- case "auto":
310
- case "none":
311
- case void 0:
312
- return {
313
- functions: openaiFunctions,
314
- function_call: void 0,
315
- toolWarnings
316
- };
317
- case "required":
318
- throw new import_provider2.UnsupportedFunctionalityError({
319
- functionality: "useLegacyFunctionCalling and toolChoice: required"
320
- });
321
- default:
322
- return {
323
- functions: openaiFunctions,
324
- function_call: { name: toolChoice.toolName },
325
- toolWarnings
326
- };
327
- }
325
+ return { tools: void 0, toolChoice: void 0, toolWarnings };
328
326
  }
329
327
  const openaiTools = [];
330
328
  for (const tool of tools) {
@@ -343,18 +341,18 @@ function prepareTools({
343
341
  }
344
342
  }
345
343
  if (toolChoice == null) {
346
- return { tools: openaiTools, tool_choice: void 0, toolWarnings };
344
+ return { tools: openaiTools, toolChoice: void 0, toolWarnings };
347
345
  }
348
346
  const type = toolChoice.type;
349
347
  switch (type) {
350
348
  case "auto":
351
349
  case "none":
352
350
  case "required":
353
- return { tools: openaiTools, tool_choice: type, toolWarnings };
351
+ return { tools: openaiTools, toolChoice: type, toolWarnings };
354
352
  case "tool":
355
353
  return {
356
354
  tools: openaiTools,
357
- tool_choice: {
355
+ toolChoice: {
358
356
  type: "function",
359
357
  function: {
360
358
  name: toolChoice.toolName
@@ -365,7 +363,7 @@ function prepareTools({
365
363
  default: {
366
364
  const _exhaustiveCheck = type;
367
365
  throw new import_provider2.UnsupportedFunctionalityError({
368
- functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`
366
+ functionality: `tool choice type: ${_exhaustiveCheck}`
369
367
  });
370
368
  }
371
369
  }
@@ -373,32 +371,20 @@ function prepareTools({
373
371
 
374
372
  // src/openai-chat-language-model.ts
375
373
  var OpenAIChatLanguageModel = class {
376
- constructor(modelId, settings, config) {
377
- this.specificationVersion = "v1";
374
+ constructor(modelId, config) {
375
+ this.specificationVersion = "v2";
376
+ this.supportedUrls = {
377
+ "image/*": [/^https?:\/\/.*$/]
378
+ };
378
379
  this.modelId = modelId;
379
- this.settings = settings;
380
380
  this.config = config;
381
381
  }
382
- get supportsStructuredOutputs() {
383
- var _a;
384
- return (_a = this.settings.structuredOutputs) != null ? _a : isReasoningModel(this.modelId);
385
- }
386
- get defaultObjectGenerationMode() {
387
- if (isAudioModel(this.modelId)) {
388
- return "tool";
389
- }
390
- return this.supportsStructuredOutputs ? "json" : "tool";
391
- }
392
382
  get provider() {
393
383
  return this.config.provider;
394
384
  }
395
- get supportsImageUrls() {
396
- return !this.settings.downloadImages;
397
- }
398
- getArgs({
399
- mode,
385
+ async getArgs({
400
386
  prompt,
401
- maxTokens,
387
+ maxOutputTokens,
402
388
  temperature,
403
389
  topP,
404
390
  topK,
@@ -407,39 +393,34 @@ var OpenAIChatLanguageModel = class {
407
393
  stopSequences,
408
394
  responseFormat,
409
395
  seed,
410
- providerMetadata
396
+ tools,
397
+ toolChoice,
398
+ providerOptions
411
399
  }) {
412
- var _a, _b, _c, _d, _e, _f, _g, _h;
413
- const type = mode.type;
400
+ var _a, _b, _c;
414
401
  const warnings = [];
402
+ const openaiOptions = (_a = await (0, import_provider_utils3.parseProviderOptions)({
403
+ provider: "openai",
404
+ providerOptions,
405
+ schema: openaiProviderOptions
406
+ })) != null ? _a : {};
407
+ const structuredOutputs = (_b = openaiOptions.structuredOutputs) != null ? _b : true;
415
408
  if (topK != null) {
416
409
  warnings.push({
417
410
  type: "unsupported-setting",
418
411
  setting: "topK"
419
412
  });
420
413
  }
421
- if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !this.supportsStructuredOutputs) {
414
+ if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !structuredOutputs) {
422
415
  warnings.push({
423
416
  type: "unsupported-setting",
424
417
  setting: "responseFormat",
425
418
  details: "JSON response format schema is only supported with structuredOutputs"
426
419
  });
427
420
  }
428
- const useLegacyFunctionCalling = this.settings.useLegacyFunctionCalling;
429
- if (useLegacyFunctionCalling && this.settings.parallelToolCalls === true) {
430
- throw new import_provider3.UnsupportedFunctionalityError({
431
- functionality: "useLegacyFunctionCalling with parallelToolCalls"
432
- });
433
- }
434
- if (useLegacyFunctionCalling && this.supportsStructuredOutputs) {
435
- throw new import_provider3.UnsupportedFunctionalityError({
436
- functionality: "structuredOutputs with useLegacyFunctionCalling"
437
- });
438
- }
439
421
  const { messages, warnings: messageWarnings } = convertToOpenAIChatMessages(
440
422
  {
441
423
  prompt,
442
- useLegacyFunctionCalling,
443
424
  systemMessageMode: getSystemMessageMode(this.modelId)
444
425
  }
445
426
  );
@@ -448,35 +429,38 @@ var OpenAIChatLanguageModel = class {
448
429
  // model id:
449
430
  model: this.modelId,
450
431
  // model specific settings:
451
- logit_bias: this.settings.logitBias,
452
- logprobs: this.settings.logprobs === true || typeof this.settings.logprobs === "number" ? true : void 0,
453
- top_logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
454
- user: this.settings.user,
455
- parallel_tool_calls: this.settings.parallelToolCalls,
432
+ logit_bias: openaiOptions.logitBias,
433
+ logprobs: openaiOptions.logprobs === true || typeof openaiOptions.logprobs === "number" ? true : void 0,
434
+ top_logprobs: typeof openaiOptions.logprobs === "number" ? openaiOptions.logprobs : typeof openaiOptions.logprobs === "boolean" ? openaiOptions.logprobs ? 0 : void 0 : void 0,
435
+ user: openaiOptions.user,
436
+ parallel_tool_calls: openaiOptions.parallelToolCalls,
456
437
  // standardized settings:
457
- max_tokens: maxTokens,
438
+ max_tokens: maxOutputTokens,
458
439
  temperature,
459
440
  top_p: topP,
460
441
  frequency_penalty: frequencyPenalty,
461
442
  presence_penalty: presencePenalty,
462
- response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? this.supportsStructuredOutputs && responseFormat.schema != null ? {
463
- type: "json_schema",
464
- json_schema: {
465
- schema: responseFormat.schema,
466
- strict: true,
467
- name: (_a = responseFormat.name) != null ? _a : "response",
468
- description: responseFormat.description
469
- }
470
- } : { type: "json_object" } : void 0,
443
+ response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? (
444
+ // TODO convert into provider option
445
+ structuredOutputs && responseFormat.schema != null ? {
446
+ type: "json_schema",
447
+ json_schema: {
448
+ schema: responseFormat.schema,
449
+ strict: true,
450
+ name: (_c = responseFormat.name) != null ? _c : "response",
451
+ description: responseFormat.description
452
+ }
453
+ } : { type: "json_object" }
454
+ ) : void 0,
471
455
  stop: stopSequences,
472
456
  seed,
473
457
  // openai specific settings:
474
- // TODO remove in next major version; we auto-map maxTokens now
475
- max_completion_tokens: (_b = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _b.maxCompletionTokens,
476
- store: (_c = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _c.store,
477
- metadata: (_d = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _d.metadata,
478
- prediction: (_e = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _e.prediction,
479
- reasoning_effort: (_g = (_f = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _f.reasoningEffort) != null ? _g : this.settings.reasoningEffort,
458
+ // TODO remove in next major version; we auto-map maxOutputTokens now
459
+ max_completion_tokens: openaiOptions.maxCompletionTokens,
460
+ store: openaiOptions.store,
461
+ metadata: openaiOptions.metadata,
462
+ prediction: openaiOptions.prediction,
463
+ reasoning_effort: openaiOptions.reasoningEffort,
480
464
  // messages:
481
465
  messages
482
466
  };
@@ -550,85 +534,27 @@ var OpenAIChatLanguageModel = class {
550
534
  });
551
535
  }
552
536
  }
553
- switch (type) {
554
- case "regular": {
555
- const { tools, tool_choice, functions, function_call, toolWarnings } = prepareTools({
556
- mode,
557
- useLegacyFunctionCalling,
558
- structuredOutputs: this.supportsStructuredOutputs
559
- });
560
- return {
561
- args: {
562
- ...baseArgs,
563
- tools,
564
- tool_choice,
565
- functions,
566
- function_call
567
- },
568
- warnings: [...warnings, ...toolWarnings]
569
- };
570
- }
571
- case "object-json": {
572
- return {
573
- args: {
574
- ...baseArgs,
575
- response_format: this.supportsStructuredOutputs && mode.schema != null ? {
576
- type: "json_schema",
577
- json_schema: {
578
- schema: mode.schema,
579
- strict: true,
580
- name: (_h = mode.name) != null ? _h : "response",
581
- description: mode.description
582
- }
583
- } : { type: "json_object" }
584
- },
585
- warnings
586
- };
587
- }
588
- case "object-tool": {
589
- return {
590
- args: useLegacyFunctionCalling ? {
591
- ...baseArgs,
592
- function_call: {
593
- name: mode.tool.name
594
- },
595
- functions: [
596
- {
597
- name: mode.tool.name,
598
- description: mode.tool.description,
599
- parameters: mode.tool.parameters
600
- }
601
- ]
602
- } : {
603
- ...baseArgs,
604
- tool_choice: {
605
- type: "function",
606
- function: { name: mode.tool.name }
607
- },
608
- tools: [
609
- {
610
- type: "function",
611
- function: {
612
- name: mode.tool.name,
613
- description: mode.tool.description,
614
- parameters: mode.tool.parameters,
615
- strict: this.supportsStructuredOutputs ? true : void 0
616
- }
617
- }
618
- ]
619
- },
620
- warnings
621
- };
622
- }
623
- default: {
624
- const _exhaustiveCheck = type;
625
- throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
626
- }
627
- }
537
+ const {
538
+ tools: openaiTools,
539
+ toolChoice: openaiToolChoice,
540
+ toolWarnings
541
+ } = prepareTools({
542
+ tools,
543
+ toolChoice,
544
+ structuredOutputs
545
+ });
546
+ return {
547
+ args: {
548
+ ...baseArgs,
549
+ tools: openaiTools,
550
+ tool_choice: openaiToolChoice
551
+ },
552
+ warnings: [...warnings, ...toolWarnings]
553
+ };
628
554
  }
629
555
  async doGenerate(options) {
630
- var _a, _b, _c, _d, _e, _f, _g, _h;
631
- const { args: body, warnings } = this.getArgs(options);
556
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
557
+ const { args: body, warnings } = await this.getArgs(options);
632
558
  const {
633
559
  responseHeaders,
634
560
  value: response,
@@ -647,105 +573,61 @@ var OpenAIChatLanguageModel = class {
647
573
  abortSignal: options.abortSignal,
648
574
  fetch: this.config.fetch
649
575
  });
650
- const { messages: rawPrompt, ...rawSettings } = body;
651
576
  const choice = response.choices[0];
652
- const completionTokenDetails = (_a = response.usage) == null ? void 0 : _a.completion_tokens_details;
653
- const promptTokenDetails = (_b = response.usage) == null ? void 0 : _b.prompt_tokens_details;
654
- const providerMetadata = { openai: {} };
655
- if ((completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null) {
656
- providerMetadata.openai.reasoningTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens;
577
+ const content = [];
578
+ const text = choice.message.content;
579
+ if (text != null && text.length > 0) {
580
+ content.push({ type: "text", text });
581
+ }
582
+ for (const toolCall of (_a = choice.message.tool_calls) != null ? _a : []) {
583
+ content.push({
584
+ type: "tool-call",
585
+ toolCallType: "function",
586
+ toolCallId: (_b = toolCall.id) != null ? _b : (0, import_provider_utils3.generateId)(),
587
+ toolName: toolCall.function.name,
588
+ args: toolCall.function.arguments
589
+ });
657
590
  }
591
+ const completionTokenDetails = (_c = response.usage) == null ? void 0 : _c.completion_tokens_details;
592
+ const promptTokenDetails = (_d = response.usage) == null ? void 0 : _d.prompt_tokens_details;
593
+ const providerMetadata = { openai: {} };
658
594
  if ((completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens) != null) {
659
595
  providerMetadata.openai.acceptedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens;
660
596
  }
661
597
  if ((completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens) != null) {
662
598
  providerMetadata.openai.rejectedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.rejected_prediction_tokens;
663
599
  }
664
- if ((promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null) {
665
- providerMetadata.openai.cachedPromptTokens = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens;
600
+ if (((_e = choice.logprobs) == null ? void 0 : _e.content) != null) {
601
+ providerMetadata.openai.logprobs = choice.logprobs.content;
666
602
  }
667
603
  return {
668
- text: (_c = choice.message.content) != null ? _c : void 0,
669
- toolCalls: this.settings.useLegacyFunctionCalling && choice.message.function_call ? [
670
- {
671
- toolCallType: "function",
672
- toolCallId: (0, import_provider_utils3.generateId)(),
673
- toolName: choice.message.function_call.name,
674
- args: choice.message.function_call.arguments
675
- }
676
- ] : (_d = choice.message.tool_calls) == null ? void 0 : _d.map((toolCall) => {
677
- var _a2;
678
- return {
679
- toolCallType: "function",
680
- toolCallId: (_a2 = toolCall.id) != null ? _a2 : (0, import_provider_utils3.generateId)(),
681
- toolName: toolCall.function.name,
682
- args: toolCall.function.arguments
683
- };
684
- }),
604
+ content,
685
605
  finishReason: mapOpenAIFinishReason(choice.finish_reason),
686
606
  usage: {
687
- promptTokens: (_f = (_e = response.usage) == null ? void 0 : _e.prompt_tokens) != null ? _f : NaN,
688
- completionTokens: (_h = (_g = response.usage) == null ? void 0 : _g.completion_tokens) != null ? _h : NaN
607
+ inputTokens: (_g = (_f = response.usage) == null ? void 0 : _f.prompt_tokens) != null ? _g : void 0,
608
+ outputTokens: (_i = (_h = response.usage) == null ? void 0 : _h.completion_tokens) != null ? _i : void 0,
609
+ totalTokens: (_k = (_j = response.usage) == null ? void 0 : _j.total_tokens) != null ? _k : void 0,
610
+ reasoningTokens: (_l = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null ? _l : void 0,
611
+ cachedInputTokens: (_m = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null ? _m : void 0
612
+ },
613
+ request: { body },
614
+ response: {
615
+ ...getResponseMetadata(response),
616
+ headers: responseHeaders,
617
+ body: rawResponse
689
618
  },
690
- rawCall: { rawPrompt, rawSettings },
691
- rawResponse: { headers: responseHeaders, body: rawResponse },
692
- request: { body: JSON.stringify(body) },
693
- response: getResponseMetadata(response),
694
619
  warnings,
695
- logprobs: mapOpenAIChatLogProbsOutput(choice.logprobs),
696
620
  providerMetadata
697
621
  };
698
622
  }
699
623
  async doStream(options) {
700
- if (this.settings.simulateStreaming) {
701
- const result = await this.doGenerate(options);
702
- const simulatedStream = new ReadableStream({
703
- start(controller) {
704
- controller.enqueue({ type: "response-metadata", ...result.response });
705
- if (result.text) {
706
- controller.enqueue({
707
- type: "text-delta",
708
- textDelta: result.text
709
- });
710
- }
711
- if (result.toolCalls) {
712
- for (const toolCall of result.toolCalls) {
713
- controller.enqueue({
714
- type: "tool-call-delta",
715
- toolCallType: "function",
716
- toolCallId: toolCall.toolCallId,
717
- toolName: toolCall.toolName,
718
- argsTextDelta: toolCall.args
719
- });
720
- controller.enqueue({
721
- type: "tool-call",
722
- ...toolCall
723
- });
724
- }
725
- }
726
- controller.enqueue({
727
- type: "finish",
728
- finishReason: result.finishReason,
729
- usage: result.usage,
730
- logprobs: result.logprobs,
731
- providerMetadata: result.providerMetadata
732
- });
733
- controller.close();
734
- }
735
- });
736
- return {
737
- stream: simulatedStream,
738
- rawCall: result.rawCall,
739
- rawResponse: result.rawResponse,
740
- warnings: result.warnings
741
- };
742
- }
743
- const { args, warnings } = this.getArgs(options);
624
+ const { args, warnings } = await this.getArgs(options);
744
625
  const body = {
745
626
  ...args,
746
627
  stream: true,
747
- // only include stream_options when in strict compatibility mode:
748
- stream_options: this.config.compatibility === "strict" ? { include_usage: true } : void 0
628
+ stream_options: {
629
+ include_usage: true
630
+ }
749
631
  };
750
632
  const { responseHeaders, value: response } = await (0, import_provider_utils3.postJsonToApi)({
751
633
  url: this.config.url({
@@ -761,22 +643,23 @@ var OpenAIChatLanguageModel = class {
761
643
  abortSignal: options.abortSignal,
762
644
  fetch: this.config.fetch
763
645
  });
764
- const { messages: rawPrompt, ...rawSettings } = args;
765
646
  const toolCalls = [];
766
647
  let finishReason = "unknown";
767
- let usage = {
768
- promptTokens: void 0,
769
- completionTokens: void 0
648
+ const usage = {
649
+ inputTokens: void 0,
650
+ outputTokens: void 0,
651
+ totalTokens: void 0
770
652
  };
771
- let logprobs;
772
653
  let isFirstChunk = true;
773
- const { useLegacyFunctionCalling } = this.settings;
774
654
  const providerMetadata = { openai: {} };
775
655
  return {
776
656
  stream: response.pipeThrough(
777
657
  new TransformStream({
658
+ start(controller) {
659
+ controller.enqueue({ type: "stream-start", warnings });
660
+ },
778
661
  transform(chunk, controller) {
779
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
662
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x;
780
663
  if (!chunk.success) {
781
664
  finishReason = "error";
782
665
  controller.enqueue({ type: "error", error: chunk.error });
@@ -796,60 +679,37 @@ var OpenAIChatLanguageModel = class {
796
679
  });
797
680
  }
798
681
  if (value.usage != null) {
799
- const {
800
- prompt_tokens,
801
- completion_tokens,
802
- prompt_tokens_details,
803
- completion_tokens_details
804
- } = value.usage;
805
- usage = {
806
- promptTokens: prompt_tokens != null ? prompt_tokens : void 0,
807
- completionTokens: completion_tokens != null ? completion_tokens : void 0
808
- };
809
- if ((completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens) != null) {
810
- providerMetadata.openai.reasoningTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens;
811
- }
812
- if ((completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens) != null) {
813
- providerMetadata.openai.acceptedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens;
814
- }
815
- if ((completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens) != null) {
816
- providerMetadata.openai.rejectedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens;
682
+ usage.inputTokens = (_a = value.usage.prompt_tokens) != null ? _a : void 0;
683
+ usage.outputTokens = (_b = value.usage.completion_tokens) != null ? _b : void 0;
684
+ usage.totalTokens = (_c = value.usage.total_tokens) != null ? _c : void 0;
685
+ usage.reasoningTokens = (_e = (_d = value.usage.completion_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : void 0;
686
+ usage.cachedInputTokens = (_g = (_f = value.usage.prompt_tokens_details) == null ? void 0 : _f.cached_tokens) != null ? _g : void 0;
687
+ if (((_h = value.usage.completion_tokens_details) == null ? void 0 : _h.accepted_prediction_tokens) != null) {
688
+ providerMetadata.openai.acceptedPredictionTokens = (_i = value.usage.completion_tokens_details) == null ? void 0 : _i.accepted_prediction_tokens;
817
689
  }
818
- if ((prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens) != null) {
819
- providerMetadata.openai.cachedPromptTokens = prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens;
690
+ if (((_j = value.usage.completion_tokens_details) == null ? void 0 : _j.rejected_prediction_tokens) != null) {
691
+ providerMetadata.openai.rejectedPredictionTokens = (_k = value.usage.completion_tokens_details) == null ? void 0 : _k.rejected_prediction_tokens;
820
692
  }
821
693
  }
822
694
  const choice = value.choices[0];
823
695
  if ((choice == null ? void 0 : choice.finish_reason) != null) {
824
696
  finishReason = mapOpenAIFinishReason(choice.finish_reason);
825
697
  }
698
+ if (((_l = choice == null ? void 0 : choice.logprobs) == null ? void 0 : _l.content) != null) {
699
+ providerMetadata.openai.logprobs = choice.logprobs.content;
700
+ }
826
701
  if ((choice == null ? void 0 : choice.delta) == null) {
827
702
  return;
828
703
  }
829
704
  const delta = choice.delta;
830
705
  if (delta.content != null) {
831
706
  controller.enqueue({
832
- type: "text-delta",
833
- textDelta: delta.content
707
+ type: "text",
708
+ text: delta.content
834
709
  });
835
710
  }
836
- const mappedLogprobs = mapOpenAIChatLogProbsOutput(
837
- choice == null ? void 0 : choice.logprobs
838
- );
839
- if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
840
- if (logprobs === void 0) logprobs = [];
841
- logprobs.push(...mappedLogprobs);
842
- }
843
- const mappedToolCalls = useLegacyFunctionCalling && delta.function_call != null ? [
844
- {
845
- type: "function",
846
- id: (0, import_provider_utils3.generateId)(),
847
- function: delta.function_call,
848
- index: 0
849
- }
850
- ] : delta.tool_calls;
851
- if (mappedToolCalls != null) {
852
- for (const toolCallDelta of mappedToolCalls) {
711
+ if (delta.tool_calls != null) {
712
+ for (const toolCallDelta of delta.tool_calls) {
853
713
  const index = toolCallDelta.index;
854
714
  if (toolCalls[index] == null) {
855
715
  if (toolCallDelta.type !== "function") {
@@ -864,7 +724,7 @@ var OpenAIChatLanguageModel = class {
864
724
  message: `Expected 'id' to be a string.`
865
725
  });
866
726
  }
867
- if (((_a = toolCallDelta.function) == null ? void 0 : _a.name) == null) {
727
+ if (((_m = toolCallDelta.function) == null ? void 0 : _m.name) == null) {
868
728
  throw new import_provider3.InvalidResponseDataError({
869
729
  data: toolCallDelta,
870
730
  message: `Expected 'function.name' to be a string.`
@@ -875,12 +735,12 @@ var OpenAIChatLanguageModel = class {
875
735
  type: "function",
876
736
  function: {
877
737
  name: toolCallDelta.function.name,
878
- arguments: (_b = toolCallDelta.function.arguments) != null ? _b : ""
738
+ arguments: (_n = toolCallDelta.function.arguments) != null ? _n : ""
879
739
  },
880
740
  hasFinished: false
881
741
  };
882
742
  const toolCall2 = toolCalls[index];
883
- if (((_c = toolCall2.function) == null ? void 0 : _c.name) != null && ((_d = toolCall2.function) == null ? void 0 : _d.arguments) != null) {
743
+ if (((_o = toolCall2.function) == null ? void 0 : _o.name) != null && ((_p = toolCall2.function) == null ? void 0 : _p.arguments) != null) {
884
744
  if (toolCall2.function.arguments.length > 0) {
885
745
  controller.enqueue({
886
746
  type: "tool-call-delta",
@@ -894,7 +754,7 @@ var OpenAIChatLanguageModel = class {
894
754
  controller.enqueue({
895
755
  type: "tool-call",
896
756
  toolCallType: "function",
897
- toolCallId: (_e = toolCall2.id) != null ? _e : (0, import_provider_utils3.generateId)(),
757
+ toolCallId: (_q = toolCall2.id) != null ? _q : (0, import_provider_utils3.generateId)(),
898
758
  toolName: toolCall2.function.name,
899
759
  args: toolCall2.function.arguments
900
760
  });
@@ -907,21 +767,21 @@ var OpenAIChatLanguageModel = class {
907
767
  if (toolCall.hasFinished) {
908
768
  continue;
909
769
  }
910
- if (((_f = toolCallDelta.function) == null ? void 0 : _f.arguments) != null) {
911
- toolCall.function.arguments += (_h = (_g = toolCallDelta.function) == null ? void 0 : _g.arguments) != null ? _h : "";
770
+ if (((_r = toolCallDelta.function) == null ? void 0 : _r.arguments) != null) {
771
+ toolCall.function.arguments += (_t = (_s = toolCallDelta.function) == null ? void 0 : _s.arguments) != null ? _t : "";
912
772
  }
913
773
  controller.enqueue({
914
774
  type: "tool-call-delta",
915
775
  toolCallType: "function",
916
776
  toolCallId: toolCall.id,
917
777
  toolName: toolCall.function.name,
918
- argsTextDelta: (_i = toolCallDelta.function.arguments) != null ? _i : ""
778
+ argsTextDelta: (_u = toolCallDelta.function.arguments) != null ? _u : ""
919
779
  });
920
- if (((_j = toolCall.function) == null ? void 0 : _j.name) != null && ((_k = toolCall.function) == null ? void 0 : _k.arguments) != null && (0, import_provider_utils3.isParsableJson)(toolCall.function.arguments)) {
780
+ if (((_v = toolCall.function) == null ? void 0 : _v.name) != null && ((_w = toolCall.function) == null ? void 0 : _w.arguments) != null && (0, import_provider_utils3.isParsableJson)(toolCall.function.arguments)) {
921
781
  controller.enqueue({
922
782
  type: "tool-call",
923
783
  toolCallType: "function",
924
- toolCallId: (_l = toolCall.id) != null ? _l : (0, import_provider_utils3.generateId)(),
784
+ toolCallId: (_x = toolCall.id) != null ? _x : (0, import_provider_utils3.generateId)(),
925
785
  toolName: toolCall.function.name,
926
786
  args: toolCall.function.arguments
927
787
  });
@@ -931,125 +791,111 @@ var OpenAIChatLanguageModel = class {
931
791
  }
932
792
  },
933
793
  flush(controller) {
934
- var _a, _b;
935
794
  controller.enqueue({
936
795
  type: "finish",
937
796
  finishReason,
938
- logprobs,
939
- usage: {
940
- promptTokens: (_a = usage.promptTokens) != null ? _a : NaN,
941
- completionTokens: (_b = usage.completionTokens) != null ? _b : NaN
942
- },
797
+ usage,
943
798
  ...providerMetadata != null ? { providerMetadata } : {}
944
799
  });
945
800
  }
946
801
  })
947
802
  ),
948
- rawCall: { rawPrompt, rawSettings },
949
- rawResponse: { headers: responseHeaders },
950
- request: { body: JSON.stringify(body) },
951
- warnings
803
+ request: { body },
804
+ response: { headers: responseHeaders }
952
805
  };
953
806
  }
954
807
  };
955
- var openaiTokenUsageSchema = import_zod2.z.object({
956
- prompt_tokens: import_zod2.z.number().nullish(),
957
- completion_tokens: import_zod2.z.number().nullish(),
958
- prompt_tokens_details: import_zod2.z.object({
959
- cached_tokens: import_zod2.z.number().nullish()
808
+ var openaiTokenUsageSchema = import_zod3.z.object({
809
+ prompt_tokens: import_zod3.z.number().nullish(),
810
+ completion_tokens: import_zod3.z.number().nullish(),
811
+ total_tokens: import_zod3.z.number().nullish(),
812
+ prompt_tokens_details: import_zod3.z.object({
813
+ cached_tokens: import_zod3.z.number().nullish()
960
814
  }).nullish(),
961
- completion_tokens_details: import_zod2.z.object({
962
- reasoning_tokens: import_zod2.z.number().nullish(),
963
- accepted_prediction_tokens: import_zod2.z.number().nullish(),
964
- rejected_prediction_tokens: import_zod2.z.number().nullish()
815
+ completion_tokens_details: import_zod3.z.object({
816
+ reasoning_tokens: import_zod3.z.number().nullish(),
817
+ accepted_prediction_tokens: import_zod3.z.number().nullish(),
818
+ rejected_prediction_tokens: import_zod3.z.number().nullish()
965
819
  }).nullish()
966
820
  }).nullish();
967
- var openaiChatResponseSchema = import_zod2.z.object({
968
- id: import_zod2.z.string().nullish(),
969
- created: import_zod2.z.number().nullish(),
970
- model: import_zod2.z.string().nullish(),
971
- choices: import_zod2.z.array(
972
- import_zod2.z.object({
973
- message: import_zod2.z.object({
974
- role: import_zod2.z.literal("assistant").nullish(),
975
- content: import_zod2.z.string().nullish(),
976
- function_call: import_zod2.z.object({
977
- arguments: import_zod2.z.string(),
978
- name: import_zod2.z.string()
979
- }).nullish(),
980
- tool_calls: import_zod2.z.array(
981
- import_zod2.z.object({
982
- id: import_zod2.z.string().nullish(),
983
- type: import_zod2.z.literal("function"),
984
- function: import_zod2.z.object({
985
- name: import_zod2.z.string(),
986
- arguments: import_zod2.z.string()
821
+ var openaiChatResponseSchema = import_zod3.z.object({
822
+ id: import_zod3.z.string().nullish(),
823
+ created: import_zod3.z.number().nullish(),
824
+ model: import_zod3.z.string().nullish(),
825
+ choices: import_zod3.z.array(
826
+ import_zod3.z.object({
827
+ message: import_zod3.z.object({
828
+ role: import_zod3.z.literal("assistant").nullish(),
829
+ content: import_zod3.z.string().nullish(),
830
+ tool_calls: import_zod3.z.array(
831
+ import_zod3.z.object({
832
+ id: import_zod3.z.string().nullish(),
833
+ type: import_zod3.z.literal("function"),
834
+ function: import_zod3.z.object({
835
+ name: import_zod3.z.string(),
836
+ arguments: import_zod3.z.string()
987
837
  })
988
838
  })
989
839
  ).nullish()
990
840
  }),
991
- index: import_zod2.z.number(),
992
- logprobs: import_zod2.z.object({
993
- content: import_zod2.z.array(
994
- import_zod2.z.object({
995
- token: import_zod2.z.string(),
996
- logprob: import_zod2.z.number(),
997
- top_logprobs: import_zod2.z.array(
998
- import_zod2.z.object({
999
- token: import_zod2.z.string(),
1000
- logprob: import_zod2.z.number()
841
+ index: import_zod3.z.number(),
842
+ logprobs: import_zod3.z.object({
843
+ content: import_zod3.z.array(
844
+ import_zod3.z.object({
845
+ token: import_zod3.z.string(),
846
+ logprob: import_zod3.z.number(),
847
+ top_logprobs: import_zod3.z.array(
848
+ import_zod3.z.object({
849
+ token: import_zod3.z.string(),
850
+ logprob: import_zod3.z.number()
1001
851
  })
1002
852
  )
1003
853
  })
1004
- ).nullable()
854
+ ).nullish()
1005
855
  }).nullish(),
1006
- finish_reason: import_zod2.z.string().nullish()
856
+ finish_reason: import_zod3.z.string().nullish()
1007
857
  })
1008
858
  ),
1009
859
  usage: openaiTokenUsageSchema
1010
860
  });
1011
- var openaiChatChunkSchema = import_zod2.z.union([
1012
- import_zod2.z.object({
1013
- id: import_zod2.z.string().nullish(),
1014
- created: import_zod2.z.number().nullish(),
1015
- model: import_zod2.z.string().nullish(),
1016
- choices: import_zod2.z.array(
1017
- import_zod2.z.object({
1018
- delta: import_zod2.z.object({
1019
- role: import_zod2.z.enum(["assistant"]).nullish(),
1020
- content: import_zod2.z.string().nullish(),
1021
- function_call: import_zod2.z.object({
1022
- name: import_zod2.z.string().optional(),
1023
- arguments: import_zod2.z.string().optional()
1024
- }).nullish(),
1025
- tool_calls: import_zod2.z.array(
1026
- import_zod2.z.object({
1027
- index: import_zod2.z.number(),
1028
- id: import_zod2.z.string().nullish(),
1029
- type: import_zod2.z.literal("function").nullish(),
1030
- function: import_zod2.z.object({
1031
- name: import_zod2.z.string().nullish(),
1032
- arguments: import_zod2.z.string().nullish()
861
+ var openaiChatChunkSchema = import_zod3.z.union([
862
+ import_zod3.z.object({
863
+ id: import_zod3.z.string().nullish(),
864
+ created: import_zod3.z.number().nullish(),
865
+ model: import_zod3.z.string().nullish(),
866
+ choices: import_zod3.z.array(
867
+ import_zod3.z.object({
868
+ delta: import_zod3.z.object({
869
+ role: import_zod3.z.enum(["assistant"]).nullish(),
870
+ content: import_zod3.z.string().nullish(),
871
+ tool_calls: import_zod3.z.array(
872
+ import_zod3.z.object({
873
+ index: import_zod3.z.number(),
874
+ id: import_zod3.z.string().nullish(),
875
+ type: import_zod3.z.literal("function").nullish(),
876
+ function: import_zod3.z.object({
877
+ name: import_zod3.z.string().nullish(),
878
+ arguments: import_zod3.z.string().nullish()
1033
879
  })
1034
880
  })
1035
881
  ).nullish()
1036
882
  }).nullish(),
1037
- logprobs: import_zod2.z.object({
1038
- content: import_zod2.z.array(
1039
- import_zod2.z.object({
1040
- token: import_zod2.z.string(),
1041
- logprob: import_zod2.z.number(),
1042
- top_logprobs: import_zod2.z.array(
1043
- import_zod2.z.object({
1044
- token: import_zod2.z.string(),
1045
- logprob: import_zod2.z.number()
883
+ logprobs: import_zod3.z.object({
884
+ content: import_zod3.z.array(
885
+ import_zod3.z.object({
886
+ token: import_zod3.z.string(),
887
+ logprob: import_zod3.z.number(),
888
+ top_logprobs: import_zod3.z.array(
889
+ import_zod3.z.object({
890
+ token: import_zod3.z.string(),
891
+ logprob: import_zod3.z.number()
1046
892
  })
1047
893
  )
1048
894
  })
1049
- ).nullable()
895
+ ).nullish()
1050
896
  }).nullish(),
1051
- finish_reason: import_zod2.z.string().nullish(),
1052
- index: import_zod2.z.number()
897
+ finish_reason: import_zod3.z.string().nullish(),
898
+ index: import_zod3.z.number()
1053
899
  })
1054
900
  ),
1055
901
  usage: openaiTokenUsageSchema
@@ -1059,9 +905,6 @@ var openaiChatChunkSchema = import_zod2.z.union([
1059
905
  function isReasoningModel(modelId) {
1060
906
  return modelId.startsWith("o");
1061
907
  }
1062
- function isAudioModel(modelId) {
1063
- return modelId.startsWith("gpt-4o-audio-preview");
1064
- }
1065
908
  function getSystemMessageMode(modelId) {
1066
909
  var _a, _b;
1067
910
  if (!isReasoningModel(modelId)) {
@@ -1103,21 +946,16 @@ var reasoningModels = {
1103
946
  };
1104
947
 
1105
948
  // src/openai-completion-language-model.ts
1106
- var import_provider5 = require("@ai-sdk/provider");
1107
949
  var import_provider_utils4 = require("@ai-sdk/provider-utils");
1108
- var import_zod3 = require("zod");
950
+ var import_zod5 = require("zod");
1109
951
 
1110
952
  // src/convert-to-openai-completion-prompt.ts
1111
953
  var import_provider4 = require("@ai-sdk/provider");
1112
954
  function convertToOpenAICompletionPrompt({
1113
955
  prompt,
1114
- inputFormat,
1115
956
  user = "user",
1116
957
  assistant = "assistant"
1117
958
  }) {
1118
- if (inputFormat === "prompt" && prompt.length === 1 && prompt[0].role === "user" && prompt[0].content.length === 1 && prompt[0].content[0].type === "text") {
1119
- return { prompt: prompt[0].content[0].text };
1120
- }
1121
959
  let text = "";
1122
960
  if (prompt[0].role === "system") {
1123
961
  text += `${prompt[0].content}
@@ -1139,13 +977,8 @@ function convertToOpenAICompletionPrompt({
1139
977
  case "text": {
1140
978
  return part.text;
1141
979
  }
1142
- case "image": {
1143
- throw new import_provider4.UnsupportedFunctionalityError({
1144
- functionality: "images"
1145
- });
1146
- }
1147
980
  }
1148
- }).join("");
981
+ }).filter(Boolean).join("");
1149
982
  text += `${user}:
1150
983
  ${userMessage}
1151
984
 
@@ -1191,37 +1024,68 @@ ${user}:`]
1191
1024
  };
1192
1025
  }
1193
1026
 
1194
- // src/map-openai-completion-logprobs.ts
1195
- function mapOpenAICompletionLogProbs(logprobs) {
1196
- return logprobs == null ? void 0 : logprobs.tokens.map((token, index) => ({
1197
- token,
1198
- logprob: logprobs.token_logprobs[index],
1199
- topLogprobs: logprobs.top_logprobs ? Object.entries(logprobs.top_logprobs[index]).map(
1200
- ([token2, logprob]) => ({
1201
- token: token2,
1202
- logprob
1203
- })
1204
- ) : []
1205
- }));
1206
- }
1027
+ // src/openai-completion-options.ts
1028
+ var import_zod4 = require("zod");
1029
+ var openaiCompletionProviderOptions = import_zod4.z.object({
1030
+ /**
1031
+ Echo back the prompt in addition to the completion.
1032
+ */
1033
+ echo: import_zod4.z.boolean().optional(),
1034
+ /**
1035
+ Modify the likelihood of specified tokens appearing in the completion.
1036
+
1037
+ Accepts a JSON object that maps tokens (specified by their token ID in
1038
+ the GPT tokenizer) to an associated bias value from -100 to 100. You
1039
+ can use this tokenizer tool to convert text to token IDs. Mathematically,
1040
+ the bias is added to the logits generated by the model prior to sampling.
1041
+ The exact effect will vary per model, but values between -1 and 1 should
1042
+ decrease or increase likelihood of selection; values like -100 or 100
1043
+ should result in a ban or exclusive selection of the relevant token.
1044
+
1045
+ As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
1046
+ token from being generated.
1047
+ */
1048
+ logitBias: import_zod4.z.record(import_zod4.z.string(), import_zod4.z.number()).optional(),
1049
+ /**
1050
+ The suffix that comes after a completion of inserted text.
1051
+ */
1052
+ suffix: import_zod4.z.string().optional(),
1053
+ /**
1054
+ A unique identifier representing your end-user, which can help OpenAI to
1055
+ monitor and detect abuse. Learn more.
1056
+ */
1057
+ user: import_zod4.z.string().optional(),
1058
+ /**
1059
+ Return the log probabilities of the tokens. Including logprobs will increase
1060
+ the response size and can slow down response times. However, it can
1061
+ be useful to better understand how the model is behaving.
1062
+ Setting to true will return the log probabilities of the tokens that
1063
+ were generated.
1064
+ Setting to a number will return the log probabilities of the top n
1065
+ tokens that were generated.
1066
+ */
1067
+ logprobs: import_zod4.z.union([import_zod4.z.boolean(), import_zod4.z.number()]).optional()
1068
+ });
1207
1069
 
1208
1070
  // src/openai-completion-language-model.ts
1209
1071
  var OpenAICompletionLanguageModel = class {
1210
- constructor(modelId, settings, config) {
1211
- this.specificationVersion = "v1";
1212
- this.defaultObjectGenerationMode = void 0;
1072
+ constructor(modelId, config) {
1073
+ this.specificationVersion = "v2";
1074
+ this.supportedUrls = {
1075
+ // No URLs are supported for completion models.
1076
+ };
1213
1077
  this.modelId = modelId;
1214
- this.settings = settings;
1215
1078
  this.config = config;
1216
1079
  }
1080
+ get providerOptionsName() {
1081
+ return this.config.provider.split(".")[0].trim();
1082
+ }
1217
1083
  get provider() {
1218
1084
  return this.config.provider;
1219
1085
  }
1220
- getArgs({
1221
- mode,
1222
- inputFormat,
1086
+ async getArgs({
1223
1087
  prompt,
1224
- maxTokens,
1088
+ maxOutputTokens,
1225
1089
  temperature,
1226
1090
  topP,
1227
1091
  topK,
@@ -1229,16 +1093,32 @@ var OpenAICompletionLanguageModel = class {
1229
1093
  presencePenalty,
1230
1094
  stopSequences: userStopSequences,
1231
1095
  responseFormat,
1232
- seed
1096
+ tools,
1097
+ toolChoice,
1098
+ seed,
1099
+ providerOptions
1233
1100
  }) {
1234
- var _a;
1235
- const type = mode.type;
1236
1101
  const warnings = [];
1102
+ const openaiOptions = {
1103
+ ...await (0, import_provider_utils4.parseProviderOptions)({
1104
+ provider: "openai",
1105
+ providerOptions,
1106
+ schema: openaiCompletionProviderOptions
1107
+ }),
1108
+ ...await (0, import_provider_utils4.parseProviderOptions)({
1109
+ provider: this.providerOptionsName,
1110
+ providerOptions,
1111
+ schema: openaiCompletionProviderOptions
1112
+ })
1113
+ };
1237
1114
  if (topK != null) {
1238
- warnings.push({
1239
- type: "unsupported-setting",
1240
- setting: "topK"
1241
- });
1115
+ warnings.push({ type: "unsupported-setting", setting: "topK" });
1116
+ }
1117
+ if (tools == null ? void 0 : tools.length) {
1118
+ warnings.push({ type: "unsupported-setting", setting: "tools" });
1119
+ }
1120
+ if (toolChoice != null) {
1121
+ warnings.push({ type: "unsupported-setting", setting: "toolChoice" });
1242
1122
  }
1243
1123
  if (responseFormat != null && responseFormat.type !== "text") {
1244
1124
  warnings.push({
@@ -1247,61 +1127,36 @@ var OpenAICompletionLanguageModel = class {
1247
1127
  details: "JSON response format is not supported."
1248
1128
  });
1249
1129
  }
1250
- const { prompt: completionPrompt, stopSequences } = convertToOpenAICompletionPrompt({ prompt, inputFormat });
1130
+ const { prompt: completionPrompt, stopSequences } = convertToOpenAICompletionPrompt({ prompt });
1251
1131
  const stop = [...stopSequences != null ? stopSequences : [], ...userStopSequences != null ? userStopSequences : []];
1252
- const baseArgs = {
1253
- // model id:
1254
- model: this.modelId,
1255
- // model specific settings:
1256
- echo: this.settings.echo,
1257
- logit_bias: this.settings.logitBias,
1258
- logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
1259
- suffix: this.settings.suffix,
1260
- user: this.settings.user,
1261
- // standardized settings:
1262
- max_tokens: maxTokens,
1263
- temperature,
1264
- top_p: topP,
1265
- frequency_penalty: frequencyPenalty,
1266
- presence_penalty: presencePenalty,
1267
- seed,
1268
- // prompt:
1269
- prompt: completionPrompt,
1270
- // stop sequences:
1271
- stop: stop.length > 0 ? stop : void 0
1132
+ return {
1133
+ args: {
1134
+ // model id:
1135
+ model: this.modelId,
1136
+ // model specific settings:
1137
+ echo: openaiOptions.echo,
1138
+ logit_bias: openaiOptions.logitBias,
1139
+ logprobs: (openaiOptions == null ? void 0 : openaiOptions.logprobs) === true ? 0 : (openaiOptions == null ? void 0 : openaiOptions.logprobs) === false ? void 0 : openaiOptions == null ? void 0 : openaiOptions.logprobs,
1140
+ suffix: openaiOptions.suffix,
1141
+ user: openaiOptions.user,
1142
+ // standardized settings:
1143
+ max_tokens: maxOutputTokens,
1144
+ temperature,
1145
+ top_p: topP,
1146
+ frequency_penalty: frequencyPenalty,
1147
+ presence_penalty: presencePenalty,
1148
+ seed,
1149
+ // prompt:
1150
+ prompt: completionPrompt,
1151
+ // stop sequences:
1152
+ stop: stop.length > 0 ? stop : void 0
1153
+ },
1154
+ warnings
1272
1155
  };
1273
- switch (type) {
1274
- case "regular": {
1275
- if ((_a = mode.tools) == null ? void 0 : _a.length) {
1276
- throw new import_provider5.UnsupportedFunctionalityError({
1277
- functionality: "tools"
1278
- });
1279
- }
1280
- if (mode.toolChoice) {
1281
- throw new import_provider5.UnsupportedFunctionalityError({
1282
- functionality: "toolChoice"
1283
- });
1284
- }
1285
- return { args: baseArgs, warnings };
1286
- }
1287
- case "object-json": {
1288
- throw new import_provider5.UnsupportedFunctionalityError({
1289
- functionality: "object-json mode"
1290
- });
1291
- }
1292
- case "object-tool": {
1293
- throw new import_provider5.UnsupportedFunctionalityError({
1294
- functionality: "object-tool mode"
1295
- });
1296
- }
1297
- default: {
1298
- const _exhaustiveCheck = type;
1299
- throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
1300
- }
1301
- }
1302
1156
  }
1303
1157
  async doGenerate(options) {
1304
- const { args, warnings } = this.getArgs(options);
1158
+ var _a, _b, _c;
1159
+ const { args, warnings } = await this.getArgs(options);
1305
1160
  const {
1306
1161
  responseHeaders,
1307
1162
  value: response,
@@ -1320,30 +1175,37 @@ var OpenAICompletionLanguageModel = class {
1320
1175
  abortSignal: options.abortSignal,
1321
1176
  fetch: this.config.fetch
1322
1177
  });
1323
- const { prompt: rawPrompt, ...rawSettings } = args;
1324
1178
  const choice = response.choices[0];
1179
+ const providerMetadata = { openai: {} };
1180
+ if (choice.logprobs != null) {
1181
+ providerMetadata.openai.logprobs = choice.logprobs;
1182
+ }
1325
1183
  return {
1326
- text: choice.text,
1184
+ content: [{ type: "text", text: choice.text }],
1327
1185
  usage: {
1328
- promptTokens: response.usage.prompt_tokens,
1329
- completionTokens: response.usage.completion_tokens
1186
+ inputTokens: (_a = response.usage) == null ? void 0 : _a.prompt_tokens,
1187
+ outputTokens: (_b = response.usage) == null ? void 0 : _b.completion_tokens,
1188
+ totalTokens: (_c = response.usage) == null ? void 0 : _c.total_tokens
1330
1189
  },
1331
1190
  finishReason: mapOpenAIFinishReason(choice.finish_reason),
1332
- logprobs: mapOpenAICompletionLogProbs(choice.logprobs),
1333
- rawCall: { rawPrompt, rawSettings },
1334
- rawResponse: { headers: responseHeaders, body: rawResponse },
1335
- response: getResponseMetadata(response),
1336
- warnings,
1337
- request: { body: JSON.stringify(args) }
1191
+ request: { body: args },
1192
+ response: {
1193
+ ...getResponseMetadata(response),
1194
+ headers: responseHeaders,
1195
+ body: rawResponse
1196
+ },
1197
+ providerMetadata,
1198
+ warnings
1338
1199
  };
1339
1200
  }
1340
1201
  async doStream(options) {
1341
- const { args, warnings } = this.getArgs(options);
1202
+ const { args, warnings } = await this.getArgs(options);
1342
1203
  const body = {
1343
1204
  ...args,
1344
1205
  stream: true,
1345
- // only include stream_options when in strict compatibility mode:
1346
- stream_options: this.config.compatibility === "strict" ? { include_usage: true } : void 0
1206
+ stream_options: {
1207
+ include_usage: true
1208
+ }
1347
1209
  };
1348
1210
  const { responseHeaders, value: response } = await (0, import_provider_utils4.postJsonToApi)({
1349
1211
  url: this.config.url({
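
Two result-shape changes in the hunks above: `doGenerate` now returns a typed `content` array instead of a bare `text` string, usage widens to `inputTokens`/`outputTokens`/`totalTokens`, and completion logprobs move from a dedicated `logprobs` field into `providerMetadata.openai.logprobs`. Streaming also unconditionally sends `stream_options: { include_usage: true }`; the old `compatibility: "strict"` gate is removed. A sketch of reading the relocated logprobs, assuming `generateText` surfaces `providerMetadata` from the model result unchanged:

  import { generateText } from 'ai';
  import { openai } from '@ai-sdk/openai';

  const result = await generateText({
    model: openai.completion('gpt-3.5-turbo-instruct'),
    prompt: 'Answer in one word:',
    providerOptions: { openai: { logprobs: 2 } }, // `true` coerces to 0, `false` to undefined
  });

  console.log(result.usage); // { inputTokens, outputTokens, totalTokens }
  console.log(result.providerMetadata?.openai.logprobs); // raw OpenAI logprobs, when present
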
@@ -1359,17 +1221,20 @@ var OpenAICompletionLanguageModel = class {
1359
1221
  abortSignal: options.abortSignal,
1360
1222
  fetch: this.config.fetch
1361
1223
  });
1362
- const { prompt: rawPrompt, ...rawSettings } = args;
1363
1224
  let finishReason = "unknown";
1364
- let usage = {
1365
- promptTokens: Number.NaN,
1366
- completionTokens: Number.NaN
1225
+ const providerMetadata = { openai: {} };
1226
+ const usage = {
1227
+ inputTokens: void 0,
1228
+ outputTokens: void 0,
1229
+ totalTokens: void 0
1367
1230
  };
1368
- let logprobs;
1369
1231
  let isFirstChunk = true;
1370
1232
  return {
1371
1233
  stream: response.pipeThrough(
1372
1234
  new TransformStream({
1235
+ start(controller) {
1236
+ controller.enqueue({ type: "stream-start", warnings });
1237
+ },
1373
1238
  transform(chunk, controller) {
1374
1239
  if (!chunk.success) {
1375
1240
  finishReason = "error";
@@ -1390,127 +1255,140 @@ var OpenAICompletionLanguageModel = class {
1390
1255
  });
1391
1256
  }
1392
1257
  if (value.usage != null) {
1393
- usage = {
1394
- promptTokens: value.usage.prompt_tokens,
1395
- completionTokens: value.usage.completion_tokens
1396
- };
1258
+ usage.inputTokens = value.usage.prompt_tokens;
1259
+ usage.outputTokens = value.usage.completion_tokens;
1260
+ usage.totalTokens = value.usage.total_tokens;
1397
1261
  }
1398
1262
  const choice = value.choices[0];
1399
1263
  if ((choice == null ? void 0 : choice.finish_reason) != null) {
1400
1264
  finishReason = mapOpenAIFinishReason(choice.finish_reason);
1401
1265
  }
1266
+ if ((choice == null ? void 0 : choice.logprobs) != null) {
1267
+ providerMetadata.openai.logprobs = choice.logprobs;
1268
+ }
1402
1269
  if ((choice == null ? void 0 : choice.text) != null) {
1403
1270
  controller.enqueue({
1404
- type: "text-delta",
1405
- textDelta: choice.text
1271
+ type: "text",
1272
+ text: choice.text
1406
1273
  });
1407
1274
  }
1408
- const mappedLogprobs = mapOpenAICompletionLogProbs(
1409
- choice == null ? void 0 : choice.logprobs
1410
- );
1411
- if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
1412
- if (logprobs === void 0) logprobs = [];
1413
- logprobs.push(...mappedLogprobs);
1414
- }
1415
1275
  },
1416
1276
  flush(controller) {
1417
1277
  controller.enqueue({
1418
1278
  type: "finish",
1419
1279
  finishReason,
1420
- logprobs,
1280
+ providerMetadata,
1421
1281
  usage
1422
1282
  });
1423
1283
  }
1424
1284
  })
1425
1285
  ),
1426
- rawCall: { rawPrompt, rawSettings },
1427
- rawResponse: { headers: responseHeaders },
1428
- warnings,
1429
- request: { body: JSON.stringify(body) }
1286
+ request: { body },
1287
+ response: { headers: responseHeaders }
1430
1288
  };
1431
1289
  }
1432
1290
  };
1433
- var openaiCompletionResponseSchema = import_zod3.z.object({
1434
- id: import_zod3.z.string().nullish(),
1435
- created: import_zod3.z.number().nullish(),
1436
- model: import_zod3.z.string().nullish(),
1437
- choices: import_zod3.z.array(
1438
- import_zod3.z.object({
1439
- text: import_zod3.z.string(),
1440
- finish_reason: import_zod3.z.string(),
1441
- logprobs: import_zod3.z.object({
1442
- tokens: import_zod3.z.array(import_zod3.z.string()),
1443
- token_logprobs: import_zod3.z.array(import_zod3.z.number()),
1444
- top_logprobs: import_zod3.z.array(import_zod3.z.record(import_zod3.z.string(), import_zod3.z.number())).nullable()
1291
+ var usageSchema = import_zod5.z.object({
1292
+ prompt_tokens: import_zod5.z.number(),
1293
+ completion_tokens: import_zod5.z.number(),
1294
+ total_tokens: import_zod5.z.number()
1295
+ });
1296
+ var openaiCompletionResponseSchema = import_zod5.z.object({
1297
+ id: import_zod5.z.string().nullish(),
1298
+ created: import_zod5.z.number().nullish(),
1299
+ model: import_zod5.z.string().nullish(),
1300
+ choices: import_zod5.z.array(
1301
+ import_zod5.z.object({
1302
+ text: import_zod5.z.string(),
1303
+ finish_reason: import_zod5.z.string(),
1304
+ logprobs: import_zod5.z.object({
1305
+ tokens: import_zod5.z.array(import_zod5.z.string()),
1306
+ token_logprobs: import_zod5.z.array(import_zod5.z.number()),
1307
+ top_logprobs: import_zod5.z.array(import_zod5.z.record(import_zod5.z.string(), import_zod5.z.number())).nullish()
1445
1308
  }).nullish()
1446
1309
  })
1447
1310
  ),
1448
- usage: import_zod3.z.object({
1449
- prompt_tokens: import_zod3.z.number(),
1450
- completion_tokens: import_zod3.z.number()
1451
- })
1311
+ usage: usageSchema.nullish()
1452
1312
  });
1453
- var openaiCompletionChunkSchema = import_zod3.z.union([
1454
- import_zod3.z.object({
1455
- id: import_zod3.z.string().nullish(),
1456
- created: import_zod3.z.number().nullish(),
1457
- model: import_zod3.z.string().nullish(),
1458
- choices: import_zod3.z.array(
1459
- import_zod3.z.object({
1460
- text: import_zod3.z.string(),
1461
- finish_reason: import_zod3.z.string().nullish(),
1462
- index: import_zod3.z.number(),
1463
- logprobs: import_zod3.z.object({
1464
- tokens: import_zod3.z.array(import_zod3.z.string()),
1465
- token_logprobs: import_zod3.z.array(import_zod3.z.number()),
1466
- top_logprobs: import_zod3.z.array(import_zod3.z.record(import_zod3.z.string(), import_zod3.z.number())).nullable()
1313
+ var openaiCompletionChunkSchema = import_zod5.z.union([
1314
+ import_zod5.z.object({
1315
+ id: import_zod5.z.string().nullish(),
1316
+ created: import_zod5.z.number().nullish(),
1317
+ model: import_zod5.z.string().nullish(),
1318
+ choices: import_zod5.z.array(
1319
+ import_zod5.z.object({
1320
+ text: import_zod5.z.string(),
1321
+ finish_reason: import_zod5.z.string().nullish(),
1322
+ index: import_zod5.z.number(),
1323
+ logprobs: import_zod5.z.object({
1324
+ tokens: import_zod5.z.array(import_zod5.z.string()),
1325
+ token_logprobs: import_zod5.z.array(import_zod5.z.number()),
1326
+ top_logprobs: import_zod5.z.array(import_zod5.z.record(import_zod5.z.string(), import_zod5.z.number())).nullish()
1467
1327
  }).nullish()
1468
1328
  })
1469
1329
  ),
1470
- usage: import_zod3.z.object({
1471
- prompt_tokens: import_zod3.z.number(),
1472
- completion_tokens: import_zod3.z.number()
1473
- }).nullish()
1330
+ usage: usageSchema.nullish()
1474
1331
  }),
1475
1332
  openaiErrorDataSchema
1476
1333
  ]);
1477
1334
 
1478
1335
  // src/openai-embedding-model.ts
1479
- var import_provider6 = require("@ai-sdk/provider");
1336
+ var import_provider5 = require("@ai-sdk/provider");
1480
1337
  var import_provider_utils5 = require("@ai-sdk/provider-utils");
1481
- var import_zod4 = require("zod");
1338
+ var import_zod7 = require("zod");
1339
+
1340
+ // src/openai-embedding-options.ts
1341
+ var import_zod6 = require("zod");
1342
+ var openaiEmbeddingProviderOptions = import_zod6.z.object({
1343
+ /**
1344
+ The number of dimensions the resulting output embeddings should have.
1345
+ Only supported in text-embedding-3 and later models.
1346
+ */
1347
+ dimensions: import_zod6.z.number().optional(),
1348
+ /**
1349
+ A unique identifier representing your end-user, which can help OpenAI to
1350
+ monitor and detect abuse. Learn more.
1351
+ */
1352
+ user: import_zod6.z.string().optional()
1353
+ });
1354
+
1355
+ // src/openai-embedding-model.ts
1482
1356
  var OpenAIEmbeddingModel = class {
1483
- constructor(modelId, settings, config) {
1484
- this.specificationVersion = "v1";
1357
+ constructor(modelId, config) {
1358
+ this.specificationVersion = "v2";
1359
+ this.maxEmbeddingsPerCall = 2048;
1360
+ this.supportsParallelCalls = true;
1485
1361
  this.modelId = modelId;
1486
- this.settings = settings;
1487
1362
  this.config = config;
1488
1363
  }
1489
1364
  get provider() {
1490
1365
  return this.config.provider;
1491
1366
  }
1492
- get maxEmbeddingsPerCall() {
1493
- var _a;
1494
- return (_a = this.settings.maxEmbeddingsPerCall) != null ? _a : 2048;
1495
- }
1496
- get supportsParallelCalls() {
1497
- var _a;
1498
- return (_a = this.settings.supportsParallelCalls) != null ? _a : true;
1499
- }
1500
1367
  async doEmbed({
1501
1368
  values,
1502
1369
  headers,
1503
- abortSignal
1370
+ abortSignal,
1371
+ providerOptions
1504
1372
  }) {
1373
+ var _a;
1505
1374
  if (values.length > this.maxEmbeddingsPerCall) {
1506
- throw new import_provider6.TooManyEmbeddingValuesForCallError({
1375
+ throw new import_provider5.TooManyEmbeddingValuesForCallError({
1507
1376
  provider: this.provider,
1508
1377
  modelId: this.modelId,
1509
1378
  maxEmbeddingsPerCall: this.maxEmbeddingsPerCall,
1510
1379
  values
1511
1380
  });
1512
1381
  }
1513
- const { responseHeaders, value: response } = await (0, import_provider_utils5.postJsonToApi)({
1382
+ const openaiOptions = (_a = await (0, import_provider_utils5.parseProviderOptions)({
1383
+ provider: "openai",
1384
+ providerOptions,
1385
+ schema: openaiEmbeddingProviderOptions
1386
+ })) != null ? _a : {};
1387
+ const {
1388
+ responseHeaders,
1389
+ value: response,
1390
+ rawValue
1391
+ } = await (0, import_provider_utils5.postJsonToApi)({
1514
1392
  url: this.config.url({
1515
1393
  path: "/embeddings",
1516
1394
  modelId: this.modelId
@@ -1520,8 +1398,8 @@ var OpenAIEmbeddingModel = class {
1520
1398
  model: this.modelId,
1521
1399
  input: values,
1522
1400
  encoding_format: "float",
1523
- dimensions: this.settings.dimensions,
1524
- user: this.settings.user
1401
+ dimensions: openaiOptions.dimensions,
1402
+ user: openaiOptions.user
1525
1403
  },
1526
1404
  failedResponseHandler: openaiFailedResponseHandler,
1527
1405
  successfulResponseHandler: (0, import_provider_utils5.createJsonResponseHandler)(
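
The embedding model follows the same pattern: `dimensions` and `user` move from constructor settings to per-call `providerOptions.openai` (schema `openaiEmbeddingProviderOptions`), while `maxEmbeddingsPerCall` (2048) and `supportsParallelCalls` become fixed class fields rather than configurable settings. A sketch, assuming v5's `embed` threads `providerOptions` through to `doEmbed` as the signature above expects:

  import { embed } from 'ai';
  import { openai } from '@ai-sdk/openai';

  const { embedding } = await embed({
    model: openai.embedding('text-embedding-3-small'),
    value: 'sunny day at the beach',
    providerOptions: {
      openai: { dimensions: 512 }, // text-embedding-3 and later only
    },
  });
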
@@ -1533,18 +1411,18 @@ var OpenAIEmbeddingModel = class {
1533
1411
  return {
1534
1412
  embeddings: response.data.map((item) => item.embedding),
1535
1413
  usage: response.usage ? { tokens: response.usage.prompt_tokens } : void 0,
1536
- rawResponse: { headers: responseHeaders }
1414
+ response: { headers: responseHeaders, body: rawValue }
1537
1415
  };
1538
1416
  }
1539
1417
  };
1540
- var openaiTextEmbeddingResponseSchema = import_zod4.z.object({
1541
- data: import_zod4.z.array(import_zod4.z.object({ embedding: import_zod4.z.array(import_zod4.z.number()) })),
1542
- usage: import_zod4.z.object({ prompt_tokens: import_zod4.z.number() }).nullish()
1418
+ var openaiTextEmbeddingResponseSchema = import_zod7.z.object({
1419
+ data: import_zod7.z.array(import_zod7.z.object({ embedding: import_zod7.z.array(import_zod7.z.number()) })),
1420
+ usage: import_zod7.z.object({ prompt_tokens: import_zod7.z.number() }).nullish()
1543
1421
  });
1544
1422
 
1545
1423
  // src/openai-image-model.ts
1546
1424
  var import_provider_utils6 = require("@ai-sdk/provider-utils");
1547
- var import_zod5 = require("zod");
1425
+ var import_zod8 = require("zod");
1548
1426
 
1549
1427
  // src/openai-image-settings.ts
1550
1428
  var modelMaxImagesPerCall = {
@@ -1556,15 +1434,14 @@ var hasDefaultResponseFormat = /* @__PURE__ */ new Set(["gpt-image-1"]);
1556
1434
 
1557
1435
  // src/openai-image-model.ts
1558
1436
  var OpenAIImageModel = class {
1559
- constructor(modelId, settings, config) {
1437
+ constructor(modelId, config) {
1560
1438
  this.modelId = modelId;
1561
- this.settings = settings;
1562
1439
  this.config = config;
1563
- this.specificationVersion = "v1";
1440
+ this.specificationVersion = "v2";
1564
1441
  }
1565
1442
  get maxImagesPerCall() {
1566
- var _a, _b;
1567
- return (_b = (_a = this.settings.maxImagesPerCall) != null ? _a : modelMaxImagesPerCall[this.modelId]) != null ? _b : 1;
1443
+ var _a;
1444
+ return (_a = modelMaxImagesPerCall[this.modelId]) != null ? _a : 1;
1568
1445
  }
1569
1446
  get provider() {
1570
1447
  return this.config.provider;
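
The image model also loses its `settings` argument: the per-call image cap now comes solely from the built-in `modelMaxImagesPerCall` table (falling back to 1), and the spec version moves to v2. As the next hunk shows, DALL-E's revised prompts are now surfaced per image through `providerMetadata.openai.images`. A sketch, assuming the `experimental_generateImage` helper from `ai` forwards provider metadata on its result:

  import { experimental_generateImage as generateImage } from 'ai';
  import { openai } from '@ai-sdk/openai';

  const result = await generateImage({
    model: openai.image('dall-e-3'), // no settings object in 2.0
    prompt: 'A watercolor lighthouse at dawn',
  });

  // one entry per image; null where no revised prompt was returned
  console.log(result.providerMetadata?.openai.images);
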
@@ -1620,24 +1497,57 @@ var OpenAIImageModel = class {
1620
1497
  timestamp: currentDate,
1621
1498
  modelId: this.modelId,
1622
1499
  headers: responseHeaders
1500
+ },
1501
+ providerMetadata: {
1502
+ openai: {
1503
+ images: response.data.map(
1504
+ (item) => item.revised_prompt ? {
1505
+ revisedPrompt: item.revised_prompt
1506
+ } : null
1507
+ )
1508
+ }
1623
1509
  }
1624
1510
  };
1625
1511
  }
1626
1512
  };
1627
- var openaiImageResponseSchema = import_zod5.z.object({
1628
- data: import_zod5.z.array(import_zod5.z.object({ b64_json: import_zod5.z.string() }))
1513
+ var openaiImageResponseSchema = import_zod8.z.object({
1514
+ data: import_zod8.z.array(
1515
+ import_zod8.z.object({ b64_json: import_zod8.z.string(), revised_prompt: import_zod8.z.string().optional() })
1516
+ )
1629
1517
  });
1630
1518
 
1631
1519
  // src/openai-transcription-model.ts
1632
1520
  var import_provider_utils7 = require("@ai-sdk/provider-utils");
1633
- var import_zod6 = require("zod");
1634
- var openAIProviderOptionsSchema = import_zod6.z.object({
1635
- include: import_zod6.z.array(import_zod6.z.string()).nullish(),
1636
- language: import_zod6.z.string().nullish(),
1637
- prompt: import_zod6.z.string().nullish(),
1638
- temperature: import_zod6.z.number().min(0).max(1).nullish().default(0),
1639
- timestampGranularities: import_zod6.z.array(import_zod6.z.enum(["word", "segment"])).nullish().default(["segment"])
1521
+ var import_zod10 = require("zod");
1522
+
1523
+ // src/openai-transcription-options.ts
1524
+ var import_zod9 = require("zod");
1525
+ var openAITranscriptionProviderOptions = import_zod9.z.object({
1526
+ /**
1527
+ * Additional information to include in the transcription response.
1528
+ */
1529
+ include: import_zod9.z.array(import_zod9.z.string()).optional(),
1530
+ /**
1531
+ * The language of the input audio in ISO-639-1 format.
1532
+ */
1533
+ language: import_zod9.z.string().optional(),
1534
+ /**
1535
+ * An optional text to guide the model's style or continue a previous audio segment.
1536
+ */
1537
+ prompt: import_zod9.z.string().optional(),
1538
+ /**
1539
+ * The sampling temperature, between 0 and 1.
1540
+ * @default 0
1541
+ */
1542
+ temperature: import_zod9.z.number().min(0).max(1).default(0).optional(),
1543
+ /**
1544
+ * The timestamp granularities to populate for this transcription.
1545
+ * @default ['segment']
1546
+ */
1547
+ timestampGranularities: import_zod9.z.array(import_zod9.z.enum(["word", "segment"])).default(["segment"]).optional()
1640
1548
  });
1549
+
1550
+ // src/openai-transcription-model.ts
1641
1551
  var languageMap = {
1642
1552
  afrikaans: "af",
1643
1553
  arabic: "ar",
@@ -1706,17 +1616,16 @@ var OpenAITranscriptionModel = class {
1706
1616
  get provider() {
1707
1617
  return this.config.provider;
1708
1618
  }
1709
- getArgs({
1619
+ async getArgs({
1710
1620
  audio,
1711
1621
  mediaType,
1712
1622
  providerOptions
1713
1623
  }) {
1714
- var _a, _b, _c, _d, _e;
1715
1624
  const warnings = [];
1716
- const openAIOptions = (0, import_provider_utils7.parseProviderOptions)({
1625
+ const openAIOptions = await (0, import_provider_utils7.parseProviderOptions)({
1717
1626
  provider: "openai",
1718
1627
  providerOptions,
1719
- schema: openAIProviderOptionsSchema
1628
+ schema: openAITranscriptionProviderOptions
1720
1629
  });
1721
1630
  const formData = new FormData();
1722
1631
  const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([(0, import_provider_utils7.convertBase64ToUint8Array)(audio)]);
@@ -1724,15 +1633,14 @@ var OpenAITranscriptionModel = class {
1724
1633
  formData.append("file", new File([blob], "audio", { type: mediaType }));
1725
1634
  if (openAIOptions) {
1726
1635
  const transcriptionModelOptions = {
1727
- include: (_a = openAIOptions.include) != null ? _a : void 0,
1728
- language: (_b = openAIOptions.language) != null ? _b : void 0,
1729
- prompt: (_c = openAIOptions.prompt) != null ? _c : void 0,
1730
- temperature: (_d = openAIOptions.temperature) != null ? _d : void 0,
1731
- timestamp_granularities: (_e = openAIOptions.timestampGranularities) != null ? _e : void 0
1636
+ include: openAIOptions.include,
1637
+ language: openAIOptions.language,
1638
+ prompt: openAIOptions.prompt,
1639
+ temperature: openAIOptions.temperature,
1640
+ timestamp_granularities: openAIOptions.timestampGranularities
1732
1641
  };
1733
- for (const key in transcriptionModelOptions) {
1734
- const value = transcriptionModelOptions[key];
1735
- if (value !== void 0) {
1642
+ for (const [key, value] of Object.entries(transcriptionModelOptions)) {
1643
+ if (value != null) {
1736
1644
  formData.append(key, String(value));
1737
1645
  }
1738
1646
  }
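
Transcription options keep the same fields (include, language, prompt, temperature, timestampGranularities) but now live in a documented, exported schema (`openAITranscriptionProviderOptions`) that uses `.optional()` rather than `.nullish()`, option parsing becomes async, and the manual `!== void 0` copying is replaced by an `Object.entries` loop with a `!= null` check. Call sites are unchanged; a sketch, assuming the `experimental_transcribe` helper from `ai`:

  import { experimental_transcribe as transcribe } from 'ai';
  import { openai } from '@ai-sdk/openai';
  import { readFile } from 'node:fs/promises';

  const { text } = await transcribe({
    model: openai.transcription('whisper-1'),
    audio: await readFile('meeting.mp3'),
    providerOptions: {
      openai: { language: 'en', timestampGranularities: ['word'] },
    },
  });
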
@@ -1745,7 +1653,7 @@ var OpenAITranscriptionModel = class {
1745
1653
  async doGenerate(options) {
1746
1654
  var _a, _b, _c, _d, _e, _f;
1747
1655
  const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
1748
- const { formData, warnings } = this.getArgs(options);
1656
+ const { formData, warnings } = await this.getArgs(options);
1749
1657
  const {
1750
1658
  value: response,
1751
1659
  responseHeaders,
@@ -1784,25 +1692,25 @@ var OpenAITranscriptionModel = class {
1784
1692
  };
1785
1693
  }
1786
1694
  };
1787
- var openaiTranscriptionResponseSchema = import_zod6.z.object({
1788
- text: import_zod6.z.string(),
1789
- language: import_zod6.z.string().nullish(),
1790
- duration: import_zod6.z.number().nullish(),
1791
- words: import_zod6.z.array(
1792
- import_zod6.z.object({
1793
- word: import_zod6.z.string(),
1794
- start: import_zod6.z.number(),
1795
- end: import_zod6.z.number()
1695
+ var openaiTranscriptionResponseSchema = import_zod10.z.object({
1696
+ text: import_zod10.z.string(),
1697
+ language: import_zod10.z.string().nullish(),
1698
+ duration: import_zod10.z.number().nullish(),
1699
+ words: import_zod10.z.array(
1700
+ import_zod10.z.object({
1701
+ word: import_zod10.z.string(),
1702
+ start: import_zod10.z.number(),
1703
+ end: import_zod10.z.number()
1796
1704
  })
1797
1705
  ).nullish()
1798
1706
  });
1799
1707
 
1800
1708
  // src/openai-speech-model.ts
1801
1709
  var import_provider_utils8 = require("@ai-sdk/provider-utils");
1802
- var import_zod7 = require("zod");
1803
- var OpenAIProviderOptionsSchema = import_zod7.z.object({
1804
- instructions: import_zod7.z.string().nullish(),
1805
- speed: import_zod7.z.number().min(0.25).max(4).default(1).nullish()
1710
+ var import_zod11 = require("zod");
1711
+ var OpenAIProviderOptionsSchema = import_zod11.z.object({
1712
+ instructions: import_zod11.z.string().nullish(),
1713
+ speed: import_zod11.z.number().min(0.25).max(4).default(1).nullish()
1806
1714
  });
1807
1715
  var OpenAISpeechModel = class {
1808
1716
  constructor(modelId, config) {
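
For speech, only the zod import index changes here; `instructions` and `speed` behave as in 1.x, though `getArgs` becomes async like the other models. For completeness, a sketch assuming the `experimental_generateSpeech` helper from `ai`:

  import { experimental_generateSpeech as generateSpeech } from 'ai';
  import { openai } from '@ai-sdk/openai';

  const { audio } = await generateSpeech({
    model: openai.speech('tts-1'),
    text: 'Hello from the 2.0 alpha.',
    providerOptions: {
      openai: { instructions: 'Speak slowly and warmly.', speed: 1.25 },
    },
  });
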
@@ -1813,7 +1721,7 @@ var OpenAISpeechModel = class {
1813
1721
  get provider() {
1814
1722
  return this.config.provider;
1815
1723
  }
1816
- getArgs({
1724
+ async getArgs({
1817
1725
  text,
1818
1726
  voice = "alloy",
1819
1727
  outputFormat = "mp3",
@@ -1822,7 +1730,7 @@ var OpenAISpeechModel = class {
1822
1730
  providerOptions
1823
1731
  }) {
1824
1732
  const warnings = [];
1825
- const openAIOptions = (0, import_provider_utils8.parseProviderOptions)({
1733
+ const openAIOptions = await (0, import_provider_utils8.parseProviderOptions)({
1826
1734
  provider: "openai",
1827
1735
  providerOptions,
1828
1736
  schema: OpenAIProviderOptionsSchema
@@ -1863,7 +1771,7 @@ var OpenAISpeechModel = class {
1863
1771
  async doGenerate(options) {
1864
1772
  var _a, _b, _c;
1865
1773
  const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
1866
- const { requestBody, warnings } = this.getArgs(options);
1774
+ const { requestBody, warnings } = await this.getArgs(options);
1867
1775
  const {
1868
1776
  value: audio,
1869
1777
  responseHeaders,
@@ -1897,12 +1805,11 @@ var OpenAISpeechModel = class {
1897
1805
  };
1898
1806
 
1899
1807
  // src/responses/openai-responses-language-model.ts
1900
- var import_provider_utils10 = require("@ai-sdk/provider-utils");
1901
- var import_zod8 = require("zod");
1808
+ var import_provider_utils9 = require("@ai-sdk/provider-utils");
1809
+ var import_zod12 = require("zod");
1902
1810
 
1903
1811
  // src/responses/convert-to-openai-responses-messages.ts
1904
- var import_provider7 = require("@ai-sdk/provider");
1905
- var import_provider_utils9 = require("@ai-sdk/provider-utils");
1812
+ var import_provider6 = require("@ai-sdk/provider");
1906
1813
  function convertToOpenAIResponsesMessages({
1907
1814
  prompt,
1908
1815
  systemMessageMode
@@ -1941,38 +1848,35 @@ function convertToOpenAIResponsesMessages({
1941
1848
  messages.push({
1942
1849
  role: "user",
1943
1850
  content: content.map((part, index) => {
1944
- var _a, _b, _c, _d;
1851
+ var _a, _b, _c;
1945
1852
  switch (part.type) {
1946
1853
  case "text": {
1947
1854
  return { type: "input_text", text: part.text };
1948
1855
  }
1949
- case "image": {
1950
- return {
1951
- type: "input_image",
1952
- image_url: part.image instanceof URL ? part.image.toString() : `data:${(_a = part.mimeType) != null ? _a : "image/jpeg"};base64,${(0, import_provider_utils9.convertUint8ArrayToBase64)(part.image)}`,
1953
- // OpenAI specific extension: image detail
1954
- detail: (_c = (_b = part.providerMetadata) == null ? void 0 : _b.openai) == null ? void 0 : _c.imageDetail
1955
- };
1956
- }
1957
1856
  case "file": {
1958
- if (part.data instanceof URL) {
1959
- throw new import_provider7.UnsupportedFunctionalityError({
1960
- functionality: "File URLs in user messages"
1961
- });
1962
- }
1963
- switch (part.mimeType) {
1964
- case "application/pdf": {
1965
- return {
1966
- type: "input_file",
1967
- filename: (_d = part.filename) != null ? _d : `part-${index}.pdf`,
1968
- file_data: `data:application/pdf;base64,${part.data}`
1969
- };
1970
- }
1971
- default: {
1972
- throw new import_provider7.UnsupportedFunctionalityError({
1973
- functionality: "Only PDF files are supported in user messages"
1857
+ if (part.mediaType.startsWith("image/")) {
1858
+ const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
1859
+ return {
1860
+ type: "input_image",
1861
+ image_url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${part.data}`,
1862
+ // OpenAI specific extension: image detail
1863
+ detail: (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.imageDetail
1864
+ };
1865
+ } else if (part.mediaType === "application/pdf") {
1866
+ if (part.data instanceof URL) {
1867
+ throw new import_provider6.UnsupportedFunctionalityError({
1868
+ functionality: "PDF file parts with URLs"
1974
1869
  });
1975
1870
  }
1871
+ return {
1872
+ type: "input_file",
1873
+ filename: (_c = part.filename) != null ? _c : `part-${index}.pdf`,
1874
+ file_data: `data:application/pdf;base64,${part.data}`
1875
+ };
1876
+ } else {
1877
+ throw new import_provider6.UnsupportedFunctionalityError({
1878
+ functionality: `file part media type ${part.mediaType}`
1879
+ });
1976
1880
  }
1977
1881
  }
1978
1882
  }
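
As in the chat path shown earlier, the responses prompt conversion drops the dedicated `image` part type: images and PDFs both arrive as `file` parts carrying a `mediaType`, with the wildcard `image/*` widened to `image/jpeg`, image URLs passed through, and PDF URLs still rejected. A sketch of the corresponding v5 message shape, with field names taken from the conversion above:

  import { readFile } from 'node:fs/promises';

  const pdfBase64 = (await readFile('report.pdf')).toString('base64');

  const messages = [
    {
      role: 'user' as const,
      content: [
        { type: 'text' as const, text: 'Summarize this file.' },
        {
          type: 'file' as const,
          mediaType: 'application/pdf',
          filename: 'report.pdf', // defaults to `part-${index}.pdf` when omitted
          data: pdfBase64,        // base64 string; URL data throws for PDFs
        },
      ],
    },
  ];
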
@@ -2041,18 +1945,17 @@ function mapOpenAIResponseFinishReason({
2041
1945
  }
2042
1946
 
2043
1947
  // src/responses/openai-responses-prepare-tools.ts
2044
- var import_provider8 = require("@ai-sdk/provider");
1948
+ var import_provider7 = require("@ai-sdk/provider");
2045
1949
  function prepareResponsesTools({
2046
- mode,
1950
+ tools,
1951
+ toolChoice,
2047
1952
  strict
2048
1953
  }) {
2049
- var _a;
2050
- const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
1954
+ tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
2051
1955
  const toolWarnings = [];
2052
1956
  if (tools == null) {
2053
- return { tools: void 0, tool_choice: void 0, toolWarnings };
1957
+ return { tools: void 0, toolChoice: void 0, toolWarnings };
2054
1958
  }
2055
- const toolChoice = mode.toolChoice;
2056
1959
  const openaiTools = [];
2057
1960
  for (const tool of tools) {
2058
1961
  switch (tool.type) {
@@ -2085,37 +1988,24 @@ function prepareResponsesTools({
2085
1988
  }
2086
1989
  }
2087
1990
  if (toolChoice == null) {
2088
- return { tools: openaiTools, tool_choice: void 0, toolWarnings };
1991
+ return { tools: openaiTools, toolChoice: void 0, toolWarnings };
2089
1992
  }
2090
1993
  const type = toolChoice.type;
2091
1994
  switch (type) {
2092
1995
  case "auto":
2093
1996
  case "none":
2094
1997
  case "required":
2095
- return { tools: openaiTools, tool_choice: type, toolWarnings };
2096
- case "tool": {
2097
- if (toolChoice.toolName === "web_search_preview") {
2098
- return {
2099
- tools: openaiTools,
2100
- tool_choice: {
2101
- type: "web_search_preview"
2102
- },
2103
- toolWarnings
2104
- };
2105
- }
1998
+ return { tools: openaiTools, toolChoice: type, toolWarnings };
1999
+ case "tool":
2106
2000
  return {
2107
2001
  tools: openaiTools,
2108
- tool_choice: {
2109
- type: "function",
2110
- name: toolChoice.toolName
2111
- },
2002
+ toolChoice: toolChoice.toolName === "web_search_preview" ? { type: "web_search_preview" } : { type: "function", name: toolChoice.toolName },
2112
2003
  toolWarnings
2113
2004
  };
2114
- }
2115
2005
  default: {
2116
2006
  const _exhaustiveCheck = type;
2117
- throw new import_provider8.UnsupportedFunctionalityError({
2118
- functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`
2007
+ throw new import_provider7.UnsupportedFunctionalityError({
2008
+ functionality: `tool choice type: ${_exhaustiveCheck}`
2119
2009
  });
2120
2010
  }
2121
2011
  }
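
Tool preparation no longer unpacks a `mode` object: in the v2 spec, `tools` and `toolChoice` are ordinary call options, and the helper returns a camelCase `toolChoice` that `getArgs` later maps onto the wire-format `tool_choice`; the `web_search_preview` special case collapses into a ternary. A sketch of forcing that provider-defined tool, assuming the `openai.tools.webSearchPreview` factory the package exposes and v5 `generateText` semantics:

  import { generateText } from 'ai';
  import { openai } from '@ai-sdk/openai';

  const result = await generateText({
    model: openai.responses('gpt-4o-mini'),
    prompt: 'What changed in the latest release?',
    tools: { web_search_preview: openai.tools.webSearchPreview() },
    // mapped to { type: 'web_search_preview' } by prepareResponsesTools
    toolChoice: { type: 'tool', toolName: 'web_search_preview' },
  });
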
@@ -2124,18 +2014,18 @@ function prepareResponsesTools({
2124
2014
  // src/responses/openai-responses-language-model.ts
2125
2015
  var OpenAIResponsesLanguageModel = class {
2126
2016
  constructor(modelId, config) {
2127
- this.specificationVersion = "v1";
2128
- this.defaultObjectGenerationMode = "json";
2129
- this.supportsStructuredOutputs = true;
2017
+ this.specificationVersion = "v2";
2018
+ this.supportedUrls = {
2019
+ "image/*": [/^https?:\/\/.*$/]
2020
+ };
2130
2021
  this.modelId = modelId;
2131
2022
  this.config = config;
2132
2023
  }
2133
2024
  get provider() {
2134
2025
  return this.config.provider;
2135
2026
  }
2136
- getArgs({
2137
- mode,
2138
- maxTokens,
2027
+ async getArgs({
2028
+ maxOutputTokens,
2139
2029
  temperature,
2140
2030
  stopSequences,
2141
2031
  topP,
@@ -2144,24 +2034,19 @@ var OpenAIResponsesLanguageModel = class {
2144
2034
  frequencyPenalty,
2145
2035
  seed,
2146
2036
  prompt,
2147
- providerMetadata,
2037
+ providerOptions,
2038
+ tools,
2039
+ toolChoice,
2148
2040
  responseFormat
2149
2041
  }) {
2150
- var _a, _b, _c;
2042
+ var _a, _b;
2151
2043
  const warnings = [];
2152
2044
  const modelConfig = getResponsesModelConfig(this.modelId);
2153
- const type = mode.type;
2154
2045
  if (topK != null) {
2155
- warnings.push({
2156
- type: "unsupported-setting",
2157
- setting: "topK"
2158
- });
2046
+ warnings.push({ type: "unsupported-setting", setting: "topK" });
2159
2047
  }
2160
2048
  if (seed != null) {
2161
- warnings.push({
2162
- type: "unsupported-setting",
2163
- setting: "seed"
2164
- });
2049
+ warnings.push({ type: "unsupported-setting", setting: "seed" });
2165
2050
  }
2166
2051
  if (presencePenalty != null) {
2167
2052
  warnings.push({
@@ -2176,19 +2061,16 @@ var OpenAIResponsesLanguageModel = class {
2176
2061
  });
2177
2062
  }
2178
2063
  if (stopSequences != null) {
2179
- warnings.push({
2180
- type: "unsupported-setting",
2181
- setting: "stopSequences"
2182
- });
2064
+ warnings.push({ type: "unsupported-setting", setting: "stopSequences" });
2183
2065
  }
2184
2066
  const { messages, warnings: messageWarnings } = convertToOpenAIResponsesMessages({
2185
2067
  prompt,
2186
2068
  systemMessageMode: modelConfig.systemMessageMode
2187
2069
  });
2188
2070
  warnings.push(...messageWarnings);
2189
- const openaiOptions = (0, import_provider_utils10.parseProviderOptions)({
2071
+ const openaiOptions = await (0, import_provider_utils9.parseProviderOptions)({
2190
2072
  provider: "openai",
2191
- providerOptions: providerMetadata,
2073
+ providerOptions,
2192
2074
  schema: openaiResponsesProviderOptionsSchema
2193
2075
  });
2194
2076
  const isStrict = (_a = openaiOptions == null ? void 0 : openaiOptions.strictSchemas) != null ? _a : true;
@@ -2197,7 +2079,7 @@ var OpenAIResponsesLanguageModel = class {
2197
2079
  input: messages,
2198
2080
  temperature,
2199
2081
  top_p: topP,
2200
- max_output_tokens: maxTokens,
2082
+ max_output_tokens: maxOutputTokens,
2201
2083
  ...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
2202
2084
  text: {
2203
2085
  format: responseFormat.schema != null ? {
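
With the `mode` switch deleted in the following hunk, JSON output for the responses model is driven entirely by the standardized `responseFormat` option handled here in `baseArgs`, honoring the `strictSchemas` provider option (default true). A sketch via schema-driven generation, assuming v5's `generateObject` compiles its schema into `responseFormat` the way this model consumes it:

  import { generateObject } from 'ai';
  import { openai } from '@ai-sdk/openai';
  import { z } from 'zod';

  const { object } = await generateObject({
    model: openai.responses('gpt-4o-mini'),
    schema: z.object({ title: z.string(), tags: z.array(z.string()) }),
    prompt: 'Name and tag this repository.',
    providerOptions: {
      openai: { strictSchemas: false }, // opt out of strict JSON-schema mode
    },
  });
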
@@ -2249,208 +2131,178 @@ var OpenAIResponsesLanguageModel = class {
2249
2131
  });
2250
2132
  }
2251
2133
  }
2252
- switch (type) {
2253
- case "regular": {
2254
- const { tools, tool_choice, toolWarnings } = prepareResponsesTools({
2255
- mode,
2256
- strict: isStrict
2257
- // TODO support provider options on tools
2258
- });
2259
- return {
2260
- args: {
2261
- ...baseArgs,
2262
- tools,
2263
- tool_choice
2264
- },
2265
- warnings: [...warnings, ...toolWarnings]
2266
- };
2267
- }
2268
- case "object-json": {
2269
- return {
2270
- args: {
2271
- ...baseArgs,
2272
- text: {
2273
- format: mode.schema != null ? {
2274
- type: "json_schema",
2275
- strict: isStrict,
2276
- name: (_c = mode.name) != null ? _c : "response",
2277
- description: mode.description,
2278
- schema: mode.schema
2279
- } : { type: "json_object" }
2280
- }
2281
- },
2282
- warnings
2283
- };
2284
- }
2285
- case "object-tool": {
2286
- return {
2287
- args: {
2288
- ...baseArgs,
2289
- tool_choice: { type: "function", name: mode.tool.name },
2290
- tools: [
2291
- {
2292
- type: "function",
2293
- name: mode.tool.name,
2294
- description: mode.tool.description,
2295
- parameters: mode.tool.parameters,
2296
- strict: isStrict
2297
- }
2298
- ]
2299
- },
2300
- warnings
2301
- };
2302
- }
2303
- default: {
2304
- const _exhaustiveCheck = type;
2305
- throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
2306
- }
2307
- }
2134
+ const {
2135
+ tools: openaiTools,
2136
+ toolChoice: openaiToolChoice,
2137
+ toolWarnings
2138
+ } = prepareResponsesTools({
2139
+ tools,
2140
+ toolChoice,
2141
+ strict: isStrict
2142
+ });
2143
+ return {
2144
+ args: {
2145
+ ...baseArgs,
2146
+ tools: openaiTools,
2147
+ tool_choice: openaiToolChoice
2148
+ },
2149
+ warnings: [...warnings, ...toolWarnings]
2150
+ };
2308
2151
  }
2309
2152
  async doGenerate(options) {
2310
- var _a, _b, _c, _d, _e, _f, _g;
2311
- const { args: body, warnings } = this.getArgs(options);
2153
+ var _a, _b, _c, _d, _e, _f, _g, _h;
2154
+ const { args: body, warnings } = await this.getArgs(options);
2312
2155
  const {
2313
2156
  responseHeaders,
2314
2157
  value: response,
2315
2158
  rawValue: rawResponse
2316
- } = await (0, import_provider_utils10.postJsonToApi)({
2159
+ } = await (0, import_provider_utils9.postJsonToApi)({
2317
2160
  url: this.config.url({
2318
2161
  path: "/responses",
2319
2162
  modelId: this.modelId
2320
2163
  }),
2321
- headers: (0, import_provider_utils10.combineHeaders)(this.config.headers(), options.headers),
2164
+ headers: (0, import_provider_utils9.combineHeaders)(this.config.headers(), options.headers),
2322
2165
  body,
2323
2166
  failedResponseHandler: openaiFailedResponseHandler,
2324
- successfulResponseHandler: (0, import_provider_utils10.createJsonResponseHandler)(
2325
- import_zod8.z.object({
2326
- id: import_zod8.z.string(),
2327
- created_at: import_zod8.z.number(),
2328
- model: import_zod8.z.string(),
2329
- output: import_zod8.z.array(
2330
- import_zod8.z.discriminatedUnion("type", [
2331
- import_zod8.z.object({
2332
- type: import_zod8.z.literal("message"),
2333
- role: import_zod8.z.literal("assistant"),
2334
- content: import_zod8.z.array(
2335
- import_zod8.z.object({
2336
- type: import_zod8.z.literal("output_text"),
2337
- text: import_zod8.z.string(),
2338
- annotations: import_zod8.z.array(
2339
- import_zod8.z.object({
2340
- type: import_zod8.z.literal("url_citation"),
2341
- start_index: import_zod8.z.number(),
2342
- end_index: import_zod8.z.number(),
2343
- url: import_zod8.z.string(),
2344
- title: import_zod8.z.string()
2167
+ successfulResponseHandler: (0, import_provider_utils9.createJsonResponseHandler)(
2168
+ import_zod12.z.object({
2169
+ id: import_zod12.z.string(),
2170
+ created_at: import_zod12.z.number(),
2171
+ model: import_zod12.z.string(),
2172
+ output: import_zod12.z.array(
2173
+ import_zod12.z.discriminatedUnion("type", [
2174
+ import_zod12.z.object({
2175
+ type: import_zod12.z.literal("message"),
2176
+ role: import_zod12.z.literal("assistant"),
2177
+ content: import_zod12.z.array(
2178
+ import_zod12.z.object({
2179
+ type: import_zod12.z.literal("output_text"),
2180
+ text: import_zod12.z.string(),
2181
+ annotations: import_zod12.z.array(
2182
+ import_zod12.z.object({
2183
+ type: import_zod12.z.literal("url_citation"),
2184
+ start_index: import_zod12.z.number(),
2185
+ end_index: import_zod12.z.number(),
2186
+ url: import_zod12.z.string(),
2187
+ title: import_zod12.z.string()
2345
2188
  })
2346
2189
  )
2347
2190
  })
2348
2191
  )
2349
2192
  }),
2350
- import_zod8.z.object({
2351
- type: import_zod8.z.literal("function_call"),
2352
- call_id: import_zod8.z.string(),
2353
- name: import_zod8.z.string(),
2354
- arguments: import_zod8.z.string()
2193
+ import_zod12.z.object({
2194
+ type: import_zod12.z.literal("function_call"),
2195
+ call_id: import_zod12.z.string(),
2196
+ name: import_zod12.z.string(),
2197
+ arguments: import_zod12.z.string()
2355
2198
  }),
2356
- import_zod8.z.object({
2357
- type: import_zod8.z.literal("web_search_call")
2199
+ import_zod12.z.object({
2200
+ type: import_zod12.z.literal("web_search_call")
2358
2201
  }),
2359
- import_zod8.z.object({
2360
- type: import_zod8.z.literal("computer_call")
2202
+ import_zod12.z.object({
2203
+ type: import_zod12.z.literal("computer_call")
2361
2204
  }),
2362
- import_zod8.z.object({
2363
- type: import_zod8.z.literal("reasoning"),
2364
- summary: import_zod8.z.array(
2365
- import_zod8.z.object({
2366
- type: import_zod8.z.literal("summary_text"),
2367
- text: import_zod8.z.string()
2205
+ import_zod12.z.object({
2206
+ type: import_zod12.z.literal("reasoning"),
2207
+ summary: import_zod12.z.array(
2208
+ import_zod12.z.object({
2209
+ type: import_zod12.z.literal("summary_text"),
2210
+ text: import_zod12.z.string()
2368
2211
  })
2369
2212
  )
2370
2213
  })
2371
2214
  ])
2372
2215
  ),
2373
- incomplete_details: import_zod8.z.object({ reason: import_zod8.z.string() }).nullable(),
2374
- usage: usageSchema
2216
+ incomplete_details: import_zod12.z.object({ reason: import_zod12.z.string() }).nullable(),
2217
+ usage: usageSchema2
2375
2218
  })
2376
2219
  ),
2377
2220
  abortSignal: options.abortSignal,
2378
2221
  fetch: this.config.fetch
2379
2222
  });
2380
- const outputTextElements = response.output.filter((output) => output.type === "message").flatMap((output) => output.content).filter((content) => content.type === "output_text");
2381
- const toolCalls = response.output.filter((output) => output.type === "function_call").map((output) => ({
2382
- toolCallType: "function",
2383
- toolCallId: output.call_id,
2384
- toolName: output.name,
2385
- args: output.arguments
2386
- }));
2387
- const reasoningSummary = (_b = (_a = response.output.find((item) => item.type === "reasoning")) == null ? void 0 : _a.summary) != null ? _b : null;
2223
+ const content = [];
2224
+ for (const part of response.output) {
2225
+ switch (part.type) {
2226
+ case "reasoning": {
2227
+ content.push({
2228
+ type: "reasoning",
2229
+ text: part.summary.map((summary) => summary.text).join()
2230
+ });
2231
+ break;
2232
+ }
2233
+ case "message": {
2234
+ for (const contentPart of part.content) {
2235
+ content.push({
2236
+ type: "text",
2237
+ text: contentPart.text
2238
+ });
2239
+ for (const annotation of contentPart.annotations) {
2240
+ content.push({
2241
+ type: "source",
2242
+ sourceType: "url",
2243
+ id: (_c = (_b = (_a = this.config).generateId) == null ? void 0 : _b.call(_a)) != null ? _c : (0, import_provider_utils9.generateId)(),
2244
+ url: annotation.url,
2245
+ title: annotation.title
2246
+ });
2247
+ }
2248
+ }
2249
+ break;
2250
+ }
2251
+ case "function_call": {
2252
+ content.push({
2253
+ type: "tool-call",
2254
+ toolCallType: "function",
2255
+ toolCallId: part.call_id,
2256
+ toolName: part.name,
2257
+ args: part.arguments
2258
+ });
2259
+ break;
2260
+ }
2261
+ }
2262
+ }
2388
2263
  return {
2389
- text: outputTextElements.map((content) => content.text).join("\n"),
2390
- sources: outputTextElements.flatMap(
2391
- (content) => content.annotations.map((annotation) => {
2392
- var _a2, _b2, _c2;
2393
- return {
2394
- sourceType: "url",
2395
- id: (_c2 = (_b2 = (_a2 = this.config).generateId) == null ? void 0 : _b2.call(_a2)) != null ? _c2 : (0, import_provider_utils10.generateId)(),
2396
- url: annotation.url,
2397
- title: annotation.title
2398
- };
2399
- })
2400
- ),
2264
+ content,
2401
2265
  finishReason: mapOpenAIResponseFinishReason({
2402
- finishReason: (_c = response.incomplete_details) == null ? void 0 : _c.reason,
2403
- hasToolCalls: toolCalls.length > 0
2266
+ finishReason: (_d = response.incomplete_details) == null ? void 0 : _d.reason,
2267
+ hasToolCalls: content.some((part) => part.type === "tool-call")
2404
2268
  }),
2405
- toolCalls: toolCalls.length > 0 ? toolCalls : void 0,
2406
- reasoning: reasoningSummary ? reasoningSummary.map((summary) => ({
2407
- type: "text",
2408
- text: summary.text
2409
- })) : void 0,
2410
2269
  usage: {
2411
- promptTokens: response.usage.input_tokens,
2412
- completionTokens: response.usage.output_tokens
2413
- },
2414
- rawCall: {
2415
- rawPrompt: void 0,
2416
- rawSettings: {}
2417
- },
2418
- rawResponse: {
2419
- headers: responseHeaders,
2420
- body: rawResponse
2421
- },
2422
- request: {
2423
- body: JSON.stringify(body)
2270
+ inputTokens: response.usage.input_tokens,
2271
+ outputTokens: response.usage.output_tokens,
2272
+ totalTokens: response.usage.input_tokens + response.usage.output_tokens,
2273
+ reasoningTokens: (_f = (_e = response.usage.output_tokens_details) == null ? void 0 : _e.reasoning_tokens) != null ? _f : void 0,
2274
+ cachedInputTokens: (_h = (_g = response.usage.input_tokens_details) == null ? void 0 : _g.cached_tokens) != null ? _h : void 0
2424
2275
  },
2276
+ request: { body },
2425
2277
  response: {
2426
2278
  id: response.id,
2427
2279
  timestamp: new Date(response.created_at * 1e3),
2428
- modelId: response.model
2280
+ modelId: response.model,
2281
+ headers: responseHeaders,
2282
+ body: rawResponse
2429
2283
  },
2430
2284
  providerMetadata: {
2431
2285
  openai: {
2432
- responseId: response.id,
2433
- cachedPromptTokens: (_e = (_d = response.usage.input_tokens_details) == null ? void 0 : _d.cached_tokens) != null ? _e : null,
2434
- reasoningTokens: (_g = (_f = response.usage.output_tokens_details) == null ? void 0 : _f.reasoning_tokens) != null ? _g : null
2286
+ responseId: response.id
2435
2287
  }
2436
2288
  },
2437
2289
  warnings
2438
2290
  };
2439
2291
  }
2440
2292
  async doStream(options) {
2441
- const { args: body, warnings } = this.getArgs(options);
2442
- const { responseHeaders, value: response } = await (0, import_provider_utils10.postJsonToApi)({
2293
+ const { args: body, warnings } = await this.getArgs(options);
2294
+ const { responseHeaders, value: response } = await (0, import_provider_utils9.postJsonToApi)({
2443
2295
  url: this.config.url({
2444
2296
  path: "/responses",
2445
2297
  modelId: this.modelId
2446
2298
  }),
2447
- headers: (0, import_provider_utils10.combineHeaders)(this.config.headers(), options.headers),
2299
+ headers: (0, import_provider_utils9.combineHeaders)(this.config.headers(), options.headers),
2448
2300
  body: {
2449
2301
  ...body,
2450
2302
  stream: true
2451
2303
  },
2452
2304
  failedResponseHandler: openaiFailedResponseHandler,
2453
- successfulResponseHandler: (0, import_provider_utils10.createEventSourceResponseHandler)(
2305
+ successfulResponseHandler: (0, import_provider_utils9.createEventSourceResponseHandler)(
2454
2306
  openaiResponsesChunkSchema
2455
2307
  ),
2456
2308
  abortSignal: options.abortSignal,
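
`doGenerate` now emits a single ordered `content` array (`reasoning`, `text`, `source`, and `tool-call` parts) in place of the parallel `text` / `sources` / `toolCalls` / `reasoning` fields, and usage gains `totalTokens`, `reasoningTokens`, and `cachedInputTokens`; the latter two leave `providerMetadata.openai`, which now carries only `responseId`. A sketch of consuming the unified shape, assuming v5's `generateText` exposes the content array and extended usage as-is (`reasoningSummary: 'auto'` is an OpenAI API value, not asserted by this diff):

  import { generateText } from 'ai';
  import { openai } from '@ai-sdk/openai';

  const result = await generateText({
    model: openai.responses('o3-mini'),
    prompt: 'Think it through, then answer: 17 * 24?',
    providerOptions: { openai: { reasoningSummary: 'auto' } },
  });

  for (const part of result.content) {
    if (part.type === 'reasoning') console.log('[reasoning]', part.text);
    if (part.type === 'text') console.log('[text]', part.text);
    if (part.type === 'source') console.log('[source]', part.url, part.title);
  }
  console.log(result.usage.reasoningTokens, result.usage.cachedInputTokens);
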
@@ -2458,16 +2310,20 @@ var OpenAIResponsesLanguageModel = class {
2458
2310
  });
2459
2311
  const self = this;
2460
2312
  let finishReason = "unknown";
2461
- let promptTokens = NaN;
2462
- let completionTokens = NaN;
2463
- let cachedPromptTokens = null;
2464
- let reasoningTokens = null;
2313
+ const usage = {
2314
+ inputTokens: void 0,
2315
+ outputTokens: void 0,
2316
+ totalTokens: void 0
2317
+ };
2465
2318
  let responseId = null;
2466
2319
  const ongoingToolCalls = {};
2467
2320
  let hasToolCalls = false;
2468
2321
  return {
2469
2322
  stream: response.pipeThrough(
2470
2323
  new TransformStream({
2324
+ start(controller) {
2325
+ controller.enqueue({ type: "stream-start", warnings });
2326
+ },
2471
2327
  transform(chunk, controller) {
2472
2328
  var _a, _b, _c, _d, _e, _f, _g, _h;
2473
2329
  if (!chunk.success) {
@@ -2511,13 +2367,13 @@ var OpenAIResponsesLanguageModel = class {
2511
2367
  });
2512
2368
  } else if (isTextDeltaChunk(value)) {
2513
2369
  controller.enqueue({
2514
- type: "text-delta",
2515
- textDelta: value.delta
2370
+ type: "text",
2371
+ text: value.delta
2516
2372
  });
2517
2373
  } else if (isResponseReasoningSummaryTextDeltaChunk(value)) {
2518
2374
  controller.enqueue({
2519
2375
  type: "reasoning",
2520
- textDelta: value.delta
2376
+ text: value.delta
2521
2377
  });
2522
2378
  } else if (isResponseOutputItemDoneChunk(value) && value.item.type === "function_call") {
2523
2379
  ongoingToolCalls[value.output_index] = void 0;
@@ -2534,19 +2390,18 @@ var OpenAIResponsesLanguageModel = class {
2534
2390
  finishReason: (_a = value.response.incomplete_details) == null ? void 0 : _a.reason,
2535
2391
  hasToolCalls
2536
2392
  });
2537
- promptTokens = value.response.usage.input_tokens;
2538
- completionTokens = value.response.usage.output_tokens;
2539
- cachedPromptTokens = (_c = (_b = value.response.usage.input_tokens_details) == null ? void 0 : _b.cached_tokens) != null ? _c : cachedPromptTokens;
2540
- reasoningTokens = (_e = (_d = value.response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : reasoningTokens;
2393
+ usage.inputTokens = value.response.usage.input_tokens;
2394
+ usage.outputTokens = value.response.usage.output_tokens;
2395
+ usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens;
2396
+ usage.reasoningTokens = (_c = (_b = value.response.usage.output_tokens_details) == null ? void 0 : _b.reasoning_tokens) != null ? _c : void 0;
2397
+ usage.cachedInputTokens = (_e = (_d = value.response.usage.input_tokens_details) == null ? void 0 : _d.cached_tokens) != null ? _e : void 0;
2541
2398
  } else if (isResponseAnnotationAddedChunk(value)) {
2542
2399
  controller.enqueue({
2543
2400
  type: "source",
2544
- source: {
2545
- sourceType: "url",
2546
- id: (_h = (_g = (_f = self.config).generateId) == null ? void 0 : _g.call(_f)) != null ? _h : (0, import_provider_utils10.generateId)(),
2547
- url: value.annotation.url,
2548
- title: value.annotation.title
2549
- }
2401
+ sourceType: "url",
2402
+ id: (_h = (_g = (_f = self.config).generateId) == null ? void 0 : _g.call(_f)) != null ? _h : (0, import_provider_utils9.generateId)(),
2403
+ url: value.annotation.url,
2404
+ title: value.annotation.title
2550
2405
  });
2551
2406
  }
2552
2407
  },
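
Streaming mirrors the renames: the stream now opens with a `stream-start` part carrying `warnings`, text deltas become `{ type: 'text', text }` (formerly `text-delta`/`textDelta`), reasoning deltas use `text` instead of `textDelta`, source parts are flattened rather than nested under a `source` key, and `request.body` is the raw object instead of a JSON string. A sketch of handling the raw v2 stream parts produced by the TransformStream above; `model` and `callOptions` are stand-ins for any v2 language model instance and its prepared call options:

  // e.g. model = openai.responses('gpt-4o-mini'); callOptions: LanguageModelV2CallOptions
  const { stream } = await model.doStream(callOptions);

  for await (const part of stream) { // Node's web ReadableStream is async-iterable
    switch (part.type) {
      case 'stream-start': console.warn(part.warnings); break;
      case 'text':         process.stdout.write(part.text); break;
      case 'source':       console.log(part.url, part.title); break;
      case 'finish':       console.log(part.finishReason, part.usage); break;
    }
  }
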
@@ -2554,110 +2409,101 @@ var OpenAIResponsesLanguageModel = class {
2554
2409
  controller.enqueue({
2555
2410
  type: "finish",
2556
2411
  finishReason,
2557
- usage: { promptTokens, completionTokens },
2558
- ...(cachedPromptTokens != null || reasoningTokens != null) && {
2559
- providerMetadata: {
2560
- openai: {
2561
- responseId,
2562
- cachedPromptTokens,
2563
- reasoningTokens
2564
- }
2412
+ usage,
2413
+ providerMetadata: {
2414
+ openai: {
2415
+ responseId
2565
2416
  }
2566
2417
  }
2567
2418
  });
2568
2419
  }
2569
2420
  })
2570
2421
  ),
2571
- rawCall: {
2572
- rawPrompt: void 0,
2573
- rawSettings: {}
2574
- },
2575
- rawResponse: { headers: responseHeaders },
2576
- request: { body: JSON.stringify(body) },
2577
- warnings
2422
+ request: { body },
2423
+ response: { headers: responseHeaders }
2578
2424
  };
2579
2425
  }
2580
2426
  };
2581
- var usageSchema = import_zod8.z.object({
2582
- input_tokens: import_zod8.z.number(),
2583
- input_tokens_details: import_zod8.z.object({ cached_tokens: import_zod8.z.number().nullish() }).nullish(),
2584
- output_tokens: import_zod8.z.number(),
2585
- output_tokens_details: import_zod8.z.object({ reasoning_tokens: import_zod8.z.number().nullish() }).nullish()
2427
+ var usageSchema2 = import_zod12.z.object({
2428
+ input_tokens: import_zod12.z.number(),
2429
+ input_tokens_details: import_zod12.z.object({ cached_tokens: import_zod12.z.number().nullish() }).nullish(),
2430
+ output_tokens: import_zod12.z.number(),
2431
+ output_tokens_details: import_zod12.z.object({ reasoning_tokens: import_zod12.z.number().nullish() }).nullish()
2586
2432
  });
2587
- var textDeltaChunkSchema = import_zod8.z.object({
2588
- type: import_zod8.z.literal("response.output_text.delta"),
2589
- delta: import_zod8.z.string()
2433
+ var textDeltaChunkSchema = import_zod12.z.object({
2434
+ type: import_zod12.z.literal("response.output_text.delta"),
2435
+ delta: import_zod12.z.string()
2590
2436
  });
2591
- var responseFinishedChunkSchema = import_zod8.z.object({
2592
- type: import_zod8.z.enum(["response.completed", "response.incomplete"]),
2593
- response: import_zod8.z.object({
2594
- incomplete_details: import_zod8.z.object({ reason: import_zod8.z.string() }).nullish(),
2595
- usage: usageSchema
2437
+ var responseFinishedChunkSchema = import_zod12.z.object({
2438
+ type: import_zod12.z.enum(["response.completed", "response.incomplete"]),
2439
+ response: import_zod12.z.object({
2440
+ incomplete_details: import_zod12.z.object({ reason: import_zod12.z.string() }).nullish(),
2441
+ usage: usageSchema2
2596
2442
  })
2597
2443
  });
2598
- var responseCreatedChunkSchema = import_zod8.z.object({
2599
- type: import_zod8.z.literal("response.created"),
2600
- response: import_zod8.z.object({
2601
- id: import_zod8.z.string(),
2602
- created_at: import_zod8.z.number(),
2603
- model: import_zod8.z.string()
2444
+ var responseCreatedChunkSchema = import_zod12.z.object({
2445
+ type: import_zod12.z.literal("response.created"),
2446
+ response: import_zod12.z.object({
2447
+ id: import_zod12.z.string(),
2448
+ created_at: import_zod12.z.number(),
2449
+ model: import_zod12.z.string()
2604
2450
  })
2605
2451
  });
2606
- var responseOutputItemDoneSchema = import_zod8.z.object({
2607
- type: import_zod8.z.literal("response.output_item.done"),
2608
- output_index: import_zod8.z.number(),
2609
- item: import_zod8.z.discriminatedUnion("type", [
2610
- import_zod8.z.object({
2611
- type: import_zod8.z.literal("message")
2452
+ var responseOutputItemDoneSchema = import_zod12.z.object({
2453
+ type: import_zod12.z.literal("response.output_item.done"),
2454
+ output_index: import_zod12.z.number(),
2455
+ item: import_zod12.z.discriminatedUnion("type", [
2456
+ import_zod12.z.object({
2457
+ type: import_zod12.z.literal("message")
2612
2458
  }),
2613
- import_zod8.z.object({
2614
- type: import_zod8.z.literal("function_call"),
2615
- id: import_zod8.z.string(),
2616
- call_id: import_zod8.z.string(),
2617
- name: import_zod8.z.string(),
2618
- arguments: import_zod8.z.string(),
2619
- status: import_zod8.z.literal("completed")
2459
+ import_zod12.z.object({
2460
+ type: import_zod12.z.literal("function_call"),
2461
+ id: import_zod12.z.string(),
2462
+ call_id: import_zod12.z.string(),
2463
+ name: import_zod12.z.string(),
2464
+ arguments: import_zod12.z.string(),
2465
+ status: import_zod12.z.literal("completed")
2620
2466
  })
2621
2467
  ])
2622
2468
  });
2623
- var responseFunctionCallArgumentsDeltaSchema = import_zod8.z.object({
2624
- type: import_zod8.z.literal("response.function_call_arguments.delta"),
2625
- item_id: import_zod8.z.string(),
2626
- output_index: import_zod8.z.number(),
2627
- delta: import_zod8.z.string()
2469
+ var responseFunctionCallArgumentsDeltaSchema = import_zod12.z.object({
2470
+ type: import_zod12.z.literal("response.function_call_arguments.delta"),
2471
+ item_id: import_zod12.z.string(),
2472
+ output_index: import_zod12.z.number(),
2473
+ delta: import_zod12.z.string()
2628
2474
  });
2629
- var responseOutputItemAddedSchema = import_zod8.z.object({
2630
- type: import_zod8.z.literal("response.output_item.added"),
2631
- output_index: import_zod8.z.number(),
2632
- item: import_zod8.z.discriminatedUnion("type", [
2633
- import_zod8.z.object({
2634
- type: import_zod8.z.literal("message")
2475
+ var responseOutputItemAddedSchema = import_zod12.z.object({
2476
+ type: import_zod12.z.literal("response.output_item.added"),
2477
+ output_index: import_zod12.z.number(),
2478
+ item: import_zod12.z.discriminatedUnion("type", [
2479
+ import_zod12.z.object({
2480
+ type: import_zod12.z.literal("message")
2635
2481
  }),
2636
- import_zod8.z.object({
2637
- type: import_zod8.z.literal("function_call"),
2638
- id: import_zod8.z.string(),
2639
- call_id: import_zod8.z.string(),
2640
- name: import_zod8.z.string(),
2641
- arguments: import_zod8.z.string()
2482
+ import_zod12.z.object({
2483
+ type: import_zod12.z.literal("function_call"),
2484
+ id: import_zod12.z.string(),
2485
+ call_id: import_zod12.z.string(),
2486
+ name: import_zod12.z.string(),
2487
+ arguments: import_zod12.z.string()
2642
2488
  })
2643
2489
  ])
2644
2490
  });
2645
- var responseAnnotationAddedSchema = import_zod8.z.object({
2646
- type: import_zod8.z.literal("response.output_text.annotation.added"),
2647
- annotation: import_zod8.z.object({
2648
- type: import_zod8.z.literal("url_citation"),
2649
- url: import_zod8.z.string(),
2650
- title: import_zod8.z.string()
2491
+ var responseAnnotationAddedSchema = import_zod12.z.object({
2492
+ type: import_zod12.z.literal("response.output_text.annotation.added"),
2493
+ annotation: import_zod12.z.object({
2494
+ type: import_zod12.z.literal("url_citation"),
2495
+ url: import_zod12.z.string(),
2496
+ title: import_zod12.z.string()
2651
2497
  })
2652
2498
  });
2653
- var responseReasoningSummaryTextDeltaSchema = import_zod8.z.object({
2654
- type: import_zod8.z.literal("response.reasoning_summary_text.delta"),
2655
- item_id: import_zod8.z.string(),
2656
- output_index: import_zod8.z.number(),
2657
- summary_index: import_zod8.z.number(),
2658
- delta: import_zod8.z.string()
2499
+ var responseReasoningSummaryTextDeltaSchema = import_zod12.z.object({
2500
+ type: import_zod12.z.literal("response.reasoning_summary_text.delta"),
2501
+ item_id: import_zod12.z.string(),
2502
+ output_index: import_zod12.z.number(),
2503
+ summary_index: import_zod12.z.number(),
2504
+ delta: import_zod12.z.string()
2659
2505
  });
2660
- var openaiResponsesChunkSchema = import_zod8.z.union([
2506
+ var openaiResponsesChunkSchema = import_zod12.z.union([
2661
2507
  textDeltaChunkSchema,
2662
2508
  responseFinishedChunkSchema,
2663
2509
  responseCreatedChunkSchema,
@@ -2666,7 +2512,7 @@ var openaiResponsesChunkSchema = import_zod8.z.union([
2666
2512
  responseOutputItemAddedSchema,
2667
2513
  responseAnnotationAddedSchema,
2668
2514
  responseReasoningSummaryTextDeltaSchema,
2669
- import_zod8.z.object({ type: import_zod8.z.string() }).passthrough()
2515
+ import_zod12.z.object({ type: import_zod12.z.string() }).passthrough()
2670
2516
  // fallback for unknown chunks
2671
2517
  ]);
2672
2518
  function isTextDeltaChunk(chunk) {
@@ -2714,16 +2560,16 @@ function getResponsesModelConfig(modelId) {
2714
2560
  requiredAutoTruncation: false
2715
2561
  };
2716
2562
  }
2717
- var openaiResponsesProviderOptionsSchema = import_zod8.z.object({
2718
- metadata: import_zod8.z.any().nullish(),
2719
- parallelToolCalls: import_zod8.z.boolean().nullish(),
2720
- previousResponseId: import_zod8.z.string().nullish(),
2721
- store: import_zod8.z.boolean().nullish(),
2722
- user: import_zod8.z.string().nullish(),
2723
- reasoningEffort: import_zod8.z.string().nullish(),
2724
- strictSchemas: import_zod8.z.boolean().nullish(),
2725
- instructions: import_zod8.z.string().nullish(),
2726
- reasoningSummary: import_zod8.z.string().nullish()
2563
+ var openaiResponsesProviderOptionsSchema = import_zod12.z.object({
2564
+ metadata: import_zod12.z.any().nullish(),
2565
+ parallelToolCalls: import_zod12.z.boolean().nullish(),
2566
+ previousResponseId: import_zod12.z.string().nullish(),
2567
+ store: import_zod12.z.boolean().nullish(),
2568
+ user: import_zod12.z.string().nullish(),
2569
+ reasoningEffort: import_zod12.z.string().nullish(),
2570
+ strictSchemas: import_zod12.z.boolean().nullish(),
2571
+ instructions: import_zod12.z.string().nullish(),
2572
+ reasoningSummary: import_zod12.z.string().nullish()
2727
2573
  });
2728
2574
  // Annotate the CommonJS export names for ESM import in node:
2729
2575
  0 && (module.exports = {
@@ -2735,6 +2581,10 @@ var openaiResponsesProviderOptionsSchema = import_zod8.z.object({
2735
2581
  OpenAISpeechModel,
2736
2582
  OpenAITranscriptionModel,
2737
2583
  hasDefaultResponseFormat,
2738
- modelMaxImagesPerCall
2584
+ modelMaxImagesPerCall,
2585
+ openAITranscriptionProviderOptions,
2586
+ openaiCompletionProviderOptions,
2587
+ openaiEmbeddingProviderOptions,
2588
+ openaiProviderOptions
2739
2589
  });
2740
2590
  //# sourceMappingURL=index.js.map