@ai-sdk/openai 2.0.0-canary.12 → 2.0.0-canary.13

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -266,7 +266,13 @@ var openaiProviderOptions = import_zod.z.object({
  /**
  * Parameters for prediction mode.
  */
- prediction: import_zod.z.record(import_zod.z.any()).optional()
+ prediction: import_zod.z.record(import_zod.z.any()).optional(),
+ /**
+ * Whether to use structured outputs.
+ *
+ * @default true
+ */
+ structuredOutputs: import_zod.z.boolean().optional()
  });

  // src/openai-error.ts
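The new `structuredOutputs` flag is read from the `openai` provider options (see the parsing added in the next hunk) and defaults to `true`. A minimal sketch of opting out at call time, assuming the AI SDK's `generateText` entry point and a placeholder model id:

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

// Sketch: structuredOutputs is now a provider option rather than a
// per-model setting; omitting it keeps the new default of `true`.
const { text } = await generateText({
  model: openai('gpt-4o'), // example model id
  prompt: 'Reply with a short JSON object describing the weather.',
  providerOptions: {
    openai: { structuredOutputs: false },
  },
});
```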
@@ -347,10 +353,9 @@ function prepareTools({

  // src/openai-chat-language-model.ts
  var OpenAIChatLanguageModel = class {
- constructor(modelId, settings, config) {
+ constructor(modelId, config) {
  this.specificationVersion = "v2";
  this.modelId = modelId;
- this.settings = settings;
  this.config = config;
  }
  get provider() {
@@ -383,13 +388,14 @@ var OpenAIChatLanguageModel = class {
  providerOptions,
  schema: openaiProviderOptions
  })) != null ? _a : {};
+ const structuredOutputs = (_b = openaiOptions.structuredOutputs) != null ? _b : true;
  if (topK != null) {
  warnings.push({
  type: "unsupported-setting",
  setting: "topK"
  });
  }
- if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !this.settings.structuredOutputs) {
+ if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !structuredOutputs) {
  warnings.push({
  type: "unsupported-setting",
  setting: "responseFormat",
@@ -418,12 +424,12 @@ var OpenAIChatLanguageModel = class {
  presence_penalty: presencePenalty,
  response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? (
  // TODO convert into provider option
- this.settings.structuredOutputs && responseFormat.schema != null ? {
+ structuredOutputs && responseFormat.schema != null ? {
  type: "json_schema",
  json_schema: {
  schema: responseFormat.schema,
  strict: true,
- name: (_b = responseFormat.name) != null ? _b : "response",
+ name: (_c = responseFormat.name) != null ? _c : "response",
  description: responseFormat.description
  }
  } : { type: "json_object" }
@@ -503,7 +509,7 @@ var OpenAIChatLanguageModel = class {
  } = prepareTools({
  tools,
  toolChoice,
- structuredOutputs: (_c = this.settings.structuredOutputs) != null ? _c : false
+ structuredOutputs
  });
  return {
  args: {
@@ -879,13 +885,9 @@ var import_zod5 = require("zod");
  var import_provider4 = require("@ai-sdk/provider");
  function convertToOpenAICompletionPrompt({
  prompt,
- inputFormat,
  user = "user",
  assistant = "assistant"
  }) {
- if (inputFormat === "prompt" && prompt.length === 1 && prompt[0].role === "user" && prompt[0].content.length === 1 && prompt[0].content[0].type === "text") {
- return { prompt: prompt[0].content[0].text };
- }
  let text = "";
  if (prompt[0].role === "system") {
  text += `${prompt[0].content}
@@ -1006,7 +1008,6 @@ var OpenAICompletionLanguageModel = class {
  };
  }
  async getArgs({
- inputFormat,
  prompt,
  maxOutputTokens,
  temperature,
@@ -1050,7 +1051,7 @@ var OpenAICompletionLanguageModel = class {
  details: "JSON response format is not supported."
  });
  }
- const { prompt: completionPrompt, stopSequences } = convertToOpenAICompletionPrompt({ prompt, inputFormat });
+ const { prompt: completionPrompt, stopSequences } = convertToOpenAICompletionPrompt({ prompt });
  const stop = [...stopSequences != null ? stopSequences : [], ...userStopSequences != null ? userStopSequences : []];
  return {
  args: {
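Dropping `inputFormat` also drops the early return that passed a lone user message through as the raw completion prompt; every prompt now goes through the chat-style text rendering kept in this file. A rough illustration under that assumption (the exact prefixes and whitespace come from the retained conversion code, not from this diff):

```ts
// A single-user-message LanguageModelV2 prompt:
const prompt = [
  { role: 'user', content: [{ type: 'text', text: 'Hello!' }] },
];

// canary.12: convertToOpenAICompletionPrompt({ prompt, inputFormat: 'prompt' })
//            returned { prompt: 'Hello!' } unchanged.
// canary.13: the same input is rendered as chat-style text, roughly
//            'user:\nHello!\n\nassistant:\n'.
```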
@@ -1252,23 +1253,16 @@ var openaiEmbeddingProviderOptions = import_zod6.z.object({

  // src/openai-embedding-model.ts
  var OpenAIEmbeddingModel = class {
- constructor(modelId, settings, config) {
+ constructor(modelId, config) {
  this.specificationVersion = "v2";
+ this.maxEmbeddingsPerCall = 2048;
+ this.supportsParallelCalls = true;
  this.modelId = modelId;
- this.settings = settings;
  this.config = config;
  }
  get provider() {
  return this.config.provider;
  }
- get maxEmbeddingsPerCall() {
- var _a;
- return (_a = this.settings.maxEmbeddingsPerCall) != null ? _a : 2048;
- }
- get supportsParallelCalls() {
- var _a;
- return (_a = this.settings.supportsParallelCalls) != null ? _a : true;
- }
  async doEmbed({
  values,
  headers,
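With the settings object removed, `maxEmbeddingsPerCall` (2048) and `supportsParallelCalls` (true) become fixed fields of the embedding model instead of per-instance overrides. A small usage sketch, assuming the `ai` package's `embedMany` helper and an example model id:

```ts
import { embedMany } from 'ai';
import { openai } from '@ai-sdk/openai';

// The embedding model is created from the model id alone; batching
// behaviour (2048 values per call, parallel calls allowed) is fixed
// by the model class rather than passed as settings.
const { embeddings } = await embedMany({
  model: openai.embedding('text-embedding-3-small'), // example id
  values: ['first document', 'second document'],
});
```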
@@ -1434,14 +1428,36 @@ var openaiTools = {

  // src/openai-transcription-model.ts
  var import_provider_utils7 = require("@ai-sdk/provider-utils");
+ var import_zod11 = require("zod");
+
+ // src/openai-transcription-options.ts
  var import_zod10 = require("zod");
- var openAIProviderOptionsSchema = import_zod10.z.object({
+ var openAITranscriptionProviderOptions = import_zod10.z.object({
+ /**
+ * Additional information to include in the transcription response.
+ */
  include: import_zod10.z.array(import_zod10.z.string()).nullish(),
+ /**
+ * The language of the input audio in ISO-639-1 format.
+ */
  language: import_zod10.z.string().nullish(),
+ /**
+ * An optional text to guide the model's style or continue a previous audio segment.
+ */
  prompt: import_zod10.z.string().nullish(),
- temperature: import_zod10.z.number().min(0).max(1).nullish().default(0),
- timestampGranularities: import_zod10.z.array(import_zod10.z.enum(["word", "segment"])).nullish().default(["segment"])
+ /**
+ * The sampling temperature, between 0 and 1.
+ * @default 0
+ */
+ temperature: import_zod10.z.number().min(0).max(1).default(0).nullish(),
+ /**
+ * The timestamp granularities to populate for this transcription.
+ * @default ['segment']
+ */
+ timestampGranularities: import_zod10.z.array(import_zod10.z.enum(["word", "segment"])).default(["segment"]).nullish()
  });
+
+ // src/openai-transcription-model.ts
  var languageMap = {
  afrikaans: "af",
  arabic: "ar",
@@ -1515,12 +1531,11 @@ var OpenAITranscriptionModel = class {
  mediaType,
  providerOptions
  }) {
- var _a, _b, _c, _d, _e;
  const warnings = [];
  const openAIOptions = await (0, import_provider_utils7.parseProviderOptions)({
  provider: "openai",
  providerOptions,
- schema: openAIProviderOptionsSchema
+ schema: openAITranscriptionProviderOptions
  });
  const formData = new FormData();
  const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([(0, import_provider_utils7.convertBase64ToUint8Array)(audio)]);
@@ -1528,15 +1543,14 @@ var OpenAITranscriptionModel = class {
  formData.append("file", new File([blob], "audio", { type: mediaType }));
  if (openAIOptions) {
  const transcriptionModelOptions = {
- include: (_a = openAIOptions.include) != null ? _a : void 0,
- language: (_b = openAIOptions.language) != null ? _b : void 0,
- prompt: (_c = openAIOptions.prompt) != null ? _c : void 0,
- temperature: (_d = openAIOptions.temperature) != null ? _d : void 0,
- timestamp_granularities: (_e = openAIOptions.timestampGranularities) != null ? _e : void 0
+ include: openAIOptions.include,
+ language: openAIOptions.language,
+ prompt: openAIOptions.prompt,
+ temperature: openAIOptions.temperature,
+ timestamp_granularities: openAIOptions.timestampGranularities
  };
- for (const key in transcriptionModelOptions) {
- const value = transcriptionModelOptions[key];
- if (value !== void 0) {
+ for (const [key, value] of Object.entries(transcriptionModelOptions)) {
+ if (value != null) {
  formData.append(key, String(value));
  }
  }
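The transcription options now live in their own module with documented fields, and only non-null values are appended to the multipart body. A hedged sketch of supplying them from application code, assuming the `ai` package's `experimental_transcribe` helper; the model id and file path are placeholders:

```ts
import { experimental_transcribe as transcribe } from 'ai';
import { openai } from '@ai-sdk/openai';
import { readFile } from 'node:fs/promises';

// Option names mirror openAITranscriptionProviderOptions above; any
// option left out (or null) is simply not added to the form data.
const { text } = await transcribe({
  model: openai.transcription('whisper-1'), // example model id
  audio: await readFile('./meeting.wav'),   // placeholder path
  providerOptions: {
    openai: {
      language: 'en',
      timestampGranularities: ['word'],
    },
  },
});
```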
@@ -1588,22 +1602,22 @@ var OpenAITranscriptionModel = class {
  };
  }
  };
- var openaiTranscriptionResponseSchema = import_zod10.z.object({
- text: import_zod10.z.string(),
- language: import_zod10.z.string().nullish(),
- duration: import_zod10.z.number().nullish(),
- words: import_zod10.z.array(
- import_zod10.z.object({
- word: import_zod10.z.string(),
- start: import_zod10.z.number(),
- end: import_zod10.z.number()
+ var openaiTranscriptionResponseSchema = import_zod11.z.object({
+ text: import_zod11.z.string(),
+ language: import_zod11.z.string().nullish(),
+ duration: import_zod11.z.number().nullish(),
+ words: import_zod11.z.array(
+ import_zod11.z.object({
+ word: import_zod11.z.string(),
+ start: import_zod11.z.number(),
+ end: import_zod11.z.number()
  })
  ).nullish()
  });

  // src/responses/openai-responses-language-model.ts
  var import_provider_utils8 = require("@ai-sdk/provider-utils");
- var import_zod11 = require("zod");
+ var import_zod12 = require("zod");

  // src/responses/convert-to-openai-responses-messages.ts
  var import_provider6 = require("@ai-sdk/provider");
@@ -1964,55 +1978,55 @@ var OpenAIResponsesLanguageModel = class {
  body,
  failedResponseHandler: openaiFailedResponseHandler,
  successfulResponseHandler: (0, import_provider_utils8.createJsonResponseHandler)(
- import_zod11.z.object({
- id: import_zod11.z.string(),
- created_at: import_zod11.z.number(),
- model: import_zod11.z.string(),
- output: import_zod11.z.array(
- import_zod11.z.discriminatedUnion("type", [
- import_zod11.z.object({
- type: import_zod11.z.literal("message"),
- role: import_zod11.z.literal("assistant"),
- content: import_zod11.z.array(
- import_zod11.z.object({
- type: import_zod11.z.literal("output_text"),
- text: import_zod11.z.string(),
- annotations: import_zod11.z.array(
- import_zod11.z.object({
- type: import_zod11.z.literal("url_citation"),
- start_index: import_zod11.z.number(),
- end_index: import_zod11.z.number(),
- url: import_zod11.z.string(),
- title: import_zod11.z.string()
+ import_zod12.z.object({
+ id: import_zod12.z.string(),
+ created_at: import_zod12.z.number(),
+ model: import_zod12.z.string(),
+ output: import_zod12.z.array(
+ import_zod12.z.discriminatedUnion("type", [
+ import_zod12.z.object({
+ type: import_zod12.z.literal("message"),
+ role: import_zod12.z.literal("assistant"),
+ content: import_zod12.z.array(
+ import_zod12.z.object({
+ type: import_zod12.z.literal("output_text"),
+ text: import_zod12.z.string(),
+ annotations: import_zod12.z.array(
+ import_zod12.z.object({
+ type: import_zod12.z.literal("url_citation"),
+ start_index: import_zod12.z.number(),
+ end_index: import_zod12.z.number(),
+ url: import_zod12.z.string(),
+ title: import_zod12.z.string()
  })
  )
  })
  )
  }),
- import_zod11.z.object({
- type: import_zod11.z.literal("function_call"),
- call_id: import_zod11.z.string(),
- name: import_zod11.z.string(),
- arguments: import_zod11.z.string()
+ import_zod12.z.object({
+ type: import_zod12.z.literal("function_call"),
+ call_id: import_zod12.z.string(),
+ name: import_zod12.z.string(),
+ arguments: import_zod12.z.string()
  }),
- import_zod11.z.object({
- type: import_zod11.z.literal("web_search_call")
+ import_zod12.z.object({
+ type: import_zod12.z.literal("web_search_call")
  }),
- import_zod11.z.object({
- type: import_zod11.z.literal("computer_call")
+ import_zod12.z.object({
+ type: import_zod12.z.literal("computer_call")
  }),
- import_zod11.z.object({
- type: import_zod11.z.literal("reasoning"),
- summary: import_zod11.z.array(
- import_zod11.z.object({
- type: import_zod11.z.literal("summary_text"),
- text: import_zod11.z.string()
+ import_zod12.z.object({
+ type: import_zod12.z.literal("reasoning"),
+ summary: import_zod12.z.array(
+ import_zod12.z.object({
+ type: import_zod12.z.literal("summary_text"),
+ text: import_zod12.z.string()
  })
  )
  })
  ])
  ),
- incomplete_details: import_zod11.z.object({ reason: import_zod11.z.string() }).nullable(),
+ incomplete_details: import_zod12.z.object({ reason: import_zod12.z.string() }).nullable(),
  usage: usageSchema
  })
  ),
@@ -2226,86 +2240,86 @@ var OpenAIResponsesLanguageModel = class {
  };
  }
  };
- var usageSchema = import_zod11.z.object({
- input_tokens: import_zod11.z.number(),
- input_tokens_details: import_zod11.z.object({ cached_tokens: import_zod11.z.number().nullish() }).nullish(),
- output_tokens: import_zod11.z.number(),
- output_tokens_details: import_zod11.z.object({ reasoning_tokens: import_zod11.z.number().nullish() }).nullish()
+ var usageSchema = import_zod12.z.object({
+ input_tokens: import_zod12.z.number(),
+ input_tokens_details: import_zod12.z.object({ cached_tokens: import_zod12.z.number().nullish() }).nullish(),
+ output_tokens: import_zod12.z.number(),
+ output_tokens_details: import_zod12.z.object({ reasoning_tokens: import_zod12.z.number().nullish() }).nullish()
  });
- var textDeltaChunkSchema = import_zod11.z.object({
- type: import_zod11.z.literal("response.output_text.delta"),
- delta: import_zod11.z.string()
+ var textDeltaChunkSchema = import_zod12.z.object({
+ type: import_zod12.z.literal("response.output_text.delta"),
+ delta: import_zod12.z.string()
  });
- var responseFinishedChunkSchema = import_zod11.z.object({
- type: import_zod11.z.enum(["response.completed", "response.incomplete"]),
- response: import_zod11.z.object({
- incomplete_details: import_zod11.z.object({ reason: import_zod11.z.string() }).nullish(),
+ var responseFinishedChunkSchema = import_zod12.z.object({
+ type: import_zod12.z.enum(["response.completed", "response.incomplete"]),
+ response: import_zod12.z.object({
+ incomplete_details: import_zod12.z.object({ reason: import_zod12.z.string() }).nullish(),
  usage: usageSchema
  })
  });
- var responseCreatedChunkSchema = import_zod11.z.object({
- type: import_zod11.z.literal("response.created"),
- response: import_zod11.z.object({
- id: import_zod11.z.string(),
- created_at: import_zod11.z.number(),
- model: import_zod11.z.string()
+ var responseCreatedChunkSchema = import_zod12.z.object({
+ type: import_zod12.z.literal("response.created"),
+ response: import_zod12.z.object({
+ id: import_zod12.z.string(),
+ created_at: import_zod12.z.number(),
+ model: import_zod12.z.string()
  })
  });
- var responseOutputItemDoneSchema = import_zod11.z.object({
- type: import_zod11.z.literal("response.output_item.done"),
- output_index: import_zod11.z.number(),
- item: import_zod11.z.discriminatedUnion("type", [
- import_zod11.z.object({
- type: import_zod11.z.literal("message")
+ var responseOutputItemDoneSchema = import_zod12.z.object({
+ type: import_zod12.z.literal("response.output_item.done"),
+ output_index: import_zod12.z.number(),
+ item: import_zod12.z.discriminatedUnion("type", [
+ import_zod12.z.object({
+ type: import_zod12.z.literal("message")
  }),
- import_zod11.z.object({
- type: import_zod11.z.literal("function_call"),
- id: import_zod11.z.string(),
- call_id: import_zod11.z.string(),
- name: import_zod11.z.string(),
- arguments: import_zod11.z.string(),
- status: import_zod11.z.literal("completed")
+ import_zod12.z.object({
+ type: import_zod12.z.literal("function_call"),
+ id: import_zod12.z.string(),
+ call_id: import_zod12.z.string(),
+ name: import_zod12.z.string(),
+ arguments: import_zod12.z.string(),
+ status: import_zod12.z.literal("completed")
  })
  ])
  });
- var responseFunctionCallArgumentsDeltaSchema = import_zod11.z.object({
- type: import_zod11.z.literal("response.function_call_arguments.delta"),
- item_id: import_zod11.z.string(),
- output_index: import_zod11.z.number(),
- delta: import_zod11.z.string()
+ var responseFunctionCallArgumentsDeltaSchema = import_zod12.z.object({
+ type: import_zod12.z.literal("response.function_call_arguments.delta"),
+ item_id: import_zod12.z.string(),
+ output_index: import_zod12.z.number(),
+ delta: import_zod12.z.string()
  });
- var responseOutputItemAddedSchema = import_zod11.z.object({
- type: import_zod11.z.literal("response.output_item.added"),
- output_index: import_zod11.z.number(),
- item: import_zod11.z.discriminatedUnion("type", [
- import_zod11.z.object({
- type: import_zod11.z.literal("message")
+ var responseOutputItemAddedSchema = import_zod12.z.object({
+ type: import_zod12.z.literal("response.output_item.added"),
+ output_index: import_zod12.z.number(),
+ item: import_zod12.z.discriminatedUnion("type", [
+ import_zod12.z.object({
+ type: import_zod12.z.literal("message")
  }),
- import_zod11.z.object({
- type: import_zod11.z.literal("function_call"),
- id: import_zod11.z.string(),
- call_id: import_zod11.z.string(),
- name: import_zod11.z.string(),
- arguments: import_zod11.z.string()
+ import_zod12.z.object({
+ type: import_zod12.z.literal("function_call"),
+ id: import_zod12.z.string(),
+ call_id: import_zod12.z.string(),
+ name: import_zod12.z.string(),
+ arguments: import_zod12.z.string()
  })
  ])
  });
- var responseAnnotationAddedSchema = import_zod11.z.object({
- type: import_zod11.z.literal("response.output_text.annotation.added"),
- annotation: import_zod11.z.object({
- type: import_zod11.z.literal("url_citation"),
- url: import_zod11.z.string(),
- title: import_zod11.z.string()
+ var responseAnnotationAddedSchema = import_zod12.z.object({
+ type: import_zod12.z.literal("response.output_text.annotation.added"),
+ annotation: import_zod12.z.object({
+ type: import_zod12.z.literal("url_citation"),
+ url: import_zod12.z.string(),
+ title: import_zod12.z.string()
  })
  });
- var responseReasoningSummaryTextDeltaSchema = import_zod11.z.object({
- type: import_zod11.z.literal("response.reasoning_summary_text.delta"),
- item_id: import_zod11.z.string(),
- output_index: import_zod11.z.number(),
- summary_index: import_zod11.z.number(),
- delta: import_zod11.z.string()
+ var responseReasoningSummaryTextDeltaSchema = import_zod12.z.object({
+ type: import_zod12.z.literal("response.reasoning_summary_text.delta"),
+ item_id: import_zod12.z.string(),
+ output_index: import_zod12.z.number(),
+ summary_index: import_zod12.z.number(),
+ delta: import_zod12.z.string()
  });
- var openaiResponsesChunkSchema = import_zod11.z.union([
+ var openaiResponsesChunkSchema = import_zod12.z.union([
  textDeltaChunkSchema,
  responseFinishedChunkSchema,
  responseCreatedChunkSchema,
@@ -2314,7 +2328,7 @@ var openaiResponsesChunkSchema = import_zod11.z.union([
  responseOutputItemAddedSchema,
  responseAnnotationAddedSchema,
  responseReasoningSummaryTextDeltaSchema,
- import_zod11.z.object({ type: import_zod11.z.string() }).passthrough()
+ import_zod12.z.object({ type: import_zod12.z.string() }).passthrough()
  // fallback for unknown chunks
  ]);
  function isTextDeltaChunk(chunk) {
@@ -2362,24 +2376,24 @@ function getResponsesModelConfig(modelId) {
  requiredAutoTruncation: false
  };
  }
- var openaiResponsesProviderOptionsSchema = import_zod11.z.object({
- metadata: import_zod11.z.any().nullish(),
- parallelToolCalls: import_zod11.z.boolean().nullish(),
- previousResponseId: import_zod11.z.string().nullish(),
- store: import_zod11.z.boolean().nullish(),
- user: import_zod11.z.string().nullish(),
- reasoningEffort: import_zod11.z.string().nullish(),
- strictSchemas: import_zod11.z.boolean().nullish(),
- instructions: import_zod11.z.string().nullish(),
- reasoningSummary: import_zod11.z.string().nullish()
+ var openaiResponsesProviderOptionsSchema = import_zod12.z.object({
+ metadata: import_zod12.z.any().nullish(),
+ parallelToolCalls: import_zod12.z.boolean().nullish(),
+ previousResponseId: import_zod12.z.string().nullish(),
+ store: import_zod12.z.boolean().nullish(),
+ user: import_zod12.z.string().nullish(),
+ reasoningEffort: import_zod12.z.string().nullish(),
+ strictSchemas: import_zod12.z.boolean().nullish(),
+ instructions: import_zod12.z.string().nullish(),
+ reasoningSummary: import_zod12.z.string().nullish()
  });

  // src/openai-speech-model.ts
  var import_provider_utils9 = require("@ai-sdk/provider-utils");
- var import_zod12 = require("zod");
- var OpenAIProviderOptionsSchema = import_zod12.z.object({
- instructions: import_zod12.z.string().nullish(),
- speed: import_zod12.z.number().min(0.25).max(4).default(1).nullish()
+ var import_zod13 = require("zod");
+ var OpenAIProviderOptionsSchema = import_zod13.z.object({
+ instructions: import_zod13.z.string().nullish(),
+ speed: import_zod13.z.number().min(0.25).max(4).default(1).nullish()
  });
  var OpenAISpeechModel = class {
  constructor(modelId, config) {
@@ -2489,7 +2503,7 @@ function createOpenAI(options = {}) {
  "OpenAI-Project": options.project,
  ...options.headers
  });
- const createChatModel = (modelId, settings = {}) => new OpenAIChatLanguageModel(modelId, settings, {
+ const createChatModel = (modelId) => new OpenAIChatLanguageModel(modelId, {
  provider: `${providerName}.chat`,
  url: ({ path }) => `${baseURL}${path}`,
  headers: getHeaders,
@@ -2503,7 +2517,7 @@ function createOpenAI(options = {}) {
  compatibility,
  fetch: options.fetch
  });
- const createEmbeddingModel = (modelId, settings = {}) => new OpenAIEmbeddingModel(modelId, settings, {
+ const createEmbeddingModel = (modelId) => new OpenAIEmbeddingModel(modelId, {
  provider: `${providerName}.embedding`,
  url: ({ path }) => `${baseURL}${path}`,
  headers: getHeaders,
@@ -2527,7 +2541,7 @@ function createOpenAI(options = {}) {
  headers: getHeaders,
  fetch: options.fetch
  });
- const createLanguageModel = (modelId, settings) => {
+ const createLanguageModel = (modelId) => {
  if (new.target) {
  throw new Error(
  "The OpenAI model function cannot be called with the new keyword."
@@ -2536,7 +2550,7 @@ function createOpenAI(options = {}) {
  if (modelId === "gpt-3.5-turbo-instruct") {
  return createCompletionModel(modelId);
  }
- return createChatModel(modelId, settings);
+ return createChatModel(modelId);
  };
  const createResponsesModel = (modelId) => {
  return new OpenAIResponsesLanguageModel(modelId, {
@@ -2546,8 +2560,8 @@ function createOpenAI(options = {}) {
  fetch: options.fetch
  });
  };
- const provider = function(modelId, settings) {
- return createLanguageModel(modelId, settings);
+ const provider = function(modelId) {
+ return createLanguageModel(modelId);
  };
  provider.languageModel = createLanguageModel;
  provider.chat = createChatModel;
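Because the provider function and its model factories now take only a model id, anything that previously lived in the per-model settings object moves to provider options at call time. A before/after sketch under that assumption (the model id is an example):

```ts
import { createOpenAI } from '@ai-sdk/openai';

const openai = createOpenAI({ apiKey: process.env.OPENAI_API_KEY });

// canary.12 and earlier: a settings object could be passed alongside the id.
// const model = openai('gpt-4o', { structuredOutputs: false });

// canary.13: the factory accepts only the model id...
const model = openai('gpt-4o');

// ...and behaviour such as structuredOutputs is steered per call via
// providerOptions: { openai: { structuredOutputs: false } }.
```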