@ai-sdk/openai 2.0.0-canary.12 → 2.0.0-canary.14

This diff compares the contents of two publicly released versions of the package as published to their public registry. It is provided for informational purposes only.
@@ -29,6 +29,7 @@ __export(internal_exports, {
  OpenAITranscriptionModel: () => OpenAITranscriptionModel,
  hasDefaultResponseFormat: () => hasDefaultResponseFormat,
  modelMaxImagesPerCall: () => modelMaxImagesPerCall,
+ openAITranscriptionProviderOptions: () => openAITranscriptionProviderOptions,
  openaiCompletionProviderOptions: () => openaiCompletionProviderOptions,
  openaiEmbeddingProviderOptions: () => openaiEmbeddingProviderOptions,
  openaiProviderOptions: () => openaiProviderOptions
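
The hunk above adds the transcription provider-options schema to the internal export surface. A minimal consumption sketch, assuming the internal entry point resolves as '@ai-sdk/openai/internal' (the derived type name is illustrative):

import { openAITranscriptionProviderOptions } from '@ai-sdk/openai/internal';
import { z } from 'zod';

// Derive a TypeScript type from the exported Zod schema.
type OpenAITranscriptionOptions = z.infer<
  typeof openAITranscriptionProviderOptions
>;

// Validate arbitrary input before forwarding it as provider options.
const parsed: OpenAITranscriptionOptions =
  openAITranscriptionProviderOptions.parse({ language: 'en' });
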
@@ -273,7 +274,13 @@ var openaiProviderOptions = import_zod.z.object({
  /**
  * Parameters for prediction mode.
  */
- prediction: import_zod.z.record(import_zod.z.any()).optional()
+ prediction: import_zod.z.record(import_zod.z.any()).optional(),
+ /**
+ * Whether to use structured outputs.
+ *
+ * @default true
+ */
+ structuredOutputs: import_zod.z.boolean().optional()
  });

  // src/openai-error.ts
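
structuredOutputs is now part of openaiProviderOptions, so it is a per-call provider option (defaulting to true) rather than a constructor setting. A hedged usage sketch, assuming the AI SDK's generateText with the v5-style providerOptions field; the model id and prompt are illustrative:

import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

const { text } = await generateText({
  model: openai('gpt-4o-mini'),
  prompt: 'Return a JSON object with three colors.',
  providerOptions: {
    // Opt out of OpenAI structured outputs for this call only.
    openai: { structuredOutputs: false },
  },
});
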
@@ -354,20 +361,17 @@ function prepareTools({

  // src/openai-chat-language-model.ts
  var OpenAIChatLanguageModel = class {
- constructor(modelId, settings, config) {
+ constructor(modelId, config) {
  this.specificationVersion = "v2";
+ this.supportedUrls = {
+ "image/*": [/^https?:\/\/.*$/]
+ };
  this.modelId = modelId;
- this.settings = settings;
  this.config = config;
  }
  get provider() {
  return this.config.provider;
  }
- async getSupportedUrls() {
- return {
- "image/*": [/^https?:\/\/.*$/]
- };
- }
  async getArgs({
  prompt,
  maxOutputTokens,
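
The chat model constructor no longer takes a settings argument, and URL support is now a synchronous supportedUrls property rather than an async getSupportedUrls() method. A sketch of how inspecting URL support might change, assuming direct access to the model instance (variable names are illustrative):

import { openai } from '@ai-sdk/openai';

const model = openai('gpt-4o');

// Before: const supported = await model.getSupportedUrls();
// After: a plain record of media-type patterns on the instance.
const imagePatterns = model.supportedUrls['image/*'] ?? [];
const canSendImageUrl = imagePatterns.some((pattern) =>
  pattern.test('https://example.com/photo.png'),
);
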
@@ -390,13 +394,14 @@ var OpenAIChatLanguageModel = class {
  providerOptions,
  schema: openaiProviderOptions
  })) != null ? _a : {};
+ const structuredOutputs = (_b = openaiOptions.structuredOutputs) != null ? _b : true;
  if (topK != null) {
  warnings.push({
  type: "unsupported-setting",
  setting: "topK"
  });
  }
- if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !this.settings.structuredOutputs) {
+ if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !structuredOutputs) {
  warnings.push({
  type: "unsupported-setting",
  setting: "responseFormat",
@@ -425,12 +430,12 @@ var OpenAIChatLanguageModel = class {
  presence_penalty: presencePenalty,
  response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? (
  // TODO convert into provider option
- this.settings.structuredOutputs && responseFormat.schema != null ? {
+ structuredOutputs && responseFormat.schema != null ? {
  type: "json_schema",
  json_schema: {
  schema: responseFormat.schema,
  strict: true,
- name: (_b = responseFormat.name) != null ? _b : "response",
+ name: (_c = responseFormat.name) != null ? _c : "response",
  description: responseFormat.description
  }
  } : { type: "json_object" }
@@ -510,7 +515,7 @@ var OpenAIChatLanguageModel = class {
  } = prepareTools({
  tools,
  toolChoice,
- structuredOutputs: (_c = this.settings.structuredOutputs) != null ? _c : false
+ structuredOutputs
  });
  return {
  args: {
@@ -870,11 +875,23 @@ var reasoningModels = {
  "o1-preview-2024-09-12": {
  systemMessageMode: "remove"
  },
+ o3: {
+ systemMessageMode: "developer"
+ },
+ "o3-2025-04-16": {
+ systemMessageMode: "developer"
+ },
  "o3-mini": {
  systemMessageMode: "developer"
  },
  "o3-mini-2025-01-31": {
  systemMessageMode: "developer"
+ },
+ "o4-mini": {
+ systemMessageMode: "developer"
+ },
+ "o4-mini-2025-04-16": {
+ systemMessageMode: "developer"
  }
  };
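
With o3, o3-2025-04-16, o4-mini, and o4-mini-2025-04-16 added to the reasoning-model table, system messages for these model ids use systemMessageMode: "developer". A usage sketch, assuming generateText from the ai package; the prompt text is illustrative:

import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

const { text } = await generateText({
  model: openai('o4-mini'),
  // With systemMessageMode: "developer", this is expected to be sent
  // to the API as a developer message rather than a system message.
  system: 'You are a terse assistant.',
  prompt: 'Summarize the Pythagorean theorem in one sentence.',
});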
 
@@ -886,13 +903,9 @@ var import_zod5 = require("zod");
  var import_provider4 = require("@ai-sdk/provider");
  function convertToOpenAICompletionPrompt({
  prompt,
- inputFormat,
  user = "user",
  assistant = "assistant"
  }) {
- if (inputFormat === "prompt" && prompt.length === 1 && prompt[0].role === "user" && prompt[0].content.length === 1 && prompt[0].content[0].type === "text") {
- return { prompt: prompt[0].content[0].text };
- }
  let text = "";
  if (prompt[0].role === "system") {
  text += `${prompt[0].content}
@@ -998,6 +1011,9 @@ var openaiCompletionProviderOptions = import_zod4.z.object({
  var OpenAICompletionLanguageModel = class {
  constructor(modelId, config) {
  this.specificationVersion = "v2";
+ this.supportedUrls = {
+ // No URLs are supported for completion models.
+ };
  this.modelId = modelId;
  this.config = config;
  }
@@ -1007,13 +1023,7 @@ var OpenAICompletionLanguageModel = class {
  get provider() {
  return this.config.provider;
  }
- async getSupportedUrls() {
- return {
- // no supported urls for completion models
- };
- }
  async getArgs({
- inputFormat,
  prompt,
  maxOutputTokens,
  temperature,
@@ -1057,7 +1067,7 @@ var OpenAICompletionLanguageModel = class {
  details: "JSON response format is not supported."
  });
  }
- const { prompt: completionPrompt, stopSequences } = convertToOpenAICompletionPrompt({ prompt, inputFormat });
+ const { prompt: completionPrompt, stopSequences } = convertToOpenAICompletionPrompt({ prompt });
  const stop = [...stopSequences != null ? stopSequences : [], ...userStopSequences != null ? userStopSequences : []];
  return {
  args: {
@@ -1259,23 +1269,16 @@ var openaiEmbeddingProviderOptions = import_zod6.z.object({

  // src/openai-embedding-model.ts
  var OpenAIEmbeddingModel = class {
- constructor(modelId, settings, config) {
+ constructor(modelId, config) {
  this.specificationVersion = "v2";
+ this.maxEmbeddingsPerCall = 2048;
+ this.supportsParallelCalls = true;
  this.modelId = modelId;
- this.settings = settings;
  this.config = config;
  }
  get provider() {
  return this.config.provider;
  }
- get maxEmbeddingsPerCall() {
- var _a;
- return (_a = this.settings.maxEmbeddingsPerCall) != null ? _a : 2048;
- }
- get supportsParallelCalls() {
- var _a;
- return (_a = this.settings.supportsParallelCalls) != null ? _a : true;
- }
  async doEmbed({
  values,
  headers,
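
maxEmbeddingsPerCall and supportsParallelCalls are now fixed instance fields (2048 and true) instead of configurable model settings. If you previously set a smaller maxEmbeddingsPerCall, batching can be done at the call site; a sketch assuming the provider's openai.embedding() factory and embedMany from the ai package:

import { openai } from '@ai-sdk/openai';
import { embedMany } from 'ai';

const model = openai.embedding('text-embedding-3-small');
const values = ['first chunk', 'second chunk', 'third chunk'];

// The model now always reports 2048 embeddings per call; slice the
// input yourself if you want smaller request batches.
const batchSize = 256;
const embeddings: number[][] = [];
for (let i = 0; i < values.length; i += batchSize) {
  const batch = await embedMany({ model, values: values.slice(i, i + batchSize) });
  embeddings.push(...batch.embeddings);
}
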
@@ -1350,7 +1353,7 @@ var OpenAIImageModel = class {
  this.modelId = modelId;
  this.settings = settings;
  this.config = config;
- this.specificationVersion = "v1";
+ this.specificationVersion = "v2";
  }
  get maxImagesPerCall() {
  var _a, _b;
@@ -1420,14 +1423,36 @@ var openaiImageResponseSchema = import_zod8.z.object({

  // src/openai-transcription-model.ts
  var import_provider_utils7 = require("@ai-sdk/provider-utils");
+ var import_zod10 = require("zod");
+
+ // src/openai-transcription-options.ts
  var import_zod9 = require("zod");
- var openAIProviderOptionsSchema = import_zod9.z.object({
+ var openAITranscriptionProviderOptions = import_zod9.z.object({
+ /**
+ * Additional information to include in the transcription response.
+ */
  include: import_zod9.z.array(import_zod9.z.string()).nullish(),
+ /**
+ * The language of the input audio in ISO-639-1 format.
+ */
  language: import_zod9.z.string().nullish(),
+ /**
+ * An optional text to guide the model's style or continue a previous audio segment.
+ */
  prompt: import_zod9.z.string().nullish(),
- temperature: import_zod9.z.number().min(0).max(1).nullish().default(0),
- timestampGranularities: import_zod9.z.array(import_zod9.z.enum(["word", "segment"])).nullish().default(["segment"])
+ /**
+ * The sampling temperature, between 0 and 1.
+ * @default 0
+ */
+ temperature: import_zod9.z.number().min(0).max(1).default(0).nullish(),
+ /**
+ * The timestamp granularities to populate for this transcription.
+ * @default ['segment']
+ */
+ timestampGranularities: import_zod9.z.array(import_zod9.z.enum(["word", "segment"])).default(["segment"]).nullish()
  });
+
+ // src/openai-transcription-model.ts
  var languageMap = {
  afrikaans: "af",
  arabic: "ar",
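
The transcription options now live in src/openai-transcription-options.ts as openAITranscriptionProviderOptions, with documented fields (include, language, prompt, temperature, timestampGranularities). A hedged usage sketch, assuming the experimental transcription API of the ai package; the model id and file path are illustrative:

import { openai } from '@ai-sdk/openai';
import { experimental_transcribe as transcribe } from 'ai';
import { readFile } from 'node:fs/promises';

const result = await transcribe({
  model: openai.transcription('whisper-1'),
  audio: await readFile('./meeting.mp3'),
  providerOptions: {
    openai: {
      language: 'en', // ISO-639-1 code
      temperature: 0, // 0..1, defaults to 0
      timestampGranularities: ['word'], // defaults to ['segment']
    },
  },
});

console.log(result.text);
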
@@ -1501,12 +1526,11 @@ var OpenAITranscriptionModel = class {
  mediaType,
  providerOptions
  }) {
- var _a, _b, _c, _d, _e;
  const warnings = [];
  const openAIOptions = await (0, import_provider_utils7.parseProviderOptions)({
  provider: "openai",
  providerOptions,
- schema: openAIProviderOptionsSchema
+ schema: openAITranscriptionProviderOptions
  });
  const formData = new FormData();
  const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([(0, import_provider_utils7.convertBase64ToUint8Array)(audio)]);
@@ -1514,15 +1538,14 @@ var OpenAITranscriptionModel = class {
  formData.append("file", new File([blob], "audio", { type: mediaType }));
  if (openAIOptions) {
  const transcriptionModelOptions = {
- include: (_a = openAIOptions.include) != null ? _a : void 0,
- language: (_b = openAIOptions.language) != null ? _b : void 0,
- prompt: (_c = openAIOptions.prompt) != null ? _c : void 0,
- temperature: (_d = openAIOptions.temperature) != null ? _d : void 0,
- timestamp_granularities: (_e = openAIOptions.timestampGranularities) != null ? _e : void 0
+ include: openAIOptions.include,
+ language: openAIOptions.language,
+ prompt: openAIOptions.prompt,
+ temperature: openAIOptions.temperature,
+ timestamp_granularities: openAIOptions.timestampGranularities
  };
- for (const key in transcriptionModelOptions) {
- const value = transcriptionModelOptions[key];
- if (value !== void 0) {
+ for (const [key, value] of Object.entries(transcriptionModelOptions)) {
+ if (value != null) {
  formData.append(key, String(value));
  }
  }
@@ -1574,25 +1597,25 @@ var OpenAITranscriptionModel = class {
  };
  }
  };
- var openaiTranscriptionResponseSchema = import_zod9.z.object({
- text: import_zod9.z.string(),
- language: import_zod9.z.string().nullish(),
- duration: import_zod9.z.number().nullish(),
- words: import_zod9.z.array(
- import_zod9.z.object({
- word: import_zod9.z.string(),
- start: import_zod9.z.number(),
- end: import_zod9.z.number()
+ var openaiTranscriptionResponseSchema = import_zod10.z.object({
+ text: import_zod10.z.string(),
+ language: import_zod10.z.string().nullish(),
+ duration: import_zod10.z.number().nullish(),
+ words: import_zod10.z.array(
+ import_zod10.z.object({
+ word: import_zod10.z.string(),
+ start: import_zod10.z.number(),
+ end: import_zod10.z.number()
  })
  ).nullish()
  });

  // src/openai-speech-model.ts
  var import_provider_utils8 = require("@ai-sdk/provider-utils");
- var import_zod10 = require("zod");
- var OpenAIProviderOptionsSchema = import_zod10.z.object({
- instructions: import_zod10.z.string().nullish(),
- speed: import_zod10.z.number().min(0.25).max(4).default(1).nullish()
+ var import_zod11 = require("zod");
+ var OpenAIProviderOptionsSchema = import_zod11.z.object({
+ instructions: import_zod11.z.string().nullish(),
+ speed: import_zod11.z.number().min(0.25).max(4).default(1).nullish()
  });
  var OpenAISpeechModel = class {
  constructor(modelId, config) {
@@ -1688,7 +1711,7 @@ var OpenAISpeechModel = class {

  // src/responses/openai-responses-language-model.ts
  var import_provider_utils9 = require("@ai-sdk/provider-utils");
- var import_zod11 = require("zod");
+ var import_zod12 = require("zod");

  // src/responses/convert-to-openai-responses-messages.ts
  var import_provider6 = require("@ai-sdk/provider");
@@ -1897,13 +1920,11 @@ function prepareResponsesTools({
  var OpenAIResponsesLanguageModel = class {
  constructor(modelId, config) {
  this.specificationVersion = "v2";
- this.modelId = modelId;
- this.config = config;
- }
- async getSupportedUrls() {
- return {
+ this.supportedUrls = {
  "image/*": [/^https?:\/\/.*$/]
  };
+ this.modelId = modelId;
+ this.config = config;
  }
  get provider() {
  return this.config.provider;
@@ -2049,55 +2070,55 @@ var OpenAIResponsesLanguageModel = class {
  body,
  failedResponseHandler: openaiFailedResponseHandler,
  successfulResponseHandler: (0, import_provider_utils9.createJsonResponseHandler)(
- import_zod11.z.object({
- id: import_zod11.z.string(),
- created_at: import_zod11.z.number(),
- model: import_zod11.z.string(),
- output: import_zod11.z.array(
- import_zod11.z.discriminatedUnion("type", [
- import_zod11.z.object({
- type: import_zod11.z.literal("message"),
- role: import_zod11.z.literal("assistant"),
- content: import_zod11.z.array(
- import_zod11.z.object({
- type: import_zod11.z.literal("output_text"),
- text: import_zod11.z.string(),
- annotations: import_zod11.z.array(
- import_zod11.z.object({
- type: import_zod11.z.literal("url_citation"),
- start_index: import_zod11.z.number(),
- end_index: import_zod11.z.number(),
- url: import_zod11.z.string(),
- title: import_zod11.z.string()
+ import_zod12.z.object({
+ id: import_zod12.z.string(),
+ created_at: import_zod12.z.number(),
+ model: import_zod12.z.string(),
+ output: import_zod12.z.array(
+ import_zod12.z.discriminatedUnion("type", [
+ import_zod12.z.object({
+ type: import_zod12.z.literal("message"),
+ role: import_zod12.z.literal("assistant"),
+ content: import_zod12.z.array(
+ import_zod12.z.object({
+ type: import_zod12.z.literal("output_text"),
+ text: import_zod12.z.string(),
+ annotations: import_zod12.z.array(
+ import_zod12.z.object({
+ type: import_zod12.z.literal("url_citation"),
+ start_index: import_zod12.z.number(),
+ end_index: import_zod12.z.number(),
+ url: import_zod12.z.string(),
+ title: import_zod12.z.string()
  })
  )
  })
  )
  }),
- import_zod11.z.object({
- type: import_zod11.z.literal("function_call"),
- call_id: import_zod11.z.string(),
- name: import_zod11.z.string(),
- arguments: import_zod11.z.string()
+ import_zod12.z.object({
+ type: import_zod12.z.literal("function_call"),
+ call_id: import_zod12.z.string(),
+ name: import_zod12.z.string(),
+ arguments: import_zod12.z.string()
  }),
- import_zod11.z.object({
- type: import_zod11.z.literal("web_search_call")
+ import_zod12.z.object({
+ type: import_zod12.z.literal("web_search_call")
  }),
- import_zod11.z.object({
- type: import_zod11.z.literal("computer_call")
+ import_zod12.z.object({
+ type: import_zod12.z.literal("computer_call")
  }),
- import_zod11.z.object({
- type: import_zod11.z.literal("reasoning"),
- summary: import_zod11.z.array(
- import_zod11.z.object({
- type: import_zod11.z.literal("summary_text"),
- text: import_zod11.z.string()
+ import_zod12.z.object({
+ type: import_zod12.z.literal("reasoning"),
+ summary: import_zod12.z.array(
+ import_zod12.z.object({
+ type: import_zod12.z.literal("summary_text"),
+ text: import_zod12.z.string()
  })
  )
  })
  ])
  ),
- incomplete_details: import_zod11.z.object({ reason: import_zod11.z.string() }).nullable(),
+ incomplete_details: import_zod12.z.object({ reason: import_zod12.z.string() }).nullable(),
  usage: usageSchema
  })
  ),
@@ -2311,86 +2332,86 @@ var OpenAIResponsesLanguageModel = class {
  };
  }
  };
- var usageSchema = import_zod11.z.object({
- input_tokens: import_zod11.z.number(),
- input_tokens_details: import_zod11.z.object({ cached_tokens: import_zod11.z.number().nullish() }).nullish(),
- output_tokens: import_zod11.z.number(),
- output_tokens_details: import_zod11.z.object({ reasoning_tokens: import_zod11.z.number().nullish() }).nullish()
+ var usageSchema = import_zod12.z.object({
+ input_tokens: import_zod12.z.number(),
+ input_tokens_details: import_zod12.z.object({ cached_tokens: import_zod12.z.number().nullish() }).nullish(),
+ output_tokens: import_zod12.z.number(),
+ output_tokens_details: import_zod12.z.object({ reasoning_tokens: import_zod12.z.number().nullish() }).nullish()
  });
- var textDeltaChunkSchema = import_zod11.z.object({
- type: import_zod11.z.literal("response.output_text.delta"),
- delta: import_zod11.z.string()
+ var textDeltaChunkSchema = import_zod12.z.object({
+ type: import_zod12.z.literal("response.output_text.delta"),
+ delta: import_zod12.z.string()
  });
- var responseFinishedChunkSchema = import_zod11.z.object({
- type: import_zod11.z.enum(["response.completed", "response.incomplete"]),
- response: import_zod11.z.object({
- incomplete_details: import_zod11.z.object({ reason: import_zod11.z.string() }).nullish(),
+ var responseFinishedChunkSchema = import_zod12.z.object({
+ type: import_zod12.z.enum(["response.completed", "response.incomplete"]),
+ response: import_zod12.z.object({
+ incomplete_details: import_zod12.z.object({ reason: import_zod12.z.string() }).nullish(),
  usage: usageSchema
  })
  });
- var responseCreatedChunkSchema = import_zod11.z.object({
- type: import_zod11.z.literal("response.created"),
- response: import_zod11.z.object({
- id: import_zod11.z.string(),
- created_at: import_zod11.z.number(),
- model: import_zod11.z.string()
+ var responseCreatedChunkSchema = import_zod12.z.object({
+ type: import_zod12.z.literal("response.created"),
+ response: import_zod12.z.object({
+ id: import_zod12.z.string(),
+ created_at: import_zod12.z.number(),
+ model: import_zod12.z.string()
  })
  });
- var responseOutputItemDoneSchema = import_zod11.z.object({
- type: import_zod11.z.literal("response.output_item.done"),
- output_index: import_zod11.z.number(),
- item: import_zod11.z.discriminatedUnion("type", [
- import_zod11.z.object({
- type: import_zod11.z.literal("message")
+ var responseOutputItemDoneSchema = import_zod12.z.object({
+ type: import_zod12.z.literal("response.output_item.done"),
+ output_index: import_zod12.z.number(),
+ item: import_zod12.z.discriminatedUnion("type", [
+ import_zod12.z.object({
+ type: import_zod12.z.literal("message")
  }),
- import_zod11.z.object({
- type: import_zod11.z.literal("function_call"),
- id: import_zod11.z.string(),
- call_id: import_zod11.z.string(),
- name: import_zod11.z.string(),
- arguments: import_zod11.z.string(),
- status: import_zod11.z.literal("completed")
+ import_zod12.z.object({
+ type: import_zod12.z.literal("function_call"),
+ id: import_zod12.z.string(),
+ call_id: import_zod12.z.string(),
+ name: import_zod12.z.string(),
+ arguments: import_zod12.z.string(),
+ status: import_zod12.z.literal("completed")
  })
  ])
  });
- var responseFunctionCallArgumentsDeltaSchema = import_zod11.z.object({
- type: import_zod11.z.literal("response.function_call_arguments.delta"),
- item_id: import_zod11.z.string(),
- output_index: import_zod11.z.number(),
- delta: import_zod11.z.string()
+ var responseFunctionCallArgumentsDeltaSchema = import_zod12.z.object({
+ type: import_zod12.z.literal("response.function_call_arguments.delta"),
+ item_id: import_zod12.z.string(),
+ output_index: import_zod12.z.number(),
+ delta: import_zod12.z.string()
  });
- var responseOutputItemAddedSchema = import_zod11.z.object({
- type: import_zod11.z.literal("response.output_item.added"),
- output_index: import_zod11.z.number(),
- item: import_zod11.z.discriminatedUnion("type", [
- import_zod11.z.object({
- type: import_zod11.z.literal("message")
+ var responseOutputItemAddedSchema = import_zod12.z.object({
+ type: import_zod12.z.literal("response.output_item.added"),
+ output_index: import_zod12.z.number(),
+ item: import_zod12.z.discriminatedUnion("type", [
+ import_zod12.z.object({
+ type: import_zod12.z.literal("message")
  }),
- import_zod11.z.object({
- type: import_zod11.z.literal("function_call"),
- id: import_zod11.z.string(),
- call_id: import_zod11.z.string(),
- name: import_zod11.z.string(),
- arguments: import_zod11.z.string()
+ import_zod12.z.object({
+ type: import_zod12.z.literal("function_call"),
+ id: import_zod12.z.string(),
+ call_id: import_zod12.z.string(),
+ name: import_zod12.z.string(),
+ arguments: import_zod12.z.string()
  })
  ])
  });
- var responseAnnotationAddedSchema = import_zod11.z.object({
- type: import_zod11.z.literal("response.output_text.annotation.added"),
- annotation: import_zod11.z.object({
- type: import_zod11.z.literal("url_citation"),
- url: import_zod11.z.string(),
- title: import_zod11.z.string()
+ var responseAnnotationAddedSchema = import_zod12.z.object({
+ type: import_zod12.z.literal("response.output_text.annotation.added"),
+ annotation: import_zod12.z.object({
+ type: import_zod12.z.literal("url_citation"),
+ url: import_zod12.z.string(),
+ title: import_zod12.z.string()
  })
  });
- var responseReasoningSummaryTextDeltaSchema = import_zod11.z.object({
- type: import_zod11.z.literal("response.reasoning_summary_text.delta"),
- item_id: import_zod11.z.string(),
- output_index: import_zod11.z.number(),
- summary_index: import_zod11.z.number(),
- delta: import_zod11.z.string()
+ var responseReasoningSummaryTextDeltaSchema = import_zod12.z.object({
+ type: import_zod12.z.literal("response.reasoning_summary_text.delta"),
+ item_id: import_zod12.z.string(),
+ output_index: import_zod12.z.number(),
+ summary_index: import_zod12.z.number(),
+ delta: import_zod12.z.string()
  });
- var openaiResponsesChunkSchema = import_zod11.z.union([
+ var openaiResponsesChunkSchema = import_zod12.z.union([
  textDeltaChunkSchema,
  responseFinishedChunkSchema,
  responseCreatedChunkSchema,
@@ -2399,7 +2420,7 @@ var openaiResponsesChunkSchema = import_zod11.z.union([
  responseOutputItemAddedSchema,
  responseAnnotationAddedSchema,
  responseReasoningSummaryTextDeltaSchema,
- import_zod11.z.object({ type: import_zod11.z.string() }).passthrough()
+ import_zod12.z.object({ type: import_zod12.z.string() }).passthrough()
  // fallback for unknown chunks
  ]);
  function isTextDeltaChunk(chunk) {
@@ -2447,16 +2468,16 @@ function getResponsesModelConfig(modelId) {
  requiredAutoTruncation: false
  };
  }
- var openaiResponsesProviderOptionsSchema = import_zod11.z.object({
- metadata: import_zod11.z.any().nullish(),
- parallelToolCalls: import_zod11.z.boolean().nullish(),
- previousResponseId: import_zod11.z.string().nullish(),
- store: import_zod11.z.boolean().nullish(),
- user: import_zod11.z.string().nullish(),
- reasoningEffort: import_zod11.z.string().nullish(),
- strictSchemas: import_zod11.z.boolean().nullish(),
- instructions: import_zod11.z.string().nullish(),
- reasoningSummary: import_zod11.z.string().nullish()
+ var openaiResponsesProviderOptionsSchema = import_zod12.z.object({
+ metadata: import_zod12.z.any().nullish(),
+ parallelToolCalls: import_zod12.z.boolean().nullish(),
+ previousResponseId: import_zod12.z.string().nullish(),
+ store: import_zod12.z.boolean().nullish(),
+ user: import_zod12.z.string().nullish(),
+ reasoningEffort: import_zod12.z.string().nullish(),
+ strictSchemas: import_zod12.z.boolean().nullish(),
+ instructions: import_zod12.z.string().nullish(),
+ reasoningSummary: import_zod12.z.string().nullish()
  });
  // Annotate the CommonJS export names for ESM import in node:
  0 && (module.exports = {
@@ -2469,6 +2490,7 @@ var openaiResponsesProviderOptionsSchema = import_zod11.z.object({
  OpenAITranscriptionModel,
  hasDefaultResponseFormat,
  modelMaxImagesPerCall,
+ openAITranscriptionProviderOptions,
  openaiCompletionProviderOptions,
  openaiEmbeddingProviderOptions,
  openaiProviderOptions