@ai-sdk/openai 2.0.0-canary.12 → 2.0.0-canary.14

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -266,7 +266,13 @@ var openaiProviderOptions = import_zod.z.object({
   /**
    * Parameters for prediction mode.
    */
-  prediction: import_zod.z.record(import_zod.z.any()).optional()
+  prediction: import_zod.z.record(import_zod.z.any()).optional(),
+  /**
+   * Whether to use structured outputs.
+   *
+   * @default true
+   */
+  structuredOutputs: import_zod.z.boolean().optional()
 });
 
 // src/openai-error.ts
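
Note: the new `structuredOutputs` flag is read from the OpenAI provider options (defaulting to true) rather than from per-model settings. A minimal usage sketch, assuming the `ai` package's `generateText` call options, which are not part of this diff:

```ts
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

// Sketch: structuredOutputs is now overridden per request via
// providerOptions.openai instead of a second settings argument.
const { text } = await generateText({
  model: openai('gpt-4o'),
  prompt: 'List three colors as JSON.',
  providerOptions: {
    openai: { structuredOutputs: false },
  },
});
console.log(text);
```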
@@ -347,20 +353,17 @@ function prepareTools({
 
 // src/openai-chat-language-model.ts
 var OpenAIChatLanguageModel = class {
-  constructor(modelId, settings, config) {
+  constructor(modelId, config) {
     this.specificationVersion = "v2";
+    this.supportedUrls = {
+      "image/*": [/^https?:\/\/.*$/]
+    };
     this.modelId = modelId;
-    this.settings = settings;
     this.config = config;
   }
   get provider() {
     return this.config.provider;
   }
-  async getSupportedUrls() {
-    return {
-      "image/*": [/^https?:\/\/.*$/]
-    };
-  }
   async getArgs({
     prompt,
     maxOutputTokens,
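
Note: `getSupportedUrls()` becomes a plain `supportedUrls` instance field, so URL support can be checked synchronously. A rough, hypothetical illustration of consuming that shape (the helper below is not SDK code; only the property value comes from the diff above):

```ts
// Sketch: supportedUrls maps media-type patterns (e.g. "image/*") to URL regexps.
type SupportedUrls = Record<string, RegExp[]>;

function supportsUrl(supportedUrls: SupportedUrls, mediaType: string, url: string): boolean {
  return Object.entries(supportedUrls).some(
    ([pattern, regexps]) =>
      new RegExp('^' + pattern.replace('*', '.*') + '$').test(mediaType) &&
      regexps.some((re) => re.test(url)),
  );
}

const supportedUrls: SupportedUrls = { 'image/*': [/^https?:\/\/.*$/] };
console.log(supportsUrl(supportedUrls, 'image/png', 'https://example.com/cat.png')); // true
```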
@@ -383,13 +386,14 @@ var OpenAIChatLanguageModel = class {
       providerOptions,
       schema: openaiProviderOptions
     })) != null ? _a : {};
+    const structuredOutputs = (_b = openaiOptions.structuredOutputs) != null ? _b : true;
     if (topK != null) {
       warnings.push({
         type: "unsupported-setting",
         setting: "topK"
       });
     }
-    if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !this.settings.structuredOutputs) {
+    if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !structuredOutputs) {
       warnings.push({
         type: "unsupported-setting",
         setting: "responseFormat",
@@ -418,12 +422,12 @@ var OpenAIChatLanguageModel = class {
       presence_penalty: presencePenalty,
       response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? (
         // TODO convert into provider option
-        this.settings.structuredOutputs && responseFormat.schema != null ? {
+        structuredOutputs && responseFormat.schema != null ? {
           type: "json_schema",
           json_schema: {
             schema: responseFormat.schema,
             strict: true,
-            name: (_b = responseFormat.name) != null ? _b : "response",
+            name: (_c = responseFormat.name) != null ? _c : "response",
             description: responseFormat.description
           }
         } : { type: "json_object" }
@@ -503,7 +507,7 @@ var OpenAIChatLanguageModel = class {
     } = prepareTools({
       tools,
       toolChoice,
-      structuredOutputs: (_c = this.settings.structuredOutputs) != null ? _c : false
+      structuredOutputs
     });
     return {
       args: {
@@ -863,11 +867,23 @@ var reasoningModels = {
   "o1-preview-2024-09-12": {
     systemMessageMode: "remove"
   },
+  o3: {
+    systemMessageMode: "developer"
+  },
+  "o3-2025-04-16": {
+    systemMessageMode: "developer"
+  },
   "o3-mini": {
     systemMessageMode: "developer"
   },
   "o3-mini-2025-01-31": {
     systemMessageMode: "developer"
+  },
+  "o4-mini": {
+    systemMessageMode: "developer"
+  },
+  "o4-mini-2025-04-16": {
+    systemMessageMode: "developer"
   }
 };
 
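Note: the `reasoningModels` table now also covers `o3`, `o3-2025-04-16`, `o4-mini`, and `o4-mini-2025-04-16`, all with `systemMessageMode: "developer"`, so system prompts for these model IDs are forwarded as developer messages. A hedged usage sketch, again assuming the `ai` package's call options (not shown in this diff):

```ts
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

// Sketch: per the reasoningModels mapping above, the system message below
// should be sent as a "developer" message for o3 / o4-mini model IDs.
const { text } = await generateText({
  model: openai('o4-mini'),
  system: 'You are a terse assistant.',
  prompt: 'Explain big-O notation in one sentence.',
});
console.log(text);
```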
@@ -879,13 +895,9 @@ var import_zod5 = require("zod");
 var import_provider4 = require("@ai-sdk/provider");
 function convertToOpenAICompletionPrompt({
   prompt,
-  inputFormat,
   user = "user",
   assistant = "assistant"
 }) {
-  if (inputFormat === "prompt" && prompt.length === 1 && prompt[0].role === "user" && prompt[0].content.length === 1 && prompt[0].content[0].type === "text") {
-    return { prompt: prompt[0].content[0].text };
-  }
   let text = "";
   if (prompt[0].role === "system") {
     text += `${prompt[0].content}
@@ -991,6 +1003,9 @@ var openaiCompletionProviderOptions = import_zod4.z.object({
 var OpenAICompletionLanguageModel = class {
   constructor(modelId, config) {
     this.specificationVersion = "v2";
+    this.supportedUrls = {
+      // No URLs are supported for completion models.
+    };
     this.modelId = modelId;
     this.config = config;
   }
@@ -1000,13 +1015,7 @@ var OpenAICompletionLanguageModel = class {
   get provider() {
     return this.config.provider;
   }
-  async getSupportedUrls() {
-    return {
-      // no supported urls for completion models
-    };
-  }
   async getArgs({
-    inputFormat,
     prompt,
     maxOutputTokens,
     temperature,
@@ -1050,7 +1059,7 @@ var OpenAICompletionLanguageModel = class {
         details: "JSON response format is not supported."
       });
     }
-    const { prompt: completionPrompt, stopSequences } = convertToOpenAICompletionPrompt({ prompt, inputFormat });
+    const { prompt: completionPrompt, stopSequences } = convertToOpenAICompletionPrompt({ prompt });
     const stop = [...stopSequences != null ? stopSequences : [], ...userStopSequences != null ? userStopSequences : []];
     return {
       args: {
@@ -1252,23 +1261,16 @@ var openaiEmbeddingProviderOptions = import_zod6.z.object({
 
 // src/openai-embedding-model.ts
 var OpenAIEmbeddingModel = class {
-  constructor(modelId, settings, config) {
+  constructor(modelId, config) {
     this.specificationVersion = "v2";
+    this.maxEmbeddingsPerCall = 2048;
+    this.supportsParallelCalls = true;
     this.modelId = modelId;
-    this.settings = settings;
     this.config = config;
   }
   get provider() {
     return this.config.provider;
   }
-  get maxEmbeddingsPerCall() {
-    var _a;
-    return (_a = this.settings.maxEmbeddingsPerCall) != null ? _a : 2048;
-  }
-  get supportsParallelCalls() {
-    var _a;
-    return (_a = this.settings.supportsParallelCalls) != null ? _a : true;
-  }
   async doEmbed({
     values,
     headers,
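
Note: with the embedding settings object gone, `maxEmbeddingsPerCall` is fixed at 2048 and `supportsParallelCalls` at true. A sketch of how callers typically lean on that limit, assuming `embedMany` from the `ai` package (which batches by the model's reported limit; not shown in this diff):

```ts
import { openai } from '@ai-sdk/openai';
import { embedMany } from 'ai';

// Sketch: the embedding model now always reports maxEmbeddingsPerCall = 2048,
// so large input lists are split into calls of at most 2048 values.
const inputs: string[] = Array.from({ length: 5000 }, (_, i) => `document ${i}`);

const { embeddings } = await embedMany({
  model: openai.embedding('text-embedding-3-small'),
  values: inputs,
});
console.log(embeddings.length); // 5000
```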
@@ -1343,7 +1345,7 @@ var OpenAIImageModel = class {
     this.modelId = modelId;
     this.settings = settings;
     this.config = config;
-    this.specificationVersion = "v1";
+    this.specificationVersion = "v2";
   }
   get maxImagesPerCall() {
     var _a, _b;
@@ -1434,14 +1436,36 @@ var openaiTools = {
 
 // src/openai-transcription-model.ts
 var import_provider_utils7 = require("@ai-sdk/provider-utils");
+var import_zod11 = require("zod");
+
+// src/openai-transcription-options.ts
 var import_zod10 = require("zod");
-var openAIProviderOptionsSchema = import_zod10.z.object({
+var openAITranscriptionProviderOptions = import_zod10.z.object({
+  /**
+   * Additional information to include in the transcription response.
+   */
   include: import_zod10.z.array(import_zod10.z.string()).nullish(),
+  /**
+   * The language of the input audio in ISO-639-1 format.
+   */
   language: import_zod10.z.string().nullish(),
+  /**
+   * An optional text to guide the model's style or continue a previous audio segment.
+   */
   prompt: import_zod10.z.string().nullish(),
-  temperature: import_zod10.z.number().min(0).max(1).nullish().default(0),
-  timestampGranularities: import_zod10.z.array(import_zod10.z.enum(["word", "segment"])).nullish().default(["segment"])
+  /**
+   * The sampling temperature, between 0 and 1.
+   * @default 0
+   */
+  temperature: import_zod10.z.number().min(0).max(1).default(0).nullish(),
+  /**
+   * The timestamp granularities to populate for this transcription.
+   * @default ['segment']
+   */
+  timestampGranularities: import_zod10.z.array(import_zod10.z.enum(["word", "segment"])).default(["segment"]).nullish()
 });
+
+// src/openai-transcription-model.ts
 var languageMap = {
   afrikaans: "af",
   arabic: "ar",
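
Note: the transcription provider options now live in their own module (`openai-transcription-options.ts`) and carry doc comments. A hedged sketch of passing them through, assuming `experimental_transcribe` from the `ai` package and the provider's `transcription()` factory (neither is shown in this diff):

```ts
import { openai } from '@ai-sdk/openai';
import { experimental_transcribe as transcribe } from 'ai';
import { readFile } from 'node:fs/promises';

// Sketch: language, prompt, temperature, timestampGranularities, and include
// map onto openAITranscriptionProviderOptions defined above.
const result = await transcribe({
  model: openai.transcription('whisper-1'),
  audio: await readFile('./meeting.wav'),
  providerOptions: {
    openai: {
      language: 'en',
      temperature: 0,
      timestampGranularities: ['word'],
    },
  },
});
console.log(result.text);
```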
@@ -1515,12 +1539,11 @@ var OpenAITranscriptionModel = class {
     mediaType,
     providerOptions
   }) {
-    var _a, _b, _c, _d, _e;
     const warnings = [];
     const openAIOptions = await (0, import_provider_utils7.parseProviderOptions)({
       provider: "openai",
       providerOptions,
-      schema: openAIProviderOptionsSchema
+      schema: openAITranscriptionProviderOptions
     });
     const formData = new FormData();
     const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([(0, import_provider_utils7.convertBase64ToUint8Array)(audio)]);
@@ -1528,15 +1551,14 @@ var OpenAITranscriptionModel = class {
     formData.append("file", new File([blob], "audio", { type: mediaType }));
     if (openAIOptions) {
       const transcriptionModelOptions = {
-        include: (_a = openAIOptions.include) != null ? _a : void 0,
-        language: (_b = openAIOptions.language) != null ? _b : void 0,
-        prompt: (_c = openAIOptions.prompt) != null ? _c : void 0,
-        temperature: (_d = openAIOptions.temperature) != null ? _d : void 0,
-        timestamp_granularities: (_e = openAIOptions.timestampGranularities) != null ? _e : void 0
+        include: openAIOptions.include,
+        language: openAIOptions.language,
+        prompt: openAIOptions.prompt,
+        temperature: openAIOptions.temperature,
+        timestamp_granularities: openAIOptions.timestampGranularities
       };
-      for (const key in transcriptionModelOptions) {
-        const value = transcriptionModelOptions[key];
-        if (value !== void 0) {
+      for (const [key, value] of Object.entries(transcriptionModelOptions)) {
+        if (value != null) {
           formData.append(key, String(value));
         }
       }
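
Note: the form-data loop now iterates with `Object.entries` and skips values with `!= null`, so both `null` and `undefined` options are dropped (previously only `undefined` was filtered). A standalone TypeScript illustration of that filtering behaviour (not SDK code):

```ts
// Sketch: `value != null` excludes both null and undefined,
// matching the new Object.entries loop above.
const options: Record<string, unknown> = {
  language: 'en',
  prompt: undefined,
  temperature: null,
  timestamp_granularities: ['word'],
};

const form = new FormData();
for (const [key, value] of Object.entries(options)) {
  if (value != null) {
    form.append(key, String(value));
  }
}
console.log([...form.keys()]); // ["language", "timestamp_granularities"]
```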
@@ -1588,22 +1610,22 @@ var OpenAITranscriptionModel = class {
     };
   }
 };
-var openaiTranscriptionResponseSchema = import_zod10.z.object({
-  text: import_zod10.z.string(),
-  language: import_zod10.z.string().nullish(),
-  duration: import_zod10.z.number().nullish(),
-  words: import_zod10.z.array(
-    import_zod10.z.object({
-      word: import_zod10.z.string(),
-      start: import_zod10.z.number(),
-      end: import_zod10.z.number()
+var openaiTranscriptionResponseSchema = import_zod11.z.object({
+  text: import_zod11.z.string(),
+  language: import_zod11.z.string().nullish(),
+  duration: import_zod11.z.number().nullish(),
+  words: import_zod11.z.array(
+    import_zod11.z.object({
+      word: import_zod11.z.string(),
+      start: import_zod11.z.number(),
+      end: import_zod11.z.number()
     })
   ).nullish()
 });
 
 // src/responses/openai-responses-language-model.ts
 var import_provider_utils8 = require("@ai-sdk/provider-utils");
-var import_zod11 = require("zod");
+var import_zod12 = require("zod");
 
 // src/responses/convert-to-openai-responses-messages.ts
 var import_provider6 = require("@ai-sdk/provider");
@@ -1812,13 +1834,11 @@ function prepareResponsesTools({
 var OpenAIResponsesLanguageModel = class {
   constructor(modelId, config) {
     this.specificationVersion = "v2";
-    this.modelId = modelId;
-    this.config = config;
-  }
-  async getSupportedUrls() {
-    return {
+    this.supportedUrls = {
       "image/*": [/^https?:\/\/.*$/]
     };
+    this.modelId = modelId;
+    this.config = config;
   }
   get provider() {
     return this.config.provider;
@@ -1964,55 +1984,55 @@ var OpenAIResponsesLanguageModel = class {
       body,
       failedResponseHandler: openaiFailedResponseHandler,
       successfulResponseHandler: (0, import_provider_utils8.createJsonResponseHandler)(
-        import_zod11.z.object({
-          id: import_zod11.z.string(),
-          created_at: import_zod11.z.number(),
-          model: import_zod11.z.string(),
-          output: import_zod11.z.array(
-            import_zod11.z.discriminatedUnion("type", [
-              import_zod11.z.object({
-                type: import_zod11.z.literal("message"),
-                role: import_zod11.z.literal("assistant"),
-                content: import_zod11.z.array(
-                  import_zod11.z.object({
-                    type: import_zod11.z.literal("output_text"),
-                    text: import_zod11.z.string(),
-                    annotations: import_zod11.z.array(
-                      import_zod11.z.object({
-                        type: import_zod11.z.literal("url_citation"),
-                        start_index: import_zod11.z.number(),
-                        end_index: import_zod11.z.number(),
-                        url: import_zod11.z.string(),
-                        title: import_zod11.z.string()
+        import_zod12.z.object({
+          id: import_zod12.z.string(),
+          created_at: import_zod12.z.number(),
+          model: import_zod12.z.string(),
+          output: import_zod12.z.array(
+            import_zod12.z.discriminatedUnion("type", [
+              import_zod12.z.object({
+                type: import_zod12.z.literal("message"),
+                role: import_zod12.z.literal("assistant"),
+                content: import_zod12.z.array(
+                  import_zod12.z.object({
+                    type: import_zod12.z.literal("output_text"),
+                    text: import_zod12.z.string(),
+                    annotations: import_zod12.z.array(
+                      import_zod12.z.object({
+                        type: import_zod12.z.literal("url_citation"),
+                        start_index: import_zod12.z.number(),
+                        end_index: import_zod12.z.number(),
+                        url: import_zod12.z.string(),
+                        title: import_zod12.z.string()
                       })
                     )
                   })
                 )
               }),
-              import_zod11.z.object({
-                type: import_zod11.z.literal("function_call"),
-                call_id: import_zod11.z.string(),
-                name: import_zod11.z.string(),
-                arguments: import_zod11.z.string()
+              import_zod12.z.object({
+                type: import_zod12.z.literal("function_call"),
+                call_id: import_zod12.z.string(),
+                name: import_zod12.z.string(),
+                arguments: import_zod12.z.string()
               }),
-              import_zod11.z.object({
-                type: import_zod11.z.literal("web_search_call")
+              import_zod12.z.object({
+                type: import_zod12.z.literal("web_search_call")
               }),
-              import_zod11.z.object({
-                type: import_zod11.z.literal("computer_call")
+              import_zod12.z.object({
+                type: import_zod12.z.literal("computer_call")
               }),
-              import_zod11.z.object({
-                type: import_zod11.z.literal("reasoning"),
-                summary: import_zod11.z.array(
-                  import_zod11.z.object({
-                    type: import_zod11.z.literal("summary_text"),
-                    text: import_zod11.z.string()
+              import_zod12.z.object({
+                type: import_zod12.z.literal("reasoning"),
+                summary: import_zod12.z.array(
+                  import_zod12.z.object({
+                    type: import_zod12.z.literal("summary_text"),
+                    text: import_zod12.z.string()
                   })
                 )
               })
             ])
           ),
-          incomplete_details: import_zod11.z.object({ reason: import_zod11.z.string() }).nullable(),
+          incomplete_details: import_zod12.z.object({ reason: import_zod12.z.string() }).nullable(),
           usage: usageSchema
         })
       ),
@@ -2226,86 +2246,86 @@ var OpenAIResponsesLanguageModel = class {
     };
   }
 };
-var usageSchema = import_zod11.z.object({
-  input_tokens: import_zod11.z.number(),
-  input_tokens_details: import_zod11.z.object({ cached_tokens: import_zod11.z.number().nullish() }).nullish(),
-  output_tokens: import_zod11.z.number(),
-  output_tokens_details: import_zod11.z.object({ reasoning_tokens: import_zod11.z.number().nullish() }).nullish()
+var usageSchema = import_zod12.z.object({
+  input_tokens: import_zod12.z.number(),
+  input_tokens_details: import_zod12.z.object({ cached_tokens: import_zod12.z.number().nullish() }).nullish(),
+  output_tokens: import_zod12.z.number(),
+  output_tokens_details: import_zod12.z.object({ reasoning_tokens: import_zod12.z.number().nullish() }).nullish()
 });
-var textDeltaChunkSchema = import_zod11.z.object({
-  type: import_zod11.z.literal("response.output_text.delta"),
-  delta: import_zod11.z.string()
+var textDeltaChunkSchema = import_zod12.z.object({
+  type: import_zod12.z.literal("response.output_text.delta"),
+  delta: import_zod12.z.string()
 });
-var responseFinishedChunkSchema = import_zod11.z.object({
-  type: import_zod11.z.enum(["response.completed", "response.incomplete"]),
-  response: import_zod11.z.object({
-    incomplete_details: import_zod11.z.object({ reason: import_zod11.z.string() }).nullish(),
+var responseFinishedChunkSchema = import_zod12.z.object({
+  type: import_zod12.z.enum(["response.completed", "response.incomplete"]),
+  response: import_zod12.z.object({
+    incomplete_details: import_zod12.z.object({ reason: import_zod12.z.string() }).nullish(),
     usage: usageSchema
   })
 });
-var responseCreatedChunkSchema = import_zod11.z.object({
-  type: import_zod11.z.literal("response.created"),
-  response: import_zod11.z.object({
-    id: import_zod11.z.string(),
-    created_at: import_zod11.z.number(),
-    model: import_zod11.z.string()
+var responseCreatedChunkSchema = import_zod12.z.object({
+  type: import_zod12.z.literal("response.created"),
+  response: import_zod12.z.object({
+    id: import_zod12.z.string(),
+    created_at: import_zod12.z.number(),
+    model: import_zod12.z.string()
   })
 });
-var responseOutputItemDoneSchema = import_zod11.z.object({
-  type: import_zod11.z.literal("response.output_item.done"),
-  output_index: import_zod11.z.number(),
-  item: import_zod11.z.discriminatedUnion("type", [
-    import_zod11.z.object({
-      type: import_zod11.z.literal("message")
+var responseOutputItemDoneSchema = import_zod12.z.object({
+  type: import_zod12.z.literal("response.output_item.done"),
+  output_index: import_zod12.z.number(),
+  item: import_zod12.z.discriminatedUnion("type", [
+    import_zod12.z.object({
+      type: import_zod12.z.literal("message")
     }),
-    import_zod11.z.object({
-      type: import_zod11.z.literal("function_call"),
-      id: import_zod11.z.string(),
-      call_id: import_zod11.z.string(),
-      name: import_zod11.z.string(),
-      arguments: import_zod11.z.string(),
-      status: import_zod11.z.literal("completed")
+    import_zod12.z.object({
+      type: import_zod12.z.literal("function_call"),
+      id: import_zod12.z.string(),
+      call_id: import_zod12.z.string(),
+      name: import_zod12.z.string(),
+      arguments: import_zod12.z.string(),
+      status: import_zod12.z.literal("completed")
     })
   ])
 });
-var responseFunctionCallArgumentsDeltaSchema = import_zod11.z.object({
-  type: import_zod11.z.literal("response.function_call_arguments.delta"),
-  item_id: import_zod11.z.string(),
-  output_index: import_zod11.z.number(),
-  delta: import_zod11.z.string()
+var responseFunctionCallArgumentsDeltaSchema = import_zod12.z.object({
+  type: import_zod12.z.literal("response.function_call_arguments.delta"),
+  item_id: import_zod12.z.string(),
+  output_index: import_zod12.z.number(),
+  delta: import_zod12.z.string()
 });
-var responseOutputItemAddedSchema = import_zod11.z.object({
-  type: import_zod11.z.literal("response.output_item.added"),
-  output_index: import_zod11.z.number(),
-  item: import_zod11.z.discriminatedUnion("type", [
-    import_zod11.z.object({
-      type: import_zod11.z.literal("message")
+var responseOutputItemAddedSchema = import_zod12.z.object({
+  type: import_zod12.z.literal("response.output_item.added"),
+  output_index: import_zod12.z.number(),
+  item: import_zod12.z.discriminatedUnion("type", [
+    import_zod12.z.object({
+      type: import_zod12.z.literal("message")
     }),
-    import_zod11.z.object({
-      type: import_zod11.z.literal("function_call"),
-      id: import_zod11.z.string(),
-      call_id: import_zod11.z.string(),
-      name: import_zod11.z.string(),
-      arguments: import_zod11.z.string()
+    import_zod12.z.object({
+      type: import_zod12.z.literal("function_call"),
+      id: import_zod12.z.string(),
+      call_id: import_zod12.z.string(),
+      name: import_zod12.z.string(),
+      arguments: import_zod12.z.string()
    })
   ])
 });
-var responseAnnotationAddedSchema = import_zod11.z.object({
-  type: import_zod11.z.literal("response.output_text.annotation.added"),
-  annotation: import_zod11.z.object({
-    type: import_zod11.z.literal("url_citation"),
-    url: import_zod11.z.string(),
-    title: import_zod11.z.string()
+var responseAnnotationAddedSchema = import_zod12.z.object({
+  type: import_zod12.z.literal("response.output_text.annotation.added"),
+  annotation: import_zod12.z.object({
+    type: import_zod12.z.literal("url_citation"),
+    url: import_zod12.z.string(),
+    title: import_zod12.z.string()
   })
 });
-var responseReasoningSummaryTextDeltaSchema = import_zod11.z.object({
-  type: import_zod11.z.literal("response.reasoning_summary_text.delta"),
-  item_id: import_zod11.z.string(),
-  output_index: import_zod11.z.number(),
-  summary_index: import_zod11.z.number(),
-  delta: import_zod11.z.string()
+var responseReasoningSummaryTextDeltaSchema = import_zod12.z.object({
+  type: import_zod12.z.literal("response.reasoning_summary_text.delta"),
+  item_id: import_zod12.z.string(),
+  output_index: import_zod12.z.number(),
+  summary_index: import_zod12.z.number(),
+  delta: import_zod12.z.string()
 });
-var openaiResponsesChunkSchema = import_zod11.z.union([
+var openaiResponsesChunkSchema = import_zod12.z.union([
   textDeltaChunkSchema,
   responseFinishedChunkSchema,
   responseCreatedChunkSchema,
@@ -2314,7 +2334,7 @@ var openaiResponsesChunkSchema = import_zod11.z.union([
   responseOutputItemAddedSchema,
   responseAnnotationAddedSchema,
   responseReasoningSummaryTextDeltaSchema,
-  import_zod11.z.object({ type: import_zod11.z.string() }).passthrough()
+  import_zod12.z.object({ type: import_zod12.z.string() }).passthrough()
   // fallback for unknown chunks
 ]);
 function isTextDeltaChunk(chunk) {
@@ -2362,24 +2382,24 @@ function getResponsesModelConfig(modelId) {
     requiredAutoTruncation: false
   };
 }
-var openaiResponsesProviderOptionsSchema = import_zod11.z.object({
-  metadata: import_zod11.z.any().nullish(),
-  parallelToolCalls: import_zod11.z.boolean().nullish(),
-  previousResponseId: import_zod11.z.string().nullish(),
-  store: import_zod11.z.boolean().nullish(),
-  user: import_zod11.z.string().nullish(),
-  reasoningEffort: import_zod11.z.string().nullish(),
-  strictSchemas: import_zod11.z.boolean().nullish(),
-  instructions: import_zod11.z.string().nullish(),
-  reasoningSummary: import_zod11.z.string().nullish()
+var openaiResponsesProviderOptionsSchema = import_zod12.z.object({
+  metadata: import_zod12.z.any().nullish(),
+  parallelToolCalls: import_zod12.z.boolean().nullish(),
+  previousResponseId: import_zod12.z.string().nullish(),
+  store: import_zod12.z.boolean().nullish(),
+  user: import_zod12.z.string().nullish(),
+  reasoningEffort: import_zod12.z.string().nullish(),
+  strictSchemas: import_zod12.z.boolean().nullish(),
+  instructions: import_zod12.z.string().nullish(),
+  reasoningSummary: import_zod12.z.string().nullish()
 });
 
 // src/openai-speech-model.ts
 var import_provider_utils9 = require("@ai-sdk/provider-utils");
-var import_zod12 = require("zod");
-var OpenAIProviderOptionsSchema = import_zod12.z.object({
-  instructions: import_zod12.z.string().nullish(),
-  speed: import_zod12.z.number().min(0.25).max(4).default(1).nullish()
+var import_zod13 = require("zod");
+var OpenAIProviderOptionsSchema = import_zod13.z.object({
+  instructions: import_zod13.z.string().nullish(),
+  speed: import_zod13.z.number().min(0.25).max(4).default(1).nullish()
 });
 var OpenAISpeechModel = class {
   constructor(modelId, config) {
@@ -2489,7 +2509,7 @@ function createOpenAI(options = {}) {
     "OpenAI-Project": options.project,
     ...options.headers
   });
-  const createChatModel = (modelId, settings = {}) => new OpenAIChatLanguageModel(modelId, settings, {
+  const createChatModel = (modelId) => new OpenAIChatLanguageModel(modelId, {
     provider: `${providerName}.chat`,
     url: ({ path }) => `${baseURL}${path}`,
     headers: getHeaders,
@@ -2503,7 +2523,7 @@ function createOpenAI(options = {}) {
     compatibility,
     fetch: options.fetch
   });
-  const createEmbeddingModel = (modelId, settings = {}) => new OpenAIEmbeddingModel(modelId, settings, {
+  const createEmbeddingModel = (modelId) => new OpenAIEmbeddingModel(modelId, {
     provider: `${providerName}.embedding`,
     url: ({ path }) => `${baseURL}${path}`,
     headers: getHeaders,
@@ -2527,7 +2547,7 @@ function createOpenAI(options = {}) {
     headers: getHeaders,
     fetch: options.fetch
   });
-  const createLanguageModel = (modelId, settings) => {
+  const createLanguageModel = (modelId) => {
     if (new.target) {
       throw new Error(
         "The OpenAI model function cannot be called with the new keyword."
@@ -2536,7 +2556,7 @@ function createOpenAI(options = {}) {
     if (modelId === "gpt-3.5-turbo-instruct") {
       return createCompletionModel(modelId);
     }
-    return createChatModel(modelId, settings);
+    return createChatModel(modelId);
   };
   const createResponsesModel = (modelId) => {
     return new OpenAIResponsesLanguageModel(modelId, {
@@ -2546,8 +2566,8 @@ function createOpenAI(options = {}) {
       fetch: options.fetch
     });
   };
-  const provider = function(modelId, settings) {
-    return createLanguageModel(modelId, settings);
+  const provider = function(modelId) {
+    return createLanguageModel(modelId);
   };
   provider.languageModel = createLanguageModel;
   provider.chat = createChatModel;
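
Note: across the factory, the per-model `settings` argument is gone. `createChatModel`, `createEmbeddingModel`, `createLanguageModel`, and the callable provider itself now take only a model ID, with request-level tuning moving to provider options. A before/after sketch (the old call shape is inferred from the removed lines above):

```ts
import { createOpenAI } from '@ai-sdk/openai';

const openai = createOpenAI({ apiKey: process.env.OPENAI_API_KEY });

// Before (canary.12): per-model settings as a second argument.
// const model = openai('gpt-4o', { structuredOutputs: false });

// After (canary.14): the provider is called with the model ID only;
// flags such as structuredOutputs are supplied per request via providerOptions.
const model = openai('gpt-4o');
console.log(model.modelId);
```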