@ai-sdk/openai 2.0.0-canary.11 → 2.0.0-canary.12

This diff shows the changes between these two publicly released versions of the package, as they appear in their public registry, and is provided for informational purposes only.
@@ -27,7 +27,9 @@ __export(internal_exports, {
   OpenAIResponsesLanguageModel: () => OpenAIResponsesLanguageModel,
   OpenAISpeechModel: () => OpenAISpeechModel,
   OpenAITranscriptionModel: () => OpenAITranscriptionModel,
+  hasDefaultResponseFormat: () => hasDefaultResponseFormat,
   modelMaxImagesPerCall: () => modelMaxImagesPerCall,
+  openaiCompletionProviderOptions: () => openaiCompletionProviderOptions,
   openaiEmbeddingProviderOptions: () => openaiEmbeddingProviderOptions,
   openaiProviderOptions: () => openaiProviderOptions
 });
@@ -366,7 +368,7 @@ var OpenAIChatLanguageModel = class {
       "image/*": [/^https?:\/\/.*$/]
     };
   }
-  getArgs({
+  async getArgs({
     prompt,
     maxOutputTokens,
     temperature,
@@ -383,7 +385,7 @@ var OpenAIChatLanguageModel = class {
   }) {
     var _a, _b, _c;
     const warnings = [];
-    const openaiOptions = (_a = (0, import_provider_utils3.parseProviderOptions)({
+    const openaiOptions = (_a = await (0, import_provider_utils3.parseProviderOptions)({
       provider: "openai",
       providerOptions,
       schema: openaiProviderOptions
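Note: parseProviderOptions from @ai-sdk/provider-utils now returns a promise, so getArgs becomes async and every call site awaits it; the same change repeats below for the completion, embedding, transcription, speech, and responses models. A minimal sketch of the new calling convention for code that uses the helper directly; the option schema here is hypothetical:

    import { parseProviderOptions } from "@ai-sdk/provider-utils";
    import { z } from "zod";

    // Hypothetical option schema, for illustration only.
    const exampleProviderOptions = z.object({
      user: z.string().optional(),
    });

    // parseProviderOptions is now async, so the result must be awaited
    // (in the provider this happens inside the async getArgs methods).
    const options = await parseProviderOptions({
      provider: "openai",
      providerOptions: { openai: { user: "user-1234" } },
      schema: exampleProviderOptions,
    });
    console.log(options?.user);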
@@ -521,7 +523,7 @@ var OpenAIChatLanguageModel = class {
   }
   async doGenerate(options) {
     var _a, _b, _c, _d, _e, _f, _g, _h;
-    const { args: body, warnings } = this.getArgs(options);
+    const { args: body, warnings } = await this.getArgs(options);
     const {
       responseHeaders,
       value: response,
@@ -588,7 +590,7 @@ var OpenAIChatLanguageModel = class {
     };
   }
   async doStream(options) {
-    const { args, warnings } = this.getArgs(options);
+    const { args, warnings } = await this.getArgs(options);
     const body = {
       ...args,
       stream: true,
@@ -878,7 +880,7 @@ var reasoningModels = {
 
 // src/openai-completion-language-model.ts
 var import_provider_utils4 = require("@ai-sdk/provider-utils");
-var import_zod4 = require("zod");
+var import_zod5 = require("zod");
 
 // src/convert-to-openai-completion-prompt.ts
 var import_provider4 = require("@ai-sdk/provider");
@@ -959,14 +961,49 @@ ${user}:`]
   };
 }
 
+// src/openai-completion-options.ts
+var import_zod4 = require("zod");
+var openaiCompletionProviderOptions = import_zod4.z.object({
+  /**
+  Echo back the prompt in addition to the completion.
+  */
+  echo: import_zod4.z.boolean().optional(),
+  /**
+  Modify the likelihood of specified tokens appearing in the completion.
+
+  Accepts a JSON object that maps tokens (specified by their token ID in
+  the GPT tokenizer) to an associated bias value from -100 to 100. You
+  can use this tokenizer tool to convert text to token IDs. Mathematically,
+  the bias is added to the logits generated by the model prior to sampling.
+  The exact effect will vary per model, but values between -1 and 1 should
+  decrease or increase likelihood of selection; values like -100 or 100
+  should result in a ban or exclusive selection of the relevant token.
+
+  As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
+  token from being generated.
+  */
+  logitBias: import_zod4.z.record(import_zod4.z.string(), import_zod4.z.number()).optional(),
+  /**
+  The suffix that comes after a completion of inserted text.
+  */
+  suffix: import_zod4.z.string().optional(),
+  /**
+  A unique identifier representing your end-user, which can help OpenAI to
+  monitor and detect abuse. Learn more.
+  */
+  user: import_zod4.z.string().optional()
+});
+
 // src/openai-completion-language-model.ts
 var OpenAICompletionLanguageModel = class {
-  constructor(modelId, settings, config) {
+  constructor(modelId, config) {
     this.specificationVersion = "v2";
     this.modelId = modelId;
-    this.settings = settings;
     this.config = config;
   }
+  get providerOptionsName() {
+    return this.config.provider.split(".")[0].trim();
+  }
   get provider() {
     return this.config.provider;
   }
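Note: the completion model no longer takes a settings object in its constructor. The echo, logitBias, suffix, and user settings are now validated by the new openaiCompletionProviderOptions schema and read from providerOptions at call time (under "openai", or under the provider's own prefix via the new providerOptionsName getter). A rough usage sketch, assuming the AI SDK's generateText function and the openai.completion factory:

    import { generateText } from "ai";
    import { openai } from "@ai-sdk/openai";

    // Completion-specific settings now travel via providerOptions
    // instead of the removed constructor settings object.
    const { text } = await generateText({
      model: openai.completion("gpt-3.5-turbo-instruct"),
      prompt: "Write a haiku about package diffs.",
      providerOptions: {
        openai: {
          echo: false,
          logitBias: { "50256": -100 }, // discourage <|endoftext|>
          suffix: "\n",
          user: "user-1234",
        },
      },
    });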
@@ -975,7 +1012,7 @@ var OpenAICompletionLanguageModel = class {
       // no supported urls for completion models
     };
   }
-  getArgs({
+  async getArgs({
     inputFormat,
     prompt,
     maxOutputTokens,
@@ -988,9 +1025,22 @@ var OpenAICompletionLanguageModel = class {
     responseFormat,
     tools,
     toolChoice,
-    seed
+    seed,
+    providerOptions
   }) {
     const warnings = [];
+    const openaiOptions = {
+      ...await (0, import_provider_utils4.parseProviderOptions)({
+        provider: "openai",
+        providerOptions,
+        schema: openaiCompletionProviderOptions
+      }),
+      ...await (0, import_provider_utils4.parseProviderOptions)({
+        provider: this.providerOptionsName,
+        providerOptions,
+        schema: openaiCompletionProviderOptions
+      })
+    };
     if (topK != null) {
       warnings.push({ type: "unsupported-setting", setting: "topK" });
     }
@@ -1014,10 +1064,10 @@ var OpenAICompletionLanguageModel = class {
       // model id:
       model: this.modelId,
       // model specific settings:
-      echo: this.settings.echo,
-      logit_bias: this.settings.logitBias,
-      suffix: this.settings.suffix,
-      user: this.settings.user,
+      echo: openaiOptions.echo,
+      logit_bias: openaiOptions.logitBias,
+      suffix: openaiOptions.suffix,
+      user: openaiOptions.user,
       // standardized settings:
       max_tokens: maxOutputTokens,
       temperature,
@@ -1034,7 +1084,7 @@ var OpenAICompletionLanguageModel = class {
     };
   }
   async doGenerate(options) {
-    const { args, warnings } = this.getArgs(options);
+    const { args, warnings } = await this.getArgs(options);
     const {
       responseHeaders,
       value: response,
@@ -1071,7 +1121,7 @@ var OpenAICompletionLanguageModel = class {
     };
   }
   async doStream(options) {
-    const { args, warnings } = this.getArgs(options);
+    const { args, warnings } = await this.getArgs(options);
     const body = {
       ...args,
       stream: true,
@@ -1152,36 +1202,36 @@ var OpenAICompletionLanguageModel = class {
     };
   }
 };
-var openaiCompletionResponseSchema = import_zod4.z.object({
-  id: import_zod4.z.string().nullish(),
-  created: import_zod4.z.number().nullish(),
-  model: import_zod4.z.string().nullish(),
-  choices: import_zod4.z.array(
-    import_zod4.z.object({
-      text: import_zod4.z.string(),
-      finish_reason: import_zod4.z.string()
+var openaiCompletionResponseSchema = import_zod5.z.object({
+  id: import_zod5.z.string().nullish(),
+  created: import_zod5.z.number().nullish(),
+  model: import_zod5.z.string().nullish(),
+  choices: import_zod5.z.array(
+    import_zod5.z.object({
+      text: import_zod5.z.string(),
+      finish_reason: import_zod5.z.string()
     })
   ),
-  usage: import_zod4.z.object({
-    prompt_tokens: import_zod4.z.number(),
-    completion_tokens: import_zod4.z.number()
+  usage: import_zod5.z.object({
+    prompt_tokens: import_zod5.z.number(),
+    completion_tokens: import_zod5.z.number()
   })
 });
-var openaiCompletionChunkSchema = import_zod4.z.union([
-  import_zod4.z.object({
-    id: import_zod4.z.string().nullish(),
-    created: import_zod4.z.number().nullish(),
-    model: import_zod4.z.string().nullish(),
-    choices: import_zod4.z.array(
-      import_zod4.z.object({
-        text: import_zod4.z.string(),
-        finish_reason: import_zod4.z.string().nullish(),
-        index: import_zod4.z.number()
+var openaiCompletionChunkSchema = import_zod5.z.union([
+  import_zod5.z.object({
+    id: import_zod5.z.string().nullish(),
+    created: import_zod5.z.number().nullish(),
+    model: import_zod5.z.string().nullish(),
+    choices: import_zod5.z.array(
+      import_zod5.z.object({
+        text: import_zod5.z.string(),
+        finish_reason: import_zod5.z.string().nullish(),
+        index: import_zod5.z.number()
       })
     ),
-    usage: import_zod4.z.object({
-      prompt_tokens: import_zod4.z.number(),
-      completion_tokens: import_zod4.z.number()
+    usage: import_zod5.z.object({
+      prompt_tokens: import_zod5.z.number(),
+      completion_tokens: import_zod5.z.number()
     }).nullish()
   }),
   openaiErrorDataSchema
@@ -1190,21 +1240,21 @@ var openaiCompletionChunkSchema = import_zod4.z.union([
 // src/openai-embedding-model.ts
 var import_provider5 = require("@ai-sdk/provider");
 var import_provider_utils5 = require("@ai-sdk/provider-utils");
-var import_zod6 = require("zod");
+var import_zod7 = require("zod");
 
 // src/openai-embedding-options.ts
-var import_zod5 = require("zod");
-var openaiEmbeddingProviderOptions = import_zod5.z.object({
+var import_zod6 = require("zod");
+var openaiEmbeddingProviderOptions = import_zod6.z.object({
   /**
   The number of dimensions the resulting output embeddings should have.
   Only supported in text-embedding-3 and later models.
   */
-  dimensions: import_zod5.z.number().optional(),
+  dimensions: import_zod6.z.number().optional(),
   /**
   A unique identifier representing your end-user, which can help OpenAI to
   monitor and detect abuse. Learn more.
   */
-  user: import_zod5.z.string().optional()
+  user: import_zod6.z.string().optional()
 });
 
 // src/openai-embedding-model.ts
@@ -1241,7 +1291,7 @@ var OpenAIEmbeddingModel = class {
         values
       });
     }
-    const openaiOptions = (_a = (0, import_provider_utils5.parseProviderOptions)({
+    const openaiOptions = (_a = await (0, import_provider_utils5.parseProviderOptions)({
       provider: "openai",
       providerOptions,
       schema: openaiEmbeddingProviderOptions
@@ -1277,20 +1327,22 @@ var OpenAIEmbeddingModel = class {
     };
   }
 };
-var openaiTextEmbeddingResponseSchema = import_zod6.z.object({
-  data: import_zod6.z.array(import_zod6.z.object({ embedding: import_zod6.z.array(import_zod6.z.number()) })),
-  usage: import_zod6.z.object({ prompt_tokens: import_zod6.z.number() }).nullish()
+var openaiTextEmbeddingResponseSchema = import_zod7.z.object({
+  data: import_zod7.z.array(import_zod7.z.object({ embedding: import_zod7.z.array(import_zod7.z.number()) })),
+  usage: import_zod7.z.object({ prompt_tokens: import_zod7.z.number() }).nullish()
 });
 
 // src/openai-image-model.ts
 var import_provider_utils6 = require("@ai-sdk/provider-utils");
-var import_zod7 = require("zod");
+var import_zod8 = require("zod");
 
 // src/openai-image-settings.ts
 var modelMaxImagesPerCall = {
   "dall-e-3": 1,
-  "dall-e-2": 10
+  "dall-e-2": 10,
+  "gpt-image-1": 10
 };
+var hasDefaultResponseFormat = /* @__PURE__ */ new Set(["gpt-image-1"]);
 
 // src/openai-image-model.ts
 var OpenAIImageModel = class {
@@ -1342,7 +1394,7 @@ var OpenAIImageModel = class {
         n,
         size,
         ...(_d = providerOptions.openai) != null ? _d : {},
-        response_format: "b64_json"
+        ...!hasDefaultResponseFormat.has(this.modelId) ? { response_format: "b64_json" } : {}
       },
       failedResponseHandler: openaiFailedResponseHandler,
       successfulResponseHandler: (0, import_provider_utils6.createJsonResponseHandler)(
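Note: gpt-image-1 is added to modelMaxImagesPerCall (10 images per call) and to the new hasDefaultResponseFormat set, so response_format: "b64_json" is only sent for models that accept it; gpt-image-1 returns base64 data by default and does not accept that parameter. A rough usage sketch, assuming the AI SDK's experimental_generateImage helper:

    import { experimental_generateImage as generateImage } from "ai";
    import { openai } from "@ai-sdk/openai";

    // gpt-image-1 is now recognized; the provider omits response_format for it.
    const { images } = await generateImage({
      model: openai.image("gpt-image-1"),
      prompt: "A watercolor fox in a misty forest",
      n: 2,
    });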
@@ -1362,19 +1414,19 @@ var OpenAIImageModel = class {
     };
   }
 };
-var openaiImageResponseSchema = import_zod7.z.object({
-  data: import_zod7.z.array(import_zod7.z.object({ b64_json: import_zod7.z.string() }))
+var openaiImageResponseSchema = import_zod8.z.object({
+  data: import_zod8.z.array(import_zod8.z.object({ b64_json: import_zod8.z.string() }))
 });
 
 // src/openai-transcription-model.ts
 var import_provider_utils7 = require("@ai-sdk/provider-utils");
-var import_zod8 = require("zod");
-var openAIProviderOptionsSchema = import_zod8.z.object({
-  include: import_zod8.z.array(import_zod8.z.string()).nullish(),
-  language: import_zod8.z.string().nullish(),
-  prompt: import_zod8.z.string().nullish(),
-  temperature: import_zod8.z.number().min(0).max(1).nullish().default(0),
-  timestampGranularities: import_zod8.z.array(import_zod8.z.enum(["word", "segment"])).nullish().default(["segment"])
+var import_zod9 = require("zod");
+var openAIProviderOptionsSchema = import_zod9.z.object({
+  include: import_zod9.z.array(import_zod9.z.string()).nullish(),
+  language: import_zod9.z.string().nullish(),
+  prompt: import_zod9.z.string().nullish(),
+  temperature: import_zod9.z.number().min(0).max(1).nullish().default(0),
+  timestampGranularities: import_zod9.z.array(import_zod9.z.enum(["word", "segment"])).nullish().default(["segment"])
 });
 var languageMap = {
   afrikaans: "af",
@@ -1444,14 +1496,14 @@ var OpenAITranscriptionModel = class {
   get provider() {
     return this.config.provider;
   }
-  getArgs({
+  async getArgs({
     audio,
     mediaType,
     providerOptions
   }) {
     var _a, _b, _c, _d, _e;
     const warnings = [];
-    const openAIOptions = (0, import_provider_utils7.parseProviderOptions)({
+    const openAIOptions = await (0, import_provider_utils7.parseProviderOptions)({
       provider: "openai",
       providerOptions,
       schema: openAIProviderOptionsSchema
@@ -1483,7 +1535,7 @@ var OpenAITranscriptionModel = class {
   async doGenerate(options) {
     var _a, _b, _c, _d, _e, _f;
     const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
-    const { formData, warnings } = this.getArgs(options);
+    const { formData, warnings } = await this.getArgs(options);
     const {
       value: response,
       responseHeaders,
@@ -1522,25 +1574,25 @@ var OpenAITranscriptionModel = class {
     };
   }
 };
-var openaiTranscriptionResponseSchema = import_zod8.z.object({
-  text: import_zod8.z.string(),
-  language: import_zod8.z.string().nullish(),
-  duration: import_zod8.z.number().nullish(),
-  words: import_zod8.z.array(
-    import_zod8.z.object({
-      word: import_zod8.z.string(),
-      start: import_zod8.z.number(),
-      end: import_zod8.z.number()
+var openaiTranscriptionResponseSchema = import_zod9.z.object({
+  text: import_zod9.z.string(),
+  language: import_zod9.z.string().nullish(),
+  duration: import_zod9.z.number().nullish(),
+  words: import_zod9.z.array(
+    import_zod9.z.object({
+      word: import_zod9.z.string(),
+      start: import_zod9.z.number(),
+      end: import_zod9.z.number()
     })
   ).nullish()
 });
 
 // src/openai-speech-model.ts
 var import_provider_utils8 = require("@ai-sdk/provider-utils");
-var import_zod9 = require("zod");
-var OpenAIProviderOptionsSchema = import_zod9.z.object({
-  instructions: import_zod9.z.string().nullish(),
-  speed: import_zod9.z.number().min(0.25).max(4).default(1).nullish()
+var import_zod10 = require("zod");
+var OpenAIProviderOptionsSchema = import_zod10.z.object({
+  instructions: import_zod10.z.string().nullish(),
+  speed: import_zod10.z.number().min(0.25).max(4).default(1).nullish()
 });
 var OpenAISpeechModel = class {
   constructor(modelId, config) {
@@ -1551,7 +1603,7 @@ var OpenAISpeechModel = class {
   get provider() {
     return this.config.provider;
   }
-  getArgs({
+  async getArgs({
     text,
     voice = "alloy",
     outputFormat = "mp3",
@@ -1560,7 +1612,7 @@ var OpenAISpeechModel = class {
     providerOptions
   }) {
     const warnings = [];
-    const openAIOptions = (0, import_provider_utils8.parseProviderOptions)({
+    const openAIOptions = await (0, import_provider_utils8.parseProviderOptions)({
       provider: "openai",
       providerOptions,
       schema: OpenAIProviderOptionsSchema
@@ -1601,7 +1653,7 @@ var OpenAISpeechModel = class {
   async doGenerate(options) {
     var _a, _b, _c;
     const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
-    const { requestBody, warnings } = this.getArgs(options);
+    const { requestBody, warnings } = await this.getArgs(options);
     const {
       value: audio,
       responseHeaders,
@@ -1636,7 +1688,7 @@ var OpenAISpeechModel = class {
 
 // src/responses/openai-responses-language-model.ts
 var import_provider_utils9 = require("@ai-sdk/provider-utils");
-var import_zod10 = require("zod");
+var import_zod11 = require("zod");
 
 // src/responses/convert-to-openai-responses-messages.ts
 var import_provider6 = require("@ai-sdk/provider");
@@ -1856,7 +1908,7 @@ var OpenAIResponsesLanguageModel = class {
   get provider() {
     return this.config.provider;
   }
-  getArgs({
+  async getArgs({
     maxOutputTokens,
     temperature,
     stopSequences,
@@ -1900,7 +1952,7 @@ var OpenAIResponsesLanguageModel = class {
       systemMessageMode: modelConfig.systemMessageMode
     });
     warnings.push(...messageWarnings);
-    const openaiOptions = (0, import_provider_utils9.parseProviderOptions)({
+    const openaiOptions = await (0, import_provider_utils9.parseProviderOptions)({
       provider: "openai",
       providerOptions,
       schema: openaiResponsesProviderOptionsSchema
@@ -1983,7 +2035,7 @@ var OpenAIResponsesLanguageModel = class {
   }
   async doGenerate(options) {
     var _a, _b, _c, _d, _e, _f, _g, _h;
-    const { args: body, warnings } = this.getArgs(options);
+    const { args: body, warnings } = await this.getArgs(options);
     const {
       responseHeaders,
       value: response,
@@ -1997,55 +2049,55 @@ var OpenAIResponsesLanguageModel = class {
       body,
       failedResponseHandler: openaiFailedResponseHandler,
       successfulResponseHandler: (0, import_provider_utils9.createJsonResponseHandler)(
-        import_zod10.z.object({
-          id: import_zod10.z.string(),
-          created_at: import_zod10.z.number(),
-          model: import_zod10.z.string(),
-          output: import_zod10.z.array(
-            import_zod10.z.discriminatedUnion("type", [
-              import_zod10.z.object({
-                type: import_zod10.z.literal("message"),
-                role: import_zod10.z.literal("assistant"),
-                content: import_zod10.z.array(
-                  import_zod10.z.object({
-                    type: import_zod10.z.literal("output_text"),
-                    text: import_zod10.z.string(),
-                    annotations: import_zod10.z.array(
-                      import_zod10.z.object({
-                        type: import_zod10.z.literal("url_citation"),
-                        start_index: import_zod10.z.number(),
-                        end_index: import_zod10.z.number(),
-                        url: import_zod10.z.string(),
-                        title: import_zod10.z.string()
+        import_zod11.z.object({
+          id: import_zod11.z.string(),
+          created_at: import_zod11.z.number(),
+          model: import_zod11.z.string(),
+          output: import_zod11.z.array(
+            import_zod11.z.discriminatedUnion("type", [
+              import_zod11.z.object({
+                type: import_zod11.z.literal("message"),
+                role: import_zod11.z.literal("assistant"),
+                content: import_zod11.z.array(
+                  import_zod11.z.object({
+                    type: import_zod11.z.literal("output_text"),
+                    text: import_zod11.z.string(),
+                    annotations: import_zod11.z.array(
+                      import_zod11.z.object({
+                        type: import_zod11.z.literal("url_citation"),
+                        start_index: import_zod11.z.number(),
+                        end_index: import_zod11.z.number(),
+                        url: import_zod11.z.string(),
+                        title: import_zod11.z.string()
                       })
                     )
                   })
                 )
               }),
-              import_zod10.z.object({
-                type: import_zod10.z.literal("function_call"),
-                call_id: import_zod10.z.string(),
-                name: import_zod10.z.string(),
-                arguments: import_zod10.z.string()
+              import_zod11.z.object({
+                type: import_zod11.z.literal("function_call"),
+                call_id: import_zod11.z.string(),
+                name: import_zod11.z.string(),
+                arguments: import_zod11.z.string()
               }),
-              import_zod10.z.object({
-                type: import_zod10.z.literal("web_search_call")
+              import_zod11.z.object({
+                type: import_zod11.z.literal("web_search_call")
               }),
-              import_zod10.z.object({
-                type: import_zod10.z.literal("computer_call")
+              import_zod11.z.object({
+                type: import_zod11.z.literal("computer_call")
              }),
-              import_zod10.z.object({
-                type: import_zod10.z.literal("reasoning"),
-                summary: import_zod10.z.array(
-                  import_zod10.z.object({
-                    type: import_zod10.z.literal("summary_text"),
-                    text: import_zod10.z.string()
+              import_zod11.z.object({
+                type: import_zod11.z.literal("reasoning"),
+                summary: import_zod11.z.array(
+                  import_zod11.z.object({
+                    type: import_zod11.z.literal("summary_text"),
+                    text: import_zod11.z.string()
                   })
                 )
               })
             ])
           ),
-          incomplete_details: import_zod10.z.object({ reason: import_zod10.z.string() }).nullable(),
+          incomplete_details: import_zod11.z.object({ reason: import_zod11.z.string() }).nullable(),
           usage: usageSchema
         })
       ),
@@ -2058,7 +2110,6 @@ var OpenAIResponsesLanguageModel = class {
         case "reasoning": {
           content.push({
             type: "reasoning",
-            reasoningType: "text",
             text: part.summary.map((summary) => summary.text).join()
           });
           break;
@@ -2122,7 +2173,7 @@ var OpenAIResponsesLanguageModel = class {
     };
   }
   async doStream(options) {
-    const { args: body, warnings } = this.getArgs(options);
+    const { args: body, warnings } = await this.getArgs(options);
     const { responseHeaders, value: response } = await (0, import_provider_utils9.postJsonToApi)({
       url: this.config.url({
         path: "/responses",
@@ -2206,7 +2257,6 @@ var OpenAIResponsesLanguageModel = class {
           } else if (isResponseReasoningSummaryTextDeltaChunk(value)) {
             controller.enqueue({
               type: "reasoning",
-              reasoningType: "text",
               text: value.delta
             });
           } else if (isResponseOutputItemDoneChunk(value) && value.item.type === "function_call") {
@@ -2261,86 +2311,86 @@ var OpenAIResponsesLanguageModel = class {
     };
   }
 };
-var usageSchema = import_zod10.z.object({
-  input_tokens: import_zod10.z.number(),
-  input_tokens_details: import_zod10.z.object({ cached_tokens: import_zod10.z.number().nullish() }).nullish(),
-  output_tokens: import_zod10.z.number(),
-  output_tokens_details: import_zod10.z.object({ reasoning_tokens: import_zod10.z.number().nullish() }).nullish()
+var usageSchema = import_zod11.z.object({
+  input_tokens: import_zod11.z.number(),
+  input_tokens_details: import_zod11.z.object({ cached_tokens: import_zod11.z.number().nullish() }).nullish(),
+  output_tokens: import_zod11.z.number(),
+  output_tokens_details: import_zod11.z.object({ reasoning_tokens: import_zod11.z.number().nullish() }).nullish()
 });
-var textDeltaChunkSchema = import_zod10.z.object({
-  type: import_zod10.z.literal("response.output_text.delta"),
-  delta: import_zod10.z.string()
+var textDeltaChunkSchema = import_zod11.z.object({
+  type: import_zod11.z.literal("response.output_text.delta"),
+  delta: import_zod11.z.string()
 });
-var responseFinishedChunkSchema = import_zod10.z.object({
-  type: import_zod10.z.enum(["response.completed", "response.incomplete"]),
-  response: import_zod10.z.object({
-    incomplete_details: import_zod10.z.object({ reason: import_zod10.z.string() }).nullish(),
+var responseFinishedChunkSchema = import_zod11.z.object({
+  type: import_zod11.z.enum(["response.completed", "response.incomplete"]),
+  response: import_zod11.z.object({
+    incomplete_details: import_zod11.z.object({ reason: import_zod11.z.string() }).nullish(),
     usage: usageSchema
   })
 });
-var responseCreatedChunkSchema = import_zod10.z.object({
-  type: import_zod10.z.literal("response.created"),
-  response: import_zod10.z.object({
-    id: import_zod10.z.string(),
-    created_at: import_zod10.z.number(),
-    model: import_zod10.z.string()
+var responseCreatedChunkSchema = import_zod11.z.object({
+  type: import_zod11.z.literal("response.created"),
+  response: import_zod11.z.object({
+    id: import_zod11.z.string(),
+    created_at: import_zod11.z.number(),
+    model: import_zod11.z.string()
   })
 });
-var responseOutputItemDoneSchema = import_zod10.z.object({
-  type: import_zod10.z.literal("response.output_item.done"),
-  output_index: import_zod10.z.number(),
-  item: import_zod10.z.discriminatedUnion("type", [
-    import_zod10.z.object({
-      type: import_zod10.z.literal("message")
+var responseOutputItemDoneSchema = import_zod11.z.object({
+  type: import_zod11.z.literal("response.output_item.done"),
+  output_index: import_zod11.z.number(),
+  item: import_zod11.z.discriminatedUnion("type", [
+    import_zod11.z.object({
+      type: import_zod11.z.literal("message")
     }),
-    import_zod10.z.object({
-      type: import_zod10.z.literal("function_call"),
-      id: import_zod10.z.string(),
-      call_id: import_zod10.z.string(),
-      name: import_zod10.z.string(),
-      arguments: import_zod10.z.string(),
-      status: import_zod10.z.literal("completed")
+    import_zod11.z.object({
+      type: import_zod11.z.literal("function_call"),
+      id: import_zod11.z.string(),
+      call_id: import_zod11.z.string(),
+      name: import_zod11.z.string(),
+      arguments: import_zod11.z.string(),
+      status: import_zod11.z.literal("completed")
     })
   ])
 });
-var responseFunctionCallArgumentsDeltaSchema = import_zod10.z.object({
-  type: import_zod10.z.literal("response.function_call_arguments.delta"),
-  item_id: import_zod10.z.string(),
-  output_index: import_zod10.z.number(),
-  delta: import_zod10.z.string()
+var responseFunctionCallArgumentsDeltaSchema = import_zod11.z.object({
+  type: import_zod11.z.literal("response.function_call_arguments.delta"),
+  item_id: import_zod11.z.string(),
+  output_index: import_zod11.z.number(),
+  delta: import_zod11.z.string()
 });
-var responseOutputItemAddedSchema = import_zod10.z.object({
-  type: import_zod10.z.literal("response.output_item.added"),
-  output_index: import_zod10.z.number(),
-  item: import_zod10.z.discriminatedUnion("type", [
-    import_zod10.z.object({
-      type: import_zod10.z.literal("message")
+var responseOutputItemAddedSchema = import_zod11.z.object({
+  type: import_zod11.z.literal("response.output_item.added"),
+  output_index: import_zod11.z.number(),
+  item: import_zod11.z.discriminatedUnion("type", [
+    import_zod11.z.object({
+      type: import_zod11.z.literal("message")
     }),
-    import_zod10.z.object({
-      type: import_zod10.z.literal("function_call"),
-      id: import_zod10.z.string(),
-      call_id: import_zod10.z.string(),
-      name: import_zod10.z.string(),
-      arguments: import_zod10.z.string()
+    import_zod11.z.object({
+      type: import_zod11.z.literal("function_call"),
+      id: import_zod11.z.string(),
+      call_id: import_zod11.z.string(),
+      name: import_zod11.z.string(),
+      arguments: import_zod11.z.string()
     })
   ])
 });
-var responseAnnotationAddedSchema = import_zod10.z.object({
-  type: import_zod10.z.literal("response.output_text.annotation.added"),
-  annotation: import_zod10.z.object({
-    type: import_zod10.z.literal("url_citation"),
-    url: import_zod10.z.string(),
-    title: import_zod10.z.string()
+var responseAnnotationAddedSchema = import_zod11.z.object({
+  type: import_zod11.z.literal("response.output_text.annotation.added"),
+  annotation: import_zod11.z.object({
+    type: import_zod11.z.literal("url_citation"),
+    url: import_zod11.z.string(),
+    title: import_zod11.z.string()
   })
 });
-var responseReasoningSummaryTextDeltaSchema = import_zod10.z.object({
-  type: import_zod10.z.literal("response.reasoning_summary_text.delta"),
-  item_id: import_zod10.z.string(),
-  output_index: import_zod10.z.number(),
-  summary_index: import_zod10.z.number(),
-  delta: import_zod10.z.string()
+var responseReasoningSummaryTextDeltaSchema = import_zod11.z.object({
+  type: import_zod11.z.literal("response.reasoning_summary_text.delta"),
+  item_id: import_zod11.z.string(),
+  output_index: import_zod11.z.number(),
+  summary_index: import_zod11.z.number(),
+  delta: import_zod11.z.string()
 });
-var openaiResponsesChunkSchema = import_zod10.z.union([
+var openaiResponsesChunkSchema = import_zod11.z.union([
   textDeltaChunkSchema,
   responseFinishedChunkSchema,
   responseCreatedChunkSchema,
2349
2399
  responseOutputItemAddedSchema,
2350
2400
  responseAnnotationAddedSchema,
2351
2401
  responseReasoningSummaryTextDeltaSchema,
2352
- import_zod10.z.object({ type: import_zod10.z.string() }).passthrough()
2402
+ import_zod11.z.object({ type: import_zod11.z.string() }).passthrough()
2353
2403
  // fallback for unknown chunks
2354
2404
  ]);
2355
2405
  function isTextDeltaChunk(chunk) {
@@ -2397,16 +2447,16 @@ function getResponsesModelConfig(modelId) {
     requiredAutoTruncation: false
   };
 }
-var openaiResponsesProviderOptionsSchema = import_zod10.z.object({
-  metadata: import_zod10.z.any().nullish(),
-  parallelToolCalls: import_zod10.z.boolean().nullish(),
-  previousResponseId: import_zod10.z.string().nullish(),
-  store: import_zod10.z.boolean().nullish(),
-  user: import_zod10.z.string().nullish(),
-  reasoningEffort: import_zod10.z.string().nullish(),
-  strictSchemas: import_zod10.z.boolean().nullish(),
-  instructions: import_zod10.z.string().nullish(),
-  reasoningSummary: import_zod10.z.string().nullish()
+var openaiResponsesProviderOptionsSchema = import_zod11.z.object({
+  metadata: import_zod11.z.any().nullish(),
+  parallelToolCalls: import_zod11.z.boolean().nullish(),
+  previousResponseId: import_zod11.z.string().nullish(),
+  store: import_zod11.z.boolean().nullish(),
+  user: import_zod11.z.string().nullish(),
+  reasoningEffort: import_zod11.z.string().nullish(),
+  strictSchemas: import_zod11.z.boolean().nullish(),
+  instructions: import_zod11.z.string().nullish(),
+  reasoningSummary: import_zod11.z.string().nullish()
 });
 // Annotate the CommonJS export names for ESM import in node:
 0 && (module.exports = {
@@ -2417,7 +2467,9 @@ var openaiResponsesProviderOptionsSchema = import_zod10.z.object({
   OpenAIResponsesLanguageModel,
   OpenAISpeechModel,
   OpenAITranscriptionModel,
+  hasDefaultResponseFormat,
   modelMaxImagesPerCall,
+  openaiCompletionProviderOptions,
   openaiEmbeddingProviderOptions,
   openaiProviderOptions
 });