@ai-sdk/openai 2.0.0-canary.10 → 2.0.0-canary.12

This diff shows the changes between these two publicly released package versions as they appear in their public registry, and is provided for informational purposes only.
@@ -193,19 +193,6 @@ function getResponseMetadata({
  };
  }

- // src/map-openai-chat-logprobs.ts
- function mapOpenAIChatLogProbsOutput(logprobs) {
- var _a, _b;
- return (_b = (_a = logprobs == null ? void 0 : logprobs.content) == null ? void 0 : _a.map(({ token, logprob, top_logprobs }) => ({
- token,
- logprob,
- topLogprobs: top_logprobs ? top_logprobs.map(({ token: token2, logprob: logprob2 }) => ({
- token: token2,
- logprob: logprob2
- })) : []
- }))) != null ? _b : void 0;
- }
-
  // src/map-openai-finish-reason.ts
  function mapOpenAIFinishReason(finishReason) {
  switch (finishReason) {
@@ -233,16 +220,6 @@ var openaiProviderOptions = z.object({
  * the GPT tokenizer) to an associated bias value from -100 to 100.
  */
  logitBias: z.record(z.coerce.number(), z.number()).optional(),
- /**
- * Return the log probabilities of the tokens.
- *
- * Setting to true will return the log probabilities of the tokens that
- * were generated.
- *
- * Setting to a number will return the log probabilities of the top n
- * tokens that were generated.
- */
- logprobs: z.union([z.boolean(), z.number()]).optional(),
  /**
  * Whether to enable parallel function calling during tool use. Default to true.
  */
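
The two hunks above remove chat logprobs support: the mapOpenAIChatLogProbsOutput helper is deleted and the logprobs key is dropped from openaiProviderOptions. A minimal sketch of the caller-visible change, assuming the usual AI SDK call shape (the model id and bias values are illustrative):

    import { generateText } from 'ai';
    import { openai } from '@ai-sdk/openai';

    const result = await generateText({
      model: openai('gpt-4o'),
      prompt: 'Hello!',
      providerOptions: {
        openai: {
          logitBias: { '50256': -100 }, // still part of the schema
          // logprobs: 5,               // removed in this release; no longer in the schema
        },
      },
    });
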
@@ -368,7 +345,7 @@ var OpenAIChatLanguageModel = class {
  "image/*": [/^https?:\/\/.*$/]
  };
  }
- getArgs({
+ async getArgs({
  prompt,
  maxOutputTokens,
  temperature,
@@ -385,7 +362,7 @@ var OpenAIChatLanguageModel = class {
  }) {
  var _a, _b, _c;
  const warnings = [];
- const openaiOptions = (_a = parseProviderOptions({
+ const openaiOptions = (_a = await parseProviderOptions({
  provider: "openai",
  providerOptions,
  schema: openaiProviderOptions
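
parseProviderOptions from @ai-sdk/provider-utils now returns a Promise, so getArgs becomes async here and on every other model class in this diff, and each doGenerate/doStream call site gains an await. A minimal sketch of the new calling convention, using a stand-in zod schema (the real openaiProviderOptions is defined earlier in the bundle):

    import { parseProviderOptions } from '@ai-sdk/provider-utils';
    import { z } from 'zod';

    // Stand-in schema for illustration only.
    const schema = z.object({ user: z.string().optional() });

    async function getArgs(providerOptions: Record<string, Record<string, unknown>> | undefined) {
      // Awaiting is now required; the call previously returned the parsed value synchronously.
      const openaiOptions = await parseProviderOptions({
        provider: 'openai',
        providerOptions,
        schema,
      });
      return openaiOptions ?? {};
    }
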
@@ -415,8 +392,6 @@ var OpenAIChatLanguageModel = class {
  model: this.modelId,
  // model specific settings:
  logit_bias: openaiOptions.logitBias,
- logprobs: openaiOptions.logprobs === true || typeof openaiOptions.logprobs === "number" ? true : void 0,
- top_logprobs: typeof openaiOptions.logprobs === "number" ? openaiOptions.logprobs : typeof openaiOptions.logprobs === "boolean" ? openaiOptions.logprobs ? 0 : void 0 : void 0,
  user: openaiOptions.user,
  parallel_tool_calls: openaiOptions.parallelToolCalls,
  // standardized settings:
@@ -489,20 +464,6 @@ var OpenAIChatLanguageModel = class {
  message: "logitBias is not supported for reasoning models"
  });
  }
- if (baseArgs.logprobs != null) {
- baseArgs.logprobs = void 0;
- warnings.push({
- type: "other",
- message: "logprobs is not supported for reasoning models"
- });
- }
- if (baseArgs.top_logprobs != null) {
- baseArgs.top_logprobs = void 0;
- warnings.push({
- type: "other",
- message: "topLogprobs is not supported for reasoning models"
- });
- }
  if (baseArgs.max_tokens != null) {
  if (baseArgs.max_completion_tokens == null) {
  baseArgs.max_completion_tokens = baseArgs.max_tokens;
@@ -539,7 +500,7 @@ var OpenAIChatLanguageModel = class {
  }
  async doGenerate(options) {
  var _a, _b, _c, _d, _e, _f, _g, _h;
- const { args: body, warnings } = this.getArgs(options);
+ const { args: body, warnings } = await this.getArgs(options);
  const {
  responseHeaders,
  value: response,
@@ -602,12 +563,11 @@ var OpenAIChatLanguageModel = class {
  body: rawResponse
  },
  warnings,
- logprobs: mapOpenAIChatLogProbsOutput(choice.logprobs),
  providerMetadata
  };
  }
  async doStream(options) {
- const { args, warnings } = this.getArgs(options);
+ const { args, warnings } = await this.getArgs(options);
  const body = {
  ...args,
  stream: true,
@@ -635,7 +595,6 @@ var OpenAIChatLanguageModel = class {
  inputTokens: void 0,
  outputTokens: void 0
  };
- let logprobs;
  let isFirstChunk = true;
  const providerMetadata = { openai: {} };
  return {
@@ -700,13 +659,6 @@ var OpenAIChatLanguageModel = class {
  text: delta.content
  });
  }
- const mappedLogprobs = mapOpenAIChatLogProbsOutput(
- choice == null ? void 0 : choice.logprobs
- );
- if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
- if (logprobs === void 0) logprobs = [];
- logprobs.push(...mappedLogprobs);
- }
  if (delta.tool_calls != null) {
  for (const toolCallDelta of delta.tool_calls) {
  const index = toolCallDelta.index;
@@ -793,7 +745,6 @@ var OpenAIChatLanguageModel = class {
  controller.enqueue({
  type: "finish",
  finishReason,
- logprobs,
  usage,
  ...providerMetadata != null ? { providerMetadata } : {}
  });
@@ -838,20 +789,6 @@ var openaiChatResponseSchema = z3.object({
  ).nullish()
  }),
  index: z3.number(),
- logprobs: z3.object({
- content: z3.array(
- z3.object({
- token: z3.string(),
- logprob: z3.number(),
- top_logprobs: z3.array(
- z3.object({
- token: z3.string(),
- logprob: z3.number()
- })
- )
- })
- ).nullable()
- }).nullish(),
  finish_reason: z3.string().nullish()
  })
  ),
@@ -879,20 +816,6 @@ var openaiChatChunkSchema = z3.union([
  })
  ).nullish()
  }).nullish(),
- logprobs: z3.object({
- content: z3.array(
- z3.object({
- token: z3.string(),
- logprob: z3.number(),
- top_logprobs: z3.array(
- z3.object({
- token: z3.string(),
- logprob: z3.number()
- })
- )
- })
- ).nullable()
- }).nullish(),
  finish_reason: z3.string().nullable().optional(),
  index: z3.number()
  })
@@ -937,9 +860,10 @@ import {
  combineHeaders as combineHeaders2,
  createEventSourceResponseHandler as createEventSourceResponseHandler2,
  createJsonResponseHandler as createJsonResponseHandler2,
+ parseProviderOptions as parseProviderOptions2,
  postJsonToApi as postJsonToApi2
  } from "@ai-sdk/provider-utils";
- import { z as z4 } from "zod";
+ import { z as z5 } from "zod";

  // src/convert-to-openai-completion-prompt.ts
  import {
@@ -1023,28 +947,49 @@ ${user}:`]
  };
  }

- // src/map-openai-completion-logprobs.ts
- function mapOpenAICompletionLogProbs(logprobs) {
- return logprobs == null ? void 0 : logprobs.tokens.map((token, index) => ({
- token,
- logprob: logprobs.token_logprobs[index],
- topLogprobs: logprobs.top_logprobs ? Object.entries(logprobs.top_logprobs[index]).map(
- ([token2, logprob]) => ({
- token: token2,
- logprob
- })
- ) : []
- }));
- }
+ // src/openai-completion-options.ts
+ import { z as z4 } from "zod";
+ var openaiCompletionProviderOptions = z4.object({
+ /**
+ Echo back the prompt in addition to the completion.
+ */
+ echo: z4.boolean().optional(),
+ /**
+ Modify the likelihood of specified tokens appearing in the completion.
+
+ Accepts a JSON object that maps tokens (specified by their token ID in
+ the GPT tokenizer) to an associated bias value from -100 to 100. You
+ can use this tokenizer tool to convert text to token IDs. Mathematically,
+ the bias is added to the logits generated by the model prior to sampling.
+ The exact effect will vary per model, but values between -1 and 1 should
+ decrease or increase likelihood of selection; values like -100 or 100
+ should result in a ban or exclusive selection of the relevant token.
+
+ As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
+ token from being generated.
+ */
+ logitBias: z4.record(z4.string(), z4.number()).optional(),
+ /**
+ The suffix that comes after a completion of inserted text.
+ */
+ suffix: z4.string().optional(),
+ /**
+ A unique identifier representing your end-user, which can help OpenAI to
+ monitor and detect abuse. Learn more.
+ */
+ user: z4.string().optional()
+ });

  // src/openai-completion-language-model.ts
  var OpenAICompletionLanguageModel = class {
- constructor(modelId, settings, config) {
+ constructor(modelId, config) {
  this.specificationVersion = "v2";
  this.modelId = modelId;
- this.settings = settings;
  this.config = config;
  }
+ get providerOptionsName() {
+ return this.config.provider.split(".")[0].trim();
+ }
  get provider() {
  return this.config.provider;
  }
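
The completion model loses its settings constructor argument: echo, logitBias, suffix, and user now live in the new openaiCompletionProviderOptions schema (the completion-specific logprobs setting is gone along with the chat one), and getArgs parses them from providerOptions under both the "openai" key and the provider's own prefix via providerOptionsName. A sketch of the new call shape, with illustrative option values:

    import { generateText } from 'ai';
    import { openai } from '@ai-sdk/openai';

    const { text } = await generateText({
      model: openai.completion('gpt-3.5-turbo-instruct'),
      prompt: 'Write a haiku about diffs.',
      providerOptions: {
        // formerly constructor settings on OpenAICompletionLanguageModel:
        openai: {
          echo: false,
          suffix: '\n',
          user: 'user-1234',
        },
      },
    });
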
@@ -1053,7 +998,7 @@ var OpenAICompletionLanguageModel = class {
  // no supported urls for completion models
  };
  }
- getArgs({
+ async getArgs({
  inputFormat,
  prompt,
  maxOutputTokens,
@@ -1066,9 +1011,22 @@ var OpenAICompletionLanguageModel = class {
  responseFormat,
  tools,
  toolChoice,
- seed
+ seed,
+ providerOptions
  }) {
  const warnings = [];
+ const openaiOptions = {
+ ...await parseProviderOptions2({
+ provider: "openai",
+ providerOptions,
+ schema: openaiCompletionProviderOptions
+ }),
+ ...await parseProviderOptions2({
+ provider: this.providerOptionsName,
+ providerOptions,
+ schema: openaiCompletionProviderOptions
+ })
+ };
  if (topK != null) {
  warnings.push({ type: "unsupported-setting", setting: "topK" });
  }
@@ -1092,11 +1050,10 @@ var OpenAICompletionLanguageModel = class {
  // model id:
  model: this.modelId,
  // model specific settings:
- echo: this.settings.echo,
- logit_bias: this.settings.logitBias,
- logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
- suffix: this.settings.suffix,
- user: this.settings.user,
+ echo: openaiOptions.echo,
+ logit_bias: openaiOptions.logitBias,
+ suffix: openaiOptions.suffix,
+ user: openaiOptions.user,
  // standardized settings:
  max_tokens: maxOutputTokens,
  temperature,
@@ -1113,7 +1070,7 @@ var OpenAICompletionLanguageModel = class {
  };
  }
  async doGenerate(options) {
- const { args, warnings } = this.getArgs(options);
+ const { args, warnings } = await this.getArgs(options);
  const {
  responseHeaders,
  value: response,
@@ -1140,7 +1097,6 @@ var OpenAICompletionLanguageModel = class {
  outputTokens: response.usage.completion_tokens
  },
  finishReason: mapOpenAIFinishReason(choice.finish_reason),
- logprobs: mapOpenAICompletionLogProbs(choice.logprobs),
  request: { body: args },
  response: {
  ...getResponseMetadata(response),
@@ -1151,7 +1107,7 @@ var OpenAICompletionLanguageModel = class {
  };
  }
  async doStream(options) {
- const { args, warnings } = this.getArgs(options);
+ const { args, warnings } = await this.getArgs(options);
  const body = {
  ...args,
  stream: true,
@@ -1177,7 +1133,6 @@ var OpenAICompletionLanguageModel = class {
  inputTokens: void 0,
  outputTokens: void 0
  };
- let logprobs;
  let isFirstChunk = true;
  return {
  stream: response.pipeThrough(
@@ -1218,19 +1173,11 @@ var OpenAICompletionLanguageModel = class {
  text: choice.text
  });
  }
- const mappedLogprobs = mapOpenAICompletionLogProbs(
- choice == null ? void 0 : choice.logprobs
- );
- if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
- if (logprobs === void 0) logprobs = [];
- logprobs.push(...mappedLogprobs);
- }
  },
  flush(controller) {
  controller.enqueue({
  type: "finish",
  finishReason,
- logprobs,
  usage
  });
  }
@@ -1241,46 +1188,36 @@ var OpenAICompletionLanguageModel = class {
  };
  }
  };
- var openaiCompletionResponseSchema = z4.object({
- id: z4.string().nullish(),
- created: z4.number().nullish(),
- model: z4.string().nullish(),
- choices: z4.array(
- z4.object({
- text: z4.string(),
- finish_reason: z4.string(),
- logprobs: z4.object({
- tokens: z4.array(z4.string()),
- token_logprobs: z4.array(z4.number()),
- top_logprobs: z4.array(z4.record(z4.string(), z4.number())).nullable()
- }).nullish()
+ var openaiCompletionResponseSchema = z5.object({
+ id: z5.string().nullish(),
+ created: z5.number().nullish(),
+ model: z5.string().nullish(),
+ choices: z5.array(
+ z5.object({
+ text: z5.string(),
+ finish_reason: z5.string()
  })
  ),
- usage: z4.object({
- prompt_tokens: z4.number(),
- completion_tokens: z4.number()
+ usage: z5.object({
+ prompt_tokens: z5.number(),
+ completion_tokens: z5.number()
  })
  });
- var openaiCompletionChunkSchema = z4.union([
- z4.object({
- id: z4.string().nullish(),
- created: z4.number().nullish(),
- model: z4.string().nullish(),
- choices: z4.array(
- z4.object({
- text: z4.string(),
- finish_reason: z4.string().nullish(),
- index: z4.number(),
- logprobs: z4.object({
- tokens: z4.array(z4.string()),
- token_logprobs: z4.array(z4.number()),
- top_logprobs: z4.array(z4.record(z4.string(), z4.number())).nullable()
- }).nullish()
+ var openaiCompletionChunkSchema = z5.union([
+ z5.object({
+ id: z5.string().nullish(),
+ created: z5.number().nullish(),
+ model: z5.string().nullish(),
+ choices: z5.array(
+ z5.object({
+ text: z5.string(),
+ finish_reason: z5.string().nullish(),
+ index: z5.number()
  })
  ),
- usage: z4.object({
- prompt_tokens: z4.number(),
- completion_tokens: z4.number()
+ usage: z5.object({
+ prompt_tokens: z5.number(),
+ completion_tokens: z5.number()
  }).nullish()
  }),
  openaiErrorDataSchema
@@ -1293,24 +1230,24 @@ import {
  import {
  combineHeaders as combineHeaders3,
  createJsonResponseHandler as createJsonResponseHandler3,
- parseProviderOptions as parseProviderOptions2,
+ parseProviderOptions as parseProviderOptions3,
  postJsonToApi as postJsonToApi3
  } from "@ai-sdk/provider-utils";
- import { z as z6 } from "zod";
+ import { z as z7 } from "zod";

  // src/openai-embedding-options.ts
- import { z as z5 } from "zod";
- var openaiEmbeddingProviderOptions = z5.object({
+ import { z as z6 } from "zod";
+ var openaiEmbeddingProviderOptions = z6.object({
  /**
  The number of dimensions the resulting output embeddings should have.
  Only supported in text-embedding-3 and later models.
  */
- dimensions: z5.number().optional(),
+ dimensions: z6.number().optional(),
  /**
  A unique identifier representing your end-user, which can help OpenAI to
  monitor and detect abuse. Learn more.
  */
- user: z5.string().optional()
+ user: z6.string().optional()
  });

  // src/openai-embedding-model.ts
@@ -1347,7 +1284,7 @@ var OpenAIEmbeddingModel = class {
  values
  });
  }
- const openaiOptions = (_a = parseProviderOptions2({
+ const openaiOptions = (_a = await parseProviderOptions3({
  provider: "openai",
  providerOptions,
  schema: openaiEmbeddingProviderOptions
@@ -1383,9 +1320,9 @@ var OpenAIEmbeddingModel = class {
  };
  }
  };
- var openaiTextEmbeddingResponseSchema = z6.object({
- data: z6.array(z6.object({ embedding: z6.array(z6.number()) })),
- usage: z6.object({ prompt_tokens: z6.number() }).nullish()
+ var openaiTextEmbeddingResponseSchema = z7.object({
+ data: z7.array(z7.object({ embedding: z7.array(z7.number()) })),
+ usage: z7.object({ prompt_tokens: z7.number() }).nullish()
  });

  // src/openai-image-model.ts
@@ -1394,13 +1331,15 @@ import {
  createJsonResponseHandler as createJsonResponseHandler4,
  postJsonToApi as postJsonToApi4
  } from "@ai-sdk/provider-utils";
- import { z as z7 } from "zod";
+ import { z as z8 } from "zod";

  // src/openai-image-settings.ts
  var modelMaxImagesPerCall = {
  "dall-e-3": 1,
- "dall-e-2": 10
+ "dall-e-2": 10,
+ "gpt-image-1": 10
  };
+ var hasDefaultResponseFormat = /* @__PURE__ */ new Set(["gpt-image-1"]);

  // src/openai-image-model.ts
  var OpenAIImageModel = class {
@@ -1452,7 +1391,7 @@ var OpenAIImageModel = class {
  n,
  size,
  ...(_d = providerOptions.openai) != null ? _d : {},
- response_format: "b64_json"
+ ...!hasDefaultResponseFormat.has(this.modelId) ? { response_format: "b64_json" } : {}
  },
  failedResponseHandler: openaiFailedResponseHandler,
  successfulResponseHandler: createJsonResponseHandler4(
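
These image-model hunks add gpt-image-1: it gets a maximum of 10 images per call, and because it is in the new hasDefaultResponseFormat set, the request omits response_format for it (gpt-image-1 returns base64 by default rather than taking that parameter). A sketch of generating with the new model id, assuming the AI SDK's experimental image helper:

    import { experimental_generateImage as generateImage } from 'ai';
    import { openai } from '@ai-sdk/openai';

    const { image } = await generateImage({
      model: openai.image('gpt-image-1'),
      prompt: 'A watercolor lighthouse at dawn',
      n: 1, // up to 10 per call for gpt-image-1 after this change
    });
    // For dall-e-2/3 the request still sends response_format: "b64_json";
    // for gpt-image-1 it is omitted.
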
@@ -1472,8 +1411,8 @@ var OpenAIImageModel = class {
  };
  }
  };
- var openaiImageResponseSchema = z7.object({
- data: z7.array(z7.object({ b64_json: z7.string() }))
+ var openaiImageResponseSchema = z8.object({
+ data: z8.array(z8.object({ b64_json: z8.string() }))
  });

  // src/openai-transcription-model.ts
@@ -1481,16 +1420,16 @@ import {
  combineHeaders as combineHeaders5,
  convertBase64ToUint8Array,
  createJsonResponseHandler as createJsonResponseHandler5,
- parseProviderOptions as parseProviderOptions3,
+ parseProviderOptions as parseProviderOptions4,
  postFormDataToApi
  } from "@ai-sdk/provider-utils";
- import { z as z8 } from "zod";
- var openAIProviderOptionsSchema = z8.object({
- include: z8.array(z8.string()).nullish(),
- language: z8.string().nullish(),
- prompt: z8.string().nullish(),
- temperature: z8.number().min(0).max(1).nullish().default(0),
- timestampGranularities: z8.array(z8.enum(["word", "segment"])).nullish().default(["segment"])
+ import { z as z9 } from "zod";
+ var openAIProviderOptionsSchema = z9.object({
+ include: z9.array(z9.string()).nullish(),
+ language: z9.string().nullish(),
+ prompt: z9.string().nullish(),
+ temperature: z9.number().min(0).max(1).nullish().default(0),
+ timestampGranularities: z9.array(z9.enum(["word", "segment"])).nullish().default(["segment"])
  });
  var languageMap = {
  afrikaans: "af",
@@ -1560,14 +1499,14 @@ var OpenAITranscriptionModel = class {
  get provider() {
  return this.config.provider;
  }
- getArgs({
+ async getArgs({
  audio,
  mediaType,
  providerOptions
  }) {
  var _a, _b, _c, _d, _e;
  const warnings = [];
- const openAIOptions = parseProviderOptions3({
+ const openAIOptions = await parseProviderOptions4({
  provider: "openai",
  providerOptions,
  schema: openAIProviderOptionsSchema
@@ -1599,7 +1538,7 @@ var OpenAITranscriptionModel = class {
  async doGenerate(options) {
  var _a, _b, _c, _d, _e, _f;
  const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
- const { formData, warnings } = this.getArgs(options);
+ const { formData, warnings } = await this.getArgs(options);
  const {
  value: response,
  responseHeaders,
@@ -1638,15 +1577,15 @@ var OpenAITranscriptionModel = class {
  };
  }
  };
- var openaiTranscriptionResponseSchema = z8.object({
- text: z8.string(),
- language: z8.string().nullish(),
- duration: z8.number().nullish(),
- words: z8.array(
- z8.object({
- word: z8.string(),
- start: z8.number(),
- end: z8.number()
+ var openaiTranscriptionResponseSchema = z9.object({
+ text: z9.string(),
+ language: z9.string().nullish(),
+ duration: z9.number().nullish(),
+ words: z9.array(
+ z9.object({
+ word: z9.string(),
+ start: z9.number(),
+ end: z9.number()
  })
  ).nullish()
  });
@@ -1655,13 +1594,13 @@ var openaiTranscriptionResponseSchema = z8.object({
  import {
  combineHeaders as combineHeaders6,
  createBinaryResponseHandler,
- parseProviderOptions as parseProviderOptions4,
+ parseProviderOptions as parseProviderOptions5,
  postJsonToApi as postJsonToApi5
  } from "@ai-sdk/provider-utils";
- import { z as z9 } from "zod";
- var OpenAIProviderOptionsSchema = z9.object({
- instructions: z9.string().nullish(),
- speed: z9.number().min(0.25).max(4).default(1).nullish()
+ import { z as z10 } from "zod";
+ var OpenAIProviderOptionsSchema = z10.object({
+ instructions: z10.string().nullish(),
+ speed: z10.number().min(0.25).max(4).default(1).nullish()
  });
  var OpenAISpeechModel = class {
  constructor(modelId, config) {
@@ -1672,7 +1611,7 @@ var OpenAISpeechModel = class {
  get provider() {
  return this.config.provider;
  }
- getArgs({
+ async getArgs({
  text,
  voice = "alloy",
  outputFormat = "mp3",
@@ -1681,7 +1620,7 @@ var OpenAISpeechModel = class {
  providerOptions
  }) {
  const warnings = [];
- const openAIOptions = parseProviderOptions4({
+ const openAIOptions = await parseProviderOptions5({
  provider: "openai",
  providerOptions,
  schema: OpenAIProviderOptionsSchema
@@ -1722,7 +1661,7 @@ var OpenAISpeechModel = class {
  async doGenerate(options) {
  var _a, _b, _c;
  const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
- const { requestBody, warnings } = this.getArgs(options);
+ const { requestBody, warnings } = await this.getArgs(options);
  const {
  value: audio,
  responseHeaders,
@@ -1761,10 +1700,10 @@ import {
  createEventSourceResponseHandler as createEventSourceResponseHandler3,
  createJsonResponseHandler as createJsonResponseHandler6,
  generateId as generateId2,
- parseProviderOptions as parseProviderOptions5,
+ parseProviderOptions as parseProviderOptions6,
  postJsonToApi as postJsonToApi6
  } from "@ai-sdk/provider-utils";
- import { z as z10 } from "zod";
+ import { z as z11 } from "zod";

  // src/responses/convert-to-openai-responses-messages.ts
  import {
@@ -1988,7 +1927,7 @@ var OpenAIResponsesLanguageModel = class {
  get provider() {
  return this.config.provider;
  }
- getArgs({
+ async getArgs({
  maxOutputTokens,
  temperature,
  stopSequences,
@@ -2032,7 +1971,7 @@ var OpenAIResponsesLanguageModel = class {
  systemMessageMode: modelConfig.systemMessageMode
  });
  warnings.push(...messageWarnings);
- const openaiOptions = parseProviderOptions5({
+ const openaiOptions = await parseProviderOptions6({
  provider: "openai",
  providerOptions,
  schema: openaiResponsesProviderOptionsSchema
@@ -2063,8 +2002,15 @@ var OpenAIResponsesLanguageModel = class {
  user: openaiOptions == null ? void 0 : openaiOptions.user,
  instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
  // model-specific settings:
- ...modelConfig.isReasoningModel && (openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
- reasoning: { effort: openaiOptions == null ? void 0 : openaiOptions.reasoningEffort }
+ ...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
+ reasoning: {
+ ...(openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
+ effort: openaiOptions.reasoningEffort
+ },
+ ...(openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null && {
+ summary: openaiOptions.reasoningSummary
+ }
+ }
  },
  ...modelConfig.requiredAutoTruncation && {
  truncation: "auto"
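
The Responses model gains a reasoningSummary provider option (declared near the end of this diff alongside reasoningEffort); on reasoning models it is forwarded to the API as reasoning.summary. A sketch of requesting summaries, where the model id and the "auto" value are illustrative:

    import { generateText } from 'ai';
    import { openai } from '@ai-sdk/openai';

    const result = await generateText({
      model: openai.responses('o3-mini'),
      prompt: 'What is 13 * 24? Explain briefly.',
      providerOptions: {
        openai: {
          reasoningEffort: 'low',
          reasoningSummary: 'auto', // forwarded as reasoning.summary
        },
      },
    });
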
@@ -2108,7 +2054,7 @@ var OpenAIResponsesLanguageModel = class {
  }
  async doGenerate(options) {
  var _a, _b, _c, _d, _e, _f, _g, _h;
- const { args: body, warnings } = this.getArgs(options);
+ const { args: body, warnings } = await this.getArgs(options);
  const {
  responseHeaders,
  value: response,
@@ -2122,49 +2068,55 @@ var OpenAIResponsesLanguageModel = class {
  body,
  failedResponseHandler: openaiFailedResponseHandler,
  successfulResponseHandler: createJsonResponseHandler6(
- z10.object({
- id: z10.string(),
- created_at: z10.number(),
- model: z10.string(),
- output: z10.array(
- z10.discriminatedUnion("type", [
- z10.object({
- type: z10.literal("message"),
- role: z10.literal("assistant"),
- content: z10.array(
- z10.object({
- type: z10.literal("output_text"),
- text: z10.string(),
- annotations: z10.array(
- z10.object({
- type: z10.literal("url_citation"),
- start_index: z10.number(),
- end_index: z10.number(),
- url: z10.string(),
- title: z10.string()
+ z11.object({
+ id: z11.string(),
+ created_at: z11.number(),
+ model: z11.string(),
+ output: z11.array(
+ z11.discriminatedUnion("type", [
+ z11.object({
+ type: z11.literal("message"),
+ role: z11.literal("assistant"),
+ content: z11.array(
+ z11.object({
+ type: z11.literal("output_text"),
+ text: z11.string(),
+ annotations: z11.array(
+ z11.object({
+ type: z11.literal("url_citation"),
+ start_index: z11.number(),
+ end_index: z11.number(),
+ url: z11.string(),
+ title: z11.string()
  })
  )
  })
  )
  }),
- z10.object({
- type: z10.literal("function_call"),
- call_id: z10.string(),
- name: z10.string(),
- arguments: z10.string()
+ z11.object({
+ type: z11.literal("function_call"),
+ call_id: z11.string(),
+ name: z11.string(),
+ arguments: z11.string()
  }),
- z10.object({
- type: z10.literal("web_search_call")
+ z11.object({
+ type: z11.literal("web_search_call")
  }),
- z10.object({
- type: z10.literal("computer_call")
+ z11.object({
+ type: z11.literal("computer_call")
  }),
- z10.object({
- type: z10.literal("reasoning")
+ z11.object({
+ type: z11.literal("reasoning"),
+ summary: z11.array(
+ z11.object({
+ type: z11.literal("summary_text"),
+ text: z11.string()
+ })
+ )
  })
  ])
  ),
- incomplete_details: z10.object({ reason: z10.string() }).nullable(),
+ incomplete_details: z11.object({ reason: z11.string() }).nullable(),
  usage: usageSchema
  })
  ),
@@ -2174,6 +2126,13 @@ var OpenAIResponsesLanguageModel = class {
  const content = [];
  for (const part of response.output) {
  switch (part.type) {
+ case "reasoning": {
+ content.push({
+ type: "reasoning",
+ text: part.summary.map((summary) => summary.text).join()
+ });
+ break;
+ }
  case "message": {
  for (const contentPart of part.content) {
  content.push({
@@ -2233,7 +2192,7 @@ var OpenAIResponsesLanguageModel = class {
  };
  }
  async doStream(options) {
- const { args: body, warnings } = this.getArgs(options);
+ const { args: body, warnings } = await this.getArgs(options);
  const { responseHeaders, value: response } = await postJsonToApi6({
  url: this.config.url({
  path: "/responses",
@@ -2314,6 +2273,11 @@ var OpenAIResponsesLanguageModel = class {
  type: "text",
  text: value.delta
  });
+ } else if (isResponseReasoningSummaryTextDeltaChunk(value)) {
+ controller.enqueue({
+ type: "reasoning",
+ text: value.delta
+ });
  } else if (isResponseOutputItemDoneChunk(value) && value.item.type === "function_call") {
  ongoingToolCalls[value.output_index] = void 0;
  hasToolCalls = true;
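
Reasoning summaries now surface as output: doGenerate maps a reasoning output item to a { type: "reasoning", text } content part (the summary texts are concatenated with join(), i.e. the default comma separator), and doStream emits the same part type for each response.reasoning_summary_text.delta chunk. A sketch of reading those parts straight off the provider-level stream; the part shape follows the enqueue calls above, while callOptions stands in for a full LanguageModelV2 options object:

    import { openai } from '@ai-sdk/openai';

    const model = openai.responses('o3-mini');
    // Hypothetical stand-in for the full call options (prompt, providerOptions
    // with reasoningSummary, etc.); real code must supply them.
    declare const callOptions: Parameters<typeof model.doStream>[0];

    const { stream } = await model.doStream(callOptions);
    const reader = stream.getReader();
    while (true) {
      const { done, value } = await reader.read();
      if (done) break;
      if (value.type === 'reasoning') {
        process.stdout.write(value.text); // summary text deltas
      }
    }
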
@@ -2366,79 +2330,86 @@ var OpenAIResponsesLanguageModel = class {
  };
  }
  };
- var usageSchema = z10.object({
- input_tokens: z10.number(),
- input_tokens_details: z10.object({ cached_tokens: z10.number().nullish() }).nullish(),
- output_tokens: z10.number(),
- output_tokens_details: z10.object({ reasoning_tokens: z10.number().nullish() }).nullish()
+ var usageSchema = z11.object({
+ input_tokens: z11.number(),
+ input_tokens_details: z11.object({ cached_tokens: z11.number().nullish() }).nullish(),
+ output_tokens: z11.number(),
+ output_tokens_details: z11.object({ reasoning_tokens: z11.number().nullish() }).nullish()
  });
- var textDeltaChunkSchema = z10.object({
- type: z10.literal("response.output_text.delta"),
- delta: z10.string()
+ var textDeltaChunkSchema = z11.object({
+ type: z11.literal("response.output_text.delta"),
+ delta: z11.string()
  });
- var responseFinishedChunkSchema = z10.object({
- type: z10.enum(["response.completed", "response.incomplete"]),
- response: z10.object({
- incomplete_details: z10.object({ reason: z10.string() }).nullish(),
+ var responseFinishedChunkSchema = z11.object({
+ type: z11.enum(["response.completed", "response.incomplete"]),
+ response: z11.object({
+ incomplete_details: z11.object({ reason: z11.string() }).nullish(),
  usage: usageSchema
  })
  });
- var responseCreatedChunkSchema = z10.object({
- type: z10.literal("response.created"),
- response: z10.object({
- id: z10.string(),
- created_at: z10.number(),
- model: z10.string()
+ var responseCreatedChunkSchema = z11.object({
+ type: z11.literal("response.created"),
+ response: z11.object({
+ id: z11.string(),
+ created_at: z11.number(),
+ model: z11.string()
  })
  });
- var responseOutputItemDoneSchema = z10.object({
- type: z10.literal("response.output_item.done"),
- output_index: z10.number(),
- item: z10.discriminatedUnion("type", [
- z10.object({
- type: z10.literal("message")
+ var responseOutputItemDoneSchema = z11.object({
+ type: z11.literal("response.output_item.done"),
+ output_index: z11.number(),
+ item: z11.discriminatedUnion("type", [
+ z11.object({
+ type: z11.literal("message")
  }),
- z10.object({
- type: z10.literal("function_call"),
- id: z10.string(),
- call_id: z10.string(),
- name: z10.string(),
- arguments: z10.string(),
- status: z10.literal("completed")
+ z11.object({
+ type: z11.literal("function_call"),
+ id: z11.string(),
+ call_id: z11.string(),
+ name: z11.string(),
+ arguments: z11.string(),
+ status: z11.literal("completed")
  })
  ])
  });
- var responseFunctionCallArgumentsDeltaSchema = z10.object({
- type: z10.literal("response.function_call_arguments.delta"),
- item_id: z10.string(),
- output_index: z10.number(),
- delta: z10.string()
+ var responseFunctionCallArgumentsDeltaSchema = z11.object({
+ type: z11.literal("response.function_call_arguments.delta"),
+ item_id: z11.string(),
+ output_index: z11.number(),
+ delta: z11.string()
  });
- var responseOutputItemAddedSchema = z10.object({
- type: z10.literal("response.output_item.added"),
- output_index: z10.number(),
- item: z10.discriminatedUnion("type", [
- z10.object({
- type: z10.literal("message")
+ var responseOutputItemAddedSchema = z11.object({
+ type: z11.literal("response.output_item.added"),
+ output_index: z11.number(),
+ item: z11.discriminatedUnion("type", [
+ z11.object({
+ type: z11.literal("message")
  }),
- z10.object({
- type: z10.literal("function_call"),
- id: z10.string(),
- call_id: z10.string(),
- name: z10.string(),
- arguments: z10.string()
+ z11.object({
+ type: z11.literal("function_call"),
+ id: z11.string(),
+ call_id: z11.string(),
+ name: z11.string(),
+ arguments: z11.string()
  })
  ])
  });
- var responseAnnotationAddedSchema = z10.object({
- type: z10.literal("response.output_text.annotation.added"),
- annotation: z10.object({
- type: z10.literal("url_citation"),
- url: z10.string(),
- title: z10.string()
+ var responseAnnotationAddedSchema = z11.object({
+ type: z11.literal("response.output_text.annotation.added"),
+ annotation: z11.object({
+ type: z11.literal("url_citation"),
+ url: z11.string(),
+ title: z11.string()
  })
  });
- var openaiResponsesChunkSchema = z10.union([
+ var responseReasoningSummaryTextDeltaSchema = z11.object({
+ type: z11.literal("response.reasoning_summary_text.delta"),
+ item_id: z11.string(),
+ output_index: z11.number(),
+ summary_index: z11.number(),
+ delta: z11.string()
+ });
+ var openaiResponsesChunkSchema = z11.union([
  textDeltaChunkSchema,
  responseFinishedChunkSchema,
  responseCreatedChunkSchema,
@@ -2446,7 +2417,8 @@ var openaiResponsesChunkSchema = z10.union([
  responseFunctionCallArgumentsDeltaSchema,
  responseOutputItemAddedSchema,
  responseAnnotationAddedSchema,
- z10.object({ type: z10.string() }).passthrough()
+ responseReasoningSummaryTextDeltaSchema,
+ z11.object({ type: z11.string() }).passthrough()
  // fallback for unknown chunks
  ]);
  function isTextDeltaChunk(chunk) {
@@ -2470,6 +2442,9 @@ function isResponseOutputItemAddedChunk(chunk) {
  function isResponseAnnotationAddedChunk(chunk) {
  return chunk.type === "response.output_text.annotation.added";
  }
+ function isResponseReasoningSummaryTextDeltaChunk(chunk) {
+ return chunk.type === "response.reasoning_summary_text.delta";
+ }
  function getResponsesModelConfig(modelId) {
  if (modelId.startsWith("o")) {
  if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {
@@ -2491,15 +2466,16 @@ function getResponsesModelConfig(modelId) {
  requiredAutoTruncation: false
  };
  }
- var openaiResponsesProviderOptionsSchema = z10.object({
- metadata: z10.any().nullish(),
- parallelToolCalls: z10.boolean().nullish(),
- previousResponseId: z10.string().nullish(),
- store: z10.boolean().nullish(),
- user: z10.string().nullish(),
- reasoningEffort: z10.string().nullish(),
- strictSchemas: z10.boolean().nullish(),
- instructions: z10.string().nullish()
+ var openaiResponsesProviderOptionsSchema = z11.object({
+ metadata: z11.any().nullish(),
+ parallelToolCalls: z11.boolean().nullish(),
+ previousResponseId: z11.string().nullish(),
+ store: z11.boolean().nullish(),
+ user: z11.string().nullish(),
+ reasoningEffort: z11.string().nullish(),
+ strictSchemas: z11.boolean().nullish(),
+ instructions: z11.string().nullish(),
+ reasoningSummary: z11.string().nullish()
  });
  export {
@@ -2509,7 +2485,9 @@ export {
  OpenAIResponsesLanguageModel,
  OpenAISpeechModel,
  OpenAITranscriptionModel,
+ hasDefaultResponseFormat,
  modelMaxImagesPerCall,
+ openaiCompletionProviderOptions,
  openaiEmbeddingProviderOptions,
  openaiProviderOptions
  };