@ai-sdk/openai 2.0.0-canary.14 → 2.0.0-canary.15

package/CHANGELOG.md CHANGED
@@ -1,5 +1,36 @@
  # @ai-sdk/openai
 
+ ## 2.0.0-canary.15
+
+ ### Patch Changes
+
+ - 136819b: chore(providers/openai): re-introduce logprobs as providerMetadata
+ - 9bd5ab5: feat (provider): add providerMetadata to ImageModelV2 interface (#5977)
+
+   The `experimental_generateImage` method from the `ai` package now returns revised prompts for OpenAI's image models.
+
+   ```js
+   const prompt = 'Santa Claus driving a Cadillac';
+
+   const { providerMetadata } = await experimental_generateImage({
+     model: openai.image('dall-e-3'),
+     prompt,
+   });
+
+   const revisedPrompt = providerMetadata.openai.images[0]?.revisedPrompt;
+
+   console.log({
+     prompt,
+     revisedPrompt,
+   });
+   ```
+
+ - 284353f: fix(providers/openai): zod parse error with function
+ - Updated dependencies [957b739]
+ - Updated dependencies [9bd5ab5]
+   - @ai-sdk/provider-utils@3.0.0-canary.14
+   - @ai-sdk/provider@2.0.0-canary.13
+
  ## 2.0.0-canary.14
 
  ### Patch Changes
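
The `136819b` entry above re-introduces logprobs through `providerMetadata`. A minimal consumption sketch, assuming the AI SDK v5 canary `generateText` API; the model id and option values are illustrative, not taken from this diff:

```js
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const { providerMetadata } = await generateText({
  model: openai('gpt-4o-mini'), // illustrative model id
  prompt: 'Answer in one word: is water wet?',
  providerOptions: {
    // true = logprobs for the generated tokens; a number = top-n alternatives per token
    openai: { logprobs: 2 },
  },
});

// Populated from `choice.logprobs.content` by the doGenerate change below.
console.log(providerMetadata?.openai?.logprobs);
```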
package/dist/index.js CHANGED
@@ -238,6 +238,16 @@ var openaiProviderOptions = import_zod.z.object({
   * the GPT tokenizer) to an associated bias value from -100 to 100.
   */
  logitBias: import_zod.z.record(import_zod.z.coerce.number(), import_zod.z.number()).optional(),
+ /**
+  * Return the log probabilities of the tokens.
+  *
+  * Setting to true will return the log probabilities of the tokens that
+  * were generated.
+  *
+  * Setting to a number will return the log probabilities of the top n
+  * tokens that were generated.
+  */
+ logprobs: import_zod.z.union([import_zod.z.boolean(), import_zod.z.number()]).optional(),
  /**
   * Whether to enable parallel function calling during tool use. Default to true.
   */
@@ -412,6 +422,8 @@ var OpenAIChatLanguageModel = class {
  model: this.modelId,
  // model specific settings:
  logit_bias: openaiOptions.logitBias,
+ logprobs: openaiOptions.logprobs === true || typeof openaiOptions.logprobs === "number" ? true : void 0,
+ top_logprobs: typeof openaiOptions.logprobs === "number" ? openaiOptions.logprobs : typeof openaiOptions.logprobs === "boolean" ? openaiOptions.logprobs ? 0 : void 0 : void 0,
  user: openaiOptions.user,
  parallel_tool_calls: openaiOptions.parallelToolCalls,
  // standardized settings:
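
The two added lines above fold the single user-facing `logprobs` option (boolean or number) into OpenAI's separate `logprobs`/`top_logprobs` request parameters. The same logic, unrolled into a hypothetical helper for readability:

```js
// Hypothetical helper; mirrors the ternaries in the hunk above.
function mapChatLogprobs(logprobs) {
  if (typeof logprobs === 'number') {
    // number n: enable logprobs and request the top n alternatives per token
    return { logprobs: true, top_logprobs: logprobs };
  }
  if (logprobs === true) {
    // true: enable logprobs with top_logprobs of 0 (no alternatives)
    return { logprobs: true, top_logprobs: 0 };
  }
  // false or undefined: omit both fields from the request
  return { logprobs: undefined, top_logprobs: undefined };
}
```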
@@ -484,6 +496,20 @@ var OpenAIChatLanguageModel = class {
  message: "logitBias is not supported for reasoning models"
  });
  }
+ if (baseArgs.logprobs != null) {
+   baseArgs.logprobs = void 0;
+   warnings.push({
+     type: "other",
+     message: "logprobs is not supported for reasoning models"
+   });
+ }
+ if (baseArgs.top_logprobs != null) {
+   baseArgs.top_logprobs = void 0;
+   warnings.push({
+     type: "other",
+     message: "topLogprobs is not supported for reasoning models"
+   });
+ }
  if (baseArgs.max_tokens != null) {
  if (baseArgs.max_completion_tokens == null) {
  baseArgs.max_completion_tokens = baseArgs.max_tokens;
@@ -519,7 +545,7 @@ var OpenAIChatLanguageModel = class {
  };
  }
  async doGenerate(options) {
- var _a, _b, _c, _d, _e, _f, _g, _h;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i;
  const { args: body, warnings } = await this.getArgs(options);
  const {
  responseHeaders,
@@ -569,12 +595,15 @@ var OpenAIChatLanguageModel = class {
  if ((promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null) {
  providerMetadata.openai.cachedPromptTokens = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens;
  }
+ if (((_e = choice.logprobs) == null ? void 0 : _e.content) != null) {
+   providerMetadata.openai.logprobs = choice.logprobs.content;
+ }
  return {
  content,
  finishReason: mapOpenAIFinishReason(choice.finish_reason),
  usage: {
- inputTokens: (_f = (_e = response.usage) == null ? void 0 : _e.prompt_tokens) != null ? _f : void 0,
- outputTokens: (_h = (_g = response.usage) == null ? void 0 : _g.completion_tokens) != null ? _h : void 0
+ inputTokens: (_g = (_f = response.usage) == null ? void 0 : _f.prompt_tokens) != null ? _g : void 0,
+ outputTokens: (_i = (_h = response.usage) == null ? void 0 : _h.completion_tokens) != null ? _i : void 0
  },
  request: { body },
  response: {
@@ -809,6 +838,20 @@ var openaiChatResponseSchema = import_zod3.z.object({
  ).nullish()
  }),
  index: import_zod3.z.number(),
+ logprobs: import_zod3.z.object({
+   content: import_zod3.z.array(
+     import_zod3.z.object({
+       token: import_zod3.z.string(),
+       logprob: import_zod3.z.number(),
+       top_logprobs: import_zod3.z.array(
+         import_zod3.z.object({
+           token: import_zod3.z.string(),
+           logprob: import_zod3.z.number()
+         })
+       )
+     })
+   ).nullish()
+ }).nullish(),
  finish_reason: import_zod3.z.string().nullish()
  })
  ),
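
Per the schema added above, each element of `providerMetadata.openai.logprobs` pairs a token with its log probability and the top alternatives. A sketch of the shape with invented values:

```js
// Illustrative only; tokens and numbers are made up.
const logprobsEntry = {
  token: 'Yes',
  logprob: -0.12,
  top_logprobs: [
    { token: 'Yes', logprob: -0.12 },
    { token: 'No', logprob: -2.3 },
  ],
};
```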
@@ -828,7 +871,7 @@ var openaiChatChunkSchema = import_zod3.z.union([
  import_zod3.z.object({
  index: import_zod3.z.number(),
  id: import_zod3.z.string().nullish(),
- type: import_zod3.z.literal("function").optional(),
+ type: import_zod3.z.literal("function").nullish(),
  function: import_zod3.z.object({
  name: import_zod3.z.string().nullish(),
  arguments: import_zod3.z.string().nullish()
@@ -836,7 +879,7 @@ var openaiChatChunkSchema = import_zod3.z.union([
  })
  ).nullish()
  }).nullish(),
- finish_reason: import_zod3.z.string().nullable().optional(),
+ finish_reason: import_zod3.z.string().nullish(),
  index: import_zod3.z.number()
  })
  ),
@@ -996,7 +1039,17 @@ var openaiCompletionProviderOptions = import_zod4.z.object({
  A unique identifier representing your end-user, which can help OpenAI to
  monitor and detect abuse. Learn more.
  */
- user: import_zod4.z.string().optional()
+ user: import_zod4.z.string().optional(),
+ /**
+ Return the log probabilities of the tokens. Including logprobs will increase
+ the response size and can slow down response times. However, it can
+ be useful to better understand how the model is behaving.
+ Setting to true will return the log probabilities of the tokens that
+ were generated.
+ Setting to a number will return the log probabilities of the top n
+ tokens that were generated.
+ */
+ logprobs: import_zod4.z.union([import_zod4.z.boolean(), import_zod4.z.number()]).optional()
  });
 
  // src/openai-completion-language-model.ts
@@ -1068,6 +1121,7 @@ var OpenAICompletionLanguageModel = class {
  // model specific settings:
  echo: openaiOptions.echo,
  logit_bias: openaiOptions.logitBias,
+ logprobs: (openaiOptions == null ? void 0 : openaiOptions.logprobs) === true ? 0 : (openaiOptions == null ? void 0 : openaiOptions.logprobs) === false ? void 0 : openaiOptions == null ? void 0 : openaiOptions.logprobs,
  suffix: openaiOptions.suffix,
  user: openaiOptions.user,
  // standardized settings:
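
Unlike the chat endpoint, the legacy completions endpoint takes a single integer `logprobs` (the number of alternatives to return), so the added line maps `true` to `0`, `false` to `undefined`, and passes numbers through. Unrolled as a hypothetical helper:

```js
// Hypothetical helper; mirrors the nested conditional in the hunk above.
function mapCompletionLogprobs(logprobs) {
  if (logprobs === true) return 0;          // logprobs without alternatives
  if (logprobs === false) return undefined; // omit the field entirely
  return logprobs;                          // a number (or undefined) passes through
}
```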
@@ -1106,6 +1160,10 @@ var OpenAICompletionLanguageModel = class {
  fetch: this.config.fetch
  });
  const choice = response.choices[0];
+ const providerMetadata = { openai: {} };
+ if (choice.logprobs != null) {
+   providerMetadata.openai.logprobs = choice.logprobs;
+ }
  return {
  content: [{ type: "text", text: choice.text }],
  usage: {
@@ -1119,6 +1177,7 @@ var OpenAICompletionLanguageModel = class {
  headers: responseHeaders,
  body: rawResponse
  },
+ providerMetadata,
  warnings
  };
  }
@@ -1145,6 +1204,7 @@ var OpenAICompletionLanguageModel = class {
  fetch: this.config.fetch
  });
  let finishReason = "unknown";
+ const providerMetadata = { openai: {} };
  const usage = {
  inputTokens: void 0,
  outputTokens: void 0
@@ -1183,6 +1243,9 @@ var OpenAICompletionLanguageModel = class {
  if ((choice == null ? void 0 : choice.finish_reason) != null) {
  finishReason = mapOpenAIFinishReason(choice.finish_reason);
  }
+ if ((choice == null ? void 0 : choice.logprobs) != null) {
+   providerMetadata.openai.logprobs = choice.logprobs;
+ }
  if ((choice == null ? void 0 : choice.text) != null) {
  controller.enqueue({
  type: "text",
@@ -1194,6 +1257,7 @@ var OpenAICompletionLanguageModel = class {
  controller.enqueue({
  type: "finish",
  finishReason,
+ providerMetadata,
  usage
  });
  }
@@ -1211,7 +1275,12 @@ var openaiCompletionResponseSchema = import_zod5.z.object({
  choices: import_zod5.z.array(
  import_zod5.z.object({
  text: import_zod5.z.string(),
- finish_reason: import_zod5.z.string()
+ finish_reason: import_zod5.z.string(),
+ logprobs: import_zod5.z.object({
+   tokens: import_zod5.z.array(import_zod5.z.string()),
+   token_logprobs: import_zod5.z.array(import_zod5.z.number()),
+   top_logprobs: import_zod5.z.array(import_zod5.z.record(import_zod5.z.string(), import_zod5.z.number())).nullish()
+ }).nullish()
  })
  ),
  usage: import_zod5.z.object({
@@ -1228,7 +1297,12 @@ var openaiCompletionChunkSchema = import_zod5.z.union([
  import_zod5.z.object({
  text: import_zod5.z.string(),
  finish_reason: import_zod5.z.string().nullish(),
- index: import_zod5.z.number()
+ index: import_zod5.z.number(),
+ logprobs: import_zod5.z.object({
+   tokens: import_zod5.z.array(import_zod5.z.string()),
+   token_logprobs: import_zod5.z.array(import_zod5.z.number()),
+   top_logprobs: import_zod5.z.array(import_zod5.z.record(import_zod5.z.string(), import_zod5.z.number())).nullish()
+ }).nullish()
  })
  ),
  usage: import_zod5.z.object({
@@ -1405,12 +1479,23 @@ var OpenAIImageModel = class {
  timestamp: currentDate,
  modelId: this.modelId,
  headers: responseHeaders
+ },
+ providerMetadata: {
+   openai: {
+     images: response.data.map(
+       (item) => item.revised_prompt ? {
+         revisedPrompt: item.revised_prompt
+       } : null
+     )
+   }
  }
  };
  }
  };
  var openaiImageResponseSchema = import_zod8.z.object({
- data: import_zod8.z.array(import_zod8.z.object({ b64_json: import_zod8.z.string() }))
+ data: import_zod8.z.array(
+   import_zod8.z.object({ b64_json: import_zod8.z.string(), revised_prompt: import_zod8.z.string().optional() })
+ )
  });
 
  // src/openai-tools.ts