@ai-sdk/openai 2.0.12 → 2.0.14

This range ships two user-visible changes to the Responses API model: a new `logprobs` provider option (wired through to the `top_logprobs` request field and the `message.output_text.logprobs` include flag, with results surfaced in provider metadata), and handling for `file_citation` annotations alongside the existing `url_citation` support (mapped to document sources).
@@ -233,6 +233,7 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
     include: z.ZodOptional<z.ZodNullable<z.ZodArray<z.ZodEnum<{
         "reasoning.encrypted_content": "reasoning.encrypted_content";
         "file_search_call.results": "file_search_call.results";
+        "message.output_text.logprobs": "message.output_text.logprobs";
     }>>>>;
     textVerbosity: z.ZodOptional<z.ZodNullable<z.ZodEnum<{
         low: "low";
@@ -241,6 +242,7 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
     }>>>;
     promptCacheKey: z.ZodOptional<z.ZodNullable<z.ZodString>>;
     safetyIdentifier: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+    logprobs: z.ZodOptional<z.ZodUnion<readonly [z.ZodBoolean, z.ZodNumber]>>;
 }, z.core.$strip>;
 type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;

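What this means for callers: the Responses provider options gain a `logprobs` flag that is either `true` (return logprobs for the generated tokens) or a number from 1 to 20 (return that many top alternatives per token). A minimal usage sketch, assuming the AI SDK's `generateText` entry point and the `openai.responses` model factory; the model id and prompt are illustrative:

    import { generateText } from "ai";
    import { openai } from "@ai-sdk/openai";

    const result = await generateText({
      model: openai.responses("gpt-4o-mini"), // any Responses API model
      prompt: "Is the sky blue? Answer yes or no.",
      providerOptions: {
        openai: { logprobs: 5 }, // true, or 1..20 top alternatives per token
      },
    });

    // Surfaced through provider metadata; see the implementation hunks below.
    console.log(result.providerMetadata?.openai?.logprobs);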
@@ -2446,6 +2446,19 @@ function prepareResponsesTools({
 }
 
 // src/responses/openai-responses-language-model.ts
+var TOP_LOGPROBS_MAX = 20;
+var LOGPROBS_SCHEMA = import_v416.z.array(
+  import_v416.z.object({
+    token: import_v416.z.string(),
+    logprob: import_v416.z.number(),
+    top_logprobs: import_v416.z.array(
+      import_v416.z.object({
+        token: import_v416.z.string(),
+        logprob: import_v416.z.number()
+      })
+    )
+  })
+);
 var OpenAIResponsesLanguageModel = class {
   constructor(modelId, config) {
     this.specificationVersion = "v2";
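`LOGPROBS_SCHEMA` validates the logprobs payload the Responses API attaches to `output_text` content. Rewritten as a standalone Zod schema (a sketch using a plain `zod` import instead of the bundle's `import_v416` alias):

    import { z } from "zod";

    // One entry per generated token; top_logprobs lists the ranked alternatives.
    const logprobsSchema = z.array(
      z.object({
        token: z.string(),
        logprob: z.number(), // natural log of the token's probability
        top_logprobs: z.array(
          z.object({ token: z.string(), logprob: z.number() })
        ),
      })
    );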
@@ -2509,6 +2522,8 @@ var OpenAIResponsesLanguageModel = class {
       schema: openaiResponsesProviderOptionsSchema
     });
     const strictJsonSchema = (_a = openaiOptions == null ? void 0 : openaiOptions.strictJsonSchema) != null ? _a : false;
+    const topLogprobs = typeof (openaiOptions == null ? void 0 : openaiOptions.logprobs) === "number" ? openaiOptions == null ? void 0 : openaiOptions.logprobs : (openaiOptions == null ? void 0 : openaiOptions.logprobs) === true ? TOP_LOGPROBS_MAX : void 0;
+    const openaiOptionsInclude = topLogprobs ? Array.isArray(openaiOptions == null ? void 0 : openaiOptions.include) ? [...openaiOptions == null ? void 0 : openaiOptions.include, "message.output_text.logprobs"] : ["message.output_text.logprobs"] : openaiOptions == null ? void 0 : openaiOptions.include;
     const baseArgs = {
       model: this.modelId,
       input: messages,
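De-minified, the two new lines normalize the option and force the matching `include` flag onto the request. Roughly equivalent logic, with the options object declared hypothetically:

    const TOP_LOGPROBS_MAX = 20;

    // Hypothetical parsed options object, as produced by the provider options schema.
    declare const openaiOptions:
      | { logprobs?: boolean | number; include?: string[] }
      | undefined;

    // true => request the cap; a number => request that many; otherwise off.
    const topLogprobs =
      typeof openaiOptions?.logprobs === "number"
        ? openaiOptions.logprobs
        : openaiOptions?.logprobs === true
          ? TOP_LOGPROBS_MAX
          : undefined;

    // Requesting logprobs also requires the include flag on the request.
    const include = topLogprobs
      ? [...(openaiOptions?.include ?? []), "message.output_text.logprobs"]
      : openaiOptions?.include;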
@@ -2539,9 +2554,10 @@ var OpenAIResponsesLanguageModel = class {
       user: openaiOptions == null ? void 0 : openaiOptions.user,
       instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
       service_tier: openaiOptions == null ? void 0 : openaiOptions.serviceTier,
-      include: openaiOptions == null ? void 0 : openaiOptions.include,
+      include: openaiOptionsInclude,
       prompt_cache_key: openaiOptions == null ? void 0 : openaiOptions.promptCacheKey,
       safety_identifier: openaiOptions == null ? void 0 : openaiOptions.safetyIdentifier,
+      top_logprobs: topLogprobs,
       // model-specific settings:
       ...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
         reasoning: {
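On the wire, enabling the option therefore adds two fields to the POST /responses body. An illustrative payload for `logprobs: 5` (model id and input are placeholders, not part of the diff):

    const body = {
      model: "gpt-4o-mini",
      input: [{ role: "user", content: "Is the sky blue?" }],
      include: ["message.output_text.logprobs"],
      top_logprobs: 5,
    };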
@@ -2625,7 +2641,7 @@ var OpenAIResponsesLanguageModel = class {
     };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f, _g, _h, _i;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n;
     const { args: body, warnings } = await this.getArgs(options);
     const url = this.config.url({
       path: "/responses",
@@ -2659,14 +2675,24 @@ var OpenAIResponsesLanguageModel = class {
         import_v416.z.object({
           type: import_v416.z.literal("output_text"),
           text: import_v416.z.string(),
+          logprobs: LOGPROBS_SCHEMA.nullish(),
           annotations: import_v416.z.array(
-            import_v416.z.object({
-              type: import_v416.z.literal("url_citation"),
-              start_index: import_v416.z.number(),
-              end_index: import_v416.z.number(),
-              url: import_v416.z.string(),
-              title: import_v416.z.string()
-            })
+            import_v416.z.discriminatedUnion("type", [
+              import_v416.z.object({
+                type: import_v416.z.literal("url_citation"),
+                start_index: import_v416.z.number(),
+                end_index: import_v416.z.number(),
+                url: import_v416.z.string(),
+                title: import_v416.z.string()
+              }),
+              import_v416.z.object({
+                type: import_v416.z.literal("file_citation"),
+                start_index: import_v416.z.number(),
+                end_index: import_v416.z.number(),
+                file_id: import_v416.z.string(),
+                quote: import_v416.z.string()
+              })
+            ])
           )
         })
       )
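The response schema for `output_text` parts now also captures optional `logprobs` and accepts both annotation variants via a discriminated union. Illustrative payloads that parse against the new union (all values hypothetical):

    const urlCitation = {
      type: "url_citation",
      start_index: 0,
      end_index: 42,
      url: "https://example.com/article",
      title: "Example article",
    };

    const fileCitation = {
      type: "file_citation",
      start_index: 0,
      end_index: 42,
      file_id: "file-abc123",
      quote: "the passage the model cited",
    };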
@@ -2736,6 +2762,7 @@ var OpenAIResponsesLanguageModel = class {
       });
     }
     const content = [];
+    const logprobs = [];
     for (const part of response.output) {
       switch (part.type) {
         case "reasoning": {
@@ -2758,6 +2785,9 @@ var OpenAIResponsesLanguageModel = class {
         }
         case "message": {
           for (const contentPart of part.content) {
+            if (((_c = (_b = options.providerOptions) == null ? void 0 : _b.openai) == null ? void 0 : _c.logprobs) && contentPart.logprobs) {
+              logprobs.push(contentPart.logprobs);
+            }
             content.push({
               type: "text",
               text: contentPart.text,
@@ -2768,13 +2798,24 @@ var OpenAIResponsesLanguageModel = class {
               }
             });
             for (const annotation of contentPart.annotations) {
-              content.push({
-                type: "source",
-                sourceType: "url",
-                id: (_d = (_c = (_b = this.config).generateId) == null ? void 0 : _c.call(_b)) != null ? _d : (0, import_provider_utils14.generateId)(),
-                url: annotation.url,
-                title: annotation.title
-              });
+              if (annotation.type === "url_citation") {
+                content.push({
+                  type: "source",
+                  sourceType: "url",
+                  id: (_f = (_e = (_d = this.config).generateId) == null ? void 0 : _e.call(_d)) != null ? _f : (0, import_provider_utils14.generateId)(),
+                  url: annotation.url,
+                  title: annotation.title
+                });
+              } else if (annotation.type === "file_citation") {
+                content.push({
+                  type: "source",
+                  sourceType: "document",
+                  id: (_i = (_h = (_g = this.config).generateId) == null ? void 0 : _h.call(_g)) != null ? _i : (0, import_provider_utils14.generateId)(),
+                  mediaType: "text/plain",
+                  title: annotation.quote,
+                  filename: annotation.file_id
+                });
+              }
             }
           }
           break;
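The mapping rule: `url_citation` annotations keep producing `sourceType: "url"` parts (only the hoisted temp variables changed), while the new `file_citation` branch emits `sourceType: "document"` parts that reuse the quote as the title and the file id as the filename, with a fixed `text/plain` media type. A self-contained sketch of the emitted part:

    import { generateId } from "@ai-sdk/provider-utils";

    // Hypothetical file_citation annotation taken from a parsed response.
    declare const annotation: {
      type: "file_citation";
      start_index: number;
      end_index: number;
      file_id: string;
      quote: string;
    };

    const documentSource = {
      type: "source",
      sourceType: "document",
      id: generateId(),        // config.generateId() takes precedence when set
      mediaType: "text/plain", // fixed by this mapping
      title: annotation.quote,
      filename: annotation.file_id,
    };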
@@ -2854,18 +2895,24 @@ var OpenAIResponsesLanguageModel = class {
         }
       }
     }
+    const providerMetadata = {
+      openai: { responseId: response.id }
+    };
+    if (logprobs.length > 0) {
+      providerMetadata.openai.logprobs = logprobs;
+    }
     return {
       content,
       finishReason: mapOpenAIResponseFinishReason({
-        finishReason: (_e = response.incomplete_details) == null ? void 0 : _e.reason,
+        finishReason: (_j = response.incomplete_details) == null ? void 0 : _j.reason,
         hasToolCalls: content.some((part) => part.type === "tool-call")
       }),
       usage: {
         inputTokens: response.usage.input_tokens,
         outputTokens: response.usage.output_tokens,
         totalTokens: response.usage.input_tokens + response.usage.output_tokens,
-        reasoningTokens: (_g = (_f = response.usage.output_tokens_details) == null ? void 0 : _f.reasoning_tokens) != null ? _g : void 0,
-        cachedInputTokens: (_i = (_h = response.usage.input_tokens_details) == null ? void 0 : _h.cached_tokens) != null ? _i : void 0
+        reasoningTokens: (_l = (_k = response.usage.output_tokens_details) == null ? void 0 : _k.reasoning_tokens) != null ? _l : void 0,
+        cachedInputTokens: (_n = (_m = response.usage.input_tokens_details) == null ? void 0 : _m.cached_tokens) != null ? _n : void 0
       },
       request: { body },
       response: {
@@ -2875,11 +2922,7 @@ var OpenAIResponsesLanguageModel = class {
         headers: responseHeaders,
         body: rawResponse
       },
-      providerMetadata: {
-        openai: {
-          responseId: response.id
-        }
-      },
+      providerMetadata,
       warnings
     };
   }
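Collected logprobs ride along on the result's provider metadata next to `responseId`. Since each `logprob` is a natural log, `Math.exp` recovers the probability. A consumer-side sketch, continuing the `generateText` example above and assuming the array-of-token-arrays shape of `LOGPROBS_SCHEMA`:

    const collected =
      (result.providerMetadata?.openai?.logprobs ?? []) as Array<
        Array<{ token: string; logprob: number }>
      >; // provider metadata is JSON-typed, hence the cast

    for (const tokens of collected) { // one entry per output_text part
      for (const { token, logprob } of tokens) {
        console.log(token, Math.exp(logprob).toFixed(4)); // probability in (0, 1]
      }
    }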
@@ -2909,6 +2952,7 @@ var OpenAIResponsesLanguageModel = class {
       outputTokens: void 0,
       totalTokens: void 0
     };
+    const logprobs = [];
     let responseId = null;
     const ongoingToolCalls = {};
     let hasToolCalls = false;
@@ -2920,7 +2964,7 @@ var OpenAIResponsesLanguageModel = class {
         controller.enqueue({ type: "stream-start", warnings });
       },
       transform(chunk, controller) {
-        var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
+        var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p;
         if (options.includeRawChunks) {
           controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
         }
@@ -3134,6 +3178,9 @@ var OpenAIResponsesLanguageModel = class {
             id: value.item_id,
             delta: value.delta
           });
+          if (value.logprobs) {
+            logprobs.push(value.logprobs);
+          }
         } else if (isResponseReasoningSummaryPartAddedChunk(value)) {
           if (value.summary_index > 0) {
             (_c = activeReasoning[value.item_id]) == null ? void 0 : _c.summaryParts.push(
@@ -3172,27 +3219,42 @@ var OpenAIResponsesLanguageModel = class {
           usage.reasoningTokens = (_h = (_g = value.response.usage.output_tokens_details) == null ? void 0 : _g.reasoning_tokens) != null ? _h : void 0;
           usage.cachedInputTokens = (_j = (_i = value.response.usage.input_tokens_details) == null ? void 0 : _i.cached_tokens) != null ? _j : void 0;
         } else if (isResponseAnnotationAddedChunk(value)) {
-          controller.enqueue({
-            type: "source",
-            sourceType: "url",
-            id: (_m = (_l = (_k = self.config).generateId) == null ? void 0 : _l.call(_k)) != null ? _m : (0, import_provider_utils14.generateId)(),
-            url: value.annotation.url,
-            title: value.annotation.title
-          });
+          if (value.annotation.type === "url_citation") {
+            controller.enqueue({
+              type: "source",
+              sourceType: "url",
+              id: (_m = (_l = (_k = self.config).generateId) == null ? void 0 : _l.call(_k)) != null ? _m : (0, import_provider_utils14.generateId)(),
+              url: value.annotation.url,
+              title: value.annotation.title
+            });
+          } else if (value.annotation.type === "file_citation") {
+            controller.enqueue({
+              type: "source",
+              sourceType: "document",
+              id: (_p = (_o = (_n = self.config).generateId) == null ? void 0 : _o.call(_n)) != null ? _p : (0, import_provider_utils14.generateId)(),
+              mediaType: "text/plain",
+              title: value.annotation.quote,
+              filename: value.annotation.file_id
+            });
+          }
         } else if (isErrorChunk(value)) {
          controller.enqueue({ type: "error", error: value });
         }
       },
       flush(controller) {
+        const providerMetadata = {
+          openai: {
+            responseId
+          }
+        };
+        if (logprobs.length > 0) {
+          providerMetadata.openai.logprobs = logprobs;
+        }
         controller.enqueue({
           type: "finish",
           finishReason,
           usage,
-          providerMetadata: {
-            openai: {
-              responseId
-            }
-          }
+          providerMetadata
         });
       }
     })
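The streaming path mirrors doGenerate: per-delta logprobs accumulate in the transform step and are attached to the final `finish` part, so they become readable once the stream settles. A sketch with the AI SDK's `streamText` (model id and prompt illustrative):

    import { streamText } from "ai";
    import { openai } from "@ai-sdk/openai";

    const stream = streamText({
      model: openai.responses("gpt-4o-mini"),
      prompt: "Name a primary color.",
      providerOptions: { openai: { logprobs: true } },
    });

    for await (const delta of stream.textStream) {
      process.stdout.write(delta);
    }

    // providerMetadata resolves after the finish part; logprobs only appear
    // when they were requested and the API returned any.
    console.log((await stream.providerMetadata)?.openai?.logprobs);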
@@ -3211,7 +3273,8 @@ var usageSchema2 = import_v416.z.object({
 var textDeltaChunkSchema = import_v416.z.object({
   type: import_v416.z.literal("response.output_text.delta"),
   item_id: import_v416.z.string(),
-  delta: import_v416.z.string()
+  delta: import_v416.z.string(),
+  logprobs: LOGPROBS_SCHEMA.nullish()
 });
 var errorChunkSchema = import_v416.z.object({
   type: import_v416.z.literal("error"),
@@ -3340,11 +3403,18 @@ var responseFunctionCallArgumentsDeltaSchema = import_v416.z.object({
 });
 var responseAnnotationAddedSchema = import_v416.z.object({
   type: import_v416.z.literal("response.output_text.annotation.added"),
-  annotation: import_v416.z.object({
-    type: import_v416.z.literal("url_citation"),
-    url: import_v416.z.string(),
-    title: import_v416.z.string()
-  })
+  annotation: import_v416.z.discriminatedUnion("type", [
+    import_v416.z.object({
+      type: import_v416.z.literal("url_citation"),
+      url: import_v416.z.string(),
+      title: import_v416.z.string()
+    }),
+    import_v416.z.object({
+      type: import_v416.z.literal("file_citation"),
+      file_id: import_v416.z.string(),
+      quote: import_v416.z.string()
+    })
+  ])
 });
 var responseReasoningSummaryPartAddedSchema = import_v416.z.object({
   type: import_v416.z.literal("response.reasoning_summary_part.added"),
@@ -3452,10 +3522,29 @@ var openaiResponsesProviderOptionsSchema = import_v416.z.object({
   instructions: import_v416.z.string().nullish(),
   reasoningSummary: import_v416.z.string().nullish(),
   serviceTier: import_v416.z.enum(["auto", "flex", "priority"]).nullish(),
-  include: import_v416.z.array(import_v416.z.enum(["reasoning.encrypted_content", "file_search_call.results"])).nullish(),
+  include: import_v416.z.array(
+    import_v416.z.enum([
+      "reasoning.encrypted_content",
+      "file_search_call.results",
+      "message.output_text.logprobs"
+    ])
+  ).nullish(),
   textVerbosity: import_v416.z.enum(["low", "medium", "high"]).nullish(),
   promptCacheKey: import_v416.z.string().nullish(),
-  safetyIdentifier: import_v416.z.string().nullish()
+  safetyIdentifier: import_v416.z.string().nullish(),
+  /**
+   * Return the log probabilities of the tokens.
+   *
+   * Setting to true will return the log probabilities of the tokens that
+   * were generated.
+   *
+   * Setting to a number will return the log probabilities of the top n
+   * tokens that were generated.
+   *
+   * @see https://platform.openai.com/docs/api-reference/responses/create
+   * @see https://cookbook.openai.com/examples/using_logprobs
+   */
+  logprobs: import_v416.z.union([import_v416.z.boolean(), import_v416.z.number().min(1).max(TOP_LOGPROBS_MAX)]).optional()
 });
 // Annotate the CommonJS export names for ESM import in node:
 0 && (module.exports = {
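Note that the number form is validated rather than clamped: values outside 1..20 are rejected when the provider options are parsed. A self-contained check against the same union shape (a sketch with a plain `zod` import):

    import { z } from "zod";

    const logprobsOption = z
      .union([z.boolean(), z.number().min(1).max(20)])
      .optional();

    logprobsOption.parse(true); // ok: requests the maximum (20)
    logprobsOption.parse(3);    // ok: top 3 alternatives per token
    logprobsOption.parse(30);   // throws: exceeds the max of 20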