@ai-sdk/openai 2.0.12 → 2.0.13

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,13 @@
  # @ai-sdk/openai
 
+ ## 2.0.13
+
+ ### Patch Changes
+
+ - ddc9d99: Implements `logprobs` for OpenAI `providerOptions` and `providerMetadata` in `OpenAIResponsesLanguageModel`
+
+   You can now set `providerOptions.openai.logprobs` when using `generateText()` and retrieve logprobs from the response via `result.providerMetadata?.openai`.
+
  ## 2.0.12
 
  ### Patch Changes
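A minimal usage sketch of the new option, following the changelog entry above (the model id and prompt are illustrative, not part of the package):

```ts
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

const result = await generateText({
  // openai.responses(...) targets the Responses API model this release changes.
  model: openai.responses('gpt-4o-mini'),
  prompt: 'Name three primary colors.',
  providerOptions: {
    openai: {
      // true requests the provider maximum (20); a number requests that many top logprobs.
      logprobs: 5,
    },
  },
});

// When the model returns logprobs, they are exposed via provider metadata.
const logprobs = result.providerMetadata?.openai?.logprobs;
```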
package/dist/index.d.mts CHANGED
@@ -161,6 +161,7 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
  include: z.ZodOptional<z.ZodNullable<z.ZodArray<z.ZodEnum<{
  "reasoning.encrypted_content": "reasoning.encrypted_content";
  "file_search_call.results": "file_search_call.results";
+ "message.output_text.logprobs": "message.output_text.logprobs";
  }>>>>;
  textVerbosity: z.ZodOptional<z.ZodNullable<z.ZodEnum<{
  low: "low";
@@ -169,6 +170,7 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
  }>>>;
  promptCacheKey: z.ZodOptional<z.ZodNullable<z.ZodString>>;
  safetyIdentifier: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+ logprobs: z.ZodOptional<z.ZodUnion<readonly [z.ZodBoolean, z.ZodNumber]>>;
  }, z.core.$strip>;
  type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;
 
package/dist/index.d.ts CHANGED
@@ -161,6 +161,7 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
  include: z.ZodOptional<z.ZodNullable<z.ZodArray<z.ZodEnum<{
  "reasoning.encrypted_content": "reasoning.encrypted_content";
  "file_search_call.results": "file_search_call.results";
+ "message.output_text.logprobs": "message.output_text.logprobs";
  }>>>>;
  textVerbosity: z.ZodOptional<z.ZodNullable<z.ZodEnum<{
  low: "low";
@@ -169,6 +170,7 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
  }>>>;
  promptCacheKey: z.ZodOptional<z.ZodNullable<z.ZodString>>;
  safetyIdentifier: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+ logprobs: z.ZodOptional<z.ZodUnion<readonly [z.ZodBoolean, z.ZodNumber]>>;
  }, z.core.$strip>;
  type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;
 
package/dist/index.js CHANGED
@@ -2127,6 +2127,19 @@ function prepareResponsesTools({
  }
 
  // src/responses/openai-responses-language-model.ts
+ var TOP_LOGPROBS_MAX = 20;
+ var LOGPROBS_SCHEMA = import_v413.z.array(
+ import_v413.z.object({
+ token: import_v413.z.string(),
+ logprob: import_v413.z.number(),
+ top_logprobs: import_v413.z.array(
+ import_v413.z.object({
+ token: import_v413.z.string(),
+ logprob: import_v413.z.number()
+ })
+ )
+ })
+ );
  var OpenAIResponsesLanguageModel = class {
  constructor(modelId, config) {
  this.specificationVersion = "v2";
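For readability, the shape that `LOGPROBS_SCHEMA` validates (and that later surfaces as `providerMetadata.openai.logprobs`) corresponds to roughly the following TypeScript type. The type names are illustrative; the package validates this shape with Zod but does not export these types:

```ts
// Illustrative names only; derived from LOGPROBS_SCHEMA above.
interface TopLogprob {
  token: string;
  logprob: number;
}

interface OutputTextLogprob {
  token: string;
  logprob: number;
  top_logprobs: TopLogprob[];
}

// LOGPROBS_SCHEMA validates an array of these entries, one per generated token.
type OutputTextLogprobs = OutputTextLogprob[];
```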
@@ -2190,6 +2203,8 @@ var OpenAIResponsesLanguageModel = class {
  schema: openaiResponsesProviderOptionsSchema
  });
  const strictJsonSchema = (_a = openaiOptions == null ? void 0 : openaiOptions.strictJsonSchema) != null ? _a : false;
+ const topLogprobs = typeof (openaiOptions == null ? void 0 : openaiOptions.logprobs) === "number" ? openaiOptions == null ? void 0 : openaiOptions.logprobs : (openaiOptions == null ? void 0 : openaiOptions.logprobs) === true ? TOP_LOGPROBS_MAX : void 0;
+ const openaiOptionsInclude = topLogprobs ? Array.isArray(openaiOptions == null ? void 0 : openaiOptions.include) ? [...openaiOptions == null ? void 0 : openaiOptions.include, "message.output_text.logprobs"] : ["message.output_text.logprobs"] : openaiOptions == null ? void 0 : openaiOptions.include;
  const baseArgs = {
  model: this.modelId,
  input: messages,
@@ -2220,9 +2235,10 @@ var OpenAIResponsesLanguageModel = class {
  user: openaiOptions == null ? void 0 : openaiOptions.user,
  instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
  service_tier: openaiOptions == null ? void 0 : openaiOptions.serviceTier,
- include: openaiOptions == null ? void 0 : openaiOptions.include,
+ include: openaiOptionsInclude,
  prompt_cache_key: openaiOptions == null ? void 0 : openaiOptions.promptCacheKey,
  safety_identifier: openaiOptions == null ? void 0 : openaiOptions.safetyIdentifier,
+ top_logprobs: topLogprobs,
  // model-specific settings:
  ...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
  reasoning: {
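The two minified expressions added above are easier to follow written out. This is a paraphrase of the bundled logic (not the package's original TypeScript source), showing how the `logprobs` provider option is mapped onto the Responses API request:

```ts
// Paraphrase of the bundle's request-argument mapping above.
const TOP_LOGPROBS_MAX = 20;

// `logprobs: true` requests the provider maximum; a number requests that many top logprobs.
const topLogprobs =
  typeof openaiOptions?.logprobs === 'number'
    ? openaiOptions.logprobs
    : openaiOptions?.logprobs === true
      ? TOP_LOGPROBS_MAX
      : undefined;

// When logprobs are requested, "message.output_text.logprobs" is appended to the
// `include` list (or becomes the list if none was provided).
const include = topLogprobs
  ? [...(openaiOptions?.include ?? []), 'message.output_text.logprobs']
  : openaiOptions?.include;

// The request body then carries both values: { ..., include, top_logprobs: topLogprobs }
```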
@@ -2306,7 +2322,7 @@ var OpenAIResponsesLanguageModel = class {
  };
  }
  async doGenerate(options) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _i;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k;
  const { args: body, warnings } = await this.getArgs(options);
  const url = this.config.url({
  path: "/responses",
@@ -2340,6 +2356,7 @@ var OpenAIResponsesLanguageModel = class {
  import_v413.z.object({
  type: import_v413.z.literal("output_text"),
  text: import_v413.z.string(),
+ logprobs: LOGPROBS_SCHEMA.nullish(),
  annotations: import_v413.z.array(
  import_v413.z.object({
  type: import_v413.z.literal("url_citation"),
@@ -2417,6 +2434,7 @@ var OpenAIResponsesLanguageModel = class {
  });
  }
  const content = [];
+ const logprobs = [];
  for (const part of response.output) {
  switch (part.type) {
  case "reasoning": {
@@ -2439,6 +2457,9 @@ var OpenAIResponsesLanguageModel = class {
  }
  case "message": {
  for (const contentPart of part.content) {
+ if (((_c = (_b = options.providerOptions) == null ? void 0 : _b.openai) == null ? void 0 : _c.logprobs) && contentPart.logprobs) {
+ logprobs.push(contentPart.logprobs);
+ }
  content.push({
  type: "text",
  text: contentPart.text,
@@ -2452,7 +2473,7 @@ var OpenAIResponsesLanguageModel = class {
  content.push({
  type: "source",
  sourceType: "url",
- id: (_d = (_c = (_b = this.config).generateId) == null ? void 0 : _c.call(_b)) != null ? _d : (0, import_provider_utils12.generateId)(),
+ id: (_f = (_e = (_d = this.config).generateId) == null ? void 0 : _e.call(_d)) != null ? _f : (0, import_provider_utils12.generateId)(),
  url: annotation.url,
  title: annotation.title
  });
@@ -2535,18 +2556,24 @@ var OpenAIResponsesLanguageModel = class {
  }
  }
  }
+ const providerMetadata = {
+ openai: { responseId: response.id }
+ };
+ if (logprobs.length > 0) {
+ providerMetadata.openai.logprobs = logprobs;
+ }
  return {
  content,
  finishReason: mapOpenAIResponseFinishReason({
- finishReason: (_e = response.incomplete_details) == null ? void 0 : _e.reason,
+ finishReason: (_g = response.incomplete_details) == null ? void 0 : _g.reason,
  hasToolCalls: content.some((part) => part.type === "tool-call")
  }),
  usage: {
  inputTokens: response.usage.input_tokens,
  outputTokens: response.usage.output_tokens,
  totalTokens: response.usage.input_tokens + response.usage.output_tokens,
- reasoningTokens: (_g = (_f = response.usage.output_tokens_details) == null ? void 0 : _f.reasoning_tokens) != null ? _g : void 0,
- cachedInputTokens: (_i = (_h = response.usage.input_tokens_details) == null ? void 0 : _h.cached_tokens) != null ? _i : void 0
+ reasoningTokens: (_i = (_h = response.usage.output_tokens_details) == null ? void 0 : _h.reasoning_tokens) != null ? _i : void 0,
+ cachedInputTokens: (_k = (_j = response.usage.input_tokens_details) == null ? void 0 : _j.cached_tokens) != null ? _k : void 0
  },
  request: { body },
  response: {
@@ -2556,11 +2583,7 @@ var OpenAIResponsesLanguageModel = class {
  headers: responseHeaders,
  body: rawResponse
  },
- providerMetadata: {
- openai: {
- responseId: response.id
- }
- },
+ providerMetadata,
  warnings
  };
  }
@@ -2590,6 +2613,7 @@ var OpenAIResponsesLanguageModel = class {
  outputTokens: void 0,
  totalTokens: void 0
  };
+ const logprobs = [];
  let responseId = null;
  const ongoingToolCalls = {};
  let hasToolCalls = false;
@@ -2815,6 +2839,9 @@ var OpenAIResponsesLanguageModel = class {
  id: value.item_id,
  delta: value.delta
  });
+ if (value.logprobs) {
+ logprobs.push(value.logprobs);
+ }
  } else if (isResponseReasoningSummaryPartAddedChunk(value)) {
  if (value.summary_index > 0) {
  (_c = activeReasoning[value.item_id]) == null ? void 0 : _c.summaryParts.push(
@@ -2865,15 +2892,19 @@ var OpenAIResponsesLanguageModel = class {
  }
  },
  flush(controller) {
+ const providerMetadata = {
+ openai: {
+ responseId
+ }
+ };
+ if (logprobs.length > 0) {
+ providerMetadata.openai.logprobs = logprobs;
+ }
  controller.enqueue({
  type: "finish",
  finishReason,
  usage,
- providerMetadata: {
- openai: {
- responseId
- }
- }
+ providerMetadata
  });
  }
  })
@@ -2892,7 +2923,8 @@ var usageSchema2 = import_v413.z.object({
  var textDeltaChunkSchema = import_v413.z.object({
  type: import_v413.z.literal("response.output_text.delta"),
  item_id: import_v413.z.string(),
- delta: import_v413.z.string()
+ delta: import_v413.z.string(),
+ logprobs: LOGPROBS_SCHEMA.nullish()
  });
  var errorChunkSchema = import_v413.z.object({
  type: import_v413.z.literal("error"),
@@ -3133,10 +3165,29 @@ var openaiResponsesProviderOptionsSchema = import_v413.z.object({
  instructions: import_v413.z.string().nullish(),
  reasoningSummary: import_v413.z.string().nullish(),
  serviceTier: import_v413.z.enum(["auto", "flex", "priority"]).nullish(),
- include: import_v413.z.array(import_v413.z.enum(["reasoning.encrypted_content", "file_search_call.results"])).nullish(),
+ include: import_v413.z.array(
+ import_v413.z.enum([
+ "reasoning.encrypted_content",
+ "file_search_call.results",
+ "message.output_text.logprobs"
+ ])
+ ).nullish(),
  textVerbosity: import_v413.z.enum(["low", "medium", "high"]).nullish(),
  promptCacheKey: import_v413.z.string().nullish(),
- safetyIdentifier: import_v413.z.string().nullish()
+ safetyIdentifier: import_v413.z.string().nullish(),
+ /**
+ * Return the log probabilities of the tokens.
+ *
+ * Setting to true will return the log probabilities of the tokens that
+ * were generated.
+ *
+ * Setting to a number will return the log probabilities of the top n
+ * tokens that were generated.
+ *
+ * @see https://platform.openai.com/docs/api-reference/responses/create
+ * @see https://cookbook.openai.com/examples/using_logprobs
+ */
+ logprobs: import_v413.z.union([import_v413.z.boolean(), import_v413.z.number().min(1).max(TOP_LOGPROBS_MAX)]).optional()
  });
 
  // src/speech/openai-speech-model.ts
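Since the streaming path above attaches the same metadata to the final `finish` stream part, logprobs should also be reachable when streaming. A hedged sketch, assuming the `ai` package version in use exposes `providerMetadata` on the stream result (check your version's types; the model id and prompt are illustrative):

```ts
import { openai } from '@ai-sdk/openai';
import { streamText } from 'ai';

const result = streamText({
  model: openai.responses('gpt-4o-mini'), // illustrative model id
  prompt: 'Write a haiku about probability.',
  providerOptions: {
    // true requests up to the provider maximum (20) top logprobs per token
    openai: { logprobs: true },
  },
});

for await (const chunk of result.textStream) {
  process.stdout.write(chunk);
}

// Assumption: the stream result resolves provider metadata from the `finish` part;
// `logprobs` is undefined if the model returned none.
const metadata = await result.providerMetadata;
console.log(metadata?.openai?.logprobs);
```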