@ai-sdk/openai 2.0.0-canary.10 → 2.0.0-canary.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,14 @@
  # @ai-sdk/openai
 
+ ## 2.0.0-canary.11
+
+ ### Patch Changes
+
+ - 8493141: feat (providers/openai): add support for reasoning summaries
+ - Updated dependencies [e86be6f]
+   - @ai-sdk/provider@2.0.0-canary.9
+   - @ai-sdk/provider-utils@3.0.0-canary.10
+
  ## 2.0.0-canary.10
 
  ### Patch Changes
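
The headline change is support for reasoning summaries on the OpenAI Responses API, exposed as a new `reasoningSummary` provider option (see the `dist` diffs below). A minimal usage sketch, assuming the AI SDK canary `generateText` API; the model id `'o4-mini'` and the value `'auto'` are illustrative — the option is typed as a plain string, and the provider forwards it as `reasoning.summary` in the request body:

```ts
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

const result = await generateText({
  // Reasoning summaries only apply to reasoning models.
  model: openai.responses('o4-mini'),
  providerOptions: {
    // Forwarded to the Responses API as `reasoning: { summary: 'auto' }`.
    openai: { reasoningSummary: 'auto' },
  },
  prompt: 'What is 37 * 43?',
});
```

The summary text comes back as reasoning content parts, as the new `case "reasoning"` branch in the index.js diff below shows.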
package/dist/index.d.mts CHANGED
@@ -34,18 +34,6 @@ interface OpenAICompletionSettings {
   */
  logitBias?: Record<number, number>;
  /**
- Return the log probabilities of the tokens. Including logprobs will increase
- the response size and can slow down response times. However, it can
- be useful to better understand how the model is behaving.
-
- Setting to true will return the log probabilities of the tokens that
- were generated.
-
- Setting to a number will return the log probabilities of the top n
- tokens that were generated.
- */
- logprobs?: boolean | number;
- /**
  The suffix that comes after a completion of inserted text.
  */
  suffix?: string;
@@ -236,6 +224,7 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
  reasoningEffort: z.ZodOptional<z.ZodNullable<z.ZodString>>;
  strictSchemas: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
  instructions: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+ reasoningSummary: z.ZodOptional<z.ZodNullable<z.ZodString>>;
  }, "strip", z.ZodTypeAny, {
  user?: string | null | undefined;
  parallelToolCalls?: boolean | null | undefined;
@@ -245,6 +234,7 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
  previousResponseId?: string | null | undefined;
  strictSchemas?: boolean | null | undefined;
  instructions?: string | null | undefined;
+ reasoningSummary?: string | null | undefined;
  }, {
  user?: string | null | undefined;
  parallelToolCalls?: boolean | null | undefined;
@@ -254,6 +244,7 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
  previousResponseId?: string | null | undefined;
  strictSchemas?: boolean | null | undefined;
  instructions?: string | null | undefined;
+ reasoningSummary?: string | null | undefined;
  }>;
  type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;
 
package/dist/index.d.ts CHANGED
@@ -34,18 +34,6 @@ interface OpenAICompletionSettings {
   */
  logitBias?: Record<number, number>;
  /**
- Return the log probabilities of the tokens. Including logprobs will increase
- the response size and can slow down response times. However, it can
- be useful to better understand how the model is behaving.
-
- Setting to true will return the log probabilities of the tokens that
- were generated.
-
- Setting to a number will return the log probabilities of the top n
- tokens that were generated.
- */
- logprobs?: boolean | number;
- /**
  The suffix that comes after a completion of inserted text.
  */
  suffix?: string;
@@ -236,6 +224,7 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
  reasoningEffort: z.ZodOptional<z.ZodNullable<z.ZodString>>;
  strictSchemas: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
  instructions: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+ reasoningSummary: z.ZodOptional<z.ZodNullable<z.ZodString>>;
  }, "strip", z.ZodTypeAny, {
  user?: string | null | undefined;
  parallelToolCalls?: boolean | null | undefined;
@@ -245,6 +234,7 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
  previousResponseId?: string | null | undefined;
  strictSchemas?: boolean | null | undefined;
  instructions?: string | null | undefined;
+ reasoningSummary?: string | null | undefined;
  }, {
  user?: string | null | undefined;
  parallelToolCalls?: boolean | null | undefined;
@@ -254,6 +244,7 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
  previousResponseId?: string | null | undefined;
  strictSchemas?: boolean | null | undefined;
  instructions?: string | null | undefined;
+ reasoningSummary?: string | null | undefined;
  }>;
  type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;
 
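Per the declarations above, `reasoningSummary` joins the other optional, nullable fields on the Responses provider options. A minimal sketch of a conforming value (assuming `OpenAIResponsesProviderOptions` is exported from the package; `'auto'` is illustrative, since the schema accepts any string):

```ts
import type { OpenAIResponsesProviderOptions } from '@ai-sdk/openai';

// All fields are optional and nullable, per the ZodObject shape above.
const options: OpenAIResponsesProviderOptions = {
  reasoningEffort: 'medium',
  reasoningSummary: 'auto', // new in 2.0.0-canary.11
  strictSchemas: true,
};
```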
package/dist/index.js CHANGED
@@ -211,19 +211,6 @@ function getResponseMetadata({
  };
  }
 
- // src/map-openai-chat-logprobs.ts
- function mapOpenAIChatLogProbsOutput(logprobs) {
- var _a, _b;
- return (_b = (_a = logprobs == null ? void 0 : logprobs.content) == null ? void 0 : _a.map(({ token, logprob, top_logprobs }) => ({
- token,
- logprob,
- topLogprobs: top_logprobs ? top_logprobs.map(({ token: token2, logprob: logprob2 }) => ({
- token: token2,
- logprob: logprob2
- })) : []
- }))) != null ? _b : void 0;
- }
-
  // src/map-openai-finish-reason.ts
  function mapOpenAIFinishReason(finishReason) {
  switch (finishReason) {
@@ -251,16 +238,6 @@ var openaiProviderOptions = import_zod.z.object({
  * the GPT tokenizer) to an associated bias value from -100 to 100.
  */
  logitBias: import_zod.z.record(import_zod.z.coerce.number(), import_zod.z.number()).optional(),
- /**
- * Return the log probabilities of the tokens.
- *
- * Setting to true will return the log probabilities of the tokens that
- * were generated.
- *
- * Setting to a number will return the log probabilities of the top n
- * tokens that were generated.
- */
- logprobs: import_zod.z.union([import_zod.z.boolean(), import_zod.z.number()]).optional(),
  /**
  * Whether to enable parallel function calling during tool use. Default to true.
  */
@@ -431,8 +408,6 @@ var OpenAIChatLanguageModel = class {
  model: this.modelId,
  // model specific settings:
  logit_bias: openaiOptions.logitBias,
- logprobs: openaiOptions.logprobs === true || typeof openaiOptions.logprobs === "number" ? true : void 0,
- top_logprobs: typeof openaiOptions.logprobs === "number" ? openaiOptions.logprobs : typeof openaiOptions.logprobs === "boolean" ? openaiOptions.logprobs ? 0 : void 0 : void 0,
  user: openaiOptions.user,
  parallel_tool_calls: openaiOptions.parallelToolCalls,
  // standardized settings:
@@ -505,20 +480,6 @@ var OpenAIChatLanguageModel = class {
  message: "logitBias is not supported for reasoning models"
  });
  }
- if (baseArgs.logprobs != null) {
- baseArgs.logprobs = void 0;
- warnings.push({
- type: "other",
- message: "logprobs is not supported for reasoning models"
- });
- }
- if (baseArgs.top_logprobs != null) {
- baseArgs.top_logprobs = void 0;
- warnings.push({
- type: "other",
- message: "topLogprobs is not supported for reasoning models"
- });
- }
  if (baseArgs.max_tokens != null) {
  if (baseArgs.max_completion_tokens == null) {
  baseArgs.max_completion_tokens = baseArgs.max_tokens;
@@ -618,7 +579,6 @@ var OpenAIChatLanguageModel = class {
  body: rawResponse
  },
  warnings,
- logprobs: mapOpenAIChatLogProbsOutput(choice.logprobs),
  providerMetadata
  };
  }
@@ -651,7 +611,6 @@ var OpenAIChatLanguageModel = class {
  inputTokens: void 0,
  outputTokens: void 0
  };
- let logprobs;
  let isFirstChunk = true;
  const providerMetadata = { openai: {} };
  return {
@@ -716,13 +675,6 @@ var OpenAIChatLanguageModel = class {
  text: delta.content
  });
  }
- const mappedLogprobs = mapOpenAIChatLogProbsOutput(
- choice == null ? void 0 : choice.logprobs
- );
- if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
- if (logprobs === void 0) logprobs = [];
- logprobs.push(...mappedLogprobs);
- }
  if (delta.tool_calls != null) {
  for (const toolCallDelta of delta.tool_calls) {
  const index = toolCallDelta.index;
@@ -809,7 +761,6 @@ var OpenAIChatLanguageModel = class {
  controller.enqueue({
  type: "finish",
  finishReason,
- logprobs,
  usage,
  ...providerMetadata != null ? { providerMetadata } : {}
  });
@@ -854,20 +805,6 @@ var openaiChatResponseSchema = import_zod3.z.object({
  ).nullish()
  }),
  index: import_zod3.z.number(),
- logprobs: import_zod3.z.object({
- content: import_zod3.z.array(
- import_zod3.z.object({
- token: import_zod3.z.string(),
- logprob: import_zod3.z.number(),
- top_logprobs: import_zod3.z.array(
- import_zod3.z.object({
- token: import_zod3.z.string(),
- logprob: import_zod3.z.number()
- })
- )
- })
- ).nullable()
- }).nullish(),
  finish_reason: import_zod3.z.string().nullish()
  })
  ),
@@ -895,20 +832,6 @@ var openaiChatChunkSchema = import_zod3.z.union([
  })
  ).nullish()
  }).nullish(),
- logprobs: import_zod3.z.object({
- content: import_zod3.z.array(
- import_zod3.z.object({
- token: import_zod3.z.string(),
- logprob: import_zod3.z.number(),
- top_logprobs: import_zod3.z.array(
- import_zod3.z.object({
- token: import_zod3.z.string(),
- logprob: import_zod3.z.number()
- })
- )
- })
- ).nullable()
- }).nullish(),
  finish_reason: import_zod3.z.string().nullable().optional(),
  index: import_zod3.z.number()
  })
@@ -1031,20 +954,6 @@ ${user}:`]
  };
  }
 
- // src/map-openai-completion-logprobs.ts
- function mapOpenAICompletionLogProbs(logprobs) {
- return logprobs == null ? void 0 : logprobs.tokens.map((token, index) => ({
- token,
- logprob: logprobs.token_logprobs[index],
- topLogprobs: logprobs.top_logprobs ? Object.entries(logprobs.top_logprobs[index]).map(
- ([token2, logprob]) => ({
- token: token2,
- logprob
- })
- ) : []
- }));
- }
-
  // src/openai-completion-language-model.ts
  var OpenAICompletionLanguageModel = class {
  constructor(modelId, settings, config) {
@@ -1102,7 +1011,6 @@ var OpenAICompletionLanguageModel = class {
  // model specific settings:
  echo: this.settings.echo,
  logit_bias: this.settings.logitBias,
- logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
  suffix: this.settings.suffix,
  user: this.settings.user,
  // standardized settings:
@@ -1148,7 +1056,6 @@ var OpenAICompletionLanguageModel = class {
  outputTokens: response.usage.completion_tokens
  },
  finishReason: mapOpenAIFinishReason(choice.finish_reason),
- logprobs: mapOpenAICompletionLogProbs(choice.logprobs),
  request: { body: args },
  response: {
  ...getResponseMetadata(response),
@@ -1185,7 +1092,6 @@ var OpenAICompletionLanguageModel = class {
  inputTokens: void 0,
  outputTokens: void 0
  };
- let logprobs;
  let isFirstChunk = true;
  return {
  stream: response.pipeThrough(
@@ -1226,19 +1132,11 @@ var OpenAICompletionLanguageModel = class {
  text: choice.text
  });
  }
- const mappedLogprobs = mapOpenAICompletionLogProbs(
- choice == null ? void 0 : choice.logprobs
- );
- if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
- if (logprobs === void 0) logprobs = [];
- logprobs.push(...mappedLogprobs);
- }
  },
  flush(controller) {
  controller.enqueue({
  type: "finish",
  finishReason,
- logprobs,
  usage
  });
  }
@@ -1256,12 +1154,7 @@ var openaiCompletionResponseSchema = import_zod4.z.object({
  choices: import_zod4.z.array(
  import_zod4.z.object({
  text: import_zod4.z.string(),
- finish_reason: import_zod4.z.string(),
- logprobs: import_zod4.z.object({
- tokens: import_zod4.z.array(import_zod4.z.string()),
- token_logprobs: import_zod4.z.array(import_zod4.z.number()),
- top_logprobs: import_zod4.z.array(import_zod4.z.record(import_zod4.z.string(), import_zod4.z.number())).nullable()
- }).nullish()
+ finish_reason: import_zod4.z.string()
  })
  ),
  usage: import_zod4.z.object({
@@ -1278,12 +1171,7 @@ var openaiCompletionChunkSchema = import_zod4.z.union([
  import_zod4.z.object({
  text: import_zod4.z.string(),
  finish_reason: import_zod4.z.string().nullish(),
- index: import_zod4.z.number(),
- logprobs: import_zod4.z.object({
- tokens: import_zod4.z.array(import_zod4.z.string()),
- token_logprobs: import_zod4.z.array(import_zod4.z.number()),
- top_logprobs: import_zod4.z.array(import_zod4.z.record(import_zod4.z.string(), import_zod4.z.number())).nullable()
- }).nullish()
+ index: import_zod4.z.number()
  })
  ),
  usage: import_zod4.z.object({
@@ -1960,8 +1848,15 @@ var OpenAIResponsesLanguageModel = class {
  user: openaiOptions == null ? void 0 : openaiOptions.user,
  instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
  // model-specific settings:
- ...modelConfig.isReasoningModel && (openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
- reasoning: { effort: openaiOptions == null ? void 0 : openaiOptions.reasoningEffort }
+ ...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
+ reasoning: {
+ ...(openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
+ effort: openaiOptions.reasoningEffort
+ },
+ ...(openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null && {
+ summary: openaiOptions.reasoningSummary
+ }
+ }
  },
  ...modelConfig.requiredAutoTruncation && {
  truncation: "auto"
@@ -2057,7 +1952,13 @@ var OpenAIResponsesLanguageModel = class {
  type: import_zod10.z.literal("computer_call")
  }),
  import_zod10.z.object({
- type: import_zod10.z.literal("reasoning")
+ type: import_zod10.z.literal("reasoning"),
+ summary: import_zod10.z.array(
+ import_zod10.z.object({
+ type: import_zod10.z.literal("summary_text"),
+ text: import_zod10.z.string()
+ })
+ )
  })
  ])
  ),
@@ -2071,6 +1972,14 @@ var OpenAIResponsesLanguageModel = class {
  const content = [];
  for (const part of response.output) {
  switch (part.type) {
+ case "reasoning": {
+ content.push({
+ type: "reasoning",
+ reasoningType: "text",
+ text: part.summary.map((summary) => summary.text).join()
+ });
+ break;
+ }
  case "message": {
  for (const contentPart of part.content) {
  content.push({
@@ -2211,6 +2120,12 @@ var OpenAIResponsesLanguageModel = class {
  type: "text",
  text: value.delta
  });
+ } else if (isResponseReasoningSummaryTextDeltaChunk(value)) {
+ controller.enqueue({
+ type: "reasoning",
+ reasoningType: "text",
+ text: value.delta
+ });
  } else if (isResponseOutputItemDoneChunk(value) && value.item.type === "function_call") {
  ongoingToolCalls[value.output_index] = void 0;
  hasToolCalls = true;
@@ -2335,6 +2250,13 @@ var responseAnnotationAddedSchema = import_zod10.z.object({
  title: import_zod10.z.string()
  })
  });
+ var responseReasoningSummaryTextDeltaSchema = import_zod10.z.object({
+ type: import_zod10.z.literal("response.reasoning_summary_text.delta"),
+ item_id: import_zod10.z.string(),
+ output_index: import_zod10.z.number(),
+ summary_index: import_zod10.z.number(),
+ delta: import_zod10.z.string()
+ });
  var openaiResponsesChunkSchema = import_zod10.z.union([
  textDeltaChunkSchema,
  responseFinishedChunkSchema,
@@ -2343,6 +2265,7 @@ var openaiResponsesChunkSchema = import_zod10.z.union([
  responseFunctionCallArgumentsDeltaSchema,
  responseOutputItemAddedSchema,
  responseAnnotationAddedSchema,
+ responseReasoningSummaryTextDeltaSchema,
  import_zod10.z.object({ type: import_zod10.z.string() }).passthrough()
  // fallback for unknown chunks
  ]);
@@ -2367,6 +2290,9 @@ function isResponseOutputItemAddedChunk(chunk) {
  function isResponseAnnotationAddedChunk(chunk) {
  return chunk.type === "response.output_text.annotation.added";
  }
+ function isResponseReasoningSummaryTextDeltaChunk(chunk) {
+ return chunk.type === "response.reasoning_summary_text.delta";
+ }
  function getResponsesModelConfig(modelId) {
  if (modelId.startsWith("o")) {
  if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {
@@ -2396,7 +2322,8 @@ var openaiResponsesProviderOptionsSchema = import_zod10.z.object({
  user: import_zod10.z.string().nullish(),
  reasoningEffort: import_zod10.z.string().nullish(),
  strictSchemas: import_zod10.z.boolean().nullish(),
- instructions: import_zod10.z.string().nullish()
+ instructions: import_zod10.z.string().nullish(),
+ reasoningSummary: import_zod10.z.string().nullish()
  });
 
  // src/openai-speech-model.ts
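
On the streaming side, `response.reasoning_summary_text.delta` chunks are validated by the new `responseReasoningSummaryTextDeltaSchema` and re-emitted as `reasoning` stream parts. A sketch of consuming them, assuming the canary `streamText` API surfaces these parts on `fullStream` with a `text` field (the exact part shape in the `ai` package may differ, and the model id is illustrative):

```ts
import { openai } from '@ai-sdk/openai';
import { streamText } from 'ai';

const result = streamText({
  model: openai.responses('o4-mini'), // illustrative model id
  providerOptions: {
    openai: { reasoningSummary: 'auto' },
  },
  prompt: 'Why is the sky blue?',
});

for await (const part of result.fullStream) {
  if (part.type === 'reasoning') {
    // Each part carries one summary-text delta.
    process.stdout.write(part.text);
  }
}
```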