@ai-sdk/openai 1.3.15 → 1.3.17

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -1043,7 +1043,7 @@ var openaiChatChunkSchema = z2.union([
  openaiErrorDataSchema
  ]);
  function isReasoningModel(modelId) {
- return modelId === "o1" || modelId.startsWith("o1-") || modelId === "o3" || modelId.startsWith("o3-") || modelId.startsWith("o4-");
+ return modelId.startsWith("o");
  }
  function isAudioModel(modelId) {
  return modelId.startsWith("gpt-4o-audio-preview");
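Note on the isReasoningModel change above: the explicit o1/o3/o4 allow-list is replaced by a single prefix check, so any model id beginning with "o" (including future reasoning models) is treated as a reasoning model. A minimal sketch of the difference, using illustrative model ids:

    // Sketch only; the model ids below are examples, not an exhaustive list.
    const isReasoningModelOld = (modelId: string) =>
      modelId === "o1" || modelId.startsWith("o1-") ||
      modelId === "o3" || modelId.startsWith("o3-") ||
      modelId.startsWith("o4-");
    const isReasoningModelNew = (modelId: string) => modelId.startsWith("o");

    for (const id of ["o1", "o3-mini", "o4-mini", "gpt-4o"]) {
      console.log(id, isReasoningModelOld(id), isReasoningModelNew(id));
    }
    // "gpt-4o" starts with "g", so it is still excluded by both variants.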
@@ -2127,8 +2127,15 @@ var OpenAIResponsesLanguageModel = class {
  user: openaiOptions == null ? void 0 : openaiOptions.user,
  instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
  // model-specific settings:
- ...modelConfig.isReasoningModel && (openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
- reasoning: { effort: openaiOptions == null ? void 0 : openaiOptions.reasoningEffort }
+ ...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
+ reasoning: {
+ ...(openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
+ effort: openaiOptions.reasoningEffort
+ },
+ ...(openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null && {
+ summary: openaiOptions.reasoningSummary
+ }
+ }
  },
  ...modelConfig.requiredAutoTruncation && {
  truncation: "auto"
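With the change above, the reasoning request field is sent whenever either reasoningEffort or reasoningSummary is set for a reasoning model, and each sub-field is included only when present. A usage sketch, assuming the AI SDK's generateText and this package's openai.responses() factory (the option values shown are illustrative):

    import { generateText } from "ai";
    import { openai } from "@ai-sdk/openai";

    const result = await generateText({
      model: openai.responses("o3-mini"),
      prompt: "How many prime numbers are there below 100?",
      providerOptions: {
        openai: {
          reasoningEffort: "low",   // forwarded as reasoning.effort
          reasoningSummary: "auto", // forwarded as reasoning.summary (assumed accepted value)
        },
      },
    });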
@@ -2210,7 +2217,7 @@ var OpenAIResponsesLanguageModel = class {
  }
  }
  async doGenerate(options) {
- var _a, _b, _c, _d, _e;
+ var _a, _b, _c, _d, _e, _f, _g;
  const { args: body, warnings } = this.getArgs(options);
  const {
  responseHeaders,
@@ -2263,7 +2270,13 @@ var OpenAIResponsesLanguageModel = class {
  type: z7.literal("computer_call")
  }),
  z7.object({
- type: z7.literal("reasoning")
+ type: z7.literal("reasoning"),
+ summary: z7.array(
+ z7.object({
+ type: z7.literal("summary_text"),
+ text: z7.string()
+ })
+ )
  })
  ])
  ),
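The widened response schema above accepts reasoning output items that carry a summary array. An illustrative item matching it (the values are made up; only the structure comes from the schema):

    // Hypothetical Responses API output item accepted by the widened schema.
    const reasoningItem = {
      type: "reasoning",
      summary: [{ type: "summary_text", text: "Compared two approaches before answering." }],
    };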
@@ -2281,6 +2294,7 @@ var OpenAIResponsesLanguageModel = class {
  toolName: output.name,
  args: output.arguments
  }));
+ const reasoningSummary = (_b = (_a = response.output.find((item) => item.type === "reasoning")) == null ? void 0 : _a.summary) != null ? _b : null;
  return {
  text: outputTextElements.map((content) => content.text).join("\n"),
  sources: outputTextElements.flatMap(
@@ -2295,10 +2309,14 @@ var OpenAIResponsesLanguageModel = class {
  })
  ),
  finishReason: mapOpenAIResponseFinishReason({
- finishReason: (_a = response.incomplete_details) == null ? void 0 : _a.reason,
+ finishReason: (_c = response.incomplete_details) == null ? void 0 : _c.reason,
  hasToolCalls: toolCalls.length > 0
  }),
  toolCalls: toolCalls.length > 0 ? toolCalls : void 0,
+ reasoning: reasoningSummary ? reasoningSummary.map((summary) => ({
+ type: "text",
+ text: summary.text
+ })) : void 0,
  usage: {
  promptTokens: response.usage.input_tokens,
  completionTokens: response.usage.output_tokens
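In doGenerate, the extracted summary parts are returned as reasoning entries of type "text", so they surface on the generation result alongside text and toolCalls. A sketch of reading them, assuming the AI SDK 4.x result shape (the reasoning property name is an assumption, not confirmed by this diff):

    import { generateText } from "ai";
    import { openai } from "@ai-sdk/openai";

    const { text, reasoning } = await generateText({
      model: openai.responses("o3-mini"),
      prompt: "Summarize the trade-offs of eager vs. lazy loading.",
      providerOptions: { openai: { reasoningSummary: "auto" } }, // illustrative value
    });

    if (reasoning) console.log("reasoning summary:", reasoning); // assumed result property
    console.log("answer:", text);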
@@ -2322,8 +2340,8 @@ var OpenAIResponsesLanguageModel = class {
  providerMetadata: {
  openai: {
  responseId: response.id,
- cachedPromptTokens: (_c = (_b = response.usage.input_tokens_details) == null ? void 0 : _b.cached_tokens) != null ? _c : null,
- reasoningTokens: (_e = (_d = response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : null
+ cachedPromptTokens: (_e = (_d = response.usage.input_tokens_details) == null ? void 0 : _d.cached_tokens) != null ? _e : null,
+ reasoningTokens: (_g = (_f = response.usage.output_tokens_details) == null ? void 0 : _f.reasoning_tokens) != null ? _g : null
  }
  },
  warnings
@@ -2406,6 +2424,11 @@ var OpenAIResponsesLanguageModel = class {
  type: "text-delta",
  textDelta: value.delta
  });
+ } else if (isResponseReasoningSummaryTextDeltaChunk(value)) {
+ controller.enqueue({
+ type: "reasoning",
+ textDelta: value.delta
+ });
  } else if (isResponseOutputItemDoneChunk(value) && value.item.type === "function_call") {
  ongoingToolCalls[value.output_index] = void 0;
  hasToolCalls = true;
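In streaming mode, each response.reasoning_summary_text.delta chunk is now forwarded as a stream part of type "reasoning" carrying a textDelta. A consumption sketch, assuming streamText and its fullStream iterator from the AI SDK:

    import { streamText } from "ai";
    import { openai } from "@ai-sdk/openai";

    const stream = streamText({
      model: openai.responses("o3-mini"),
      prompt: "Outline a migration plan in three steps.",
      providerOptions: { openai: { reasoningSummary: "auto" } }, // illustrative value
    });

    for await (const part of stream.fullStream) {
      if (part.type === "reasoning") {
        process.stdout.write(part.textDelta); // summary text, streamed incrementally
      } else if (part.type === "text-delta") {
        process.stdout.write(part.textDelta);
      }
    }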
@@ -2537,6 +2560,13 @@ var responseAnnotationAddedSchema = z7.object({
  title: z7.string()
  })
  });
+ var responseReasoningSummaryTextDeltaSchema = z7.object({
+ type: z7.literal("response.reasoning_summary_text.delta"),
+ item_id: z7.string(),
+ output_index: z7.number(),
+ summary_index: z7.number(),
+ delta: z7.string()
+ });
  var openaiResponsesChunkSchema = z7.union([
  textDeltaChunkSchema,
  responseFinishedChunkSchema,
@@ -2545,6 +2575,7 @@ var openaiResponsesChunkSchema = z7.union([
  responseFunctionCallArgumentsDeltaSchema,
  responseOutputItemAddedSchema,
  responseAnnotationAddedSchema,
+ responseReasoningSummaryTextDeltaSchema,
  z7.object({ type: z7.string() }).passthrough()
  // fallback for unknown chunks
  ]);
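For reference, the responseReasoningSummaryTextDeltaSchema added above matches server-sent chunks shaped like the following (the field names come from the schema; the values are made up for illustration):

    // Hypothetical chunk accepted by responseReasoningSummaryTextDeltaSchema.
    const exampleChunk = {
      type: "response.reasoning_summary_text.delta",
      item_id: "rs_123",  // hypothetical id
      output_index: 0,
      summary_index: 0,
      delta: "Considering the constraints first",
    };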
@@ -2569,6 +2600,9 @@ function isResponseOutputItemAddedChunk(chunk) {
  function isResponseAnnotationAddedChunk(chunk) {
  return chunk.type === "response.output_text.annotation.added";
  }
+ function isResponseReasoningSummaryTextDeltaChunk(chunk) {
+ return chunk.type === "response.reasoning_summary_text.delta";
+ }
  function getResponsesModelConfig(modelId) {
  if (modelId.startsWith("o")) {
  if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {
@@ -2598,7 +2632,8 @@ var openaiResponsesProviderOptionsSchema = z7.object({
  user: z7.string().nullish(),
  reasoningEffort: z7.string().nullish(),
  strictSchemas: z7.boolean().nullish(),
- instructions: z7.string().nullish()
+ instructions: z7.string().nullish(),
+ reasoningSummary: z7.string().nullish()
  });

  // src/openai-tools.ts
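Finally, the provider options schema gains reasoningSummary. A rough TypeScript shape inferred from the fields visible in this hunk (the full schema may contain additional options not shown here):

    // Sketch inferred from openaiResponsesProviderOptionsSchema above; not exhaustive.
    type OpenAIResponsesProviderOptionsSketch = {
      user?: string | null;
      reasoningEffort?: string | null;
      strictSchemas?: boolean | null;
      instructions?: string | null;
      reasoningSummary?: string | null; // new in this release
    };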