@ai-sdk/openai 1.3.16 → 1.3.18

This diff shows the changes between publicly released versions of this package as they appear in its public registry. It is provided for informational purposes only.
package/dist/index.mjs CHANGED
@@ -1543,8 +1543,10 @@ import { z as z5 } from "zod";
 // src/openai-image-settings.ts
 var modelMaxImagesPerCall = {
   "dall-e-3": 1,
-  "dall-e-2": 10
+  "dall-e-2": 10,
+  "gpt-image-1": 10
 };
+var hasDefaultResponseFormat = /* @__PURE__ */ new Set(["gpt-image-1"]);
 
 // src/openai-image-model.ts
 var OpenAIImageModel = class {
@@ -1596,7 +1598,7 @@ var OpenAIImageModel = class {
         n,
         size,
         ...(_d = providerOptions.openai) != null ? _d : {},
-        response_format: "b64_json"
+        ...!hasDefaultResponseFormat.has(this.modelId) ? { response_format: "b64_json" } : {}
       },
       failedResponseHandler: openaiFailedResponseHandler,
       successfulResponseHandler: createJsonResponseHandler4(
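
Note: the two hunks above add "gpt-image-1" as a supported image model (up to 10 images per call) and stop forcing response_format for models that already return base64 data by default. A minimal usage sketch, assuming the AI SDK 4.x surface (experimental_generateImage from the "ai" package, openai.image from this provider); prompt and options are illustrative:

  import { openai } from "@ai-sdk/openai";
  import { experimental_generateImage as generateImage } from "ai";

  // gpt-image-1 is now in modelMaxImagesPerCall and hasDefaultResponseFormat,
  // so the provider no longer sends response_format: "b64_json" for it.
  const { image } = await generateImage({
    model: openai.image("gpt-image-1"),
    prompt: "A watercolor fox in an autumn forest", // illustrative prompt
    n: 1,
  });

  console.log(image.base64.slice(0, 32)); // base64-encoded image data
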
@@ -2127,8 +2129,15 @@ var OpenAIResponsesLanguageModel = class {
       user: openaiOptions == null ? void 0 : openaiOptions.user,
       instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
       // model-specific settings:
-      ...modelConfig.isReasoningModel && (openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
-        reasoning: { effort: openaiOptions == null ? void 0 : openaiOptions.reasoningEffort }
+      ...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
+        reasoning: {
+          ...(openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
+            effort: openaiOptions.reasoningEffort
+          },
+          ...(openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null && {
+            summary: openaiOptions.reasoningSummary
+          }
+        }
       },
       ...modelConfig.requiredAutoTruncation && {
         truncation: "auto"
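
With this change, reasoning-model requests can carry both an effort level and a summary setting. If a caller sets reasoningEffort and reasoningSummary in the OpenAI provider options, the Responses API request body built here includes a reasoning object along these lines (model id and values are examples, not defaults):

  {
    "model": "o4-mini",
    "input": [ ... ],
    "reasoning": { "effort": "medium", "summary": "auto" }
  }
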
@@ -2210,7 +2219,7 @@ var OpenAIResponsesLanguageModel = class {
     }
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e;
+    var _a, _b, _c, _d, _e, _f, _g;
    const { args: body, warnings } = this.getArgs(options);
    const {
      responseHeaders,
@@ -2263,7 +2272,13 @@ var OpenAIResponsesLanguageModel = class {
             type: z7.literal("computer_call")
           }),
           z7.object({
-            type: z7.literal("reasoning")
+            type: z7.literal("reasoning"),
+            summary: z7.array(
+              z7.object({
+                type: z7.literal("summary_text"),
+                text: z7.string()
+              })
+            )
           })
         ])
       ),
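
The widened response schema accepts reasoning output items that carry summaries. A Responses API item matching the new shape looks roughly like this (summary text is illustrative; extra fields pass through since the object is non-strict):

  {
    "type": "reasoning",
    "summary": [
      { "type": "summary_text", "text": "Compared both unit conversions before answering." }
    ]
  }
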
@@ -2281,6 +2296,7 @@ var OpenAIResponsesLanguageModel = class {
       toolName: output.name,
       args: output.arguments
     }));
+    const reasoningSummary = (_b = (_a = response.output.find((item) => item.type === "reasoning")) == null ? void 0 : _a.summary) != null ? _b : null;
     return {
       text: outputTextElements.map((content) => content.text).join("\n"),
       sources: outputTextElements.flatMap(
@@ -2295,10 +2311,14 @@ var OpenAIResponsesLanguageModel = class {
         })
       ),
       finishReason: mapOpenAIResponseFinishReason({
-        finishReason: (_a = response.incomplete_details) == null ? void 0 : _a.reason,
+        finishReason: (_c = response.incomplete_details) == null ? void 0 : _c.reason,
         hasToolCalls: toolCalls.length > 0
       }),
       toolCalls: toolCalls.length > 0 ? toolCalls : void 0,
+      reasoning: reasoningSummary ? reasoningSummary.map((summary) => ({
+        type: "text",
+        text: summary.text
+      })) : void 0,
       usage: {
         promptTokens: response.usage.input_tokens,
         completionTokens: response.usage.output_tokens
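
Taken together, doGenerate now surfaces the model's reasoning summary as reasoning parts of type "text". A hedged consumption sketch, assuming the AI SDK 4.x core (generateText from the "ai" package) exposes those parts as result.reasoning; the model id and option values are illustrative and not taken from this diff:

  import { openai } from "@ai-sdk/openai";
  import { generateText } from "ai";

  const result = await generateText({
    model: openai.responses("o4-mini"), // any Responses API reasoning model
    prompt: "How many prime numbers are below 30?",
    providerOptions: {
      openai: { reasoningEffort: "medium", reasoningSummary: "auto" },
    },
  });

  console.log(result.text);      // the final answer
  console.log(result.reasoning); // summarized reasoning, when the model returns one
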
@@ -2322,8 +2342,8 @@ var OpenAIResponsesLanguageModel = class {
       providerMetadata: {
         openai: {
           responseId: response.id,
-          cachedPromptTokens: (_c = (_b = response.usage.input_tokens_details) == null ? void 0 : _b.cached_tokens) != null ? _c : null,
-          reasoningTokens: (_e = (_d = response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : null
+          cachedPromptTokens: (_e = (_d = response.usage.input_tokens_details) == null ? void 0 : _d.cached_tokens) != null ? _e : null,
+          reasoningTokens: (_g = (_f = response.usage.output_tokens_details) == null ? void 0 : _f.reasoning_tokens) != null ? _g : null
         }
       },
       warnings
@@ -2406,6 +2426,11 @@ var OpenAIResponsesLanguageModel = class {
             type: "text-delta",
             textDelta: value.delta
           });
+        } else if (isResponseReasoningSummaryTextDeltaChunk(value)) {
+          controller.enqueue({
+            type: "reasoning",
+            textDelta: value.delta
+          });
         } else if (isResponseOutputItemDoneChunk(value) && value.item.type === "function_call") {
           ongoingToolCalls[value.output_index] = void 0;
           hasToolCalls = true;
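
On the streaming side, reasoning-summary deltas are forwarded as stream parts of type "reasoning". A sketch of consuming them, assuming the fullStream part shape used by streamText in AI SDK 4.x; model id, prompt, and option values are illustrative:

  import { openai } from "@ai-sdk/openai";
  import { streamText } from "ai";

  const result = streamText({
    model: openai.responses("o4-mini"),
    prompt: "Outline a weekend itinerary for Kyoto.",
    providerOptions: { openai: { reasoningSummary: "auto" } },
  });

  for await (const part of result.fullStream) {
    if (part.type === "reasoning") {
      process.stdout.write(part.textDelta); // summary text as it streams
    } else if (part.type === "text-delta") {
      process.stdout.write(part.textDelta); // the answer itself
    }
  }
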
@@ -2537,6 +2562,13 @@ var responseAnnotationAddedSchema = z7.object({
     title: z7.string()
   })
 });
+var responseReasoningSummaryTextDeltaSchema = z7.object({
+  type: z7.literal("response.reasoning_summary_text.delta"),
+  item_id: z7.string(),
+  output_index: z7.number(),
+  summary_index: z7.number(),
+  delta: z7.string()
+});
 var openaiResponsesChunkSchema = z7.union([
   textDeltaChunkSchema,
   responseFinishedChunkSchema,
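
For reference, a streamed event that the new chunk schema matches looks like this (the item id and delta text are illustrative):

  {
    "type": "response.reasoning_summary_text.delta",
    "item_id": "rs_123",
    "output_index": 0,
    "summary_index": 0,
    "delta": "Weighing the two options..."
  }
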
@@ -2545,6 +2577,7 @@ var openaiResponsesChunkSchema = z7.union([
   responseFunctionCallArgumentsDeltaSchema,
   responseOutputItemAddedSchema,
   responseAnnotationAddedSchema,
+  responseReasoningSummaryTextDeltaSchema,
   z7.object({ type: z7.string() }).passthrough()
   // fallback for unknown chunks
 ]);
@@ -2569,6 +2602,9 @@ function isResponseOutputItemAddedChunk(chunk) {
 function isResponseAnnotationAddedChunk(chunk) {
   return chunk.type === "response.output_text.annotation.added";
 }
+function isResponseReasoningSummaryTextDeltaChunk(chunk) {
+  return chunk.type === "response.reasoning_summary_text.delta";
+}
 function getResponsesModelConfig(modelId) {
   if (modelId.startsWith("o")) {
     if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {
@@ -2598,7 +2634,8 @@ var openaiResponsesProviderOptionsSchema = z7.object({
   user: z7.string().nullish(),
   reasoningEffort: z7.string().nullish(),
   strictSchemas: z7.boolean().nullish(),
-  instructions: z7.string().nullish()
+  instructions: z7.string().nullish(),
+  reasoningSummary: z7.string().nullish()
 });
 
 // src/openai-tools.ts