@ai-sdk/openai 2.0.28 → 2.0.30

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -2157,35 +2157,34 @@ import {
  } from "@ai-sdk/provider-utils";
  import { z as z17 } from "zod/v4";

- // src/responses/convert-to-openai-responses-messages.ts
+ // src/responses/convert-to-openai-responses-input.ts
  import {
  UnsupportedFunctionalityError as UnsupportedFunctionalityError4
  } from "@ai-sdk/provider";
- import { parseProviderOptions as parseProviderOptions6 } from "@ai-sdk/provider-utils";
+ import { convertToBase64 as convertToBase642, parseProviderOptions as parseProviderOptions6 } from "@ai-sdk/provider-utils";
  import { z as z14 } from "zod/v4";
- import { convertToBase64 as convertToBase642 } from "@ai-sdk/provider-utils";
  function isFileId(data, prefixes) {
  if (!prefixes) return false;
  return prefixes.some((prefix) => data.startsWith(prefix));
  }
- async function convertToOpenAIResponsesMessages({
+ async function convertToOpenAIResponsesInput({
  prompt,
  systemMessageMode,
  fileIdPrefixes
  }) {
  var _a, _b, _c, _d, _e, _f;
- const messages = [];
+ const input = [];
  const warnings = [];
  for (const { role, content } of prompt) {
  switch (role) {
  case "system": {
  switch (systemMessageMode) {
  case "system": {
- messages.push({ role: "system", content });
+ input.push({ role: "system", content });
  break;
  }
  case "developer": {
- messages.push({ role: "developer", content });
+ input.push({ role: "developer", content });
  break;
  }
  case "remove": {
@@ -2205,7 +2204,7 @@ async function convertToOpenAIResponsesMessages({
  break;
  }
  case "user": {
- messages.push({
+ input.push({
  role: "user",
  content: content.map((part, index) => {
  var _a2, _b2, _c2;
@@ -2250,10 +2249,11 @@ async function convertToOpenAIResponsesMessages({
  }
  case "assistant": {
  const reasoningMessages = {};
+ const toolCallParts = {};
  for (const part of content) {
  switch (part.type) {
  case "text": {
- messages.push({
+ input.push({
  role: "assistant",
  content: [{ type: "output_text", text: part.text }],
  id: (_c = (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.itemId) != null ? _c : void 0
@@ -2261,10 +2261,11 @@ async function convertToOpenAIResponsesMessages({
  break;
  }
  case "tool-call": {
+ toolCallParts[part.toolCallId] = part;
  if (part.providerExecuted) {
  break;
  }
- messages.push({
+ input.push({
  type: "function_call",
  call_id: part.toolCallId,
  name: part.toolName,
@@ -2305,7 +2306,7 @@ async function convertToOpenAIResponsesMessages({
  encrypted_content: providerOptions == null ? void 0 : providerOptions.reasoningEncryptedContent,
  summary: summaryParts
  };
- messages.push(reasoningMessages[reasoningId]);
+ input.push(reasoningMessages[reasoningId]);
  } else {
  existingReasoningMessage.summary.push(...summaryParts);
  }
@@ -2336,7 +2337,7 @@ async function convertToOpenAIResponsesMessages({
  contentValue = JSON.stringify(output.value);
  break;
  }
- messages.push({
+ input.push({
  type: "function_call_output",
  call_id: part.toolCallId,
  output: contentValue
@@ -2350,7 +2351,7 @@ async function convertToOpenAIResponsesMessages({
  }
  }
  }
- return { messages, warnings };
+ return { input, warnings };
  }
  var openaiResponsesReasoningProviderOptionsSchema = z14.object({
  itemId: z14.string().nullish(),
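
For orientation, a minimal TypeScript sketch of the { input, warnings } shape the renamed convertToOpenAIResponsesInput returns, based only on the pushes visible in the hunks above; the item types are simplified and the concrete values are invented.

type ResponsesInputItemSketch =
  | { role: "system" | "developer" | "user" | "assistant"; content: unknown }
  | { type: "function_call"; call_id: string; name: string; arguments: string }
  | { type: "function_call_output"; call_id: string; output: string };

// Hypothetical result for a short conversation with one tool round-trip.
const converted: { input: ResponsesInputItemSketch[]; warnings: unknown[] } = {
  input: [
    { role: "system", content: "You are a helpful assistant." },
    { type: "function_call", call_id: "call_1", name: "calculator", arguments: "{\"expression\":\"2+2\"}" },
    { type: "function_call_output", call_id: "call_1", output: "4" },
  ],
  warnings: [],
};
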
@@ -2381,8 +2382,20 @@ import {
  } from "@ai-sdk/provider";

  // src/tool/code-interpreter.ts
- import { createProviderDefinedToolFactory as createProviderDefinedToolFactory3 } from "@ai-sdk/provider-utils";
+ import { createProviderDefinedToolFactoryWithOutputSchema } from "@ai-sdk/provider-utils";
  import { z as z15 } from "zod/v4";
+ var codeInterpreterInputSchema = z15.object({
+ code: z15.string().nullish(),
+ containerId: z15.string()
+ });
+ var codeInterpreterOutputSchema = z15.object({
+ outputs: z15.array(
+ z15.discriminatedUnion("type", [
+ z15.object({ type: z15.literal("logs"), logs: z15.string() }),
+ z15.object({ type: z15.literal("image"), url: z15.string() })
+ ])
+ ).nullish()
+ });
  var codeInterpreterArgsSchema = z15.object({
  container: z15.union([
  z15.string(),
@@ -2391,14 +2404,15 @@ var codeInterpreterArgsSchema = z15.object({
  })
  ]).optional()
  });
- var codeInterpreterToolFactory = createProviderDefinedToolFactory3({
+ var codeInterpreterToolFactory = createProviderDefinedToolFactoryWithOutputSchema({
  id: "openai.code_interpreter",
  name: "code_interpreter",
- inputSchema: z15.object({})
+ inputSchema: codeInterpreterInputSchema,
+ outputSchema: codeInterpreterOutputSchema
  });

  // src/tool/web-search.ts
- import { createProviderDefinedToolFactory as createProviderDefinedToolFactory4 } from "@ai-sdk/provider-utils";
+ import { createProviderDefinedToolFactory as createProviderDefinedToolFactory3 } from "@ai-sdk/provider-utils";
  import { z as z16 } from "zod/v4";
  var webSearchArgsSchema = z16.object({
  filters: z16.object({
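
The code-interpreter input/output schemas above are ordinary zod objects. A self-contained sketch of the same shapes, with made-up example values, showing what the tool factory now validates:

import { z } from "zod/v4";

// Same shapes as codeInterpreterInputSchema / codeInterpreterOutputSchema above.
const codeInterpreterInput = z.object({
  code: z.string().nullish(),
  containerId: z.string(),
});

const codeInterpreterOutput = z.object({
  outputs: z
    .array(
      z.discriminatedUnion("type", [
        z.object({ type: z.literal("logs"), logs: z.string() }),
        z.object({ type: z.literal("image"), url: z.string() }),
      ]),
    )
    .nullish(),
});

// Hypothetical values that the schemas accept.
codeInterpreterInput.parse({ code: "print(40 + 2)", containerId: "cntr_abc123" });
codeInterpreterOutput.parse({ outputs: [{ type: "logs", logs: "42\n" }] });
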
@@ -2413,7 +2427,7 @@ var webSearchArgsSchema = z16.object({
  timezone: z16.string().optional()
  }).optional()
  });
- var webSearchToolFactory = createProviderDefinedToolFactory4({
+ var webSearchToolFactory = createProviderDefinedToolFactory3({
  id: "openai.web_search",
  name: "web_search",
  inputSchema: z16.object({
@@ -2555,6 +2569,18 @@ var webSearchCallItem = z17.object({
  })
  ]).nullish()
  });
+ var codeInterpreterCallItem = z17.object({
+ type: z17.literal("code_interpreter_call"),
+ id: z17.string(),
+ code: z17.string().nullable(),
+ container_id: z17.string(),
+ outputs: z17.array(
+ z17.discriminatedUnion("type", [
+ z17.object({ type: z17.literal("logs"), logs: z17.string() }),
+ z17.object({ type: z17.literal("image"), url: z17.string() })
+ ])
+ ).nullable()
+ });
  var TOP_LOGPROBS_MAX = 20;
  var LOGPROBS_SCHEMA = z17.array(
  z17.object({
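
codeInterpreterCallItem describes the raw code_interpreter_call output item as it arrives from the Responses API (snake_case, nullable fields). An illustrative payload that would satisfy it; the id, code, container id, and outputs are invented for the example:

const codeInterpreterCallExample = {
  type: "code_interpreter_call",
  id: "ci_0123456789abcdef",            // item id, reused as the toolCallId downstream
  code: "import math\nprint(math.sqrt(2))",
  container_id: "cntr_abc123",
  outputs: [
    { type: "logs", logs: "1.4142135623730951\n" },
    { type: "image", url: "https://example.com/render.png" },
  ],
};
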
@@ -2596,7 +2622,7 @@ var OpenAIResponsesLanguageModel = class {
  toolChoice,
  responseFormat
  }) {
- var _a, _b, _c;
+ var _a, _b, _c, _d;
  const warnings = [];
  const modelConfig = getResponsesModelConfig(this.modelId);
  if (topK != null) {
@@ -2620,12 +2646,12 @@ var OpenAIResponsesLanguageModel = class {
  if (stopSequences != null) {
  warnings.push({ type: "unsupported-setting", setting: "stopSequences" });
  }
- const { messages, warnings: messageWarnings } = await convertToOpenAIResponsesMessages({
+ const { input, warnings: inputWarnings } = await convertToOpenAIResponsesInput({
  prompt,
  systemMessageMode: modelConfig.systemMessageMode,
  fileIdPrefixes: this.config.fileIdPrefixes
  });
- warnings.push(...messageWarnings);
+ warnings.push(...inputWarnings);
  const openaiOptions = await parseProviderOptions7({
  provider: "openai",
  providerOptions,
@@ -2639,9 +2665,13 @@ var OpenAIResponsesLanguageModel = class {
  (tool) => tool.type === "provider-defined" && (tool.id === "openai.web_search" || tool.id === "openai.web_search_preview")
  )) == null ? void 0 : _b.name;
  include = webSearchToolName ? Array.isArray(include) ? [...include, "web_search_call.action.sources"] : ["web_search_call.action.sources"] : include;
+ const codeInterpreterToolName = (_c = tools == null ? void 0 : tools.find(
+ (tool) => tool.type === "provider-defined" && tool.id === "openai.code_interpreter"
+ )) == null ? void 0 : _c.name;
+ include = codeInterpreterToolName ? Array.isArray(include) ? [...include, "code_interpreter_call.outputs"] : ["code_interpreter_call.outputs"] : include;
  const baseArgs = {
  model: this.modelId,
- input: messages,
+ input,
  temperature,
  top_p: topP,
  max_output_tokens: maxOutputTokens,
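
The include handling above reads as: if a provider-defined openai.code_interpreter tool is configured, also request code_interpreter_call.outputs from the API. A standalone sketch of that branch with simplified types (not the SDK's own API):

type ProviderDefinedTool = { type: "provider-defined"; id: string; name: string };

// Mirrors the include extension in the hunk above.
function includeCodeInterpreterOutputs(
  tools: ProviderDefinedTool[] | undefined,
  include: string[] | undefined,
): string[] | undefined {
  const hasCodeInterpreter = tools?.some(
    (tool) => tool.type === "provider-defined" && tool.id === "openai.code_interpreter",
  );
  if (!hasCodeInterpreter) return include;
  return Array.isArray(include)
    ? [...include, "code_interpreter_call.outputs"]
    : ["code_interpreter_call.outputs"];
}

// Example: no prior include entries -> ["code_interpreter_call.outputs"]
includeCodeInterpreterOutputs(
  [{ type: "provider-defined", id: "openai.code_interpreter", name: "code_interpreter" }],
  undefined,
);
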
@@ -2651,7 +2681,7 @@ var OpenAIResponsesLanguageModel = class {
  format: responseFormat.schema != null ? {
  type: "json_schema",
  strict: strictJsonSchema,
- name: (_c = responseFormat.name) != null ? _c : "response",
+ name: (_d = responseFormat.name) != null ? _d : "response",
  description: responseFormat.description,
  schema: responseFormat.schema
  } : { type: "json_object" }
@@ -2822,9 +2852,7 @@ var OpenAIResponsesLanguageModel = class {
  })
  )
  }),
- z17.object({
- type: z17.literal("code_interpreter_call")
- }),
+ codeInterpreterCallItem,
  z17.object({
  type: z17.literal("function_call"),
  call_id: z17.string(),
@@ -3020,6 +3048,28 @@ var OpenAIResponsesLanguageModel = class {
  });
  break;
  }
+ case "code_interpreter_call": {
+ content.push({
+ type: "tool-call",
+ toolCallId: part.id,
+ toolName: "code_interpreter",
+ input: JSON.stringify({
+ code: part.code,
+ containerId: part.container_id
+ }),
+ providerExecuted: true
+ });
+ content.push({
+ type: "tool-result",
+ toolCallId: part.id,
+ toolName: "code_interpreter",
+ result: {
+ outputs: part.outputs
+ },
+ providerExecuted: true
+ });
+ break;
+ }
  }
  }
  const providerMetadata = {
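
Each finished code_interpreter_call is mapped to a provider-executed tool-call part followed by a tool-result part; the streaming branch further down enqueues the same pair. A sketch of the two parts for one call, with invented values:

// Raw item as parsed by codeInterpreterCallItem (values invented).
const part = {
  id: "ci_0123456789abcdef",
  code: "print('hi')",
  container_id: "cntr_abc123",
  outputs: [{ type: "logs", logs: "hi\n" }],
};

// The two content parts produced for it, mirroring the hunk above.
const contentParts = [
  {
    type: "tool-call",
    toolCallId: part.id,
    toolName: "code_interpreter",
    input: JSON.stringify({ code: part.code, containerId: part.container_id }),
    providerExecuted: true,
  },
  {
    type: "tool-result",
    toolCallId: part.id,
    toolName: "code_interpreter",
    result: { outputs: part.outputs },
    providerExecuted: true,
  },
];
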
@@ -3263,6 +3313,26 @@ var OpenAIResponsesLanguageModel = class {
  },
  providerExecuted: true
  });
+ } else if (value.item.type === "code_interpreter_call") {
+ controller.enqueue({
+ type: "tool-call",
+ toolCallId: value.item.id,
+ toolName: "code_interpreter",
+ input: JSON.stringify({
+ code: value.item.code,
+ containerId: value.item.container_id
+ }),
+ providerExecuted: true
+ });
+ controller.enqueue({
+ type: "tool-result",
+ toolCallId: value.item.id,
+ toolName: "code_interpreter",
+ result: {
+ outputs: value.item.outputs
+ },
+ providerExecuted: true
+ });
  } else if (value.item.type === "message") {
  controller.enqueue({
  type: "text-end",
@@ -3508,6 +3578,7 @@ var responseOutputItemDoneSchema = z17.object({
  arguments: z17.string(),
  status: z17.literal("completed")
  }),
+ codeInterpreterCallItem,
  webSearchCallItem,
  z17.object({
  type: z17.literal("computer_call"),