@zenning/openai 3.0.18 → 3.0.20

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,17 @@
  # @ai-sdk/openai
 
+ ## 3.0.20
+
+ ### Patch Changes
+
+ - eriuhgb
+
+ ## 3.0.19
+
+ ### Patch Changes
+
+ - grg
+
  ## 3.0.18
 
  ### Patch Changes
package/dist/index.d.mts CHANGED
@@ -461,6 +461,7 @@ declare const openaiResponsesProviderOptionsSchema: _zenning_provider_utils.Lazy
  type: "compaction";
  encrypted_content: string;
  }[] | undefined;
+ containsApprovalResponses?: boolean | undefined;
  }>;
  type OpenAIResponsesProviderOptions = InferSchema<typeof openaiResponsesProviderOptionsSchema>;
 
package/dist/index.d.ts CHANGED
@@ -461,6 +461,7 @@ declare const openaiResponsesProviderOptionsSchema: _zenning_provider_utils.Lazy
  type: "compaction";
  encrypted_content: string;
  }[] | undefined;
+ containsApprovalResponses?: boolean | undefined;
  }>;
  type OpenAIResponsesProviderOptions = InferSchema<typeof openaiResponsesProviderOptionsSchema>;
 
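Both declaration files now expose an optional `containsApprovalResponses` flag on `OpenAIResponsesProviderOptions`. Below is a minimal sketch of how a caller might set it; the `generateText` call, the `openai` provider-options key, the message shape, and the model id are assumptions based on AI SDK conventions, and only `containsApprovalResponses` itself comes from this diff.

```ts
// Sketch only: `generateText`, the "openai" providerOptions key, and the model id
// are assumed AI SDK conventions; `containsApprovalResponses` is the option added
// in this release.
import { generateText } from "ai";
import { createOpenAI } from "@zenning/openai";

const openai = createOpenAI();

const result = await generateText({
  model: openai("gpt-4.1"),
  // In a real approval flow these messages would include the assistant's
  // tool call and the user's approval or denial response.
  messages: [{ role: "user", content: "Continue with the approved tool call." }],
  providerOptions: {
    openai: {
      // New in this release: flag requests that carry tool approval responses.
      // Per the schema comment, it defaults to false when omitted.
      containsApprovalResponses: true,
    },
  },
});

console.log(result.text);
```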
package/dist/index.js CHANGED
@@ -2479,7 +2479,8 @@ async function convertToOpenAIResponsesInput({
  hasShellTool = false,
  hasApplyPatchTool = false,
  compactionInput,
- previousResponseId
+ previousResponseId,
+ containsApprovalResponses
  }) {
  var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
  const input = [];
@@ -2566,7 +2567,7 @@ async function convertToOpenAIResponsesInput({
  switch (part.type) {
  case "text": {
  const id = (_b = (_a = part.providerOptions) == null ? void 0 : _a[providerOptionsName]) == null ? void 0 : _b.itemId;
- if (store && id != null && !previousResponseId) {
+ if (store && id != null) {
  input.push({ type: "item_reference", id });
  break;
  }
@@ -2580,12 +2581,12 @@ async function convertToOpenAIResponsesInput({
  case "tool-call": {
  const id = (_g = (_d = (_c = part.providerOptions) == null ? void 0 : _c[providerOptionsName]) == null ? void 0 : _d.itemId) != null ? _g : (_f = (_e = part.providerMetadata) == null ? void 0 : _e[providerOptionsName]) == null ? void 0 : _f.itemId;
  if (part.providerExecuted) {
- if (store && id != null && !previousResponseId) {
+ if (store && id != null) {
  input.push({ type: "item_reference", id });
  }
  break;
  }
- if (store && id != null && !previousResponseId) {
+ if (store && id != null) {
  input.push({ type: "item_reference", id });
  break;
  }
@@ -2644,10 +2645,9 @@ async function convertToOpenAIResponsesInput({
  if (part.output.type === "execution-denied" || part.output.type === "json" && typeof part.output.value === "object" && part.output.value != null && "type" in part.output.value && part.output.value.type === "execution-denied") {
  break;
  }
- if (store && !previousResponseId) {
+ if (store) {
  const itemId = (_j = (_i = (_h = part.providerMetadata) == null ? void 0 : _h[providerOptionsName]) == null ? void 0 : _i.itemId) != null ? _j : part.toolCallId;
  input.push({ type: "item_reference", id: itemId });
- } else if (store) {
  } else {
  warnings.push({
  type: "other",
@@ -2665,7 +2665,7 @@ async function convertToOpenAIResponsesInput({
  const reasoningId = providerOptions == null ? void 0 : providerOptions.itemId;
  if (reasoningId != null) {
  const reasoningMessage = reasoningMessages[reasoningId];
- if (store && !previousResponseId) {
+ if (store) {
  if (reasoningMessage === void 0) {
  input.push({ type: "item_reference", id: reasoningId });
  reasoningMessages[reasoningId] = {
@@ -2674,14 +2674,6 @@ async function convertToOpenAIResponsesInput({
  summary: []
  };
  }
- } else if (store) {
- if (reasoningMessage === void 0) {
- reasoningMessages[reasoningId] = {
- type: "reasoning",
- id: reasoningId,
- summary: []
- };
- }
  } else {
  const summaryParts = [];
  if (part.text.length > 0) {
@@ -3801,7 +3793,12 @@ var openaiResponsesProviderOptionsSchema = (0, import_provider_utils25.lazySchem
  type: import_v420.z.literal("compaction"),
  encrypted_content: import_v420.z.string()
  })
- ).optional()
+ ).optional(),
+ /**
+ * Whether the request contains tool approval responses.
+ * Defaults to `false`.
+ */
+ containsApprovalResponses: import_v420.z.boolean().optional()
  })
  )
  );
@@ -4032,7 +4029,7 @@ var OpenAIResponsesLanguageModel = class {
  toolChoice,
  responseFormat
  }) {
- var _a, _b, _c, _d, _e, _f, _g;
+ var _a, _b, _c, _d, _e, _f, _g, _h;
  const warnings = [];
  const modelCapabilities = getOpenAILanguageModelCapabilities(this.modelId);
  if (topK != null) {
@@ -4096,10 +4093,11 @@ var OpenAIResponsesLanguageModel = class {
  hasShellTool: hasOpenAITool("openai.shell"),
  hasApplyPatchTool: hasOpenAITool("openai.apply_patch"),
  compactionInput: openaiOptions == null ? void 0 : openaiOptions.compactionInput,
- previousResponseId: (_d = openaiOptions == null ? void 0 : openaiOptions.previousResponseId) != null ? _d : void 0
+ previousResponseId: (_d = openaiOptions == null ? void 0 : openaiOptions.previousResponseId) != null ? _d : void 0,
+ containsApprovalResponses: (_e = openaiOptions == null ? void 0 : openaiOptions.containsApprovalResponses) != null ? _e : false
  });
  warnings.push(...inputWarnings);
- const strictJsonSchema = (_e = openaiOptions == null ? void 0 : openaiOptions.strictJsonSchema) != null ? _e : true;
+ const strictJsonSchema = (_f = openaiOptions == null ? void 0 : openaiOptions.strictJsonSchema) != null ? _f : true;
  let include = openaiOptions == null ? void 0 : openaiOptions.include;
  function addInclude(key) {
  if (include == null) {
@@ -4115,9 +4113,9 @@ var OpenAIResponsesLanguageModel = class {
  if (topLogprobs) {
  addInclude("message.output_text.logprobs");
  }
- const webSearchToolName = (_f = tools == null ? void 0 : tools.find(
+ const webSearchToolName = (_g = tools == null ? void 0 : tools.find(
  (tool) => tool.type === "provider" && (tool.id === "openai.web_search" || tool.id === "openai.web_search_preview")
- )) == null ? void 0 : _f.name;
+ )) == null ? void 0 : _g.name;
  if (webSearchToolName) {
  addInclude("web_search_call.action.sources");
  }
@@ -4140,7 +4138,7 @@ var OpenAIResponsesLanguageModel = class {
  format: responseFormat.schema != null ? {
  type: "json_schema",
  strict: strictJsonSchema,
- name: (_g = responseFormat.name) != null ? _g : "response",
+ name: (_h = responseFormat.name) != null ? _h : "response",
  description: responseFormat.description,
  schema: responseFormat.schema
  } : { type: "json_object" }
@@ -5782,7 +5780,7 @@ var OpenAITranscriptionModel = class {
  };
 
  // src/version.ts
- var VERSION = true ? "3.0.17" : "0.0.0-test";
+ var VERSION = true ? "3.0.20" : "0.0.0-test";
 
  // src/openai-provider.ts
  function createOpenAI(options = {}) {
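
Beyond the new option, the recurring change in `convertToOpenAIResponsesInput` is the removal of the `!previousResponseId` guard from every `item_reference` branch: stored text, tool-call, tool-result, and reasoning items are now referenced by id even when `previousResponseId` is supplied. The sketch below restates that condition with names cleaned up from the bundled output for readability; it is an illustration of the simplified check, not the package's actual helper.

```ts
// Readable restatement of the repeated condition change (illustrative names;
// `store`, `input`, and the item_reference shape mirror the bundled diff).
function pushItemReference(
  input: Array<{ type: "item_reference"; id: string }>,
  store: boolean,
  itemId: string | undefined
): boolean {
  // Before 3.0.19: `if (store && itemId != null && !previousResponseId)`.
  // After: the previousResponseId guard is dropped, so stored items are
  // referenced by id regardless of whether a previous response id is set.
  if (store && itemId != null) {
    input.push({ type: "item_reference", id: itemId });
    return true;
  }
  return false;
}
```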