@zenning/openai 3.0.6 → 3.0.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,25 @@
1
1
  # @zenning/openai
2
2
 
3
+ ## 3.0.8
4
+
5
+ ### Patch Changes
6
+
7
+ - 35e1a30: Add support for OpenAI Responses API compaction feature via provider options for context window management
8
+ - 10b232c: Fix openai file_search tool to accept optional query param
9
+ - Updated dependencies [35e1a30]
10
+ - @zenning/provider@3.0.5
11
+ - @zenning/provider-utils@4.0.7
12
+
13
+ ## 3.0.7
14
+
15
+ ### Patch Changes
16
+
17
+ - Add support for OpenAI Responses API compaction feature via provider options for context window management
18
+ - 10b232c: Fix openai file_search tool to accept optional query param
19
+ - Updated dependencies
20
+ - @zenning/provider@3.0.4
21
+ - @zenning/provider-utils@4.0.6
22
+
3
23
  ## 3.0.6
4
24
 
5
25
  ### Patch Changes
package/dist/index.d.mts CHANGED
@@ -457,6 +457,10 @@ declare const openaiResponsesProviderOptionsSchema: _zenning_provider_utils.Lazy
457
457
  user?: string | null | undefined;
458
458
  systemMessageMode?: "remove" | "system" | "developer" | undefined;
459
459
  forceReasoning?: boolean | undefined;
460
+ compactionInput?: {
461
+ type: "compaction";
462
+ encrypted_content: string;
463
+ }[] | undefined;
460
464
  }>;
461
465
  type OpenAIResponsesProviderOptions = InferSchema<typeof openaiResponsesProviderOptionsSchema>;
462
466
 
package/dist/index.d.ts CHANGED
@@ -457,6 +457,10 @@ declare const openaiResponsesProviderOptionsSchema: _zenning_provider_utils.Lazy
457
457
  user?: string | null | undefined;
458
458
  systemMessageMode?: "remove" | "system" | "developer" | undefined;
459
459
  forceReasoning?: boolean | undefined;
460
+ compactionInput?: {
461
+ type: "compaction";
462
+ encrypted_content: string;
463
+ }[] | undefined;
460
464
  }>;
461
465
  type OpenAIResponsesProviderOptions = InferSchema<typeof openaiResponsesProviderOptionsSchema>;
462
466
 
package/dist/index.js CHANGED
@@ -2477,12 +2477,16 @@ async function convertToOpenAIResponsesInput({
2477
2477
  store,
2478
2478
  hasLocalShellTool = false,
2479
2479
  hasShellTool = false,
2480
- hasApplyPatchTool = false
2480
+ hasApplyPatchTool = false,
2481
+ compactionInput
2481
2482
  }) {
2482
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o;
2483
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
2483
2484
  const input = [];
2484
2485
  const warnings = [];
2485
2486
  const processedApprovalIds = /* @__PURE__ */ new Set();
2487
+ if (compactionInput && compactionInput.length > 0) {
2488
+ input.push(...compactionInput);
2489
+ }
2486
2490
  for (const { role, content } of prompt) {
2487
2491
  switch (role) {
2488
2492
  case "system": {
@@ -2572,17 +2576,8 @@ async function convertToOpenAIResponsesInput({
2572
2576
  });
2573
2577
  break;
2574
2578
  }
2575
- case "compaction": {
2576
- const id = (_d = (_c = part.providerOptions) == null ? void 0 : _c[providerOptionsName]) == null ? void 0 : _d.itemId;
2577
- input.push({
2578
- role: "assistant",
2579
- content: [{ type: "compaction", encrypted_content: part.encrypted_content }],
2580
- id
2581
- });
2582
- break;
2583
- }
2584
2579
  case "tool-call": {
2585
- const id = (_i = (_f = (_e = part.providerOptions) == null ? void 0 : _e[providerOptionsName]) == null ? void 0 : _f.itemId) != null ? _i : (_h = (_g = part.providerMetadata) == null ? void 0 : _g[providerOptionsName]) == null ? void 0 : _h.itemId;
2580
+ const id = (_g = (_d = (_c = part.providerOptions) == null ? void 0 : _c[providerOptionsName]) == null ? void 0 : _d.itemId) != null ? _g : (_f = (_e = part.providerMetadata) == null ? void 0 : _e[providerOptionsName]) == null ? void 0 : _f.itemId;
2586
2581
  if (part.providerExecuted) {
2587
2582
  if (store && id != null) {
2588
2583
  input.push({ type: "item_reference", id });
@@ -2649,7 +2644,7 @@ async function convertToOpenAIResponsesInput({
2649
2644
  break;
2650
2645
  }
2651
2646
  if (store) {
2652
- const itemId = (_l = (_k = (_j = part.providerMetadata) == null ? void 0 : _j[providerOptionsName]) == null ? void 0 : _k.itemId) != null ? _l : part.toolCallId;
2647
+ const itemId = (_j = (_i = (_h = part.providerMetadata) == null ? void 0 : _h[providerOptionsName]) == null ? void 0 : _i.itemId) != null ? _j : part.toolCallId;
2653
2648
  input.push({ type: "item_reference", id: itemId });
2654
2649
  } else {
2655
2650
  warnings.push({
@@ -2740,7 +2735,7 @@ async function convertToOpenAIResponsesInput({
2740
2735
  }
2741
2736
  const output = part.output;
2742
2737
  if (output.type === "execution-denied") {
2743
- const approvalId = (_n = (_m = output.providerOptions) == null ? void 0 : _m.openai) == null ? void 0 : _n.approvalId;
2738
+ const approvalId = (_l = (_k = output.providerOptions) == null ? void 0 : _k.openai) == null ? void 0 : _l.approvalId;
2744
2739
  if (approvalId) {
2745
2740
  continue;
2746
2741
  }
@@ -2799,7 +2794,7 @@ async function convertToOpenAIResponsesInput({
2799
2794
  contentValue = output.value;
2800
2795
  break;
2801
2796
  case "execution-denied":
2802
- contentValue = (_o = output.reason) != null ? _o : "Tool execution denied.";
2797
+ contentValue = (_m = output.reason) != null ? _m : "Tool execution denied.";
2803
2798
  break;
2804
2799
  case "json":
2805
2800
  case "error-json":
@@ -3785,7 +3780,18 @@ var openaiResponsesProviderOptionsSchema = (0, import_provider_utils25.lazySchem
3785
3780
  * When enabled, the SDK applies reasoning-model parameter compatibility rules
3786
3781
  * and defaults `systemMessageMode` to `developer` unless overridden.
3787
3782
  */
3788
- forceReasoning: import_v420.z.boolean().optional()
3783
+ forceReasoning: import_v420.z.boolean().optional(),
3784
+ /**
3785
+ * Compaction input items to inject into the request.
3786
+ * These are standalone items from the /responses/compact endpoint that contain
3787
+ * encrypted conversation history for context window management.
3788
+ */
3789
+ compactionInput: import_v420.z.array(
3790
+ import_v420.z.object({
3791
+ type: import_v420.z.literal("compaction"),
3792
+ encrypted_content: import_v420.z.string()
3793
+ })
3794
+ ).optional()
3789
3795
  })
3790
3796
  )
3791
3797
  );
@@ -4078,7 +4084,8 @@ var OpenAIResponsesLanguageModel = class {
4078
4084
  store: (_c = openaiOptions == null ? void 0 : openaiOptions.store) != null ? _c : true,
4079
4085
  hasLocalShellTool: hasOpenAITool("openai.local_shell"),
4080
4086
  hasShellTool: hasOpenAITool("openai.shell"),
4081
- hasApplyPatchTool: hasOpenAITool("openai.apply_patch")
4087
+ hasApplyPatchTool: hasOpenAITool("openai.apply_patch"),
4088
+ compactionInput: openaiOptions == null ? void 0 : openaiOptions.compactionInput
4082
4089
  });
4083
4090
  warnings.push(...inputWarnings);
4084
4091
  const strictJsonSchema = (_d = openaiOptions == null ? void 0 : openaiOptions.strictJsonSchema) != null ? _d : true;
@@ -5764,7 +5771,7 @@ var OpenAITranscriptionModel = class {
5764
5771
  };
5765
5772
 
5766
5773
  // src/version.ts
5767
- var VERSION = true ? "3.0.6" : "0.0.0-test";
5774
+ var VERSION = true ? "3.0.8" : "0.0.0-test";
5768
5775
 
5769
5776
  // src/openai-provider.ts
5770
5777
  function createOpenAI(options = {}) {