@zenning/openai 3.0.25 → 3.0.26

This diff shows the changes between publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -2556,23 +2556,10 @@ async function convertToOpenAIResponsesInput({
2556
2556
  const warnings = [];
2557
2557
  const processedApprovalIds = /* @__PURE__ */ new Set();
2558
2558
  const approvalToolCallIds = new Set(approvalToolCallIdsArray || []);
2559
- console.log("CALDEBUG [convertToOpenAIResponsesInput] Starting conversion:", {
2560
- containsApprovalResponses,
2561
- approvalToolCallIdsCount: approvalToolCallIds.size,
2562
- approvalToolCallIds: Array.from(approvalToolCallIds),
2563
- previousResponseId,
2564
- promptLength: prompt.length
2565
- });
2566
2559
  if (compactionInput && compactionInput.length > 0) {
2567
2560
  input.push(...compactionInput);
2568
2561
  }
2569
2562
  for (const { role, content } of prompt) {
2570
- console.log("CALDEBUG [convertToOpenAIResponsesInput] Processing message:", {
2571
- role,
2572
- isString: typeof content === "string",
2573
- partsCount: typeof content === "string" ? 0 : content.length,
2574
- partTypes: typeof content === "string" ? [] : content.map((p) => p.type)
2575
- });
2576
2563
  switch (role) {
2577
2564
  case "system": {
2578
2565
  switch (systemMessageMode) {
@@ -2803,10 +2790,6 @@ async function convertToOpenAIResponsesInput({
2803
2790
  break;
2804
2791
  }
2805
2792
  case "tool": {
2806
- console.log("CALDEBUG [convertToOpenAIResponsesInput] Processing tool role message:", {
2807
- partsCount: content.length,
2808
- partTypes: content.map((p) => p.type)
2809
- });
2810
2793
  for (const part of content) {
2811
2794
  if (part.type === "tool-approval-response") {
2812
2795
  const approvalResponse = part;
@@ -2830,14 +2813,7 @@ async function convertToOpenAIResponsesInput({
2830
2813
  const output = part.output;
2831
2814
  if (output.type === "execution-denied") {
2832
2815
  const approvalId = (_l = (_k = output.providerOptions) == null ? void 0 : _k.openai) == null ? void 0 : _l.approvalId;
2833
- console.log("CALDEBUG [convertToOpenAIResponsesInput] Found execution-denied:", {
2834
- toolCallId: part.toolCallId,
2835
- toolName: part.toolName,
2836
- hasApprovalId: !!approvalId,
2837
- approvalId,
2838
- willSkip: !!approvalId
2839
- });
2840
- if (approvalId) {
2816
+ if (approvalId && !previousResponseId) {
2841
2817
  continue;
2842
2818
  }
2843
2819
  }
@@ -2946,15 +2922,6 @@ async function convertToOpenAIResponsesInput({
2946
2922
  }
2947
2923
  }
2948
2924
  }
2949
- console.log("CALDEBUG [convertToOpenAIResponsesInput] Final input:", {
2950
- inputLength: input.length,
2951
- inputTypes: input.map((item) => item.type || item.role),
2952
- hasItemReferences: input.some((item) => item.type === "item_reference"),
2953
- itemReferences: input.filter((item) => item.type === "item_reference").map((item) => ({
2954
- type: item.type,
2955
- id: typeof item.id === "string" ? item.id.substring(0, 20) + "..." : item.id
2956
- }))
2957
- });
2958
2925
  return { input, warnings };
2959
2926
  }
2960
2927
  var openaiResponsesReasoningProviderOptionsSchema = z18.object({
@@ -3902,6 +3869,18 @@ var openaiResponsesProviderOptionsSchema = lazySchema18(
3902
3869
  encrypted_content: z20.string()
3903
3870
  })
3904
3871
  ).optional(),
3872
+ /**
3873
+ * Server-side context management configuration.
3874
+ * When enabled with a compaction threshold, the server automatically compacts
3875
+ * the conversation in-stream when the rendered token count crosses the threshold.
3876
+ * @see https://developers.openai.com/api/docs/guides/compaction
3877
+ */
3878
+ contextManagement: z20.array(
3879
+ z20.object({
3880
+ type: z20.literal("compaction"),
3881
+ compact_threshold: z20.number()
3882
+ })
3883
+ ).optional(),
3905
3884
  /**
3906
3885
  * Whether the request contains tool approval responses.
3907
3886
  * Defaults to `false`.
@@ -4280,6 +4259,7 @@ var OpenAIResponsesLanguageModel = class {
4280
4259
  safety_identifier: openaiOptions == null ? void 0 : openaiOptions.safetyIdentifier,
4281
4260
  top_logprobs: topLogprobs,
4282
4261
  truncation: openaiOptions == null ? void 0 : openaiOptions.truncation,
4262
+ context_management: openaiOptions == null ? void 0 : openaiOptions.contextManagement,
4283
4263
  // model-specific settings:
4284
4264
  ...isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
4285
4265
  reasoning: {
@@ -5908,7 +5888,7 @@ var OpenAITranscriptionModel = class {
5908
5888
  };
5909
5889
 
5910
5890
  // src/version.ts
5911
- var VERSION = true ? "3.0.25" : "0.0.0-test";
5891
+ var VERSION = true ? "3.0.26" : "0.0.0-test";
5912
5892
 
5913
5893
  // src/openai-provider.ts
5914
5894
  function createOpenAI(options = {}) {