@zenning/openai 3.0.24 → 3.0.26
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +12 -0
- package/dist/index.d.mts +4 -0
- package/dist/index.d.ts +4 -0
- package/dist/index.js +15 -20
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +15 -20
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.js +14 -19
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +14 -19
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +21 -21
- package/LICENSE +0 -13
package/CHANGELOG.md
CHANGED
package/dist/index.d.mts
CHANGED
|
@@ -461,6 +461,10 @@ declare const openaiResponsesProviderOptionsSchema: _zenning_provider_utils.Lazy
|
|
|
461
461
|
type: "compaction";
|
|
462
462
|
encrypted_content: string;
|
|
463
463
|
}[] | undefined;
|
|
464
|
+
contextManagement?: {
|
|
465
|
+
type: "compaction";
|
|
466
|
+
compact_threshold: number;
|
|
467
|
+
}[] | undefined;
|
|
464
468
|
containsApprovalResponses?: boolean | undefined;
|
|
465
469
|
approvalToolCallIds?: string[] | undefined;
|
|
466
470
|
}>;
|
package/dist/index.d.ts
CHANGED
|
@@ -461,6 +461,10 @@ declare const openaiResponsesProviderOptionsSchema: _zenning_provider_utils.Lazy
|
|
|
461
461
|
type: "compaction";
|
|
462
462
|
encrypted_content: string;
|
|
463
463
|
}[] | undefined;
|
|
464
|
+
contextManagement?: {
|
|
465
|
+
type: "compaction";
|
|
466
|
+
compact_threshold: number;
|
|
467
|
+
}[] | undefined;
|
|
464
468
|
containsApprovalResponses?: boolean | undefined;
|
|
465
469
|
approvalToolCallIds?: string[] | undefined;
|
|
466
470
|
}>;
|
package/dist/index.js
CHANGED
|
@@ -2488,13 +2488,6 @@ async function convertToOpenAIResponsesInput({
|
|
|
2488
2488
|
const warnings = [];
|
|
2489
2489
|
const processedApprovalIds = /* @__PURE__ */ new Set();
|
|
2490
2490
|
const approvalToolCallIds = new Set(approvalToolCallIdsArray || []);
|
|
2491
|
-
console.log("CALDEBUG [convertToOpenAIResponsesInput] Starting conversion:", {
|
|
2492
|
-
containsApprovalResponses,
|
|
2493
|
-
approvalToolCallIdsCount: approvalToolCallIds.size,
|
|
2494
|
-
approvalToolCallIds: Array.from(approvalToolCallIds),
|
|
2495
|
-
previousResponseId,
|
|
2496
|
-
promptLength: prompt.length
|
|
2497
|
-
});
|
|
2498
2491
|
if (compactionInput && compactionInput.length > 0) {
|
|
2499
2492
|
input.push(...compactionInput);
|
|
2500
2493
|
}
|
|
@@ -2729,10 +2722,6 @@ async function convertToOpenAIResponsesInput({
|
|
|
2729
2722
|
break;
|
|
2730
2723
|
}
|
|
2731
2724
|
case "tool": {
|
|
2732
|
-
console.log("CALDEBUG [convertToOpenAIResponsesInput] Processing tool role message:", {
|
|
2733
|
-
partsCount: content.length,
|
|
2734
|
-
partTypes: content.map((p) => p.type)
|
|
2735
|
-
});
|
|
2736
2725
|
for (const part of content) {
|
|
2737
2726
|
if (part.type === "tool-approval-response") {
|
|
2738
2727
|
const approvalResponse = part;
|
|
@@ -2756,14 +2745,7 @@ async function convertToOpenAIResponsesInput({
|
|
|
2756
2745
|
const output = part.output;
|
|
2757
2746
|
if (output.type === "execution-denied") {
|
|
2758
2747
|
const approvalId = (_l = (_k = output.providerOptions) == null ? void 0 : _k.openai) == null ? void 0 : _l.approvalId;
|
|
2759
|
-
|
|
2760
|
-
toolCallId: part.toolCallId,
|
|
2761
|
-
toolName: part.toolName,
|
|
2762
|
-
hasApprovalId: !!approvalId,
|
|
2763
|
-
approvalId,
|
|
2764
|
-
willSkip: !!approvalId
|
|
2765
|
-
});
|
|
2766
|
-
if (approvalId) {
|
|
2748
|
+
if (approvalId && !previousResponseId) {
|
|
2767
2749
|
continue;
|
|
2768
2750
|
}
|
|
2769
2751
|
}
|
|
@@ -3819,6 +3801,18 @@ var openaiResponsesProviderOptionsSchema = (0, import_provider_utils25.lazySchem
|
|
|
3819
3801
|
encrypted_content: import_v420.z.string()
|
|
3820
3802
|
})
|
|
3821
3803
|
).optional(),
|
|
3804
|
+
/**
|
|
3805
|
+
* Server-side context management configuration.
|
|
3806
|
+
* When enabled with a compaction threshold, the server automatically compacts
|
|
3807
|
+
* the conversation in-stream when the rendered token count crosses the threshold.
|
|
3808
|
+
* @see https://developers.openai.com/api/docs/guides/compaction
|
|
3809
|
+
*/
|
|
3810
|
+
contextManagement: import_v420.z.array(
|
|
3811
|
+
import_v420.z.object({
|
|
3812
|
+
type: import_v420.z.literal("compaction"),
|
|
3813
|
+
compact_threshold: import_v420.z.number()
|
|
3814
|
+
})
|
|
3815
|
+
).optional(),
|
|
3822
3816
|
/**
|
|
3823
3817
|
* Whether the request contains tool approval responses.
|
|
3824
3818
|
* Defaults to `false`.
|
|
@@ -4195,6 +4189,7 @@ var OpenAIResponsesLanguageModel = class {
|
|
|
4195
4189
|
safety_identifier: openaiOptions == null ? void 0 : openaiOptions.safetyIdentifier,
|
|
4196
4190
|
top_logprobs: topLogprobs,
|
|
4197
4191
|
truncation: openaiOptions == null ? void 0 : openaiOptions.truncation,
|
|
4192
|
+
context_management: openaiOptions == null ? void 0 : openaiOptions.contextManagement,
|
|
4198
4193
|
// model-specific settings:
|
|
4199
4194
|
...isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
|
|
4200
4195
|
reasoning: {
|
|
@@ -5811,7 +5806,7 @@ var OpenAITranscriptionModel = class {
|
|
|
5811
5806
|
};
|
|
5812
5807
|
|
|
5813
5808
|
// src/version.ts
|
|
5814
|
-
var VERSION = true ? "3.0.24" : "0.0.0-test";
|
|
5809
|
+
var VERSION = true ? "3.0.26" : "0.0.0-test";
|
|
5815
5810
|
|
|
5816
5811
|
// src/openai-provider.ts
|
|
5817
5812
|
function createOpenAI(options = {}) {
|