@agenr/agenr-plugin 1.6.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +12 -0
- package/dist/anthropic-RE4XNAKE.js +5515 -0
- package/dist/azure-openai-responses-IQLXOCZS.js +190 -0
- package/dist/chunk-6DQXEU2A.js +32306 -0
- package/dist/chunk-EAQYK3U2.js +41 -0
- package/dist/chunk-HNWLZUWE.js +31 -0
- package/dist/chunk-JRUUYSFL.js +262 -0
- package/dist/chunk-OLOUBEE5.js +14022 -0
- package/dist/chunk-P5HNPYGQ.js +174 -0
- package/dist/chunk-RD7BUOBD.js +416 -0
- package/dist/chunk-RWWH2U4W.js +7056 -0
- package/dist/chunk-SEOMNQGB.js +86 -0
- package/dist/chunk-SQLXP7LT.js +4792 -0
- package/dist/chunk-URGOKODJ.js +17 -0
- package/dist/dist-R6ESEJ6P.js +1244 -0
- package/dist/google-NAVXTQLO.js +371 -0
- package/dist/google-gemini-cli-NKYJWHX2.js +712 -0
- package/dist/google-vertex-ZBJ2EDRH.js +414 -0
- package/dist/index.js +15942 -0
- package/dist/mistral-SBQYC4J5.js +38407 -0
- package/dist/multipart-parser-DV373IRF.js +371 -0
- package/dist/openai-codex-responses-XN3T3DEN.js +712 -0
- package/dist/openai-completions-75ZFOFU6.js +657 -0
- package/dist/openai-responses-DCK4BVNT.js +198 -0
- package/dist/src-T5RRS2HN.js +1408 -0
- package/openclaw.plugin.json +86 -0
- package/package.json +31 -0
|
@@ -0,0 +1,198 @@
|
|
|
1
|
+
import {
|
|
2
|
+
buildCopilotDynamicHeaders,
|
|
3
|
+
hasCopilotVisionInput
|
|
4
|
+
} from "./chunk-HNWLZUWE.js";
|
|
5
|
+
import {
|
|
6
|
+
convertResponsesMessages,
|
|
7
|
+
convertResponsesTools,
|
|
8
|
+
processResponsesStream
|
|
9
|
+
} from "./chunk-RD7BUOBD.js";
|
|
10
|
+
import "./chunk-URGOKODJ.js";
|
|
11
|
+
import {
|
|
12
|
+
OpenAI
|
|
13
|
+
} from "./chunk-RWWH2U4W.js";
|
|
14
|
+
import "./chunk-JRUUYSFL.js";
|
|
15
|
+
import {
|
|
16
|
+
getEnvApiKey
|
|
17
|
+
} from "./chunk-SEOMNQGB.js";
|
|
18
|
+
import {
|
|
19
|
+
buildBaseOptions,
|
|
20
|
+
clampReasoning
|
|
21
|
+
} from "./chunk-P5HNPYGQ.js";
|
|
22
|
+
import {
|
|
23
|
+
AssistantMessageEventStream,
|
|
24
|
+
supportsXhigh
|
|
25
|
+
} from "./chunk-OLOUBEE5.js";
|
|
26
|
+
import "./chunk-EAQYK3U2.js";
|
|
27
|
+
|
|
28
|
+
// ../../node_modules/.pnpm/@mariozechner+pi-ai@0.63.2_@modelcontextprotocol+sdk@1.27.1_zod@4.3.6__ws@8.20.0_zod@4.3.6/node_modules/@mariozechner/pi-ai/dist/providers/openai-responses.js
// Providers whose messages carry native OpenAI-style tool calls; handed to
// convertResponsesMessages (buildParams below) to select the tool-call
// conversion path. /* @__PURE__ */ marks the Set as tree-shakeable.
var OPENAI_TOOL_CALL_PROVIDERS = /* @__PURE__ */ new Set(["openai", "openai-codex", "opencode"]);
|
|
30
|
+
/**
 * Resolve the effective prompt-cache retention setting.
 *
 * Precedence: an explicit `cacheRetention` argument wins; otherwise the
 * PI_CACHE_RETENTION environment variable may opt in to "long"; everything
 * else falls back to "short".
 *
 * @param {string|undefined} cacheRetention - Caller-supplied retention ("none"/"short"/"long"), if any.
 * @returns {string} The retention mode to use.
 */
function resolveCacheRetention(cacheRetention) {
  if (cacheRetention) {
    return cacheRetention;
  }
  // `typeof process` guard keeps this safe in browser-like runtimes.
  const envWantsLong = typeof process !== "undefined" && process.env.PI_CACHE_RETENTION === "long";
  return envWantsLong ? "long" : "short";
}
|
|
39
|
+
/**
 * Map the resolved retention mode to the Responses API
 * `prompt_cache_retention` value.
 *
 * Only "long" retention against the official OpenAI endpoint yields "24h";
 * every other combination returns undefined so the field is omitted.
 *
 * @param {string} baseUrl - The provider base URL the client will call.
 * @param {string} cacheRetention - Resolved retention mode (see resolveCacheRetention).
 * @returns {string|undefined} "24h" or undefined.
 */
function getPromptCacheRetention(baseUrl, cacheRetention) {
  const wantsLong = cacheRetention === "long";
  const isOfficialOpenAI = baseUrl.includes("api.openai.com");
  return wantsLong && isOfficialOpenAI ? "24h" : void 0;
}
|
|
48
|
+
/**
 * Stream an assistant turn from an OpenAI Responses-compatible endpoint.
 *
 * Returns an AssistantMessageEventStream immediately; an inner async IIFE
 * drives the request and pushes "start" / streamed deltas / "done" or
 * "error" events onto it. Errors never escape this function — they are
 * reported through the stream.
 *
 * @param model   Model descriptor (id, api, provider, baseUrl, headers, reasoning…).
 * @param context Conversation context (messages, tools) passed to the converters.
 * @param options Optional settings: apiKey, headers, signal, serviceTier,
 *                sessionId, cacheRetention, maxTokens, temperature,
 *                reasoningEffort/Summary, onPayload hook.
 * @returns {AssistantMessageEventStream} The event stream for this turn.
 */
var streamOpenAIResponses = (model, context, options) => {
  const stream = new AssistantMessageEventStream();
  // Fire-and-forget driver; all outcomes are surfaced via `stream`.
  (async () => {
    // Accumulator for the in-progress assistant message; mutated in place
    // by processResponsesStream as chunks arrive.
    const output = {
      role: "assistant",
      content: [],
      api: model.api,
      provider: model.provider,
      model: model.id,
      usage: {
        input: 0,
        output: 0,
        cacheRead: 0,
        cacheWrite: 0,
        totalTokens: 0,
        cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 }
      },
      stopReason: "stop",
      timestamp: Date.now()
    };
    try {
      const apiKey = options?.apiKey || getEnvApiKey(model.provider) || "";
      const client = createClient(model, context, apiKey, options?.headers);
      let params = buildParams(model, context, options);
      // onPayload may rewrite the request params; undefined means "keep as is".
      const nextParams = await options?.onPayload?.(params, model);
      if (nextParams !== void 0) {
        params = nextParams;
      }
      const openaiStream = await client.responses.create(params, options?.signal ? { signal: options.signal } : void 0);
      stream.push({ type: "start", partial: output });
      await processResponsesStream(openaiStream, output, stream, model, {
        serviceTier: options?.serviceTier,
        applyServiceTierPricing
      });
      // Abort may have landed after the stream finished draining — still
      // route it through the error path below.
      if (options?.signal?.aborted) {
        throw new Error("Request was aborted");
      }
      if (output.stopReason === "aborted" || output.stopReason === "error") {
        throw new Error("An unknown error occurred");
      }
      stream.push({ type: "done", reason: output.stopReason, message: output });
      stream.end();
    } catch (error) {
      // Strip streaming bookkeeping (partial block indices) before reporting.
      for (const block of output.content)
        delete block.index;
      output.stopReason = options?.signal?.aborted ? "aborted" : "error";
      output.errorMessage = error instanceof Error ? error.message : JSON.stringify(error);
      stream.push({ type: "error", reason: output.stopReason, error: output });
      stream.end();
    }
  })();
  return stream;
};
|
|
101
|
+
/**
 * Convenience wrapper over streamOpenAIResponses: resolves the API key,
 * builds baseline options, and clamps the reasoning effort for models that
 * do not support the "xhigh" level.
 *
 * @throws {Error} When no API key is provided and none is found in the environment.
 * @returns {AssistantMessageEventStream} The event stream for this turn.
 */
var streamSimpleOpenAIResponses = (model, context, options) => {
  const resolvedKey = options?.apiKey || getEnvApiKey(model.provider);
  if (!resolvedKey) {
    throw new Error(`No API key for provider: ${model.provider}`);
  }
  // Only clamp when the model cannot accept the full effort range.
  let reasoningEffort;
  if (supportsXhigh(model)) {
    reasoningEffort = options?.reasoning;
  } else {
    reasoningEffort = clampReasoning(options?.reasoning);
  }
  const baseOptions = buildBaseOptions(model, options, resolvedKey);
  return streamOpenAIResponses(model, context, { ...baseOptions, reasoningEffort });
};
|
|
113
|
+
/**
 * Build an OpenAI SDK client for the given model.
 *
 * Key resolution: the explicit `apiKey` wins, otherwise OPENAI_API_KEY from
 * the environment; with neither, this throws. Header precedence (last wins):
 * model.headers < copilot dynamic headers < optionsHeaders.
 *
 * @throws {Error} When no API key can be resolved.
 * @returns {OpenAI} A configured SDK client.
 */
function createClient(model, context, apiKey, optionsHeaders) {
  let resolvedKey = apiKey;
  if (!resolvedKey) {
    if (!process.env.OPENAI_API_KEY) {
      throw new Error("OpenAI API key is required. Set OPENAI_API_KEY environment variable or pass it as an argument.");
    }
    resolvedKey = process.env.OPENAI_API_KEY;
  }
  const headers = { ...model.headers };
  // GitHub Copilot needs per-request headers derived from the message payload.
  if (model.provider === "github-copilot") {
    const copilotHeaders = buildCopilotDynamicHeaders({
      messages: context.messages,
      hasImages: hasCopilotVisionInput(context.messages)
    });
    Object.assign(headers, copilotHeaders);
  }
  if (optionsHeaders) {
    Object.assign(headers, optionsHeaders);
  }
  return new OpenAI({
    apiKey: resolvedKey,
    baseURL: model.baseUrl,
    // This bundle may run in browser-like hosts; opt out of the SDK guard.
    dangerouslyAllowBrowser: true,
    defaultHeaders: headers
  });
}
|
|
139
|
+
/**
 * Assemble the Responses API request body for a turn.
 *
 * Always sets model/input/stream/caching/store; optional fields
 * (max_output_tokens, temperature, service_tier, tools, reasoning) are added
 * only when the corresponding option or context value is present. Property
 * insertion order matches the original request shape.
 *
 * @returns {object} The request params for client.responses.create.
 */
function buildParams(model, context, options) {
  const input = convertResponsesMessages(model, context, OPENAI_TOOL_CALL_PROVIDERS);
  const retention = resolveCacheRetention(options?.cacheRetention);
  const params = {
    model: model.id,
    input,
    stream: true,
    // "none" disables the cache key entirely; otherwise key by session.
    prompt_cache_key: retention === "none" ? void 0 : options?.sessionId,
    prompt_cache_retention: getPromptCacheRetention(model.baseUrl, retention),
    store: false
  };
  if (options?.maxTokens) {
    params.max_output_tokens = options.maxTokens;
  }
  if (options?.temperature !== void 0) {
    params.temperature = options.temperature;
  }
  if (options?.serviceTier !== void 0) {
    params.service_tier = options.serviceTier;
  }
  if (context.tools) {
    params.tools = convertResponsesTools(context.tools);
  }
  if (model.reasoning) {
    const wantsReasoning = options?.reasoningEffort || options?.reasoningSummary;
    if (wantsReasoning) {
      params.reasoning = {
        effort: options?.reasoningEffort || "medium",
        summary: options?.reasoningSummary || "auto"
      };
      // Needed so reasoning state survives across stateless (store:false) turns.
      params.include = ["reasoning.encrypted_content"];
    } else if (model.provider !== "github-copilot") {
      // Copilot rejects effort "none"; everyone else gets it explicitly.
      params.reasoning = { effort: "none" };
    }
  }
  return params;
}
|
|
175
|
+
/**
 * Cost multiplier for an OpenAI service tier.
 *
 * "flex" is billed at half price, "priority" at double; any other (or
 * missing) tier is standard pricing.
 *
 * @param {string|undefined} serviceTier - The requested service tier.
 * @returns {number} The multiplier to apply to per-token costs.
 */
function getServiceTierCostMultiplier(serviceTier) {
  if (serviceTier === "flex") {
    return 0.5;
  }
  if (serviceTier === "priority") {
    return 2;
  }
  return 1;
}
|
|
185
|
+
/**
 * Scale a usage record's cost fields in place for the given service tier.
 *
 * No-op at standard pricing (multiplier 1). Otherwise each cost component is
 * scaled and the total is recomputed from the scaled components.
 *
 * @param {{cost: {input: number, output: number, cacheRead: number, cacheWrite: number, total: number}}} usage - Mutated in place.
 * @param {string|undefined} serviceTier - The tier used for the request.
 */
function applyServiceTierPricing(usage, serviceTier) {
  const factor = getServiceTierCostMultiplier(serviceTier);
  if (factor === 1) {
    return;
  }
  const cost = usage.cost;
  for (const component of ["input", "output", "cacheRead", "cacheWrite"]) {
    cost[component] *= factor;
  }
  cost.total = cost.input + cost.output + cost.cacheRead + cost.cacheWrite;
}
|
|
195
|
+
export {
|
|
196
|
+
streamOpenAIResponses,
|
|
197
|
+
streamSimpleOpenAIResponses
|
|
198
|
+
};
|