@agenr/agenr-plugin 1.6.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +12 -0
- package/dist/anthropic-RE4XNAKE.js +5515 -0
- package/dist/azure-openai-responses-IQLXOCZS.js +190 -0
- package/dist/chunk-6DQXEU2A.js +32306 -0
- package/dist/chunk-EAQYK3U2.js +41 -0
- package/dist/chunk-HNWLZUWE.js +31 -0
- package/dist/chunk-JRUUYSFL.js +262 -0
- package/dist/chunk-OLOUBEE5.js +14022 -0
- package/dist/chunk-P5HNPYGQ.js +174 -0
- package/dist/chunk-RD7BUOBD.js +416 -0
- package/dist/chunk-RWWH2U4W.js +7056 -0
- package/dist/chunk-SEOMNQGB.js +86 -0
- package/dist/chunk-SQLXP7LT.js +4792 -0
- package/dist/chunk-URGOKODJ.js +17 -0
- package/dist/dist-R6ESEJ6P.js +1244 -0
- package/dist/google-NAVXTQLO.js +371 -0
- package/dist/google-gemini-cli-NKYJWHX2.js +712 -0
- package/dist/google-vertex-ZBJ2EDRH.js +414 -0
- package/dist/index.js +15942 -0
- package/dist/mistral-SBQYC4J5.js +38407 -0
- package/dist/multipart-parser-DV373IRF.js +371 -0
- package/dist/openai-codex-responses-XN3T3DEN.js +712 -0
- package/dist/openai-completions-75ZFOFU6.js +657 -0
- package/dist/openai-responses-DCK4BVNT.js +198 -0
- package/dist/src-T5RRS2HN.js +1408 -0
- package/openclaw.plugin.json +86 -0
- package/package.json +31 -0
|
@@ -0,0 +1,414 @@
|
|
|
1
|
+
import {
|
|
2
|
+
GoogleGenAI,
|
|
3
|
+
ThinkingLevel,
|
|
4
|
+
convertMessages,
|
|
5
|
+
convertTools,
|
|
6
|
+
isThinkingPart,
|
|
7
|
+
mapStopReason,
|
|
8
|
+
mapToolChoice,
|
|
9
|
+
retainThoughtSignature
|
|
10
|
+
} from "./chunk-6DQXEU2A.js";
|
|
11
|
+
import {
|
|
12
|
+
buildBaseOptions,
|
|
13
|
+
clampReasoning,
|
|
14
|
+
sanitizeSurrogates
|
|
15
|
+
} from "./chunk-P5HNPYGQ.js";
|
|
16
|
+
import {
|
|
17
|
+
AssistantMessageEventStream,
|
|
18
|
+
calculateCost
|
|
19
|
+
} from "./chunk-OLOUBEE5.js";
|
|
20
|
+
import "./chunk-EAQYK3U2.js";
|
|
21
|
+
|
|
22
|
+
// ../../node_modules/.pnpm/@mariozechner+pi-ai@0.63.2_@modelcontextprotocol+sdk@1.27.1_zod@4.3.6__ws@8.20.0_zod@4.3.6/node_modules/@mariozechner/pi-ai/dist/providers/google-vertex.js
|
|
23
|
+
// Vertex AI REST API version used for every request built in this module.
var API_VERSION = "v1";
// Maps pi-ai's string thinking-level names onto the @google/genai
// ThinkingLevel enum values expected by the SDK.
var THINKING_LEVEL_MAP = {
  THINKING_LEVEL_UNSPECIFIED: ThinkingLevel.THINKING_LEVEL_UNSPECIFIED,
  MINIMAL: ThinkingLevel.MINIMAL,
  LOW: ThinkingLevel.LOW,
  MEDIUM: ThinkingLevel.MEDIUM,
  HIGH: ThinkingLevel.HIGH
};
// Module-level counter used to synthesize unique tool-call IDs when the API
// omits an ID or repeats one within a single response (see streamGoogleVertex).
var toolCallCounter = 0;
|
|
32
|
+
// Streams a Vertex AI generateContent response, translating Google SDK chunks
// into pi-ai assistant-message events (text / thinking / toolCall blocks).
// Returns the event stream immediately; the async IIFE below drives it.
// On any failure the stream receives a single "error" event and is ended.
var streamGoogleVertex = (model, context, options) => {
  const stream = new AssistantMessageEventStream();
  (async () => {
    // Accumulated assistant message; mutated in place and attached to every
    // event as `partial` so consumers can render incrementally.
    const output = {
      role: "assistant",
      content: [],
      api: "google-vertex",
      provider: model.provider,
      model: model.id,
      usage: {
        input: 0,
        output: 0,
        cacheRead: 0,
        cacheWrite: 0,
        totalTokens: 0,
        cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 }
      },
      stopReason: "stop",
      timestamp: Date.now()
    };
    try {
      // An explicit API key takes precedence over project/location (ADC) auth.
      const apiKey = resolveApiKey(options);
      const client = apiKey ? createClientWithApiKey(model, apiKey, options?.headers) : createClient(model, resolveProject(options), resolveLocation(options), options?.headers);
      let params = buildParams(model, context, options);
      // Give the caller a chance to inspect/replace the outgoing payload.
      const nextParams = await options?.onPayload?.(params, model);
      if (nextParams !== void 0) {
        params = nextParams;
      }
      const googleStream = await client.models.generateContentStream(params);
      stream.push({ type: "start", partial: output });
      // currentBlock tracks the open text/thinking block; tool calls close it.
      let currentBlock = null;
      const blocks = output.content;
      const blockIndex = () => blocks.length - 1;
      for await (const chunk of googleStream) {
        output.responseId ||= chunk.responseId;
        const candidate = chunk.candidates?.[0];
        if (candidate?.content?.parts) {
          for (const part of candidate.content.parts) {
            if (part.text !== void 0) {
              const isThinking = isThinkingPart(part);
              // Open a new block when there is none, or when the part kind
              // (thinking vs text) differs from the currently open block.
              if (!currentBlock || isThinking && currentBlock.type !== "thinking" || !isThinking && currentBlock.type !== "text") {
                if (currentBlock) {
                  // Close out the previous block before starting a new one.
                  if (currentBlock.type === "text") {
                    stream.push({
                      type: "text_end",
                      contentIndex: blocks.length - 1,
                      content: currentBlock.text,
                      partial: output
                    });
                  } else {
                    stream.push({
                      type: "thinking_end",
                      contentIndex: blockIndex(),
                      content: currentBlock.thinking,
                      partial: output
                    });
                  }
                }
                if (isThinking) {
                  currentBlock = { type: "thinking", thinking: "", thinkingSignature: void 0 };
                  output.content.push(currentBlock);
                  stream.push({ type: "thinking_start", contentIndex: blockIndex(), partial: output });
                } else {
                  currentBlock = { type: "text", text: "" };
                  output.content.push(currentBlock);
                  stream.push({ type: "text_start", contentIndex: blockIndex(), partial: output });
                }
              }
              if (currentBlock.type === "thinking") {
                currentBlock.thinking += part.text;
                // Keep the latest non-empty thought signature seen so far.
                currentBlock.thinkingSignature = retainThoughtSignature(currentBlock.thinkingSignature, part.thoughtSignature);
                stream.push({
                  type: "thinking_delta",
                  contentIndex: blockIndex(),
                  delta: part.text,
                  partial: output
                });
              } else {
                currentBlock.text += part.text;
                currentBlock.textSignature = retainThoughtSignature(currentBlock.textSignature, part.thoughtSignature);
                stream.push({
                  type: "text_delta",
                  contentIndex: blockIndex(),
                  delta: part.text,
                  partial: output
                });
              }
            }
            if (part.functionCall) {
              // A tool call terminates any open text/thinking block.
              if (currentBlock) {
                if (currentBlock.type === "text") {
                  stream.push({
                    type: "text_end",
                    contentIndex: blockIndex(),
                    content: currentBlock.text,
                    partial: output
                  });
                } else {
                  stream.push({
                    type: "thinking_end",
                    contentIndex: blockIndex(),
                    content: currentBlock.thinking,
                    partial: output
                  });
                }
                currentBlock = null;
              }
              const providedId = part.functionCall.id;
              // Synthesize an ID when the API omits one or repeats an ID that
              // is already present in this response.
              const needsNewId = !providedId || output.content.some((b) => b.type === "toolCall" && b.id === providedId);
              const toolCallId = needsNewId ? `${part.functionCall.name}_${Date.now()}_${++toolCallCounter}` : providedId;
              const toolCall = {
                type: "toolCall",
                id: toolCallId,
                name: part.functionCall.name || "",
                arguments: part.functionCall.args ?? {},
                ...part.thoughtSignature && { thoughtSignature: part.thoughtSignature }
              };
              output.content.push(toolCall);
              // Tool-call arguments arrive whole, so emit start/delta/end as
              // one synthetic sequence.
              stream.push({ type: "toolcall_start", contentIndex: blockIndex(), partial: output });
              stream.push({
                type: "toolcall_delta",
                contentIndex: blockIndex(),
                delta: JSON.stringify(toolCall.arguments),
                partial: output
              });
              stream.push({ type: "toolcall_end", contentIndex: blockIndex(), toolCall, partial: output });
            }
          }
        }
        if (candidate?.finishReason) {
          output.stopReason = mapStopReason(candidate.finishReason);
          // Any tool call in the message overrides the mapped stop reason.
          if (output.content.some((b) => b.type === "toolCall")) {
            output.stopReason = "toolUse";
          }
        }
        if (chunk.usageMetadata) {
          // Input is billed minus cached tokens; thoughts count toward output.
          output.usage = {
            input: (chunk.usageMetadata.promptTokenCount || 0) - (chunk.usageMetadata.cachedContentTokenCount || 0),
            output: (chunk.usageMetadata.candidatesTokenCount || 0) + (chunk.usageMetadata.thoughtsTokenCount || 0),
            cacheRead: chunk.usageMetadata.cachedContentTokenCount || 0,
            cacheWrite: 0,
            totalTokens: chunk.usageMetadata.totalTokenCount || 0,
            cost: {
              input: 0,
              output: 0,
              cacheRead: 0,
              cacheWrite: 0,
              total: 0
            }
          };
          calculateCost(model, output.usage);
        }
      }
      // Close whatever block is still open once the stream is exhausted.
      if (currentBlock) {
        if (currentBlock.type === "text") {
          stream.push({
            type: "text_end",
            contentIndex: blockIndex(),
            content: currentBlock.text,
            partial: output
          });
        } else {
          stream.push({
            type: "thinking_end",
            contentIndex: blockIndex(),
            content: currentBlock.thinking,
            partial: output
          });
        }
      }
      if (options?.signal?.aborted) {
        throw new Error("Request was aborted");
      }
      if (output.stopReason === "aborted" || output.stopReason === "error") {
        throw new Error("An unknown error occurred");
      }
      stream.push({ type: "done", reason: output.stopReason, message: output });
      stream.end();
    } catch (error) {
      // Strip any transient `index` bookkeeping before reporting the error.
      for (const block of output.content) {
        if ("index" in block) {
          delete block.index;
        }
      }
      output.stopReason = options?.signal?.aborted ? "aborted" : "error";
      output.errorMessage = error instanceof Error ? error.message : JSON.stringify(error);
      stream.push({ type: "error", reason: output.stopReason, error: output });
      stream.end();
    }
  })();
  return stream;
};
|
|
224
|
+
// Convenience wrapper around streamGoogleVertex that derives the thinking
// configuration from a simple reasoning-effort option. Gemini 3 models use
// discrete thinking levels; older models use a token budget.
var streamSimpleGoogleVertex = (model, context, options) => {
  const baseOptions = buildBaseOptions(model, options, void 0);
  const run = (thinking) => streamGoogleVertex(model, context, { ...baseOptions, thinking });
  if (!options?.reasoning) {
    return run({ enabled: false });
  }
  const effort = clampReasoning(options.reasoning);
  const geminiModel = model;
  const usesThinkingLevels = isGemini3ProModel(geminiModel) || isGemini3FlashModel(geminiModel);
  if (usesThinkingLevels) {
    return run({
      enabled: true,
      level: getGemini3ThinkingLevel(effort, geminiModel)
    });
  }
  return run({
    enabled: true,
    budgetTokens: getGoogleBudget(geminiModel, effort, options.thinkingBudgets)
  });
};
|
|
251
|
+
// Builds a Vertex AI client authenticated via project/location (ADC).
// Model-level headers are merged with per-call headers (per-call wins);
// httpOptions is only passed when there is at least one header to send.
function createClient(model, project, location, optionsHeaders) {
  const headers = model.headers || optionsHeaders ? { ...model.headers, ...optionsHeaders } : void 0;
  return new GoogleGenAI({
    vertexai: true,
    project,
    location,
    apiVersion: API_VERSION,
    httpOptions: headers ? { headers } : void 0
  });
}
|
|
265
|
+
// Builds a Vertex AI client authenticated with an explicit API key instead
// of project/location credentials. Header merging mirrors createClient.
function createClientWithApiKey(model, apiKey, optionsHeaders) {
  const headers = model.headers || optionsHeaders ? { ...model.headers, ...optionsHeaders } : void 0;
  return new GoogleGenAI({
    vertexai: true,
    apiKey,
    apiVersion: API_VERSION,
    httpOptions: headers ? { headers } : void 0
  });
}
|
|
278
|
+
// Resolves an API key from options or the GOOGLE_CLOUD_API_KEY environment
// variable. Returns undefined for missing, blank, or placeholder keys
// (e.g. "<YOUR_API_KEY>"), signalling the caller to fall back to ADC auth.
function resolveApiKey(options) {
  const candidate = options?.apiKey?.trim() || process.env.GOOGLE_CLOUD_API_KEY?.trim();
  if (!candidate) {
    return void 0;
  }
  return isPlaceholderApiKey(candidate) ? void 0 : candidate;
}
|
|
285
|
+
// Returns true when the key is an unfilled template placeholder of the form
// "<...>" (angle brackets wrapping one or more non-">" characters).
function isPlaceholderApiKey(apiKey) {
  const placeholderPattern = /^<[^>]+>$/;
  return placeholderPattern.test(apiKey);
}
|
|
288
|
+
// Resolves the GCP project ID: explicit option first, then the
// GOOGLE_CLOUD_PROJECT / GCLOUD_PROJECT environment variables.
// Throws when no non-empty value is found, since Vertex AI requires one.
function resolveProject(options) {
  const candidates = [options?.project, process.env.GOOGLE_CLOUD_PROJECT, process.env.GCLOUD_PROJECT];
  const project = candidates.find(Boolean);
  if (!project) {
    throw new Error("Vertex AI requires a project ID. Set GOOGLE_CLOUD_PROJECT/GCLOUD_PROJECT or pass project in options.");
  }
  return project;
}
|
|
295
|
+
// Resolves the Vertex AI region: explicit option first, then the
// GOOGLE_CLOUD_LOCATION environment variable. Throws when neither yields
// a non-empty value.
function resolveLocation(options) {
  const location = [options?.location, process.env.GOOGLE_CLOUD_LOCATION].find(Boolean);
  if (!location) {
    throw new Error("Vertex AI requires a location. Set GOOGLE_CLOUD_LOCATION or pass location in options.");
  }
  return location;
}
|
|
302
|
+
// Translates a pi-ai model/context/options triple into the parameter object
// expected by client.models.generateContentStream. Throws immediately if the
// abort signal is already set.
function buildParams(model, context, options = {}) {
  const contents = convertMessages(model, context);
  const generationConfig = {};
  if (options.temperature !== void 0) {
    generationConfig.temperature = options.temperature;
  }
  if (options.maxTokens !== void 0) {
    generationConfig.maxOutputTokens = options.maxTokens;
  }
  // Conditional spreads: each `cond && obj` contributes nothing when the
  // condition is falsy (spreading `false` is a no-op), so empty sections
  // never appear as keys on the config.
  const config = {
    ...Object.keys(generationConfig).length > 0 && generationConfig,
    ...context.systemPrompt && { systemInstruction: sanitizeSurrogates(context.systemPrompt) },
    ...context.tools && context.tools.length > 0 && { tools: convertTools(context.tools) }
  };
  if (context.tools && context.tools.length > 0 && options.toolChoice) {
    config.toolConfig = {
      functionCallingConfig: {
        mode: mapToolChoice(options.toolChoice)
      }
    };
  } else {
    // Explicitly set to undefined (rather than omitted) when no tool choice
    // applies.
    config.toolConfig = void 0;
  }
  if (options.thinking?.enabled && model.reasoning) {
    const thinkingConfig = { includeThoughts: true };
    // A discrete level takes precedence over a token budget when both are set.
    if (options.thinking.level !== void 0) {
      thinkingConfig.thinkingLevel = THINKING_LEVEL_MAP[options.thinking.level];
    } else if (options.thinking.budgetTokens !== void 0) {
      thinkingConfig.thinkingBudget = options.thinking.budgetTokens;
    }
    config.thinkingConfig = thinkingConfig;
  } else if (model.reasoning && options.thinking && !options.thinking.enabled) {
    // Thinking explicitly disabled on a reasoning-capable model: some models
    // cannot fully turn it off, so use the per-model minimum instead.
    config.thinkingConfig = getDisabledThinkingConfig(model);
  }
  if (options.signal) {
    if (options.signal.aborted) {
      throw new Error("Request aborted");
    }
    config.abortSignal = options.signal;
  }
  const params = {
    model: model.id,
    contents,
    config
  };
  return params;
}
|
|
349
|
+
// True for Gemini 3 "pro" model IDs, with or without a minor version
// (e.g. "gemini-3-pro-preview", "gemini-3.1-pro"). Case-insensitive.
function isGemini3ProModel(model) {
  const normalizedId = model.id.toLowerCase();
  return /gemini-3(?:\.\d+)?-pro/.test(normalizedId);
}
|
|
352
|
+
// True for Gemini 3 "flash" model IDs, with or without a minor version
// (e.g. "gemini-3-flash", "gemini-3.1-flash"). Case-insensitive.
function isGemini3FlashModel(model) {
  const normalizedId = model.id.toLowerCase();
  return /gemini-3(?:\.\d+)?-flash/.test(normalizedId);
}
|
|
355
|
+
// Returns the thinkingConfig that most closely approximates "thinking off"
// for the given model: Gemini 3 models cannot disable thinking entirely, so
// they get their lowest supported level; everything else gets a zero budget.
function getDisabledThinkingConfig(model) {
  if (isGemini3ProModel(model)) {
    return { thinkingLevel: ThinkingLevel.LOW };
  }
  if (isGemini3FlashModel(model)) {
    return { thinkingLevel: ThinkingLevel.MINIMAL };
  }
  return { thinkingBudget: 0 };
}
|
|
365
|
+
// Maps a reasoning effort to a Gemini 3 thinking level. Pro models only
// support LOW/HIGH, so minimal|low collapse to "LOW" and medium|high to
// "HIGH"; other models get the full four-level mapping. An unrecognized
// effort falls through to the generic table (and yields undefined there).
function getGemini3ThinkingLevel(effort, model) {
  if (isGemini3ProModel(model)) {
    if (effort === "minimal" || effort === "low") {
      return "LOW";
    }
    if (effort === "medium" || effort === "high") {
      return "HIGH";
    }
  }
  const levelByEffort = new Map([
    ["minimal", "MINIMAL"],
    ["low", "LOW"],
    ["medium", "MEDIUM"],
    ["high", "HIGH"]
  ]);
  return levelByEffort.get(effort);
}
|
|
387
|
+
// Resolves the thinking-token budget for a model/effort pair. Caller-supplied
// budgets win (including an explicit 0); otherwise known 2.5-pro/2.5-flash
// models use built-in tables, and anything else returns -1 ("dynamic" budget).
function getGoogleBudget(model, effort, customBudgets) {
  const custom = customBudgets?.[effort];
  if (custom !== void 0) {
    return custom;
  }
  let table = null;
  if (model.id.includes("2.5-pro")) {
    table = new Map([
      ["minimal", 128],
      ["low", 2048],
      ["medium", 8192],
      ["high", 32768]
    ]);
  } else if (model.id.includes("2.5-flash")) {
    table = new Map([
      ["minimal", 128],
      ["low", 2048],
      ["medium", 8192],
      ["high", 24576]
    ]);
  }
  return table ? table.get(effort) : -1;
}
|
|
411
|
+
// Public entry points of the google-vertex provider bundle.
export {
  streamGoogleVertex,
  streamSimpleGoogleVertex
};
|