@agenr/agenr-plugin 1.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,371 @@
1
+ import {
2
+ getEnvApiKey
3
+ } from "./chunk-SEOMNQGB.js";
4
+ import {
5
+ GoogleGenAI,
6
+ convertMessages,
7
+ convertTools,
8
+ isThinkingPart,
9
+ mapStopReason,
10
+ mapToolChoice,
11
+ retainThoughtSignature
12
+ } from "./chunk-6DQXEU2A.js";
13
+ import {
14
+ buildBaseOptions,
15
+ clampReasoning,
16
+ sanitizeSurrogates
17
+ } from "./chunk-P5HNPYGQ.js";
18
+ import {
19
+ AssistantMessageEventStream,
20
+ calculateCost
21
+ } from "./chunk-OLOUBEE5.js";
22
+ import "./chunk-EAQYK3U2.js";
23
+
24
+ // ../../node_modules/.pnpm/@mariozechner+pi-ai@0.63.2_@modelcontextprotocol+sdk@1.27.1_zod@4.3.6__ws@8.20.0_zod@4.3.6/node_modules/@mariozechner/pi-ai/dist/providers/google.js
25
// Monotonic counter used to synthesize unique tool-call IDs when the API
// omits an ID or repeats one within a single response.
let toolCallCounter = 0;
26
/**
 * Streams a chat completion from the Google Generative AI API.
 *
 * The AssistantMessageEventStream is returned synchronously; all network and
 * parsing work happens in a detached async IIFE that pushes events into it:
 * "start", then interleaved text/thinking/toolcall start/delta/end events,
 * and finally either "done" or "error". The in-progress assistant message
 * (`output`) is attached to every event as `partial`/`message`/`error`, so
 * consumers can render progressively.
 *
 * @param model   Model descriptor (id, provider, baseUrl, headers, reasoning).
 * @param context Conversation context (messages, systemPrompt, tools).
 * @param options Optional: apiKey, headers, signal, onPayload, temperature,
 *                maxTokens, toolChoice, thinking.
 * @returns AssistantMessageEventStream — errors are reported through the
 *          stream, never thrown to the caller.
 */
var streamGoogle = (model, context, options) => {
  const stream = new AssistantMessageEventStream();
  (async () => {
    // Assistant message accumulated across chunks; mutated in place.
    const output = {
      role: "assistant",
      content: [],
      api: "google-generative-ai",
      provider: model.provider,
      model: model.id,
      usage: {
        input: 0,
        output: 0,
        cacheRead: 0,
        cacheWrite: 0,
        totalTokens: 0,
        cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 }
      },
      stopReason: "stop",
      timestamp: Date.now()
    };
    try {
      // Explicit option wins over the provider's environment variable.
      const apiKey = options?.apiKey || getEnvApiKey(model.provider) || "";
      const client = createClient(model, apiKey, options?.headers);
      let params = buildParams(model, context, options);
      // Give callers a chance to inspect or replace the request payload.
      const nextParams = await options?.onPayload?.(params, model);
      if (nextParams !== void 0) {
        params = nextParams;
      }
      const googleStream = await client.models.generateContentStream(params);
      stream.push({ type: "start", partial: output });
      // The text/thinking block currently being appended to, if any.
      let currentBlock = null;
      const blocks = output.content;
      // Index of the most recently pushed content block.
      const blockIndex = () => blocks.length - 1;
      for await (const chunk of googleStream) {
        // Keep the first responseId observed.
        output.responseId ||= chunk.responseId;
        const candidate = chunk.candidates?.[0];
        if (candidate?.content?.parts) {
          for (const part of candidate.content.parts) {
            if (part.text !== void 0) {
              const isThinking = isThinkingPart(part);
              // Open a new block whenever the part kind (thinking vs plain
              // text) changes, or no block is open yet; close the old one.
              if (!currentBlock || isThinking && currentBlock.type !== "thinking" || !isThinking && currentBlock.type !== "text") {
                if (currentBlock) {
                  if (currentBlock.type === "text") {
                    stream.push({
                      type: "text_end",
                      contentIndex: blocks.length - 1, // same value as blockIndex()
                      content: currentBlock.text,
                      partial: output
                    });
                  } else {
                    stream.push({
                      type: "thinking_end",
                      contentIndex: blockIndex(),
                      content: currentBlock.thinking,
                      partial: output
                    });
                  }
                }
                if (isThinking) {
                  currentBlock = { type: "thinking", thinking: "", thinkingSignature: void 0 };
                  output.content.push(currentBlock);
                  stream.push({ type: "thinking_start", contentIndex: blockIndex(), partial: output });
                } else {
                  currentBlock = { type: "text", text: "" };
                  output.content.push(currentBlock);
                  stream.push({ type: "text_start", contentIndex: blockIndex(), partial: output });
                }
              }
              if (currentBlock.type === "thinking") {
                currentBlock.thinking += part.text;
                // Retain a thought signature across deltas for later replay.
                currentBlock.thinkingSignature = retainThoughtSignature(currentBlock.thinkingSignature, part.thoughtSignature);
                stream.push({
                  type: "thinking_delta",
                  contentIndex: blockIndex(),
                  delta: part.text,
                  partial: output
                });
              } else {
                currentBlock.text += part.text;
                currentBlock.textSignature = retainThoughtSignature(currentBlock.textSignature, part.thoughtSignature);
                stream.push({
                  type: "text_delta",
                  contentIndex: blockIndex(),
                  delta: part.text,
                  partial: output
                });
              }
            }
            if (part.functionCall) {
              // A tool call terminates any open text/thinking block.
              if (currentBlock) {
                if (currentBlock.type === "text") {
                  stream.push({
                    type: "text_end",
                    contentIndex: blockIndex(),
                    content: currentBlock.text,
                    partial: output
                  });
                } else {
                  stream.push({
                    type: "thinking_end",
                    contentIndex: blockIndex(),
                    content: currentBlock.thinking,
                    partial: output
                  });
                }
                currentBlock = null;
              }
              // The API may omit IDs or repeat them within one response;
              // synthesize a unique ID in either case.
              const providedId = part.functionCall.id;
              const needsNewId = !providedId || output.content.some((b) => b.type === "toolCall" && b.id === providedId);
              const toolCallId = needsNewId ? `${part.functionCall.name}_${Date.now()}_${++toolCallCounter}` : providedId;
              const toolCall = {
                type: "toolCall",
                id: toolCallId,
                name: part.functionCall.name || "",
                arguments: part.functionCall.args ?? {},
                ...part.thoughtSignature && { thoughtSignature: part.thoughtSignature }
              };
              output.content.push(toolCall);
              // Arguments arrive whole in a single part, so the three
              // tool-call events are emitted back-to-back.
              stream.push({ type: "toolcall_start", contentIndex: blockIndex(), partial: output });
              stream.push({
                type: "toolcall_delta",
                contentIndex: blockIndex(),
                delta: JSON.stringify(toolCall.arguments),
                partial: output
              });
              stream.push({ type: "toolcall_end", contentIndex: blockIndex(), toolCall, partial: output });
            }
          }
        }
        if (candidate?.finishReason) {
          output.stopReason = mapStopReason(candidate.finishReason);
          // Any tool call overrides the provider-reported stop reason.
          if (output.content.some((b) => b.type === "toolCall")) {
            output.stopReason = "toolUse";
          }
        }
        if (chunk.usageMetadata) {
          // `input` excludes cached tokens; `output` includes thinking tokens.
          output.usage = {
            input: (chunk.usageMetadata.promptTokenCount || 0) - (chunk.usageMetadata.cachedContentTokenCount || 0),
            output: (chunk.usageMetadata.candidatesTokenCount || 0) + (chunk.usageMetadata.thoughtsTokenCount || 0),
            cacheRead: chunk.usageMetadata.cachedContentTokenCount || 0,
            cacheWrite: 0,
            totalTokens: chunk.usageMetadata.totalTokenCount || 0,
            cost: {
              input: 0,
              output: 0,
              cacheRead: 0,
              cacheWrite: 0,
              total: 0
            }
          };
          // Fills in output.usage.cost from the model's pricing.
          calculateCost(model, output.usage);
        }
      }
      // Close whatever block was still open when the stream ended.
      if (currentBlock) {
        if (currentBlock.type === "text") {
          stream.push({
            type: "text_end",
            contentIndex: blockIndex(),
            content: currentBlock.text,
            partial: output
          });
        } else {
          stream.push({
            type: "thinking_end",
            contentIndex: blockIndex(),
            content: currentBlock.thinking,
            partial: output
          });
        }
      }
      if (options?.signal?.aborted) {
        throw new Error("Request was aborted");
      }
      if (output.stopReason === "aborted" || output.stopReason === "error") {
        throw new Error("An unknown error occurred");
      }
      stream.push({ type: "done", reason: output.stopReason, message: output });
      stream.end();
    } catch (error) {
      // NOTE(review): drops stray `index` properties from content blocks
      // before surfacing the partial message; `index` is never set in this
      // function — presumably attached by the SDK or shared converters.
      for (const block of output.content) {
        if ("index" in block) {
          delete block.index;
        }
      }
      output.stopReason = options?.signal?.aborted ? "aborted" : "error";
      output.errorMessage = error instanceof Error ? error.message : JSON.stringify(error);
      stream.push({ type: "error", reason: output.stopReason, error: output });
      stream.end();
    }
  })();
  return stream;
};
218
/**
 * Convenience wrapper around streamGoogle that resolves the API key and maps
 * a coarse reasoning effort to the thinking configuration appropriate for
 * the model family (discrete levels for Gemini 3, token budgets otherwise).
 *
 * @throws Error when no API key is available for the model's provider.
 */
var streamSimpleGoogle = (model, context, options) => {
  const key = options?.apiKey || getEnvApiKey(model.provider);
  if (!key) {
    throw new Error(`No API key for provider: ${model.provider}`);
  }
  const baseOptions = buildBaseOptions(model, options, key);
  // Small helper so every exit point shares the same call shape.
  const run = (thinking) => streamGoogle(model, context, { ...baseOptions, thinking });

  if (!options?.reasoning) {
    return run({ enabled: false });
  }

  const effort = clampReasoning(options.reasoning);
  const usesThinkingLevels = isGemini3ProModel(model) || isGemini3FlashModel(model);
  if (usesThinkingLevels) {
    return run({ enabled: true, level: getGemini3ThinkingLevel(effort, model) });
  }
  return run({ enabled: true, budgetTokens: getGoogleBudget(model, effort, options.thinkingBudgets) });
};
246
/**
 * Constructs a GoogleGenAI client, passing httpOptions only when the model
 * or caller customizes the endpoint or headers.
 */
function createClient(model, apiKey, optionsHeaders) {
  let httpOptions;
  if (model.baseUrl) {
    // A custom base URL replaces the default endpoint; clear the SDK's
    // implicit API-version path segment so the URL is used verbatim.
    httpOptions = { baseUrl: model.baseUrl, apiVersion: "" };
  }
  if (model.headers || optionsHeaders) {
    // Caller-supplied headers take precedence over model defaults.
    httpOptions = { ...httpOptions, headers: { ...model.headers, ...optionsHeaders } };
  }
  return new GoogleGenAI({ apiKey, httpOptions });
}
260
/**
 * Translates the provider-agnostic (model, context, options) triple into the
 * request parameters expected by client.models.generateContentStream.
 *
 * @throws Error("Request aborted") if options.signal is already aborted.
 */
function buildParams(model, context, options = {}) {
  const contents = convertMessages(model, context);

  // Sampling settings are attached only when explicitly provided.
  const generationConfig = {};
  if (options.temperature !== void 0) {
    generationConfig.temperature = options.temperature;
  }
  if (options.maxTokens !== void 0) {
    generationConfig.maxOutputTokens = options.maxTokens;
  }

  const config = {};
  if (Object.keys(generationConfig).length > 0) {
    Object.assign(config, generationConfig);
  }
  if (context.systemPrompt) {
    // Strip lone surrogate halves that would break the request encoding.
    config.systemInstruction = sanitizeSurrogates(context.systemPrompt);
  }
  const hasTools = Boolean(context.tools && context.tools.length > 0);
  if (hasTools) {
    config.tools = convertTools(context.tools);
  }
  if (hasTools && options.toolChoice) {
    config.toolConfig = {
      functionCallingConfig: {
        mode: mapToolChoice(options.toolChoice)
      }
    };
  } else {
    // Key intentionally present with an undefined value (matches the
    // original request shape).
    config.toolConfig = void 0;
  }

  if (options.thinking?.enabled && model.reasoning) {
    const thinkingConfig = { includeThoughts: true };
    // A discrete thinking level takes precedence over a token budget.
    if (options.thinking.level !== void 0) {
      thinkingConfig.thinkingLevel = options.thinking.level;
    } else if (options.thinking.budgetTokens !== void 0) {
      thinkingConfig.thinkingBudget = options.thinking.budgetTokens;
    }
    config.thinkingConfig = thinkingConfig;
  } else if (model.reasoning && options.thinking && !options.thinking.enabled) {
    // Thinking explicitly disabled on a reasoning-capable model.
    config.thinkingConfig = getDisabledThinkingConfig(model);
  }

  if (options.signal) {
    if (options.signal.aborted) {
      throw new Error("Request aborted");
    }
    config.abortSignal = options.signal;
  }

  return {
    model: model.id,
    contents,
    config
  };
}
307
/** True when the model id names a Gemini 3 Pro variant (e.g. gemini-3-pro, gemini-3.1-pro). */
function isGemini3ProModel(model) {
  const normalizedId = model.id.toLowerCase();
  return /gemini-3(?:\.\d+)?-pro/.test(normalizedId);
}
310
/** True when the model id names a Gemini 3 Flash variant (e.g. gemini-3-flash, gemini-3.1-flash). */
function isGemini3FlashModel(model) {
  const normalizedId = model.id.toLowerCase();
  return /gemini-3(?:\.\d+)?-flash/.test(normalizedId);
}
313
/**
 * Thinking config used when the caller disables reasoning. Gemini 3 models
 * cannot turn thinking fully off, so the lowest supported level is used;
 * every other reasoning-capable model accepts a zero token budget.
 */
function getDisabledThinkingConfig(model) {
  if (isGemini3ProModel(model)) return { thinkingLevel: "LOW" };
  if (isGemini3FlashModel(model)) return { thinkingLevel: "MINIMAL" };
  return { thinkingBudget: 0 };
}
322
/**
 * Maps a clamped reasoning effort onto a Gemini 3 thinking level. Pro models
 * only accept LOW/HIGH, so the four efforts collapse to two; other Gemini 3
 * models get the full four-level mapping. Unknown efforts yield undefined.
 */
function getGemini3ThinkingLevel(effort, model) {
  const proLevels = new Map([
    ["minimal", "LOW"],
    ["low", "LOW"],
    ["medium", "HIGH"],
    ["high", "HIGH"]
  ]);
  const standardLevels = new Map([
    ["minimal", "MINIMAL"],
    ["low", "LOW"],
    ["medium", "MEDIUM"],
    ["high", "HIGH"]
  ]);
  const levels = isGemini3ProModel(model) ? proLevels : standardLevels;
  return levels.get(effort);
}
344
/**
 * Resolves the thinking-token budget for a given effort. Caller-supplied
 * budgets win; otherwise known 2.5-generation models get a built-in table,
 * and anything else returns -1 (provider-side dynamic budget).
 */
function getGoogleBudget(model, effort, customBudgets) {
  const override = customBudgets?.[effort];
  if (override !== void 0) {
    return override;
  }
  const id = model.id;
  if (id.includes("2.5-pro")) {
    return { minimal: 128, low: 2048, medium: 8192, high: 32768 }[effort];
  }
  if (id.includes("2.5-flash")) {
    return { minimal: 128, low: 2048, medium: 8192, high: 24576 }[effort];
  }
  return -1;
}
368
+ export {
369
+ streamGoogle,
370
+ streamSimpleGoogle
371
+ };