@elizaos/plugin-google-genai 2.0.0-alpha.9 → 2.0.0-beta.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +124 -0
- package/auto-enable.ts +21 -0
- package/dist/browser/index.browser.js +415 -175
- package/dist/browser/index.browser.js.map +11 -11
- package/dist/build.d.ts +3 -0
- package/dist/build.d.ts.map +1 -0
- package/dist/build.js +117 -0
- package/dist/cjs/index.node.cjs +439 -181
- package/dist/cjs/index.node.js.map +11 -11
- package/dist/generated/specs/specs.d.ts +55 -0
- package/dist/generated/specs/specs.d.ts.map +1 -0
- package/dist/generated/specs/specs.js +34 -0
- package/dist/index.browser.d.ts +5 -0
- package/dist/index.browser.d.ts.map +1 -0
- package/dist/index.browser.js +4 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +252 -0
- package/dist/index.node.d.ts +5 -0
- package/dist/index.node.d.ts.map +1 -0
- package/dist/index.node.js +4 -0
- package/dist/init.d.ts +16 -0
- package/dist/init.d.ts.map +1 -0
- package/dist/init.js +27 -0
- package/dist/models/embedding.d.ts +6 -0
- package/dist/models/embedding.d.ts.map +1 -0
- package/dist/models/embedding.js +57 -0
- package/dist/models/image.d.ts +7 -0
- package/dist/models/image.d.ts.map +1 -0
- package/dist/models/image.js +91 -0
- package/dist/models/index.d.ts +13 -0
- package/dist/models/index.d.ts.map +1 -0
- package/dist/models/index.js +12 -0
- package/dist/models/object.d.ts +10 -0
- package/dist/models/object.d.ts.map +1 -0
- package/dist/models/object.js +84 -0
- package/dist/models/text.d.ts +51 -0
- package/dist/models/text.d.ts.map +1 -0
- package/dist/models/text.js +257 -0
- package/dist/node/index.node.d.ts +2 -0
- package/dist/node/index.node.js +415 -175
- package/dist/node/index.node.js.map +11 -11
- package/dist/types/index.d.ts +47 -0
- package/dist/types/index.d.ts.map +1 -0
- package/dist/types/index.js +1 -0
- package/dist/utils/config.d.ts +25 -0
- package/dist/utils/config.d.ts.map +1 -0
- package/dist/utils/config.js +115 -0
- package/dist/utils/events.d.ts +12 -0
- package/dist/utils/events.d.ts.map +1 -0
- package/dist/utils/events.js +14 -0
- package/dist/utils/index.d.ts +4 -0
- package/dist/utils/index.d.ts.map +1 -0
- package/dist/utils/index.js +3 -0
- package/dist/utils/tokenization.d.ts +2 -0
- package/dist/utils/tokenization.d.ts.map +1 -0
- package/dist/utils/tokenization.js +3 -0
- package/dist/vitest.config.d.ts +3 -0
- package/dist/vitest.config.d.ts.map +1 -0
- package/dist/vitest.config.js +8 -0
- package/package.json +33 -16
package/dist/cjs/index.node.cjs
CHANGED
@@ -1,3 +1,5 @@
+var __create = Object.create;
+var __getProtoOf = Object.getPrototypeOf;
 var __defProp = Object.defineProperty;
 var __getOwnPropNames = Object.getOwnPropertyNames;
 var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
@@ -5,6 +7,28 @@ var __hasOwnProp = Object.prototype.hasOwnProperty;
 function __accessProp(key) {
   return this[key];
 }
+var __toESMCache_node;
+var __toESMCache_esm;
+var __toESM = (mod, isNodeMode, target) => {
+  var canCache = mod != null && typeof mod === "object";
+  if (canCache) {
+    var cache = isNodeMode ? __toESMCache_node ??= new WeakMap : __toESMCache_esm ??= new WeakMap;
+    var cached = cache.get(mod);
+    if (cached)
+      return cached;
+  }
+  target = mod != null ? __create(__getProtoOf(mod)) : {};
+  const to = isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target;
+  for (let key of __getOwnPropNames(mod))
+    if (!__hasOwnProp.call(to, key))
+      __defProp(to, key, {
+        get: __accessProp.bind(mod, key),
+        enumerable: true
+      });
+  if (canCache)
+    cache.set(mod, to);
+  return to;
+};
 var __toCommonJS = (from) => {
   var entry = (__moduleCache ??= new WeakMap).get(from), desc;
   if (entry)
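Note: the alpha-to-beta rebuild replaces the simpler import shim with a caching `__toESM` interop helper (Bun-style bundler glue): the CommonJS exports object is exposed as `default`, named properties are proxied through getters, and wrappers are memoized per module in a WeakMap. A minimal sketch of the behavior, using a hypothetical CJS exports object rather than anything from this package:

    // `cjs` stands in for a require()d exports object.
    const cjs = { greet: () => "hi" };
    const wrapped = __toESM(cjs, /* isNodeMode */ true);
    wrapped.default === cjs;        // true: the CJS exports object lands on `default`
    wrapped.greet();                // "hi": named props are proxied via getters
    __toESM(cjs, true) === wrapped; // true: memoized in the node-mode WeakMap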
@@ -40,12 +64,13 @@ var __export = (target, all) => {
 var exports_index_node = {};
 __export(exports_index_node, {
   googleGenAIPlugin: () => googleGenAIPlugin,
-  default: () =>
+  default: () => index_node_default
 });
 module.exports = __toCommonJS(exports_index_node);

 // index.ts
-var
+var ElizaCore3 = __toESM(require("@elizaos/core"));
+var import_core6 = require("@elizaos/core");
 var import_genai3 = require("@google/genai");

 // init.ts
@@ -75,9 +100,24 @@ function getApiKey(runtime) {
 function getSmallModel(runtime) {
   return getSetting(runtime, "GOOGLE_SMALL_MODEL") ?? getSetting(runtime, "SMALL_MODEL", "gemini-2.0-flash-001") ?? "gemini-2.0-flash-001";
 }
+function getNanoModel(runtime) {
+  return getSetting(runtime, "GOOGLE_NANO_MODEL") ?? getSetting(runtime, "NANO_MODEL") ?? getSmallModel(runtime);
+}
+function getMediumModel(runtime) {
+  return getSetting(runtime, "GOOGLE_MEDIUM_MODEL") ?? getSetting(runtime, "MEDIUM_MODEL") ?? getSmallModel(runtime);
+}
 function getLargeModel(runtime) {
   return getSetting(runtime, "GOOGLE_LARGE_MODEL") ?? getSetting(runtime, "LARGE_MODEL", "gemini-2.5-pro-preview-03-25") ?? "gemini-2.5-pro-preview-03-25";
 }
+function getMegaModel(runtime) {
+  return getSetting(runtime, "GOOGLE_MEGA_MODEL") ?? getSetting(runtime, "MEGA_MODEL") ?? getLargeModel(runtime);
+}
+function getResponseHandlerModel(runtime) {
+  return getSetting(runtime, "GOOGLE_RESPONSE_HANDLER_MODEL") ?? getSetting(runtime, "GOOGLE_SHOULD_RESPOND_MODEL") ?? getSetting(runtime, "RESPONSE_HANDLER_MODEL") ?? getSetting(runtime, "SHOULD_RESPOND_MODEL") ?? getNanoModel(runtime);
+}
+function getActionPlannerModel(runtime) {
+  return getSetting(runtime, "GOOGLE_ACTION_PLANNER_MODEL") ?? getSetting(runtime, "GOOGLE_PLANNER_MODEL") ?? getSetting(runtime, "ACTION_PLANNER_MODEL") ?? getSetting(runtime, "PLANNER_MODEL") ?? getMediumModel(runtime);
+}
 function getImageModel(runtime) {
   return getSetting(runtime, "GOOGLE_IMAGE_MODEL") ?? getSetting(runtime, "IMAGE_MODEL", "gemini-2.5-pro-preview-03-25") ?? "gemini-2.5-pro-preview-03-25";
 }
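Note: the beta adds nano, medium, and mega text tiers plus dedicated response-handler and action-planner slots. Each resolver cascades from a Google-prefixed setting through a generic setting to an existing tier, so unset tiers inherit the small/large defaults. A standalone sketch of the resolution order (not the plugin's code; `getSetting` here just reads a settings map):

    const settings = { NANO_MODEL: "gemini-2.0-flash-lite" };
    const getSetting = (key, fallback) => settings[key] ?? fallback ?? null;
    const getSmallModel = () =>
      getSetting("GOOGLE_SMALL_MODEL") ?? getSetting("SMALL_MODEL", "gemini-2.0-flash-001");
    const getNanoModel = () =>
      getSetting("GOOGLE_NANO_MODEL") ?? getSetting("NANO_MODEL") ?? getSmallModel();
    getNanoModel(); // "gemini-2.0-flash-lite"; with no overrides: "gemini-2.0-flash-001"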
@@ -136,12 +176,13 @@ function initializeGoogleGenAI(_config, runtime) {
 }

 // models/embedding.ts
-var
+var ElizaCore = __toESM(require("@elizaos/core"));
+var import_core3 = require("@elizaos/core");

 // utils/events.ts
-var
+var MODEL_USED_EVENT = "MODEL_USED";
 function emitModelUsageEvent(runtime, type, _prompt, usage) {
-  runtime.emitEvent(
+  runtime.emitEvent(MODEL_USED_EVENT, {
     runtime,
     source: "plugin-google-genai",
     type,
@@ -159,21 +200,27 @@ async function countTokens(text) {
 }

 // models/embedding.ts
+var TEXT_EMBEDDING_MODEL_TYPE = ElizaCore.ModelType?.TEXT_EMBEDDING ?? "TEXT_EMBEDDING";
 async function handleTextEmbedding(runtime, params) {
   const genAI = createGoogleGenAI(runtime);
   if (!genAI) {
     throw new Error("Google Generative AI client not initialized");
   }
   const embeddingModelName = getEmbeddingModel(runtime);
-
+  import_core3.logger.debug(`[TEXT_EMBEDDING] Using model: ${embeddingModelName}`);
   if (params === null) {
     return Array(768).fill(0);
   }
-
+  let text = typeof params === "string" ? params : typeof params === "object" && params.text ? params.text : "";
   if (!text.trim()) {
-
+    import_core3.logger.warn("Empty text for embedding");
     return Array(768).fill(0);
   }
+  const maxChars = 8192 * 4;
+  if (text.length > maxChars) {
+    import_core3.logger.warn(`[Google GenAI] Embedding input too long (~${Math.ceil(text.length / 4)} tokens), truncating to ~8192 tokens`);
+    text = text.slice(0, maxChars);
+  }
   try {
     const response = await genAI.models.embedContent({
       model: embeddingModelName,
@@ -181,21 +228,20 @@ async function handleTextEmbedding(runtime, params) {
     });
     const embedding = response.embeddings?.[0]?.values || [];
     const promptTokens = await countTokens(text);
-    emitModelUsageEvent(runtime,
+    emitModelUsageEvent(runtime, TEXT_EMBEDDING_MODEL_TYPE, text, {
       promptTokens,
       completionTokens: 0,
       totalTokens: promptTokens
     });
-
+    import_core3.logger.log(`Got embedding with length ${embedding.length}`);
     return embedding;
   } catch (error) {
-
+    import_core3.logger.error(`Error generating embedding: ${error instanceof Error ? error.message : String(error)}`);
     return Array(768).fill(0);
   }
 }
 // models/image.ts
-var
-var crossFetch = typeof globalThis.fetch === "function" ? globalThis.fetch : fetch;
+var import_core4 = require("@elizaos/core");
 async function handleImageDescription(runtime, params) {
   const genAI = createGoogleGenAI(runtime);
   if (!genAI) {
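Note: `handleTextEmbedding` now coerces string or `{ text }` params, logs the model in use, and truncates over-long inputs before calling `embedContent`, using a rough 4-characters-per-token estimate against the 8192-token embedding limit. The guard in isolation, same constants as the diff above (a sketch, not the package's export):

    function truncateForEmbedding(text, maxTokens = 8192) {
      const maxChars = maxTokens * 4; // crude chars-per-token heuristic
      if (text.length <= maxChars) return text;
      console.warn(`input ~${Math.ceil(text.length / 4)} tokens; truncating to ~${maxTokens}`);
      return text.slice(0, maxChars);
    }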
@@ -204,7 +250,7 @@ async function handleImageDescription(runtime, params) {
   let imageUrl;
   let promptText;
   const modelName = getImageModel(runtime);
-
+  import_core4.logger.log(`[IMAGE_DESCRIPTION] Using model: ${modelName}`);
   if (typeof params === "string") {
     imageUrl = params;
     promptText = "Please analyze this image and provide a title and detailed description.";
@@ -213,36 +259,52 @@ async function handleImageDescription(runtime, params) {
     promptText = params.prompt || "Please analyze this image and provide a title and detailed description.";
   }
   try {
-    const imageResponse = await
+    const imageResponse = await fetch(imageUrl);
     if (!imageResponse.ok) {
       throw new Error(`Failed to fetch image: ${imageResponse.statusText}`);
     }
     const imageData = await imageResponse.arrayBuffer();
     const base64Image = Buffer.from(imageData).toString("base64");
     const contentType = imageResponse.headers.get("content-type") || "image/jpeg";
-    const
+    const details = {
       model: modelName,
-
-
-
-
-
-
-
-
-
+      systemPrompt: "",
+      userPrompt: promptText,
+      temperature: 0.7,
+      maxTokens: 8192,
+      purpose: "external_llm",
+      actionType: "google-genai.IMAGE_DESCRIPTION.generateContent"
+    };
+    const response = await import_core4.recordLlmCall(runtime, details, async () => {
+      const result = await genAI.models.generateContent({
+        model: modelName,
+        contents: [
+          {
+            role: "user",
+            parts: [
+              { text: promptText },
+              {
+                inlineData: {
+                  mimeType: contentType,
+                  data: base64Image
+                }
               }
-
-
+            ]
+          }
+        ],
+        config: {
+          temperature: 0.7,
+          topK: 40,
+          topP: 0.95,
+          maxOutputTokens: 8192,
+          safetySettings: getSafetySettings()
         }
-
-
-
-
-
-
-        safetySettings: getSafetySettings()
-      }
+      });
+      const responseText2 = result.text || "";
+      details.response = responseText2;
+      details.promptTokens = await countTokens(promptText);
+      details.completionTokens = await countTokens(responseText2);
+      return result;
     });
     const responseText = response.text || "";
     try {
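Note: the image handler's `generateContent` call is now wrapped in `recordLlmCall` from `@elizaos/core`, which traces the call through a mutable details record that the callback enriches with the response text and token counts before returning the raw result. The wrapper pattern, reduced to its shape (the provider call is a stand-in, not the package's code):

    const details = { model, systemPrompt: "", userPrompt: prompt, purpose: "external_llm" };
    const response = await recordLlmCall(runtime, details, async () => {
      const result = await callProvider(prompt); // stand-in for genAI.models.generateContent
      details.response = result.text || "";
      details.promptTokens = await countTokens(prompt);
      details.completionTokens = await countTokens(details.response);
      return result;
    });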
@@ -260,161 +322,328 @@ async function handleImageDescription(runtime, params) {
     return { title, description };
   } catch (error) {
     const message = error instanceof Error ? error.message : String(error);
-
+    import_core4.logger.error(`Error analyzing image: ${message}`);
     return {
       title: "Failed to analyze image",
       description: `Error: ${message}`
     };
   }
 }
-// models/
-var
-
-
-
-
+// models/text.ts
+var ElizaCore2 = __toESM(require("@elizaos/core"));
+var import_core5 = require("@elizaos/core");
+var CORE_MODEL_TYPES = ElizaCore2.ModelType ?? {};
+var TEXT_NANO_MODEL_TYPE = CORE_MODEL_TYPES.TEXT_NANO ?? "TEXT_NANO";
+var TEXT_MEDIUM_MODEL_TYPE = CORE_MODEL_TYPES.TEXT_MEDIUM ?? "TEXT_MEDIUM";
+var TEXT_SMALL_MODEL_TYPE = CORE_MODEL_TYPES.TEXT_SMALL ?? "TEXT_SMALL";
+var TEXT_LARGE_MODEL_TYPE = CORE_MODEL_TYPES.TEXT_LARGE ?? "TEXT_LARGE";
+var TEXT_MEGA_MODEL_TYPE = CORE_MODEL_TYPES.TEXT_MEGA ?? "TEXT_MEGA";
+var RESPONSE_HANDLER_MODEL_TYPE = CORE_MODEL_TYPES.RESPONSE_HANDLER ?? "RESPONSE_HANDLER";
+var ACTION_PLANNER_MODEL_TYPE = CORE_MODEL_TYPES.ACTION_PLANNER ?? "ACTION_PLANNER";
+function normalizeToolsForGoogle(tools) {
+  if (!tools)
+    return;
+  if (Array.isArray(tools) && tools.length > 0 && typeof tools[0] === "object" && tools[0] !== null && "functionDeclarations" in tools[0]) {
+    return tools;
   }
-  const
-  const
-
-
-
-
-  enhancedPrompt += `
-
-Please respond with a JSON object that follows this schema:
-${JSON.stringify(params.schema, null, 2)}`;
+  const flat = Array.isArray(tools) ? tools : Object.entries(tools).map(([name, value]) => ({ name, ...value }));
+  const declarations = [];
+  for (const tool of flat) {
+    const name = tool.name ?? tool.function?.name;
+    if (!name) {
+      throw new Error("[GoogleGenAI] Tool definition is missing a name.");
     }
-    const
-
-
-
-
-
-
-
-    safetySettings: getSafetySettings()
-  }
-  });
-  const text = response.text || "";
-  const promptTokens = await countTokens(enhancedPrompt);
-  const completionTokens = await countTokens(text);
-  emitModelUsageEvent(runtime, modelType, params.prompt, {
-    promptTokens,
-    completionTokens,
-    totalTokens: promptTokens + completionTokens
+    const description = tool.description ?? tool.function?.description;
+    const parameters = tool.parameters ?? tool.inputSchema ?? tool.function?.parameters ?? {
+      type: "object",
+      properties: {}
+    };
+    declarations.push({
+      name,
+      ...description ? { description } : {},
+      parameters
     });
-
-
-
-
-
-
-
-
-
-
+  }
+  return declarations.length > 0 ? [{ functionDeclarations: declarations }] : undefined;
+}
+function normalizeToolConfigForGoogle(toolChoice) {
+  if (!toolChoice)
+    return;
+  if (toolChoice === "auto") {
+    return { functionCallingConfig: { mode: "AUTO" } };
+  }
+  if (toolChoice === "required") {
+    return { functionCallingConfig: { mode: "ANY" } };
+  }
+  if (toolChoice === "none") {
+    return { functionCallingConfig: { mode: "NONE" } };
+  }
+  let toolName;
+  if ("type" in toolChoice) {
+    toolName = toolChoice.type === "function" ? toolChoice.function.name : toolChoice.toolName ?? toolChoice.name;
+  } else {
+    toolName = toolChoice.name;
+  }
+  if (toolName) {
+    return {
+      functionCallingConfig: {
+        mode: "ANY",
+        allowedFunctionNames: [toolName]
       }
-
+    };
+  }
+  return;
+}
+function resolveResponseJsonSchema(responseSchema) {
+  if (!responseSchema)
+    return;
+  if ("schema" in responseSchema && responseSchema.schema) {
+    return responseSchema.schema;
+  }
+  return responseSchema;
+}
+function buildPromptParts(prompt, attachments) {
+  const parts = [{ text: prompt }];
+  for (const attachment of attachments ?? []) {
+    if (attachment.data instanceof URL) {
+      parts.push({
+        fileData: {
+          mimeType: attachment.mediaType,
+          fileUri: attachment.data.toString()
+        }
+      });
+      continue;
     }
-
-
-
+    if (typeof attachment.data === "string" && /^https?:\/\//i.test(attachment.data)) {
+      parts.push({
+        fileData: {
+          mimeType: attachment.mediaType,
+          fileUri: attachment.data
+        }
+      });
+      continue;
+    }
+    if (typeof attachment.data === "string") {
+      const dataUrlMatch = attachment.data.match(/^data:([^;,]+);base64,(.+)$/i);
+      parts.push({
+        inlineData: {
+          mimeType: dataUrlMatch?.[1] ?? attachment.mediaType,
+          data: dataUrlMatch?.[2] ?? attachment.data
+        }
+      });
+      continue;
+    }
+    parts.push({
+      inlineData: {
+        mimeType: attachment.mediaType,
+        data: Buffer.from(attachment.data).toString("base64")
+      }
+    });
   }
+  return parts;
 }
-
-  return
+function resolveGoogleSystemInstruction(runtime, params) {
+  return import_core5.resolveEffectiveSystemPrompt({
+    params,
+    fallback: import_core5.buildCanonicalSystemPrompt({ character: runtime.character })
+  });
 }
-
-  return
+function resolveGooglePrompt(params, systemInstruction) {
+  return import_core5.renderChatMessagesForPrompt(params.messages, {
+    omitDuplicateSystem: systemInstruction
+  }) ?? params.prompt;
 }
-
-
-
-
-
-
+function getModelNameForType(runtime, modelType) {
+  switch (modelType) {
+    case TEXT_NANO_MODEL_TYPE:
+      return getNanoModel(runtime);
+    case TEXT_MEDIUM_MODEL_TYPE:
+      return getMediumModel(runtime);
+    case TEXT_SMALL_MODEL_TYPE:
+      return getSmallModel(runtime);
+    case TEXT_LARGE_MODEL_TYPE:
+      return getLargeModel(runtime);
+    case TEXT_MEGA_MODEL_TYPE:
+      return getMegaModel(runtime);
+    case RESPONSE_HANDLER_MODEL_TYPE:
+      return getResponseHandlerModel(runtime);
+    case ACTION_PLANNER_MODEL_TYPE:
+      return getActionPlannerModel(runtime);
+    default:
+      return getLargeModel(runtime);
   }
-
+}
+function buildGoogleGenerationConfig(params, systemInstruction, temperature, maxTokens, stopSequences) {
+  const tools = normalizeToolsForGoogle(params.tools);
+  const toolConfig = normalizeToolConfigForGoogle(params.toolChoice);
+  const responseJsonSchema = resolveResponseJsonSchema(params.responseSchema);
+  const baseConfig = {
+    temperature,
+    topK: 40,
+    topP: 0.95,
+    maxOutputTokens: maxTokens,
+    stopSequences,
+    safetySettings: getSafetySettings(),
+    ...systemInstruction && { systemInstruction },
+    ...tools ? { tools } : {},
+    ...toolConfig ? { toolConfig } : {},
+    ...responseJsonSchema ? {
+      responseMimeType: "application/json",
+      responseJsonSchema
+    } : {}
+  };
+  return baseConfig;
+}
+function createLlmCallDetails(modelName, modelType, prompt, systemInstruction, temperature, maxTokens) {
+  return {
+    model: modelName,
+    systemPrompt: systemInstruction ?? "",
+    userPrompt: prompt,
+    temperature,
+    maxTokens,
+    purpose: "external_llm",
+    actionType: `google-genai.${modelType}.generateContent`
+  };
+}
+async function generateContentWithTrajectory(runtime, genAI, modelName, modelType, prompt, systemInstruction, temperature, maxTokens, request) {
+  const details = createLlmCallDetails(modelName, modelType, prompt, systemInstruction, temperature, maxTokens);
+  const response = await import_core5.recordLlmCall(runtime, details, async () => {
+    const result = await genAI.models.generateContent(request);
+    const text2 = result.text || "";
+    details.response = text2;
+    details.promptTokens = await countTokens(prompt);
+    details.completionTokens = await countTokens(text2);
+    return result;
+  });
+  const text = response.text || "";
+  const promptTokens = details.promptTokens ?? await countTokens(prompt);
+  const completionTokens = details.completionTokens ?? await countTokens(text);
+  emitModelUsageEvent(runtime, modelType, prompt, {
+    promptTokens,
+    completionTokens,
+    totalTokens: promptTokens + completionTokens
+  });
+  return text;
 }
 async function handleTextSmall(runtime, params) {
-  const {
-
+  const {
+    stopSequences = [],
+    maxTokens = 8192,
+    temperature = 0.7,
+    attachments
+  } = params;
   const genAI = createGoogleGenAI(runtime);
   if (!genAI) {
     throw new Error("Google Generative AI client not initialized");
   }
-  const modelName =
-
+  const modelName = getModelNameForType(runtime, TEXT_SMALL_MODEL_TYPE);
+  import_core5.logger.log(`[TEXT_SMALL] Using model: ${modelName}`);
   try {
-    const systemInstruction = runtime
-    const
+    const systemInstruction = resolveGoogleSystemInstruction(runtime, params);
+    const promptText = resolveGooglePrompt(params, systemInstruction);
+    return await generateContentWithTrajectory(runtime, genAI, modelName, TEXT_SMALL_MODEL_TYPE, promptText, systemInstruction, temperature, maxTokens, {
       model: modelName,
-      contents:
-
-
-
-
-
-
-      safetySettings: getSafetySettings(),
-      ...systemInstruction && { systemInstruction }
-    }
-  });
-  const text = response.text || "";
-  const promptTokens = await countTokens(prompt);
-  const completionTokens = await countTokens(text);
-  emitModelUsageEvent(runtime, import_core7.ModelType.TEXT_SMALL, prompt, {
-    promptTokens,
-    completionTokens,
-    totalTokens: promptTokens + completionTokens
+      contents: (attachments?.length ?? 0) > 0 ? [
+        {
+          role: "user",
+          parts: buildPromptParts(promptText, attachments)
+        }
+      ] : promptText,
+      config: buildGoogleGenerationConfig(params, systemInstruction, temperature, maxTokens, stopSequences)
     });
-    return text;
   } catch (error) {
-
+    import_core5.logger.error(`[TEXT_SMALL] Error: ${error instanceof Error ? error.message : String(error)}`);
     throw error;
   }
 }
 async function handleTextLarge(runtime, params) {
-  const {
-
+  const {
+    stopSequences = [],
+    maxTokens = 8192,
+    temperature = 0.7,
+    attachments
+  } = params;
   const genAI = createGoogleGenAI(runtime);
   if (!genAI) {
     throw new Error("Google Generative AI client not initialized");
   }
-  const modelName =
-
+  const modelName = getModelNameForType(runtime, TEXT_LARGE_MODEL_TYPE);
+  import_core5.logger.log(`[TEXT_LARGE] Using model: ${modelName}`);
   try {
-    const systemInstruction = runtime
-    const
+    const systemInstruction = resolveGoogleSystemInstruction(runtime, params);
+    const promptText = resolveGooglePrompt(params, systemInstruction);
+    return await generateContentWithTrajectory(runtime, genAI, modelName, TEXT_LARGE_MODEL_TYPE, promptText, systemInstruction, temperature, maxTokens, {
       model: modelName,
-      contents:
-
-
-
-
-
-
-      safetySettings: getSafetySettings(),
-      ...systemInstruction && { systemInstruction }
-    }
+      contents: (attachments?.length ?? 0) > 0 ? [
+        {
+          role: "user",
+          parts: buildPromptParts(promptText, attachments)
+        }
+      ] : promptText,
+      config: buildGoogleGenerationConfig(params, systemInstruction, temperature, maxTokens, stopSequences)
     });
-
-
-
-
-
-
-
+  } catch (error) {
+    import_core5.logger.error(`[TEXT_LARGE] Error: ${error instanceof Error ? error.message : String(error)}`);
+    throw error;
+  }
+}
+async function handleTextNano(runtime, params) {
+  return handleTextWithType(runtime, TEXT_NANO_MODEL_TYPE, params);
+}
+async function handleTextMedium(runtime, params) {
+  return handleTextWithType(runtime, TEXT_MEDIUM_MODEL_TYPE, params);
+}
+async function handleTextMega(runtime, params) {
+  return handleTextWithType(runtime, TEXT_MEGA_MODEL_TYPE, params);
+}
+async function handleResponseHandler(runtime, params) {
+  return handleTextWithType(runtime, RESPONSE_HANDLER_MODEL_TYPE, params);
+}
+async function handleActionPlanner(runtime, params) {
+  return handleTextWithType(runtime, ACTION_PLANNER_MODEL_TYPE, params);
+}
+async function handleTextWithType(runtime, modelType, params) {
+  const {
+    stopSequences = [],
+    maxTokens = 8192,
+    temperature = 0.7,
+    attachments
+  } = params;
+  const genAI = createGoogleGenAI(runtime);
+  if (!genAI) {
+    throw new Error("Google Generative AI client not initialized");
+  }
+  const modelName = getModelNameForType(runtime, modelType);
+  import_core5.logger.log(`[${modelType}] Using model: ${modelName}`);
+  try {
+    const systemInstruction = resolveGoogleSystemInstruction(runtime, params);
+    const promptText = resolveGooglePrompt(params, systemInstruction);
+    return await generateContentWithTrajectory(runtime, genAI, modelName, modelType, promptText, systemInstruction, temperature, maxTokens, {
+      model: modelName,
+      contents: (attachments?.length ?? 0) > 0 ? [
+        {
+          role: "user",
+          parts: buildPromptParts(promptText, attachments)
+        }
+      ] : promptText,
+      config: buildGoogleGenerationConfig(params, systemInstruction, temperature, maxTokens, stopSequences)
     });
-    return text;
   } catch (error) {
-
+    import_core5.logger.error(`[${modelType}] Error: ${error instanceof Error ? error.message : String(error)}`);
     throw error;
   }
 }
 // index.ts
+var CORE_MODEL_TYPES2 = ElizaCore3.ModelType ?? {};
+var TEXT_NANO_MODEL_TYPE2 = CORE_MODEL_TYPES2.TEXT_NANO ?? "TEXT_NANO";
+var TEXT_MEDIUM_MODEL_TYPE2 = CORE_MODEL_TYPES2.TEXT_MEDIUM ?? "TEXT_MEDIUM";
+var TEXT_SMALL_MODEL_TYPE2 = CORE_MODEL_TYPES2.TEXT_SMALL ?? "TEXT_SMALL";
+var TEXT_LARGE_MODEL_TYPE2 = CORE_MODEL_TYPES2.TEXT_LARGE ?? "TEXT_LARGE";
+var TEXT_EMBEDDING_MODEL_TYPE2 = CORE_MODEL_TYPES2.TEXT_EMBEDDING ?? "TEXT_EMBEDDING";
+var IMAGE_DESCRIPTION_MODEL_TYPE = CORE_MODEL_TYPES2.IMAGE_DESCRIPTION ?? "IMAGE_DESCRIPTION";
+var TEXT_MEGA_MODEL_TYPE2 = CORE_MODEL_TYPES2.TEXT_MEGA ?? "TEXT_MEGA";
+var RESPONSE_HANDLER_MODEL_TYPE2 = CORE_MODEL_TYPES2.RESPONSE_HANDLER ?? "RESPONSE_HANDLER";
+var ACTION_PLANNER_MODEL_TYPE2 = CORE_MODEL_TYPES2.ACTION_PLANNER ?? "ACTION_PLANNER";
 var pluginTests = [
   {
     name: "google_genai_plugin_tests",
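Note: the rewritten models/text.ts centralizes prompt assembly (`buildPromptParts` for URL, data-URL, and binary attachments), model selection (`getModelNameForType`), generation config (`buildGoogleGenerationConfig`), and adds tool-call support. `normalizeToolsForGoogle` passes Google-native `functionDeclarations` arrays through untouched and rewraps flatter OpenAI/AI-SDK-style definitions. A rough illustration with a made-up tool:

    // Hypothetical flat-style input:
    const tools = [{
      type: "function",
      function: {
        name: "get_weather",
        description: "Look up current weather",
        parameters: { type: "object", properties: { city: { type: "string" } } }
      }
    }];
    // normalizeToolsForGoogle(tools) yields roughly:
    // [{ functionDeclarations: [{ name: "get_weather",
    //    description: "Look up current weather", parameters: { ... } }] }]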
@@ -432,22 +661,22 @@ var pluginTests = [
         for await (const model of modelList) {
           models.push(model);
         }
-
+        import_core6.logger.log(`Available models: ${models.length}`);
       }
     },
     {
       name: "google_test_text_embedding",
       fn: async (runtime) => {
         try {
-          const embedding = await runtime.useModel(
+          const embedding = await runtime.useModel(import_core6.ModelType.TEXT_EMBEDDING, {
             text: "Hello, world!"
           });
-
+          import_core6.logger.log(`Embedding dimension: ${embedding.length}`);
           if (embedding.length === 0) {
             throw new Error("Failed to generate embedding");
           }
         } catch (error) {
-
+          import_core6.logger.error(`Error in test_text_embedding: ${error instanceof Error ? error.message : String(error)}`);
           throw error;
         }
       }
@@ -456,15 +685,15 @@ var pluginTests = [
       name: "google_test_text_small",
       fn: async (runtime) => {
         try {
-          const text = await runtime.useModel(
+          const text = await runtime.useModel(import_core6.ModelType.TEXT_SMALL, {
             prompt: "What is the nature of reality in 10 words?"
           });
           if (text.length === 0) {
             throw new Error("Failed to generate text");
           }
-
+          import_core6.logger.log("Generated with TEXT_SMALL:", text);
         } catch (error) {
-
+          import_core6.logger.error(`Error in test_text_small: ${error instanceof Error ? error.message : String(error)}`);
           throw error;
         }
       }
@@ -473,15 +702,15 @@ var pluginTests = [
       name: "google_test_text_large",
       fn: async (runtime) => {
         try {
-          const text = await runtime.useModel(
+          const text = await runtime.useModel(import_core6.ModelType.TEXT_LARGE, {
             prompt: "Explain quantum mechanics in simple terms."
           });
           if (text.length === 0) {
             throw new Error("Failed to generate text");
           }
-
+          import_core6.logger.log("Generated with TEXT_LARGE:", `${text.substring(0, 100)}...`);
         } catch (error) {
-
+          import_core6.logger.error(`Error in test_text_large: ${error instanceof Error ? error.message : String(error)}`);
           throw error;
         }
       }
@@ -490,20 +719,20 @@ var pluginTests = [
       name: "google_test_image_description",
       fn: async (runtime) => {
         try {
-          const result = await runtime.useModel(
-          if (result && typeof result === "object" && "title" in result && "description" in result) {
-
+          const result = await runtime.useModel(import_core6.ModelType.IMAGE_DESCRIPTION, "https://upload.wikimedia.org/wikipedia/commons/thumb/1/1c/Vitalik_Buterin_TechCrunch_London_2015_%28cropped%29.jpg/537px-Vitalik_Buterin_TechCrunch_London_2015_%28cropped%29.jpg");
+          if (result != null && typeof result === "object" && "title" in result && "description" in result) {
+            import_core6.logger.log("Image description:", JSON.stringify(result));
           } else {
-
+            import_core6.logger.error(`Invalid image description result format: ${JSON.stringify(result)}`);
           }
         } catch (error) {
-
+          import_core6.logger.error(`Error in test_image_description: ${error instanceof Error ? error.message : String(error)}`);
           throw error;
         }
       }
     },
     {
-      name: "
+      name: "google_test_structured_output_via_text_large",
       fn: async (runtime) => {
         try {
           const schema = {
@@ -515,16 +744,16 @@ var pluginTests = [
             },
             required: ["name", "age", "hobbies"]
           };
-          const result = await runtime.useModel(
+          const result = await runtime.useModel(import_core6.ModelType.TEXT_LARGE, {
             prompt: "Generate a person profile with name, age, and hobbies.",
-            schema
+            responseSchema: schema
           });
-
-          if (!result
-          throw new Error("Generated
+          import_core6.logger.log("Generated structured output:", JSON.stringify(result));
+          if (!result) {
+            throw new Error("Generated structured output is empty");
           }
         } catch (error) {
-
+          import_core6.logger.error(`Error in test_structured_output_via_text_large: ${error instanceof Error ? error.message : String(error)}`);
           throw error;
         }
       }
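Note: structured output now goes through a plain TEXT_LARGE call with a `responseSchema` parameter (the alpha test passed a bare `schema`); `buildGoogleGenerationConfig` turns it into `responseMimeType: "application/json"` plus `responseJsonSchema` on the request. A usage sketch mirroring the test above, assuming the usual JSON-schema property types for the required fields:

    const result = await runtime.useModel(ModelType.TEXT_LARGE, {
      prompt: "Generate a person profile with name, age, and hobbies.",
      responseSchema: {
        type: "object",
        properties: {
          name: { type: "string" },
          age: { type: "number" },
          hobbies: { type: "array", items: { type: "string" } }
        },
        required: ["name", "age", "hobbies"]
      }
    }); // the response is constrained to JSON matching the schema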
@@ -542,41 +771,70 @@ var env = getProcessEnv();
 var googleGenAIPlugin = {
   name: "google-genai",
   description: "Google Generative AI plugin for Gemini models",
+  autoEnable: {
+    envKeys: ["GOOGLE_API_KEY", "GOOGLE_GENERATIVE_AI_API_KEY"]
+  },
   config: {
     GOOGLE_GENERATIVE_AI_API_KEY: env.GOOGLE_GENERATIVE_AI_API_KEY ?? null,
+    GOOGLE_NANO_MODEL: env.GOOGLE_NANO_MODEL ?? null,
+    GOOGLE_MEDIUM_MODEL: env.GOOGLE_MEDIUM_MODEL ?? null,
     GOOGLE_SMALL_MODEL: env.GOOGLE_SMALL_MODEL ?? null,
     GOOGLE_LARGE_MODEL: env.GOOGLE_LARGE_MODEL ?? null,
+    GOOGLE_MEGA_MODEL: env.GOOGLE_MEGA_MODEL ?? null,
+    GOOGLE_RESPONSE_HANDLER_MODEL: env.GOOGLE_RESPONSE_HANDLER_MODEL ?? null,
+    GOOGLE_SHOULD_RESPOND_MODEL: env.GOOGLE_SHOULD_RESPOND_MODEL ?? null,
+    GOOGLE_ACTION_PLANNER_MODEL: env.GOOGLE_ACTION_PLANNER_MODEL ?? null,
+    GOOGLE_PLANNER_MODEL: env.GOOGLE_PLANNER_MODEL ?? null,
     GOOGLE_IMAGE_MODEL: env.GOOGLE_IMAGE_MODEL ?? null,
     GOOGLE_EMBEDDING_MODEL: env.GOOGLE_EMBEDDING_MODEL ?? null,
+    NANO_MODEL: env.NANO_MODEL ?? null,
+    MEDIUM_MODEL: env.MEDIUM_MODEL ?? null,
     SMALL_MODEL: env.SMALL_MODEL ?? null,
     LARGE_MODEL: env.LARGE_MODEL ?? null,
+    MEGA_MODEL: env.MEGA_MODEL ?? null,
+    RESPONSE_HANDLER_MODEL: env.RESPONSE_HANDLER_MODEL ?? null,
+    SHOULD_RESPOND_MODEL: env.SHOULD_RESPOND_MODEL ?? null,
+    ACTION_PLANNER_MODEL: env.ACTION_PLANNER_MODEL ?? null,
+    PLANNER_MODEL: env.PLANNER_MODEL ?? null,
     IMAGE_MODEL: env.IMAGE_MODEL ?? null
   },
   async init(config, runtime) {
     initializeGoogleGenAI(config, runtime);
   },
   models: {
-    [
+    [TEXT_NANO_MODEL_TYPE2]: async (runtime, params) => {
+      return handleTextNano(runtime, params);
+    },
+    [TEXT_MEDIUM_MODEL_TYPE2]: async (runtime, params) => {
+      return handleTextMedium(runtime, params);
+    },
+    [TEXT_SMALL_MODEL_TYPE2]: async (runtime, params) => {
      return handleTextSmall(runtime, params);
     },
-    [
+    [TEXT_LARGE_MODEL_TYPE2]: async (runtime, params) => {
       return handleTextLarge(runtime, params);
     },
-    [
-      return
+    [TEXT_MEGA_MODEL_TYPE2]: async (runtime, params) => {
+      return handleTextMega(runtime, params);
     },
-    [
-      return
+    [RESPONSE_HANDLER_MODEL_TYPE2]: async (runtime, params) => {
+      return handleResponseHandler(runtime, params);
+    },
+    [ACTION_PLANNER_MODEL_TYPE2]: async (runtime, params) => {
+      return handleActionPlanner(runtime, params);
     },
-    [
-      return
+    [TEXT_EMBEDDING_MODEL_TYPE2]: async (runtime, params) => {
+      return handleTextEmbedding(runtime, params);
     },
-    [
-      return
+    [IMAGE_DESCRIPTION_MODEL_TYPE]: async (runtime, params) => {
+      return handleImageDescription(runtime, params);
     }
   },
   tests: pluginTests
 };
-var
+var plugin_google_genai_default = googleGenAIPlugin;
+
+// index.node.ts
+var index_node_default = plugin_google_genai_default;

-//# debugId=
+//# debugId=E1E56B528121F63364756E2164756E21