@elizaos/plugin-google-genai 2.0.0-alpha.9 → 2.0.0-beta.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +124 -0
- package/auto-enable.ts +21 -0
- package/dist/browser/index.browser.js +415 -175
- package/dist/browser/index.browser.js.map +11 -11
- package/dist/build.d.ts +3 -0
- package/dist/build.d.ts.map +1 -0
- package/dist/build.js +117 -0
- package/dist/cjs/index.node.cjs +439 -181
- package/dist/cjs/index.node.js.map +11 -11
- package/dist/generated/specs/specs.d.ts +55 -0
- package/dist/generated/specs/specs.d.ts.map +1 -0
- package/dist/generated/specs/specs.js +34 -0
- package/dist/index.browser.d.ts +5 -0
- package/dist/index.browser.d.ts.map +1 -0
- package/dist/index.browser.js +4 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +252 -0
- package/dist/index.node.d.ts +5 -0
- package/dist/index.node.d.ts.map +1 -0
- package/dist/index.node.js +4 -0
- package/dist/init.d.ts +16 -0
- package/dist/init.d.ts.map +1 -0
- package/dist/init.js +27 -0
- package/dist/models/embedding.d.ts +6 -0
- package/dist/models/embedding.d.ts.map +1 -0
- package/dist/models/embedding.js +57 -0
- package/dist/models/image.d.ts +7 -0
- package/dist/models/image.d.ts.map +1 -0
- package/dist/models/image.js +91 -0
- package/dist/models/index.d.ts +13 -0
- package/dist/models/index.d.ts.map +1 -0
- package/dist/models/index.js +12 -0
- package/dist/models/object.d.ts +10 -0
- package/dist/models/object.d.ts.map +1 -0
- package/dist/models/object.js +84 -0
- package/dist/models/text.d.ts +51 -0
- package/dist/models/text.d.ts.map +1 -0
- package/dist/models/text.js +257 -0
- package/dist/node/index.node.d.ts +2 -0
- package/dist/node/index.node.js +415 -175
- package/dist/node/index.node.js.map +11 -11
- package/dist/types/index.d.ts +47 -0
- package/dist/types/index.d.ts.map +1 -0
- package/dist/types/index.js +1 -0
- package/dist/utils/config.d.ts +25 -0
- package/dist/utils/config.d.ts.map +1 -0
- package/dist/utils/config.js +115 -0
- package/dist/utils/events.d.ts +12 -0
- package/dist/utils/events.d.ts.map +1 -0
- package/dist/utils/events.js +14 -0
- package/dist/utils/index.d.ts +4 -0
- package/dist/utils/index.d.ts.map +1 -0
- package/dist/utils/index.js +3 -0
- package/dist/utils/tokenization.d.ts +2 -0
- package/dist/utils/tokenization.d.ts.map +1 -0
- package/dist/utils/tokenization.js +3 -0
- package/dist/vitest.config.d.ts +3 -0
- package/dist/vitest.config.d.ts.map +1 -0
- package/dist/vitest.config.js +8 -0
- package/package.json +33 -16
package/dist/node/index.node.js
CHANGED
@@ -1,5 +1,6 @@
 // index.ts
-import
+import * as ElizaCore3 from "@elizaos/core";
+import { logger as logger6, ModelType as ModelType4 } from "@elizaos/core";
 import { GoogleGenAI as GoogleGenAI3 } from "@google/genai";

 // init.ts
@@ -29,9 +30,24 @@ function getApiKey(runtime) {
 function getSmallModel(runtime) {
   return getSetting(runtime, "GOOGLE_SMALL_MODEL") ?? getSetting(runtime, "SMALL_MODEL", "gemini-2.0-flash-001") ?? "gemini-2.0-flash-001";
 }
+function getNanoModel(runtime) {
+  return getSetting(runtime, "GOOGLE_NANO_MODEL") ?? getSetting(runtime, "NANO_MODEL") ?? getSmallModel(runtime);
+}
+function getMediumModel(runtime) {
+  return getSetting(runtime, "GOOGLE_MEDIUM_MODEL") ?? getSetting(runtime, "MEDIUM_MODEL") ?? getSmallModel(runtime);
+}
 function getLargeModel(runtime) {
   return getSetting(runtime, "GOOGLE_LARGE_MODEL") ?? getSetting(runtime, "LARGE_MODEL", "gemini-2.5-pro-preview-03-25") ?? "gemini-2.5-pro-preview-03-25";
 }
+function getMegaModel(runtime) {
+  return getSetting(runtime, "GOOGLE_MEGA_MODEL") ?? getSetting(runtime, "MEGA_MODEL") ?? getLargeModel(runtime);
+}
+function getResponseHandlerModel(runtime) {
+  return getSetting(runtime, "GOOGLE_RESPONSE_HANDLER_MODEL") ?? getSetting(runtime, "GOOGLE_SHOULD_RESPOND_MODEL") ?? getSetting(runtime, "RESPONSE_HANDLER_MODEL") ?? getSetting(runtime, "SHOULD_RESPOND_MODEL") ?? getNanoModel(runtime);
+}
+function getActionPlannerModel(runtime) {
+  return getSetting(runtime, "GOOGLE_ACTION_PLANNER_MODEL") ?? getSetting(runtime, "GOOGLE_PLANNER_MODEL") ?? getSetting(runtime, "ACTION_PLANNER_MODEL") ?? getSetting(runtime, "PLANNER_MODEL") ?? getMediumModel(runtime);
+}
 function getImageModel(runtime) {
   return getSetting(runtime, "GOOGLE_IMAGE_MODEL") ?? getSetting(runtime, "IMAGE_MODEL", "gemini-2.5-pro-preview-03-25") ?? "gemini-2.5-pro-preview-03-25";
 }
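The six selector functions above carry the release's model-tier story: each tier reads its Google-prefixed setting first, then the generic setting, and finally degrades to an existing tier (nano and medium fall back to the small model, mega to the large model, the response handler to nano, the action planner to medium), so an unset tier never fails outright. A minimal TypeScript sketch of that chain, with a hypothetical `lookup` standing in for the plugin's runtime-backed `getSetting`:

```ts
// Hypothetical sketch of the tier-fallback chain introduced in this release.
// `lookup` stands in for the plugin's getSetting helper; it is not the real API.
type Lookup = (key: string) => string | undefined;

function resolveTier(lookup: Lookup, keys: string[], fallback: () => string): string {
  for (const key of keys) {
    const value = lookup(key);
    if (value) return value; // first configured key wins
  }
  return fallback(); // otherwise reuse an existing tier's model
}

// Example: NANO falls back to the small model when unset.
const settings: Record<string, string> = { SMALL_MODEL: "gemini-2.0-flash-001" };
const lookup: Lookup = (k) => settings[k];
const nano = resolveTier(lookup, ["GOOGLE_NANO_MODEL", "NANO_MODEL"], () =>
  resolveTier(lookup, ["GOOGLE_SMALL_MODEL", "SMALL_MODEL"], () => "gemini-2.0-flash-001")
);
console.log(nano); // "gemini-2.0-flash-001", via the SMALL_MODEL fallback
```

Ordering the keys from most to least specific keeps explicit configuration authoritative while the tier fallback keeps older configurations working unchanged.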
@@ -90,12 +106,13 @@ function initializeGoogleGenAI(_config, runtime) {
 }

 // models/embedding.ts
-import
+import * as ElizaCore from "@elizaos/core";
+import { logger as logger3 } from "@elizaos/core";

 // utils/events.ts
-
+var MODEL_USED_EVENT = "MODEL_USED";
 function emitModelUsageEvent(runtime, type, _prompt, usage) {
-  runtime.emitEvent(
+  runtime.emitEvent(MODEL_USED_EVENT, {
     runtime,
     source: "plugin-google-genai",
     type,
@@ -113,6 +130,7 @@ async function countTokens(text) {
 }

 // models/embedding.ts
+var TEXT_EMBEDDING_MODEL_TYPE = ElizaCore.ModelType?.TEXT_EMBEDDING ?? "TEXT_EMBEDDING";
 async function handleTextEmbedding(runtime, params) {
   const genAI = createGoogleGenAI(runtime);
   if (!genAI) {
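This hunk also shows the build's new defensive import style: the namespace import pairs with `ModelType?.TEXT_EMBEDDING ?? "TEXT_EMBEDDING"` so the plugin still registers under the literal string key when the installed @elizaos/core predates a given ModelType member. A small sketch of that pattern, with the shapes assumed for illustration:

```ts
// Defensive constant lookup, as used throughout this build: if the installed
// core predates a ModelType member, fall back to the string literal.
type MaybeModelType = { TEXT_EMBEDDING?: string } | undefined;

function resolveModelTypeKey(modelType: MaybeModelType): string {
  return modelType?.TEXT_EMBEDDING ?? "TEXT_EMBEDDING";
}

console.log(resolveModelTypeKey({ TEXT_EMBEDDING: "TEXT_EMBEDDING" })); // modern core
console.log(resolveModelTypeKey(undefined)); // older core without the enum member
```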
@@ -123,11 +141,16 @@ async function handleTextEmbedding(runtime, params) {
   if (params === null) {
     return Array(768).fill(0);
   }
-
+  let text = typeof params === "string" ? params : typeof params === "object" && params.text ? params.text : "";
   if (!text.trim()) {
     logger3.warn("Empty text for embedding");
     return Array(768).fill(0);
   }
+  const maxChars = 8192 * 4;
+  if (text.length > maxChars) {
+    logger3.warn(`[Google GenAI] Embedding input too long (~${Math.ceil(text.length / 4)} tokens), truncating to ~8192 tokens`);
+    text = text.slice(0, maxChars);
+  }
   try {
     const response = await genAI.models.embedContent({
       model: embeddingModelName,
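The new guard caps embedding input with a rough four-characters-per-token estimate, so an 8192-token budget becomes a 32768-character slice instead of a tokenizer round-trip. A sketch of the same heuristic, with the constants taken from the diff rather than from the Gemini API:

```ts
// Sketch of the ~4-chars-per-token truncation heuristic used above.
// The 8192-token budget is the input limit the plugin assumes, not a
// value queried from the embedding model.
const TOKEN_BUDGET = 8192;
const CHARS_PER_TOKEN = 4; // rough heuristic, not an exact tokenizer

function truncateForEmbedding(text: string): string {
  const maxChars = TOKEN_BUDGET * CHARS_PER_TOKEN; // 32768 characters
  if (text.length <= maxChars) return text;
  console.warn(`input ~${Math.ceil(text.length / CHARS_PER_TOKEN)} tokens, truncating to ~${TOKEN_BUDGET}`);
  return text.slice(0, maxChars);
}
```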
@@ -135,7 +158,7 @@ async function handleTextEmbedding(runtime, params) {
     });
     const embedding = response.embeddings?.[0]?.values || [];
     const promptTokens = await countTokens(text);
-    emitModelUsageEvent(runtime,
+    emitModelUsageEvent(runtime, TEXT_EMBEDDING_MODEL_TYPE, text, {
       promptTokens,
       completionTokens: 0,
       totalTokens: promptTokens
@@ -148,8 +171,7 @@ async function handleTextEmbedding(runtime, params) {
   }
 }
 // models/image.ts
-import { logger as logger4 } from "@elizaos/core";
-var crossFetch = typeof globalThis.fetch === "function" ? globalThis.fetch : fetch;
+import { logger as logger4, recordLlmCall } from "@elizaos/core";
 async function handleImageDescription(runtime, params) {
   const genAI = createGoogleGenAI(runtime);
   if (!genAI) {
@@ -167,36 +189,52 @@ async function handleImageDescription(runtime, params) {
     promptText = params.prompt || "Please analyze this image and provide a title and detailed description.";
   }
   try {
-    const imageResponse = await
+    const imageResponse = await fetch(imageUrl);
     if (!imageResponse.ok) {
       throw new Error(`Failed to fetch image: ${imageResponse.statusText}`);
     }
     const imageData = await imageResponse.arrayBuffer();
     const base64Image = Buffer.from(imageData).toString("base64");
     const contentType = imageResponse.headers.get("content-type") || "image/jpeg";
-    const
+    const details = {
       model: modelName,
-
-
-
-
-
-
-
-
+      systemPrompt: "",
+      userPrompt: promptText,
+      temperature: 0.7,
+      maxTokens: 8192,
+      purpose: "external_llm",
+      actionType: "google-genai.IMAGE_DESCRIPTION.generateContent"
+    };
+    const response = await recordLlmCall(runtime, details, async () => {
+      const result = await genAI.models.generateContent({
+        model: modelName,
+        contents: [
+          {
+            role: "user",
+            parts: [
+              { text: promptText },
+              {
+                inlineData: {
+                  mimeType: contentType,
+                  data: base64Image
+                }
              }
-
-
+            ]
+          }
+        ],
+        config: {
+          temperature: 0.7,
+          topK: 40,
+          topP: 0.95,
+          maxOutputTokens: 8192,
+          safetySettings: getSafetySettings()
        }
-
-
-
-
-
-    safetySettings: getSafetySettings()
-    }
+      });
+      const responseText2 = result.text || "";
+      details.response = responseText2;
+      details.promptTokens = await countTokens(promptText);
+      details.completionTokens = await countTokens(responseText2);
+      return result;
     });
     const responseText = response.text || "";
     try {
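The image handler now routes its `generateContent` call through `recordLlmCall` from @elizaos/core, passing a mutable details record that the callback fills in with response text and token counts before resolving. A hedged sketch of that wrapper shape; the real helper's signature and side effects are assumed here, not copied from core:

```ts
// Hedged sketch of the record-then-return wrapper pattern above. The real
// recordLlmCall lives in @elizaos/core; this mirrors only its call shape.
interface LlmCallDetails {
  model: string;
  userPrompt: string;
  response?: string;
  promptTokens?: number;
  completionTokens?: number;
}

async function recordLlmCallSketch<T>(
  _runtime: unknown, // kept to mirror the (runtime, details, fn) call shape
  details: LlmCallDetails,
  fn: () => Promise<T>
): Promise<T> {
  const started = Date.now();
  // The callback mutates `details` (response text, token counts) before it
  // resolves, so whatever the recorder persists is fully populated.
  const result = await fn();
  console.log(
    `[trajectory] ${details.model}: ${Date.now() - started}ms, ` +
    `${details.promptTokens ?? 0}+${details.completionTokens ?? 0} tokens`
  );
  return result;
}
```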
@@ -221,154 +259,327 @@ async function handleImageDescription(runtime, params) {
     };
   }
 }
-// models/
-import
-
-
-
-
+// models/text.ts
+import * as ElizaCore2 from "@elizaos/core";
+import {
+  buildCanonicalSystemPrompt,
+  logger as logger5,
+  recordLlmCall as recordLlmCall2,
+  renderChatMessagesForPrompt,
+  resolveEffectiveSystemPrompt
+} from "@elizaos/core";
+var CORE_MODEL_TYPES = ElizaCore2.ModelType ?? {};
+var TEXT_NANO_MODEL_TYPE = CORE_MODEL_TYPES.TEXT_NANO ?? "TEXT_NANO";
+var TEXT_MEDIUM_MODEL_TYPE = CORE_MODEL_TYPES.TEXT_MEDIUM ?? "TEXT_MEDIUM";
+var TEXT_SMALL_MODEL_TYPE = CORE_MODEL_TYPES.TEXT_SMALL ?? "TEXT_SMALL";
+var TEXT_LARGE_MODEL_TYPE = CORE_MODEL_TYPES.TEXT_LARGE ?? "TEXT_LARGE";
+var TEXT_MEGA_MODEL_TYPE = CORE_MODEL_TYPES.TEXT_MEGA ?? "TEXT_MEGA";
+var RESPONSE_HANDLER_MODEL_TYPE = CORE_MODEL_TYPES.RESPONSE_HANDLER ?? "RESPONSE_HANDLER";
+var ACTION_PLANNER_MODEL_TYPE = CORE_MODEL_TYPES.ACTION_PLANNER ?? "ACTION_PLANNER";
+function normalizeToolsForGoogle(tools) {
+  if (!tools)
+    return;
+  if (Array.isArray(tools) && tools.length > 0 && typeof tools[0] === "object" && tools[0] !== null && "functionDeclarations" in tools[0]) {
+    return tools;
   }
-  const
-  const
-
-
-
-
-  enhancedPrompt += `
-
-Please respond with a JSON object that follows this schema:
-${JSON.stringify(params.schema, null, 2)}`;
+  const flat = Array.isArray(tools) ? tools : Object.entries(tools).map(([name, value]) => ({ name, ...value }));
+  const declarations = [];
+  for (const tool of flat) {
+    const name = tool.name ?? tool.function?.name;
+    if (!name) {
+      throw new Error("[GoogleGenAI] Tool definition is missing a name.");
     }
-  const
-
-
-
-
-
-
-
-
-    safetySettings: getSafetySettings()
-    }
-  });
-  const text = response.text || "";
-  const promptTokens = await countTokens(enhancedPrompt);
-  const completionTokens = await countTokens(text);
-  emitModelUsageEvent(runtime, modelType, params.prompt, {
-    promptTokens,
-    completionTokens,
-    totalTokens: promptTokens + completionTokens
+    const description = tool.description ?? tool.function?.description;
+    const parameters = tool.parameters ?? tool.inputSchema ?? tool.function?.parameters ?? {
+      type: "object",
+      properties: {}
+    };
+    declarations.push({
+      name,
+      ...description ? { description } : {},
+      parameters
     });
-
-
-
-
-
-
-
-
-
-
+  }
+  return declarations.length > 0 ? [{ functionDeclarations: declarations }] : undefined;
+}
+function normalizeToolConfigForGoogle(toolChoice) {
+  if (!toolChoice)
+    return;
+  if (toolChoice === "auto") {
+    return { functionCallingConfig: { mode: "AUTO" } };
+  }
+  if (toolChoice === "required") {
+    return { functionCallingConfig: { mode: "ANY" } };
+  }
+  if (toolChoice === "none") {
+    return { functionCallingConfig: { mode: "NONE" } };
+  }
+  let toolName;
+  if ("type" in toolChoice) {
+    toolName = toolChoice.type === "function" ? toolChoice.function.name : toolChoice.toolName ?? toolChoice.name;
+  } else {
+    toolName = toolChoice.name;
+  }
+  if (toolName) {
+    return {
+      functionCallingConfig: {
+        mode: "ANY",
+        allowedFunctionNames: [toolName]
       }
-
+    };
+  }
+  return;
+}
+function resolveResponseJsonSchema(responseSchema) {
+  if (!responseSchema)
+    return;
+  if ("schema" in responseSchema && responseSchema.schema) {
+    return responseSchema.schema;
+  }
+  return responseSchema;
+}
+function buildPromptParts(prompt, attachments) {
+  const parts = [{ text: prompt }];
+  for (const attachment of attachments ?? []) {
+    if (attachment.data instanceof URL) {
+      parts.push({
+        fileData: {
+          mimeType: attachment.mediaType,
+          fileUri: attachment.data.toString()
+        }
+      });
+      continue;
     }
-
-
+    if (typeof attachment.data === "string" && /^https?:\/\//i.test(attachment.data)) {
+      parts.push({
+        fileData: {
+          mimeType: attachment.mediaType,
+          fileUri: attachment.data
+        }
+      });
+      continue;
+    }
+    if (typeof attachment.data === "string") {
+      const dataUrlMatch = attachment.data.match(/^data:([^;,]+);base64,(.+)$/i);
+      parts.push({
+        inlineData: {
+          mimeType: dataUrlMatch?.[1] ?? attachment.mediaType,
+          data: dataUrlMatch?.[2] ?? attachment.data
+        }
+      });
+      continue;
+    }
+    parts.push({
+      inlineData: {
+        mimeType: attachment.mediaType,
+        data: Buffer.from(attachment.data).toString("base64")
+      }
+    });
   }
+  return parts;
 }
-
-  return
+function resolveGoogleSystemInstruction(runtime, params) {
+  return resolveEffectiveSystemPrompt({
+    params,
+    fallback: buildCanonicalSystemPrompt({ character: runtime.character })
+  });
 }
-
-  return
+function resolveGooglePrompt(params, systemInstruction) {
+  return renderChatMessagesForPrompt(params.messages, {
+    omitDuplicateSystem: systemInstruction
+  }) ?? params.prompt;
 }
-
-
-
-
-
-
+function getModelNameForType(runtime, modelType) {
+  switch (modelType) {
+    case TEXT_NANO_MODEL_TYPE:
+      return getNanoModel(runtime);
+    case TEXT_MEDIUM_MODEL_TYPE:
+      return getMediumModel(runtime);
+    case TEXT_SMALL_MODEL_TYPE:
+      return getSmallModel(runtime);
+    case TEXT_LARGE_MODEL_TYPE:
+      return getLargeModel(runtime);
+    case TEXT_MEGA_MODEL_TYPE:
+      return getMegaModel(runtime);
+    case RESPONSE_HANDLER_MODEL_TYPE:
+      return getResponseHandlerModel(runtime);
+    case ACTION_PLANNER_MODEL_TYPE:
+      return getActionPlannerModel(runtime);
+    default:
+      return getLargeModel(runtime);
   }
-
+}
+function buildGoogleGenerationConfig(params, systemInstruction, temperature, maxTokens, stopSequences) {
+  const tools = normalizeToolsForGoogle(params.tools);
+  const toolConfig = normalizeToolConfigForGoogle(params.toolChoice);
+  const responseJsonSchema = resolveResponseJsonSchema(params.responseSchema);
+  const baseConfig = {
+    temperature,
+    topK: 40,
+    topP: 0.95,
+    maxOutputTokens: maxTokens,
+    stopSequences,
+    safetySettings: getSafetySettings(),
+    ...systemInstruction && { systemInstruction },
+    ...tools ? { tools } : {},
+    ...toolConfig ? { toolConfig } : {},
+    ...responseJsonSchema ? {
+      responseMimeType: "application/json",
+      responseJsonSchema
+    } : {}
+  };
+  return baseConfig;
+}
+function createLlmCallDetails(modelName, modelType, prompt, systemInstruction, temperature, maxTokens) {
+  return {
+    model: modelName,
+    systemPrompt: systemInstruction ?? "",
+    userPrompt: prompt,
+    temperature,
+    maxTokens,
+    purpose: "external_llm",
+    actionType: `google-genai.${modelType}.generateContent`
+  };
+}
+async function generateContentWithTrajectory(runtime, genAI, modelName, modelType, prompt, systemInstruction, temperature, maxTokens, request) {
+  const details = createLlmCallDetails(modelName, modelType, prompt, systemInstruction, temperature, maxTokens);
+  const response = await recordLlmCall2(runtime, details, async () => {
+    const result = await genAI.models.generateContent(request);
+    const text2 = result.text || "";
+    details.response = text2;
+    details.promptTokens = await countTokens(prompt);
+    details.completionTokens = await countTokens(text2);
+    return result;
+  });
+  const text = response.text || "";
+  const promptTokens = details.promptTokens ?? await countTokens(prompt);
+  const completionTokens = details.completionTokens ?? await countTokens(text);
+  emitModelUsageEvent(runtime, modelType, prompt, {
+    promptTokens,
+    completionTokens,
+    totalTokens: promptTokens + completionTokens
+  });
+  return text;
 }
 async function handleTextSmall(runtime, params) {
-  const {
-
+  const {
+    stopSequences = [],
+    maxTokens = 8192,
+    temperature = 0.7,
+    attachments
+  } = params;
   const genAI = createGoogleGenAI(runtime);
   if (!genAI) {
     throw new Error("Google Generative AI client not initialized");
   }
-  const modelName =
-
+  const modelName = getModelNameForType(runtime, TEXT_SMALL_MODEL_TYPE);
+  logger5.log(`[TEXT_SMALL] Using model: ${modelName}`);
   try {
-    const systemInstruction = runtime
-    const
+    const systemInstruction = resolveGoogleSystemInstruction(runtime, params);
+    const promptText = resolveGooglePrompt(params, systemInstruction);
+    return await generateContentWithTrajectory(runtime, genAI, modelName, TEXT_SMALL_MODEL_TYPE, promptText, systemInstruction, temperature, maxTokens, {
       model: modelName,
-      contents:
-
-
-
-
-
-
-      safetySettings: getSafetySettings(),
-      ...systemInstruction && { systemInstruction }
-      }
-    });
-    const text = response.text || "";
-    const promptTokens = await countTokens(prompt);
-    const completionTokens = await countTokens(text);
-    emitModelUsageEvent(runtime, ModelType2.TEXT_SMALL, prompt, {
-      promptTokens,
-      completionTokens,
-      totalTokens: promptTokens + completionTokens
+      contents: (attachments?.length ?? 0) > 0 ? [
+        {
+          role: "user",
+          parts: buildPromptParts(promptText, attachments)
+        }
+      ] : promptText,
+      config: buildGoogleGenerationConfig(params, systemInstruction, temperature, maxTokens, stopSequences)
     });
-    return text;
   } catch (error) {
-
+    logger5.error(`[TEXT_SMALL] Error: ${error instanceof Error ? error.message : String(error)}`);
     throw error;
   }
 }
 async function handleTextLarge(runtime, params) {
-  const {
-
+  const {
+    stopSequences = [],
+    maxTokens = 8192,
+    temperature = 0.7,
+    attachments
+  } = params;
   const genAI = createGoogleGenAI(runtime);
   if (!genAI) {
     throw new Error("Google Generative AI client not initialized");
   }
-  const modelName =
-
+  const modelName = getModelNameForType(runtime, TEXT_LARGE_MODEL_TYPE);
+  logger5.log(`[TEXT_LARGE] Using model: ${modelName}`);
   try {
-    const systemInstruction = runtime
-    const
+    const systemInstruction = resolveGoogleSystemInstruction(runtime, params);
+    const promptText = resolveGooglePrompt(params, systemInstruction);
+    return await generateContentWithTrajectory(runtime, genAI, modelName, TEXT_LARGE_MODEL_TYPE, promptText, systemInstruction, temperature, maxTokens, {
       model: modelName,
-      contents:
-
-
-
-
-
-
-      safetySettings: getSafetySettings(),
-      ...systemInstruction && { systemInstruction }
-      }
+      contents: (attachments?.length ?? 0) > 0 ? [
+        {
+          role: "user",
+          parts: buildPromptParts(promptText, attachments)
+        }
+      ] : promptText,
+      config: buildGoogleGenerationConfig(params, systemInstruction, temperature, maxTokens, stopSequences)
     });
-
-
-
-
-
-
-
+  } catch (error) {
+    logger5.error(`[TEXT_LARGE] Error: ${error instanceof Error ? error.message : String(error)}`);
+    throw error;
+  }
+}
+async function handleTextNano(runtime, params) {
+  return handleTextWithType(runtime, TEXT_NANO_MODEL_TYPE, params);
+}
+async function handleTextMedium(runtime, params) {
+  return handleTextWithType(runtime, TEXT_MEDIUM_MODEL_TYPE, params);
+}
+async function handleTextMega(runtime, params) {
+  return handleTextWithType(runtime, TEXT_MEGA_MODEL_TYPE, params);
+}
+async function handleResponseHandler(runtime, params) {
+  return handleTextWithType(runtime, RESPONSE_HANDLER_MODEL_TYPE, params);
+}
+async function handleActionPlanner(runtime, params) {
+  return handleTextWithType(runtime, ACTION_PLANNER_MODEL_TYPE, params);
+}
+async function handleTextWithType(runtime, modelType, params) {
+  const {
+    stopSequences = [],
+    maxTokens = 8192,
+    temperature = 0.7,
+    attachments
+  } = params;
+  const genAI = createGoogleGenAI(runtime);
+  if (!genAI) {
+    throw new Error("Google Generative AI client not initialized");
+  }
+  const modelName = getModelNameForType(runtime, modelType);
+  logger5.log(`[${modelType}] Using model: ${modelName}`);
+  try {
+    const systemInstruction = resolveGoogleSystemInstruction(runtime, params);
+    const promptText = resolveGooglePrompt(params, systemInstruction);
+    return await generateContentWithTrajectory(runtime, genAI, modelName, modelType, promptText, systemInstruction, temperature, maxTokens, {
+      model: modelName,
+      contents: (attachments?.length ?? 0) > 0 ? [
+        {
+          role: "user",
+          parts: buildPromptParts(promptText, attachments)
+        }
+      ] : promptText,
+      config: buildGoogleGenerationConfig(params, systemInstruction, temperature, maxTokens, stopSequences)
     });
-    return text;
   } catch (error) {
-
+    logger5.error(`[${modelType}] Error: ${error instanceof Error ? error.message : String(error)}`);
     throw error;
   }
 }
 // index.ts
+var CORE_MODEL_TYPES2 = ElizaCore3.ModelType ?? {};
+var TEXT_NANO_MODEL_TYPE2 = CORE_MODEL_TYPES2.TEXT_NANO ?? "TEXT_NANO";
+var TEXT_MEDIUM_MODEL_TYPE2 = CORE_MODEL_TYPES2.TEXT_MEDIUM ?? "TEXT_MEDIUM";
+var TEXT_SMALL_MODEL_TYPE2 = CORE_MODEL_TYPES2.TEXT_SMALL ?? "TEXT_SMALL";
+var TEXT_LARGE_MODEL_TYPE2 = CORE_MODEL_TYPES2.TEXT_LARGE ?? "TEXT_LARGE";
+var TEXT_EMBEDDING_MODEL_TYPE2 = CORE_MODEL_TYPES2.TEXT_EMBEDDING ?? "TEXT_EMBEDDING";
+var IMAGE_DESCRIPTION_MODEL_TYPE = CORE_MODEL_TYPES2.IMAGE_DESCRIPTION ?? "IMAGE_DESCRIPTION";
+var TEXT_MEGA_MODEL_TYPE2 = CORE_MODEL_TYPES2.TEXT_MEGA ?? "TEXT_MEGA";
+var RESPONSE_HANDLER_MODEL_TYPE2 = CORE_MODEL_TYPES2.RESPONSE_HANDLER ?? "RESPONSE_HANDLER";
+var ACTION_PLANNER_MODEL_TYPE2 = CORE_MODEL_TYPES2.ACTION_PLANNER ?? "ACTION_PLANNER";
 var pluginTests = [
   {
     name: "google_genai_plugin_tests",
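Most of this hunk is interop glue: `normalizeToolsForGoogle` flattens array- or map-style tool definitions (including OpenAI-style `function` wrappers) into Google `functionDeclarations`, and `normalizeToolConfigForGoogle` maps a generic `toolChoice` onto Gemini's `functionCallingConfig` modes, expressing `"required"` as `ANY` and a named tool as `ANY` restricted to a single allowed function. A sketch of the choice mapping under simplified, assumed types:

```ts
// Sketch of the toolChoice -> functionCallingConfig mapping shown above.
// The ToolChoice type is a simplified assumption, not the plugin's own.
type ToolChoice =
  | "auto" | "required" | "none"
  | { type: "function"; function: { name: string } }
  | { name: string };

function toFunctionCallingConfig(choice: ToolChoice) {
  if (choice === "auto") return { mode: "AUTO" as const };
  if (choice === "required") return { mode: "ANY" as const }; // "must call something"
  if (choice === "none") return { mode: "NONE" as const };
  const name = "type" in choice ? choice.function.name : choice.name;
  // Forcing one specific tool: ANY mode restricted to a single allowed name.
  return { mode: "ANY" as const, allowedFunctionNames: [name] };
}

console.log(toFunctionCallingConfig("required"));             // { mode: "ANY" }
console.log(toFunctionCallingConfig({ name: "search_web" })); // restricted to one tool
```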
@@ -386,22 +597,22 @@ var pluginTests = [
       for await (const model of modelList) {
         models.push(model);
       }
-
+      logger6.log(`Available models: ${models.length}`);
     }
   },
   {
     name: "google_test_text_embedding",
     fn: async (runtime) => {
       try {
-        const embedding = await runtime.useModel(
+        const embedding = await runtime.useModel(ModelType4.TEXT_EMBEDDING, {
           text: "Hello, world!"
         });
-
+        logger6.log(`Embedding dimension: ${embedding.length}`);
         if (embedding.length === 0) {
           throw new Error("Failed to generate embedding");
         }
       } catch (error) {
-
+        logger6.error(`Error in test_text_embedding: ${error instanceof Error ? error.message : String(error)}`);
         throw error;
       }
     }
@@ -410,15 +621,15 @@ var pluginTests = [
     name: "google_test_text_small",
     fn: async (runtime) => {
       try {
-        const text = await runtime.useModel(
+        const text = await runtime.useModel(ModelType4.TEXT_SMALL, {
           prompt: "What is the nature of reality in 10 words?"
         });
         if (text.length === 0) {
           throw new Error("Failed to generate text");
         }
-
+        logger6.log("Generated with TEXT_SMALL:", text);
       } catch (error) {
-
+        logger6.error(`Error in test_text_small: ${error instanceof Error ? error.message : String(error)}`);
         throw error;
       }
     }
@@ -427,15 +638,15 @@ var pluginTests = [
     name: "google_test_text_large",
     fn: async (runtime) => {
       try {
-        const text = await runtime.useModel(
+        const text = await runtime.useModel(ModelType4.TEXT_LARGE, {
           prompt: "Explain quantum mechanics in simple terms."
         });
         if (text.length === 0) {
           throw new Error("Failed to generate text");
         }
-
+        logger6.log("Generated with TEXT_LARGE:", `${text.substring(0, 100)}...`);
       } catch (error) {
-
+        logger6.error(`Error in test_text_large: ${error instanceof Error ? error.message : String(error)}`);
         throw error;
       }
     }
@@ -444,20 +655,20 @@ var pluginTests = [
     name: "google_test_image_description",
     fn: async (runtime) => {
       try {
-        const result = await runtime.useModel(
-        if (result && typeof result === "object" && "title" in result && "description" in result) {
-
+        const result = await runtime.useModel(ModelType4.IMAGE_DESCRIPTION, "https://upload.wikimedia.org/wikipedia/commons/thumb/1/1c/Vitalik_Buterin_TechCrunch_London_2015_%28cropped%29.jpg/537px-Vitalik_Buterin_TechCrunch_London_2015_%28cropped%29.jpg");
+        if (result != null && typeof result === "object" && "title" in result && "description" in result) {
+          logger6.log("Image description:", JSON.stringify(result));
         } else {
-
+          logger6.error(`Invalid image description result format: ${JSON.stringify(result)}`);
         }
       } catch (error) {
-
+        logger6.error(`Error in test_image_description: ${error instanceof Error ? error.message : String(error)}`);
         throw error;
       }
     }
   },
   {
-    name: "
+    name: "google_test_structured_output_via_text_large",
     fn: async (runtime) => {
       try {
         const schema = {
@@ -469,16 +680,16 @@ var pluginTests = [
           },
           required: ["name", "age", "hobbies"]
         };
-        const result = await runtime.useModel(
+        const result = await runtime.useModel(ModelType4.TEXT_LARGE, {
           prompt: "Generate a person profile with name, age, and hobbies.",
-          schema
+          responseSchema: schema
         });
-
-        if (!result
-        throw new Error("Generated
+        logger6.log("Generated structured output:", JSON.stringify(result));
+        if (!result) {
+          throw new Error("Generated structured output is empty");
         }
       } catch (error) {
-
+        logger6.error(`Error in test_structured_output_via_text_large: ${error instanceof Error ? error.message : String(error)}`);
         throw error;
       }
     }
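The structured-output test now passes the schema under `responseSchema` rather than the old `schema` key; per the earlier hunk, `buildGoogleGenerationConfig` turns that into `responseMimeType: "application/json"` plus `responseJsonSchema` on the request. A hedged caller-side sketch of how a schema flows through, with the `useModel` runtime shape assumed for illustration:

```ts
// Hypothetical caller sketch: a JSON schema flowing through responseSchema.
// The useModel signature here is an assumption, not the runtime's own typing.
const personSchema = {
  type: "object",
  properties: {
    name: { type: "string" },
    age: { type: "number" },
    hobbies: { type: "array", items: { type: "string" } },
  },
  required: ["name", "age", "hobbies"],
};

// With responseSchema set, the plugin asks Gemini for application/json output
// constrained by responseJsonSchema, so the returned text parses cleanly.
async function generateProfile(useModel: (type: string, params: object) => Promise<string>) {
  const raw = await useModel("TEXT_LARGE", {
    prompt: "Generate a person profile with name, age, and hobbies.",
    responseSchema: personSchema,
  });
  return JSON.parse(raw) as { name: string; age: number; hobbies: string[] };
}
```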
@@ -496,45 +707,74 @@ var env = getProcessEnv();
 var googleGenAIPlugin = {
   name: "google-genai",
   description: "Google Generative AI plugin for Gemini models",
+  autoEnable: {
+    envKeys: ["GOOGLE_API_KEY", "GOOGLE_GENERATIVE_AI_API_KEY"]
+  },
   config: {
     GOOGLE_GENERATIVE_AI_API_KEY: env.GOOGLE_GENERATIVE_AI_API_KEY ?? null,
+    GOOGLE_NANO_MODEL: env.GOOGLE_NANO_MODEL ?? null,
+    GOOGLE_MEDIUM_MODEL: env.GOOGLE_MEDIUM_MODEL ?? null,
     GOOGLE_SMALL_MODEL: env.GOOGLE_SMALL_MODEL ?? null,
     GOOGLE_LARGE_MODEL: env.GOOGLE_LARGE_MODEL ?? null,
+    GOOGLE_MEGA_MODEL: env.GOOGLE_MEGA_MODEL ?? null,
+    GOOGLE_RESPONSE_HANDLER_MODEL: env.GOOGLE_RESPONSE_HANDLER_MODEL ?? null,
+    GOOGLE_SHOULD_RESPOND_MODEL: env.GOOGLE_SHOULD_RESPOND_MODEL ?? null,
+    GOOGLE_ACTION_PLANNER_MODEL: env.GOOGLE_ACTION_PLANNER_MODEL ?? null,
+    GOOGLE_PLANNER_MODEL: env.GOOGLE_PLANNER_MODEL ?? null,
     GOOGLE_IMAGE_MODEL: env.GOOGLE_IMAGE_MODEL ?? null,
     GOOGLE_EMBEDDING_MODEL: env.GOOGLE_EMBEDDING_MODEL ?? null,
+    NANO_MODEL: env.NANO_MODEL ?? null,
+    MEDIUM_MODEL: env.MEDIUM_MODEL ?? null,
     SMALL_MODEL: env.SMALL_MODEL ?? null,
     LARGE_MODEL: env.LARGE_MODEL ?? null,
+    MEGA_MODEL: env.MEGA_MODEL ?? null,
+    RESPONSE_HANDLER_MODEL: env.RESPONSE_HANDLER_MODEL ?? null,
+    SHOULD_RESPOND_MODEL: env.SHOULD_RESPOND_MODEL ?? null,
+    ACTION_PLANNER_MODEL: env.ACTION_PLANNER_MODEL ?? null,
+    PLANNER_MODEL: env.PLANNER_MODEL ?? null,
     IMAGE_MODEL: env.IMAGE_MODEL ?? null
   },
   async init(config, runtime) {
     initializeGoogleGenAI(config, runtime);
   },
   models: {
-    [
+    [TEXT_NANO_MODEL_TYPE2]: async (runtime, params) => {
+      return handleTextNano(runtime, params);
+    },
+    [TEXT_MEDIUM_MODEL_TYPE2]: async (runtime, params) => {
+      return handleTextMedium(runtime, params);
+    },
+    [TEXT_SMALL_MODEL_TYPE2]: async (runtime, params) => {
       return handleTextSmall(runtime, params);
     },
-    [
+    [TEXT_LARGE_MODEL_TYPE2]: async (runtime, params) => {
       return handleTextLarge(runtime, params);
     },
-    [
-      return
+    [TEXT_MEGA_MODEL_TYPE2]: async (runtime, params) => {
+      return handleTextMega(runtime, params);
     },
-    [
-      return
+    [RESPONSE_HANDLER_MODEL_TYPE2]: async (runtime, params) => {
+      return handleResponseHandler(runtime, params);
+    },
+    [ACTION_PLANNER_MODEL_TYPE2]: async (runtime, params) => {
+      return handleActionPlanner(runtime, params);
     },
-    [
-      return
+    [TEXT_EMBEDDING_MODEL_TYPE2]: async (runtime, params) => {
+      return handleTextEmbedding(runtime, params);
     },
-    [
-      return
+    [IMAGE_DESCRIPTION_MODEL_TYPE]: async (runtime, params) => {
+      return handleImageDescription(runtime, params);
     }
   },
   tests: pluginTests
 };
-var
+var plugin_google_genai_default = googleGenAIPlugin;
+
+// index.node.ts
+var index_node_default = plugin_google_genai_default;
 export {
   googleGenAIPlugin,
-
+  index_node_default as default
 };

-//# debugId=
+//# debugId=20AEB5819D569BEA64756E2164756E21