@elizaos/plugin-google-genai 2.0.0-alpha.8 → 2.0.0-beta.1
This diff compares the contents of two publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
- package/README.md +124 -0
- package/auto-enable.ts +21 -0
- package/dist/browser/index.browser.js +412 -165
- package/dist/browser/index.browser.js.map +11 -11
- package/dist/build.d.ts +3 -0
- package/dist/build.d.ts.map +1 -0
- package/dist/build.js +117 -0
- package/dist/cjs/index.node.cjs +413 -172
- package/dist/cjs/index.node.js.map +11 -11
- package/dist/generated/specs/specs.d.ts +55 -0
- package/dist/generated/specs/specs.d.ts.map +1 -0
- package/dist/generated/specs/specs.js +34 -0
- package/dist/index.browser.d.ts +5 -0
- package/dist/index.browser.d.ts.map +1 -0
- package/dist/index.browser.js +4 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +252 -0
- package/dist/index.node.d.ts +5 -0
- package/dist/index.node.d.ts.map +1 -0
- package/dist/index.node.js +4 -0
- package/dist/init.d.ts +16 -0
- package/dist/init.d.ts.map +1 -0
- package/dist/init.js +27 -0
- package/dist/models/embedding.d.ts +6 -0
- package/dist/models/embedding.d.ts.map +1 -0
- package/dist/models/embedding.js +57 -0
- package/dist/models/image.d.ts +7 -0
- package/dist/models/image.d.ts.map +1 -0
- package/dist/models/image.js +91 -0
- package/dist/models/index.d.ts +13 -0
- package/dist/models/index.d.ts.map +1 -0
- package/dist/models/index.js +12 -0
- package/dist/models/object.d.ts +10 -0
- package/dist/models/object.d.ts.map +1 -0
- package/dist/models/object.js +84 -0
- package/dist/models/text.d.ts +51 -0
- package/dist/models/text.d.ts.map +1 -0
- package/dist/models/text.js +257 -0
- package/dist/node/index.node.d.ts +2 -0
- package/dist/node/index.node.js +412 -165
- package/dist/node/index.node.js.map +11 -11
- package/dist/types/index.d.ts +47 -0
- package/dist/types/index.d.ts.map +1 -0
- package/dist/types/index.js +1 -0
- package/dist/utils/config.d.ts +25 -0
- package/dist/utils/config.d.ts.map +1 -0
- package/dist/utils/config.js +115 -0
- package/dist/utils/events.d.ts +12 -0
- package/dist/utils/events.d.ts.map +1 -0
- package/dist/utils/events.js +14 -0
- package/dist/utils/index.d.ts +4 -0
- package/dist/utils/index.d.ts.map +1 -0
- package/dist/utils/index.js +3 -0
- package/dist/utils/tokenization.d.ts +2 -0
- package/dist/utils/tokenization.d.ts.map +1 -0
- package/dist/utils/tokenization.js +3 -0
- package/dist/vitest.config.d.ts +3 -0
- package/dist/vitest.config.d.ts.map +1 -0
- package/dist/vitest.config.js +8 -0
- package/package.json +32 -16
package/dist/node/index.node.js
CHANGED

@@ -1,5 +1,6 @@
 // index.ts
-import
+import * as ElizaCore3 from "@elizaos/core";
+import { logger as logger6, ModelType as ModelType4 } from "@elizaos/core";
 import { GoogleGenAI as GoogleGenAI3 } from "@google/genai";

 // init.ts
@@ -29,9 +30,24 @@ function getApiKey(runtime) {
 function getSmallModel(runtime) {
   return getSetting(runtime, "GOOGLE_SMALL_MODEL") ?? getSetting(runtime, "SMALL_MODEL", "gemini-2.0-flash-001") ?? "gemini-2.0-flash-001";
 }
+function getNanoModel(runtime) {
+  return getSetting(runtime, "GOOGLE_NANO_MODEL") ?? getSetting(runtime, "NANO_MODEL") ?? getSmallModel(runtime);
+}
+function getMediumModel(runtime) {
+  return getSetting(runtime, "GOOGLE_MEDIUM_MODEL") ?? getSetting(runtime, "MEDIUM_MODEL") ?? getSmallModel(runtime);
+}
 function getLargeModel(runtime) {
   return getSetting(runtime, "GOOGLE_LARGE_MODEL") ?? getSetting(runtime, "LARGE_MODEL", "gemini-2.5-pro-preview-03-25") ?? "gemini-2.5-pro-preview-03-25";
 }
+function getMegaModel(runtime) {
+  return getSetting(runtime, "GOOGLE_MEGA_MODEL") ?? getSetting(runtime, "MEGA_MODEL") ?? getLargeModel(runtime);
+}
+function getResponseHandlerModel(runtime) {
+  return getSetting(runtime, "GOOGLE_RESPONSE_HANDLER_MODEL") ?? getSetting(runtime, "GOOGLE_SHOULD_RESPOND_MODEL") ?? getSetting(runtime, "RESPONSE_HANDLER_MODEL") ?? getSetting(runtime, "SHOULD_RESPOND_MODEL") ?? getNanoModel(runtime);
+}
+function getActionPlannerModel(runtime) {
+  return getSetting(runtime, "GOOGLE_ACTION_PLANNER_MODEL") ?? getSetting(runtime, "GOOGLE_PLANNER_MODEL") ?? getSetting(runtime, "ACTION_PLANNER_MODEL") ?? getSetting(runtime, "PLANNER_MODEL") ?? getMediumModel(runtime);
+}
 function getImageModel(runtime) {
   return getSetting(runtime, "GOOGLE_IMAGE_MODEL") ?? getSetting(runtime, "IMAGE_MODEL", "gemini-2.5-pro-preview-03-25") ?? "gemini-2.5-pro-preview-03-25";
 }
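The hunk above adds the beta's new model tiers (NANO, MEDIUM, MEGA) plus dedicated RESPONSE_HANDLER and ACTION_PLANNER models, each resolved as: Google-prefixed setting, then generic setting, then fall through to an existing tier (NANO/MEDIUM to SMALL, MEGA to LARGE). A minimal standalone sketch of that fallback chain, with a plain record standing in for the Eliza runtime's settings (the `resolve*` helpers are hypothetical names, not the plugin's API):

```ts
// Standalone sketch of the beta's model-tier fallback (assumed shape;
// the real code reads settings through the Eliza runtime).
type Settings = Record<string, string | undefined>;

const DEFAULT_SMALL = "gemini-2.0-flash-001";

function resolveSmallModel(s: Settings): string {
  return s.GOOGLE_SMALL_MODEL ?? s.SMALL_MODEL ?? DEFAULT_SMALL;
}

// New tiers fall through to an existing tier when unset:
// NANO and MEDIUM -> SMALL, MEGA -> LARGE.
function resolveNanoModel(s: Settings): string {
  return s.GOOGLE_NANO_MODEL ?? s.NANO_MODEL ?? resolveSmallModel(s);
}

console.log(resolveNanoModel({})); // "gemini-2.0-flash-001"
console.log(resolveNanoModel({ NANO_MODEL: "gemini-1.5-flash-8b" })); // "gemini-1.5-flash-8b"
```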
@@ -90,12 +106,13 @@ function initializeGoogleGenAI(_config, runtime) {
 }

 // models/embedding.ts
-import
+import * as ElizaCore from "@elizaos/core";
+import { logger as logger3 } from "@elizaos/core";

 // utils/events.ts
-
+var MODEL_USED_EVENT = "MODEL_USED";
 function emitModelUsageEvent(runtime, type, _prompt, usage) {
-  runtime.emitEvent(
+  runtime.emitEvent(MODEL_USED_EVENT, {
     runtime,
     source: "plugin-google-genai",
     type,
@@ -113,6 +130,7 @@ async function countTokens(text) {
 }

 // models/embedding.ts
+var TEXT_EMBEDDING_MODEL_TYPE = ElizaCore.ModelType?.TEXT_EMBEDDING ?? "TEXT_EMBEDDING";
 async function handleTextEmbedding(runtime, params) {
   const genAI = createGoogleGenAI(runtime);
   if (!genAI) {
@@ -140,7 +158,7 @@ async function handleTextEmbedding(runtime, params) {
     });
     const embedding = response.embeddings?.[0]?.values || [];
     const promptTokens = await countTokens(text);
-    emitModelUsageEvent(runtime,
+    emitModelUsageEvent(runtime, TEXT_EMBEDDING_MODEL_TYPE, text, {
       promptTokens,
       completionTokens: 0,
       totalTokens: promptTokens
@@ -153,8 +171,7 @@ async function handleTextEmbedding(runtime, params) {
   }
 }
 // models/image.ts
-import { logger as logger4 } from "@elizaos/core";
-var crossFetch = typeof globalThis.fetch === "function" ? globalThis.fetch : fetch;
+import { logger as logger4, recordLlmCall } from "@elizaos/core";
 async function handleImageDescription(runtime, params) {
   const genAI = createGoogleGenAI(runtime);
   if (!genAI) {
@@ -172,36 +189,52 @@ async function handleImageDescription(runtime, params) {
     promptText = params.prompt || "Please analyze this image and provide a title and detailed description.";
   }
   try {
-    const imageResponse = await
+    const imageResponse = await fetch(imageUrl);
     if (!imageResponse.ok) {
       throw new Error(`Failed to fetch image: ${imageResponse.statusText}`);
     }
     const imageData = await imageResponse.arrayBuffer();
     const base64Image = Buffer.from(imageData).toString("base64");
     const contentType = imageResponse.headers.get("content-type") || "image/jpeg";
-    const
+    const details = {
       model: modelName,
-
-
-
-
-
-
-
-
-
+      systemPrompt: "",
+      userPrompt: promptText,
+      temperature: 0.7,
+      maxTokens: 8192,
+      purpose: "external_llm",
+      actionType: "google-genai.IMAGE_DESCRIPTION.generateContent"
+    };
+    const response = await recordLlmCall(runtime, details, async () => {
+      const result = await genAI.models.generateContent({
+        model: modelName,
+        contents: [
+          {
+            role: "user",
+            parts: [
+              { text: promptText },
+              {
+                inlineData: {
+                  mimeType: contentType,
+                  data: base64Image
+                }
               }
-
-
+            ]
+          }
+        ],
+        config: {
+          temperature: 0.7,
+          topK: 40,
+          topP: 0.95,
+          maxOutputTokens: 8192,
+          safetySettings: getSafetySettings()
         }
-
-
-
-
-
-
-    safetySettings: getSafetySettings()
-    }
+      });
+      const responseText2 = result.text || "";
+      details.response = responseText2;
+      details.promptTokens = await countTokens(promptText);
+      details.completionTokens = await countTokens(responseText2);
+      return result;
     });
     const responseText = response.text || "";
     try {
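From this hunk on, every provider call is wrapped in `recordLlmCall` from @elizaos/core: the handler builds a details record up front, runs the actual `generateContent` call inside the callback, and writes the response text and token counts back onto the record before the wrapper resolves. The wrapper's semantics are inferred from the diff (it appears to await the callback and return its result); a sketch of the call shape with a stub standing in for the real function:

```ts
// Stub with the same call shape the diff relies on; the real
// recordLlmCall in @elizaos/core presumably also persists `details`.
async function recordLlmCallStub<T>(
  _runtime: unknown,
  _details: Record<string, unknown>,
  fn: () => Promise<T>
): Promise<T> {
  return fn();
}

async function describeImageInstrumented(
  runtime: unknown,
  callModel: () => Promise<{ text?: string }> // stand-in for generateContent
): Promise<string> {
  const details: Record<string, unknown> = {
    model: "gemini-2.5-pro-preview-03-25",
    purpose: "external_llm",
    actionType: "google-genai.IMAGE_DESCRIPTION.generateContent",
  };
  const result = await recordLlmCallStub(runtime, details, async () => {
    const response = await callModel();
    // Mutating `details` inside the callback lets the recorder see the
    // outcome before the wrapper resolves.
    details.response = response.text ?? "";
    return response;
  });
  return result.text ?? "";
}
```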
@@ -226,143 +259,327 @@ async function handleImageDescription(runtime, params) {
     };
   }
 }
-// models/
-import
-
+// models/text.ts
+import * as ElizaCore2 from "@elizaos/core";
+import {
+  buildCanonicalSystemPrompt,
+  logger as logger5,
+  recordLlmCall as recordLlmCall2,
+  renderChatMessagesForPrompt,
+  resolveEffectiveSystemPrompt
+} from "@elizaos/core";
+var CORE_MODEL_TYPES = ElizaCore2.ModelType ?? {};
+var TEXT_NANO_MODEL_TYPE = CORE_MODEL_TYPES.TEXT_NANO ?? "TEXT_NANO";
+var TEXT_MEDIUM_MODEL_TYPE = CORE_MODEL_TYPES.TEXT_MEDIUM ?? "TEXT_MEDIUM";
+var TEXT_SMALL_MODEL_TYPE = CORE_MODEL_TYPES.TEXT_SMALL ?? "TEXT_SMALL";
+var TEXT_LARGE_MODEL_TYPE = CORE_MODEL_TYPES.TEXT_LARGE ?? "TEXT_LARGE";
+var TEXT_MEGA_MODEL_TYPE = CORE_MODEL_TYPES.TEXT_MEGA ?? "TEXT_MEGA";
+var RESPONSE_HANDLER_MODEL_TYPE = CORE_MODEL_TYPES.RESPONSE_HANDLER ?? "RESPONSE_HANDLER";
+var ACTION_PLANNER_MODEL_TYPE = CORE_MODEL_TYPES.ACTION_PLANNER ?? "ACTION_PLANNER";
+function normalizeToolsForGoogle(tools) {
+  if (!tools)
+    return;
+  if (Array.isArray(tools) && tools.length > 0 && typeof tools[0] === "object" && tools[0] !== null && "functionDeclarations" in tools[0]) {
+    return tools;
+  }
+  const flat = Array.isArray(tools) ? tools : Object.entries(tools).map(([name, value]) => ({ name, ...value }));
+  const declarations = [];
+  for (const tool of flat) {
+    const name = tool.name ?? tool.function?.name;
+    if (!name) {
+      throw new Error("[GoogleGenAI] Tool definition is missing a name.");
+    }
+    const description = tool.description ?? tool.function?.description;
+    const parameters = tool.parameters ?? tool.inputSchema ?? tool.function?.parameters ?? {
+      type: "object",
+      properties: {}
+    };
+    declarations.push({
+      name,
+      ...description ? { description } : {},
+      parameters
+    });
+  }
+  return declarations.length > 0 ? [{ functionDeclarations: declarations }] : undefined;
+}
+function normalizeToolConfigForGoogle(toolChoice) {
+  if (!toolChoice)
+    return;
+  if (toolChoice === "auto") {
+    return { functionCallingConfig: { mode: "AUTO" } };
+  }
+  if (toolChoice === "required") {
+    return { functionCallingConfig: { mode: "ANY" } };
+  }
+  if (toolChoice === "none") {
+    return { functionCallingConfig: { mode: "NONE" } };
+  }
+  let toolName;
+  if ("type" in toolChoice) {
+    toolName = toolChoice.type === "function" ? toolChoice.function.name : toolChoice.toolName ?? toolChoice.name;
+  } else {
+    toolName = toolChoice.name;
+  }
+  if (toolName) {
+    return {
+      functionCallingConfig: {
+        mode: "ANY",
+        allowedFunctionNames: [toolName]
+      }
+    };
+  }
+  return;
+}
+function resolveResponseJsonSchema(responseSchema) {
+  if (!responseSchema)
+    return;
+  if ("schema" in responseSchema && responseSchema.schema) {
+    return responseSchema.schema;
+  }
+  return responseSchema;
+}
+function buildPromptParts(prompt, attachments) {
+  const parts = [{ text: prompt }];
+  for (const attachment of attachments ?? []) {
+    if (attachment.data instanceof URL) {
+      parts.push({
+        fileData: {
+          mimeType: attachment.mediaType,
+          fileUri: attachment.data.toString()
+        }
+      });
+      continue;
+    }
+    if (typeof attachment.data === "string" && /^https?:\/\//i.test(attachment.data)) {
+      parts.push({
+        fileData: {
+          mimeType: attachment.mediaType,
+          fileUri: attachment.data
+        }
+      });
+      continue;
+    }
+    if (typeof attachment.data === "string") {
+      const dataUrlMatch = attachment.data.match(/^data:([^;,]+);base64,(.+)$/i);
+      parts.push({
+        inlineData: {
+          mimeType: dataUrlMatch?.[1] ?? attachment.mediaType,
+          data: dataUrlMatch?.[2] ?? attachment.data
+        }
+      });
+      continue;
+    }
+    parts.push({
+      inlineData: {
+        mimeType: attachment.mediaType,
+        data: Buffer.from(attachment.data).toString("base64")
+      }
+    });
+  }
+  return parts;
+}
+function resolveGoogleSystemInstruction(runtime, params) {
+  return resolveEffectiveSystemPrompt({
+    params,
+    fallback: buildCanonicalSystemPrompt({ character: runtime.character })
+  });
+}
+function resolveGooglePrompt(params, systemInstruction) {
+  return renderChatMessagesForPrompt(params.messages, {
+    omitDuplicateSystem: systemInstruction
+  }) ?? params.prompt;
+}
+function getModelNameForType(runtime, modelType) {
+  switch (modelType) {
+    case TEXT_NANO_MODEL_TYPE:
+      return getNanoModel(runtime);
+    case TEXT_MEDIUM_MODEL_TYPE:
+      return getMediumModel(runtime);
+    case TEXT_SMALL_MODEL_TYPE:
+      return getSmallModel(runtime);
+    case TEXT_LARGE_MODEL_TYPE:
+      return getLargeModel(runtime);
+    case TEXT_MEGA_MODEL_TYPE:
+      return getMegaModel(runtime);
+    case RESPONSE_HANDLER_MODEL_TYPE:
+      return getResponseHandlerModel(runtime);
+    case ACTION_PLANNER_MODEL_TYPE:
+      return getActionPlannerModel(runtime);
+    default:
+      return getLargeModel(runtime);
+  }
+}
+function buildGoogleGenerationConfig(params, systemInstruction, temperature, maxTokens, stopSequences) {
+  const tools = normalizeToolsForGoogle(params.tools);
+  const toolConfig = normalizeToolConfigForGoogle(params.toolChoice);
+  const responseJsonSchema = resolveResponseJsonSchema(params.responseSchema);
+  const baseConfig = {
+    temperature,
+    topK: 40,
+    topP: 0.95,
+    maxOutputTokens: maxTokens,
+    stopSequences,
+    safetySettings: getSafetySettings(),
+    ...systemInstruction && { systemInstruction },
+    ...tools ? { tools } : {},
+    ...toolConfig ? { toolConfig } : {},
+    ...responseJsonSchema ? {
+      responseMimeType: "application/json",
+      responseJsonSchema
+    } : {}
+  };
+  return baseConfig;
+}
+function createLlmCallDetails(modelName, modelType, prompt, systemInstruction, temperature, maxTokens) {
+  return {
+    model: modelName,
+    systemPrompt: systemInstruction ?? "",
+    userPrompt: prompt,
+    temperature,
+    maxTokens,
+    purpose: "external_llm",
+    actionType: `google-genai.${modelType}.generateContent`
+  };
+}
+async function generateContentWithTrajectory(runtime, genAI, modelName, modelType, prompt, systemInstruction, temperature, maxTokens, request) {
+  const details = createLlmCallDetails(modelName, modelType, prompt, systemInstruction, temperature, maxTokens);
+  const response = await recordLlmCall2(runtime, details, async () => {
+    const result = await genAI.models.generateContent(request);
+    const text2 = result.text || "";
+    details.response = text2;
+    details.promptTokens = await countTokens(prompt);
+    details.completionTokens = await countTokens(text2);
+    return result;
+  });
+  const text = response.text || "";
+  const promptTokens = details.promptTokens ?? await countTokens(prompt);
+  const completionTokens = details.completionTokens ?? await countTokens(text);
+  emitModelUsageEvent(runtime, modelType, prompt, {
+    promptTokens,
+    completionTokens,
+    totalTokens: promptTokens + completionTokens
+  });
+  return text;
+}
+async function handleTextSmall(runtime, params) {
+  const {
+    stopSequences = [],
+    maxTokens = 8192,
+    temperature = 0.7,
+    attachments
+  } = params;
   const genAI = createGoogleGenAI(runtime);
   if (!genAI) {
     throw new Error("Google Generative AI client not initialized");
   }
-  const modelName =
-
-  logger5.info(`Using ${modelType} model: ${modelName}`);
+  const modelName = getModelNameForType(runtime, TEXT_SMALL_MODEL_TYPE);
+  logger5.log(`[TEXT_SMALL] Using model: ${modelName}`);
   try {
-
-
-
-
-    Please respond with a JSON object that follows this schema:
-    ${JSON.stringify(params.schema, null, 2)}`;
-    }
-    const response = await genAI.models.generateContent({
+    const systemInstruction = resolveGoogleSystemInstruction(runtime, params);
+    const promptText = resolveGooglePrompt(params, systemInstruction);
+    return await generateContentWithTrajectory(runtime, genAI, modelName, TEXT_SMALL_MODEL_TYPE, promptText, systemInstruction, temperature, maxTokens, {
       model: modelName,
-      contents:
-
-
-
-      topP: 0.95,
-      maxOutputTokens: 8192,
-      responseMimeType: "application/json",
-      safetySettings: getSafetySettings()
-      }
-    });
-    const text = response.text || "";
-    const promptTokens = await countTokens(enhancedPrompt);
-    const completionTokens = await countTokens(text);
-    emitModelUsageEvent(runtime, modelType, params.prompt, {
-      promptTokens,
-      completionTokens,
-      totalTokens: promptTokens + completionTokens
-    });
-    try {
-      return JSON.parse(text);
-    } catch {
-      const jsonMatch = text.match(/\{[\s\S]*\}/);
-      if (jsonMatch) {
-        try {
-          return JSON.parse(jsonMatch[0]);
-        } catch {
-          throw new Error("Failed to parse JSON from response");
+      contents: (attachments?.length ?? 0) > 0 ? [
+        {
+          role: "user",
+          parts: buildPromptParts(promptText, attachments)
         }
-
-
-      }
+      ] : promptText,
+      config: buildGoogleGenerationConfig(params, systemInstruction, temperature, maxTokens, stopSequences)
+    });
   } catch (error) {
-    logger5.error(`[
+    logger5.error(`[TEXT_SMALL] Error: ${error instanceof Error ? error.message : String(error)}`);
     throw error;
   }
 }
-async function
-
-
-
-
-
-
-import { logger as logger6, ModelType as ModelType2 } from "@elizaos/core";
-async function handleTextSmall(runtime, { prompt, stopSequences = [], maxTokens = 8192, temperature = 0.7 }) {
+async function handleTextLarge(runtime, params) {
+  const {
+    stopSequences = [],
+    maxTokens = 8192,
+    temperature = 0.7,
+    attachments
+  } = params;
   const genAI = createGoogleGenAI(runtime);
   if (!genAI) {
     throw new Error("Google Generative AI client not initialized");
   }
-  const modelName =
-
+  const modelName = getModelNameForType(runtime, TEXT_LARGE_MODEL_TYPE);
+  logger5.log(`[TEXT_LARGE] Using model: ${modelName}`);
   try {
-    const systemInstruction = runtime
-    const
+    const systemInstruction = resolveGoogleSystemInstruction(runtime, params);
+    const promptText = resolveGooglePrompt(params, systemInstruction);
+    return await generateContentWithTrajectory(runtime, genAI, modelName, TEXT_LARGE_MODEL_TYPE, promptText, systemInstruction, temperature, maxTokens, {
      model: modelName,
-      contents:
-
-
-
-
-
-
-      safetySettings: getSafetySettings(),
-      ...systemInstruction && { systemInstruction }
-      }
-    });
-    const text = response.text || "";
-    const promptTokens = await countTokens(prompt);
-    const completionTokens = await countTokens(text);
-    emitModelUsageEvent(runtime, ModelType2.TEXT_SMALL, prompt, {
-      promptTokens,
-      completionTokens,
-      totalTokens: promptTokens + completionTokens
+      contents: (attachments?.length ?? 0) > 0 ? [
+        {
+          role: "user",
+          parts: buildPromptParts(promptText, attachments)
+        }
+      ] : promptText,
+      config: buildGoogleGenerationConfig(params, systemInstruction, temperature, maxTokens, stopSequences)
     });
-    return text;
   } catch (error) {
-
+    logger5.error(`[TEXT_LARGE] Error: ${error instanceof Error ? error.message : String(error)}`);
     throw error;
   }
 }
-async function
+async function handleTextNano(runtime, params) {
+  return handleTextWithType(runtime, TEXT_NANO_MODEL_TYPE, params);
+}
+async function handleTextMedium(runtime, params) {
+  return handleTextWithType(runtime, TEXT_MEDIUM_MODEL_TYPE, params);
+}
+async function handleTextMega(runtime, params) {
+  return handleTextWithType(runtime, TEXT_MEGA_MODEL_TYPE, params);
+}
+async function handleResponseHandler(runtime, params) {
+  return handleTextWithType(runtime, RESPONSE_HANDLER_MODEL_TYPE, params);
+}
+async function handleActionPlanner(runtime, params) {
+  return handleTextWithType(runtime, ACTION_PLANNER_MODEL_TYPE, params);
+}
+async function handleTextWithType(runtime, modelType, params) {
+  const {
+    stopSequences = [],
+    maxTokens = 8192,
+    temperature = 0.7,
+    attachments
+  } = params;
   const genAI = createGoogleGenAI(runtime);
   if (!genAI) {
     throw new Error("Google Generative AI client not initialized");
   }
-  const modelName =
-
+  const modelName = getModelNameForType(runtime, modelType);
+  logger5.log(`[${modelType}] Using model: ${modelName}`);
   try {
-    const systemInstruction = runtime
-    const
+    const systemInstruction = resolveGoogleSystemInstruction(runtime, params);
+    const promptText = resolveGooglePrompt(params, systemInstruction);
+    return await generateContentWithTrajectory(runtime, genAI, modelName, modelType, promptText, systemInstruction, temperature, maxTokens, {
      model: modelName,
-      contents:
-
-
-
-
-
-
-      safetySettings: getSafetySettings(),
-      ...systemInstruction && { systemInstruction }
-      }
-    });
-    const text = response.text || "";
-    const promptTokens = await countTokens(prompt);
-    const completionTokens = await countTokens(text);
-    emitModelUsageEvent(runtime, ModelType2.TEXT_LARGE, prompt, {
-      promptTokens,
-      completionTokens,
-      totalTokens: promptTokens + completionTokens
+      contents: (attachments?.length ?? 0) > 0 ? [
+        {
+          role: "user",
+          parts: buildPromptParts(promptText, attachments)
+        }
+      ] : promptText,
+      config: buildGoogleGenerationConfig(params, systemInstruction, temperature, maxTokens, stopSequences)
     });
-    return text;
   } catch (error) {
-
+    logger5.error(`[${modelType}] Error: ${error instanceof Error ? error.message : String(error)}`);
     throw error;
   }
 }
 // index.ts
+var CORE_MODEL_TYPES2 = ElizaCore3.ModelType ?? {};
+var TEXT_NANO_MODEL_TYPE2 = CORE_MODEL_TYPES2.TEXT_NANO ?? "TEXT_NANO";
+var TEXT_MEDIUM_MODEL_TYPE2 = CORE_MODEL_TYPES2.TEXT_MEDIUM ?? "TEXT_MEDIUM";
+var TEXT_SMALL_MODEL_TYPE2 = CORE_MODEL_TYPES2.TEXT_SMALL ?? "TEXT_SMALL";
+var TEXT_LARGE_MODEL_TYPE2 = CORE_MODEL_TYPES2.TEXT_LARGE ?? "TEXT_LARGE";
+var TEXT_EMBEDDING_MODEL_TYPE2 = CORE_MODEL_TYPES2.TEXT_EMBEDDING ?? "TEXT_EMBEDDING";
+var IMAGE_DESCRIPTION_MODEL_TYPE = CORE_MODEL_TYPES2.IMAGE_DESCRIPTION ?? "IMAGE_DESCRIPTION";
+var TEXT_MEGA_MODEL_TYPE2 = CORE_MODEL_TYPES2.TEXT_MEGA ?? "TEXT_MEGA";
+var RESPONSE_HANDLER_MODEL_TYPE2 = CORE_MODEL_TYPES2.RESPONSE_HANDLER ?? "RESPONSE_HANDLER";
+var ACTION_PLANNER_MODEL_TYPE2 = CORE_MODEL_TYPES2.ACTION_PLANNER ?? "ACTION_PLANNER";
 var pluginTests = [
   {
     name: "google_genai_plugin_tests",
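The new `normalizeToolsForGoogle` in the hunk above accepts either an array of tool definitions (including OpenAI-style `{ function: {...} }` entries) or a name-keyed record, and folds them into the single `[{ functionDeclarations: [...] }]` array the Google GenAI SDK expects; `normalizeToolConfigForGoogle` maps `toolChoice` similarly ("auto" to mode AUTO, "required" to ANY, "none" to NONE, a named tool to ANY plus `allowedFunctionNames`). A trimmed re-implementation of the tool mapping to show the shape conversion (the `LooseTool` type is an assumption, not the plugin's actual typings):

```ts
type LooseTool = {
  name?: string;
  description?: string;
  parameters?: object;
  function?: { name: string; description?: string; parameters?: object };
};

function toFunctionDeclarations(
  tools: LooseTool[] | Record<string, LooseTool>
) {
  // A record becomes an array, with the key supplying the tool name.
  const flat: LooseTool[] = Array.isArray(tools)
    ? tools
    : Object.entries(tools).map(([name, t]) => ({ ...t, name }));
  const declarations = flat.map((t) => {
    const name = t.name ?? t.function?.name;
    if (!name) throw new Error("Tool definition is missing a name.");
    const description = t.description ?? t.function?.description;
    return {
      name,
      ...(description ? { description } : {}),
      parameters:
        t.parameters ?? t.function?.parameters ?? { type: "object", properties: {} },
    };
  });
  return declarations.length > 0 ? [{ functionDeclarations: declarations }] : undefined;
}

// { getWeather: { description: "...", parameters: {...} } }
//   -> [{ functionDeclarations: [{ name: "getWeather", description: "...", parameters: {...} }] }]
```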
@@ -380,22 +597,22 @@ var pluginTests = [
         for await (const model of modelList) {
           models.push(model);
         }
-
+        logger6.log(`Available models: ${models.length}`);
       }
     },
     {
       name: "google_test_text_embedding",
       fn: async (runtime) => {
         try {
-          const embedding = await runtime.useModel(
+          const embedding = await runtime.useModel(ModelType4.TEXT_EMBEDDING, {
             text: "Hello, world!"
           });
-
+          logger6.log(`Embedding dimension: ${embedding.length}`);
           if (embedding.length === 0) {
             throw new Error("Failed to generate embedding");
           }
         } catch (error) {
-
+          logger6.error(`Error in test_text_embedding: ${error instanceof Error ? error.message : String(error)}`);
           throw error;
         }
       }
@@ -404,15 +621,15 @@ var pluginTests = [
       name: "google_test_text_small",
       fn: async (runtime) => {
         try {
-          const text = await runtime.useModel(
+          const text = await runtime.useModel(ModelType4.TEXT_SMALL, {
             prompt: "What is the nature of reality in 10 words?"
           });
           if (text.length === 0) {
             throw new Error("Failed to generate text");
           }
-
+          logger6.log("Generated with TEXT_SMALL:", text);
         } catch (error) {
-
+          logger6.error(`Error in test_text_small: ${error instanceof Error ? error.message : String(error)}`);
           throw error;
         }
       }
@@ -421,15 +638,15 @@ var pluginTests = [
       name: "google_test_text_large",
       fn: async (runtime) => {
         try {
-          const text = await runtime.useModel(
+          const text = await runtime.useModel(ModelType4.TEXT_LARGE, {
             prompt: "Explain quantum mechanics in simple terms."
           });
           if (text.length === 0) {
             throw new Error("Failed to generate text");
           }
-
+          logger6.log("Generated with TEXT_LARGE:", `${text.substring(0, 100)}...`);
         } catch (error) {
-
+          logger6.error(`Error in test_text_large: ${error instanceof Error ? error.message : String(error)}`);
           throw error;
         }
       }
@@ -438,20 +655,20 @@ var pluginTests = [
       name: "google_test_image_description",
       fn: async (runtime) => {
         try {
-          const result = await runtime.useModel(
-          if (result && typeof result === "object" && "title" in result && "description" in result) {
-
+          const result = await runtime.useModel(ModelType4.IMAGE_DESCRIPTION, "https://upload.wikimedia.org/wikipedia/commons/thumb/1/1c/Vitalik_Buterin_TechCrunch_London_2015_%28cropped%29.jpg/537px-Vitalik_Buterin_TechCrunch_London_2015_%28cropped%29.jpg");
+          if (result != null && typeof result === "object" && "title" in result && "description" in result) {
+            logger6.log("Image description:", JSON.stringify(result));
           } else {
-
+            logger6.error(`Invalid image description result format: ${JSON.stringify(result)}`);
           }
         } catch (error) {
-
+          logger6.error(`Error in test_image_description: ${error instanceof Error ? error.message : String(error)}`);
           throw error;
         }
       }
     },
     {
-      name: "
+      name: "google_test_structured_output_via_text_large",
       fn: async (runtime) => {
         try {
           const schema = {
@@ -463,16 +680,16 @@ var pluginTests = [
             },
            required: ["name", "age", "hobbies"]
          };
-          const result = await runtime.useModel(
+          const result = await runtime.useModel(ModelType4.TEXT_LARGE, {
            prompt: "Generate a person profile with name, age, and hobbies.",
-            schema
+            responseSchema: schema
          });
-
-          if (!result
-          throw new Error("Generated
+          logger6.log("Generated structured output:", JSON.stringify(result));
+          if (!result) {
+            throw new Error("Generated structured output is empty");
          }
        } catch (error) {
-
+          logger6.error(`Error in test_structured_output_via_text_large: ${error instanceof Error ? error.message : String(error)}`);
          throw error;
        }
      }
@@ -490,44 +707,74 @@ var env = getProcessEnv();
 var googleGenAIPlugin = {
   name: "google-genai",
   description: "Google Generative AI plugin for Gemini models",
+  autoEnable: {
+    envKeys: ["GOOGLE_API_KEY", "GOOGLE_GENERATIVE_AI_API_KEY"]
+  },
   config: {
     GOOGLE_GENERATIVE_AI_API_KEY: env.GOOGLE_GENERATIVE_AI_API_KEY ?? null,
+    GOOGLE_NANO_MODEL: env.GOOGLE_NANO_MODEL ?? null,
+    GOOGLE_MEDIUM_MODEL: env.GOOGLE_MEDIUM_MODEL ?? null,
     GOOGLE_SMALL_MODEL: env.GOOGLE_SMALL_MODEL ?? null,
     GOOGLE_LARGE_MODEL: env.GOOGLE_LARGE_MODEL ?? null,
+    GOOGLE_MEGA_MODEL: env.GOOGLE_MEGA_MODEL ?? null,
+    GOOGLE_RESPONSE_HANDLER_MODEL: env.GOOGLE_RESPONSE_HANDLER_MODEL ?? null,
+    GOOGLE_SHOULD_RESPOND_MODEL: env.GOOGLE_SHOULD_RESPOND_MODEL ?? null,
+    GOOGLE_ACTION_PLANNER_MODEL: env.GOOGLE_ACTION_PLANNER_MODEL ?? null,
+    GOOGLE_PLANNER_MODEL: env.GOOGLE_PLANNER_MODEL ?? null,
     GOOGLE_IMAGE_MODEL: env.GOOGLE_IMAGE_MODEL ?? null,
     GOOGLE_EMBEDDING_MODEL: env.GOOGLE_EMBEDDING_MODEL ?? null,
+    NANO_MODEL: env.NANO_MODEL ?? null,
+    MEDIUM_MODEL: env.MEDIUM_MODEL ?? null,
     SMALL_MODEL: env.SMALL_MODEL ?? null,
     LARGE_MODEL: env.LARGE_MODEL ?? null,
+    MEGA_MODEL: env.MEGA_MODEL ?? null,
+    RESPONSE_HANDLER_MODEL: env.RESPONSE_HANDLER_MODEL ?? null,
+    SHOULD_RESPOND_MODEL: env.SHOULD_RESPOND_MODEL ?? null,
+    ACTION_PLANNER_MODEL: env.ACTION_PLANNER_MODEL ?? null,
+    PLANNER_MODEL: env.PLANNER_MODEL ?? null,
     IMAGE_MODEL: env.IMAGE_MODEL ?? null
   },
   async init(config, runtime) {
     initializeGoogleGenAI(config, runtime);
   },
   models: {
-    [
+    [TEXT_NANO_MODEL_TYPE2]: async (runtime, params) => {
+      return handleTextNano(runtime, params);
+    },
+    [TEXT_MEDIUM_MODEL_TYPE2]: async (runtime, params) => {
+      return handleTextMedium(runtime, params);
+    },
+    [TEXT_SMALL_MODEL_TYPE2]: async (runtime, params) => {
       return handleTextSmall(runtime, params);
     },
-    [
+    [TEXT_LARGE_MODEL_TYPE2]: async (runtime, params) => {
       return handleTextLarge(runtime, params);
     },
-    [
-      return
+    [TEXT_MEGA_MODEL_TYPE2]: async (runtime, params) => {
+      return handleTextMega(runtime, params);
     },
-    [
-      return
+    [RESPONSE_HANDLER_MODEL_TYPE2]: async (runtime, params) => {
+      return handleResponseHandler(runtime, params);
+    },
+    [ACTION_PLANNER_MODEL_TYPE2]: async (runtime, params) => {
+      return handleActionPlanner(runtime, params);
     },
-    [
-      return
+    [TEXT_EMBEDDING_MODEL_TYPE2]: async (runtime, params) => {
      return handleTextEmbedding(runtime, params);
     },
-    [
-      return
+    [IMAGE_DESCRIPTION_MODEL_TYPE]: async (runtime, params) => {
+      return handleImageDescription(runtime, params);
     }
   },
   tests: pluginTests
 };
+var plugin_google_genai_default = googleGenAIPlugin;
+
+// index.node.ts
+var index_node_default = plugin_google_genai_default;
 export {
   googleGenAIPlugin,
-
+  index_node_default as default
 };

-//# debugId=
+//# debugId=20AEB5819D569BEA64756E2164756E21