@elizaos/plugin-google-genai 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2025 Shaw Walters and elizaOS Contributors
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
package/README.md ADDED
@@ -0,0 +1,65 @@
1
+ # @elizaos/plugin-google-genai
2
+
3
+ Google Generative AI (Gemini) integration for ElizaOS.
4
+
5
+ ## Installation
6
+
7
+ ```bash
8
+ npm install @elizaos/plugin-google-genai
9
+ ```
10
+
11
+ ## Configuration
12
+
13
+ 1. Get your Google AI API key from [Google AI Studio](https://aistudio.google.com/)
14
+ 2. Set the API key in your environment:
15
+
16
+ ```bash
17
+ GOOGLE_GENERATIVE_AI_API_KEY=your_api_key_here
18
+ ```
19
+
20
+ ## Usage
21
+
22
+ Add the plugin to your character configuration:
23
+
24
+ ```json
25
+ {
26
+ "plugins": ["@elizaos/plugin-google-genai"]
27
+ }
28
+ ```
29
+
30
+ ## Supported Models
31
+
32
+ - **Text Generation**:
33
+ - Small: `gemini-2.0-flash-001` (default)
34
+ - Large: `gemini-2.0-flash-001` (default)
35
+ - **Text Embeddings**: `text-embedding-004` (default)
36
+ - **Image Analysis**: `gemini-2.0-flash-001` (default)
37
+
38
+ ## Environment Variables
39
+
40
+ - `GOOGLE_GENERATIVE_AI_API_KEY` (required): Your Google AI API key
41
+ - `GOOGLE_SMALL_MODEL` (optional): Override small model
42
+ - `GOOGLE_LARGE_MODEL` (optional): Override large model
43
+ - `GOOGLE_IMAGE_MODEL` (optional): Override image model
44
+ - `GOOGLE_EMBEDDING_MODEL` (optional): Override embedding model
45
+ - `SMALL_MODEL` (optional): Fallback for small model
46
+ - `LARGE_MODEL` (optional): Fallback for large model
47
+ - `IMAGE_MODEL` (optional): Fallback for image model
48
+
49
+ ## Model Types Provided
50
+
51
+ - `TEXT_SMALL` - Fast text generation using Gemini Flash
52
+ - `TEXT_LARGE` - Complex text generation using Gemini
53
+ - `TEXT_EMBEDDING` - Text embeddings for similarity search
54
+ - `OBJECT_SMALL` - JSON object generation (small model)
55
+ - `OBJECT_LARGE` - Complex JSON object generation (large model)
56
+ - `IMAGE_DESCRIPTION` - Image analysis and description
57
+
58
+ ## Features
59
+
60
+ - Direct integration with Google's Gemini models
61
+ - Support for text generation, embeddings, and image analysis
62
+ - Configurable safety settings
63
+ - Token usage tracking and event emission
64
+ - Automatic JSON parsing for object generation
65
+ - System instruction support from character configuration
package/dist/index.d.ts ADDED
@@ -0,0 +1,9 @@
1
+ import { Plugin } from '@elizaos/core';
2
+
3
+ /**
4
+ * Defines the Google Generative AI plugin with its name, description, and configuration options.
5
+ * @type {Plugin}
6
+ */
7
+ declare const googleGenAIPlugin: Plugin;
8
+
9
+ export { googleGenAIPlugin as default, googleGenAIPlugin };
package/dist/index.js ADDED
@@ -0,0 +1,518 @@
1
+ // src/index.ts
2
+ import { GoogleGenAI, HarmCategory, HarmBlockThreshold } from "@google/genai";
3
+ import { EventType, logger, ModelType } from "@elizaos/core";
4
+ import { fetch } from "undici";
5
+ function getSetting(runtime, key, defaultValue) {
6
+ return runtime.getSetting(key) ?? process.env[key] ?? defaultValue;
7
+ }
8
+ function getApiKey(runtime) {
9
+ return getSetting(runtime, "GOOGLE_GENERATIVE_AI_API_KEY");
10
+ }
11
+ function getSmallModel(runtime) {
12
+ return getSetting(runtime, "GOOGLE_SMALL_MODEL") ?? getSetting(runtime, "SMALL_MODEL", "gemini-2.0-flash-001") ?? "gemini-2.0-flash-001";
13
+ }
14
+ function getLargeModel(runtime) {
15
+ return getSetting(runtime, "GOOGLE_LARGE_MODEL") ?? getSetting(runtime, "LARGE_MODEL", "gemini-2.0-flash-001") ?? "gemini-2.0-flash-001";
16
+ }
17
+ function getImageModel(runtime) {
18
+ return getSetting(runtime, "GOOGLE_IMAGE_MODEL") ?? getSetting(runtime, "IMAGE_MODEL", "gemini-2.0-flash-001") ?? "gemini-2.0-flash-001";
19
+ }
20
+ function getEmbeddingModel(runtime) {
21
+ return getSetting(runtime, "GOOGLE_EMBEDDING_MODEL", "text-embedding-004") ?? "text-embedding-004";
22
+ }
23
+ function createGoogleGenAI(runtime) {
24
+ const apiKey = getApiKey(runtime);
25
+ if (!apiKey) {
26
+ logger.error("Google Generative AI API Key is missing");
27
+ return null;
28
+ }
29
+ return new GoogleGenAI({ apiKey });
30
+ }
31
+ function getSafetySettings() {
32
+ return [
33
+ {
34
+ category: HarmCategory.HARM_CATEGORY_HARASSMENT,
35
+ threshold: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE
36
+ },
37
+ {
38
+ category: HarmCategory.HARM_CATEGORY_HATE_SPEECH,
39
+ threshold: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE
40
+ },
41
+ {
42
+ category: HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
43
+ threshold: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE
44
+ },
45
+ {
46
+ category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
47
+ threshold: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE
48
+ }
49
+ ];
50
+ }
51
+ function emitModelUsageEvent(runtime, type, prompt, usage) {
52
+ runtime.emitEvent(EventType.MODEL_USED, {
53
+ provider: "google",
54
+ type,
55
+ prompt,
56
+ tokens: {
57
+ prompt: usage.promptTokens,
58
+ completion: usage.completionTokens,
59
+ total: usage.totalTokens
60
+ }
61
+ });
62
+ }
63
+ async function countTokens(text) {
64
+ return Math.ceil(text.length / 4);
65
+ }
66
+ async function generateObjectByModelType(runtime, params, modelType, getModelFn) {
67
+ const genAI = createGoogleGenAI(runtime);
68
+ if (!genAI) {
69
+ throw new Error("Google Generative AI client not initialized");
70
+ }
71
+ const modelName = getModelFn(runtime);
72
+ const temperature = params.temperature ?? 0.1;
73
+ logger.info(`Using ${modelType} model: ${modelName}`);
74
+ try {
75
+ let enhancedPrompt = params.prompt;
76
+ if (params.schema) {
77
+ enhancedPrompt += `
78
+
79
+ Please respond with a JSON object that follows this schema:
80
+ ${JSON.stringify(params.schema, null, 2)}`;
81
+ }
82
+ const response = await genAI.models.generateContent({
83
+ model: modelName,
84
+ contents: enhancedPrompt,
85
+ config: {
86
+ temperature,
87
+ topK: 40,
88
+ topP: 0.95,
89
+ maxOutputTokens: 8192,
90
+ responseMimeType: "application/json",
91
+ safetySettings: getSafetySettings()
92
+ }
93
+ });
94
+ const text = response.text || "";
95
+ const promptTokens = await countTokens(enhancedPrompt);
96
+ const completionTokens = await countTokens(text);
97
+ emitModelUsageEvent(runtime, modelType, params.prompt, {
98
+ promptTokens,
99
+ completionTokens,
100
+ totalTokens: promptTokens + completionTokens
101
+ });
102
+ try {
103
+ return JSON.parse(text);
104
+ } catch (parseError) {
105
+ logger.error("Failed to parse JSON response:", parseError);
106
+ const jsonMatch = text.match(/\{[\s\S]*\}/);
107
+ if (jsonMatch) {
108
+ try {
109
+ return JSON.parse(jsonMatch[0]);
110
+ } catch (secondParseError) {
111
+ throw new Error("Failed to parse JSON from response");
112
+ }
113
+ }
114
+ throw parseError;
115
+ }
116
+ } catch (error) {
117
+ const message = error instanceof Error ? error.message : String(error);
118
+ logger.error(`[generateObject] Error: ${message}`);
119
+ throw error;
120
+ }
121
+ }
122
+ var googleGenAIPlugin = {
123
+ name: "google-genai",
124
+ description: "Google Generative AI plugin for Gemini models",
125
+ config: {
126
+ GOOGLE_GENERATIVE_AI_API_KEY: process.env.GOOGLE_GENERATIVE_AI_API_KEY,
127
+ GOOGLE_SMALL_MODEL: process.env.GOOGLE_SMALL_MODEL,
128
+ GOOGLE_LARGE_MODEL: process.env.GOOGLE_LARGE_MODEL,
129
+ GOOGLE_IMAGE_MODEL: process.env.GOOGLE_IMAGE_MODEL,
130
+ GOOGLE_EMBEDDING_MODEL: process.env.GOOGLE_EMBEDDING_MODEL,
131
+ SMALL_MODEL: process.env.SMALL_MODEL,
132
+ LARGE_MODEL: process.env.LARGE_MODEL,
133
+ IMAGE_MODEL: process.env.IMAGE_MODEL
134
+ },
135
+ async init(_config, runtime) {
136
+ try {
137
+ const apiKey = getApiKey(runtime);
138
+ if (!apiKey) {
139
+ logger.warn(
140
+ "GOOGLE_GENERATIVE_AI_API_KEY is not set in environment - Google AI functionality will be limited"
141
+ );
142
+ return;
143
+ }
144
+ try {
145
+ const genAI = new GoogleGenAI({ apiKey });
146
+ const modelList = await genAI.models.list();
147
+ const models = [];
148
+ for await (const model of modelList) {
149
+ models.push(model);
150
+ }
151
+ logger.log(`Google AI API key validated successfully. Available models: ${models.length}`);
152
+ } catch (fetchError) {
153
+ const message = fetchError instanceof Error ? fetchError.message : String(fetchError);
154
+ logger.warn(`Error validating Google AI API key: ${message}`);
155
+ }
156
+ } catch (error) {
157
+ const message = error instanceof Error ? error.message : String(error);
158
+ logger.warn(
159
+ `Google AI plugin configuration issue: ${message} - You need to configure the GOOGLE_GENERATIVE_AI_API_KEY in your environment variables`
160
+ );
161
+ }
162
+ },
163
+ models: {
164
+ [ModelType.TEXT_SMALL]: async (runtime, { prompt, stopSequences = [] }) => {
165
+ const genAI = createGoogleGenAI(runtime);
166
+ if (!genAI) {
167
+ throw new Error("Google Generative AI client not initialized");
168
+ }
169
+ const modelName = getSmallModel(runtime);
170
+ const temperature = 0.7;
171
+ const maxOutputTokens = 8192;
172
+ logger.log(`[TEXT_SMALL] Using model: ${modelName}`);
173
+ logger.debug(`[TEXT_SMALL] Prompt: ${prompt}`);
174
+ try {
175
+ const systemInstruction = runtime.character.system || void 0;
176
+ const response = await genAI.models.generateContent({
177
+ model: modelName,
178
+ contents: prompt,
179
+ config: {
180
+ temperature,
181
+ topK: 40,
182
+ topP: 0.95,
183
+ maxOutputTokens,
184
+ stopSequences,
185
+ safetySettings: getSafetySettings(),
186
+ ...systemInstruction && { systemInstruction }
187
+ }
188
+ });
189
+ const text = response.text || "";
190
+ const promptTokens = await countTokens(prompt);
191
+ const completionTokens = await countTokens(text);
192
+ emitModelUsageEvent(runtime, ModelType.TEXT_SMALL, prompt, {
193
+ promptTokens,
194
+ completionTokens,
195
+ totalTokens: promptTokens + completionTokens
196
+ });
197
+ return text;
198
+ } catch (error) {
199
+ const message = error instanceof Error ? error.message : String(error);
200
+ logger.error(`[TEXT_SMALL] Error: ${message}`);
201
+ throw error;
202
+ }
203
+ },
204
+ [ModelType.TEXT_LARGE]: async (runtime, {
205
+ prompt,
206
+ stopSequences = [],
207
+ maxTokens = 8192,
208
+ temperature = 0.7,
209
+ frequencyPenalty = 0.7,
210
+ presencePenalty = 0.7
211
+ }) => {
212
+ const genAI = createGoogleGenAI(runtime);
213
+ if (!genAI) {
214
+ throw new Error("Google Generative AI client not initialized");
215
+ }
216
+ const modelName = getLargeModel(runtime);
217
+ logger.log(`[TEXT_LARGE] Using model: ${modelName}`);
218
+ logger.debug(`[TEXT_LARGE] Prompt: ${prompt}`);
219
+ try {
220
+ const systemInstruction = runtime.character.system || void 0;
221
+ const response = await genAI.models.generateContent({
222
+ model: modelName,
223
+ contents: prompt,
224
+ config: {
225
+ temperature,
226
+ topK: 40,
227
+ topP: 0.95,
228
+ maxOutputTokens: maxTokens,
229
+ stopSequences,
230
+ safetySettings: getSafetySettings(),
231
+ ...systemInstruction && { systemInstruction }
232
+ }
233
+ });
234
+ const text = response.text || "";
235
+ const promptTokens = await countTokens(prompt);
236
+ const completionTokens = await countTokens(text);
237
+ emitModelUsageEvent(runtime, ModelType.TEXT_LARGE, prompt, {
238
+ promptTokens,
239
+ completionTokens,
240
+ totalTokens: promptTokens + completionTokens
241
+ });
242
+ return text;
243
+ } catch (error) {
244
+ const message = error instanceof Error ? error.message : String(error);
245
+ logger.error(`[TEXT_LARGE] Error: ${message}`);
246
+ throw error;
247
+ }
248
+ },
249
+ [ModelType.TEXT_EMBEDDING]: async (runtime, params) => {
250
+ const genAI = createGoogleGenAI(runtime);
251
+ if (!genAI) {
252
+ throw new Error("Google Generative AI client not initialized");
253
+ }
254
+ const embeddingModelName = getEmbeddingModel(runtime);
255
+ logger.debug(`[TEXT_EMBEDDING] Using model: ${embeddingModelName}`);
256
+ if (params === null) {
257
+ logger.debug("Creating test embedding for initialization");
258
+ const dimension = 768;
259
+ const testVector = Array(dimension).fill(0);
260
+ testVector[0] = 0.1;
261
+ return testVector;
262
+ }
263
+ let text;
264
+ if (typeof params === "string") {
265
+ text = params;
266
+ } else if (typeof params === "object" && params.text) {
267
+ text = params.text;
268
+ } else {
269
+ logger.warn("Invalid input format for embedding");
270
+ const dimension = 768;
271
+ const fallbackVector = Array(dimension).fill(0);
272
+ fallbackVector[0] = 0.2;
273
+ return fallbackVector;
274
+ }
275
+ if (!text.trim()) {
276
+ logger.warn("Empty text for embedding");
277
+ const dimension = 768;
278
+ const emptyVector = Array(dimension).fill(0);
279
+ emptyVector[0] = 0.3;
280
+ return emptyVector;
281
+ }
282
+ try {
283
+ const response = await genAI.models.embedContent({
284
+ model: embeddingModelName,
285
+ contents: text
286
+ });
287
+ const embedding = response.embeddings?.[0]?.values || [];
288
+ const promptTokens = await countTokens(text);
289
+ emitModelUsageEvent(runtime, ModelType.TEXT_EMBEDDING, text, {
290
+ promptTokens,
291
+ completionTokens: 0,
292
+ totalTokens: promptTokens
293
+ });
294
+ logger.log(`Got embedding with length ${embedding.length}`);
295
+ return embedding;
296
+ } catch (error) {
297
+ const message = error instanceof Error ? error.message : String(error);
298
+ logger.error(`Error generating embedding: ${message}`);
299
+ const dimension = 768;
300
+ const errorVector = Array(dimension).fill(0);
301
+ errorVector[0] = 0.6;
302
+ return errorVector;
303
+ }
304
+ },
305
+ [ModelType.IMAGE_DESCRIPTION]: async (runtime, params) => {
306
+ const genAI = createGoogleGenAI(runtime);
307
+ if (!genAI) {
308
+ throw new Error("Google Generative AI client not initialized");
309
+ }
310
+ let imageUrl;
311
+ let promptText;
312
+ const modelName = getImageModel(runtime);
313
+ logger.log(`[IMAGE_DESCRIPTION] Using model: ${modelName}`);
314
+ if (typeof params === "string") {
315
+ imageUrl = params;
316
+ promptText = "Please analyze this image and provide a title and detailed description.";
317
+ } else {
318
+ imageUrl = params.imageUrl;
319
+ promptText = params.prompt || "Please analyze this image and provide a title and detailed description.";
320
+ }
321
+ try {
322
+ const imageResponse = await fetch(imageUrl);
323
+ if (!imageResponse.ok) {
324
+ throw new Error(`Failed to fetch image: ${imageResponse.statusText}`);
325
+ }
326
+ const imageData = await imageResponse.arrayBuffer();
327
+ const base64Image = Buffer.from(imageData).toString("base64");
328
+ const contentType = imageResponse.headers.get("content-type") || "image/jpeg";
329
+ const response = await genAI.models.generateContent({
330
+ model: modelName,
331
+ contents: [
332
+ {
333
+ role: "user",
334
+ parts: [
335
+ { text: promptText },
336
+ {
337
+ inlineData: {
338
+ mimeType: contentType,
339
+ data: base64Image
340
+ }
341
+ }
342
+ ]
343
+ }
344
+ ],
345
+ config: {
346
+ temperature: 0.7,
347
+ topK: 40,
348
+ topP: 0.95,
349
+ maxOutputTokens: 8192,
350
+ safetySettings: getSafetySettings()
351
+ }
352
+ });
353
+ const responseText = response.text || "";
354
+ logger.log("Received response for image description");
355
+ const isCustomPrompt = typeof params === "object" && params.prompt && params.prompt !== "Please analyze this image and provide a title and detailed description.";
356
+ if (isCustomPrompt) {
357
+ return responseText;
358
+ }
359
+ try {
360
+ const jsonResponse = JSON.parse(responseText);
361
+ if (jsonResponse.title && jsonResponse.description) {
362
+ return jsonResponse;
363
+ }
364
+ } catch (e) {
365
+ logger.debug(`Parsing as JSON failed, processing as text: ${e}`);
366
+ }
367
+ const titleMatch = responseText.match(/title[:\s]+(.+?)(?:\n|$)/i);
368
+ const title = titleMatch?.[1]?.trim() || "Image Analysis";
369
+ const description = responseText.replace(/title[:\s]+(.+?)(?:\n|$)/i, "").trim();
370
+ return { title, description };
371
+ } catch (error) {
372
+ const message = error instanceof Error ? error.message : String(error);
373
+ logger.error(`Error analyzing image: ${message}`);
374
+ return {
375
+ title: "Failed to analyze image",
376
+ description: `Error: ${message}`
377
+ };
378
+ }
379
+ },
380
+ [ModelType.OBJECT_SMALL]: async (runtime, params) => {
381
+ return generateObjectByModelType(runtime, params, ModelType.OBJECT_SMALL, getSmallModel);
382
+ },
383
+ [ModelType.OBJECT_LARGE]: async (runtime, params) => {
384
+ return generateObjectByModelType(runtime, params, ModelType.OBJECT_LARGE, getLargeModel);
385
+ }
386
+ },
387
+ tests: [
388
+ {
389
+ name: "google_genai_plugin_tests",
390
+ tests: [
391
+ {
392
+ name: "google_test_api_key_validation",
393
+ fn: async (runtime) => {
394
+ const apiKey = getApiKey(runtime);
395
+ if (!apiKey) {
396
+ throw new Error("GOOGLE_GENERATIVE_AI_API_KEY not set");
397
+ }
398
+ const genAI = new GoogleGenAI({ apiKey });
399
+ const modelList = await genAI.models.list();
400
+ const models = [];
401
+ for await (const model of modelList) {
402
+ models.push(model);
403
+ }
404
+ logger.log("Available models:", models.length);
405
+ }
406
+ },
407
+ {
408
+ name: "google_test_text_embedding",
409
+ fn: async (runtime) => {
410
+ try {
411
+ const embedding = await runtime.useModel(ModelType.TEXT_EMBEDDING, {
412
+ text: "Hello, world!"
413
+ });
414
+ logger.log("Embedding dimension:", embedding.length);
415
+ if (embedding.length === 0) {
416
+ throw new Error("Failed to generate embedding");
417
+ }
418
+ } catch (error) {
419
+ const message = error instanceof Error ? error.message : String(error);
420
+ logger.error(`Error in test_text_embedding: ${message}`);
421
+ throw error;
422
+ }
423
+ }
424
+ },
425
+ {
426
+ name: "google_test_text_small",
427
+ fn: async (runtime) => {
428
+ try {
429
+ const text = await runtime.useModel(ModelType.TEXT_SMALL, {
430
+ prompt: "What is the nature of reality in 10 words?"
431
+ });
432
+ if (text.length === 0) {
433
+ throw new Error("Failed to generate text");
434
+ }
435
+ logger.log("Generated with TEXT_SMALL:", text);
436
+ } catch (error) {
437
+ const message = error instanceof Error ? error.message : String(error);
438
+ logger.error(`Error in test_text_small: ${message}`);
439
+ throw error;
440
+ }
441
+ }
442
+ },
443
+ {
444
+ name: "google_test_text_large",
445
+ fn: async (runtime) => {
446
+ try {
447
+ const text = await runtime.useModel(ModelType.TEXT_LARGE, {
448
+ prompt: "Explain quantum mechanics in simple terms."
449
+ });
450
+ if (text.length === 0) {
451
+ throw new Error("Failed to generate text");
452
+ }
453
+ logger.log("Generated with TEXT_LARGE:", text.substring(0, 100) + "...");
454
+ } catch (error) {
455
+ const message = error instanceof Error ? error.message : String(error);
456
+ logger.error(`Error in test_text_large: ${message}`);
457
+ throw error;
458
+ }
459
+ }
460
+ },
461
+ {
462
+ name: "google_test_image_description",
463
+ fn: async (runtime) => {
464
+ try {
465
+ const result = await runtime.useModel(
466
+ ModelType.IMAGE_DESCRIPTION,
467
+ "https://upload.wikimedia.org/wikipedia/commons/thumb/1/1c/Vitalik_Buterin_TechCrunch_London_2015_%28cropped%29.jpg/537px-Vitalik_Buterin_TechCrunch_London_2015_%28cropped%29.jpg"
468
+ );
469
+ if (result && typeof result === "object" && "title" in result && "description" in result) {
470
+ logger.log("Image description:", result);
471
+ } else {
472
+ logger.error("Invalid image description result format:", result);
473
+ }
474
+ } catch (error) {
475
+ const message = error instanceof Error ? error.message : String(error);
476
+ logger.error(`Error in test_image_description: ${message}`);
477
+ throw error;
478
+ }
479
+ }
480
+ },
481
+ {
482
+ name: "google_test_object_generation",
483
+ fn: async (runtime) => {
484
+ try {
485
+ const schema = {
486
+ type: "object",
487
+ properties: {
488
+ name: { type: "string" },
489
+ age: { type: "number" },
490
+ hobbies: { type: "array", items: { type: "string" } }
491
+ },
492
+ required: ["name", "age", "hobbies"]
493
+ };
494
+ const result = await runtime.useModel(ModelType.OBJECT_SMALL, {
495
+ prompt: "Generate a person profile with name, age, and hobbies.",
496
+ schema
497
+ });
498
+ logger.log("Generated object:", result);
499
+ if (!result.name || !result.age || !result.hobbies) {
500
+ throw new Error("Generated object missing required fields");
501
+ }
502
+ } catch (error) {
503
+ const message = error instanceof Error ? error.message : String(error);
504
+ logger.error(`Error in test_object_generation: ${message}`);
505
+ throw error;
506
+ }
507
+ }
508
+ }
509
+ ]
510
+ }
511
+ ]
512
+ };
513
+ var index_default = googleGenAIPlugin;
514
+ export {
515
+ index_default as default,
516
+ googleGenAIPlugin
517
+ };
518
+ //# sourceMappingURL=index.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"sources":["../src/index.ts"],"sourcesContent":["import { GoogleGenAI, HarmCategory, HarmBlockThreshold } from '@google/genai';\nimport type {\n IAgentRuntime,\n ModelTypeName,\n ObjectGenerationParams,\n Plugin,\n GenerateTextParams,\n ImageDescriptionParams,\n TextEmbeddingParams,\n} from '@elizaos/core';\nimport { EventType, logger, ModelType } from '@elizaos/core';\nimport { fetch } from 'undici';\n\n/**\n * Retrieves a configuration setting from the runtime, falling back to environment variables or a default value if not found.\n *\n * @param key - The name of the setting to retrieve.\n * @param defaultValue - The value to return if the setting is not found in the runtime or environment.\n * @returns The resolved setting value, or {@link defaultValue} if not found.\n */\nfunction getSetting(\n runtime: IAgentRuntime,\n key: string,\n defaultValue?: string\n): string | undefined {\n return runtime.getSetting(key) ?? process.env[key] ?? defaultValue;\n}\n\n/**\n * Helper function to get the API key for Google AI\n *\n * @param runtime The runtime context\n * @returns The configured API key\n */\nfunction getApiKey(runtime: IAgentRuntime): string | undefined {\n return getSetting(runtime, 'GOOGLE_GENERATIVE_AI_API_KEY');\n}\n\n/**\n * Helper function to get the small model name with fallbacks\n *\n * @param runtime The runtime context\n * @returns The configured small model name\n */\nfunction getSmallModel(runtime: IAgentRuntime): string {\n return (\n getSetting(runtime, 'GOOGLE_SMALL_MODEL') ??\n getSetting(runtime, 'SMALL_MODEL', 'gemini-2.0-flash-001') ??\n 'gemini-2.0-flash-001'\n );\n}\n\n/**\n * Helper function to get the large model name with fallbacks\n *\n * @param runtime The runtime context\n * @returns The configured large model name\n */\nfunction getLargeModel(runtime: IAgentRuntime): string {\n return (\n getSetting(runtime, 'GOOGLE_LARGE_MODEL') ??\n getSetting(runtime, 'LARGE_MODEL', 'gemini-2.0-flash-001') ??\n 
'gemini-2.0-flash-001'\n );\n}\n\n/**\n * Helper function to get the image model name with fallbacks\n *\n * @param runtime The runtime context\n * @returns The configured image model name\n */\nfunction getImageModel(runtime: IAgentRuntime): string {\n return (\n getSetting(runtime, 'GOOGLE_IMAGE_MODEL') ??\n getSetting(runtime, 'IMAGE_MODEL', 'gemini-2.0-flash-001') ??\n 'gemini-2.0-flash-001'\n );\n}\n\n/**\n * Helper function to get the embedding model name with fallbacks\n *\n * @param runtime The runtime context\n * @returns The configured embedding model name\n */\nfunction getEmbeddingModel(runtime: IAgentRuntime): string {\n return (\n getSetting(runtime, 'GOOGLE_EMBEDDING_MODEL', 'text-embedding-004') ?? 'text-embedding-004'\n );\n}\n\n/**\n * Create a Google Generative AI client instance with proper configuration\n *\n * @param runtime The runtime context\n * @returns Configured Google Generative AI instance\n */\nfunction createGoogleGenAI(runtime: IAgentRuntime): GoogleGenAI | null {\n const apiKey = getApiKey(runtime);\n if (!apiKey) {\n logger.error('Google Generative AI API Key is missing');\n return null;\n }\n\n return new GoogleGenAI({ apiKey });\n}\n\n/**\n * Convert safety settings to Google format\n */\nfunction getSafetySettings() {\n return [\n {\n category: HarmCategory.HARM_CATEGORY_HARASSMENT,\n threshold: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,\n },\n {\n category: HarmCategory.HARM_CATEGORY_HATE_SPEECH,\n threshold: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,\n },\n {\n category: HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,\n threshold: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,\n },\n {\n category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,\n threshold: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,\n },\n ];\n}\n\n/**\n * Emits a model usage event\n * @param runtime The runtime context\n * @param type The model type\n * @param prompt The prompt used\n * @param usage The usage data\n */\nfunction emitModelUsageEvent(\n runtime: 
IAgentRuntime,\n type: ModelTypeName,\n prompt: string,\n usage: { promptTokens: number; completionTokens: number; totalTokens: number }\n) {\n runtime.emitEvent(EventType.MODEL_USED, {\n provider: 'google',\n type,\n prompt,\n tokens: {\n prompt: usage.promptTokens,\n completion: usage.completionTokens,\n total: usage.totalTokens,\n },\n });\n}\n\n/**\n * Helper function to count tokens for a given text (estimation)\n */\nasync function countTokens(text: string): Promise<number> {\n // Rough estimation: ~1 token per 4 characters\n return Math.ceil(text.length / 4);\n}\n\n/**\n * Helper function to generate objects using specified model type\n */\nasync function generateObjectByModelType(\n runtime: IAgentRuntime,\n params: ObjectGenerationParams,\n modelType: string,\n getModelFn: (runtime: IAgentRuntime) => string\n): Promise<any> {\n const genAI = createGoogleGenAI(runtime);\n if (!genAI) {\n throw new Error('Google Generative AI client not initialized');\n }\n\n const modelName = getModelFn(runtime);\n const temperature = params.temperature ?? 
0.1;\n\n logger.info(`Using ${modelType} model: ${modelName}`);\n\n try {\n // Add schema instructions to prompt if provided\n let enhancedPrompt = params.prompt;\n if (params.schema) {\n enhancedPrompt += `\\n\\nPlease respond with a JSON object that follows this schema:\\n${JSON.stringify(params.schema, null, 2)}`;\n }\n\n const response = await genAI.models.generateContent({\n model: modelName,\n contents: enhancedPrompt,\n config: {\n temperature,\n topK: 40,\n topP: 0.95,\n maxOutputTokens: 8192,\n responseMimeType: 'application/json',\n safetySettings: getSafetySettings(),\n },\n });\n\n const text = response.text || '';\n\n // Count tokens for usage tracking\n const promptTokens = await countTokens(enhancedPrompt);\n const completionTokens = await countTokens(text);\n\n emitModelUsageEvent(runtime, modelType as ModelTypeName, params.prompt, {\n promptTokens,\n completionTokens,\n totalTokens: promptTokens + completionTokens,\n });\n\n try {\n return JSON.parse(text);\n } catch (parseError) {\n logger.error('Failed to parse JSON response:', parseError);\n // Try to extract JSON from the response\n const jsonMatch = text.match(/\\{[\\s\\S]*\\}/);\n if (jsonMatch) {\n try {\n return JSON.parse(jsonMatch[0]);\n } catch (secondParseError) {\n throw new Error('Failed to parse JSON from response');\n }\n }\n throw parseError;\n }\n } catch (error: unknown) {\n const message = error instanceof Error ? 
error.message : String(error);\n logger.error(`[generateObject] Error: ${message}`);\n throw error;\n }\n}\n\n/**\n * Defines the Google Generative AI plugin with its name, description, and configuration options.\n * @type {Plugin}\n */\nexport const googleGenAIPlugin: Plugin = {\n name: 'google-genai',\n description: 'Google Generative AI plugin for Gemini models',\n config: {\n GOOGLE_GENERATIVE_AI_API_KEY: process.env.GOOGLE_GENERATIVE_AI_API_KEY,\n GOOGLE_SMALL_MODEL: process.env.GOOGLE_SMALL_MODEL,\n GOOGLE_LARGE_MODEL: process.env.GOOGLE_LARGE_MODEL,\n GOOGLE_IMAGE_MODEL: process.env.GOOGLE_IMAGE_MODEL,\n GOOGLE_EMBEDDING_MODEL: process.env.GOOGLE_EMBEDDING_MODEL,\n SMALL_MODEL: process.env.SMALL_MODEL,\n LARGE_MODEL: process.env.LARGE_MODEL,\n IMAGE_MODEL: process.env.IMAGE_MODEL,\n },\n async init(_config, runtime) {\n try {\n const apiKey = getApiKey(runtime);\n if (!apiKey) {\n logger.warn(\n 'GOOGLE_GENERATIVE_AI_API_KEY is not set in environment - Google AI functionality will be limited'\n );\n return;\n }\n\n // Test the API key by listing models\n try {\n const genAI = new GoogleGenAI({ apiKey });\n const modelList = await genAI.models.list();\n const models = [];\n for await (const model of modelList) {\n models.push(model);\n }\n logger.log(`Google AI API key validated successfully. Available models: ${models.length}`);\n } catch (fetchError: unknown) {\n const message = fetchError instanceof Error ? fetchError.message : String(fetchError);\n logger.warn(`Error validating Google AI API key: ${message}`);\n }\n } catch (error: unknown) {\n const message = error instanceof Error ? 
error.message : String(error);\n logger.warn(\n `Google AI plugin configuration issue: ${message} - You need to configure the GOOGLE_GENERATIVE_AI_API_KEY in your environment variables`\n );\n }\n },\n models: {\n [ModelType.TEXT_SMALL]: async (\n runtime: IAgentRuntime,\n { prompt, stopSequences = [] }: GenerateTextParams\n ) => {\n const genAI = createGoogleGenAI(runtime);\n if (!genAI) {\n throw new Error('Google Generative AI client not initialized');\n }\n\n const modelName = getSmallModel(runtime);\n const temperature = 0.7;\n const maxOutputTokens = 8192;\n\n logger.log(`[TEXT_SMALL] Using model: ${modelName}`);\n logger.debug(`[TEXT_SMALL] Prompt: ${prompt}`);\n\n try {\n const systemInstruction = runtime.character.system || undefined;\n const response = await genAI.models.generateContent({\n model: modelName,\n contents: prompt,\n config: {\n temperature,\n topK: 40,\n topP: 0.95,\n maxOutputTokens,\n stopSequences,\n safetySettings: getSafetySettings(),\n ...(systemInstruction && { systemInstruction }),\n },\n });\n\n const text = response.text || '';\n\n // Count tokens for usage tracking\n const promptTokens = await countTokens(prompt);\n const completionTokens = await countTokens(text);\n\n emitModelUsageEvent(runtime, ModelType.TEXT_SMALL, prompt, {\n promptTokens,\n completionTokens,\n totalTokens: promptTokens + completionTokens,\n });\n\n return text;\n } catch (error: unknown) {\n const message = error instanceof Error ? 
error.message : String(error);\n logger.error(`[TEXT_SMALL] Error: ${message}`);\n throw error;\n }\n },\n [ModelType.TEXT_LARGE]: async (\n runtime: IAgentRuntime,\n {\n prompt,\n stopSequences = [],\n maxTokens = 8192,\n temperature = 0.7,\n frequencyPenalty = 0.7,\n presencePenalty = 0.7,\n }: GenerateTextParams\n ) => {\n const genAI = createGoogleGenAI(runtime);\n if (!genAI) {\n throw new Error('Google Generative AI client not initialized');\n }\n\n const modelName = getLargeModel(runtime);\n\n logger.log(`[TEXT_LARGE] Using model: ${modelName}`);\n logger.debug(`[TEXT_LARGE] Prompt: ${prompt}`);\n\n try {\n const systemInstruction = runtime.character.system || undefined;\n const response = await genAI.models.generateContent({\n model: modelName,\n contents: prompt,\n config: {\n temperature,\n topK: 40,\n topP: 0.95,\n maxOutputTokens: maxTokens,\n stopSequences,\n safetySettings: getSafetySettings(),\n ...(systemInstruction && { systemInstruction }),\n },\n });\n\n const text = response.text || '';\n\n // Count tokens for usage tracking\n const promptTokens = await countTokens(prompt);\n const completionTokens = await countTokens(text);\n\n emitModelUsageEvent(runtime, ModelType.TEXT_LARGE, prompt, {\n promptTokens,\n completionTokens,\n totalTokens: promptTokens + completionTokens,\n });\n\n return text;\n } catch (error: unknown) {\n const message = error instanceof Error ? 
error.message : String(error);\n logger.error(`[TEXT_LARGE] Error: ${message}`);\n throw error;\n }\n },\n [ModelType.TEXT_EMBEDDING]: async (\n runtime: IAgentRuntime,\n params: TextEmbeddingParams | string | null\n ): Promise<number[]> => {\n const genAI = createGoogleGenAI(runtime);\n if (!genAI) {\n throw new Error('Google Generative AI client not initialized');\n }\n\n const embeddingModelName = getEmbeddingModel(runtime);\n logger.debug(`[TEXT_EMBEDDING] Using model: ${embeddingModelName}`);\n\n // Handle null case for initialization\n if (params === null) {\n logger.debug('Creating test embedding for initialization');\n // Return 768-dimensional vector for text-embedding-004\n const dimension = 768;\n const testVector = Array(dimension).fill(0);\n testVector[0] = 0.1;\n return testVector;\n }\n\n // Extract text from params\n let text: string;\n if (typeof params === 'string') {\n text = params;\n } else if (typeof params === 'object' && params.text) {\n text = params.text;\n } else {\n logger.warn('Invalid input format for embedding');\n const dimension = 768;\n const fallbackVector = Array(dimension).fill(0);\n fallbackVector[0] = 0.2;\n return fallbackVector;\n }\n\n if (!text.trim()) {\n logger.warn('Empty text for embedding');\n const dimension = 768;\n const emptyVector = Array(dimension).fill(0);\n emptyVector[0] = 0.3;\n return emptyVector;\n }\n\n try {\n const response = await genAI.models.embedContent({\n model: embeddingModelName,\n contents: text,\n });\n\n const embedding = response.embeddings?.[0]?.values || [];\n\n // Count tokens for usage tracking\n const promptTokens = await countTokens(text);\n\n emitModelUsageEvent(runtime, ModelType.TEXT_EMBEDDING, text, {\n promptTokens,\n completionTokens: 0,\n totalTokens: promptTokens,\n });\n\n logger.log(`Got embedding with length ${embedding.length}`);\n return embedding;\n } catch (error: unknown) {\n const message = error instanceof Error ? 
error.message : String(error);\n logger.error(`Error generating embedding: ${message}`);\n // Return error vector\n const dimension = 768;\n const errorVector = Array(dimension).fill(0);\n errorVector[0] = 0.6;\n return errorVector;\n }\n },\n [ModelType.IMAGE_DESCRIPTION]: async (\n runtime: IAgentRuntime,\n params: ImageDescriptionParams | string\n ) => {\n const genAI = createGoogleGenAI(runtime);\n if (!genAI) {\n throw new Error('Google Generative AI client not initialized');\n }\n\n let imageUrl: string;\n let promptText: string | undefined;\n const modelName = getImageModel(runtime);\n logger.log(`[IMAGE_DESCRIPTION] Using model: ${modelName}`);\n\n if (typeof params === 'string') {\n imageUrl = params;\n promptText = 'Please analyze this image and provide a title and detailed description.';\n } else {\n imageUrl = params.imageUrl;\n promptText =\n params.prompt ||\n 'Please analyze this image and provide a title and detailed description.';\n }\n\n try {\n // Fetch image data\n const imageResponse = await fetch(imageUrl);\n if (!imageResponse.ok) {\n throw new Error(`Failed to fetch image: ${imageResponse.statusText}`);\n }\n\n const imageData = await imageResponse.arrayBuffer();\n const base64Image = Buffer.from(imageData).toString('base64');\n\n // Determine MIME type from URL or response headers\n const contentType = imageResponse.headers.get('content-type') || 'image/jpeg';\n\n const response = await genAI.models.generateContent({\n model: modelName,\n contents: [\n {\n role: 'user',\n parts: [\n { text: promptText },\n {\n inlineData: {\n mimeType: contentType,\n data: base64Image,\n },\n },\n ],\n },\n ],\n config: {\n temperature: 0.7,\n topK: 40,\n topP: 0.95,\n maxOutputTokens: 8192,\n safetySettings: getSafetySettings(),\n },\n });\n\n const responseText = response.text || '';\n\n logger.log('Received response for image description');\n\n // Check if a custom prompt was provided\n const isCustomPrompt =\n typeof params === 'object' &&\n 
params.prompt &&\n params.prompt !==\n 'Please analyze this image and provide a title and detailed description.';\n\n // If custom prompt is used, return the raw content\n if (isCustomPrompt) {\n return responseText;\n }\n\n // Try to parse the response as JSON first\n try {\n const jsonResponse = JSON.parse(responseText);\n if (jsonResponse.title && jsonResponse.description) {\n return jsonResponse;\n }\n } catch (e) {\n // If not valid JSON, process as text\n logger.debug(`Parsing as JSON failed, processing as text: ${e}`);\n }\n\n // Extract title and description from text format\n const titleMatch = responseText.match(/title[:\\s]+(.+?)(?:\\n|$)/i);\n const title = titleMatch?.[1]?.trim() || 'Image Analysis';\n const description = responseText.replace(/title[:\\s]+(.+?)(?:\\n|$)/i, '').trim();\n\n return { title, description };\n } catch (error: unknown) {\n const message = error instanceof Error ? error.message : String(error);\n logger.error(`Error analyzing image: ${message}`);\n return {\n title: 'Failed to analyze image',\n description: `Error: ${message}`,\n };\n }\n },\n [ModelType.OBJECT_SMALL]: async (runtime: IAgentRuntime, params: ObjectGenerationParams) => {\n return generateObjectByModelType(runtime, params, ModelType.OBJECT_SMALL, getSmallModel);\n },\n [ModelType.OBJECT_LARGE]: async (runtime: IAgentRuntime, params: ObjectGenerationParams) => {\n return generateObjectByModelType(runtime, params, ModelType.OBJECT_LARGE, getLargeModel);\n },\n },\n tests: [\n {\n name: 'google_genai_plugin_tests',\n tests: [\n {\n name: 'google_test_api_key_validation',\n fn: async (runtime: IAgentRuntime) => {\n const apiKey = getApiKey(runtime);\n if (!apiKey) {\n throw new Error('GOOGLE_GENERATIVE_AI_API_KEY not set');\n }\n const genAI = new GoogleGenAI({ apiKey });\n const modelList = await genAI.models.list();\n const models = [];\n for await (const model of modelList) {\n models.push(model);\n }\n logger.log('Available models:', models.length);\n },\n },\n 
{\n name: 'google_test_text_embedding',\n fn: async (runtime: IAgentRuntime) => {\n try {\n const embedding = await runtime.useModel(ModelType.TEXT_EMBEDDING, {\n text: 'Hello, world!',\n });\n logger.log('Embedding dimension:', embedding.length);\n if (embedding.length === 0) {\n throw new Error('Failed to generate embedding');\n }\n } catch (error: unknown) {\n const message = error instanceof Error ? error.message : String(error);\n logger.error(`Error in test_text_embedding: ${message}`);\n throw error;\n }\n },\n },\n {\n name: 'google_test_text_small',\n fn: async (runtime: IAgentRuntime) => {\n try {\n const text = await runtime.useModel(ModelType.TEXT_SMALL, {\n prompt: 'What is the nature of reality in 10 words?',\n });\n if (text.length === 0) {\n throw new Error('Failed to generate text');\n }\n logger.log('Generated with TEXT_SMALL:', text);\n } catch (error: unknown) {\n const message = error instanceof Error ? error.message : String(error);\n logger.error(`Error in test_text_small: ${message}`);\n throw error;\n }\n },\n },\n {\n name: 'google_test_text_large',\n fn: async (runtime: IAgentRuntime) => {\n try {\n const text = await runtime.useModel(ModelType.TEXT_LARGE, {\n prompt: 'Explain quantum mechanics in simple terms.',\n });\n if (text.length === 0) {\n throw new Error('Failed to generate text');\n }\n logger.log('Generated with TEXT_LARGE:', text.substring(0, 100) + '...');\n } catch (error: unknown) {\n const message = error instanceof Error ? 
error.message : String(error);\n logger.error(`Error in test_text_large: ${message}`);\n throw error;\n }\n },\n },\n {\n name: 'google_test_image_description',\n fn: async (runtime: IAgentRuntime) => {\n try {\n const result = await runtime.useModel(\n ModelType.IMAGE_DESCRIPTION,\n 'https://upload.wikimedia.org/wikipedia/commons/thumb/1/1c/Vitalik_Buterin_TechCrunch_London_2015_%28cropped%29.jpg/537px-Vitalik_Buterin_TechCrunch_London_2015_%28cropped%29.jpg'\n );\n\n if (\n result &&\n typeof result === 'object' &&\n 'title' in result &&\n 'description' in result\n ) {\n logger.log('Image description:', result);\n } else {\n logger.error('Invalid image description result format:', result);\n }\n } catch (error: unknown) {\n const message = error instanceof Error ? error.message : String(error);\n logger.error(`Error in test_image_description: ${message}`);\n throw error;\n }\n },\n },\n {\n name: 'google_test_object_generation',\n fn: async (runtime: IAgentRuntime) => {\n try {\n const schema = {\n type: 'object',\n properties: {\n name: { type: 'string' },\n age: { type: 'number' },\n hobbies: { type: 'array', items: { type: 'string' } },\n },\n required: ['name', 'age', 'hobbies'],\n };\n\n const result = await runtime.useModel(ModelType.OBJECT_SMALL, {\n prompt: 'Generate a person profile with name, age, and hobbies.',\n schema,\n });\n\n logger.log('Generated object:', result);\n\n if (!result.name || !result.age || !result.hobbies) {\n throw new Error('Generated object missing required fields');\n }\n } catch (error: unknown) {\n const message = error instanceof Error ? 
error.message : String(error);\n logger.error(`Error in test_object_generation: ${message}`);\n throw error;\n }\n },\n },\n ],\n },\n ],\n};\n\nexport default googleGenAIPlugin;\n"],"mappings":";AAAA,SAAS,aAAa,cAAc,0BAA0B;AAU9D,SAAS,WAAW,QAAQ,iBAAiB;AAC7C,SAAS,aAAa;AAStB,SAAS,WACP,SACA,KACA,cACoB;AACpB,SAAO,QAAQ,WAAW,GAAG,KAAK,QAAQ,IAAI,GAAG,KAAK;AACxD;AAQA,SAAS,UAAU,SAA4C;AAC7D,SAAO,WAAW,SAAS,8BAA8B;AAC3D;AAQA,SAAS,cAAc,SAAgC;AACrD,SACE,WAAW,SAAS,oBAAoB,KACxC,WAAW,SAAS,eAAe,sBAAsB,KACzD;AAEJ;AAQA,SAAS,cAAc,SAAgC;AACrD,SACE,WAAW,SAAS,oBAAoB,KACxC,WAAW,SAAS,eAAe,sBAAsB,KACzD;AAEJ;AAQA,SAAS,cAAc,SAAgC;AACrD,SACE,WAAW,SAAS,oBAAoB,KACxC,WAAW,SAAS,eAAe,sBAAsB,KACzD;AAEJ;AAQA,SAAS,kBAAkB,SAAgC;AACzD,SACE,WAAW,SAAS,0BAA0B,oBAAoB,KAAK;AAE3E;AAQA,SAAS,kBAAkB,SAA4C;AACrE,QAAM,SAAS,UAAU,OAAO;AAChC,MAAI,CAAC,QAAQ;AACX,WAAO,MAAM,yCAAyC;AACtD,WAAO;AAAA,EACT;AAEA,SAAO,IAAI,YAAY,EAAE,OAAO,CAAC;AACnC;AAKA,SAAS,oBAAoB;AAC3B,SAAO;AAAA,IACL;AAAA,MACE,UAAU,aAAa;AAAA,MACvB,WAAW,mBAAmB;AAAA,IAChC;AAAA,IACA;AAAA,MACE,UAAU,aAAa;AAAA,MACvB,WAAW,mBAAmB;AAAA,IAChC;AAAA,IACA;AAAA,MACE,UAAU,aAAa;AAAA,MACvB,WAAW,mBAAmB;AAAA,IAChC;AAAA,IACA;AAAA,MACE,UAAU,aAAa;AAAA,MACvB,WAAW,mBAAmB;AAAA,IAChC;AAAA,EACF;AACF;AASA,SAAS,oBACP,SACA,MACA,QACA,OACA;AACA,UAAQ,UAAU,UAAU,YAAY;AAAA,IACtC,UAAU;AAAA,IACV;AAAA,IACA;AAAA,IACA,QAAQ;AAAA,MACN,QAAQ,MAAM;AAAA,MACd,YAAY,MAAM;AAAA,MAClB,OAAO,MAAM;AAAA,IACf;AAAA,EACF,CAAC;AACH;AAKA,eAAe,YAAY,MAA+B;AAExD,SAAO,KAAK,KAAK,KAAK,SAAS,CAAC;AAClC;AAKA,eAAe,0BACb,SACA,QACA,WACA,YACc;AACd,QAAM,QAAQ,kBAAkB,OAAO;AACvC,MAAI,CAAC,OAAO;AACV,UAAM,IAAI,MAAM,6CAA6C;AAAA,EAC/D;AAEA,QAAM,YAAY,WAAW,OAAO;AACpC,QAAM,cAAc,OAAO,eAAe;AAE1C,SAAO,KAAK,SAAS,SAAS,WAAW,SAAS,EAAE;AAEpD,MAAI;AAEF,QAAI,iBAAiB,OAAO;AAC5B,QAAI,OAAO,QAAQ;AACjB,wBAAkB;AAAA;AAAA;AAAA,EAAoE,KAAK,UAAU,OAAO,QAAQ,MAAM,CAAC,CAAC;AAAA,IAC9H;AAEA,UAAM,WAAW,MAAM,MAAM,OAAO,gBAAgB;AAAA,MAClD,OAAO;AAAA,MACP,UAAU;AAAA,MACV,QAAQ;AAAA,QACN;AAAA,QACA,MAAM;AAAA,QACN,MAAM;AAAA,QACN,iBAAiB;AAAA,QACjB,kBAAkB;AAAA,QAClB,gBAAgB,kBAAkB;AAAA,M
ACpC;AAAA,IACF,CAAC;AAED,UAAM,OAAO,SAAS,QAAQ;AAG9B,UAAM,eAAe,MAAM,YAAY,cAAc;AACrD,UAAM,mBAAmB,MAAM,YAAY,IAAI;AAE/C,wBAAoB,SAAS,WAA4B,OAAO,QAAQ;AAAA,MACtE;AAAA,MACA;AAAA,MACA,aAAa,eAAe;AAAA,IAC9B,CAAC;AAED,QAAI;AACF,aAAO,KAAK,MAAM,IAAI;AAAA,IACxB,SAAS,YAAY;AACnB,aAAO,MAAM,kCAAkC,UAAU;AAEzD,YAAM,YAAY,KAAK,MAAM,aAAa;AAC1C,UAAI,WAAW;AACb,YAAI;AACF,iBAAO,KAAK,MAAM,UAAU,CAAC,CAAC;AAAA,QAChC,SAAS,kBAAkB;AACzB,gBAAM,IAAI,MAAM,oCAAoC;AAAA,QACtD;AAAA,MACF;AACA,YAAM;AAAA,IACR;AAAA,EACF,SAAS,OAAgB;AACvB,UAAM,UAAU,iBAAiB,QAAQ,MAAM,UAAU,OAAO,KAAK;AACrE,WAAO,MAAM,2BAA2B,OAAO,EAAE;AACjD,UAAM;AAAA,EACR;AACF;AAMO,IAAM,oBAA4B;AAAA,EACvC,MAAM;AAAA,EACN,aAAa;AAAA,EACb,QAAQ;AAAA,IACN,8BAA8B,QAAQ,IAAI;AAAA,IAC1C,oBAAoB,QAAQ,IAAI;AAAA,IAChC,oBAAoB,QAAQ,IAAI;AAAA,IAChC,oBAAoB,QAAQ,IAAI;AAAA,IAChC,wBAAwB,QAAQ,IAAI;AAAA,IACpC,aAAa,QAAQ,IAAI;AAAA,IACzB,aAAa,QAAQ,IAAI;AAAA,IACzB,aAAa,QAAQ,IAAI;AAAA,EAC3B;AAAA,EACA,MAAM,KAAK,SAAS,SAAS;AAC3B,QAAI;AACF,YAAM,SAAS,UAAU,OAAO;AAChC,UAAI,CAAC,QAAQ;AACX,eAAO;AAAA,UACL;AAAA,QACF;AACA;AAAA,MACF;AAGA,UAAI;AACF,cAAM,QAAQ,IAAI,YAAY,EAAE,OAAO,CAAC;AACxC,cAAM,YAAY,MAAM,MAAM,OAAO,KAAK;AAC1C,cAAM,SAAS,CAAC;AAChB,yBAAiB,SAAS,WAAW;AACnC,iBAAO,KAAK,KAAK;AAAA,QACnB;AACA,eAAO,IAAI,+DAA+D,OAAO,MAAM,EAAE;AAAA,MAC3F,SAAS,YAAqB;AAC5B,cAAM,UAAU,sBAAsB,QAAQ,WAAW,UAAU,OAAO,UAAU;AACpF,eAAO,KAAK,uCAAuC,OAAO,EAAE;AAAA,MAC9D;AAAA,IACF,SAAS,OAAgB;AACvB,YAAM,UAAU,iBAAiB,QAAQ,MAAM,UAAU,OAAO,KAAK;AACrE,aAAO;AAAA,QACL,yCAAyC,OAAO;AAAA,MAClD;AAAA,IACF;AAAA,EACF;AAAA,EACA,QAAQ;AAAA,IACN,CAAC,UAAU,UAAU,GAAG,OACtB,SACA,EAAE,QAAQ,gBAAgB,CAAC,EAAE,MAC1B;AACH,YAAM,QAAQ,kBAAkB,OAAO;AACvC,UAAI,CAAC,OAAO;AACV,cAAM,IAAI,MAAM,6CAA6C;AAAA,MAC/D;AAEA,YAAM,YAAY,cAAc,OAAO;AACvC,YAAM,cAAc;AACpB,YAAM,kBAAkB;AAExB,aAAO,IAAI,6BAA6B,SAAS,EAAE;AACnD,aAAO,MAAM,wBAAwB,MAAM,EAAE;AAE7C,UAAI;AACF,cAAM,oBAAoB,QAAQ,UAAU,UAAU;AACtD,cAAM,WAAW,MAAM,MAAM,OAAO,gBAAgB;AAAA,UAClD,OAAO;AAAA,UACP,UAAU;AAAA,UACV,QAAQ;AAAA,YACN;AAAA,YACA,MAAM;AAAA,YACN,MAAM;AAAA,YACN;AAAA,YACA;AAAA,YACA,gBAAgB,kBAAkB;AAAA,YAClC,GAAI,qBAAq
B,EAAE,kBAAkB;AAAA,UAC/C;AAAA,QACF,CAAC;AAED,cAAM,OAAO,SAAS,QAAQ;AAG9B,cAAM,eAAe,MAAM,YAAY,MAAM;AAC7C,cAAM,mBAAmB,MAAM,YAAY,IAAI;AAE/C,4BAAoB,SAAS,UAAU,YAAY,QAAQ;AAAA,UACzD;AAAA,UACA;AAAA,UACA,aAAa,eAAe;AAAA,QAC9B,CAAC;AAED,eAAO;AAAA,MACT,SAAS,OAAgB;AACvB,cAAM,UAAU,iBAAiB,QAAQ,MAAM,UAAU,OAAO,KAAK;AACrE,eAAO,MAAM,uBAAuB,OAAO,EAAE;AAC7C,cAAM;AAAA,MACR;AAAA,IACF;AAAA,IACA,CAAC,UAAU,UAAU,GAAG,OACtB,SACA;AAAA,MACE;AAAA,MACA,gBAAgB,CAAC;AAAA,MACjB,YAAY;AAAA,MACZ,cAAc;AAAA,MACd,mBAAmB;AAAA,MACnB,kBAAkB;AAAA,IACpB,MACG;AACH,YAAM,QAAQ,kBAAkB,OAAO;AACvC,UAAI,CAAC,OAAO;AACV,cAAM,IAAI,MAAM,6CAA6C;AAAA,MAC/D;AAEA,YAAM,YAAY,cAAc,OAAO;AAEvC,aAAO,IAAI,6BAA6B,SAAS,EAAE;AACnD,aAAO,MAAM,wBAAwB,MAAM,EAAE;AAE7C,UAAI;AACF,cAAM,oBAAoB,QAAQ,UAAU,UAAU;AACtD,cAAM,WAAW,MAAM,MAAM,OAAO,gBAAgB;AAAA,UAClD,OAAO;AAAA,UACP,UAAU;AAAA,UACV,QAAQ;AAAA,YACN;AAAA,YACA,MAAM;AAAA,YACN,MAAM;AAAA,YACN,iBAAiB;AAAA,YACjB;AAAA,YACA,gBAAgB,kBAAkB;AAAA,YAClC,GAAI,qBAAqB,EAAE,kBAAkB;AAAA,UAC/C;AAAA,QACF,CAAC;AAED,cAAM,OAAO,SAAS,QAAQ;AAG9B,cAAM,eAAe,MAAM,YAAY,MAAM;AAC7C,cAAM,mBAAmB,MAAM,YAAY,IAAI;AAE/C,4BAAoB,SAAS,UAAU,YAAY,QAAQ;AAAA,UACzD;AAAA,UACA;AAAA,UACA,aAAa,eAAe;AAAA,QAC9B,CAAC;AAED,eAAO;AAAA,MACT,SAAS,OAAgB;AACvB,cAAM,UAAU,iBAAiB,QAAQ,MAAM,UAAU,OAAO,KAAK;AACrE,eAAO,MAAM,uBAAuB,OAAO,EAAE;AAC7C,cAAM;AAAA,MACR;AAAA,IACF;AAAA,IACA,CAAC,UAAU,cAAc,GAAG,OAC1B,SACA,WACsB;AACtB,YAAM,QAAQ,kBAAkB,OAAO;AACvC,UAAI,CAAC,OAAO;AACV,cAAM,IAAI,MAAM,6CAA6C;AAAA,MAC/D;AAEA,YAAM,qBAAqB,kBAAkB,OAAO;AACpD,aAAO,MAAM,iCAAiC,kBAAkB,EAAE;AAGlE,UAAI,WAAW,MAAM;AACnB,eAAO,MAAM,4CAA4C;AAEzD,cAAM,YAAY;AAClB,cAAM,aAAa,MAAM,SAAS,EAAE,KAAK,CAAC;AAC1C,mBAAW,CAAC,IAAI;AAChB,eAAO;AAAA,MACT;AAGA,UAAI;AACJ,UAAI,OAAO,WAAW,UAAU;AAC9B,eAAO;AAAA,MACT,WAAW,OAAO,WAAW,YAAY,OAAO,MAAM;AACpD,eAAO,OAAO;AAAA,MAChB,OAAO;AACL,eAAO,KAAK,oCAAoC;AAChD,cAAM,YAAY;AAClB,cAAM,iBAAiB,MAAM,SAAS,EAAE,KAAK,CAAC;AAC9C,uBAAe,CAAC,IAAI;AACpB,eAAO;AAAA,MACT;AAEA,UAAI,CAAC,KAAK,KAAK,GAAG;AAChB,eAAO,KAAK,0BAA0B;AACtC,cAAM,YAAY;AAClB,cAAM,cAAc,MAAM,SAAS,EAAE,KAAK,CAAC;AAC3C,oBAA
Y,CAAC,IAAI;AACjB,eAAO;AAAA,MACT;AAEA,UAAI;AACF,cAAM,WAAW,MAAM,MAAM,OAAO,aAAa;AAAA,UAC/C,OAAO;AAAA,UACP,UAAU;AAAA,QACZ,CAAC;AAED,cAAM,YAAY,SAAS,aAAa,CAAC,GAAG,UAAU,CAAC;AAGvD,cAAM,eAAe,MAAM,YAAY,IAAI;AAE3C,4BAAoB,SAAS,UAAU,gBAAgB,MAAM;AAAA,UAC3D;AAAA,UACA,kBAAkB;AAAA,UAClB,aAAa;AAAA,QACf,CAAC;AAED,eAAO,IAAI,6BAA6B,UAAU,MAAM,EAAE;AAC1D,eAAO;AAAA,MACT,SAAS,OAAgB;AACvB,cAAM,UAAU,iBAAiB,QAAQ,MAAM,UAAU,OAAO,KAAK;AACrE,eAAO,MAAM,+BAA+B,OAAO,EAAE;AAErD,cAAM,YAAY;AAClB,cAAM,cAAc,MAAM,SAAS,EAAE,KAAK,CAAC;AAC3C,oBAAY,CAAC,IAAI;AACjB,eAAO;AAAA,MACT;AAAA,IACF;AAAA,IACA,CAAC,UAAU,iBAAiB,GAAG,OAC7B,SACA,WACG;AACH,YAAM,QAAQ,kBAAkB,OAAO;AACvC,UAAI,CAAC,OAAO;AACV,cAAM,IAAI,MAAM,6CAA6C;AAAA,MAC/D;AAEA,UAAI;AACJ,UAAI;AACJ,YAAM,YAAY,cAAc,OAAO;AACvC,aAAO,IAAI,oCAAoC,SAAS,EAAE;AAE1D,UAAI,OAAO,WAAW,UAAU;AAC9B,mBAAW;AACX,qBAAa;AAAA,MACf,OAAO;AACL,mBAAW,OAAO;AAClB,qBACE,OAAO,UACP;AAAA,MACJ;AAEA,UAAI;AAEF,cAAM,gBAAgB,MAAM,MAAM,QAAQ;AAC1C,YAAI,CAAC,cAAc,IAAI;AACrB,gBAAM,IAAI,MAAM,0BAA0B,cAAc,UAAU,EAAE;AAAA,QACtE;AAEA,cAAM,YAAY,MAAM,cAAc,YAAY;AAClD,cAAM,cAAc,OAAO,KAAK,SAAS,EAAE,SAAS,QAAQ;AAG5D,cAAM,cAAc,cAAc,QAAQ,IAAI,cAAc,KAAK;AAEjE,cAAM,WAAW,MAAM,MAAM,OAAO,gBAAgB;AAAA,UAClD,OAAO;AAAA,UACP,UAAU;AAAA,YACR;AAAA,cACE,MAAM;AAAA,cACN,OAAO;AAAA,gBACL,EAAE,MAAM,WAAW;AAAA,gBACnB;AAAA,kBACE,YAAY;AAAA,oBACV,UAAU;AAAA,oBACV,MAAM;AAAA,kBACR;AAAA,gBACF;AAAA,cACF;AAAA,YACF;AAAA,UACF;AAAA,UACA,QAAQ;AAAA,YACN,aAAa;AAAA,YACb,MAAM;AAAA,YACN,MAAM;AAAA,YACN,iBAAiB;AAAA,YACjB,gBAAgB,kBAAkB;AAAA,UACpC;AAAA,QACF,CAAC;AAED,cAAM,eAAe,SAAS,QAAQ;AAEtC,eAAO,IAAI,yCAAyC;AAGpD,cAAM,iBACJ,OAAO,WAAW,YAClB,OAAO,UACP,OAAO,WACL;AAGJ,YAAI,gBAAgB;AAClB,iBAAO;AAAA,QACT;AAGA,YAAI;AACF,gBAAM,eAAe,KAAK,MAAM,YAAY;AAC5C,cAAI,aAAa,SAAS,aAAa,aAAa;AAClD,mBAAO;AAAA,UACT;AAAA,QACF,SAAS,GAAG;AAEV,iBAAO,MAAM,+CAA+C,CAAC,EAAE;AAAA,QACjE;AAGA,cAAM,aAAa,aAAa,MAAM,2BAA2B;AACjE,cAAM,QAAQ,aAAa,CAAC,GAAG,KAAK,KAAK;AACzC,cAAM,cAAc,aAAa,QAAQ,6BAA6B,EAAE,EAAE,KAAK;AAE/E,eAAO,EAAE,OAAO,YAAY;AAAA,MAC9B,SAAS,OAAgB;AACvB,cAAM,UAAU,iBAAiB,QAAQ,MAAM,UAAU,OA
AO,KAAK;AACrE,eAAO,MAAM,0BAA0B,OAAO,EAAE;AAChD,eAAO;AAAA,UACL,OAAO;AAAA,UACP,aAAa,UAAU,OAAO;AAAA,QAChC;AAAA,MACF;AAAA,IACF;AAAA,IACA,CAAC,UAAU,YAAY,GAAG,OAAO,SAAwB,WAAmC;AAC1F,aAAO,0BAA0B,SAAS,QAAQ,UAAU,cAAc,aAAa;AAAA,IACzF;AAAA,IACA,CAAC,UAAU,YAAY,GAAG,OAAO,SAAwB,WAAmC;AAC1F,aAAO,0BAA0B,SAAS,QAAQ,UAAU,cAAc,aAAa;AAAA,IACzF;AAAA,EACF;AAAA,EACA,OAAO;AAAA,IACL;AAAA,MACE,MAAM;AAAA,MACN,OAAO;AAAA,QACL;AAAA,UACE,MAAM;AAAA,UACN,IAAI,OAAO,YAA2B;AACpC,kBAAM,SAAS,UAAU,OAAO;AAChC,gBAAI,CAAC,QAAQ;AACX,oBAAM,IAAI,MAAM,sCAAsC;AAAA,YACxD;AACA,kBAAM,QAAQ,IAAI,YAAY,EAAE,OAAO,CAAC;AACxC,kBAAM,YAAY,MAAM,MAAM,OAAO,KAAK;AAC1C,kBAAM,SAAS,CAAC;AAChB,6BAAiB,SAAS,WAAW;AACnC,qBAAO,KAAK,KAAK;AAAA,YACnB;AACA,mBAAO,IAAI,qBAAqB,OAAO,MAAM;AAAA,UAC/C;AAAA,QACF;AAAA,QACA;AAAA,UACE,MAAM;AAAA,UACN,IAAI,OAAO,YAA2B;AACpC,gBAAI;AACF,oBAAM,YAAY,MAAM,QAAQ,SAAS,UAAU,gBAAgB;AAAA,gBACjE,MAAM;AAAA,cACR,CAAC;AACD,qBAAO,IAAI,wBAAwB,UAAU,MAAM;AACnD,kBAAI,UAAU,WAAW,GAAG;AAC1B,sBAAM,IAAI,MAAM,8BAA8B;AAAA,cAChD;AAAA,YACF,SAAS,OAAgB;AACvB,oBAAM,UAAU,iBAAiB,QAAQ,MAAM,UAAU,OAAO,KAAK;AACrE,qBAAO,MAAM,iCAAiC,OAAO,EAAE;AACvD,oBAAM;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAAA,QACA;AAAA,UACE,MAAM;AAAA,UACN,IAAI,OAAO,YAA2B;AACpC,gBAAI;AACF,oBAAM,OAAO,MAAM,QAAQ,SAAS,UAAU,YAAY;AAAA,gBACxD,QAAQ;AAAA,cACV,CAAC;AACD,kBAAI,KAAK,WAAW,GAAG;AACrB,sBAAM,IAAI,MAAM,yBAAyB;AAAA,cAC3C;AACA,qBAAO,IAAI,8BAA8B,IAAI;AAAA,YAC/C,SAAS,OAAgB;AACvB,oBAAM,UAAU,iBAAiB,QAAQ,MAAM,UAAU,OAAO,KAAK;AACrE,qBAAO,MAAM,6BAA6B,OAAO,EAAE;AACnD,oBAAM;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAAA,QACA;AAAA,UACE,MAAM;AAAA,UACN,IAAI,OAAO,YAA2B;AACpC,gBAAI;AACF,oBAAM,OAAO,MAAM,QAAQ,SAAS,UAAU,YAAY;AAAA,gBACxD,QAAQ;AAAA,cACV,CAAC;AACD,kBAAI,KAAK,WAAW,GAAG;AACrB,sBAAM,IAAI,MAAM,yBAAyB;AAAA,cAC3C;AACA,qBAAO,IAAI,8BAA8B,KAAK,UAAU,GAAG,GAAG,IAAI,KAAK;AAAA,YACzE,SAAS,OAAgB;AACvB,oBAAM,UAAU,iBAAiB,QAAQ,MAAM,UAAU,OAAO,KAAK;AACrE,qBAAO,MAAM,6BAA6B,OAAO,EAAE;AACnD,oBAAM;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAAA,QACA;AAAA,UACE,MAAM;AAAA,UACN,IAAI,OAAO,YAA2B;AACpC,gBAAI;AACF,oBAAM,SAAS,MAAM,QAAQ;AAAA,gBAC
3B,UAAU;AAAA,gBACV;AAAA,cACF;AAEA,kBACE,UACA,OAAO,WAAW,YAClB,WAAW,UACX,iBAAiB,QACjB;AACA,uBAAO,IAAI,sBAAsB,MAAM;AAAA,cACzC,OAAO;AACL,uBAAO,MAAM,4CAA4C,MAAM;AAAA,cACjE;AAAA,YACF,SAAS,OAAgB;AACvB,oBAAM,UAAU,iBAAiB,QAAQ,MAAM,UAAU,OAAO,KAAK;AACrE,qBAAO,MAAM,oCAAoC,OAAO,EAAE;AAC1D,oBAAM;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAAA,QACA;AAAA,UACE,MAAM;AAAA,UACN,IAAI,OAAO,YAA2B;AACpC,gBAAI;AACF,oBAAM,SAAS;AAAA,gBACb,MAAM;AAAA,gBACN,YAAY;AAAA,kBACV,MAAM,EAAE,MAAM,SAAS;AAAA,kBACvB,KAAK,EAAE,MAAM,SAAS;AAAA,kBACtB,SAAS,EAAE,MAAM,SAAS,OAAO,EAAE,MAAM,SAAS,EAAE;AAAA,gBACtD;AAAA,gBACA,UAAU,CAAC,QAAQ,OAAO,SAAS;AAAA,cACrC;AAEA,oBAAM,SAAS,MAAM,QAAQ,SAAS,UAAU,cAAc;AAAA,gBAC5D,QAAQ;AAAA,gBACR;AAAA,cACF,CAAC;AAED,qBAAO,IAAI,qBAAqB,MAAM;AAEtC,kBAAI,CAAC,OAAO,QAAQ,CAAC,OAAO,OAAO,CAAC,OAAO,SAAS;AAClD,sBAAM,IAAI,MAAM,0CAA0C;AAAA,cAC5D;AAAA,YACF,SAAS,OAAgB;AACvB,oBAAM,UAAU,iBAAiB,QAAQ,MAAM,UAAU,OAAO,KAAK;AACrE,qBAAO,MAAM,oCAAoC,OAAO,EAAE;AAC1D,oBAAM;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAAA,MACF;AAAA,IACF;AAAA,EACF;AACF;AAEA,IAAO,gBAAQ;","names":[]}
package/package.json ADDED
@@ -0,0 +1,109 @@
1
+ {
2
+ "name": "@elizaos/plugin-google-genai",
3
+ "version": "1.0.0",
4
+ "type": "module",
5
+ "main": "dist/index.js",
6
+ "module": "dist/index.js",
7
+ "types": "dist/index.d.ts",
8
+ "repository": {
9
+ "type": "git",
10
+ "url": "git+https://github.com/elizaos-plugins/plugin-google-genai.git"
11
+ },
12
+ "exports": {
13
+ "./package.json": "./package.json",
14
+ ".": {
15
+ "import": {
16
+ "types": "./dist/index.d.ts",
17
+ "default": "./dist/index.js"
18
+ }
19
+ }
20
+ },
21
+ "files": [
22
+ "dist"
23
+ ],
24
+ "dependencies": {
25
+ "@elizaos/core": "^1.0.0",
26
+ "@google/genai": "^1.5.1",
27
+ "undici": "^7.9.0"
28
+ },
29
+ "devDependencies": {
30
+ "prettier": "3.5.3",
31
+ "tsup": "8.4.0",
32
+ "typescript": "5.8.3",
33
+ "vitest": "3.1.3",
34
+ "dotenv": "^16.5.0"
35
+ },
36
+ "scripts": {
37
+ "build": "tsup",
38
+ "dev": "tsup --watch",
39
+ "lint": "prettier --write ./src",
40
+ "clean": "rm -rf dist .turbo node_modules .turbo-tsconfig.json tsconfig.tsbuildinfo",
41
+ "format": "prettier --write ./src",
42
+ "format:check": "prettier --check ./src",
43
+ "test": "vitest run",
44
+ "test:watch": "vitest"
45
+ },
46
+ "publishConfig": {
47
+ "access": "public"
48
+ },
49
+ "agentConfig": {
50
+ "pluginType": "elizaos:plugin:1.0.0",
51
+ "pluginParameters": {
52
+ "GOOGLE_GENERATIVE_AI_API_KEY": {
53
+ "type": "string",
54
+ "description": "API key for Google Generative AI (Gemini) obtained from Google AI Studio.",
55
+ "required": true,
56
+ "sensitive": true
57
+ },
58
+ "GOOGLE_EMBEDDING_MODEL": {
59
+ "type": "string",
60
+ "description": "Overrides the default text embedding model used by Google Generative AI.",
61
+ "required": false,
62
+ "default": "text-embedding-004",
63
+ "sensitive": false
64
+ },
65
+ "GOOGLE_IMAGE_MODEL": {
66
+ "type": "string",
67
+ "description": "Overrides the default image analysis model used by Google Generative AI.",
68
+ "required": false,
69
+ "default": "gemini-2.0-flash-001",
70
+ "sensitive": false
71
+ },
72
+ "GOOGLE_SMALL_MODEL": {
73
+ "type": "string",
74
+ "description": "Overrides the default small language model used for fast text/object generation.",
75
+ "required": false,
76
+ "default": "gemini-2.0-flash-001",
77
+ "sensitive": false
78
+ },
79
+ "GOOGLE_LARGE_MODEL": {
80
+ "type": "string",
81
+ "description": "Overrides the default large language model used for complex text/object generation.",
82
+ "required": false,
83
+ "default": "gemini-2.0-flash-001",
84
+ "sensitive": false
85
+ },
86
+ "SMALL_MODEL": {
87
+ "type": "string",
88
+ "description": "General fallback environment variable for the small model name when GOOGLE_SMALL_MODEL is not set.",
89
+ "required": false,
90
+ "default": "gemini-2.0-flash-001",
91
+ "sensitive": false
92
+ },
93
+ "LARGE_MODEL": {
94
+ "type": "string",
95
+ "description": "General fallback environment variable for the large model name when GOOGLE_LARGE_MODEL is not set.",
96
+ "required": false,
97
+ "default": "gemini-2.0-flash-001",
98
+ "sensitive": false
99
+ },
100
+ "IMAGE_MODEL": {
101
+ "type": "string",
102
+ "description": "General fallback environment variable for the image model name when GOOGLE_IMAGE_MODEL is not set.",
103
+ "required": false,
104
+ "default": "gemini-2.0-flash-001",
105
+ "sensitive": false
106
+ }
107
+ }
108
+ }
109
+ }