@elizaos/plugin-google-genai 2.0.0-alpha.8 → 2.0.0-beta.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +124 -0
- package/auto-enable.ts +21 -0
- package/dist/browser/index.browser.js +412 -165
- package/dist/browser/index.browser.js.map +11 -11
- package/dist/build.d.ts +3 -0
- package/dist/build.d.ts.map +1 -0
- package/dist/build.js +117 -0
- package/dist/cjs/index.node.cjs +413 -172
- package/dist/cjs/index.node.js.map +11 -11
- package/dist/generated/specs/specs.d.ts +55 -0
- package/dist/generated/specs/specs.d.ts.map +1 -0
- package/dist/generated/specs/specs.js +34 -0
- package/dist/index.browser.d.ts +5 -0
- package/dist/index.browser.d.ts.map +1 -0
- package/dist/index.browser.js +4 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +252 -0
- package/dist/index.node.d.ts +5 -0
- package/dist/index.node.d.ts.map +1 -0
- package/dist/index.node.js +4 -0
- package/dist/init.d.ts +16 -0
- package/dist/init.d.ts.map +1 -0
- package/dist/init.js +27 -0
- package/dist/models/embedding.d.ts +6 -0
- package/dist/models/embedding.d.ts.map +1 -0
- package/dist/models/embedding.js +57 -0
- package/dist/models/image.d.ts +7 -0
- package/dist/models/image.d.ts.map +1 -0
- package/dist/models/image.js +91 -0
- package/dist/models/index.d.ts +13 -0
- package/dist/models/index.d.ts.map +1 -0
- package/dist/models/index.js +12 -0
- package/dist/models/object.d.ts +10 -0
- package/dist/models/object.d.ts.map +1 -0
- package/dist/models/object.js +84 -0
- package/dist/models/text.d.ts +51 -0
- package/dist/models/text.d.ts.map +1 -0
- package/dist/models/text.js +257 -0
- package/dist/node/index.node.d.ts +2 -0
- package/dist/node/index.node.js +412 -165
- package/dist/node/index.node.js.map +11 -11
- package/dist/types/index.d.ts +47 -0
- package/dist/types/index.d.ts.map +1 -0
- package/dist/types/index.js +1 -0
- package/dist/utils/config.d.ts +25 -0
- package/dist/utils/config.d.ts.map +1 -0
- package/dist/utils/config.js +115 -0
- package/dist/utils/events.d.ts +12 -0
- package/dist/utils/events.d.ts.map +1 -0
- package/dist/utils/events.js +14 -0
- package/dist/utils/index.d.ts +4 -0
- package/dist/utils/index.d.ts.map +1 -0
- package/dist/utils/index.js +3 -0
- package/dist/utils/tokenization.d.ts +2 -0
- package/dist/utils/tokenization.d.ts.map +1 -0
- package/dist/utils/tokenization.js +3 -0
- package/dist/vitest.config.d.ts +3 -0
- package/dist/vitest.config.d.ts.map +1 -0
- package/dist/vitest.config.js +8 -0
- package/package.json +32 -16
package/dist/init.d.ts
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
import { type IAgentRuntime } from "@elizaos/core";
|
|
2
|
+
/**
 * Environment-style configuration recognized by the plugin. All fields
 * are optional. The GOOGLE_*-prefixed keys are the primary settings;
 * SMALL_MODEL / LARGE_MODEL / IMAGE_MODEL appear to be generic aliases —
 * TODO confirm precedence against the lookups in utils/config.
 */
export interface PluginConfig {
  // API key for Google Generative AI; validated at init (see init.js).
  readonly GOOGLE_GENERATIVE_AI_API_KEY?: string;
  readonly GOOGLE_SMALL_MODEL?: string;
  readonly GOOGLE_LARGE_MODEL?: string;
  readonly GOOGLE_IMAGE_MODEL?: string;
  readonly GOOGLE_EMBEDDING_MODEL?: string;
  readonly SMALL_MODEL?: string;
  readonly LARGE_MODEL?: string;
  readonly IMAGE_MODEL?: string;
}
/**
 * Kicks off a fire-and-forget background validation of the Google GenAI
 * configuration (lists available models). Returns immediately; failures
 * are logged as warnings rather than thrown.
 */
export declare function initializeGoogleGenAI(
  _config: PluginConfig,
  runtime: IAgentRuntime,
): void;
|
|
16
|
+
//# sourceMappingURL=init.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"init.d.ts","sourceRoot":"","sources":["../typescript/init.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,KAAK,aAAa,EAAU,MAAM,eAAe,CAAC;AAI3D,MAAM,WAAW,YAAY;IAC3B,QAAQ,CAAC,4BAA4B,CAAC,EAAE,MAAM,CAAC;IAC/C,QAAQ,CAAC,kBAAkB,CAAC,EAAE,MAAM,CAAC;IACrC,QAAQ,CAAC,kBAAkB,CAAC,EAAE,MAAM,CAAC;IACrC,QAAQ,CAAC,kBAAkB,CAAC,EAAE,MAAM,CAAC;IACrC,QAAQ,CAAC,sBAAsB,CAAC,EAAE,MAAM,CAAC;IACzC,QAAQ,CAAC,WAAW,CAAC,EAAE,MAAM,CAAC;IAC9B,QAAQ,CAAC,WAAW,CAAC,EAAE,MAAM,CAAC;IAC9B,QAAQ,CAAC,WAAW,CAAC,EAAE,MAAM,CAAC;CAC/B;AAED,wBAAgB,qBAAqB,CAAC,OAAO,EAAE,YAAY,EAAE,OAAO,EAAE,aAAa,GAAG,IAAI,CAsBzF"}
|
package/dist/init.js
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
1
|
+
import { logger } from "@elizaos/core";
|
|
2
|
+
import { GoogleGenAI } from "@google/genai";
|
|
3
|
+
import { getApiKey } from "./utils/config";
|
|
4
|
+
/**
 * Validates the Google GenAI configuration in the background.
 *
 * Fire-and-forget: the function returns immediately while an async
 * validation runs. A missing API key or any API failure is reported via
 * logger.warn — nothing is ever thrown to the caller.
 *
 * @param _config  Plugin config (unused; settings come from the runtime).
 * @param runtime  Agent runtime used to resolve the API key.
 */
export function initializeGoogleGenAI(_config, runtime) {
  const validate = async () => {
    const apiKey = getApiKey(runtime);
    if (!apiKey) {
      logger.warn("GOOGLE_GENERATIVE_AI_API_KEY is not set");
      return;
    }
    const client = new GoogleGenAI({ apiKey });
    // Listing models both proves the key works and gives a useful count.
    const listing = await client.models.list();
    let available = 0;
    for await (const _model of listing) {
      available += 1;
    }
    logger.log(
      `Google AI API key validated. Available models: ${available}`,
    );
  };
  validate().catch((error) => {
    logger.warn(
      `Google AI configuration error: ${error instanceof Error ? error.message : String(error)}`,
    );
  });
}
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"embedding.d.ts","sourceRoot":"","sources":["../../typescript/models/embedding.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,aAAa,EAAE,mBAAmB,EAAE,MAAM,eAAe,CAAC;AAUxE,wBAAsB,mBAAmB,CACvC,OAAO,EAAE,aAAa,EACtB,MAAM,EAAE,mBAAmB,GAAG,MAAM,GAAG,IAAI,GAC1C,OAAO,CAAC,MAAM,EAAE,CAAC,CA0DnB"}
|
|
@@ -0,0 +1,57 @@
|
|
|
1
|
+
import * as ElizaCore from "@elizaos/core";
|
|
2
|
+
import { logger } from "@elizaos/core";
|
|
3
|
+
import { createGoogleGenAI, getEmbeddingModel } from "../utils/config";
|
|
4
|
+
import { emitModelUsageEvent } from "../utils/events";
|
|
5
|
+
import { countTokens } from "../utils/tokenization";
|
|
6
|
+
|
|
7
|
+
const TEXT_EMBEDDING_MODEL_TYPE =
  ElizaCore.ModelType?.TEXT_EMBEDDING ?? "TEXT_EMBEDDING";

/**
 * Produces a text embedding via the configured Google embedding model.
 *
 * Fallback behavior: returns a 768-dimensional zero vector for
 * null/undefined params, empty text, or any API failure, so callers
 * always receive a usable vector.
 *
 * @param runtime Agent runtime (client construction, config, usage events).
 * @param params  A string, an object with a `text` field, or null.
 * @returns Promise resolving to the embedding values (number[]).
 * @throws Error when the GenAI client cannot be constructed.
 */
export async function handleTextEmbedding(runtime, params) {
  const genAI = createGoogleGenAI(runtime);
  if (!genAI) {
    throw new Error("Google Generative AI client not initialized");
  }
  const embeddingModelName = getEmbeddingModel(runtime);
  logger.debug(`[TEXT_EMBEDDING] Using model: ${embeddingModelName}`);
  // Fix: `== null` also catches undefined. Previously only `=== null` was
  // handled, so an undefined params fell through to the empty-text branch
  // below and logged a spurious "Empty text for embedding" warning.
  if (params == null) {
    return Array(768).fill(0);
  }
  let text =
    typeof params === "string"
      ? params
      : typeof params === "object" && params.text
        ? params.text
        : "";
  if (!text.trim()) {
    logger.warn("Empty text for embedding");
    return Array(768).fill(0);
  }
  // Truncate to stay within embedding model token limits (~4 chars per token)
  const maxChars = 8_192 * 4;
  if (text.length > maxChars) {
    logger.warn(
      `[Google GenAI] Embedding input too long (~${Math.ceil(text.length / 4)} tokens), truncating to ~8192 tokens`,
    );
    text = text.slice(0, maxChars);
  }
  try {
    const response = await genAI.models.embedContent({
      model: embeddingModelName,
      contents: text,
    });
    const embedding = response.embeddings?.[0]?.values || [];
    const promptTokens = await countTokens(text);
    emitModelUsageEvent(runtime, TEXT_EMBEDDING_MODEL_TYPE, text, {
      promptTokens,
      completionTokens: 0,
      totalTokens: promptTokens,
    });
    logger.log(`Got embedding with length ${embedding.length}`);
    return embedding;
  } catch (error) {
    logger.error(
      `Error generating embedding: ${error instanceof Error ? error.message : String(error)}`,
    );
    // Zero vector keeps downstream vector math from crashing on failure.
    return Array(768).fill(0);
  }
}
|
|
@@ -0,0 +1,7 @@
|
|
|
1
|
+
import type { IAgentRuntime, ImageDescriptionParams } from "@elizaos/core";
|
|
2
|
+
import type { ImageDescriptionResponse } from "../types";
|
|
3
|
+
/**
 * Fetches the image at `params.imageUrl` (or at `params` itself when it
 * is a string) and asks the configured vision model for a title and a
 * detailed description. On failure, resolves with an error-describing
 * response object rather than rejecting (see models/image.js).
 */
export declare function handleImageDescription(
  runtime: IAgentRuntime,
  params: ImageDescriptionParams | string,
): Promise<ImageDescriptionResponse>;
|
|
7
|
+
//# sourceMappingURL=image.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"image.d.ts","sourceRoot":"","sources":["../../typescript/models/image.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,aAAa,EAAE,sBAAsB,EAAE,MAAM,eAAe,CAAC;AAE3E,OAAO,KAAK,EAAE,wBAAwB,EAAE,MAAM,UAAU,CAAC;AAKzD,wBAAsB,sBAAsB,CAC1C,OAAO,EAAE,aAAa,EACtB,MAAM,EAAE,sBAAsB,GAAG,MAAM,GACtC,OAAO,CAAC,wBAAwB,CAAC,CAoFnC"}
|
|
@@ -0,0 +1,91 @@
|
|
|
1
|
+
import { logger } from "@elizaos/core";
|
|
2
|
+
import {
|
|
3
|
+
createGoogleGenAI,
|
|
4
|
+
getImageModel,
|
|
5
|
+
getSafetySettings,
|
|
6
|
+
} from "../utils/config";
|
|
7
|
+
|
|
8
|
+
// Lazily resolve the global fetch implementation. The previous module-load
// ternary (`typeof globalThis.fetch === "function" ? globalThis.fetch : fetch`)
// was broken: when globalThis.fetch was missing, the bare `fetch` fallback
// referenced the very same absent binding and threw a ReferenceError at
// import time. Deferring the lookup also invokes fetch with its proper
// receiver, which some environments require.
const crossFetch = (...args) => {
  if (typeof globalThis.fetch !== "function") {
    throw new Error("No fetch implementation available in this environment");
  }
  return globalThis.fetch(...args);
};

/**
 * Downloads an image and asks the vision model for a title + description.
 *
 * Accepts either a plain URL string or a params object with `imageUrl`
 * and an optional `prompt`. The model response is first tried as JSON
 * ({title, description}); otherwise a "title: ..." line is extracted and
 * the remainder becomes the description. Never rejects — errors resolve
 * to a { title, description } object describing the failure.
 *
 * @param runtime Agent runtime used for client/config lookup.
 * @param params  Image URL string or ImageDescriptionParams.
 * @returns Promise<{ title, description }>.
 * @throws Error when the GenAI client cannot be constructed.
 */
export async function handleImageDescription(runtime, params) {
  const genAI = createGoogleGenAI(runtime);
  if (!genAI) {
    throw new Error("Google Generative AI client not initialized");
  }
  let imageUrl;
  let promptText;
  const modelName = getImageModel(runtime);
  logger.log(`[IMAGE_DESCRIPTION] Using model: ${modelName}`);
  if (typeof params === "string") {
    imageUrl = params;
    promptText =
      "Please analyze this image and provide a title and detailed description.";
  } else {
    imageUrl = params.imageUrl;
    promptText =
      params.prompt ||
      "Please analyze this image and provide a title and detailed description.";
  }
  try {
    const imageResponse = await crossFetch(imageUrl);
    if (!imageResponse.ok) {
      throw new Error(`Failed to fetch image: ${imageResponse.statusText}`);
    }
    const imageData = await imageResponse.arrayBuffer();
    const base64Image = Buffer.from(imageData).toString("base64");
    // Default to JPEG when the server omits a content type.
    const contentType =
      imageResponse.headers.get("content-type") || "image/jpeg";
    const response = await genAI.models.generateContent({
      model: modelName,
      contents: [
        {
          role: "user",
          parts: [
            { text: promptText },
            {
              inlineData: {
                mimeType: contentType,
                data: base64Image,
              },
            },
          ],
        },
      ],
      config: {
        temperature: 0.7,
        topK: 40,
        topP: 0.95,
        maxOutputTokens: 8192,
        safetySettings: getSafetySettings(),
      },
    });
    const responseText = response.text || "";
    // Preferred path: model returned well-formed {title, description} JSON.
    try {
      const jsonResponse = JSON.parse(responseText);
      if (
        typeof jsonResponse.title === "string" &&
        typeof jsonResponse.description === "string"
      ) {
        return {
          title: jsonResponse.title,
          description: jsonResponse.description,
        };
      }
    } catch {
      // Fall through to text parsing
    }
    // Fallback: pull a "title: ..." line out of free-form text.
    const titleMatch = responseText.match(/title[:\s]+(.+?)(?:\n|$)/i);
    const title = titleMatch?.[1]?.trim() || "Image Analysis";
    const description = titleMatch
      ? responseText.replace(/title[:\s]+(.+?)(?:\n|$)/i, "").trim()
      : responseText.trim();
    return { title, description };
  } catch (error) {
    const message = error instanceof Error ? error.message : String(error);
    logger.error(`Error analyzing image: ${message}`);
    return {
      title: "Failed to analyze image",
      description: `Error: ${message}`,
    };
  }
}
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
// Barrel for the model handlers: embeddings, image description,
// structured-object generation, and the text-generation family.
export { handleTextEmbedding } from "./embedding";
export { handleImageDescription } from "./image";
export { handleObjectLarge, handleObjectSmall } from "./object";
export {
  handleActionPlanner,
  handleResponseHandler,
  handleTextLarge,
  handleTextMedium,
  handleTextMega,
  handleTextNano,
  handleTextSmall,
} from "./text";
|
|
13
|
+
//# sourceMappingURL=index.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../typescript/models/index.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,mBAAmB,EAAE,MAAM,aAAa,CAAC;AAClD,OAAO,EAAE,sBAAsB,EAAE,MAAM,SAAS,CAAC;AACjD,OAAO,EAAE,iBAAiB,EAAE,iBAAiB,EAAE,MAAM,UAAU,CAAC;AAChE,OAAO,EACL,mBAAmB,EACnB,qBAAqB,EACrB,eAAe,EACf,gBAAgB,EAChB,cAAc,EACd,cAAc,EACd,eAAe,GAChB,MAAM,QAAQ,CAAC"}
|
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
// Barrel for the model handlers: embeddings, image description,
// structured-object generation, and the text-generation family.
export { handleTextEmbedding } from "./embedding";
export { handleImageDescription } from "./image";
export { handleObjectLarge, handleObjectSmall } from "./object";
export {
  handleActionPlanner,
  handleResponseHandler,
  handleTextLarge,
  handleTextMedium,
  handleTextMega,
  handleTextNano,
  handleTextSmall,
} from "./text";
|
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
import type { IAgentRuntime, ObjectGenerationParams } from "@elizaos/core";
|
|
2
|
+
export declare function handleObjectSmall(
|
|
3
|
+
runtime: IAgentRuntime,
|
|
4
|
+
params: ObjectGenerationParams,
|
|
5
|
+
): Promise<Record<string, string | number | boolean | null>>;
|
|
6
|
+
export declare function handleObjectLarge(
|
|
7
|
+
runtime: IAgentRuntime,
|
|
8
|
+
params: ObjectGenerationParams,
|
|
9
|
+
): Promise<Record<string, string | number | boolean | null>>;
|
|
10
|
+
//# sourceMappingURL=object.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"object.d.ts","sourceRoot":"","sources":["../../typescript/models/object.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,aAAa,EAAiB,sBAAsB,EAAE,MAAM,eAAe,CAAC;AA8E1F,wBAAsB,iBAAiB,CACrC,OAAO,EAAE,aAAa,EACtB,MAAM,EAAE,sBAAsB,GAC7B,OAAO,CAAC,MAAM,CAAC,MAAM,EAAE,MAAM,GAAG,MAAM,GAAG,OAAO,GAAG,IAAI,CAAC,CAAC,CAE3D;AAED,wBAAsB,iBAAiB,CACrC,OAAO,EAAE,aAAa,EACtB,MAAM,EAAE,sBAAsB,GAC7B,OAAO,CAAC,MAAM,CAAC,MAAM,EAAE,MAAM,GAAG,MAAM,GAAG,OAAO,GAAG,IAAI,CAAC,CAAC,CAE3D"}
|
|
@@ -0,0 +1,84 @@
|
|
|
1
|
+
import { logger } from "@elizaos/core";
|
|
2
|
+
import {
|
|
3
|
+
createGoogleGenAI,
|
|
4
|
+
getLargeModel,
|
|
5
|
+
getSafetySettings,
|
|
6
|
+
getSmallModel,
|
|
7
|
+
} from "../utils/config";
|
|
8
|
+
import { emitModelUsageEvent } from "../utils/events";
|
|
9
|
+
import { countTokens } from "../utils/tokenization";
|
|
10
|
+
|
|
11
|
+
/**
 * Parses model output that is expected to contain a JSON object.
 * Tries the raw text first, then falls back to the first {...} span.
 *
 * @param text Raw model response text.
 * @returns The parsed object.
 * @throws Error("Failed to parse JSON from response") when neither works.
 */
function extractJsonObject(text) {
  try {
    return JSON.parse(text);
  } catch {
    const jsonMatch = text.match(/\{[\s\S]*\}/);
    if (jsonMatch) {
      try {
        return JSON.parse(jsonMatch[0]);
      } catch {
        // fall through to the shared failure below
      }
    }
    throw new Error("Failed to parse JSON from response");
  }
}

/**
 * Shared implementation behind OBJECT_SMALL / OBJECT_LARGE.
 *
 * Appends the JSON schema (when given) to the prompt, requests a JSON
 * response from the selected model, emits a usage event, and parses the
 * result. The previous version duplicated the JSON-recovery logic and
 * threw the identical error in two places; that now lives in
 * extractJsonObject above.
 *
 * @param runtime    Agent runtime.
 * @param params     ObjectGenerationParams (prompt, optional schema/temperature).
 * @param modelType  Usage-event label, e.g. "OBJECT_SMALL".
 * @param getModelFn Resolver from runtime to a concrete model name.
 * @returns Parsed JSON object from the model.
 * @throws On client-init failure, API errors, or unparseable output.
 */
async function generateObjectByModelType(runtime, params, modelType, getModelFn) {
  const genAI = createGoogleGenAI(runtime);
  if (!genAI) {
    throw new Error("Google Generative AI client not initialized");
  }
  const modelName = getModelFn(runtime);
  // Low default temperature: structured output should be deterministic.
  const temperature = params.temperature ?? 0.1;
  logger.info(`Using ${modelType} model: ${modelName}`);
  try {
    let enhancedPrompt = params.prompt;
    if (params.schema) {
      enhancedPrompt += `\n\nPlease respond with a JSON object that follows this schema:\n${JSON.stringify(params.schema, null, 2)}`;
    }
    const response = await genAI.models.generateContent({
      model: modelName,
      contents: enhancedPrompt,
      config: {
        temperature,
        topK: 40,
        topP: 0.95,
        maxOutputTokens: 8192,
        responseMimeType: "application/json",
        safetySettings: getSafetySettings(),
      },
    });
    const text = response.text || "";
    const promptTokens = await countTokens(enhancedPrompt);
    const completionTokens = await countTokens(text);
    emitModelUsageEvent(runtime, modelType, params.prompt, {
      promptTokens,
      completionTokens,
      totalTokens: promptTokens + completionTokens,
    });
    return extractJsonObject(text);
  } catch (error) {
    logger.error(
      `[generateObject] Error: ${error instanceof Error ? error.message : String(error)}`,
    );
    throw error;
  }
}

/** Generates a structured object with the configured small model. */
export async function handleObjectSmall(runtime, params) {
  return generateObjectByModelType(
    runtime,
    params,
    "OBJECT_SMALL",
    getSmallModel,
  );
}

/** Generates a structured object with the configured large model. */
export async function handleObjectLarge(runtime, params) {
  return generateObjectByModelType(
    runtime,
    params,
    "OBJECT_LARGE",
    getLargeModel,
  );
}
|
|
@@ -0,0 +1,51 @@
|
|
|
1
|
+
import type { GenerateTextParams, IAgentRuntime } from "@elizaos/core";
|
|
2
|
+
|
|
3
|
+
/** A single multimodal attachment forwarded to the Gemini API. */
type ChatAttachment = {
  // URL object, http(s)/data: URL string, raw base64 string, or bytes.
  data: string | Uint8Array | URL;
  // MIME type, e.g. "image/png".
  mediaType: string;
  filename?: string;
};
/** GenerateTextParams extended with optional multimodal attachments. */
type GenerateTextParamsWithAttachments = GenerateTextParams & {
  attachments?: ChatAttachment[];
};
/** Generates text with the configured TEXT_SMALL model. */
export declare function handleTextSmall(
  runtime: IAgentRuntime,
  {
    prompt,
    stopSequences,
    maxTokens,
    temperature,
    attachments,
  }: GenerateTextParamsWithAttachments,
): Promise<string>;
/** Generates text with the configured TEXT_LARGE model. */
export declare function handleTextLarge(
  runtime: IAgentRuntime,
  {
    prompt,
    stopSequences,
    maxTokens,
    temperature,
    attachments,
  }: GenerateTextParamsWithAttachments,
): Promise<string>;
/** Generates text with the configured TEXT_NANO model. */
export declare function handleTextNano(
  runtime: IAgentRuntime,
  params: GenerateTextParamsWithAttachments,
): Promise<string>;
/** Generates text with the configured TEXT_MEDIUM model. */
export declare function handleTextMedium(
  runtime: IAgentRuntime,
  params: GenerateTextParamsWithAttachments,
): Promise<string>;
/** Generates text with the configured TEXT_MEGA model. */
export declare function handleTextMega(
  runtime: IAgentRuntime,
  params: GenerateTextParamsWithAttachments,
): Promise<string>;
/** Generates text with the configured response-handler model. */
export declare function handleResponseHandler(
  runtime: IAgentRuntime,
  params: GenerateTextParamsWithAttachments,
): Promise<string>;
/** Generates text with the configured action-planner model. */
export declare function handleActionPlanner(
  runtime: IAgentRuntime,
  params: GenerateTextParamsWithAttachments,
): Promise<string>;
|
|
51
|
+
//# sourceMappingURL=text.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"text.d.ts","sourceRoot":"","sources":["../../typescript/models/text.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,kBAAkB,EAAE,aAAa,EAAE,MAAM,eAAe,CAAC;AA2BvE,KAAK,cAAc,GAAG;IACpB,IAAI,EAAE,MAAM,GAAG,UAAU,GAAG,GAAG,CAAC;IAChC,SAAS,EAAE,MAAM,CAAC;IAClB,QAAQ,CAAC,EAAE,MAAM,CAAC;CACnB,CAAC;AAEF,KAAK,iCAAiC,GAAG,kBAAkB,GAAG;IAC5D,WAAW,CAAC,EAAE,cAAc,EAAE,CAAC;CAChC,CAAC;AAyEF,wBAAsB,eAAe,CACnC,OAAO,EAAE,aAAa,EACtB,EACE,MAAM,EACN,aAAkB,EAClB,SAAgB,EAChB,WAAiB,EACjB,WAAW,GACZ,EAAE,iCAAiC,GACnC,OAAO,CAAC,MAAM,CAAC,CA6CjB;AAED,wBAAsB,eAAe,CACnC,OAAO,EAAE,aAAa,EACtB,EACE,MAAM,EACN,aAAkB,EAClB,SAAgB,EAChB,WAAiB,EACjB,WAAW,GACZ,EAAE,iCAAiC,GACnC,OAAO,CAAC,MAAM,CAAC,CA6CjB;AAED,wBAAsB,cAAc,CAClC,OAAO,EAAE,aAAa,EACtB,MAAM,EAAE,iCAAiC,GACxC,OAAO,CAAC,MAAM,CAAC,CAEjB;AAED,wBAAsB,gBAAgB,CACpC,OAAO,EAAE,aAAa,EACtB,MAAM,EAAE,iCAAiC,GACxC,OAAO,CAAC,MAAM,CAAC,CAEjB;AAED,wBAAsB,cAAc,CAClC,OAAO,EAAE,aAAa,EACtB,MAAM,EAAE,iCAAiC,GACxC,OAAO,CAAC,MAAM,CAAC,CAEjB;AAED,wBAAsB,qBAAqB,CACzC,OAAO,EAAE,aAAa,EACtB,MAAM,EAAE,iCAAiC,GACxC,OAAO,CAAC,MAAM,CAAC,CAEjB;AAED,wBAAsB,mBAAmB,CACvC,OAAO,EAAE,aAAa,EACtB,MAAM,EAAE,iCAAiC,GACxC,OAAO,CAAC,MAAM,CAAC,CAEjB"}
|
|
@@ -0,0 +1,257 @@
|
|
|
1
|
+
import * as ElizaCore from "@elizaos/core";
|
|
2
|
+
import { logger } from "@elizaos/core";
|
|
3
|
+
import {
|
|
4
|
+
createGoogleGenAI,
|
|
5
|
+
getActionPlannerModel,
|
|
6
|
+
getLargeModel,
|
|
7
|
+
getMediumModel,
|
|
8
|
+
getMegaModel,
|
|
9
|
+
getNanoModel,
|
|
10
|
+
getResponseHandlerModel,
|
|
11
|
+
getSafetySettings,
|
|
12
|
+
getSmallModel,
|
|
13
|
+
} from "../utils/config";
|
|
14
|
+
import { emitModelUsageEvent } from "../utils/events";
|
|
15
|
+
import { countTokens } from "../utils/tokenization";
|
|
16
|
+
|
|
17
|
+
// Model-type identifiers. Read from @elizaos/core's ModelType when the
// installed core provides it, with string-literal fallbacks so this module
// still works against cores where a given member is missing.
const CORE_MODEL_TYPES = ElizaCore.ModelType ?? {};
const TEXT_NANO_MODEL_TYPE = CORE_MODEL_TYPES.TEXT_NANO ?? "TEXT_NANO";
const TEXT_MEDIUM_MODEL_TYPE = CORE_MODEL_TYPES.TEXT_MEDIUM ?? "TEXT_MEDIUM";
const TEXT_SMALL_MODEL_TYPE = CORE_MODEL_TYPES.TEXT_SMALL ?? "TEXT_SMALL";
const TEXT_LARGE_MODEL_TYPE = CORE_MODEL_TYPES.TEXT_LARGE ?? "TEXT_LARGE";
const TEXT_MEGA_MODEL_TYPE = CORE_MODEL_TYPES.TEXT_MEGA ?? "TEXT_MEGA";
const RESPONSE_HANDLER_MODEL_TYPE =
  CORE_MODEL_TYPES.RESPONSE_HANDLER ?? "RESPONSE_HANDLER";
const ACTION_PLANNER_MODEL_TYPE =
  CORE_MODEL_TYPES.ACTION_PLANNER ?? "ACTION_PLANNER";
|
|
27
|
+
/**
 * Converts a prompt plus optional attachments into Gemini `parts`.
 *
 * The prompt is always the first part. Each attachment then becomes:
 *  - a fileData part when its data is a URL object or an http(s) string;
 *  - an inlineData part otherwise: data: URLs are split into mime/base64,
 *    other strings are passed through as base64 with the declared
 *    mediaType, and binary data is base64-encoded.
 */
function buildPromptParts(prompt, attachments) {
  const toPart = (attachment) => {
    const { data, mediaType } = attachment;
    if (data instanceof URL) {
      return {
        fileData: { mimeType: mediaType, fileUri: data.toString() },
      };
    }
    if (typeof data === "string") {
      if (/^https?:\/\//i.test(data)) {
        return { fileData: { mimeType: mediaType, fileUri: data } };
      }
      const dataUrlMatch = data.match(/^data:([^;,]+);base64,(.+)$/i);
      return {
        inlineData: {
          mimeType: dataUrlMatch?.[1] ?? mediaType,
          data: dataUrlMatch?.[2] ?? data,
        },
      };
    }
    return {
      inlineData: {
        mimeType: mediaType,
        data: Buffer.from(data).toString("base64"),
      },
    };
  };
  return [{ text: prompt }, ...(attachments ?? []).map(toPart)];
}
|
|
72
|
+
/**
 * Maps a model-type identifier to the configured Gemini model name.
 * Unknown types fall back to the large model, matching the original
 * switch's default branch.
 */
function getModelNameForType(runtime, modelType) {
  const resolvers = {
    [TEXT_NANO_MODEL_TYPE]: getNanoModel,
    [TEXT_MEDIUM_MODEL_TYPE]: getMediumModel,
    [TEXT_SMALL_MODEL_TYPE]: getSmallModel,
    [TEXT_LARGE_MODEL_TYPE]: getLargeModel,
    [TEXT_MEGA_MODEL_TYPE]: getMegaModel,
    [RESPONSE_HANDLER_MODEL_TYPE]: getResponseHandlerModel,
    [ACTION_PLANNER_MODEL_TYPE]: getActionPlannerModel,
  };
  // Own-property check so prototype members can never shadow the default.
  const resolve = Object.prototype.hasOwnProperty.call(resolvers, modelType)
    ? resolvers[modelType]
    : getLargeModel;
  return resolve(runtime);
}
|
|
92
|
+
/**
 * Generates text with the configured TEXT_SMALL model.
 *
 * Delegates to the shared handleTextWithType pipeline — which applies the
 * same defaults (stopSequences [], maxTokens 8192, temperature 0.7), the
 * same attachment handling, and the same usage-event emission — instead of
 * keeping a ~100-line verbatim copy of that logic. The log prefix is
 * unchanged: `[${TEXT_SMALL_MODEL_TYPE}]` interpolates to "[TEXT_SMALL]".
 *
 * @param runtime Agent runtime used for config lookup and usage events.
 * @param params  GenerateTextParams, optionally with attachments.
 * @returns The generated text ("" when the API returns no text).
 * @throws On client-init failure or API errors (after logging).
 */
export async function handleTextSmall(runtime, params) {
  return handleTextWithType(runtime, TEXT_SMALL_MODEL_TYPE, params);
}
|
|
142
|
+
/**
 * Generates text with the configured TEXT_LARGE model.
 *
 * Delegates to the shared handleTextWithType pipeline — which applies the
 * same defaults (stopSequences [], maxTokens 8192, temperature 0.7), the
 * same attachment handling, and the same usage-event emission — instead of
 * keeping a ~100-line verbatim copy of that logic. The log prefix is
 * unchanged: `[${TEXT_LARGE_MODEL_TYPE}]` interpolates to "[TEXT_LARGE]".
 *
 * @param runtime Agent runtime used for config lookup and usage events.
 * @param params  GenerateTextParams, optionally with attachments.
 * @returns The generated text ("" when the API returns no text).
 * @throws On client-init failure or API errors (after logging).
 */
export async function handleTextLarge(runtime, params) {
  return handleTextWithType(runtime, TEXT_LARGE_MODEL_TYPE, params);
}
|
|
192
|
+
// Thin wrappers: each binds one model type onto the shared text pipeline.

/** Generates text with the configured TEXT_NANO model. */
export async function handleTextNano(runtime, params) {
  return handleTextWithType(runtime, TEXT_NANO_MODEL_TYPE, params);
}
/** Generates text with the configured TEXT_MEDIUM model. */
export async function handleTextMedium(runtime, params) {
  return handleTextWithType(runtime, TEXT_MEDIUM_MODEL_TYPE, params);
}
/** Generates text with the configured TEXT_MEGA model. */
export async function handleTextMega(runtime, params) {
  return handleTextWithType(runtime, TEXT_MEGA_MODEL_TYPE, params);
}
/** Generates text with the configured response-handler model. */
export async function handleResponseHandler(runtime, params) {
  return handleTextWithType(runtime, RESPONSE_HANDLER_MODEL_TYPE, params);
}
/** Generates text with the configured action-planner model. */
export async function handleActionPlanner(runtime, params) {
  return handleTextWithType(runtime, ACTION_PLANNER_MODEL_TYPE, params);
}
|
|
207
|
+
/**
 * Shared text-generation pipeline used by every text model handler.
 *
 * Builds the request (inlining attachments via buildPromptParts when
 * present), calls generateContent on the model resolved for `modelType`,
 * emits a usage event with token counts, and returns the generated text.
 * API errors are logged with the model-type prefix and rethrown.
 *
 * @param runtime   Agent runtime used for config lookup and usage events.
 * @param modelType Model-type label (e.g. TEXT_NANO) used for resolution and logs.
 * @param params    Destructured GenerateTextParams with optional attachments.
 * @returns The generated text ("" when the API returns no text).
 */
async function handleTextWithType(
  runtime,
  modelType,
  {
    prompt,
    stopSequences = [],
    maxTokens = 8192,
    temperature = 0.7,
    attachments,
  },
) {
  const client = createGoogleGenAI(runtime);
  if (!client) {
    throw new Error("Google Generative AI client not initialized");
  }
  const resolvedModel = getModelNameForType(runtime, modelType);
  logger.log(`[${modelType}] Using model: ${resolvedModel}`);
  try {
    const systemInstruction = runtime.character.system || undefined;
    const hasAttachments = (attachments?.length ?? 0) > 0;
    const contents = hasAttachments
      ? [{ role: "user", parts: buildPromptParts(prompt, attachments) }]
      : prompt;
    const generationConfig = {
      temperature,
      topK: 40,
      topP: 0.95,
      maxOutputTokens: maxTokens,
      stopSequences,
      safetySettings: getSafetySettings(),
    };
    if (systemInstruction) {
      generationConfig.systemInstruction = systemInstruction;
    }
    const response = await client.models.generateContent({
      model: resolvedModel,
      contents,
      config: generationConfig,
    });
    const text = response.text || "";
    const promptTokens = await countTokens(prompt);
    const completionTokens = await countTokens(text);
    emitModelUsageEvent(runtime, modelType, prompt, {
      promptTokens,
      completionTokens,
      totalTokens: promptTokens + completionTokens,
    });
    return text;
  } catch (error) {
    logger.error(
      `[${modelType}] Error: ${error instanceof Error ? error.message : String(error)}`,
    );
    throw error;
  }
}
|