@elizaos/plugin-elizacloud 1.5.18
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +189 -0
- package/dist/browser/index.browser.js +4 -0
- package/dist/browser/index.browser.js.map +23 -0
- package/dist/browser/index.d.ts +2 -0
- package/dist/cjs/index.d.ts +2 -0
- package/dist/cjs/index.node.cjs +796 -0
- package/dist/cjs/index.node.js.map +20 -0
- package/dist/index.browser.d.ts +2 -0
- package/dist/index.d.ts +29 -0
- package/dist/index.node.d.ts +2 -0
- package/dist/init.d.ts +5 -0
- package/dist/models/embeddings.d.ts +5 -0
- package/dist/models/image.d.ts +17 -0
- package/dist/models/index.d.ts +7 -0
- package/dist/models/object.d.ts +10 -0
- package/dist/models/speech.d.ts +12 -0
- package/dist/models/text.d.ts +9 -0
- package/dist/models/tokenization.d.ts +9 -0
- package/dist/models/transcription.d.ts +6 -0
- package/dist/node/index.d.ts +2 -0
- package/dist/node/index.node.js +772 -0
- package/dist/node/index.node.js.map +20 -0
- package/dist/providers/openai.d.ts +8 -0
- package/dist/types/index.d.ts +32 -0
- package/dist/utils/config.d.ts +73 -0
- package/dist/utils/events.d.ts +10 -0
- package/dist/utils/helpers.d.ts +24 -0
- package/package.json +196 -0
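
The single hunk reproduced below is the bundled ESM Node entry point, package/dist/node/index.node.js (+772 lines); the other files in the list above are not shown here. For orientation, a minimal sketch of how the plugin exported by that bundle might be registered in a consuming elizaOS project — the `plugins` array and character shape are assumptions about the host project, not something this diff establishes:

// Hedged sketch: wiring the published plugin into an elizaOS agent (assumed host-project convention).
import elizaOSCloudPlugin from "@elizaos/plugin-elizacloud";

export const character = {
  name: "ExampleAgent",           // hypothetical agent name
  plugins: [elizaOSCloudPlugin],  // registers the TEXT_*, OBJECT_*, IMAGE_* and TEXT_EMBEDDING handlers defined in the bundle below
};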
@@ -0,0 +1,772 @@
+import { createRequire } from "node:module";
+var __create = Object.create;
+var __getProtoOf = Object.getPrototypeOf;
+var __defProp = Object.defineProperty;
+var __getOwnPropNames = Object.getOwnPropertyNames;
+var __hasOwnProp = Object.prototype.hasOwnProperty;
+var __toESM = (mod, isNodeMode, target) => {
+  target = mod != null ? __create(__getProtoOf(mod)) : {};
+  const to = isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target;
+  for (let key of __getOwnPropNames(mod))
+    if (!__hasOwnProp.call(to, key))
+      __defProp(to, key, {
+        get: () => mod[key],
+        enumerable: true
+      });
+  return to;
+};
+var __require = /* @__PURE__ */ createRequire(import.meta.url);
+
+// src/index.ts
+import { logger as logger9, ModelType as ModelType5 } from "@elizaos/core";
+
+// src/init.ts
+import { logger as logger2 } from "@elizaos/core";
+
+// src/utils/config.ts
+import { logger } from "@elizaos/core";
+function getSetting(runtime, key, defaultValue) {
+  return runtime.getSetting(key) ?? process.env[key] ?? defaultValue;
+}
+function isBrowser() {
+  return typeof globalThis !== "undefined" && typeof globalThis.document !== "undefined";
+}
+function isProxyMode(runtime) {
+  return isBrowser() && !!getSetting(runtime, "ELIZAOS_CLOUD_BROWSER_BASE_URL");
+}
+function getAuthHeader(runtime, forEmbedding = false) {
+  if (isBrowser())
+    return {};
+  const key = forEmbedding ? getEmbeddingApiKey(runtime) : getApiKey(runtime);
+  return key ? { Authorization: `Bearer ${key}` } : {};
+}
+function getBaseURL(runtime) {
+  const browserURL = getSetting(runtime, "ELIZAOS_CLOUD_BROWSER_BASE_URL");
+  const baseURL = isBrowser() && browserURL ? browserURL : getSetting(runtime, "ELIZAOS_CLOUD_BASE_URL", "https://www.elizacloud.ai/api/v1");
+  logger.debug(`[ELIZAOS_CLOUD] Default base URL: ${baseURL}`);
+  return baseURL;
+}
+function getEmbeddingBaseURL(runtime) {
+  const embeddingURL = isBrowser() ? getSetting(runtime, "ELIZAOS_CLOUD_BROWSER_EMBEDDING_URL") || getSetting(runtime, "ELIZAOS_CLOUD_BROWSER_BASE_URL") : getSetting(runtime, "ELIZAOS_CLOUD_EMBEDDING_URL");
+  if (embeddingURL) {
+    logger.debug(`[ELIZAOS_CLOUD] Using specific embedding base URL: ${embeddingURL}`);
+    return embeddingURL;
+  }
+  logger.debug("[ELIZAOS_CLOUD] Falling back to general base URL for embeddings.");
+  return getBaseURL(runtime);
+}
+function getApiKey(runtime) {
+  return getSetting(runtime, "ELIZAOS_CLOUD_API_KEY");
+}
+function getEmbeddingApiKey(runtime) {
+  const embeddingApiKey = getSetting(runtime, "ELIZAOS_CLOUD_EMBEDDING_API_KEY");
+  if (embeddingApiKey) {
+    logger.debug("[ELIZAOS_CLOUD] Using specific embedding API key (present)");
+    return embeddingApiKey;
+  }
+  logger.debug("[ELIZAOS_CLOUD] Falling back to general API key for embeddings.");
+  return getApiKey(runtime);
+}
+function getSmallModel(runtime) {
+  return getSetting(runtime, "ELIZAOS_CLOUD_SMALL_MODEL") ?? getSetting(runtime, "SMALL_MODEL", "gpt-4o-mini");
+}
+function getLargeModel(runtime) {
+  return getSetting(runtime, "ELIZAOS_CLOUD_LARGE_MODEL") ?? getSetting(runtime, "LARGE_MODEL", "gpt-4o");
+}
+function getImageDescriptionModel(runtime) {
+  return getSetting(runtime, "ELIZAOS_CLOUD_IMAGE_DESCRIPTION_MODEL", "gpt-4o-mini") ?? "gpt-4o-mini";
+}
+function getExperimentalTelemetry(runtime) {
+  const setting = getSetting(runtime, "ELIZAOS_CLOUD_EXPERIMENTAL_TELEMETRY", "false");
+  const normalizedSetting = String(setting).toLowerCase();
+  const result = normalizedSetting === "true";
+  logger.debug(`[ELIZAOS_CLOUD] Experimental telemetry in function: "${setting}" (type: ${typeof setting}, normalized: "${normalizedSetting}", result: ${result})`);
+  return result;
+}
+
+// src/init.ts
+function initializeOpenAI(_config, runtime) {
+  new Promise(async (resolve) => {
+    resolve();
+    try {
+      if (!getApiKey(runtime) && !isBrowser()) {
+        logger2.warn("ELIZAOS_CLOUD_API_KEY is not set in environment - ElizaOS Cloud functionality will be limited");
+        logger2.info("Get your API key from https://www.elizacloud.ai/dashboard/api-keys");
+        return;
+      }
+      try {
+        const baseURL = getBaseURL(runtime);
+        const response = await fetch(`${baseURL}/models`, {
+          headers: { ...getAuthHeader(runtime) }
+        });
+        if (!response.ok) {
+          logger2.warn(`ElizaOS Cloud API key validation failed: ${response.statusText}`);
+          logger2.warn("ElizaOS Cloud functionality will be limited until a valid API key is provided");
+          logger2.info("Get your API key from https://www.elizacloud.ai/dashboard/api-keys");
+        } else {
+          logger2.log("ElizaOS Cloud API key validated successfully");
+        }
+      } catch (fetchError) {
+        const message = fetchError instanceof Error ? fetchError.message : String(fetchError);
+        logger2.warn(`Error validating ElizaOS Cloud API key: ${message}`);
+        logger2.warn("ElizaOS Cloud functionality will be limited until a valid API key is provided");
+      }
+    } catch (error) {
+      const message = error?.errors?.map((e) => e.message).join(", ") || (error instanceof Error ? error.message : String(error));
+      logger2.warn(`ElizaOS Cloud plugin configuration issue: ${message} - You need to configure the ELIZAOS_CLOUD_API_KEY in your environment variables`);
+      logger2.info("Get your API key from https://www.elizacloud.ai/dashboard/api-keys");
+    }
+  });
+}
+
+// src/models/text.ts
+import { logger as logger3, ModelType } from "@elizaos/core";
+import { generateText } from "ai";
+
+// src/providers/openai.ts
+import { createOpenAI } from "@ai-sdk/openai";
+function createOpenAIClient(runtime) {
+  const baseURL = getBaseURL(runtime);
+  const apiKey = getApiKey(runtime) ?? (isProxyMode(runtime) ? "eliza-proxy" : undefined);
+  return createOpenAI({ apiKey: apiKey ?? "", baseURL });
+}
+
+// src/utils/events.ts
+import {
+  EventType
+} from "@elizaos/core";
+function emitModelUsageEvent(runtime, type, prompt, usage) {
+  runtime.emitEvent(EventType.MODEL_USED, {
+    provider: "openai",
+    type,
+    prompt,
+    tokens: {
+      prompt: usage.inputTokens,
+      completion: usage.outputTokens,
+      total: usage.totalTokens
+    }
+  });
+}
+
+// src/models/text.ts
+async function handleTextSmall(runtime, {
+  prompt,
+  stopSequences = [],
+  maxTokens = 8192,
+  temperature = 0.7,
+  frequencyPenalty = 0.7,
+  presencePenalty = 0.7
+}) {
+  const openai = createOpenAIClient(runtime);
+  const modelName = getSmallModel(runtime);
+  const experimentalTelemetry = getExperimentalTelemetry(runtime);
+  logger3.log(`[ELIZAOS_CLOUD] Using TEXT_SMALL model: ${modelName}`);
+  logger3.log(prompt);
+  const { text: openaiResponse, usage } = await generateText({
+    model: openai.languageModel(modelName),
+    prompt,
+    system: runtime.character.system ?? undefined,
+    temperature,
+    maxOutputTokens: maxTokens,
+    frequencyPenalty,
+    presencePenalty,
+    stopSequences,
+    experimental_telemetry: {
+      isEnabled: experimentalTelemetry
+    }
+  });
+  if (usage) {
+    emitModelUsageEvent(runtime, ModelType.TEXT_SMALL, prompt, usage);
+  }
+  return openaiResponse;
+}
+async function handleTextLarge(runtime, {
+  prompt,
+  stopSequences = [],
+  maxTokens = 8192,
+  temperature = 0.7,
+  frequencyPenalty = 0.7,
+  presencePenalty = 0.7
+}) {
+  const openai = createOpenAIClient(runtime);
+  const modelName = getLargeModel(runtime);
+  const experimentalTelemetry = getExperimentalTelemetry(runtime);
+  logger3.log(`[ELIZAOS_CLOUD] Using TEXT_LARGE model: ${modelName}`);
+  logger3.log(prompt);
+  const { text: openaiResponse, usage } = await generateText({
+    model: openai.languageModel(modelName),
+    prompt,
+    system: runtime.character.system ?? undefined,
+    temperature,
+    maxOutputTokens: maxTokens,
+    frequencyPenalty,
+    presencePenalty,
+    stopSequences,
+    experimental_telemetry: {
+      isEnabled: experimentalTelemetry
+    }
+  });
+  if (usage) {
+    emitModelUsageEvent(runtime, ModelType.TEXT_LARGE, prompt, usage);
+  }
+  return openaiResponse;
+}
+// src/models/object.ts
+import { logger as logger5, ModelType as ModelType2 } from "@elizaos/core";
+import { generateObject, JSONParseError as JSONParseError2 } from "ai";
+
+// src/utils/helpers.ts
+import { logger as logger4 } from "@elizaos/core";
+import { JSONParseError } from "ai";
+function getJsonRepairFunction() {
+  return async ({ text, error }) => {
+    try {
+      if (error instanceof JSONParseError) {
+        const cleanedText = text.replace(/```json\n|\n```|```/g, "");
+        JSON.parse(cleanedText);
+        return cleanedText;
+      }
+      return null;
+    } catch (jsonError) {
+      const message = jsonError instanceof Error ? jsonError.message : String(jsonError);
+      logger4.warn(`Failed to repair JSON text: ${message}`);
+      return null;
+    }
+  };
+}
+async function webStreamToNodeStream(webStream) {
+  try {
+    const { Readable } = await import("node:stream");
+    const reader = webStream.getReader();
+    return new Readable({
+      async read() {
+        try {
+          const { done, value } = await reader.read();
+          if (done) {
+            this.push(null);
+          } else {
+            this.push(value);
+          }
+        } catch (error) {
+          this.destroy(error);
+        }
+      },
+      destroy(error, callback) {
+        reader.cancel().finally(() => callback(error));
+      }
+    });
+  } catch (error) {
+    const message = error instanceof Error ? error.message : String(error);
+    logger4.error(`Failed to load node:stream module: ${message}`);
+    throw new Error(`Cannot convert stream: node:stream module unavailable. This feature requires a Node.js environment.`);
+  }
+}
+function parseImageDescriptionResponse(responseText) {
+  const titleMatch = responseText.match(/title[:\s]+(.+?)(?:\n|$)/i);
+  const title = titleMatch?.[1]?.trim() || "Image Analysis";
+  const description = responseText.replace(/title[:\s]+(.+?)(?:\n|$)/i, "").trim();
+  return { title, description };
+}
+
+// src/models/object.ts
+async function generateObjectByModelType(runtime, params, modelType, getModelFn) {
+  const openai = createOpenAIClient(runtime);
+  const modelName = getModelFn(runtime);
+  logger5.log(`[ELIZAOS_CLOUD] Using ${modelType} model: ${modelName}`);
+  const temperature = params.temperature ?? 0;
+  const schemaPresent = !!params.schema;
+  if (schemaPresent) {
+    logger5.info(`Using ${modelType} without schema validation (schema provided but output=no-schema)`);
+  }
+  try {
+    const { object, usage } = await generateObject({
+      model: openai.languageModel(modelName),
+      output: "no-schema",
+      prompt: params.prompt,
+      temperature,
+      experimental_repairText: getJsonRepairFunction()
+    });
+    if (usage) {
+      emitModelUsageEvent(runtime, modelType, params.prompt, usage);
+    }
+    return object;
+  } catch (error) {
+    if (error instanceof JSONParseError2) {
+      logger5.error(`[generateObject] Failed to parse JSON: ${error.message}`);
+      const repairFunction = getJsonRepairFunction();
+      const repairedJsonString = await repairFunction({
+        text: error.text,
+        error
+      });
+      if (repairedJsonString) {
+        try {
+          const repairedObject = JSON.parse(repairedJsonString);
+          logger5.info("[generateObject] Successfully repaired JSON.");
+          return repairedObject;
+        } catch (repairParseError) {
+          const message = repairParseError instanceof Error ? repairParseError.message : String(repairParseError);
+          logger5.error(`[generateObject] Failed to parse repaired JSON: ${message}`);
+          throw repairParseError;
+        }
+      } else {
+        logger5.error("[generateObject] JSON repair failed.");
+        throw error;
+      }
+    } else {
+      const message = error instanceof Error ? error.message : String(error);
+      logger5.error(`[generateObject] Unknown error: ${message}`);
+      throw error;
+    }
+  }
+}
+async function handleObjectSmall(runtime, params) {
+  return generateObjectByModelType(runtime, params, ModelType2.OBJECT_SMALL, getSmallModel);
+}
+async function handleObjectLarge(runtime, params) {
+  return generateObjectByModelType(runtime, params, ModelType2.OBJECT_LARGE, getLargeModel);
+}
+// src/models/embeddings.ts
+import { logger as logger6, ModelType as ModelType3, VECTOR_DIMS } from "@elizaos/core";
+async function handleTextEmbedding(runtime, params) {
+  const embeddingModelName = getSetting(runtime, "ELIZAOS_CLOUD_EMBEDDING_MODEL", "text-embedding-3-small");
+  const embeddingDimension = Number.parseInt(getSetting(runtime, "ELIZAOS_CLOUD_EMBEDDING_DIMENSIONS", "1536") || "1536", 10);
+  if (!Object.values(VECTOR_DIMS).includes(embeddingDimension)) {
+    const errorMsg = `Invalid embedding dimension: ${embeddingDimension}. Must be one of: ${Object.values(VECTOR_DIMS).join(", ")}`;
+    logger6.error(errorMsg);
+    throw new Error(errorMsg);
+  }
+  if (params === null) {
+    logger6.debug("Creating test embedding for initialization");
+    const testVector = Array(embeddingDimension).fill(0);
+    testVector[0] = 0.1;
+    return testVector;
+  }
+  let text;
+  if (typeof params === "string") {
+    text = params;
+  } else if (typeof params === "object" && params.text) {
+    text = params.text;
+  } else {
+    logger6.warn("Invalid input format for embedding");
+    const fallbackVector = Array(embeddingDimension).fill(0);
+    fallbackVector[0] = 0.2;
+    return fallbackVector;
+  }
+  if (!text.trim()) {
+    logger6.warn("Empty text for embedding");
+    const emptyVector = Array(embeddingDimension).fill(0);
+    emptyVector[0] = 0.3;
+    return emptyVector;
+  }
+  const embeddingBaseURL = getEmbeddingBaseURL(runtime);
+  try {
+    const response = await fetch(`${embeddingBaseURL}/embeddings`, {
+      method: "POST",
+      headers: {
+        ...getAuthHeader(runtime, true),
+        "Content-Type": "application/json"
+      },
+      body: JSON.stringify({
+        model: embeddingModelName,
+        input: text
+      })
+    });
+    if (!response.ok) {
+      logger6.error(`ElizaOS Cloud API error: ${response.status} - ${response.statusText}`);
+      const errorVector = Array(embeddingDimension).fill(0);
+      errorVector[0] = 0.4;
+      return errorVector;
+    }
+    const data = await response.json();
+    if (!data?.data?.[0]?.embedding) {
+      logger6.error("API returned invalid structure");
+      const errorVector = Array(embeddingDimension).fill(0);
+      errorVector[0] = 0.5;
+      return errorVector;
+    }
+    const embedding = data.data[0].embedding;
+    if (data.usage) {
+      const usage = {
+        inputTokens: data.usage.prompt_tokens,
+        outputTokens: 0,
+        totalTokens: data.usage.total_tokens
+      };
+      emitModelUsageEvent(runtime, ModelType3.TEXT_EMBEDDING, text, usage);
+    }
+    logger6.log(`Got valid embedding with length ${embedding.length}`);
+    return embedding;
+  } catch (error) {
+    const message = error instanceof Error ? error.message : String(error);
+    logger6.error(`Error generating embedding: ${message}`);
+    const errorVector = Array(embeddingDimension).fill(0);
+    errorVector[0] = 0.6;
+    return errorVector;
+  }
+}
+// src/models/image.ts
+import { logger as logger7, ModelType as ModelType4 } from "@elizaos/core";
+async function handleImageGeneration(runtime, params) {
+  const numImages = params.n || 1;
+  const size = params.size || "1024x1024";
+  const prompt = params.prompt;
+  const modelName = "google/gemini-2.5-flash-image-preview";
+  logger7.log(`[ELIZAOS_CLOUD] Using IMAGE model: ${modelName}`);
+  const baseURL = getBaseURL(runtime);
+  const aspectRatioMap = {
+    "1024x1024": "1:1",
+    "1792x1024": "16:9",
+    "1024x1792": "9:16"
+  };
+  const aspectRatio = aspectRatioMap[size] || "1:1";
+  try {
+    const response = await fetch(`${baseURL}/generate-image`, {
+      method: "POST",
+      headers: {
+        ...getAuthHeader(runtime),
+        "Content-Type": "application/json"
+      },
+      body: JSON.stringify({
+        prompt,
+        numImages,
+        aspectRatio
+      })
+    });
+    if (!response.ok) {
+      const errorText = await response.text();
+      throw new Error(`Failed to generate image: ${response.status} ${errorText}`);
+    }
+    const data = await response.json();
+    const typedData = data;
+    return typedData.images.map((img) => ({
+      url: img.url || img.image
+    }));
+  } catch (error) {
+    const message = error instanceof Error ? error.message : String(error);
+    logger7.error(`[ELIZAOS_CLOUD] Image generation error: ${message}`);
+    throw error;
+  }
+}
+async function handleImageDescription(runtime, params) {
+  let imageUrl;
+  let promptText;
+  const modelName = getImageDescriptionModel(runtime);
+  logger7.log(`[ELIZAOS_CLOUD] Using IMAGE_DESCRIPTION model: ${modelName}`);
+  const maxTokens = Number.parseInt(getSetting(runtime, "ELIZAOS_CLOUD_IMAGE_DESCRIPTION_MAX_TOKENS", "8192") || "8192", 10);
+  if (typeof params === "string") {
+    imageUrl = params;
+    promptText = "Please analyze this image and provide a title and detailed description.";
+  } else {
+    imageUrl = params.imageUrl;
+    promptText = params.prompt || "Please analyze this image and provide a title and detailed description.";
+  }
+  const messages = [
+    {
+      role: "user",
+      content: [
+        { type: "text", text: promptText },
+        { type: "image_url", image_url: { url: imageUrl } }
+      ]
+    }
+  ];
+  const baseURL = getBaseURL(runtime);
+  try {
+    const requestBody = {
+      model: modelName,
+      messages,
+      max_tokens: maxTokens
+    };
+    const response = await fetch(`${baseURL}/chat/completions`, {
+      method: "POST",
+      headers: {
+        "Content-Type": "application/json",
+        ...getAuthHeader(runtime)
+      },
+      body: JSON.stringify(requestBody)
+    });
+    if (!response.ok) {
+      throw new Error(`ElizaOS Cloud API error: ${response.status}`);
+    }
+    const result = await response.json();
+    const typedResult = result;
+    const content = typedResult.choices?.[0]?.message?.content;
+    if (typedResult.usage) {
+      emitModelUsageEvent(runtime, ModelType4.IMAGE_DESCRIPTION, typeof params === "string" ? params : params.prompt || "", {
+        inputTokens: typedResult.usage.prompt_tokens,
+        outputTokens: typedResult.usage.completion_tokens,
+        totalTokens: typedResult.usage.total_tokens
+      });
+    }
+    if (!content) {
+      return {
+        title: "Failed to analyze image",
+        description: "No response from API"
+      };
+    }
+    const isCustomPrompt = typeof params === "object" && params.prompt && params.prompt !== "Please analyze this image and provide a title and detailed description.";
+    if (isCustomPrompt) {
+      return content;
+    }
+    const processedResult = parseImageDescriptionResponse(content);
+    return processedResult;
+  } catch (error) {
+    const message = error instanceof Error ? error.message : String(error);
+    logger7.error(`Error analyzing image: ${message}`);
+    return {
+      title: "Failed to analyze image",
+      description: `Error: ${message}`
+    };
+  }
+}
+// src/models/speech.ts
+import { logger as logger8 } from "@elizaos/core";
+async function fetchTextToSpeech(runtime, options) {
+  const defaultModel = getSetting(runtime, "ELIZAOS_CLOUD_TTS_MODEL", "gpt-4o-mini-tts");
+  const defaultVoice = getSetting(runtime, "ELIZAOS_CLOUD_TTS_VOICE", "nova");
+  const defaultInstructions = getSetting(runtime, "ELIZAOS_CLOUD_TTS_INSTRUCTIONS", "");
+  const baseURL = getBaseURL(runtime);
+  const model = options.model || defaultModel;
+  const voice = options.voice || defaultVoice;
+  const instructions = options.instructions ?? defaultInstructions;
+  const format = options.format || "mp3";
+  try {
+    const res = await fetch(`${baseURL}/audio/speech`, {
+      method: "POST",
+      headers: {
+        ...getAuthHeader(runtime),
+        "Content-Type": "application/json",
+        ...format === "mp3" ? { Accept: "audio/mpeg" } : {}
+      },
+      body: JSON.stringify({
+        model,
+        voice,
+        input: options.text,
+        format,
+        ...instructions && { instructions }
+      })
+    });
+    if (!res.ok) {
+      const err = await res.text();
+      throw new Error(`ElizaOS Cloud TTS error ${res.status}: ${err}`);
+    }
+    if (!res.body) {
+      throw new Error("ElizaOS Cloud TTS response body is null");
+    }
+    if (!isBrowser()) {
+      return await webStreamToNodeStream(res.body);
+    }
+    return res.body;
+  } catch (err) {
+    const message = err instanceof Error ? err.message : String(err);
+    throw new Error(`Failed to fetch speech from ElizaOS Cloud TTS: ${message}`);
+  }
+}
+// src/index.ts
+var elizaOSCloudPlugin = {
+  name: "elizaOSCloud",
+  description: "ElizaOS Cloud plugin - Multi-model AI generation with text, image, and video support",
+  config: {
+    ELIZAOS_CLOUD_API_KEY: process.env.ELIZAOS_CLOUD_API_KEY,
+    ELIZAOS_CLOUD_BASE_URL: process.env.ELIZAOS_CLOUD_BASE_URL,
+    ELIZAOS_CLOUD_SMALL_MODEL: process.env.ELIZAOS_CLOUD_SMALL_MODEL,
+    ELIZAOS_CLOUD_LARGE_MODEL: process.env.ELIZAOS_CLOUD_LARGE_MODEL,
+    SMALL_MODEL: process.env.SMALL_MODEL,
+    LARGE_MODEL: process.env.LARGE_MODEL,
+    ELIZAOS_CLOUD_EMBEDDING_MODEL: process.env.ELIZAOS_CLOUD_EMBEDDING_MODEL,
+    ELIZAOS_CLOUD_EMBEDDING_API_KEY: process.env.ELIZAOS_CLOUD_EMBEDDING_API_KEY,
+    ELIZAOS_CLOUD_EMBEDDING_URL: process.env.ELIZAOS_CLOUD_EMBEDDING_URL,
+    ELIZAOS_CLOUD_EMBEDDING_DIMENSIONS: process.env.ELIZAOS_CLOUD_EMBEDDING_DIMENSIONS,
+    ELIZAOS_CLOUD_IMAGE_DESCRIPTION_MODEL: process.env.ELIZAOS_CLOUD_IMAGE_DESCRIPTION_MODEL,
+    ELIZAOS_CLOUD_IMAGE_DESCRIPTION_MAX_TOKENS: process.env.ELIZAOS_CLOUD_IMAGE_DESCRIPTION_MAX_TOKENS,
+    ELIZAOS_CLOUD_EXPERIMENTAL_TELEMETRY: process.env.ELIZAOS_CLOUD_EXPERIMENTAL_TELEMETRY
+  },
+  async init(config, runtime) {
+    initializeOpenAI(config, runtime);
+  },
+  models: {
+    [ModelType5.TEXT_EMBEDDING]: handleTextEmbedding,
+    [ModelType5.TEXT_SMALL]: handleTextSmall,
+    [ModelType5.TEXT_LARGE]: handleTextLarge,
+    [ModelType5.IMAGE]: handleImageGeneration,
+    [ModelType5.IMAGE_DESCRIPTION]: handleImageDescription,
+    [ModelType5.OBJECT_SMALL]: handleObjectSmall,
+    [ModelType5.OBJECT_LARGE]: handleObjectLarge
+  },
+  tests: [
+    {
+      name: "ELIZAOS_CLOUD_plugin_tests",
+      tests: [
+        {
+          name: "ELIZAOS_CLOUD_test_url_and_api_key_validation",
+          fn: async (runtime) => {
+            const baseURL = getBaseURL(runtime);
+            const response = await fetch(`${baseURL}/models`, {
+              headers: {
+                Authorization: `Bearer ${getApiKey(runtime)}`
+              }
+            });
+            const data = await response.json();
+            logger9.log({ data: data?.data?.length ?? "N/A" }, "Models Available");
+            if (!response.ok) {
+              throw new Error(`Failed to validate OpenAI API key: ${response.statusText}`);
+            }
+          }
+        },
+        {
+          name: "ELIZAOS_CLOUD_test_text_embedding",
+          fn: async (runtime) => {
+            try {
+              const embedding = await runtime.useModel(ModelType5.TEXT_EMBEDDING, {
+                text: "Hello, world!"
+              });
+              logger9.log({ embedding }, "embedding");
+            } catch (error) {
+              const message = error instanceof Error ? error.message : String(error);
+              logger9.error(`Error in test_text_embedding: ${message}`);
+              throw error;
+            }
+          }
+        },
+        {
+          name: "ELIZAOS_CLOUD_test_text_large",
+          fn: async (runtime) => {
+            try {
+              const text = await runtime.useModel(ModelType5.TEXT_LARGE, {
+                prompt: "What is the nature of reality in 10 words?"
+              });
+              if (text.length === 0) {
+                throw new Error("Failed to generate text");
+              }
+              logger9.log({ text }, "generated with test_text_large");
+            } catch (error) {
+              const message = error instanceof Error ? error.message : String(error);
+              logger9.error(`Error in test_text_large: ${message}`);
+              throw error;
+            }
+          }
+        },
+        {
+          name: "ELIZAOS_CLOUD_test_text_small",
+          fn: async (runtime) => {
+            try {
+              const text = await runtime.useModel(ModelType5.TEXT_SMALL, {
+                prompt: "What is the nature of reality in 10 words?"
+              });
+              if (text.length === 0) {
+                throw new Error("Failed to generate text");
+              }
+              logger9.log({ text }, "generated with test_text_small");
+            } catch (error) {
+              const message = error instanceof Error ? error.message : String(error);
+              logger9.error(`Error in test_text_small: ${message}`);
+              throw error;
+            }
+          }
+        },
+        {
+          name: "ELIZAOS_CLOUD_test_image_generation",
+          fn: async (runtime) => {
+            logger9.log("ELIZAOS_CLOUD_test_image_generation");
+            try {
+              const image = await runtime.useModel(ModelType5.IMAGE, {
+                prompt: "A beautiful sunset over a calm ocean",
+                n: 1,
+                size: "1024x1024"
+              });
+              logger9.log({ image }, "generated with test_image_generation");
+            } catch (error) {
+              const message = error instanceof Error ? error.message : String(error);
+              logger9.error(`Error in test_image_generation: ${message}`);
+              throw error;
+            }
+          }
+        },
+        {
+          name: "image-description",
+          fn: async (runtime) => {
+            try {
+              logger9.log("ELIZAOS_CLOUD_test_image_description");
+              try {
+                const result = await runtime.useModel(ModelType5.IMAGE_DESCRIPTION, "https://upload.wikimedia.org/wikipedia/commons/thumb/1/1c/Vitalik_Buterin_TechCrunch_London_2015_%28cropped%29.jpg/537px-Vitalik_Buterin_TechCrunch_London_2015_%28cropped%29.jpg");
+                if (result && typeof result === "object" && "title" in result && "description" in result) {
+                  logger9.log({ result }, "Image description");
+                } else {
+                  logger9.error("Invalid image description result format:", result);
+                }
+              } catch (e) {
+                const message = e instanceof Error ? e.message : String(e);
+                logger9.error(`Error in image description test: ${message}`);
+              }
+            } catch (e) {
+              const message = e instanceof Error ? e.message : String(e);
+              logger9.error(`Error in ELIZAOS_CLOUD_test_image_description: ${message}`);
+            }
+          }
+        },
+        {
+          name: "ELIZAOS_CLOUD_test_transcription",
+          fn: async (runtime) => {
+            logger9.log("ELIZAOS_CLOUD_test_transcription");
+            try {
+              const response = await fetch("https://upload.wikimedia.org/wikipedia/en/4/40/Chris_Benoit_Voice_Message.ogg");
+              const arrayBuffer = await response.arrayBuffer();
+              const transcription = await runtime.useModel(ModelType5.TRANSCRIPTION, Buffer.from(new Uint8Array(arrayBuffer)));
+              logger9.log({ transcription }, "generated with test_transcription");
+            } catch (error) {
+              const message = error instanceof Error ? error.message : String(error);
+              logger9.error(`Error in test_transcription: ${message}`);
+              throw error;
+            }
+          }
+        },
+        {
+          name: "ELIZAOS_CLOUD_test_text_tokenizer_encode",
+          fn: async (runtime) => {
+            const prompt = "Hello tokenizer encode!";
+            const tokens = await runtime.useModel(ModelType5.TEXT_TOKENIZER_ENCODE, { prompt });
+            if (!Array.isArray(tokens) || tokens.length === 0) {
+              throw new Error("Failed to tokenize text: expected non-empty array of tokens");
+            }
+            logger9.log({ tokens }, "Tokenized output");
+          }
+        },
+        {
+          name: "ELIZAOS_CLOUD_test_text_tokenizer_decode",
+          fn: async (runtime) => {
+            const prompt = "Hello tokenizer decode!";
+            const tokens = await runtime.useModel(ModelType5.TEXT_TOKENIZER_ENCODE, { prompt });
+            const decodedText = await runtime.useModel(ModelType5.TEXT_TOKENIZER_DECODE, { tokens });
+            if (decodedText !== prompt) {
+              throw new Error(`Decoded text does not match original. Expected "${prompt}", got "${decodedText}"`);
+            }
+            logger9.log({ decodedText }, "Decoded text");
+          }
+        },
+        {
+          name: "ELIZAOS_CLOUD_test_text_to_speech",
+          fn: async (runtime) => {
+            try {
+              const response = await fetchTextToSpeech(runtime, {
+                text: "Hello, this is a test for text-to-speech."
+              });
+              if (!response) {
+                throw new Error("Failed to generate speech");
+              }
+              logger9.log("Generated speech successfully");
+            } catch (error) {
+              const message = error instanceof Error ? error.message : String(error);
+              logger9.error(`Error in ELIZAOS_CLOUD_test_text_to_speech: ${message}`);
+              throw error;
+            }
+          }
+        }
+      ]
+    }
+  ]
+};
+var src_default = elizaOSCloudPlugin;
+export {
+  elizaOSCloudPlugin,
+  src_default as default
+};
+
+//# debugId=78F02ACCC44B517064756E2164756E21