@elizaos/plugin-elizacloud 1.7.0-alpha.0 → 1.7.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
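For reference, a minimal TypeScript sketch (not from the published package) of the setting-resolution order implemented by the new getSetting helper in the -26,7 +26,11 hunk below: a runtime-provided value is coerced to a string when present, otherwise process.env and then the caller-supplied default are used. The RuntimeLike interface is a hypothetical stand-in for the ElizaOS runtime type.

    // Hypothetical stand-in for the ElizaOS runtime interface.
    interface RuntimeLike {
      getSetting(key: string): string | number | boolean | null | undefined;
    }

    // Mirrors the fallback order shown in the diff: runtime setting (coerced
    // to string) -> process.env -> caller-supplied default.
    function getSetting(
      runtime: RuntimeLike,
      key: string,
      defaultValue?: string
    ): string | undefined {
      const value = runtime.getSetting(key);
      if (value !== undefined && value !== null) {
        return String(value);
      }
      return process.env[key] ?? defaultValue;
    }

    // Example: a numeric runtime setting is returned as the string "2048".
    const demoRuntime: RuntimeLike = { getSetting: () => 2048 };
    console.log(getSetting(demoRuntime, "ELIZAOS_CLOUD_EMBEDDING_DIMENSIONS", "1536"));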
@@ -18,7 +18,7 @@ var __toESM = (mod, isNodeMode, target) => {
  var __require = /* @__PURE__ */ createRequire(import.meta.url);

  // src/index.ts
- import { logger as logger13, ModelType as ModelType6 } from "@elizaos/core";
+ import { logger as logger9, ModelType as ModelType5 } from "@elizaos/core";

  // src/init.ts
  import { logger as logger2 } from "@elizaos/core";
@@ -26,7 +26,11 @@ import { logger as logger2 } from "@elizaos/core";
  // src/utils/config.ts
  import { logger } from "@elizaos/core";
  function getSetting(runtime, key, defaultValue) {
- return runtime.getSetting(key) ?? process.env[key] ?? defaultValue;
+ const value = runtime.getSetting(key);
+ if (value !== undefined && value !== null) {
+ return String(value);
+ }
+ return process.env[key] ?? defaultValue;
  }
  function isBrowser() {
  return typeof globalThis !== "undefined" && typeof globalThis.document !== "undefined";
@@ -124,7 +128,7 @@ function initializeOpenAI(_config, runtime) {

  // src/models/text.ts
  import { logger as logger3, ModelType } from "@elizaos/core";
- import { generateText } from "ai";
+ import { generateText, streamText } from "ai";

  // src/providers/openai.ts
  import { createOpenAI } from "@ai-sdk/openai";
@@ -139,33 +143,36 @@ import {
  EventType
  } from "@elizaos/core";
  function emitModelUsageEvent(runtime, type, prompt, usage) {
+ const truncatedPrompt = typeof prompt === "string" ? prompt.length > 200 ? `${prompt.slice(0, 200)}…` : prompt : "";
+ const inputTokens = Number(usage.inputTokens || 0);
+ const outputTokens = Number(usage.outputTokens || 0);
+ const totalTokens = Number(usage.totalTokens != null ? usage.totalTokens : inputTokens + outputTokens);
  runtime.emitEvent(EventType.MODEL_USED, {
- provider: "openai",
+ runtime,
+ source: "elizacloud",
+ provider: "elizacloud",
  type,
- prompt,
+ prompt: truncatedPrompt,
  tokens: {
- prompt: usage.inputTokens,
- completion: usage.outputTokens,
- total: usage.totalTokens
+ prompt: inputTokens,
+ completion: outputTokens,
+ total: totalTokens
  }
  });
  }

  // src/models/text.ts
- async function handleTextSmall(runtime, {
- prompt,
- stopSequences = [],
- maxTokens = 8192,
- temperature = 0.7,
- frequencyPenalty = 0.7,
- presencePenalty = 0.7
- }) {
+ function buildGenerateParams(runtime, modelType, params) {
+ const { prompt, stopSequences = [] } = params;
+ const temperature = params.temperature ?? 0.7;
+ const frequencyPenalty = params.frequencyPenalty ?? 0.7;
+ const presencePenalty = params.presencePenalty ?? 0.7;
+ const maxTokens = params.maxOutputTokens ?? params.maxTokens ?? 8192;
  const openai = createOpenAIClient(runtime);
- const modelName = getSmallModel(runtime);
+ const modelName = modelType === ModelType.TEXT_SMALL ? getSmallModel(runtime) : getLargeModel(runtime);
+ const modelLabel = modelType === ModelType.TEXT_SMALL ? "TEXT_SMALL" : "TEXT_LARGE";
  const experimentalTelemetry = getExperimentalTelemetry(runtime);
- logger3.log(`[ELIZAOS_CLOUD] Using TEXT_SMALL model: ${modelName}`);
- logger3.log(prompt);
- const { text: openaiResponse, usage } = await generateText({
+ const generateParams = {
  model: openai.languageModel(modelName),
  prompt,
  system: runtime.character.system ?? undefined,
@@ -177,42 +184,50 @@ async function handleTextSmall(runtime, {
  experimental_telemetry: {
  isEnabled: experimentalTelemetry
  }
- });
- if (usage) {
- emitModelUsageEvent(runtime, ModelType.TEXT_SMALL, prompt, usage);
- }
- return openaiResponse;
+ };
+ return { generateParams, modelName, modelLabel, prompt };
+ }
+ function handleStreamingGeneration(runtime, modelType, generateParams, prompt, modelLabel) {
+ logger3.debug(`[ELIZAOS_CLOUD] Streaming text with ${modelLabel} model`);
+ const streamResult = streamText(generateParams);
+ return {
+ textStream: streamResult.textStream,
+ text: streamResult.text,
+ usage: streamResult.usage.then((usage) => {
+ if (usage) {
+ emitModelUsageEvent(runtime, modelType, prompt, usage);
+ const inputTokens = usage.inputTokens ?? 0;
+ const outputTokens = usage.outputTokens ?? 0;
+ return {
+ promptTokens: inputTokens,
+ completionTokens: outputTokens,
+ totalTokens: inputTokens + outputTokens
+ };
+ }
+ return;
+ }),
+ finishReason: streamResult.finishReason
+ };
  }
- async function handleTextLarge(runtime, {
- prompt,
- stopSequences = [],
- maxTokens = 8192,
- temperature = 0.7,
- frequencyPenalty = 0.7,
- presencePenalty = 0.7
- }) {
- const openai = createOpenAIClient(runtime);
- const modelName = getLargeModel(runtime);
- const experimentalTelemetry = getExperimentalTelemetry(runtime);
- logger3.log(`[ELIZAOS_CLOUD] Using TEXT_LARGE model: ${modelName}`);
+ async function generateTextWithModel(runtime, modelType, params) {
+ const { generateParams, modelName, modelLabel, prompt } = buildGenerateParams(runtime, modelType, params);
+ logger3.debug(`[ELIZAOS_CLOUD] Generating text with ${modelLabel} model: ${modelName}`);
+ if (params.stream) {
+ return handleStreamingGeneration(runtime, modelType, generateParams, prompt, modelLabel);
+ }
+ logger3.log(`[ELIZAOS_CLOUD] Using ${modelLabel} model: ${modelName}`);
  logger3.log(prompt);
- const { text: openaiResponse, usage } = await generateText({
- model: openai.languageModel(modelName),
- prompt,
- system: runtime.character.system ?? undefined,
- temperature,
- maxOutputTokens: maxTokens,
- frequencyPenalty,
- presencePenalty,
- stopSequences,
- experimental_telemetry: {
- isEnabled: experimentalTelemetry
- }
- });
- if (usage) {
- emitModelUsageEvent(runtime, ModelType.TEXT_LARGE, prompt, usage);
+ const response = await generateText(generateParams);
+ if (response.usage) {
+ emitModelUsageEvent(runtime, modelType, prompt, response.usage);
  }
- return openaiResponse;
+ return response.text;
+ }
+ async function handleTextSmall(runtime, params) {
+ return generateTextWithModel(runtime, ModelType.TEXT_SMALL, params);
+ }
+ async function handleTextLarge(runtime, params) {
+ return generateTextWithModel(runtime, ModelType.TEXT_LARGE, params);
  }
  // src/models/object.ts
  import { logger as logger5, ModelType as ModelType2 } from "@elizaos/core";
@@ -237,31 +252,6 @@ function getJsonRepairFunction() {
  }
  };
  }
- function detectAudioMimeType(buffer) {
- if (buffer.length < 12) {
- return "application/octet-stream";
- }
- if (buffer[0] === 82 && buffer[1] === 73 && buffer[2] === 70 && buffer[3] === 70 && buffer[8] === 87 && buffer[9] === 65 && buffer[10] === 86 && buffer[11] === 69) {
- return "audio/wav";
- }
- if (buffer[0] === 73 && buffer[1] === 68 && buffer[2] === 51 || buffer[0] === 255 && (buffer[1] & 224) === 224) {
- return "audio/mpeg";
- }
- if (buffer[0] === 79 && buffer[1] === 103 && buffer[2] === 103 && buffer[3] === 83) {
- return "audio/ogg";
- }
- if (buffer[0] === 102 && buffer[1] === 76 && buffer[2] === 97 && buffer[3] === 67) {
- return "audio/flac";
- }
- if (buffer[4] === 102 && buffer[5] === 116 && buffer[6] === 121 && buffer[7] === 112) {
- return "audio/mp4";
- }
- if (buffer[0] === 26 && buffer[1] === 69 && buffer[2] === 223 && buffer[3] === 163) {
- return "audio/webm";
- }
- logger4.warn("Could not detect audio format from buffer, using generic binary type");
- return "application/octet-stream";
- }
  async function webStreamToNodeStream(webStream) {
  try {
  const { Readable } = await import("node:stream");
@@ -355,7 +345,19 @@ async function handleObjectLarge(runtime, params) {
  }
  // src/models/embeddings.ts
  import { logger as logger6, ModelType as ModelType3, VECTOR_DIMS } from "@elizaos/core";
- async function handleTextEmbedding(runtime, params) {
+ var MAX_BATCH_SIZE = 100;
+ function extractRateLimitInfo(response) {
+ return {
+ remainingRequests: parseInt(response.headers.get("x-ratelimit-remaining-requests") || "", 10) || undefined,
+ remainingTokens: parseInt(response.headers.get("x-ratelimit-remaining-tokens") || "", 10) || undefined,
+ limitRequests: parseInt(response.headers.get("x-ratelimit-limit-requests") || "", 10) || undefined,
+ limitTokens: parseInt(response.headers.get("x-ratelimit-limit-tokens") || "", 10) || undefined,
+ resetRequests: response.headers.get("x-ratelimit-reset-requests") || undefined,
+ resetTokens: response.headers.get("x-ratelimit-reset-tokens") || undefined,
+ retryAfter: parseInt(response.headers.get("retry-after") || "", 10) || undefined
+ };
+ }
+ function getEmbeddingConfig(runtime) {
  const embeddingModelName = getSetting(runtime, "ELIZAOS_CLOUD_EMBEDDING_MODEL", "text-embedding-3-small");
  const embeddingDimension = Number.parseInt(getSetting(runtime, "ELIZAOS_CLOUD_EMBEDDING_DIMENSIONS", "1536") || "1536", 10);
  if (!Object.values(VECTOR_DIMS).includes(embeddingDimension)) {
@@ -363,11 +365,23 @@ async function handleTextEmbedding(runtime, params) {
  logger6.error(errorMsg);
  throw new Error(errorMsg);
  }
+ return { embeddingModelName, embeddingDimension };
+ }
+ function createErrorVector(dimension, marker) {
+ const vector = Array(dimension).fill(0);
+ vector[0] = marker;
+ return vector;
+ }
+ async function handleTextEmbedding(runtime, params) {
+ const { embeddingDimension } = getEmbeddingConfig(runtime);
  if (params === null) {
  logger6.debug("Creating test embedding for initialization");
- const testVector = Array(embeddingDimension).fill(0);
- testVector[0] = 0.1;
- return testVector;
+ return createErrorVector(embeddingDimension, 0.1);
+ }
+ const anyParams = params;
+ if (typeof anyParams === "object" && "texts" in anyParams && Array.isArray(anyParams.texts)) {
+ logger6.debug(`[Embeddings] Batch mode: ${anyParams.texts.length} texts`);
+ return await handleBatchTextEmbedding(runtime, anyParams.texts);
  }
  let text;
  if (typeof params === "string") {
@@ -376,65 +390,131 @@ async function handleTextEmbedding(runtime, params) {
  text = params.text;
  } else {
  logger6.warn("Invalid input format for embedding");
- const fallbackVector = Array(embeddingDimension).fill(0);
- fallbackVector[0] = 0.2;
- return fallbackVector;
+ return createErrorVector(embeddingDimension, 0.2);
  }
  if (!text.trim()) {
  logger6.warn("Empty text for embedding");
- const emptyVector = Array(embeddingDimension).fill(0);
- emptyVector[0] = 0.3;
- return emptyVector;
+ return createErrorVector(embeddingDimension, 0.3);
  }
+ const results = await handleBatchTextEmbedding(runtime, [text]);
+ return results[0];
+ }
+ async function handleBatchTextEmbedding(runtime, texts) {
+ const { embeddingModelName, embeddingDimension } = getEmbeddingConfig(runtime);
  const embeddingBaseURL = getEmbeddingBaseURL(runtime);
- try {
- const response = await fetch(`${embeddingBaseURL}/embeddings`, {
- method: "POST",
- headers: {
- ...getAuthHeader(runtime, true),
- "Content-Type": "application/json"
- },
- body: JSON.stringify({
- model: embeddingModelName,
- input: text
- })
- });
- if (!response.ok) {
- logger6.error(`ElizaOS Cloud API error: ${response.status} - ${response.statusText}`);
- const errorVector = Array(embeddingDimension).fill(0);
- errorVector[0] = 0.4;
- return errorVector;
- }
- const data = await response.json();
- if (!data?.data?.[0]?.embedding) {
- logger6.error("API returned invalid structure");
- const errorVector = Array(embeddingDimension).fill(0);
- errorVector[0] = 0.5;
- return errorVector;
+ if (!texts || texts.length === 0) {
+ logger6.warn("[BatchEmbeddings] Empty texts array");
+ return [];
+ }
+ const validTexts = [];
+ const results = new Array(texts.length);
+ for (let i = 0;i < texts.length; i++) {
+ const text = texts[i]?.trim();
+ if (text) {
+ validTexts.push({ text, originalIndex: i });
+ } else {
+ results[i] = createErrorVector(embeddingDimension, 0.3);
  }
- const embedding = data.data[0].embedding;
- if (data.usage) {
- const usage = {
- inputTokens: data.usage.prompt_tokens,
- outputTokens: 0,
- totalTokens: data.usage.total_tokens
- };
- emitModelUsageEvent(runtime, ModelType3.TEXT_EMBEDDING, text, usage);
+ }
+ if (validTexts.length === 0) {
+ logger6.warn("[BatchEmbeddings] All texts were empty");
+ return results;
+ }
+ for (let batchStart = 0;batchStart < validTexts.length; batchStart += MAX_BATCH_SIZE) {
+ const batchEnd = Math.min(batchStart + MAX_BATCH_SIZE, validTexts.length);
+ const batch = validTexts.slice(batchStart, batchEnd);
+ const batchTexts = batch.map((b) => b.text);
+ logger6.info(`[BatchEmbeddings] Processing batch ${Math.floor(batchStart / MAX_BATCH_SIZE) + 1}/${Math.ceil(validTexts.length / MAX_BATCH_SIZE)}: ${batch.length} texts`);
+ try {
+ const response = await fetch(`${embeddingBaseURL}/embeddings`, {
+ method: "POST",
+ headers: {
+ ...getAuthHeader(runtime, true),
+ "Content-Type": "application/json"
+ },
+ body: JSON.stringify({
+ model: embeddingModelName,
+ input: batchTexts
+ })
+ });
+ const rateLimitInfo = extractRateLimitInfo(response);
+ if (rateLimitInfo.remainingRequests !== undefined && rateLimitInfo.remainingRequests < 50) {
+ logger6.warn(`[BatchEmbeddings] Rate limit: ${rateLimitInfo.remainingRequests}/${rateLimitInfo.limitRequests} requests remaining`);
+ }
+ if (response.status === 429) {
+ const retryAfter = rateLimitInfo.retryAfter || 30;
+ logger6.warn(`[BatchEmbeddings] Rate limited, waiting ${retryAfter}s...`);
+ await new Promise((resolve) => setTimeout(resolve, retryAfter * 1000));
+ const retryResponse = await fetch(`${embeddingBaseURL}/embeddings`, {
+ method: "POST",
+ headers: {
+ ...getAuthHeader(runtime, true),
+ "Content-Type": "application/json"
+ },
+ body: JSON.stringify({
+ model: embeddingModelName,
+ input: batchTexts
+ })
+ });
+ if (!retryResponse.ok) {
+ logger6.error(`[BatchEmbeddings] Retry failed: ${retryResponse.status}`);
+ for (const item of batch) {
+ results[item.originalIndex] = createErrorVector(embeddingDimension, 0.4);
+ }
+ continue;
+ }
+ const retryData = await retryResponse.json();
+ if (retryData?.data) {
+ for (const item of retryData.data) {
+ const originalIndex = batch[item.index].originalIndex;
+ results[originalIndex] = item.embedding;
+ }
+ logger6.info(`[BatchEmbeddings] Retry successful for ${batch.length} embeddings`);
+ }
+ continue;
+ }
+ if (!response.ok) {
+ logger6.error(`[BatchEmbeddings] API error: ${response.status} - ${response.statusText}`);
+ for (const item of batch) {
+ results[item.originalIndex] = createErrorVector(embeddingDimension, 0.4);
+ }
+ continue;
+ }
+ const data = await response.json();
+ if (!data?.data || !Array.isArray(data.data)) {
+ logger6.error("[BatchEmbeddings] API returned invalid structure");
+ for (const item of batch) {
+ results[item.originalIndex] = createErrorVector(embeddingDimension, 0.5);
+ }
+ continue;
+ }
+ for (const item of data.data) {
+ const originalIndex = batch[item.index].originalIndex;
+ results[originalIndex] = item.embedding;
+ }
+ if (data.usage) {
+ const usage = {
+ inputTokens: data.usage.prompt_tokens,
+ outputTokens: 0,
+ totalTokens: data.usage.total_tokens
+ };
+ emitModelUsageEvent(runtime, ModelType3.TEXT_EMBEDDING, `batch:${batch.length}`, usage);
+ }
+ logger6.debug(`[BatchEmbeddings] Got ${batch.length} embeddings (${embeddingDimension}d), remaining: ${rateLimitInfo.remainingRequests ?? "unknown"}`);
+ } catch (error) {
+ const message = error instanceof Error ? error.message : String(error);
+ logger6.error(`[BatchEmbeddings] Error: ${message}`);
+ for (const item of batch) {
+ results[item.originalIndex] = createErrorVector(embeddingDimension, 0.6);
+ }
  }
- logger6.log(`Got valid embedding with length ${embedding.length}`);
- return embedding;
- } catch (error) {
- const message = error instanceof Error ? error.message : String(error);
- logger6.error(`Error generating embedding: ${message}`);
- const errorVector = Array(embeddingDimension).fill(0);
- errorVector[0] = 0.6;
- return errorVector;
  }
+ return results;
  }
  // src/models/image.ts
  import { logger as logger7, ModelType as ModelType4 } from "@elizaos/core";
  async function handleImageGeneration(runtime, params) {
- const numImages = params.n || 1;
+ const numImages = params.count || 1;
  const size = params.size || "1024x1024";
  const prompt = params.prompt;
  const modelName = getImageGenerationModel(runtime);
@@ -530,10 +610,6 @@ async function handleImageDescription(runtime, params) {
  description: "No response from API"
  };
  }
- const isCustomPrompt = typeof params === "object" && params.prompt && params.prompt !== "Please analyze this image and provide a title and detailed description.";
- if (isCustomPrompt) {
- return content;
- }
  const processedResult = parseImageDescriptionResponse(content);
  return processedResult;
  } catch (error) {
@@ -545,89 +621,8 @@ async function handleImageDescription(runtime, params) {
  };
  }
  }
- // src/models/transcription.ts
- import { logger as logger8 } from "@elizaos/core";
- async function handleTranscription(runtime, input) {
- let modelName = getSetting(runtime, "ELIZAOS_CLOUD_TRANSCRIPTION_MODEL", "gpt-4o-mini-transcribe");
- logger8.log(`[ELIZAOS_CLOUD] Using TRANSCRIPTION model: ${modelName}`);
- const baseURL = getBaseURL(runtime);
- let blob;
- let extraParams = null;
- if (input instanceof Blob || input instanceof File) {
- blob = input;
- } else if (Buffer.isBuffer(input)) {
- const detectedMimeType = detectAudioMimeType(input);
- logger8.debug(`Auto-detected audio MIME type: ${detectedMimeType}`);
- blob = new Blob([input], { type: detectedMimeType });
- } else if (typeof input === "object" && input !== null && "audio" in input && input.audio != null) {
- const params = input;
- if (!(params.audio instanceof Blob) && !(params.audio instanceof File) && !Buffer.isBuffer(params.audio)) {
- throw new Error("TRANSCRIPTION param 'audio' must be a Blob/File/Buffer.");
- }
- if (Buffer.isBuffer(params.audio)) {
- let mimeType = params.mimeType;
- if (!mimeType) {
- mimeType = detectAudioMimeType(params.audio);
- logger8.debug(`Auto-detected audio MIME type: ${mimeType}`);
- } else {
- logger8.debug(`Using provided MIME type: ${mimeType}`);
- }
- blob = new Blob([params.audio], { type: mimeType });
- } else {
- blob = params.audio;
- }
- extraParams = params;
- if (typeof params.model === "string" && params.model) {
- modelName = params.model;
- }
- } else {
- throw new Error("TRANSCRIPTION expects a Blob/File/Buffer or an object { audio: Blob/File/Buffer, mimeType?, language?, response_format?, timestampGranularities?, prompt?, temperature?, model? }");
- }
- const mime = blob.type || "audio/webm";
- const filename = blob.name || (mime.includes("mp3") || mime.includes("mpeg") ? "recording.mp3" : mime.includes("ogg") ? "recording.ogg" : mime.includes("wav") ? "recording.wav" : mime.includes("webm") ? "recording.webm" : "recording.bin");
- const formData = new FormData;
- formData.append("file", blob, filename);
- formData.append("model", String(modelName));
- if (extraParams) {
- if (typeof extraParams.language === "string") {
- formData.append("language", String(extraParams.language));
- }
- if (typeof extraParams.response_format === "string") {
- formData.append("response_format", String(extraParams.response_format));
- }
- if (typeof extraParams.prompt === "string") {
- formData.append("prompt", String(extraParams.prompt));
- }
- if (typeof extraParams.temperature === "number") {
- formData.append("temperature", String(extraParams.temperature));
- }
- if (Array.isArray(extraParams.timestampGranularities)) {
- for (const g of extraParams.timestampGranularities) {
- formData.append("timestamp_granularities[]", String(g));
- }
- }
- }
- try {
- const response = await fetch(`${baseURL}/audio/transcriptions`, {
- method: "POST",
- headers: {
- ...getAuthHeader(runtime)
- },
- body: formData
- });
- if (!response.ok) {
- throw new Error(`Failed to transcribe audio: ${response.status} ${response.statusText}`);
- }
- const data = await response.json();
- return data.text || "";
- } catch (error) {
- const message = error instanceof Error ? error.message : String(error);
- logger8.error(`TRANSCRIPTION error: ${message}`);
- throw error;
- }
- }
  // src/models/speech.ts
- import { logger as logger9 } from "@elizaos/core";
+ import { logger as logger8 } from "@elizaos/core";
  async function fetchTextToSpeech(runtime, options) {
  const defaultModel = getSetting(runtime, "ELIZAOS_CLOUD_TTS_MODEL", "gpt-4o-mini-tts");
  const defaultVoice = getSetting(runtime, "ELIZAOS_CLOUD_TTS_VOICE", "nova");
@@ -669,367 +664,10 @@ async function fetchTextToSpeech(runtime, options) {
  throw new Error(`Failed to fetch speech from ElizaOS Cloud TTS: ${message}`);
  }
  }
- async function handleTextToSpeech(runtime, input) {
- const options = typeof input === "string" ? { text: input } : input;
- const resolvedModel = options.model || getSetting(runtime, "ELIZAOS_CLOUD_TTS_MODEL", "gpt-4o-mini-tts");
- logger9.log(`[ELIZAOS_CLOUD] Using TEXT_TO_SPEECH model: ${resolvedModel}`);
- try {
- const speechStream = await fetchTextToSpeech(runtime, options);
- return speechStream;
- } catch (error) {
- const message = error instanceof Error ? error.message : String(error);
- logger9.error(`Error in TEXT_TO_SPEECH: ${message}`);
- throw error;
- }
- }
- // src/models/tokenization.ts
- import { ModelType as ModelType5 } from "@elizaos/core";
- import { encodingForModel } from "js-tiktoken";
- async function tokenizeText(model, prompt) {
- const modelName = model === ModelType5.TEXT_SMALL ? process.env.ELIZAOS_CLOUD_SMALL_MODEL ?? process.env.SMALL_MODEL ?? "gpt-5-nano" : process.env.LARGE_MODEL ?? "gpt-5-mini";
- const tokens = encodingForModel(modelName).encode(prompt);
- return tokens;
- }
- async function detokenizeText(model, tokens) {
- const modelName = model === ModelType5.TEXT_SMALL ? process.env.ELIZAOS_CLOUD_SMALL_MODEL ?? process.env.SMALL_MODEL ?? "gpt-5-nano" : process.env.ELIZAOS_CLOUD_LARGE_MODEL ?? process.env.LARGE_MODEL ?? "gpt-5-mini";
- return encodingForModel(modelName).decode(tokens);
- }
- async function handleTokenizerEncode(_runtime, { prompt, modelType = ModelType5.TEXT_LARGE }) {
- return await tokenizeText(modelType ?? ModelType5.TEXT_LARGE, prompt);
- }
- async function handleTokenizerDecode(_runtime, { tokens, modelType = ModelType5.TEXT_LARGE }) {
- return await detokenizeText(modelType ?? ModelType5.TEXT_LARGE, tokens);
- }
- // src/database/adapter.ts
- import { logger as logger10 } from "@elizaos/core";
- import pluginSql from "@elizaos/plugin-sql/node";
- var DEFAULT_CLOUD_URL = "https://www.elizacloud.ai";
- async function createCloudDatabaseAdapter(config) {
- const baseUrl = config.baseUrl || DEFAULT_CLOUD_URL;
- logger10.info({ src: "plugin:elizacloud", agentId: config.agentId }, "Provisioning cloud database");
- const response = await provisionCloudDatabase(config.apiKey, baseUrl, config.agentId);
- if (!response.success || !response.connectionUrl) {
- logger10.error({
- src: "plugin:elizacloud",
- error: response.error,
- agentId: config.agentId
- }, "Failed to provision cloud database");
- return null;
- }
- logger10.info({ src: "plugin:elizacloud", agentId: config.agentId }, "Cloud database provisioned successfully");
- const adapter = pluginSql.createDatabaseAdapter({ postgresUrl: response.connectionUrl }, config.agentId);
- logger10.info({ src: "plugin:elizacloud", agentId: config.agentId }, "Cloud database adapter created using PostgreSQL connection");
- return adapter;
- }
- async function provisionCloudDatabase(apiKey, baseUrl, agentId) {
- try {
- const response = await fetch(`${baseUrl}/api/v1/database/provision`, {
- method: "POST",
- headers: {
- Authorization: `Bearer ${apiKey}`,
- "Content-Type": "application/json"
- },
- body: JSON.stringify({
- agentId,
- type: "postgresql"
- })
- });
- if (!response.ok) {
- const errorText = await response.text();
- return {
- success: false,
- error: `Cloud database provisioning failed: ${response.status} ${errorText}`
- };
- }
- const data = await response.json();
- return {
- success: true,
- connectionUrl: data.connectionUrl,
- expiresAt: data.expiresAt
- };
- } catch (error) {
- const message = error instanceof Error ? error.message : String(error);
- return {
- success: false,
- error: `Network error during database provisioning: ${message}`
- };
- }
- }
-
- class CloudDatabaseAdapter {
- config;
- adapter = null;
- constructor(config) {
- this.config = config;
- }
- async initialize() {
- if (this.adapter) {
- return this.adapter;
- }
- this.adapter = await createCloudDatabaseAdapter(this.config);
- return this.adapter;
- }
- getAdapter() {
- return this.adapter;
- }
- }
-
- // src/storage/service.ts
- import { logger as logger11 } from "@elizaos/core";
- var DEFAULT_CLOUD_URL2 = "https://www.elizacloud.ai";
- var STORAGE_ENDPOINT = "/api/v1/storage/files";
- function createCloudStorageService(config) {
- return new CloudStorageService(config);
- }
-
- class CloudStorageService {
- apiKey;
- baseUrl;
- constructor(config) {
- this.apiKey = config.apiKey;
- this.baseUrl = config.baseUrl || DEFAULT_CLOUD_URL2;
- }
- async upload(file, options = {}) {
- try {
- const formData = new FormData;
- let blob;
- if (Buffer.isBuffer(file)) {
- blob = new Blob([file], {
- type: options.contentType || "application/octet-stream"
- });
- } else {
- blob = file;
- }
- const filename = options.filename || (file instanceof File ? file.name : "file") || "upload";
- formData.append("file", blob, filename);
- if (options.metadata) {
- formData.append("metadata", JSON.stringify(options.metadata));
- }
- const response = await fetch(`${this.baseUrl}${STORAGE_ENDPOINT}`, {
- method: "POST",
- headers: {
- Authorization: `Bearer ${this.apiKey}`
- },
- body: formData
- });
- if (!response.ok) {
- const errorData = await response.json().catch(() => ({}));
- if (response.status === 402) {
- return {
- success: false,
- error: `Insufficient credits. Required: ${errorData.required || "unknown"}, Available: ${errorData.available || "unknown"}. Top up at ${errorData.topUpUrl || "/dashboard/billing"}`
- };
- }
- return {
- success: false,
- error: `Upload failed: ${response.status} ${errorData.error || "Unknown error"}`
- };
- }
- const data = await response.json();
- logger11.info({ src: "plugin:elizacloud", cost: data.cost, remaining: data.creditsRemaining }, "Storage upload successful");
- return {
- success: true,
- id: data.id,
- url: data.url,
- pathname: data.pathname,
- contentType: data.contentType,
- size: data.size,
- cost: data.cost,
- creditsRemaining: data.creditsRemaining
- };
- } catch (error) {
- const message = error instanceof Error ? error.message : String(error);
- logger11.error({ src: "plugin:elizacloud", error }, "Storage upload failed");
- return {
- success: false,
- error: `Upload error: ${message}`
- };
- }
- }
- async download(id, url) {
- if (url) {
- try {
- const response = await fetch(url);
- if (!response.ok) {
- logger11.error({ src: "plugin:elizacloud", status: response.status, url }, "Storage direct download failed");
- return null;
- }
- const arrayBuffer = await response.arrayBuffer();
- return Buffer.from(arrayBuffer);
- } catch (error) {
- logger11.error({ src: "plugin:elizacloud", error }, "Storage direct download error");
- return null;
- }
- }
- try {
- const response = await fetch(`${this.baseUrl}${STORAGE_ENDPOINT}/${id}?download=true`, {
- headers: {
- Authorization: `Bearer ${this.apiKey}`
- },
- redirect: "follow"
- });
- if (!response.ok) {
- logger11.error({ src: "plugin:elizacloud", status: response.status }, "Storage download failed");
- return null;
- }
- const arrayBuffer = await response.arrayBuffer();
- return Buffer.from(arrayBuffer);
- } catch (error) {
- logger11.error({ src: "plugin:elizacloud", error }, "Storage download error");
- return null;
- }
- }
- async list(options = {}) {
- try {
- const params = new URLSearchParams;
- if (options.prefix)
- params.set("prefix", options.prefix);
- if (options.limit)
- params.set("limit", String(options.limit));
- if (options.cursor)
- params.set("cursor", options.cursor);
- const response = await fetch(`${this.baseUrl}${STORAGE_ENDPOINT}?${params.toString()}`, {
- headers: {
- Authorization: `Bearer ${this.apiKey}`
- }
- });
- if (!response.ok) {
- logger11.error({ src: "plugin:elizacloud", status: response.status }, "Storage list failed");
- return { items: [], hasMore: false };
- }
- const data = await response.json();
- return {
- items: data.items || [],
- cursor: data.cursor,
- hasMore: data.hasMore || false
- };
- } catch (error) {
- logger11.error({ src: "plugin:elizacloud", error }, "Storage list error");
- return { items: [], hasMore: false };
- }
- }
- async delete(id, url) {
- if (!url) {
- logger11.error({ src: "plugin:elizacloud" }, "Storage delete requires file URL");
- return false;
- }
- try {
- const params = new URLSearchParams({ url });
- const response = await fetch(`${this.baseUrl}${STORAGE_ENDPOINT}/${id}?${params.toString()}`, {
- method: "DELETE",
- headers: {
- Authorization: `Bearer ${this.apiKey}`
- }
- });
- if (!response.ok) {
- const errorData = await response.json().catch(() => ({}));
- logger11.error({ src: "plugin:elizacloud", status: response.status, error: errorData.error }, "Storage delete failed");
- return false;
- }
- return true;
- } catch (error) {
- logger11.error({ src: "plugin:elizacloud", error }, "Storage delete error");
- return false;
- }
- }
- async getStats() {
- try {
- const response = await fetch(`${this.baseUrl}${STORAGE_ENDPOINT}?stats=true`, {
- headers: {
- Authorization: `Bearer ${this.apiKey}`
- }
- });
- if (!response.ok) {
- return null;
- }
- const data = await response.json();
- return {
- totalFiles: data.stats?.totalFiles || 0,
- totalSize: data.stats?.totalSize || 0,
- totalSizeGB: data.stats?.totalSizeGB || 0,
- pricing: data.pricing || {}
- };
- } catch (error) {
- logger11.error({ src: "plugin:elizacloud", error }, "Storage stats error");
- return null;
- }
- }
- }
- // src/database/direct-adapter.ts
- import { logger as logger12 } from "@elizaos/core";
- import pluginSql2 from "@elizaos/plugin-sql/node";
- function createDatabaseAdapter(config, agentId) {
- const adapter = pluginSql2.createDatabaseAdapter({ postgresUrl: config.postgresUrl }, agentId);
- logger12.info({ src: "plugin:elizacloud", agentId }, "Direct database adapter created");
- return adapter;
- }
- async function createDirectDatabaseAdapter(config, agentId) {
- return createDatabaseAdapter(config, agentId);
- }
- // src/database/schema.ts
- import pluginSql3 from "@elizaos/plugin-sql/node";
- var {
- agentTable,
- roomTable,
- participantTable,
- memoryTable,
- embeddingTable,
- entityTable,
- relationshipTable,
- componentTable,
- taskTable,
- logTable,
- cacheTable,
- worldTable,
- serverTable,
- messageTable,
- messageServerTable,
- messageServerAgentsTable,
- channelTable,
- channelParticipantsTable
- } = pluginSql3.schema;
- var serverAgentsTable = serverTable;
  // src/index.ts
- var cloudStorageInstance = null;
- function getCloudStorage() {
- return cloudStorageInstance;
- }
- async function initializeCloudDatabase(runtime) {
- const apiKey = getApiKey(runtime);
- const baseUrl = getBaseURL(runtime);
- if (!apiKey) {
- logger13.warn({ src: "plugin:elizacloud" }, "Cloud database enabled but no API key found - skipping database initialization");
- return;
- }
- logger13.info({ src: "plugin:elizacloud", agentId: runtime.agentId }, "Initializing cloud database");
- const adapter = await createCloudDatabaseAdapter({
- apiKey,
- baseUrl,
- agentId: runtime.agentId
- });
- if (adapter) {
- runtime.registerDatabaseAdapter(adapter);
- logger13.info({ src: "plugin:elizacloud", agentId: runtime.agentId }, "Cloud database adapter registered successfully");
- } else {
- logger13.error({ src: "plugin:elizacloud", agentId: runtime.agentId }, "Failed to initialize cloud database adapter");
- }
- }
- function initializeCloudStorage(runtime) {
- const apiKey = getApiKey(runtime);
- const baseUrl = getBaseURL(runtime);
- if (!apiKey) {
- logger13.warn({ src: "plugin:elizacloud" }, "No API key found - cloud storage will not be available");
- return;
- }
- cloudStorageInstance = new CloudStorageService({
- apiKey,
- baseUrl
- });
- logger13.info({ src: "plugin:elizacloud", agentId: runtime.agentId }, "Cloud storage service initialized");
- }
  var elizaOSCloudPlugin = {
  name: "elizaOSCloud",
- description: "ElizaOS Cloud plugin - Complete AI, storage, and database solution. Provides multi-model inference (GPT-4, Claude, Gemini), embeddings, image generation, transcription, TTS, managed PostgreSQL database, and cloud file storage. A single plugin that replaces all other AI and database plugins.",
+ description: "ElizaOS Cloud plugin - Multi-model AI generation with text, image, and video support",
  config: {
  ELIZAOS_CLOUD_API_KEY: process.env.ELIZAOS_CLOUD_API_KEY,
  ELIZAOS_CLOUD_BASE_URL: process.env.ELIZAOS_CLOUD_BASE_URL,
@@ -1043,39 +681,20 @@ var elizaOSCloudPlugin = {
  ELIZAOS_CLOUD_EMBEDDING_DIMENSIONS: process.env.ELIZAOS_CLOUD_EMBEDDING_DIMENSIONS,
  ELIZAOS_CLOUD_IMAGE_DESCRIPTION_MODEL: process.env.ELIZAOS_CLOUD_IMAGE_DESCRIPTION_MODEL,
  ELIZAOS_CLOUD_IMAGE_DESCRIPTION_MAX_TOKENS: process.env.ELIZAOS_CLOUD_IMAGE_DESCRIPTION_MAX_TOKENS,
- ELIZAOS_CLOUD_IMAGE_GENERATION_MODEL: process.env.ELIZAOS_CLOUD_IMAGE_GENERATION_MODEL,
- ELIZAOS_CLOUD_TTS_MODEL: process.env.ELIZAOS_CLOUD_TTS_MODEL,
- ELIZAOS_CLOUD_TTS_VOICE: process.env.ELIZAOS_CLOUD_TTS_VOICE,
- ELIZAOS_CLOUD_TRANSCRIPTION_MODEL: process.env.ELIZAOS_CLOUD_TRANSCRIPTION_MODEL,
- ELIZAOS_CLOUD_DATABASE: process.env.ELIZAOS_CLOUD_DATABASE,
- ELIZAOS_CLOUD_STORAGE: process.env.ELIZAOS_CLOUD_STORAGE,
- ELIZAOS_CLOUD_EXPERIMENTAL_TELEMETRY: process.env.ELIZAOS_CLOUD_EXPERIMENTAL_TELEMETRY
+ ELIZAOS_CLOUD_EXPERIMENTAL_TELEMETRY: process.env.ELIZAOS_CLOUD_EXPERIMENTAL_TELEMETRY,
+ ELIZAOS_CLOUD_IMAGE_GENERATION_MODEL: process.env.ELIZAOS_CLOUD_IMAGE_GENERATION_MODEL
  },
- priority: -1,
  async init(config, runtime) {
  initializeOpenAI(config, runtime);
- if (!isBrowser()) {
- initializeCloudStorage(runtime);
- }
- const cloudDatabaseEnabled = runtime.getSetting("ELIZAOS_CLOUD_DATABASE") === "true" || process.env.ELIZAOS_CLOUD_DATABASE === "true";
- if (cloudDatabaseEnabled && !isBrowser()) {
- await initializeCloudDatabase(runtime);
- }
  },
  models: {
- [ModelType6.TEXT_SMALL]: handleTextSmall,
- [ModelType6.TEXT_LARGE]: handleTextLarge,
- [ModelType6.TEXT_REASONING_SMALL]: handleTextSmall,
- [ModelType6.TEXT_REASONING_LARGE]: handleTextLarge,
- [ModelType6.OBJECT_SMALL]: handleObjectSmall,
- [ModelType6.OBJECT_LARGE]: handleObjectLarge,
- [ModelType6.TEXT_EMBEDDING]: handleTextEmbedding,
- [ModelType6.TEXT_TOKENIZER_ENCODE]: handleTokenizerEncode,
- [ModelType6.TEXT_TOKENIZER_DECODE]: handleTokenizerDecode,
- [ModelType6.IMAGE]: handleImageGeneration,
- [ModelType6.IMAGE_DESCRIPTION]: handleImageDescription,
- [ModelType6.TRANSCRIPTION]: handleTranscription,
- [ModelType6.TEXT_TO_SPEECH]: handleTextToSpeech
+ [ModelType5.TEXT_EMBEDDING]: handleTextEmbedding,
+ [ModelType5.TEXT_SMALL]: handleTextSmall,
+ [ModelType5.TEXT_LARGE]: handleTextLarge,
+ [ModelType5.IMAGE]: handleImageGeneration,
+ [ModelType5.IMAGE_DESCRIPTION]: handleImageDescription,
+ [ModelType5.OBJECT_SMALL]: handleObjectSmall,
+ [ModelType5.OBJECT_LARGE]: handleObjectLarge
  },
  tests: [
  {
@@ -1091,7 +710,7 @@ var elizaOSCloudPlugin = {
  }
  });
  const data = await response.json();
- logger13.log({ data: data?.data?.length ?? "N/A" }, "Models Available");
+ logger9.log({ data: data?.data?.length ?? "N/A" }, "Models Available");
  if (!response.ok) {
  throw new Error(`Failed to validate OpenAI API key: ${response.statusText}`);
  }
@@ -1101,13 +720,13 @@ var elizaOSCloudPlugin = {
  name: "ELIZAOS_CLOUD_test_text_embedding",
  fn: async (runtime) => {
  try {
- const embedding = await runtime.useModel(ModelType6.TEXT_EMBEDDING, {
+ const embedding = await runtime.useModel(ModelType5.TEXT_EMBEDDING, {
  text: "Hello, world!"
  });
- logger13.log({ embedding }, "embedding");
+ logger9.log({ embedding }, "embedding");
  } catch (error) {
  const message = error instanceof Error ? error.message : String(error);
- logger13.error(`Error in test_text_embedding: ${message}`);
+ logger9.error(`Error in test_text_embedding: ${message}`);
  throw error;
  }
  }
@@ -1116,16 +735,16 @@ var elizaOSCloudPlugin = {
  name: "ELIZAOS_CLOUD_test_text_large",
  fn: async (runtime) => {
  try {
- const text = await runtime.useModel(ModelType6.TEXT_LARGE, {
+ const text = await runtime.useModel(ModelType5.TEXT_LARGE, {
  prompt: "What is the nature of reality in 10 words?"
  });
  if (text.length === 0) {
  throw new Error("Failed to generate text");
  }
- logger13.log({ text }, "generated with test_text_large");
+ logger9.log({ text }, "generated with test_text_large");
  } catch (error) {
  const message = error instanceof Error ? error.message : String(error);
- logger13.error(`Error in test_text_large: ${message}`);
+ logger9.error(`Error in test_text_large: ${message}`);
  throw error;
  }
  }
@@ -1134,16 +753,16 @@ var elizaOSCloudPlugin = {
  name: "ELIZAOS_CLOUD_test_text_small",
  fn: async (runtime) => {
  try {
- const text = await runtime.useModel(ModelType6.TEXT_SMALL, {
+ const text = await runtime.useModel(ModelType5.TEXT_SMALL, {
  prompt: "What is the nature of reality in 10 words?"
  });
  if (text.length === 0) {
  throw new Error("Failed to generate text");
  }
- logger13.log({ text }, "generated with test_text_small");
+ logger9.log({ text }, "generated with test_text_small");
  } catch (error) {
  const message = error instanceof Error ? error.message : String(error);
- logger13.error(`Error in test_text_small: ${message}`);
+ logger9.error(`Error in test_text_small: ${message}`);
  throw error;
  }
  }
@@ -1151,17 +770,17 @@ var elizaOSCloudPlugin = {
  {
  name: "ELIZAOS_CLOUD_test_image_generation",
  fn: async (runtime) => {
- logger13.log("ELIZAOS_CLOUD_test_image_generation");
+ logger9.log("ELIZAOS_CLOUD_test_image_generation");
  try {
- const image = await runtime.useModel(ModelType6.IMAGE, {
+ const image = await runtime.useModel(ModelType5.IMAGE, {
  prompt: "A beautiful sunset over a calm ocean",
- n: 1,
+ count: 1,
  size: "1024x1024"
  });
- logger13.log({ image }, "generated with test_image_generation");
+ logger9.log({ image }, "generated with test_image_generation");
  } catch (error) {
  const message = error instanceof Error ? error.message : String(error);
- logger13.error(`Error in test_image_generation: ${message}`);
+ logger9.error(`Error in test_image_generation: ${message}`);
  throw error;
  }
  }
@@ -1170,36 +789,36 @@ var elizaOSCloudPlugin = {
  name: "image-description",
  fn: async (runtime) => {
  try {
- logger13.log("ELIZAOS_CLOUD_test_image_description");
+ logger9.log("ELIZAOS_CLOUD_test_image_description");
  try {
- const result = await runtime.useModel(ModelType6.IMAGE_DESCRIPTION, "https://upload.wikimedia.org/wikipedia/commons/thumb/1/1c/Vitalik_Buterin_TechCrunch_London_2015_%28cropped%29.jpg/537px-Vitalik_Buterin_TechCrunch_London_2015_%28cropped%29.jpg");
+ const result = await runtime.useModel(ModelType5.IMAGE_DESCRIPTION, "https://upload.wikimedia.org/wikipedia/commons/thumb/1/1c/Vitalik_Buterin_TechCrunch_London_2015_%28cropped%29.jpg/537px-Vitalik_Buterin_TechCrunch_London_2015_%28cropped%29.jpg");
  if (result && typeof result === "object" && "title" in result && "description" in result) {
- logger13.log({ result }, "Image description");
+ logger9.log({ result }, "Image description");
  } else {
- logger13.error("Invalid image description result format:", result);
+ logger9.error("Invalid image description result format:", result);
  }
  } catch (e) {
  const message = e instanceof Error ? e.message : String(e);
- logger13.error(`Error in image description test: ${message}`);
+ logger9.error(`Error in image description test: ${message}`);
  }
  } catch (e) {
  const message = e instanceof Error ? e.message : String(e);
- logger13.error(`Error in ELIZAOS_CLOUD_test_image_description: ${message}`);
+ logger9.error(`Error in ELIZAOS_CLOUD_test_image_description: ${message}`);
  }
  }
  },
  {
  name: "ELIZAOS_CLOUD_test_transcription",
  fn: async (runtime) => {
- logger13.log("ELIZAOS_CLOUD_test_transcription");
+ logger9.log("ELIZAOS_CLOUD_test_transcription");
  try {
  const response = await fetch("https://upload.wikimedia.org/wikipedia/en/4/40/Chris_Benoit_Voice_Message.ogg");
  const arrayBuffer = await response.arrayBuffer();
- const transcription = await runtime.useModel(ModelType6.TRANSCRIPTION, Buffer.from(new Uint8Array(arrayBuffer)));
- logger13.log({ transcription }, "generated with test_transcription");
+ const transcription = await runtime.useModel(ModelType5.TRANSCRIPTION, Buffer.from(new Uint8Array(arrayBuffer)));
+ logger9.log({ transcription }, "generated with test_transcription");
  } catch (error) {
  const message = error instanceof Error ? error.message : String(error);
- logger13.error(`Error in test_transcription: ${message}`);
+ logger9.error(`Error in test_transcription: ${message}`);
  throw error;
  }
  }
@@ -1208,23 +827,23 @@ var elizaOSCloudPlugin = {
  name: "ELIZAOS_CLOUD_test_text_tokenizer_encode",
  fn: async (runtime) => {
  const prompt = "Hello tokenizer encode!";
- const tokens = await runtime.useModel(ModelType6.TEXT_TOKENIZER_ENCODE, { prompt });
+ const tokens = await runtime.useModel(ModelType5.TEXT_TOKENIZER_ENCODE, { prompt, modelType: ModelType5.TEXT_SMALL });
  if (!Array.isArray(tokens) || tokens.length === 0) {
  throw new Error("Failed to tokenize text: expected non-empty array of tokens");
  }
- logger13.log({ tokens }, "Tokenized output");
+ logger9.log({ tokens }, "Tokenized output");
  }
  },
  {
  name: "ELIZAOS_CLOUD_test_text_tokenizer_decode",
  fn: async (runtime) => {
  const prompt = "Hello tokenizer decode!";
- const tokens = await runtime.useModel(ModelType6.TEXT_TOKENIZER_ENCODE, { prompt });
- const decodedText = await runtime.useModel(ModelType6.TEXT_TOKENIZER_DECODE, { tokens });
+ const tokens = await runtime.useModel(ModelType5.TEXT_TOKENIZER_ENCODE, { prompt, modelType: ModelType5.TEXT_SMALL });
+ const decodedText = await runtime.useModel(ModelType5.TEXT_TOKENIZER_DECODE, { tokens, modelType: ModelType5.TEXT_SMALL });
  if (decodedText !== prompt) {
  throw new Error(`Decoded text does not match original. Expected "${prompt}", got "${decodedText}"`);
  }
- logger13.log({ decodedText }, "Decoded text");
+ logger9.log({ decodedText }, "Decoded text");
  }
  },
  {
@@ -1237,10 +856,10 @@ var elizaOSCloudPlugin = {
  if (!response) {
  throw new Error("Failed to generate speech");
  }
- logger13.log("Generated speech successfully");
+ logger9.log("Generated speech successfully");
  } catch (error) {
  const message = error instanceof Error ? error.message : String(error);
- logger13.error(`Error in ELIZAOS_CLOUD_test_text_to_speech: ${message}`);
+ logger9.error(`Error in ELIZAOS_CLOUD_test_text_to_speech: ${message}`);
  throw error;
  }
  }
@@ -1251,35 +870,8 @@ var elizaOSCloudPlugin = {
  };
  var src_default = elizaOSCloudPlugin;
  export {
- worldTable,
- taskTable,
- serverTable,
- serverAgentsTable,
- roomTable,
- relationshipTable,
- pluginSql3 as pluginSql,
- participantTable,
- messageTable,
- messageServerTable,
- messageServerAgentsTable,
- memoryTable,
- logTable,
- getCloudStorage,
- entityTable,
- embeddingTable,
  elizaOSCloudPlugin,
- src_default as default,
- createDirectDatabaseAdapter,
- createDatabaseAdapter,
- createCloudStorageService,
- createCloudDatabaseAdapter,
- componentTable,
- channelTable,
- channelParticipantsTable,
- cacheTable,
- agentTable,
- CloudStorageService,
- CloudDatabaseAdapter
+ src_default as default
  };

- //# debugId=1EA438C51CA2A03164756E2164756E21
+ //# debugId=6CA236862AF9E08C64756E2164756E21