@elizaos/plugin-local-ai 1.0.0-beta.7 → 1.0.0-beta.72
This diff compares publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in their respective public registries.
- package/LICENSE +1 -1
- package/README.md +45 -99
- package/dist/index.d.ts +9 -0
- package/dist/index.js +924 -1595
- package/dist/index.js.map +1 -1
- package/package.json +14 -28
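
The substance of the dist/index.js changes below is a configuration overhaul: the Ollama and StudioLM managers and their USE_LOCAL_AI / OLLAMA_* / STUDIOLM_* settings are removed, and the plugin is now configured entirely through LOCAL_* environment variables that default to bundled GGUF models. As a reading aid, a minimal sketch of the new variables follows; the names and defaults are copied verbatim from the configSchema in the diff, while the startup file itself and the directory paths are hypothetical.

// Sketch only: setting the beta.72 environment variables that validateConfig()
// reads from process.env. Variable names and defaults come from the diff below;
// the paths are hypothetical placeholders.
process.env.LOCAL_SMALL_MODEL = "DeepHermes-3-Llama-3-3B-Preview-q4.gguf"; // DEFAULT_SMALL_MODEL
process.env.LOCAL_LARGE_MODEL = "DeepHermes-3-Llama-3-8B-q4.gguf"; // DEFAULT_LARGE_MODEL
process.env.LOCAL_EMBEDDING_MODEL = "bge-small-en-v1.5.Q4_K_M.gguf"; // DEFAULT_EMBEDDING_MODEL
process.env.MODELS_DIR = "/data/models"; // optional path for the models directory
process.env.CACHE_DIR = "/data/cache"; // optional path for the cache directory
process.env.LOCAL_EMBEDDING_DIMENSIONS = "384"; // a string in env; the schema transforms it to a number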
package/dist/index.js
CHANGED
@@ -1,75 +1,58 @@
 // src/index.ts
-import fs5 from "
-import
-import
-import {
-import {
-
+import fs5 from "fs";
+import os3 from "os";
+import path5 from "path";
+import { Readable as Readable2 } from "stream";
+import {
+ModelType,
+logger as logger8
+} from "@elizaos/core";
 import {
 LlamaChatSession,
-getLlama
+getLlama
 } from "node-llama-cpp";
 
 // src/environment.ts
 import { logger } from "@elizaos/core";
 import { z } from "zod";
+var DEFAULT_SMALL_MODEL = "DeepHermes-3-Llama-3-3B-Preview-q4.gguf";
+var DEFAULT_LARGE_MODEL = "DeepHermes-3-Llama-3-8B-q4.gguf";
+var DEFAULT_EMBEDDING_MODEL = "bge-small-en-v1.5.Q4_K_M.gguf";
 var configSchema = z.object({
-
-
-
-
-
-
-
-
-
-MEDIUM_OLLAMA_MODEL: z.string().default("deepseek-r1:7b"),
-LARGE_OLLAMA_MODEL: z.string().default("deepseek-r1:7b"),
-// StudioLM Configuration
-STUDIOLM_SERVER_URL: z.string().default("http://localhost:1234"),
-STUDIOLM_SMALL_MODEL: z.string().default("lmstudio-community/deepseek-r1-distill-qwen-1.5b"),
-STUDIOLM_MEDIUM_MODEL: z.string().default("deepseek-r1-distill-qwen-7b"),
-STUDIOLM_EMBEDDING_MODEL: z.union([z.boolean(), z.string()]).default(false)
+LOCAL_SMALL_MODEL: z.string().optional().default(DEFAULT_SMALL_MODEL),
+LOCAL_LARGE_MODEL: z.string().optional().default(DEFAULT_LARGE_MODEL),
+LOCAL_EMBEDDING_MODEL: z.string().optional().default(DEFAULT_EMBEDDING_MODEL),
+MODELS_DIR: z.string().optional(),
+// Path for the models directory
+CACHE_DIR: z.string().optional(),
+// Path for the cache directory
+LOCAL_EMBEDDING_DIMENSIONS: z.string().optional().default("384").transform((val) => parseInt(val, 10))
+// Transform to number
 });
-function
-logger.info("Validating model configuration with values:", {
-USE_LOCAL_AI: config.USE_LOCAL_AI,
-USE_STUDIOLM_TEXT_MODELS: config.USE_STUDIOLM_TEXT_MODELS,
-USE_OLLAMA_TEXT_MODELS: config.USE_OLLAMA_TEXT_MODELS
-});
-if (!config.USE_LOCAL_AI) {
-config.USE_LOCAL_AI = true;
-logger.info("Setting USE_LOCAL_AI to true as it's required");
-}
-if (config.USE_STUDIOLM_TEXT_MODELS && config.USE_OLLAMA_TEXT_MODELS) {
-throw new Error("StudioLM and Ollama text models cannot be enabled simultaneously");
-}
-logger.info("Configuration is valid");
-}
-async function validateConfig(config) {
+function validateConfig() {
 try {
-const
-
-
-
-
-
-
-
-
-
-OLLAMA_MODEL: config.OLLAMA_MODEL || "deepseek-r1-distill-qwen-7b",
-OLLAMA_EMBEDDING_MODEL: config.OLLAMA_EMBEDDING_MODEL || "",
-SMALL_OLLAMA_MODEL: config.SMALL_OLLAMA_MODEL || "deepseek-r1:1.5b",
-MEDIUM_OLLAMA_MODEL: config.MEDIUM_OLLAMA_MODEL || "deepseek-r1:7b",
-LARGE_OLLAMA_MODEL: config.LARGE_OLLAMA_MODEL || "deepseek-r1:7b",
-STUDIOLM_SERVER_URL: config.STUDIOLM_SERVER_URL || "http://localhost:1234",
-STUDIOLM_SMALL_MODEL: config.STUDIOLM_SMALL_MODEL || "lmstudio-community/deepseek-r1-distill-qwen-1.5b",
-STUDIOLM_MEDIUM_MODEL: config.STUDIOLM_MEDIUM_MODEL || "deepseek-r1-distill-qwen-7b",
-STUDIOLM_EMBEDDING_MODEL: config.STUDIOLM_EMBEDDING_MODEL || false
+const configToParse = {
+// Read model filenames from environment variables or use undefined (so zod defaults apply)
+LOCAL_SMALL_MODEL: process.env.LOCAL_SMALL_MODEL,
+LOCAL_LARGE_MODEL: process.env.LOCAL_LARGE_MODEL,
+LOCAL_EMBEDDING_MODEL: process.env.LOCAL_EMBEDDING_MODEL,
+MODELS_DIR: process.env.MODELS_DIR,
+// Read models directory path from env
+CACHE_DIR: process.env.CACHE_DIR,
+// Read cache directory path from env
+LOCAL_EMBEDDING_DIMENSIONS: process.env.LOCAL_EMBEDDING_DIMENSIONS
+// Read embedding dimensions
 };
-
+logger.debug("Validating configuration for local AI plugin from env:", {
+LOCAL_SMALL_MODEL: configToParse.LOCAL_SMALL_MODEL,
+LOCAL_LARGE_MODEL: configToParse.LOCAL_LARGE_MODEL,
+LOCAL_EMBEDDING_MODEL: configToParse.LOCAL_EMBEDDING_MODEL,
+MODELS_DIR: configToParse.MODELS_DIR,
+CACHE_DIR: configToParse.CACHE_DIR,
+LOCAL_EMBEDDING_DIMENSIONS: configToParse.LOCAL_EMBEDDING_DIMENSIONS
+});
+const validatedConfig = configSchema.parse(configToParse);
+logger.info("Using local AI configuration:", validatedConfig);
 return validatedConfig;
 } catch (error) {
 if (error instanceof z.ZodError) {
@@ -110,6 +93,18 @@ var MODEL_SPECS = {
 type: "llama"
 }
 },
+embedding: {
+name: "bge-small-en-v1.5.Q4_K_M.gguf",
+repo: "ChristianAzinn/bge-small-en-v1.5-gguf",
+size: "133 MB",
+quantization: "Q4_K_M",
+contextSize: 512,
+dimensions: 384,
+tokenizer: {
+name: "ChristianAzinn/bge-small-en-v1.5-gguf",
+type: "llama"
+}
+},
 vision: {
 name: "Florence-2-base-ft",
 repo: "onnx-community/Florence-2-base-ft",
@@ -147,86 +142,20 @@ var MODEL_SPECS = {
 ]
 },
 tts: {
-
-
-
-
-
-
-languages: ["en"],
-features: ["MULTI_SPEAKER", "VOICE_CLONING", "EMOTION_CONTROL", "SPEED_CONTROL"],
-maxInputLength: 4096,
-sampleRate: 24e3,
-contextSize: 2048,
-tokenizer: {
-name: "OuteAI/OuteTTS-0.2-500M",
-type: "llama"
-}
-},
-medium: {
-name: "OuteTTS-0.3-1B.gguf",
-repo: "OuteAI/OuteTTS-0.3-1B-GGUF",
-size: "1B",
-quantization: "Q8_0",
-speakers: ["male_1", "male_2", "male_3", "female_1", "female_2", "female_3"],
-languages: ["en", "es", "fr", "de", "it"],
-features: [
-"MULTI_SPEAKER",
-"VOICE_CLONING",
-"EMOTION_CONTROL",
-"SPEED_CONTROL",
-"MULTILINGUAL",
-"ACCENT_CONTROL"
-],
-maxInputLength: 8192,
-sampleRate: 32e3,
-contextSize: 4096,
-tokenizer: {
-name: "OuteAI/OuteTTS-0.3-1B",
-type: "llama"
-}
-},
-large: {
-name: "OuteTTS-0.3-3B.gguf",
-repo: "OuteAI/OuteTTS-0.3-3B-GGUF",
-size: "3B",
-quantization: "Q8_0",
-speakers: [
-"male_1",
-"male_2",
-"male_3",
-"male_4",
-"female_1",
-"female_2",
-"female_3",
-"female_4"
-],
-languages: ["en", "es", "fr", "de", "it", "pt", "nl", "pl", "ru", "ja", "ko", "zh"],
-features: [
-"MULTI_SPEAKER",
-"VOICE_CLONING",
-"EMOTION_CONTROL",
-"SPEED_CONTROL",
-"MULTILINGUAL",
-"ACCENT_CONTROL",
-"STYLE_TRANSFER",
-"PROSODY_CONTROL"
-],
-maxInputLength: 16384,
-sampleRate: 48e3,
-contextSize: 8192,
-tokenizer: {
-name: "OuteAI/OuteTTS-0.3-3B",
-type: "llama"
-}
+default: {
+modelId: "Xenova/speecht5_tts",
+defaultSampleRate: 16e3,
+// SpeechT5 default
+// Use the standard embedding URL
+defaultSpeakerEmbeddingUrl: "https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/speaker_embeddings.bin"
 }
 }
 };
 
 // src/utils/downloadManager.ts
-import fs from "
-import https from "
-import path from "
+import fs from "fs";
+import https from "https";
+import path from "path";
 import { logger as logger2 } from "@elizaos/core";
 var DownloadManager = class _DownloadManager {
 static instance = null;
@@ -324,7 +253,10 @@ var DownloadManager = class _DownloadManager {
 reject(new Error(`Failed to download: ${response.statusCode}`));
 return;
 }
-const totalSize = Number.parseInt(
+const totalSize = Number.parseInt(
+response.headers["content-length"] || "0",
+10
+);
 let downloadedSize = 0;
 let lastLoggedPercent = 0;
 const barLength = 30;
@@ -335,9 +267,13 @@ var DownloadManager = class _DownloadManager {
 downloadedSize += chunk.length;
 const percent = Math.round(downloadedSize / totalSize * 100);
 if (percent >= lastLoggedPercent + 5) {
-const filledLength = Math.floor(
+const filledLength = Math.floor(
+downloadedSize / totalSize * barLength
+);
 const progressBar = "\u25B0".repeat(filledLength) + "\u25B1".repeat(barLength - filledLength);
-logger2.info(
+logger2.info(
+`Downloading ${fileName}: ${progressBar} ${percent}%`
+);
 lastLoggedPercent = percent;
 }
 });
@@ -352,18 +288,24 @@ var DownloadManager = class _DownloadManager {
 fs.mkdirSync(destDir, { recursive: true });
 }
 if (!fs.existsSync(tempPath)) {
-reject(
+reject(
+new Error(`Temporary file ${tempPath} does not exist`)
+);
 return;
 }
 if (fs.existsSync(destPath)) {
 try {
 const backupPath = `${destPath}.bak`;
 fs.renameSync(destPath, backupPath);
-logger2.info(
+logger2.info(
+`Created backup of existing file: ${backupPath}`
+);
 fs.renameSync(tempPath, destPath);
 if (fs.existsSync(backupPath)) {
 fs.unlinkSync(backupPath);
-logger2.info(
+logger2.info(
+`Removed backup file after successful update: ${backupPath}`
+);
 }
 } catch (moveErr) {
 logger2.error(
@@ -373,7 +315,9 @@ var DownloadManager = class _DownloadManager {
 if (fs.existsSync(backupPath)) {
 try {
 fs.renameSync(backupPath, destPath);
-logger2.info(
+logger2.info(
+`Restored from backup after failed update: ${backupPath}`
+);
 } catch (restoreErr) {
 logger2.error(
 `Failed to restore from backup: ${restoreErr instanceof Error ? restoreErr.message : String(restoreErr)}`
@@ -395,7 +339,9 @@ var DownloadManager = class _DownloadManager {
 } else {
 fs.renameSync(tempPath, destPath);
 }
-logger2.success(
+logger2.success(
+`Download of ${fileName} completed successfully`
+);
 this.activeDownloads.delete(destPath);
 resolve();
 } catch (err) {
@@ -417,7 +363,9 @@ var DownloadManager = class _DownloadManager {
 });
 });
 file.on("error", (err) => {
-logger2.error(
+logger2.error(
+`File write error: ${err instanceof Error ? err.message : String(err)}`
+);
 file.close(() => {
 if (fs.existsSync(tempPath)) {
 try {
@@ -435,7 +383,9 @@ var DownloadManager = class _DownloadManager {
 }
 );
 request.on("error", (err) => {
-logger2.error(
+logger2.error(
+`Request error: ${err instanceof Error ? err.message : String(err)}`
+);
 if (fs.existsSync(tempPath)) {
 try {
 fs.unlinkSync(tempPath);
@@ -474,7 +424,9 @@ var DownloadManager = class _DownloadManager {
 */
 async downloadFile(url, destPath) {
 if (this.activeDownloads.has(destPath)) {
-logger2.info(
+logger2.info(
+`Download for ${destPath} already in progress, waiting for it to complete...`
+);
 const existingDownload = this.activeDownloads.get(destPath);
 if (existingDownload) {
 return existingDownload;
@@ -598,288 +550,11 @@ var DownloadManager = class _DownloadManager {
 }
 };
 
-// src/utils/ollamaManager.ts
-import { ModelType, logger as logger3 } from "@elizaos/core";
-var OllamaManager = class _OllamaManager {
-static instance = null;
-serverUrl;
-initialized = false;
-availableModels = [];
-configuredModels = {
-small: process.env.SMALL_OLLAMA_MODEL || "deepseek-r1:1.5b",
-medium: process.env.MEDIUM_OLLAMA_MODEL || "deepseek-r1:7b"
-};
-/**
-* Private constructor for initializing OllamaManager.
-*/
-constructor() {
-this.serverUrl = process.env.OLLAMA_SERVER_URL || "http://localhost:11434";
-logger3.info("OllamaManager initialized with configuration:", {
-serverUrl: this.serverUrl,
-configuredModels: this.configuredModels,
-timestamp: (/* @__PURE__ */ new Date()).toISOString()
-});
-}
-/**
-* Returns an instance of the OllamaManager class.
-* If an instance does not already exist, a new instance is created and returned.
-* @returns {OllamaManager} The instance of the OllamaManager class.
-*/
-static getInstance() {
-if (!_OllamaManager.instance) {
-_OllamaManager.instance = new _OllamaManager();
-}
-return _OllamaManager.instance;
-}
-/**
-* Asynchronously checks the status of the server by attempting to fetch the "/api/tags" endpoint.
-* @returns A Promise that resolves to a boolean indicating if the server is reachable and responding with a successful status.
-*/
-async checkServerStatus() {
-try {
-const response = await fetch(`${this.serverUrl}/api/tags`);
-if (!response.ok) {
-throw new Error(`Server responded with status: ${response.status}`);
-}
-return true;
-} catch (error) {
-logger3.error("Ollama server check failed:", {
-error: error instanceof Error ? error.message : String(error),
-serverUrl: this.serverUrl,
-timestamp: (/* @__PURE__ */ new Date()).toISOString()
-});
-return false;
-}
-}
-/**
-* Fetches the available Ollama models from the specified server URL.
-*
-* @returns {Promise<void>} A Promise that resolves when the available models are successfully fetched.
-*/
-async fetchAvailableModels() {
-try {
-const response = await fetch(`${this.serverUrl}/api/tags`);
-if (!response.ok) {
-throw new Error(`Failed to fetch models: ${response.status}`);
-}
-const data = await response.json();
-this.availableModels = data.models;
-logger3.info("Ollama available models:", {
-count: this.availableModels.length,
-models: this.availableModels.map((m) => m.name),
-timestamp: (/* @__PURE__ */ new Date()).toISOString()
-});
-} catch (error) {
-logger3.error("Failed to fetch Ollama models:", {
-error: error instanceof Error ? error.message : String(error),
-serverUrl: this.serverUrl,
-timestamp: (/* @__PURE__ */ new Date()).toISOString()
-});
-throw error;
-}
-}
-/**
-* Asynchronously tests a model specified by the given modelId.
-*
-* @param {string} modelId - The ID of the model to be tested.
-* @returns {Promise<boolean>} - A promise that resolves to true if the model test is successful, false otherwise.
-*/
-async testModel(modelId) {
-try {
-const testRequest = {
-model: modelId,
-prompt: "Debug Mode: Test initialization. Respond with 'Initialization successful' if you can read this.",
-stream: false,
-options: {
-temperature: 0.7,
-num_predict: 100
-}
-};
-logger3.info(`Testing model ${modelId}...`);
-const response = await fetch(`${this.serverUrl}/api/generate`, {
-method: "POST",
-headers: {
-"Content-Type": "application/json"
-},
-body: JSON.stringify(testRequest)
-});
-if (!response.ok) {
-throw new Error(`Model test failed with status: ${response.status}`);
-}
-const result = await response.json();
-if (!result.response) {
-throw new Error("No valid response content received");
-}
-logger3.info(`Model ${modelId} test response:`, {
-content: result.response,
-model: result.model,
-timestamp: (/* @__PURE__ */ new Date()).toISOString()
-});
-return true;
-} catch (error) {
-logger3.error(`Model ${modelId} test failed:`, {
-error: error instanceof Error ? error.message : String(error),
-stack: error instanceof Error ? error.stack : void 0,
-timestamp: (/* @__PURE__ */ new Date()).toISOString()
-});
-return false;
-}
-}
-/**
-* Asynchronously tests the configured text models to ensure they are working properly.
-* Logs the test results for each model and outputs a warning if any models fail the test.
-* @returns {Promise<void>} A Promise that resolves when all configured models have been tested.
-*/
-async testTextModels() {
-logger3.info("Testing configured text models...");
-const results = await Promise.all([
-this.testModel(this.configuredModels.small),
-this.testModel(this.configuredModels.medium)
-]);
-const [smallWorking, mediumWorking] = results;
-if (!smallWorking || !mediumWorking) {
-const failedModels = [];
-if (!smallWorking) failedModels.push("small");
-if (!mediumWorking) failedModels.push("medium");
-logger3.warn("Some models failed the test:", {
-failedModels,
-small: this.configuredModels.small,
-medium: this.configuredModels.medium
-});
-} else {
-logger3.success("All configured models passed the test");
-}
-}
-/**
-* Asynchronously initializes the Ollama service by checking server status,
-* fetching available models, and testing text models.
-*
-* @returns A Promise that resolves when initialization is complete
-*/
-async initialize() {
-try {
-if (this.initialized) {
-logger3.info("Ollama already initialized, skipping initialization");
-return;
-}
-logger3.info("Starting Ollama initialization...");
-const serverAvailable = await this.checkServerStatus();
-if (!serverAvailable) {
-throw new Error("Ollama server is not available");
-}
-await this.fetchAvailableModels();
-await this.testTextModels();
-this.initialized = true;
-logger3.success("Ollama initialization complete");
-} catch (error) {
-logger3.error("Ollama initialization failed:", {
-error: error instanceof Error ? error.message : String(error),
-stack: error instanceof Error ? error.stack : void 0
-});
-throw error;
-}
-}
-/**
-* Retrieves the available Ollama models.
-*
-* @returns {OllamaModel[]} An array of OllamaModel objects representing the available models.
-*/
-getAvailableModels() {
-return this.availableModels;
-}
-/**
-* Check if the object is initialized.
-* @returns {boolean} True if the object is initialized, false otherwise.
-*/
-isInitialized() {
-return this.initialized;
-}
-/**
-* Generates text using the Ollama AI model.
-*
-* @param {GenerateTextParams} params - The parameters for generating text.
-* @param {boolean} [isInitialized=false] - Flag indicating if Ollama is already initialized.
-* @returns {Promise<string>} - A promise that resolves with the generated text.
-*/
-async generateText(params, isInitialized = false) {
-try {
-logger3.info("Ollama generateText entry:", {
-isInitialized,
-currentInitState: this.initialized,
-managerInitState: this.isInitialized(),
-modelType: params.modelType,
-contextLength: params.prompt?.length,
-timestamp: (/* @__PURE__ */ new Date()).toISOString()
-});
-if (!this.initialized && !isInitialized) {
-throw new Error("Ollama not initialized. Please initialize before generating text.");
-}
-logger3.info("Ollama preparing request:", {
-model: params.modelType === ModelType.TEXT_LARGE ? this.configuredModels.medium : this.configuredModels.small,
-contextLength: params.prompt.length,
-timestamp: (/* @__PURE__ */ new Date()).toISOString()
-});
-const request = {
-model: params.modelType === ModelType.TEXT_LARGE ? this.configuredModels.medium : this.configuredModels.small,
-prompt: params.prompt,
-stream: false,
-options: {
-temperature: 0.7,
-top_p: 0.9,
-num_predict: 8192,
-repeat_penalty: 1.2,
-frequency_penalty: 0.7,
-presence_penalty: 0.7
-}
-};
-const response = await fetch(`${this.serverUrl}/api/generate`, {
-method: "POST",
-headers: {
-"Content-Type": "application/json"
-},
-body: JSON.stringify(request)
-});
-if (!response.ok) {
-throw new Error(`Ollama request failed: ${response.status}`);
-}
-const result = await response.json();
-if (!result.response) {
-throw new Error("No valid response content received from Ollama");
-}
-let responseText = result.response;
-logger3.info("Raw response structure:", {
-responseLength: responseText.length,
-hasAction: responseText.includes("action"),
-hasThinkTag: responseText.includes("<think>")
-});
-if (responseText.includes("<think>")) {
-logger3.info("Cleaning think tags from response");
-responseText = responseText.replace(/<think>[\s\S]*?<\/think>\n?/g, "");
-logger3.info("Think tags removed from response");
-}
-logger3.info("Ollama request completed successfully:", {
-responseLength: responseText.length,
-hasThinkTags: responseText.includes("<think>"),
-timestamp: (/* @__PURE__ */ new Date()).toISOString()
-});
-return responseText;
-} catch (error) {
-logger3.error("Ollama text generation error:", {
-error: error instanceof Error ? error.message : String(error),
-stack: error instanceof Error ? error.stack : void 0,
-phase: "text generation",
-timestamp: (/* @__PURE__ */ new Date()).toISOString()
-});
-throw error;
-}
-}
-};
-
 // src/utils/platform.ts
-import { exec } from "
-import os from "
-import { promisify } from "
-import { logger as
+import { exec } from "child_process";
+import os from "os";
+import { promisify } from "util";
+import { logger as logger3 } from "@elizaos/core";
 var execAsync = promisify(exec);
 var PlatformManager = class _PlatformManager {
 static instance;
@@ -906,10 +581,10 @@ var PlatformManager = class _PlatformManager {
 */
 async initialize() {
 try {
-
+logger3.info("Initializing platform detection...");
 this.capabilities = await this.detectSystemCapabilities();
 } catch (error) {
-
+logger3.error("Platform detection failed", { error });
 throw error;
 }
 }
@@ -971,7 +646,7 @@ var PlatformManager = class _PlatformManager {
 return null;
 }
 } catch (error) {
-
+logger3.error("GPU detection failed", { error });
 return null;
 }
 }
@@ -990,14 +665,16 @@ var PlatformManager = class _PlatformManager {
 isAppleSilicon: true
 };
 }
-const { stdout: gpuInfo } = await execAsync(
+const { stdout: gpuInfo } = await execAsync(
+"system_profiler SPDisplaysDataType"
+);
 return {
 name: gpuInfo.split("Chipset Model:")[1]?.split("\n")[0]?.trim() || "Unknown GPU",
 type: "metal",
 isAppleSilicon: false
 };
 } catch (error) {
-
+logger3.error("Mac GPU detection failed", { error });
 return {
 name: "Unknown Mac GPU",
 type: "metal",
@@ -1012,7 +689,9 @@ var PlatformManager = class _PlatformManager {
 */
 async detectWindowsGPU() {
 try {
-const { stdout } = await execAsync(
+const { stdout } = await execAsync(
+"wmic path win32_VideoController get name"
+);
 const gpuName = stdout.split("\n")[1].trim();
 if (gpuName.toLowerCase().includes("nvidia")) {
 const { stdout: nvidiaInfo } = await execAsync(
@@ -1032,7 +711,7 @@ var PlatformManager = class _PlatformManager {
 type: "directml"
 };
 } catch (error) {
-
+logger3.error("Windows GPU detection failed", { error });
 return null;
 }
 }
@@ -1068,7 +747,7 @@ var PlatformManager = class _PlatformManager {
 type: "none"
 };
 } catch (error) {
-
+logger3.error("Linux GPU detection failed", { error });
 return null;
 }
 }
@@ -1204,298 +883,11 @@ var getPlatformManager = () => {
 return PlatformManager.getInstance();
 };
 
-// src/utils/studiolmManager.ts
-import { ModelType as ModelType2, logger as logger5 } from "@elizaos/core";
-var StudioLMManager = class _StudioLMManager {
-static instance = null;
-serverUrl;
-initialized = false;
-availableModels = [];
-configuredModels = {
-small: process.env.STUDIOLM_SMALL_MODEL || "lmstudio-community/deepseek-r1-distill-qwen-1.5b",
-medium: process.env.STUDIOLM_MEDIUM_MODEL || "deepseek-r1-distill-qwen-7b"
-};
-/**
-* Private constructor for StudioLMManager.
-* Initializes with default serverUrl if not provided in environment variables.
-* Logs initialization information including serverUrl, configuredModels, and timestamp.
-*/
-constructor() {
-this.serverUrl = process.env.STUDIOLM_SERVER_URL || "http://localhost:1234";
-logger5.info("StudioLMManager initialized with configuration:", {
-serverUrl: this.serverUrl,
-configuredModels: this.configuredModels,
-timestamp: (/* @__PURE__ */ new Date()).toISOString()
-});
-}
-/**
-* Returns an instance of StudioLMManager. If an instance already exists, it returns the existing instance.
-* @returns {StudioLMManager} The instance of StudioLMManager
-*/
-static getInstance() {
-if (!_StudioLMManager.instance) {
-_StudioLMManager.instance = new _StudioLMManager();
-}
-return _StudioLMManager.instance;
-}
-/**
-* Check the status of the server by sending a request to the /v1/models endpoint.
-* @returns {Promise<boolean>} A Promise that resolves to true if the server responds with success status, false otherwise.
-*/
-async checkServerStatus() {
-try {
-const response = await fetch(`${this.serverUrl}/v1/models`);
-if (!response.ok) {
-throw new Error(`Server responded with status: ${response.status}`);
-}
-return true;
-} catch (error) {
-logger5.error("LM Studio server check failed:", {
-error: error instanceof Error ? error.message : String(error),
-serverUrl: this.serverUrl,
-timestamp: (/* @__PURE__ */ new Date()).toISOString()
-});
-return false;
-}
-}
-/**
-* Fetches the available models from the server and stores them in the 'availableModels' property.
-*
-* @returns {Promise<void>} A Promise that resolves when the models are fetched successfully or rejects with an error.
-*/
-async fetchAvailableModels() {
-try {
-const response = await fetch(`${this.serverUrl}/v1/models`);
-if (!response.ok) {
-throw new Error(`Failed to fetch models: ${response.status}`);
-}
-const data = await response.json();
-this.availableModels = data.data;
-logger5.info("LM Studio available models:", {
-count: this.availableModels.length,
-models: this.availableModels.map((m) => m.id),
-timestamp: (/* @__PURE__ */ new Date()).toISOString()
-});
-} catch (error) {
-logger5.error("Failed to fetch LM Studio models:", {
-error: error instanceof Error ? error.message : String(error),
-serverUrl: this.serverUrl,
-timestamp: (/* @__PURE__ */ new Date()).toISOString()
-});
-throw error;
-}
-}
-/**
-* Asynchronously tests the specified model with a chat completion request.
-* @param {string} modelId - The ID of the model to test.
-* @returns {Promise<boolean>} - A promise that resolves to true if the model test was successful, false otherwise.
-*/
-async testModel(modelId) {
-try {
-const testRequest = {
-model: modelId,
-messages: [
-{
-role: "system",
-content: "Always answer in rhymes. Today is Thursday"
-},
-{ role: "user", content: "What day is it today?" }
-],
-temperature: 0.7,
-max_tokens: -1,
-stream: false
-};
-logger5.info(`Testing model ${modelId}...`);
-const response = await fetch(`${this.serverUrl}/v1/chat/completions`, {
-method: "POST",
-headers: {
-"Content-Type": "application/json"
-},
-body: JSON.stringify(testRequest)
-});
-if (!response.ok) {
-throw new Error(`Model test failed with status: ${response.status}`);
-}
-const result = await response.json();
-if (!result.choices?.[0]?.message?.content) {
-throw new Error("No valid response content received");
-}
-logger5.info(`Model ${modelId} test response:`, {
-content: result.choices[0].message.content,
-model: result.model,
-timestamp: (/* @__PURE__ */ new Date()).toISOString()
-});
-return true;
-} catch (error) {
-logger5.error(`Model ${modelId} test failed:`, {
-error: error instanceof Error ? error.message : String(error),
-stack: error instanceof Error ? error.stack : void 0,
-timestamp: (/* @__PURE__ */ new Date()).toISOString()
-});
-return false;
-}
-}
-/**
-* Tests the configured text models to ensure they are working properly.
-* Logs the results of the test and any failed models.
-* @returns {Promise<void>} A promise that resolves when the test is complete.
-*/
-async testTextModels() {
-logger5.info("Testing configured text models...");
-const results = await Promise.all([
-this.testModel(this.configuredModels.small),
-this.testModel(this.configuredModels.medium)
-]);
-const [smallWorking, mediumWorking] = results;
-if (!smallWorking || !mediumWorking) {
-const failedModels = [];
-if (!smallWorking) failedModels.push("small");
-if (!mediumWorking) failedModels.push("medium");
-logger5.warn("Some models failed the test:", {
-failedModels,
-small: this.configuredModels.small,
-medium: this.configuredModels.medium
-});
-} else {
-logger5.success("All configured models passed the test");
-}
-}
-/**
-* Initializes StudioLM by checking server status, fetching available models,
-* and testing text models.
-*
-* @returns {Promise<void>} A Promise that resolves when initialization is complete
-*/
-async initialize() {
-try {
-if (this.initialized) {
-logger5.info("StudioLM already initialized, skipping initialization");
-return;
-}
-logger5.info("Starting StudioLM initialization...");
-const serverAvailable = await this.checkServerStatus();
-if (!serverAvailable) {
-throw new Error("LM Studio server is not available");
-}
-await this.fetchAvailableModels();
-await this.testTextModels();
-this.initialized = true;
-logger5.success("StudioLM initialization complete");
-} catch (error) {
-logger5.error("StudioLM initialization failed:", {
-error: error instanceof Error ? error.message : String(error),
-stack: error instanceof Error ? error.stack : void 0
-});
-throw error;
-}
-}
-/**
-* Retrieves the available models in the studio.
-*
-* @returns {StudioLMModel[]} An array of StudioLMModel objects representing the available models.
-*/
-getAvailableModels() {
-return this.availableModels;
-}
-/**
-* Check if the object is initialized.
-*
-* @returns {boolean} Returns true if the object is initialized, otherwise false.
-*/
-isInitialized() {
-return this.initialized;
-}
-/**
-* Asynchronously generates text using StudioLM based on provided parameters.
-*
-* @param {GenerateTextParams} params - The parameters for generating text.
-* @param {boolean} [isInitialized=false] - Flag to indicate if the model is already initialized.
-* @returns {Promise<string>} The generated text as a Promise.
-*/
-async generateText(params, isInitialized = false) {
-try {
-logger5.info("StudioLM generateText entry:", {
-isInitialized,
-currentInitState: this.initialized,
-managerInitState: this.isInitialized(),
-modelType: params.modelType,
-contextLength: params.prompt?.length,
-timestamp: (/* @__PURE__ */ new Date()).toISOString()
-});
-if (!this.initialized && !isInitialized) {
-throw new Error("StudioLM not initialized. Please initialize before generating text.");
-}
-const messages = [
-{
-role: "system",
-content: "You are a helpful AI assistant. Respond to the current request only."
-},
-{ role: "user", content: params.prompt }
-];
-logger5.info("StudioLM preparing request:", {
-model: params.modelType === ModelType2.TEXT_LARGE ? this.configuredModels.medium : this.configuredModels.small,
-messageCount: messages.length,
-timestamp: (/* @__PURE__ */ new Date()).toISOString()
-});
-logger5.info("Incoming context structure:", {
-contextLength: params.prompt.length,
-hasAction: params.prompt.includes("action"),
-runtime: !!params.runtime,
-stopSequences: params.stopSequences
-});
-const request = {
-model: params.modelType === ModelType2.TEXT_LARGE ? this.configuredModels.medium : this.configuredModels.small,
-messages,
-temperature: 0.7,
-max_tokens: 8192,
-stream: false
-};
-const response = await fetch(`${this.serverUrl}/v1/chat/completions`, {
-method: "POST",
-headers: {
-"Content-Type": "application/json"
-},
-body: JSON.stringify(request)
-});
-if (!response.ok) {
-throw new Error(`StudioLM request failed: ${response.status}`);
-}
-const result = await response.json();
-if (!result.choices?.[0]?.message?.content) {
-throw new Error("No valid response content received from StudioLM");
-}
-let responseText = result.choices[0].message.content;
-logger5.info("Raw response structure:", {
-responseLength: responseText.length,
-hasAction: responseText.includes("action"),
-hasThinkTag: responseText.includes("<think>")
-});
-if (responseText.includes("<think>")) {
-logger5.info("Cleaning think tags from response");
-responseText = responseText.replace(/<think>[\s\S]*?<\/think>\n?/g, "");
-logger5.info("Think tags removed from response");
-}
-logger5.info("StudioLM request completed successfully:", {
-responseLength: responseText.length,
-hasThinkTags: responseText.includes("<think>"),
-timestamp: (/* @__PURE__ */ new Date()).toISOString()
-});
-return responseText;
-} catch (error) {
-logger5.error("StudioLM text generation error:", {
-error: error instanceof Error ? error.message : String(error),
-stack: error instanceof Error ? error.stack : void 0,
-phase: "text generation",
-timestamp: (/* @__PURE__ */ new Date()).toISOString()
-});
-throw error;
-}
-}
-};
-
 // src/utils/tokenizerManager.ts
-import { logger as
-import {
+import { logger as logger4 } from "@elizaos/core";
+import {
+AutoTokenizer
+} from "@huggingface/transformers";
 var TokenizerManager = class _TokenizerManager {
 static instance = null;
 tokenizers;
@@ -1534,7 +926,7 @@ var TokenizerManager = class _TokenizerManager {
 async loadTokenizer(modelConfig) {
 try {
 const tokenizerKey = `${modelConfig.tokenizer.type}-${modelConfig.tokenizer.name}`;
-
+logger4.info("Loading tokenizer:", {
 key: tokenizerKey,
 name: modelConfig.tokenizer.name,
 type: modelConfig.tokenizer.type,
@@ -1542,50 +934,61 @@ var TokenizerManager = class _TokenizerManager {
 cacheDir: this.cacheDir
 });
 if (this.tokenizers.has(tokenizerKey)) {
-
+logger4.info("Using cached tokenizer:", { key: tokenizerKey });
 const cachedTokenizer = this.tokenizers.get(tokenizerKey);
 if (!cachedTokenizer) {
-throw new Error(
+throw new Error(
+`Tokenizer ${tokenizerKey} exists in map but returned undefined`
+);
 }
 return cachedTokenizer;
 }
-const fs6 = await import("
+const fs6 = await import("fs");
 if (!fs6.existsSync(this.modelsDir)) {
-
+logger4.warn(
+"Models directory does not exist, creating it:",
+this.modelsDir
+);
 fs6.mkdirSync(this.modelsDir, { recursive: true });
 }
-
+logger4.info(
 "Initializing new tokenizer from HuggingFace with models directory:",
 this.modelsDir
 );
 try {
-const tokenizer = await AutoTokenizer.from_pretrained(
-
-
-
+const tokenizer = await AutoTokenizer.from_pretrained(
+modelConfig.tokenizer.name,
+{
+cache_dir: this.modelsDir,
+local_files_only: false
+}
+);
 this.tokenizers.set(tokenizerKey, tokenizer);
-
+logger4.success("Tokenizer loaded successfully:", { key: tokenizerKey });
 return tokenizer;
 } catch (tokenizeError) {
-
+logger4.error("Failed to load tokenizer from HuggingFace:", {
 error: tokenizeError instanceof Error ? tokenizeError.message : String(tokenizeError),
 stack: tokenizeError instanceof Error ? tokenizeError.stack : void 0,
 tokenizer: modelConfig.tokenizer.name,
 modelsDir: this.modelsDir
 });
-
-const tokenizer = await AutoTokenizer.from_pretrained(
-
-
-
+logger4.info("Retrying tokenizer loading...");
+const tokenizer = await AutoTokenizer.from_pretrained(
+modelConfig.tokenizer.name,
+{
+cache_dir: this.modelsDir,
+local_files_only: false
+}
+);
 this.tokenizers.set(tokenizerKey, tokenizer);
-
+logger4.success("Tokenizer loaded successfully on retry:", {
 key: tokenizerKey
 });
 return tokenizer;
 }
 } catch (error) {
-
+logger4.error("Failed to load tokenizer:", {
 error: error instanceof Error ? error.message : String(error),
 stack: error instanceof Error ? error.stack : void 0,
 model: modelConfig.name,
@@ -1605,23 +1008,23 @@ var TokenizerManager = class _TokenizerManager {
 */
 async encode(text, modelConfig) {
 try {
-
+logger4.info("Encoding text with tokenizer:", {
 length: text.length,
 tokenizer: modelConfig.tokenizer.name
 });
 const tokenizer = await this.loadTokenizer(modelConfig);
-
+logger4.info("Tokenizer loaded, encoding text...");
 const encoded = await tokenizer.encode(text, {
 add_special_tokens: true,
 return_token_type_ids: false
 });
-
+logger4.info("Text encoded successfully:", {
 tokenCount: encoded.length,
 tokenizer: modelConfig.tokenizer.name
 });
 return encoded;
 } catch (error) {
-
+logger4.error("Text encoding failed:", {
 error: error instanceof Error ? error.message : String(error),
 stack: error instanceof Error ? error.stack : void 0,
 textLength: text.length,
@@ -1641,23 +1044,23 @@ var TokenizerManager = class _TokenizerManager {
 */
 async decode(tokens, modelConfig) {
 try {
-
+logger4.info("Decoding tokens with tokenizer:", {
 count: tokens.length,
 tokenizer: modelConfig.tokenizer.name
 });
 const tokenizer = await this.loadTokenizer(modelConfig);
-
+logger4.info("Tokenizer loaded, decoding tokens...");
 const decoded = await tokenizer.decode(tokens, {
 skip_special_tokens: true,
 clean_up_tokenization_spaces: true
 });
-
+logger4.info("Tokens decoded successfully:", {
 textLength: decoded.length,
 tokenizer: modelConfig.tokenizer.name
 });
 return decoded;
 } catch (error) {
-
+logger4.error("Token decoding failed:", {
 error: error instanceof Error ? error.message : String(error),
 stack: error instanceof Error ? error.stack : void 0,
 tokenCount: tokens.length,
@@ -1670,11 +1073,11 @@ var TokenizerManager = class _TokenizerManager {
 };
 
 // src/utils/transcribeManager.ts
-import { exec as exec2 } from "
-import fs2 from "
-import path2 from "
-import { promisify as promisify2 } from "
-import { logger as
+import { exec as exec2 } from "child_process";
+import fs2 from "fs";
+import path2 from "path";
+import { promisify as promisify2 } from "util";
+import { logger as logger5 } from "@elizaos/core";
 import { nodewhisper } from "nodejs-whisper";
 var execAsync2 = promisify2(exec2);
 var TranscribeManager = class _TranscribeManager {
@@ -1691,7 +1094,7 @@ var TranscribeManager = class _TranscribeManager {
 */
 constructor(cacheDir) {
 this.cacheDir = path2.join(cacheDir, "whisper");
-
+logger5.debug("Initializing TranscribeManager", {
 cacheDir: this.cacheDir,
 timestamp: (/* @__PURE__ */ new Date()).toISOString()
 });
@@ -1707,7 +1110,7 @@ var TranscribeManager = class _TranscribeManager {
 await this.initializeFFmpeg();
 this.ffmpegInitialized = true;
 } catch (error) {
-
+logger5.error("FFmpeg initialization failed:", {
 error: error instanceof Error ? error.message : String(error),
 stack: error instanceof Error ? error.stack : void 0,
 timestamp: (/* @__PURE__ */ new Date()).toISOString()
@@ -1745,13 +1148,13 @@ var TranscribeManager = class _TranscribeManager {
 try {
 const { stdout } = await execAsync2("ffmpeg -version");
 this.ffmpegVersion = stdout.split("\n")[0];
-
+logger5.info("FFmpeg version:", {
 version: this.ffmpegVersion,
 timestamp: (/* @__PURE__ */ new Date()).toISOString()
 });
 } catch (error) {
 this.ffmpegVersion = null;
-
+logger5.error("Failed to get FFmpeg version:", {
 error: error instanceof Error ? error.message : String(error),
 timestamp: (/* @__PURE__ */ new Date()).toISOString()
 });
@@ -1774,12 +1177,17 @@ var TranscribeManager = class _TranscribeManager {
 if (this.ffmpegAvailable) {
 await this.fetchFFmpegVersion();
 await this.verifyFFmpegCapabilities();
+logger5.success("FFmpeg initialized successfully", {
+version: this.ffmpegVersion,
+path: this.ffmpegPath,
+timestamp: (/* @__PURE__ */ new Date()).toISOString()
+});
 } else {
 this.logFFmpegInstallInstructions();
 }
 } catch (error) {
 this.ffmpegAvailable = false;
-
+logger5.error("FFmpeg initialization failed:", {
 error: error instanceof Error ? error.message : String(error),
 stack: error instanceof Error ? error.stack : void 0,
 timestamp: (/* @__PURE__ */ new Date()).toISOString()
@@ -1796,10 +1204,12 @@ var TranscribeManager = class _TranscribeManager {
 */
 async checkFFmpegAvailability() {
 try {
-const { stdout, stderr } = await execAsync2(
+const { stdout, stderr } = await execAsync2(
+"which ffmpeg || where ffmpeg"
+);
 this.ffmpegPath = stdout.trim();
 this.ffmpegAvailable = true;
-
+logger5.info("FFmpeg found at:", {
 path: this.ffmpegPath,
 stderr: stderr ? stderr.trim() : void 0,
 timestamp: (/* @__PURE__ */ new Date()).toISOString()
@@ -1807,7 +1217,7 @@ var TranscribeManager = class _TranscribeManager {
 } catch (error) {
 this.ffmpegAvailable = false;
 this.ffmpegPath = null;
-
+logger5.error("FFmpeg not found in PATH:", {
 error: error instanceof Error ? error.message : String(error),
 stderr: error instanceof Error && "stderr" in error ? error.stderr : void 0,
 timestamp: (/* @__PURE__ */ new Date()).toISOString()
@@ -1824,10 +1234,12 @@ var TranscribeManager = class _TranscribeManager {
 const { stdout } = await execAsync2("ffmpeg -codecs");
 const hasRequiredCodecs = stdout.includes("pcm_s16le") && stdout.includes("wav");
 if (!hasRequiredCodecs) {
-throw new Error(
+throw new Error(
+"FFmpeg installation missing required codecs (pcm_s16le, wav)"
+);
 }
 } catch (error) {
-
+logger5.error("FFmpeg capabilities verification failed:", {
 error: error instanceof Error ? error.message : String(error),
 timestamp: (/* @__PURE__ */ new Date()).toISOString()
 });
@@ -1838,17 +1250,20 @@ var TranscribeManager = class _TranscribeManager {
 * Logs instructions on how to install FFmpeg if it is not properly installed.
 */
 logFFmpegInstallInstructions() {
-
-
-
-
-
-
-
-
-
-
-
+logger5.warn(
+"FFmpeg is required but not properly installed. Please install FFmpeg:",
+{
+instructions: {
+mac: "brew install ffmpeg",
+ubuntu: "sudo apt-get install ffmpeg",
+windows: "choco install ffmpeg",
+manual: "Download from https://ffmpeg.org/download.html"
+},
+requiredVersion: "4.0 or later",
+requiredCodecs: ["pcm_s16le", "wav"],
+timestamp: (/* @__PURE__ */ new Date()).toISOString()
+}
+);
 }
 /**
 * Gets the singleton instance of TranscribeManager, creates a new instance if it doesn't exist.
@@ -1891,7 +1306,7 @@ var TranscribeManager = class _TranscribeManager {
 `ffmpeg -y -loglevel error -i "${inputPath}" -acodec pcm_s16le -ar 16000 -ac 1 "${outputPath}"`
 );
 if (stderr) {
-
+logger5.warn("FFmpeg conversion error:", {
 stderr,
 inputPath,
 outputPath,
@@ -1902,7 +1317,7 @@ var TranscribeManager = class _TranscribeManager {
 throw new Error("WAV file was not created successfully");
 }
 } catch (error) {
-
+logger5.error("Audio conversion failed:", {
 error: error instanceof Error ? error.message : String(error),
 stack: error instanceof Error ? error.stack : void 0,
 command: `ffmpeg -y -loglevel error -i "${inputPath}" -acodec pcm_s16le -ar 16000 -ac 1 "${outputPath}"`,
@@ -1926,10 +1341,15 @@ var TranscribeManager = class _TranscribeManager {
 */
 async preprocessAudio(audioBuffer) {
 if (!this.ffmpegAvailable) {
-throw new Error(
+throw new Error(
+"FFmpeg is not installed. Please install FFmpeg to use audio transcription."
+);
 }
 try {
-const tempInputFile = path2.join(
+const tempInputFile = path2.join(
+this.cacheDir,
+`temp_input_${Date.now()}`
+);
 const tempWavFile = path2.join(this.cacheDir, `temp_${Date.now()}.wav`);
 fs2.writeFileSync(tempInputFile, audioBuffer);
 await this.convertToWav(tempInputFile, tempWavFile);
@@ -1938,7 +1358,7 @@ var TranscribeManager = class _TranscribeManager {
|
|
|
1938
1358
|
}
|
|
1939
1359
|
return tempWavFile;
|
|
1940
1360
|
} catch (error) {
|
|
1941
|
-
|
|
1361
|
+
logger5.error("Audio preprocessing failed:", {
|
|
1942
1362
|
error: error instanceof Error ? error.message : String(error),
|
|
1943
1363
|
stack: error instanceof Error ? error.stack : void 0,
|
|
1944
1364
|
ffmpegAvailable: this.ffmpegAvailable,
|
|
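
The two hunks above implement a write-then-convert flow: persist the incoming buffer, then shell out to ffmpeg for a 16 kHz mono 16-bit WAV. A condensed TypeScript sketch under the same assumptions (ffmpeg on PATH, a writable cache directory); the standalone function is illustrative:

import { exec } from "child_process";
import { promisify } from "util";
import fs from "fs";
import path from "path";

const execAsync = promisify(exec);

// Mirrors the temp_input_/temp_ naming and the ffmpeg flags in the diff:
// pcm_s16le = 16-bit PCM, -ar 16000 = 16 kHz, -ac 1 = mono, -y = overwrite.
async function preprocessAudio(audioBuffer: Buffer, cacheDir: string): Promise<string> {
  const tempInputFile = path.join(cacheDir, `temp_input_${Date.now()}`);
  const tempWavFile = path.join(cacheDir, `temp_${Date.now()}.wav`);
  fs.writeFileSync(tempInputFile, audioBuffer);
  await execAsync(
    `ffmpeg -y -loglevel error -i "${tempInputFile}" -acodec pcm_s16le -ar 16000 -ac 1 "${tempWavFile}"`
  );
  fs.unlinkSync(tempInputFile); // the plugin cleans its temp files similarly
  return tempWavFile;
}
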
@@ -1965,7 +1385,7 @@ var TranscribeManager = class _TranscribeManager {
     }
     try {
       const wavFile = await this.preprocessAudio(audioBuffer);
-
+      logger5.info("Starting transcription with whisper...");
       const originalStdoutWrite = process.stdout.write;
       const originalStderrWrite = process.stderr.write;
       const noopWrite = () => true;
@@ -1988,19 +1408,19 @@ var TranscribeManager = class _TranscribeManager {
       }
       if (fs2.existsSync(wavFile)) {
         fs2.unlinkSync(wavFile);
-
+        logger5.info("Temporary WAV file cleaned up");
       }
       const cleanText = output.split("\n").map((line) => {
         const textMatch = line.match(/](.+)$/);
         return textMatch ? textMatch[1].trim() : line.trim();
       }).filter((line) => line).join(" ");
-
+      logger5.success("Transcription complete:", {
         textLength: cleanText.length,
         timestamp: (/* @__PURE__ */ new Date()).toISOString()
       });
       return { text: cleanText };
     } catch (error) {
-
+      logger5.error("Transcription failed:", {
         error: error instanceof Error ? error.message : String(error),
         stack: error instanceof Error ? error.stack : void 0,
         ffmpegAvailable: this.ffmpegAvailable
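
The cleanText pipeline in this hunk strips whisper's timestamp prefixes before joining lines. Isolated as a sketch; the sample format in the comment is an assumption based on whisper's usual output:

// Each stdout line looks like "[00:00.000 --> 00:02.500]  hello there";
// the regex keeps only the text after the closing bracket.
function cleanWhisperOutput(output: string): string {
  return output
    .split("\n")
    .map((line) => {
      const textMatch = line.match(/](.+)$/);
      return textMatch ? textMatch[1].trim() : line.trim();
    })
    .filter((line) => line)
    .join(" ");
}
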
@@ -2011,294 +1431,190 @@ var TranscribeManager = class _TranscribeManager {
 };
 
 // src/utils/ttsManager.ts
-import fs3 from "
-import path3 from "
-import { Readable } from "
-import { logger as
-import {
-
-} from "node-llama-cpp";
-
-// src/utils/audioUtils.ts
-import { PassThrough } from "node:stream";
-function getWavHeader(audioLength, sampleRate, channelCount = 1, bitsPerSample = 16) {
-  const wavHeader = Buffer.alloc(44);
-  wavHeader.write("RIFF", 0);
-  wavHeader.writeUInt32LE(36 + audioLength, 4);
-  wavHeader.write("WAVE", 8);
-  wavHeader.write("fmt ", 12);
-  wavHeader.writeUInt32LE(16, 16);
-  wavHeader.writeUInt16LE(1, 20);
-  wavHeader.writeUInt16LE(channelCount, 22);
-  wavHeader.writeUInt32LE(sampleRate, 24);
-  wavHeader.writeUInt32LE(sampleRate * bitsPerSample * channelCount / 8, 28);
-  wavHeader.writeUInt16LE(bitsPerSample * channelCount / 8, 32);
-  wavHeader.writeUInt16LE(bitsPerSample, 34);
-  wavHeader.write("data", 36);
-  wavHeader.writeUInt32LE(audioLength, 40);
-  return wavHeader;
-}
-function prependWavHeader(readable, audioLength, sampleRate, channelCount = 1, bitsPerSample = 16) {
-  const wavHeader = getWavHeader(audioLength, sampleRate, channelCount, bitsPerSample);
-  let pushedHeader = false;
-  const passThrough = new PassThrough();
-  readable.on("data", (data) => {
-    if (!pushedHeader) {
-      passThrough.push(wavHeader);
-      pushedHeader = true;
-    }
-    passThrough.push(data);
-  });
-  readable.on("end", () => {
-    passThrough.end();
-  });
-  return passThrough;
-}
-
-// src/utils/ttsManager.ts
+import fs3 from "fs";
+import path3 from "path";
+import { Readable } from "stream";
+import { logger as logger6, prependWavHeader } from "@elizaos/core";
+import { pipeline } from "@huggingface/transformers";
+import { fetch as fetch2 } from "undici";
 var TTSManager = class _TTSManager {
   static instance = null;
   cacheDir;
-
-
-  sequence = null;
+  synthesizer = null;
+  defaultSpeakerEmbedding = null;
   initialized = false;
-
-  modelsDir;
-  /**
-   * Creates a new instance of TTSManager with the provided cache directory.
-   *
-   * @param {string} cacheDir - The directory where cached data will be stored.
-   */
+  initializingPromise = null;
   constructor(cacheDir) {
     this.cacheDir = path3.join(cacheDir, "tts");
-    this.modelsDir = process.env.LLAMALOCAL_PATH?.trim() ? path3.resolve(process.env.LLAMALOCAL_PATH.trim()) : path3.join(process.cwd(), "models");
-    this.downloadManager = DownloadManager.getInstance(this.cacheDir, this.modelsDir);
     this.ensureCacheDirectory();
-
+    logger6.debug("TTSManager using Transformers.js initialized");
   }
-  /**
-   * Returns an instance of TTSManager, creating a new one if none exist.
-   *
-   * @param {string} cacheDir - The directory path to store cached audio files.
-   * @returns {TTSManager} An instance of TTSManager.
-   */
   static getInstance(cacheDir) {
     if (!_TTSManager.instance) {
       _TTSManager.instance = new _TTSManager(cacheDir);
     }
     return _TTSManager.instance;
   }
-  /**
-   * Ensures that the cache directory exists. If it does not exist, the directory will be created.
-   */
   ensureCacheDirectory() {
     if (!fs3.existsSync(this.cacheDir)) {
       fs3.mkdirSync(this.cacheDir, { recursive: true });
-
+      logger6.debug("Created TTS cache directory:", this.cacheDir);
     }
   }
-  /**
-   * Asynchronously initializes the TTS module with GGUF backend.
-   * If already initialized or missing necessary components (model and context), it returns early.
-   * Handles model download using different URL patterns as fallback if model not found locally.
-   * Initializes the TTS model, creates context, and sets the sequence for TTS generation.
-   * Logs detailed steps and final output of initialization.
-   *
-   * @returns {Promise<void>} A promise that resolves once the TTS module is fully initialized.
-   */
   async initialize() {
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-  }
+    if (this.initializingPromise) {
+      logger6.debug(
+        "TTS initialization already in progress, awaiting existing promise."
+      );
+      return this.initializingPromise;
+    }
+    if (this.initialized) {
+      logger6.debug("TTS already initialized.");
+      return;
+    }
+    this.initializingPromise = (async () => {
+      try {
+        logger6.info("Initializing TTS with Transformers.js backend...");
+        const ttsModelSpec = MODEL_SPECS.tts.default;
+        if (!ttsModelSpec) {
+          throw new Error(
+            "Default TTS model specification not found in MODEL_SPECS."
+          );
+        }
+        const modelName = ttsModelSpec.modelId;
+        const speakerEmbeddingUrl = ttsModelSpec.defaultSpeakerEmbeddingUrl;
+        logger6.info(`Loading TTS pipeline for model: ${modelName}`);
+        this.synthesizer = await pipeline("text-to-audio", modelName);
+        logger6.success(
+          `TTS pipeline loaded successfully for model: ${modelName}`
+        );
+        if (speakerEmbeddingUrl) {
+          const embeddingFilename = path3.basename(
+            new URL(speakerEmbeddingUrl).pathname
+          );
+          const embeddingPath = path3.join(this.cacheDir, embeddingFilename);
+          if (fs3.existsSync(embeddingPath)) {
+            logger6.info("Loading default speaker embedding from cache...");
+            const buffer = fs3.readFileSync(embeddingPath);
+            this.defaultSpeakerEmbedding = new Float32Array(
+              buffer.buffer,
+              buffer.byteOffset,
+              buffer.length / Float32Array.BYTES_PER_ELEMENT
+            );
+            logger6.success("Default speaker embedding loaded from cache.");
+          } else {
+            logger6.info(
+              `Downloading default speaker embedding from: ${speakerEmbeddingUrl}`
+            );
+            const response = await fetch2(speakerEmbeddingUrl);
+            if (!response.ok) {
+              throw new Error(
+                `Failed to download speaker embedding: ${response.statusText}`
+              );
+            }
+            const buffer = await response.arrayBuffer();
+            this.defaultSpeakerEmbedding = new Float32Array(buffer);
+            fs3.writeFileSync(embeddingPath, Buffer.from(buffer));
+            logger6.success("Default speaker embedding downloaded and cached.");
           }
+        } else {
+          logger6.warn(
+            `No default speaker embedding URL specified for model ${modelName}. Speaker control may be limited.`
+          );
+          this.defaultSpeakerEmbedding = null;
         }
-        if (!
-          throw
+        if (!this.synthesizer) {
+          throw new Error("TTS initialization failed: Pipeline not loaded.");
         }
+        logger6.success("TTS initialization complete (Transformers.js)");
+        this.initialized = true;
+      } catch (error) {
+        logger6.error("TTS (Transformers.js) initialization failed:", {
+          error: error instanceof Error ? error.message : String(error),
+          stack: error instanceof Error ? error.stack : void 0
+        });
+        this.initialized = false;
+        this.synthesizer = null;
+        this.defaultSpeakerEmbedding = null;
+        throw error;
+      } finally {
+        this.initializingPromise = null;
+        logger6.debug(
+          "TTS initializingPromise cleared after completion/failure."
+        );
       }
-
-
-    this.model = await llama.loadModel({
-      modelPath,
-      gpuLayers: 0
-      // Force CPU for now until we add GPU support
-    });
-    this.ctx = await this.model.createContext({
-      contextSize: modelSpec.contextSize
-    });
-    this.sequence = this.ctx.getSequence();
-    logger8.success("TTS initialization complete", {
-      modelPath,
-      contextSize: modelSpec.contextSize,
-      timestamp: (/* @__PURE__ */ new Date()).toISOString()
-    });
-    this.initialized = true;
-  } catch (error) {
-    logger8.error("TTS initialization failed:", {
-      error: error instanceof Error ? error.message : String(error),
-      model: MODEL_SPECS.tts.base.name,
-      timestamp: (/* @__PURE__ */ new Date()).toISOString()
-    });
-    throw error;
-  }
+    })();
+    return this.initializingPromise;
   }
   /**
-   * Asynchronously generates speech from a given text using the
+   * Asynchronously generates speech from a given text using the Transformers.js pipeline.
    * @param {string} text - The text to generate speech from.
-   * @returns {Promise<Readable>} A promise that resolves to a Readable stream containing the generated audio data.
-   * @throws {Error} If the TTS model is not initialized or if
+   * @returns {Promise<Readable>} A promise that resolves to a Readable stream containing the generated WAV audio data.
+   * @throws {Error} If the TTS model is not initialized or if generation fails.
    */
   async generateSpeech(text) {
     try {
-      await this.initialize();
-      if (!this.
-        throw new Error("TTS
-      }
-
-
-
-
-
-
-
-      logger8.info("Starting token generation with optimized limit:", {
-        maxTokens
-      });
-      const responseTokens = [];
-      const _startTime = Date.now();
-      try {
-        for await (const token of this.sequence.evaluate(inputTokens, {
-          temperature: 0.1
-        })) {
-          responseTokens.push(token);
-          const percent = Math.round(responseTokens.length / maxTokens * 100);
-          const barLength = 30;
-          const filledLength = Math.floor(responseTokens.length / maxTokens * barLength);
-          const progressBar = "\u25B0".repeat(filledLength) + "\u25B1".repeat(barLength - filledLength);
-          logger8.info(
-            `Token generation: ${progressBar} ${percent}% (${responseTokens.length}/${maxTokens})`
-          );
-          if (responseTokens.length >= maxTokens) {
-            logger8.info("Token generation complete");
-            break;
-          }
+      await this.initialize();
+      if (!this.synthesizer) {
+        throw new Error("TTS Manager not properly initialized.");
+      }
+      logger6.info("Starting speech generation with Transformers.js for text:", {
+        text: text.substring(0, 50) + "..."
+      });
+      const output = await this.synthesizer(text, {
+        // Pass embedding only if it was loaded
+        ...this.defaultSpeakerEmbedding && {
+          speaker_embeddings: this.defaultSpeakerEmbedding
         }
-      }
-
-
+      });
+      const audioFloat32 = output.audio;
+      const samplingRate = output.sampling_rate;
+      logger6.info("Raw audio data received from pipeline:", {
+        samplingRate,
+        length: audioFloat32.length
+      });
+      if (!audioFloat32 || audioFloat32.length === 0) {
+        throw new Error("TTS pipeline generated empty audio output.");
       }
-
-
+      const pcmData = new Int16Array(audioFloat32.length);
+      for (let i = 0; i < audioFloat32.length; i++) {
+        const s = Math.max(-1, Math.min(1, audioFloat32[i]));
+        pcmData[i] = s < 0 ? s * 32768 : s * 32767;
       }
-
-
-
-      });
-      logger8.info("Audio data generated:", {
-        byteLength: audioData.length,
-        sampleRate: MODEL_SPECS.tts.base.sampleRate
+      const audioBuffer = Buffer.from(pcmData.buffer);
+      logger6.info("Audio data converted to 16-bit PCM Buffer:", {
+        byteLength: audioBuffer.length
       });
       const audioStream = prependWavHeader(
-        Readable.from(
-
-
+        Readable.from(audioBuffer),
+        audioBuffer.length,
+        // Pass buffer length in bytes
+        samplingRate,
         1,
+        // Number of channels (assuming mono)
         16
+        // Bit depth
       );
-
+      logger6.success("Speech generation complete (Transformers.js)");
       return audioStream;
     } catch (error) {
-
+      logger6.error("Transformers.js speech generation failed:", {
         error: error instanceof Error ? error.message : String(error),
-        text
+        text: text.substring(0, 50) + "...",
+        stack: error instanceof Error ? error.stack : void 0
       });
       throw error;
     }
   }
-  /**
-   * Processes the audio response from the TTS service by converting
-   * the data to 16-bit PCM format.
-   * If the response contains direct audio data, it converts Float32Array
-   * to 16-bit PCM. If the response only contains tokens, it converts
-   * them to PCM data. The actual conversion process may vary depending
-   * on the model used.
-   *
-   * @param {TTSResponse} response - The response object from the TTS service
-   * @returns {Buffer} The processed audio data in 16-bit PCM format
-   */
-  processAudioResponse(response) {
-    if (response.audio) {
-      const pcmData2 = new Int16Array(response.audio.length);
-      for (let i = 0; i < response.audio.length; i++) {
-        const s = Math.max(-1, Math.min(1, response.audio[i]));
-        pcmData2[i] = s < 0 ? s * 32768 : s * 32767;
-      }
-      return Buffer.from(pcmData2.buffer);
-    }
-    const pcmData = new Int16Array(response.tokens.length * 2);
-    for (let i = 0; i < response.tokens.length; i++) {
-      pcmData[i * 2] = response.tokens[i] & 65535;
-      pcmData[i * 2 + 1] = response.tokens[i] >> 16 & 65535;
-    }
-    return Buffer.from(pcmData.buffer);
-  }
 };
 
 // src/utils/visionManager.ts
-import { existsSync } from "
-import fs4 from "
-import os2 from "
-import path4 from "
-import process2 from "
-import { logger as
+import { existsSync } from "fs";
+import fs4 from "fs";
+import os2 from "os";
+import path4 from "path";
+import process2 from "process";
+import { logger as logger7 } from "@elizaos/core";
 import {
   AutoProcessor,
   AutoTokenizer as AutoTokenizer2,
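
This hunk replaces the GGUF token loop with the Transformers.js "text-to-audio" pipeline and moves WAV header construction into @elizaos/core. Both halves of the audio plumbing are visible in the diff itself; a TypeScript sketch combining them, assuming the imported prependWavHeader matches the deleted getWavHeader logic:

// Clamp and scale Float32 samples ([-1, 1]) to 16-bit PCM, using the
// asymmetric int16 range exactly as the added generateSpeech code does.
function float32ToPcm16(audio: Float32Array): Buffer {
  const pcm = new Int16Array(audio.length);
  for (let i = 0; i < audio.length; i++) {
    const s = Math.max(-1, Math.min(1, audio[i]));
    pcm[i] = s < 0 ? s * 32768 : s * 32767;
  }
  return Buffer.from(pcm.buffer);
}

// The 44-byte RIFF/WAVE header the deleted audioUtils.ts built by hand;
// audioLength is the PCM byte count.
function wavHeader(audioLength: number, sampleRate: number, channelCount = 1, bitsPerSample = 16): Buffer {
  const h = Buffer.alloc(44);
  h.write("RIFF", 0);
  h.writeUInt32LE(36 + audioLength, 4);
  h.write("WAVE", 8);
  h.write("fmt ", 12);
  h.writeUInt32LE(16, 16);                                            // fmt chunk size
  h.writeUInt16LE(1, 20);                                             // PCM format
  h.writeUInt16LE(channelCount, 22);
  h.writeUInt32LE(sampleRate, 24);
  h.writeUInt32LE(sampleRate * bitsPerSample * channelCount / 8, 28); // byte rate
  h.writeUInt16LE(bitsPerSample * channelCount / 8, 32);              // block align
  h.writeUInt16LE(bitsPerSample, 34);
  h.write("data", 36);
  h.writeUInt32LE(audioLength, 40);
  return h;
}
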
@@ -2334,9 +1650,12 @@ var VisionManager = class _VisionManager {
     this.modelsDir = path4.join(path4.dirname(cacheDir), "models", "vision");
     this.cacheDir = cacheDir;
     this.ensureModelsDirExists();
-    this.downloadManager = DownloadManager.getInstance(
+    this.downloadManager = DownloadManager.getInstance(
+      this.cacheDir,
+      this.modelsDir
+    );
     this.platformConfig = this.getPlatformConfig();
-
+    logger7.debug("VisionManager initialized");
   }
   /**
    * Retrieves the platform configuration based on the operating system and architecture.
@@ -2373,7 +1692,7 @@ var VisionManager = class _VisionManager {
    */
   ensureModelsDirExists() {
     if (!existsSync(this.modelsDir)) {
-
+      logger7.debug(`Creating models directory at: ${this.modelsDir}`);
       fs4.mkdirSync(this.modelsDir, { recursive: true });
     }
   }
@@ -2398,9 +1717,13 @@ var VisionManager = class _VisionManager {
    * @returns {boolean} - Returns true if cache exists, otherwise returns false.
    */
   checkCacheExists(modelId, type) {
-    const modelPath = path4.join(
+    const modelPath = path4.join(
+      this.modelsDir,
+      modelId.replace("/", "--"),
+      type
+    );
     if (existsSync(modelPath)) {
-
+      logger7.info(`${type} found at: ${modelPath}`);
       return true;
     }
     return false;
|
|
|
2423
1746
|
...component,
|
|
2424
1747
|
dtype: defaultDtype
|
|
2425
1748
|
}));
|
|
2426
|
-
|
|
1749
|
+
logger7.info("Model components configured with dtype:", {
|
|
2427
1750
|
platform,
|
|
2428
1751
|
arch,
|
|
2429
1752
|
defaultDtype,
|
|
@@ -2436,7 +1759,9 @@ var VisionManager = class _VisionManager {
|
|
|
2436
1759
|
* @returns {object} The model configuration object containing device, dtype, and cache_dir.
|
|
2437
1760
|
*/
|
|
2438
1761
|
getModelConfig(componentName) {
|
|
2439
|
-
const component = this.modelComponents.find(
|
|
1762
|
+
const component = this.modelComponents.find(
|
|
1763
|
+
(c) => c.name === componentName
|
|
1764
|
+
);
|
|
2440
1765
|
return {
|
|
2441
1766
|
device: this.platformConfig.device,
|
|
2442
1767
|
dtype: component?.dtype || "fp32",
|
|
@@ -2452,106 +1777,135 @@ var VisionManager = class _VisionManager {
   async initialize() {
     try {
       if (this.initialized) {
-
+        logger7.info(
+          "Vision model already initialized, skipping initialization"
+        );
         return;
       }
-
+      logger7.info("Starting vision model initialization...");
       const modelSpec = MODEL_SPECS.vision;
-
+      logger7.info("Configuring environment for vision model...");
       env.allowLocalModels = true;
       env.allowRemoteModels = true;
       if (this.platformConfig.useOnnx) {
         env.backends.onnx.enabled = true;
         env.backends.onnx.logLevel = "info";
       }
-
+      logger7.info("Loading Florence2 model...");
       try {
         let lastProgress = -1;
         const modelCached = this.checkCacheExists(modelSpec.modelId, "model");
-        const model = await Florence2ForConditionalGeneration.from_pretrained(
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        const model = await Florence2ForConditionalGeneration.from_pretrained(
+          modelSpec.modelId,
+          {
+            device: "cpu",
+            cache_dir: this.modelsDir,
+            local_files_only: modelCached,
+            revision: "main",
+            progress_callback: (progressInfo) => {
+              if (modelCached || this.modelDownloaded) return;
+              const progress = "progress" in progressInfo ? Math.max(0, Math.min(1, progressInfo.progress)) : 0;
+              const currentProgress = Math.round(progress * 100);
+              if (currentProgress > lastProgress + 9 || currentProgress === 100) {
+                lastProgress = currentProgress;
+                const barLength = 30;
+                const filledLength = Math.floor(
+                  currentProgress / 100 * barLength
+                );
+                const progressBar = "\u25B0".repeat(filledLength) + "\u25B1".repeat(barLength - filledLength);
+                logger7.info(
+                  `Downloading vision model: ${progressBar} ${currentProgress}%`
+                );
+                if (currentProgress === 100) this.modelDownloaded = true;
+              }
             }
           }
-
+        );
         this.model = model;
-
+        logger7.success("Florence2 model loaded successfully");
       } catch (error) {
-
+        logger7.error("Failed to load Florence2 model:", {
           error: error instanceof Error ? error.message : String(error),
           stack: error instanceof Error ? error.stack : void 0,
           modelId: modelSpec.modelId
         });
         throw error;
       }
-
+      logger7.info("Loading vision tokenizer...");
       try {
-        const tokenizerCached = this.checkCacheExists(
+        const tokenizerCached = this.checkCacheExists(
+          modelSpec.modelId,
+          "tokenizer"
+        );
         let tokenizerProgress = -1;
-        this.tokenizer = await AutoTokenizer2.from_pretrained(
-
-
-
-
-
-
-
-
-
-
-
-
-
+        this.tokenizer = await AutoTokenizer2.from_pretrained(
+          modelSpec.modelId,
+          {
+            cache_dir: this.modelsDir,
+            local_files_only: tokenizerCached,
+            progress_callback: (progressInfo) => {
+              if (tokenizerCached || this.tokenizerDownloaded) return;
+              const progress = "progress" in progressInfo ? Math.max(0, Math.min(1, progressInfo.progress)) : 0;
+              const currentProgress = Math.round(progress * 100);
+              if (currentProgress !== tokenizerProgress) {
+                tokenizerProgress = currentProgress;
+                const barLength = 30;
+                const filledLength = Math.floor(
+                  currentProgress / 100 * barLength
+                );
+                const progressBar = "\u25B0".repeat(filledLength) + "\u25B1".repeat(barLength - filledLength);
+                logger7.info(
+                  `Downloading vision tokenizer: ${progressBar} ${currentProgress}%`
+                );
+                if (currentProgress === 100) this.tokenizerDownloaded = true;
+              }
            }
          }
-
-
+        );
+        logger7.success("Vision tokenizer loaded successfully");
       } catch (error) {
-
+        logger7.error("Failed to load tokenizer:", {
          error: error instanceof Error ? error.message : String(error),
          stack: error instanceof Error ? error.stack : void 0,
          modelId: modelSpec.modelId
        });
        throw error;
      }
-
+      logger7.info("Loading vision processor...");
      try {
-        const processorCached = this.checkCacheExists(
+        const processorCached = this.checkCacheExists(
+          modelSpec.modelId,
+          "processor"
+        );
        let processorProgress = -1;
-        this.processor = await AutoProcessor.from_pretrained(
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        this.processor = await AutoProcessor.from_pretrained(
+          modelSpec.modelId,
+          {
+            device: "cpu",
+            cache_dir: this.modelsDir,
+            local_files_only: processorCached,
+            progress_callback: (progressInfo) => {
+              if (processorCached || this.processorDownloaded) return;
+              const progress = "progress" in progressInfo ? Math.max(0, Math.min(1, progressInfo.progress)) : 0;
+              const currentProgress = Math.round(progress * 100);
+              if (currentProgress !== processorProgress) {
+                processorProgress = currentProgress;
+                const barLength = 30;
+                const filledLength = Math.floor(
+                  currentProgress / 100 * barLength
+                );
+                const progressBar = "\u25B0".repeat(filledLength) + "\u25B1".repeat(barLength - filledLength);
+                logger7.info(
+                  `Downloading vision processor: ${progressBar} ${currentProgress}%`
+                );
+                if (currentProgress === 100) this.processorDownloaded = true;
+              }
            }
          }
-
-
+        );
+        logger7.success("Vision processor loaded successfully");
      } catch (error) {
-
+        logger7.error("Failed to load vision processor:", {
          error: error instanceof Error ? error.message : String(error),
          stack: error instanceof Error ? error.stack : void 0,
          modelId: modelSpec.modelId
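
The three progress_callback blocks in this hunk share one rendering idiom. Factored out as a sketch (the plugin inlines this logic; the helper itself is illustrative):

// \u25B0 / \u25B1 are the filled and empty bar glyphs used above.
function renderProgressBar(progress: number, barLength = 30): string {
  const clamped = Math.max(0, Math.min(1, progress));
  const percent = Math.round(clamped * 100);
  const filled = Math.floor((percent / 100) * barLength);
  return "\u25B0".repeat(filled) + "\u25B1".repeat(barLength - filled) + ` ${percent}%`;
}
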
@@ -2559,9 +1913,9 @@ var VisionManager = class _VisionManager {
         throw error;
       }
       this.initialized = true;
-
+      logger7.success("Vision model initialization complete");
     } catch (error) {
-
+      logger7.error("Vision model initialization failed:", {
         error: error instanceof Error ? error.message : String(error),
         stack: error instanceof Error ? error.stack : void 0,
         modelsDir: this.modelsDir
@@ -2577,13 +1931,13 @@ var VisionManager = class _VisionManager {
    */
   async fetchImage(url) {
     try {
-
+      logger7.info(`Fetching image from URL: ${url.slice(0, 100)}...`);
       if (url.startsWith("data:")) {
-
+        logger7.info("Processing data URL...");
         const [header, base64Data] = url.split(",");
         const mimeType2 = header.split(";")[0].split(":")[1];
         const buffer2 = Buffer.from(base64Data, "base64");
-
+        logger7.info("Data URL processed successfully");
         return { buffer: buffer2, mimeType: mimeType2 };
       }
       const response = await fetch(url);
@@ -2592,14 +1946,14 @@ var VisionManager = class _VisionManager {
       }
       const buffer = Buffer.from(await response.arrayBuffer());
       const mimeType = response.headers.get("content-type") || "image/jpeg";
-
+      logger7.info("Image fetched successfully:", {
         mimeType,
         bufferSize: buffer.length,
         status: response.status
       });
       return { buffer, mimeType };
     } catch (error) {
-
+      logger7.error("Failed to fetch image:", {
         error: error instanceof Error ? error.message : String(error),
         stack: error instanceof Error ? error.stack : void 0,
         url
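
fetchImage keeps two branches: data: URLs are decoded inline from base64, everything else goes over HTTP. A condensed sketch of the same control flow, assuming Node 18+ for the global fetch:

async function fetchImage(url: string): Promise<{ buffer: Buffer; mimeType: string }> {
  if (url.startsWith("data:")) {
    // "data:image/png;base64,...." -> mime type + raw bytes
    const [header, base64Data] = url.split(",");
    const mimeType = header.split(";")[0].split(":")[1];
    return { buffer: Buffer.from(base64Data, "base64"), mimeType };
  }
  const response = await fetch(url);
  if (!response.ok) throw new Error(`Failed to fetch image: ${response.status}`);
  const buffer = Buffer.from(await response.arrayBuffer());
  const mimeType = response.headers.get("content-type") || "image/jpeg";
  return { buffer, mimeType };
}
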
@@ -2614,37 +1968,37 @@ var VisionManager = class _VisionManager {
    */
   async processImage(imageUrl) {
     try {
-
+      logger7.info("Starting image processing...");
       if (!this.initialized) {
-
+        logger7.info("Vision model not initialized, initializing now...");
         await this.initialize();
       }
       if (!this.model || !this.processor || !this.tokenizer) {
         throw new Error("Vision model components not properly initialized");
       }
-
+      logger7.info("Fetching image...");
       const { buffer, mimeType } = await this.fetchImage(imageUrl);
-
+      logger7.info("Creating image blob...");
       const blob = new Blob([buffer], { type: mimeType });
-
+      logger7.info("Converting blob to RawImage...");
       const image = await RawImage.fromBlob(blob);
-
+      logger7.info("Processing image with vision processor...");
       const visionInputs = await this.processor(image);
-
+      logger7.info("Constructing prompts...");
       const prompts = this.processor.construct_prompts("<DETAILED_CAPTION>");
-
+      logger7.info("Tokenizing prompts...");
       const textInputs = this.tokenizer(prompts);
-
+      logger7.info("Generating image description...");
       const generatedIds = await this.model.generate({
         ...textInputs,
         ...visionInputs,
         max_new_tokens: MODEL_SPECS.vision.maxTokens
       });
-
+      logger7.info("Decoding generated text...");
       const generatedText = this.tokenizer.batch_decode(generatedIds, {
         skip_special_tokens: false
       })[0];
-
+      logger7.info("Post-processing generation...");
       const result = this.processor.post_process_generation(
         generatedText,
         "<DETAILED_CAPTION>",
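
The processImage hunk wires the Florence2 components together in a fixed sequence. A condensed sketch of that sequence; the max_new_tokens value and the image.size argument to post-processing are assumptions based on common Florence-2 usage in Transformers.js, not taken from this diff, and the loose any types stand in for the library's model classes:

import { RawImage } from "@huggingface/transformers";

async function describeImage(model: any, processor: any, tokenizer: any, buffer: Buffer, mimeType: string) {
  const blob = new Blob([buffer], { type: mimeType });
  const image = await RawImage.fromBlob(blob);
  const visionInputs = await processor(image);
  const prompts = processor.construct_prompts("<DETAILED_CAPTION>");
  const textInputs = tokenizer(prompts);
  const generatedIds = await model.generate({ ...textInputs, ...visionInputs, max_new_tokens: 256 });
  const generatedText = tokenizer.batch_decode(generatedIds, { skip_special_tokens: false })[0];
  return processor.post_process_generation(generatedText, "<DETAILED_CAPTION>", image.size);
}
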
@@ -2655,13 +2009,13 @@ var VisionManager = class _VisionManager {
         title: `${detailedCaption.split(".")[0]}.`,
         description: detailedCaption
       };
-
+      logger7.success("Image processing complete:", {
         titleLength: response.title.length,
         descriptionLength: response.description.length
       });
       return response;
     } catch (error) {
-
+      logger7.error("Image processing failed:", {
         error: error instanceof Error ? error.message : String(error),
         stack: error instanceof Error ? error.stack : void 0,
         imageUrl,
@@ -2676,8 +2030,7 @@ var VisionManager = class _VisionManager {
 };
 
 // src/index.ts
-
-var __dirname = path5.dirname(__filename);
+import { basename } from "path";
 var wordsToPunish = [
   " please",
   " feel",
@@ -2733,30 +2086,33 @@ var LocalAIManager = class _LocalAIManager {
   llama;
   smallModel;
   mediumModel;
+  embeddingModel;
+  embeddingContext;
   ctx;
   sequence;
   chatSession;
   modelPath;
   mediumModelPath;
+  embeddingModelPath;
   cacheDir;
-  embeddingModel = null;
   tokenizerManager;
   downloadManager;
   visionManager;
   activeModelConfig;
+  embeddingModelConfig;
   transcribeManager;
   ttsManager;
-
-
-  // Initialization state
-  ollamaInitialized = false;
-  studioLMInitialized = false;
+  config = null;
+  // Store validated config
+  // Initialization state flag
   smallModelInitialized = false;
   mediumModelInitialized = false;
   embeddingInitialized = false;
   visionInitialized = false;
   transcriptionInitialized = false;
   ttsInitialized = false;
+  environmentInitialized = false;
+  // Add flag for environment initialization
   // Initialization promises to prevent duplicate initialization
   smallModelInitializingPromise = null;
   mediumModelInitializingPromise = null;
@@ -2764,49 +2120,96 @@ var LocalAIManager = class _LocalAIManager {
   visionInitializingPromise = null;
   transcriptionInitializingPromise = null;
   ttsInitializingPromise = null;
-
-
+  environmentInitializingPromise = null;
+  // Add promise for environment
   modelsDir;
   /**
    * Private constructor function to initialize base managers and paths.
-   *
+   * Model paths are set after environment initialization.
    */
   constructor() {
-
-
-
+    this.config = validateConfig();
+    this._setupCacheDir();
+    this.activeModelConfig = MODEL_SPECS.small;
+    this.embeddingModelConfig = MODEL_SPECS.embedding;
+  }
+  /**
+   * Post-validation initialization steps that require config to be set.
+   * Called after config validation in initializeEnvironment.
+   */
+  _postValidateInit() {
+    this._setupModelsDir();
+    this.downloadManager = DownloadManager.getInstance(
+      this.cacheDir,
+      this.modelsDir
+    );
+    this.tokenizerManager = TokenizerManager.getInstance(
+      this.cacheDir,
+      this.modelsDir
+    );
+    this.visionManager = VisionManager.getInstance(this.cacheDir);
+    this.transcribeManager = TranscribeManager.getInstance(this.cacheDir);
+    this.ttsManager = TTSManager.getInstance(this.cacheDir);
+  }
+  /**
+   * Sets up the models directory, reading from config or environment variables,
+   * and ensures the directory exists.
+   */
+  _setupModelsDir() {
+    const modelsDirEnv = this.config?.MODELS_DIR?.trim() || process.env.MODELS_DIR?.trim();
+    if (modelsDirEnv) {
+      this.modelsDir = path5.resolve(modelsDirEnv);
+      logger8.info(
+        "Using models directory from MODELS_DIR environment variable:",
+        this.modelsDir
+      );
     } else {
-
-
-
-
-
+      this.modelsDir = path5.join(os3.homedir(), ".eliza", "models");
+      logger8.info(
+        "MODELS_DIR environment variable not set, using default models directory:",
+        this.modelsDir
+      );
+    }
+    if (!fs5.existsSync(this.modelsDir)) {
+      fs5.mkdirSync(this.modelsDir, { recursive: true });
+      logger8.debug(
+        "Ensured models directory exists (created):",
+        this.modelsDir
+      );
+    } else {
+      logger8.debug("Models directory already exists:", this.modelsDir);
     }
-
-
+  }
+  /**
+   * Sets up the cache directory, reading from config or environment variables,
+   * and ensures the directory exists.
+   */
+  _setupCacheDir() {
+    const cacheDirEnv = this.config?.CACHE_DIR?.trim() || process.env.CACHE_DIR?.trim();
     if (cacheDirEnv) {
       this.cacheDir = path5.resolve(cacheDirEnv);
+      logger8.info(
+        "Using cache directory from CACHE_DIR environment variable:",
+        this.cacheDir
+      );
     } else {
-      const cacheDir = path5.join(
+      const cacheDir = path5.join(os3.homedir(), ".eliza", "cache");
       if (!fs5.existsSync(cacheDir)) {
         fs5.mkdirSync(cacheDir, { recursive: true });
-
+        logger8.debug("Ensuring cache directory exists (created):", cacheDir);
       }
       this.cacheDir = cacheDir;
+      logger8.info(
+        "CACHE_DIR environment variable not set, using default cache directory:",
+        this.cacheDir
+      );
     }
-
-
-
-
-
-    if (process.env.USE_STUDIOLM_TEXT_MODELS === "true") {
-      this.studioLMManager = StudioLMManager.getInstance();
-    }
-    if (process.env.USE_OLLAMA_TEXT_MODELS === "true") {
-      this.ollamaManager = OllamaManager.getInstance();
+    if (!fs5.existsSync(this.cacheDir)) {
+      fs5.mkdirSync(this.cacheDir, { recursive: true });
+      logger8.debug("Ensured cache directory exists (created):", this.cacheDir);
+    } else {
+      logger8.debug("Cache directory already exists:", this.cacheDir);
     }
-    this.activeModelConfig = MODEL_SPECS.small;
   }
   /**
    * Retrieves the singleton instance of LocalAIManager. If an instance does not already exist, a new one is created and returned.
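
_setupModelsDir and _setupCacheDir above share one resolution order: validated config value first, raw environment variable second, a default under ~/.eliza last, creating the directory if needed. A sketch of that order (the helper itself is illustrative):

import fs from "fs";
import os from "os";
import path from "path";

function resolveDir(configValue: string | undefined, envValue: string | undefined, fallbackName: string): string {
  const chosen = configValue?.trim() || envValue?.trim();
  const dir = chosen ? path.resolve(chosen) : path.join(os.homedir(), ".eliza", fallbackName);
  if (!fs.existsSync(dir)) fs.mkdirSync(dir, { recursive: true });
  return dir;
}

// e.g. resolveDir(config?.MODELS_DIR, process.env.MODELS_DIR, "models")
//      resolveDir(config?.CACHE_DIR, process.env.CACHE_DIR, "cache")
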
@@ -2819,99 +2222,86 @@ var LocalAIManager = class _LocalAIManager {
     return _LocalAIManager.instance;
   }
   /**
-   * Initializes the environment by validating the configuration and setting
+   * Initializes the environment by validating the configuration and setting model paths.
+   * Now public to be callable from plugin init and model handlers.
    *
    * @returns {Promise<void>} A Promise that resolves once the environment has been successfully initialized.
    */
   async initializeEnvironment() {
-
-
-
-
-      USE_STUDIOLM_TEXT_MODELS: process.env.USE_STUDIOLM_TEXT_MODELS,
-      USE_OLLAMA_TEXT_MODELS: process.env.USE_OLLAMA_TEXT_MODELS
-    };
-    const validatedConfig = await validateConfig(config);
-    logger10.info("Environment configuration validated");
-    process.env.USE_LOCAL_AI = String(validatedConfig.USE_LOCAL_AI);
-    process.env.USE_STUDIOLM_TEXT_MODELS = String(validatedConfig.USE_STUDIOLM_TEXT_MODELS);
-    process.env.USE_OLLAMA_TEXT_MODELS = String(validatedConfig.USE_OLLAMA_TEXT_MODELS);
-    logger10.success("Environment initialization complete");
-  } catch (error) {
-    logger10.error("Environment validation failed:", {
-      error: error instanceof Error ? error.message : String(error),
-      stack: error instanceof Error ? error.stack : void 0
-    });
-    throw error;
-  }
-  }
-  /**
-   * Asynchronously initializes the Ollama model.
-   *
-   * @returns {Promise<void>} A Promise that resolves when the initialization is complete.
-   * @throws {Error} If the Ollama manager is not created, or if initialization of Ollama models fails.
-   */
-  async initializeOllama() {
-    try {
-      logger10.info("Initializing Ollama models...");
-      if (!this.ollamaManager) {
-        throw new Error("Ollama manager not created - cannot initialize");
-      }
-      await this.ollamaManager.initialize();
-      if (!this.ollamaManager.isInitialized()) {
-        throw new Error("Ollama initialization failed - models not properly loaded");
-      }
-      logger10.success("Ollama initialization complete");
-    } catch (error) {
-      logger10.error("Ollama initialization failed:", {
-        error: error instanceof Error ? error.message : String(error),
-        stack: error instanceof Error ? error.stack : void 0,
-        timestamp: (/* @__PURE__ */ new Date()).toISOString()
-      });
-      throw error;
+    if (this.environmentInitialized) return;
+    if (this.environmentInitializingPromise) {
+      await this.environmentInitializingPromise;
+      return;
     }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    this.environmentInitializingPromise = (async () => {
+      try {
+        logger8.info("Initializing environment configuration...");
+        this.config = await validateConfig();
+        this._postValidateInit();
+        this.modelPath = path5.join(
+          this.modelsDir,
+          this.config.LOCAL_SMALL_MODEL
+        );
+        this.mediumModelPath = path5.join(
+          this.modelsDir,
+          this.config.LOCAL_LARGE_MODEL
+        );
+        this.embeddingModelPath = path5.join(
+          this.modelsDir,
+          this.config.LOCAL_EMBEDDING_MODEL
+        );
+        logger8.info("Using small model path:", basename(this.modelPath));
+        logger8.info("Using medium model path:", basename(this.mediumModelPath));
+        logger8.info(
+          "Using embedding model path:",
+          basename(this.embeddingModelPath)
+        );
+        logger8.info("Environment configuration validated and model paths set");
+        this.environmentInitialized = true;
+        logger8.success("Environment initialization complete");
+      } catch (error) {
+        logger8.error("Environment validation failed:", {
+          error: error instanceof Error ? error.message : String(error),
+          stack: error instanceof Error ? error.stack : void 0
+        });
+        this.environmentInitializingPromise = null;
+        throw error;
       }
-
-
-    } catch (error) {
-      logger10.error("StudioLM initialization failed:", {
-        error: error instanceof Error ? error.message : String(error),
-        stack: error instanceof Error ? error.stack : void 0,
-        timestamp: (/* @__PURE__ */ new Date()).toISOString()
-      });
-      throw error;
-    }
+    })();
+    await this.environmentInitializingPromise;
   }
   /**
    * Downloads the model based on the modelPath provided.
-   * Determines
+   * Determines the model spec and path based on the model type.
    *
+   * @param {ModelTypeName} modelType - The type of model to download
+   * @param {ModelSpec} [customModelSpec] - Optional custom model spec to use instead of the default
    * @returns A Promise that resolves to a boolean indicating whether the model download was successful.
    */
-  async downloadModel(modelType) {
-
-
+  async downloadModel(modelType, customModelSpec) {
+    let modelSpec;
+    let modelPathToDownload;
+    await this.initializeEnvironment();
+    if (customModelSpec) {
+      modelSpec = customModelSpec;
+      modelPathToDownload = modelType === ModelType.TEXT_EMBEDDING ? this.embeddingModelPath : modelType === ModelType.TEXT_LARGE ? this.mediumModelPath : this.modelPath;
+    } else if (modelType === ModelType.TEXT_EMBEDDING) {
+      modelSpec = MODEL_SPECS.embedding;
+      modelPathToDownload = this.embeddingModelPath;
+    } else {
+      modelSpec = modelType === ModelType.TEXT_LARGE ? MODEL_SPECS.medium : MODEL_SPECS.small;
+      modelPathToDownload = modelType === ModelType.TEXT_LARGE ? this.mediumModelPath : this.modelPath;
+    }
     try {
-      return await this.downloadManager.downloadModel(
+      return await this.downloadManager.downloadModel(
+        modelSpec,
+        modelPathToDownload
+      );
     } catch (error) {
-
+      logger8.error("Model download failed:", {
         error: error instanceof Error ? error.message : String(error),
-
+        modelType,
+        modelPath: modelPathToDownload
       });
       throw error;
     }
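
The initializeEnvironment rewrite uses the same guard the TTS manager gained earlier in this diff: a cached in-flight promise so concurrent callers share one initialization, with a failure clearing the cache so a retry is possible. A generic sketch of the pattern:

class OnceInitializer {
  private initialized = false;
  private initializing: Promise<void> | null = null;

  constructor(private readonly doInit: () => Promise<void>) {}

  async init(): Promise<void> {
    if (this.initialized) return;
    if (this.initializing) return this.initializing; // join the in-flight run
    this.initializing = (async () => {
      try {
        await this.doInit();
        this.initialized = true;
      } catch (error) {
        this.initializing = null; // allow a retry after failure
        throw error;
      }
    })();
    return this.initializing;
  }
}
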
@@ -2926,14 +2316,14 @@ var LocalAIManager = class _LocalAIManager {
       const platformManager = getPlatformManager();
       await platformManager.initialize();
       const capabilities = platformManager.getCapabilities();
-
+      logger8.info("Platform capabilities detected:", {
         platform: capabilities.platform,
         gpu: capabilities.gpu?.type || "none",
         recommendedModel: capabilities.recommendedModelSize,
         supportedBackends: capabilities.supportedBackends
       });
     } catch (error) {
-
+      logger8.warn("Platform detection failed:", error);
     }
   }
   /**
@@ -2942,8 +2332,9 @@ var LocalAIManager = class _LocalAIManager {
    * @param {ModelTypeName} modelType - The type of model to initialize (default: ModelType.TEXT_SMALL)
    * @returns {Promise<void>} A promise that resolves when initialization is complete or rejects if an error occurs
    */
-  async initialize(modelType =
-
+  async initialize(modelType = ModelType.TEXT_SMALL) {
+    await this.initializeEnvironment();
+    if (modelType === ModelType.TEXT_LARGE) {
       await this.lazyInitMediumModel();
     } else {
       await this.lazyInitSmallModel();
@@ -2956,96 +2347,120 @@ var LocalAIManager = class _LocalAIManager {
|
|
|
2956
2347
|
*/
|
|
2957
2348
|
async initializeEmbedding() {
|
|
2958
2349
|
try {
|
|
2959
|
-
|
|
2960
|
-
|
|
2350
|
+
await this.initializeEnvironment();
|
|
2351
|
+
logger8.info("Initializing embedding model...");
|
|
2352
|
+
logger8.info("Models directory:", this.modelsDir);
|
|
2961
2353
|
if (!fs5.existsSync(this.modelsDir)) {
|
|
2962
|
-
|
|
2354
|
+
logger8.warn(
|
|
2355
|
+
"Models directory does not exist, creating it:",
|
|
2356
|
+
this.modelsDir
|
|
2357
|
+
);
|
|
2963
2358
|
fs5.mkdirSync(this.modelsDir, { recursive: true });
|
|
2964
2359
|
}
|
|
2360
|
+
await this.downloadModel(ModelType.TEXT_EMBEDDING);
|
|
2361
|
+
if (!this.llama) {
|
|
2362
|
+
this.llama = await getLlama();
|
|
2363
|
+
}
|
|
2965
2364
|
if (!this.embeddingModel) {
|
|
2966
|
-
|
|
2967
|
-
|
|
2968
|
-
|
|
2969
|
-
|
|
2970
|
-
|
|
2971
|
-
|
|
2972
|
-
|
|
2973
|
-
|
|
2974
|
-
|
|
2365
|
+
logger8.info("Loading embedding model:", this.embeddingModelPath);
|
|
2366
|
+
this.embeddingModel = await this.llama.loadModel({
|
|
2367
|
+
modelPath: this.embeddingModelPath,
|
|
2368
|
+
// Use the correct path
|
|
2369
|
+
gpuLayers: 0,
|
|
2370
|
+
// Embedding models are typically small enough to run on CPU
|
|
2371
|
+
vocabOnly: false
|
|
2372
|
+
});
|
|
2373
|
+
this.embeddingContext = await this.embeddingModel.createEmbeddingContext({
|
|
2374
|
+
contextSize: this.embeddingModelConfig.contextSize,
|
|
2375
|
+
batchSize: 512
|
|
2975
2376
|
});
|
|
2976
|
-
|
|
2977
|
-
logger10.info(`Downloading embedding model: ${completedBar} 100%`);
|
|
2978
|
-
logger10.success("FlagEmbedding instance created successfully");
|
|
2377
|
+
logger8.success("Embedding model initialized successfully");
|
|
2979
2378
|
}
|
|
2980
2379
|
} catch (error) {
|
|
2981
|
-
|
|
2380
|
+
logger8.error("Embedding initialization failed with details:", {
|
|
2982
2381
|
error: error instanceof Error ? error.message : String(error),
|
|
2983
2382
|
stack: error instanceof Error ? error.stack : void 0,
|
|
2984
2383
|
modelsDir: this.modelsDir,
|
|
2985
|
-
|
|
2384
|
+
embeddingModelPath: this.embeddingModelPath
|
|
2385
|
+
// Log the path being used
|
|
2986
2386
|
});
|
|
2987
2387
|
throw error;
|
|
2988
2388
|
}
|
|
2989
2389
|
}
|
|
2990
2390
|
/**
|
|
2991
|
-
*
|
|
2992
|
-
*
|
|
2993
|
-
* @param {GenerateTextParams} params - The parameters for generating the text.
|
|
2994
|
-
* @returns {Promise<string>} - A promise that resolves to the generated text.
|
|
2391
|
+
* Generate embeddings using the proper LlamaContext.getEmbedding method.
|
|
2995
2392
|
*/
|
|
2996
|
-
async
|
|
2393
|
+
async generateEmbedding(text) {
|
|
2997
2394
|
try {
|
|
2998
|
-
|
|
2999
|
-
|
|
3000
|
-
|
|
3001
|
-
modelType: params.modelType,
|
|
3002
|
-
studioLMInitialized: this.studioLMInitialized,
|
|
3003
|
-
ollamaInitialized: this.ollamaInitialized,
|
|
3004
|
-
studioLMEnabled: process.env.USE_STUDIOLM_TEXT_MODELS === "true",
|
|
3005
|
-
ollamaEnabled: process.env.USE_OLLAMA_TEXT_MODELS === "true"
|
|
3006
|
-
});
|
|
3007
|
-
if (modelConfig.source === "studiolm") {
|
|
3008
|
-
if (process.env.USE_STUDIOLM_TEXT_MODELS !== "true") {
|
|
3009
|
-
logger10.warn(
|
|
3010
|
-
"StudioLM requested but disabled in environment, falling back to local models"
|
|
3011
|
-
);
|
|
3012
|
-
return this.generateText(params);
|
|
3013
|
-
}
|
|
3014
|
-
if (!this.studioLMManager) {
|
|
3015
|
-
logger10.warn("StudioLM manager not initialized, falling back to local models");
|
|
3016
|
-
return this.generateText(params);
|
|
3017
|
-
}
|
|
3018
|
-
if (!this.studioLMInitialized) {
|
|
3019
|
-
logger10.info("StudioLM not initialized, initializing now...");
|
|
3020
|
-
await this.initializeStudioLM();
|
|
3021
|
-
}
|
|
3022
|
-
-      return await this.studioLMManager.generateText(params, this.studioLMInitialized);
-    }
-    if (modelConfig.source === "ollama") {
-      if (process.env.USE_OLLAMA_TEXT_MODELS !== "true") {
-        logger10.warn("Ollama requested but disabled in environment, falling back to local models");
-        return this.generateText(params);
-      }
-      if (!this.ollamaManager) {
-        logger10.warn("Ollama manager not initialized, falling back to local models");
-        return this.generateText(params);
-      }
-      if (!this.ollamaInitialized && !this.ollamaManager.isInitialized()) {
-        logger10.info("Initializing Ollama in generateTextOllamaStudio");
-        await this.ollamaManager.initialize();
-        this.ollamaInitialized = true;
-      }
-      return await this.ollamaManager.generateText(params, this.ollamaInitialized);
+      await this.lazyInitEmbedding();
+      if (!this.embeddingModel || !this.embeddingContext) {
+        throw new Error("Failed to initialize embedding model");
       }
-
+      logger8.info("Generating embedding for text", { textLength: text.length });
+      const embeddingResult = await this.embeddingContext.getEmbeddingFor(text);
+      const mutableEmbedding = [...embeddingResult.vector];
+      const normalizedEmbedding = this.normalizeEmbedding(mutableEmbedding);
+      logger8.info("Embedding generation complete", {
+        dimensions: normalizedEmbedding.length
+      });
+      return normalizedEmbedding;
     } catch (error) {
-
+      logger8.error("Embedding generation failed:", {
         error: error instanceof Error ? error.message : String(error),
         stack: error instanceof Error ? error.stack : void 0,
-
+        textLength: text?.length ?? "text is null"
       });
-
+      const zeroDimensions = this.config?.LOCAL_EMBEDDING_DIMENSIONS ? this.config.LOCAL_EMBEDDING_DIMENSIONS : this.embeddingModelConfig.dimensions;
+      return new Array(zeroDimensions).fill(0);
+    }
+  }
+  /**
+   * Normalizes an embedding vector using L2 normalization
+   *
+   * @param {number[]} embedding - The embedding vector to normalize
+   * @returns {number[]} - The normalized embedding vector
+   */
+  normalizeEmbedding(embedding) {
+    const squareSum = embedding.reduce((sum, val) => sum + val * val, 0);
+    const norm = Math.sqrt(squareSum);
+    if (norm === 0) {
+      return embedding;
+    }
+    return embedding.map((val) => val / norm);
+  }
+  /**
+   * Lazy initialize the embedding model
+   */
+  async lazyInitEmbedding() {
+    if (this.embeddingInitialized) return;
+    if (!this.embeddingInitializingPromise) {
+      this.embeddingInitializingPromise = (async () => {
+        try {
+          await this.initializeEnvironment();
+          await this.downloadModel(ModelType.TEXT_EMBEDDING);
+          if (!this.llama) {
+            this.llama = await getLlama();
+          }
+          this.embeddingModel = await this.llama.loadModel({
+            modelPath: this.embeddingModelPath,
+            gpuLayers: 0,
+            // Embedding models are typically small enough to run on CPU
+            vocabOnly: false
+          });
+          this.embeddingContext = await this.embeddingModel.createEmbeddingContext({
+            contextSize: this.embeddingModelConfig.contextSize,
+            batchSize: 512
+          });
+          this.embeddingInitialized = true;
+          logger8.info("Embedding model initialized successfully");
+        } catch (error) {
+          logger8.error("Failed to initialize embedding model:", error);
+          this.embeddingInitializingPromise = null;
+          throw error;
+        }
+      })();
     }
+    await this.embeddingInitializingPromise;
   }
   /**
    * Asynchronously generates text based on the provided parameters.
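A note on the hunk above: the new embedding path L2-normalizes the vector returned by `getEmbeddingFor` before handing it back, so downstream cosine-similarity comparisons reduce to plain dot products. A minimal standalone sketch of that normalization (plain TypeScript, no plugin dependencies):

```ts
// L2 (Euclidean) normalization, mirroring normalizeEmbedding above.
// For two normalized vectors, their dot product equals their cosine similarity.
function l2Normalize(embedding: number[]): number[] {
  const norm = Math.sqrt(embedding.reduce((sum, v) => sum + v * v, 0));
  if (norm === 0) return embedding; // a zero vector is returned unchanged
  return embedding.map((v) => v / norm);
}

// Example: [3, 4] has norm 5, so it normalizes to [0.6, 0.8].
console.log(l2Normalize([3, 4])); // [0.6, 0.8]
```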
@@ -3053,7 +2468,9 @@ var LocalAIManager = class _LocalAIManager {
    */
   async generateText(params) {
     try {
-
+      await this.initializeEnvironment();
+      logger8.info("Generating text with model:", params.modelType);
+      if (params.modelType === ModelType.TEXT_LARGE) {
         await this.lazyInitMediumModel();
         if (!this.mediumModel) {
           throw new Error("Medium model initialization failed");
@@ -3084,15 +2501,18 @@ var LocalAIManager = class _LocalAIManager {
       if (!this.chatSession) {
         throw new Error("Failed to create chat session");
       }
-
-
+      logger8.info("Created new chat session for model:", params.modelType);
+      logger8.info("Incoming prompt structure:", {
         contextLength: params.prompt.length,
         hasAction: params.prompt.includes("action"),
         runtime: !!params.runtime,
         stopSequences: params.stopSequences
       });
-      const tokens = await this.tokenizerManager.encode(
-
+      const tokens = await this.tokenizerManager.encode(
+        params.prompt,
+        this.activeModelConfig
+      );
+      logger8.info("Input tokens:", { count: tokens.length });
       const systemMessage = "You are a helpful AI assistant. Respond to the current request only.";
       await this.chatSession.prompt(systemMessage, {
         maxTokens: 1,
@@ -3104,49 +2524,25 @@ var LocalAIManager = class _LocalAIManager {
         temperature: 0.7,
         topP: 0.9,
         repeatPenalty: {
-          punishTokensFilter: () => this.smallModel.tokenize(wordsToPunish.join(" ")),
+          punishTokensFilter: () => this.smallModel ? this.smallModel.tokenize(wordsToPunish.join(" ")) : [],
           penalty: 1.2,
           frequencyPenalty: 0.7,
           presencePenalty: 0.7
         }
       });
-
+      logger8.info("Raw response structure:", {
        responseLength: response.length,
        hasAction: response.includes("action"),
        hasThinkTag: response.includes("<think>")
      });
       if (response.includes("<think>")) {
-
+        logger8.info("Cleaning think tags from response");
         response = response.replace(/<think>[\s\S]*?<\/think>\n?/g, "");
-
+        logger8.info("Think tags removed from response");
       }
       return response;
     } catch (error) {
-
-      throw error;
-    }
-  }
-  /**
-   * Generate embeddings - now with lazy initialization
-   */
-  async generateEmbedding(text) {
-    try {
-      await this.lazyInitEmbedding();
-      if (!this.embeddingModel) {
-        throw new Error("Failed to initialize embedding model");
-      }
-      logger10.info("Generating query embedding...");
-      const embedding = await this.embeddingModel.queryEmbed(text);
-      const dimensions = embedding.length;
-      logger10.info("Embedding generation complete", { dimensions });
-      return Array.from(embedding);
-    } catch (error) {
-      logger10.error("Embedding generation failed:", {
-        error: error instanceof Error ? error.message : String(error),
-        stack: error instanceof Error ? error.stack : void 0,
-        // Only access text.length if text exists
-        textLength: text?.length ?? "text is null"
-      });
+      logger8.error("Text generation failed:", error);
       throw error;
     }
   }
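The `<think>` cleanup above strips chain-of-thought blocks that reasoning-tuned GGUF models emit before their answer. A self-contained sketch of the same regex:

```ts
// Remove <think>...</think> blocks (and one trailing newline) from a model
// response, mirroring the replace() call in generateText above.
function stripThinkTags(response: string): string {
  return response.replace(/<think>[\s\S]*?<\/think>\n?/g, "");
}

const raw = "<think>internal reasoning</think>\nFinal answer.";
console.log(stripThinkTags(raw)); // "Final answer."
```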
@@ -3160,7 +2556,7 @@ var LocalAIManager = class _LocalAIManager {
       const dataUrl = `data:${mimeType};base64,${base64}`;
       return await this.visionManager.processImage(dataUrl);
     } catch (error) {
-
+      logger8.error("Image description failed:", error);
       throw error;
     }
   }
@@ -3173,7 +2569,7 @@ var LocalAIManager = class _LocalAIManager {
       const result = await this.transcribeManager.transcribe(audioBuffer);
       return result.text;
     } catch (error) {
-
+      logger8.error("Audio transcription failed:", {
         error: error instanceof Error ? error.message : String(error),
         bufferSize: audioBuffer.length
       });
@@ -3188,14 +2584,13 @@ var LocalAIManager = class _LocalAIManager {
       await this.lazyInitTTS();
       return await this.ttsManager.generateSpeech(text);
     } catch (error) {
-
+      logger8.error("Speech generation failed:", {
         error: error instanceof Error ? error.message : String(error),
         textLength: text.length
       });
       throw error;
     }
   }
-  // Add public accessor methods
   /**
    * Returns the TokenizerManager associated with this object.
    *
@@ -3211,43 +2606,6 @@ var LocalAIManager = class _LocalAIManager {
   getActiveModelConfig() {
     return this.activeModelConfig;
   }
-  /**
-   * Retrieves the source configuration for the text model based on environment variables and manager existence.
-   * @returns {TextModelConfig} The configuration object containing the text model source and type.
-   */
-  getTextModelSource() {
-    try {
-      const config = {
-        source: "local",
-        modelType: ModelType3.TEXT_SMALL
-      };
-      if (process.env.USE_STUDIOLM_TEXT_MODELS === "true" && this.studioLMManager) {
-        config.source = "studiolm";
-      } else if (process.env.USE_OLLAMA_TEXT_MODELS === "true" && this.ollamaManager) {
-        config.source = "ollama";
-      }
-      logger10.info("Selected text model source:", config);
-      return config;
-    } catch (error) {
-      logger10.error("Error determining text model source:", error);
-      return { source: "local", modelType: ModelType3.TEXT_SMALL };
-    }
-  }
-  /**
-   * Generic lazy initialization handler for any model type
-   */
-  async lazyInitialize(modelType, isInitialized, initPromise, initFunction) {
-    if (isInitialized) {
-      return Promise.resolve(null);
-    }
-    if (initPromise) {
-      logger10.info(`Waiting for ${modelType} initialization to complete...`);
-      await initPromise;
-      return Promise.resolve(null);
-    }
-    logger10.info(`Lazy initializing ${modelType}...`);
-    return initFunction();
-  }
   /**
    * Lazy initialize the small text model
    */
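This release drops the generic `lazyInitialize` helper; every `lazyInit*` method now follows the same promise-guard shape inline: a boolean marks completion, and a shared in-flight promise makes concurrent callers await one initialization instead of racing. A generic sketch of the pattern (the `LazyResource` wrapper is hypothetical, not part of the plugin):

```ts
// Promise-guarded lazy initialization, the pattern used by lazyInitSmallModel,
// lazyInitEmbedding, lazyInitTTS, etc. Concurrent callers share one init run.
class LazyResource<T> {
  private value: T | null = null;
  private initializing: Promise<T> | null = null;

  constructor(private readonly init: () => Promise<T>) {}

  async get(): Promise<T> {
    if (this.value !== null) return this.value;
    if (!this.initializing) {
      this.initializing = this.init().then(
        (v) => (this.value = v),
        (err) => {
          this.initializing = null; // clear the guard so a retry is possible
          throw err;
        }
      );
    }
    return this.initializing;
  }
}
```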
@@ -3257,12 +2615,13 @@ var LocalAIManager = class _LocalAIManager {
     this.smallModelInitializingPromise = (async () => {
       await this.initializeEnvironment();
       await this.checkPlatformCapabilities();
-      await this.downloadModel(
+      await this.downloadModel(ModelType.TEXT_SMALL);
       try {
-        this.llama = await
+        this.llama = await getLlama();
         const smallModel = await this.llama.loadModel({
           gpuLayers: 43,
           modelPath: this.modelPath,
+          // Use the potentially overridden path
           vocabOnly: false
         });
         this.smallModel = smallModel;
@@ -3289,21 +2648,23 @@ var LocalAIManager = class _LocalAIManager {
     if (this.mediumModelInitialized) return;
     if (!this.mediumModelInitializingPromise) {
       this.mediumModelInitializingPromise = (async () => {
+        await this.initializeEnvironment();
         if (!this.llama) {
           await this.lazyInitSmallModel();
         }
-        await this.downloadModel(
+        await this.downloadModel(ModelType.TEXT_LARGE);
         try {
           const mediumModel = await this.llama.loadModel({
             gpuLayers: 43,
             modelPath: this.mediumModelPath,
+            // Use the potentially overridden path
             vocabOnly: false
           });
           this.mediumModel = mediumModel;
           this.mediumModelInitialized = true;
-
+          logger8.info("Medium model initialized successfully");
         } catch (error) {
-
+          logger8.error("Failed to initialize medium model:", error);
           this.mediumModelInitializingPromise = null;
           throw error;
         }
@@ -3311,26 +2672,6 @@ var LocalAIManager = class _LocalAIManager {
     }
     await this.mediumModelInitializingPromise;
   }
-  /**
-   * Lazy initialize the embedding model
-   */
-  async lazyInitEmbedding() {
-    if (this.embeddingInitialized) return;
-    if (!this.embeddingInitializingPromise) {
-      this.embeddingInitializingPromise = (async () => {
-        try {
-          await this.initializeEmbedding();
-          this.embeddingInitialized = true;
-          logger10.info("Embedding model initialized successfully");
-        } catch (error) {
-          logger10.error("Failed to initialize embedding model:", error);
-          this.embeddingInitializingPromise = null;
-          throw error;
-        }
-      })();
-    }
-    await this.embeddingInitializingPromise;
-  }
   /**
    * Lazy initialize the vision model
    */
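The `loadModel` calls above split placement: text models offload layers to the GPU (`gpuLayers: 43`) while the embedding model stays on CPU (`gpuLayers: 0`). A condensed sketch of that split, using only the node-llama-cpp calls already visible in this diff (the model paths are placeholders, and the `contextSize` value is an assumption; the plugin reads it from its model config):

```ts
import { getLlama } from "node-llama-cpp";

const llama = await getLlama();

// Chat model: offload layers to the GPU, as in lazyInitSmallModel above.
const chatModel = await llama.loadModel({
  modelPath: "/models/chat-model-q4.gguf", // placeholder path
  gpuLayers: 43,
  vocabOnly: false,
});

// Embedding model: small enough to run on CPU, as in lazyInitEmbedding above.
const embeddingModel = await llama.loadModel({
  modelPath: "/models/embedding-model-q4.gguf", // placeholder path
  gpuLayers: 0,
  vocabOnly: false,
});
const embeddingContext = await embeddingModel.createEmbeddingContext({
  contextSize: 512, // assumption; taken from embeddingModelConfig in the plugin
  batchSize: 512,
});
```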
@@ -3340,9 +2681,9 @@ var LocalAIManager = class _LocalAIManager {
     this.visionInitializingPromise = (async () => {
       try {
         this.visionInitialized = true;
-
+        logger8.info("Vision model initialized successfully");
       } catch (error) {
-
+        logger8.error("Failed to initialize vision model:", error);
         this.visionInitializingPromise = null;
         throw error;
       }
@@ -3358,10 +2699,28 @@ var LocalAIManager = class _LocalAIManager {
     if (!this.transcriptionInitializingPromise) {
       this.transcriptionInitializingPromise = (async () => {
         try {
+          await this.initializeEnvironment();
+          if (!this.transcribeManager) {
+            this.transcribeManager = TranscribeManager.getInstance(
+              this.cacheDir
+            );
+          }
+          const ffmpegReady = await this.transcribeManager.ensureFFmpeg();
+          if (!ffmpegReady) {
+            logger8.error(
+              "FFmpeg is not available or not configured correctly. Cannot proceed with transcription."
+            );
+            throw new Error(
+              "FFmpeg is required for transcription but is not available. Please see server logs for installation instructions."
+            );
+          }
           this.transcriptionInitialized = true;
-
+          logger8.info(
+            "Transcription prerequisites (FFmpeg) checked and ready."
+          );
+          logger8.info("Transcription model initialized successfully");
         } catch (error) {
-
+          logger8.error("Failed to initialize transcription model:", error);
           this.transcriptionInitializingPromise = null;
           throw error;
         }
@@ -3377,10 +2736,12 @@ var LocalAIManager = class _LocalAIManager {
     if (!this.ttsInitializingPromise) {
       this.ttsInitializingPromise = (async () => {
         try {
+          await this.initializeEnvironment();
+          this.ttsManager = TTSManager.getInstance(this.cacheDir);
           this.ttsInitialized = true;
-
+          logger8.info("TTS model initialized successfully");
         } catch (error) {
-
+          logger8.error("Failed to lazy initialize TTS components:", error);
           this.ttsInitializingPromise = null;
           throw error;
         }
@@ -3388,57 +2749,18 @@ var LocalAIManager = class _LocalAIManager {
     }
     await this.ttsInitializingPromise;
   }
-  /**
-   * Lazy initialize the Ollama integration
-   */
-  async lazyInitOllama() {
-    if (this.ollamaInitialized) return;
-    if (!this.ollamaInitializingPromise) {
-      this.ollamaInitializingPromise = (async () => {
-        try {
-          await this.initializeOllama();
-          this.ollamaInitialized = true;
-          logger10.info("Ollama initialized successfully");
-        } catch (error) {
-          logger10.error("Failed to initialize Ollama:", error);
-          this.ollamaInitializingPromise = null;
-          throw error;
-        }
-      })();
-    }
-    await this.ollamaInitializingPromise;
-  }
-  /**
-   * Lazy initialize the StudioLM integration
-   */
-  async lazyInitStudioLM() {
-    if (this.studioLMInitialized) return;
-    if (!this.studioLMInitializingPromise) {
-      this.studioLMInitializingPromise = (async () => {
-        try {
-          await this.initializeStudioLM();
-          this.studioLMInitialized = true;
-          logger10.info("StudioLM initialized successfully");
-        } catch (error) {
-          logger10.error("Failed to initialize StudioLM:", error);
-          this.studioLMInitializingPromise = null;
-          throw error;
-        }
-      })();
-    }
-    await this.studioLMInitializingPromise;
-  }
 };
 var localAIManager = LocalAIManager.getInstance();
-var
+var localAiPlugin = {
   name: "local-ai",
   description: "Local AI plugin using LLaMA models",
   async init() {
     try {
-
-
+      logger8.debug("Initializing local-ai plugin environment...");
+      await localAIManager.initializeEnvironment();
+      logger8.success("Local AI plugin configuration validated and initialized");
     } catch (error) {
-
+      logger8.error("Plugin initialization failed:", {
       error: error instanceof Error ? error.message : String(error),
       stack: error instanceof Error ? error.stack : void 0
     });
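The new transcription path refuses to start without FFmpeg rather than failing mid-transcode. The plugin's own gate lives in `TranscribeManager.ensureFFmpeg`; a rough standalone equivalent of such a preflight (hypothetical helper, probing a binary on PATH) might look like:

```ts
import { execFile } from "child_process";
import { promisify } from "util";

const execFileAsync = promisify(execFile);

// Hypothetical preflight: probe for an ffmpeg binary on PATH before
// accepting transcription work, mirroring the ensureFFmpeg() gate above.
async function ffmpegAvailable(): Promise<boolean> {
  try {
    await execFileAsync("ffmpeg", ["-version"]);
    return true;
  } catch {
    return false;
  }
}

// Usage (top-level await in an ES module):
if (!(await ffmpegAvailable())) {
  throw new Error("FFmpeg is required for transcription but is not available.");
}
```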
@@ -3446,60 +2768,46 @@ var localAIPlugin = {
     }
   },
   models: {
-    [
+    [ModelType.TEXT_SMALL]: async (runtime, { prompt, stopSequences = [] }) => {
       try {
-
-        if (modelConfig.source !== "local") {
-          return await localAIManager.generateTextOllamaStudio({
-            prompt,
-            stopSequences,
-            runtime,
-            modelType: ModelType3.TEXT_SMALL
-          });
-        }
+        await localAIManager.initializeEnvironment();
         return await localAIManager.generateText({
           prompt,
           stopSequences,
           runtime,
-          modelType:
+          modelType: ModelType.TEXT_SMALL
         });
       } catch (error) {
-
+        logger8.error("Error in TEXT_SMALL handler:", error);
         throw error;
       }
     },
-    [
+    [ModelType.TEXT_LARGE]: async (runtime, { prompt, stopSequences = [] }) => {
       try {
-
-        if (modelConfig.source !== "local") {
-          return await localAIManager.generateTextOllamaStudio({
-            prompt,
-            stopSequences,
-            runtime,
-            modelType: ModelType3.TEXT_LARGE
-          });
-        }
+        await localAIManager.initializeEnvironment();
         return await localAIManager.generateText({
           prompt,
           stopSequences,
           runtime,
-          modelType:
+          modelType: ModelType.TEXT_LARGE
        });
       } catch (error) {
-
+        logger8.error("Error in TEXT_LARGE handler:", error);
         throw error;
       }
     },
-    [
+    [ModelType.TEXT_EMBEDDING]: async (_runtime, params) => {
       const text = params?.text;
       try {
         if (!text) {
-
+          logger8.debug(
+            "Null or empty text input for embedding, returning zero vector"
+          );
           return new Array(384).fill(0);
         }
         return await localAIManager.generateEmbedding(text);
       } catch (error) {
-
+        logger8.error("Error in TEXT_EMBEDDING handler:", {
         error: error instanceof Error ? error.message : String(error),
         fullText: text,
         textType: typeof text,
@@ -3508,9 +2816,10 @@ var localAIPlugin = {
         return new Array(384).fill(0);
       }
     },
-    [
+    [ModelType.OBJECT_SMALL]: async (runtime, params) => {
       try {
-
+        await localAIManager.initializeEnvironment();
+        logger8.info("OBJECT_SMALL handler - Processing request:", {
         prompt: params.prompt,
         hasSchema: !!params.schema,
         temperature: params.temperature
@@ -3519,23 +2828,12 @@ var localAIPlugin = {
         if (!jsonPrompt.includes("```json") && !jsonPrompt.includes("respond with valid JSON")) {
           jsonPrompt += "\nPlease respond with valid JSON only, without any explanations, markdown formatting, or additional text.";
         }
-        const
-
-
-
-
-
-          runtime,
-          modelType: ModelType3.TEXT_SMALL
-        });
-      } else {
-        textResponse = await localAIManager.generateText({
-          prompt: jsonPrompt,
-          stopSequences: params.stopSequences,
-          runtime,
-          modelType: ModelType3.TEXT_SMALL
-        });
-      }
+        const textResponse = await localAIManager.generateText({
+          prompt: jsonPrompt,
+          stopSequences: params.stopSequences,
+          runtime,
+          modelType: ModelType.TEXT_SMALL
+        });
         try {
           const extractJSON = (text) => {
             const jsonBlockRegex = /```(?:json)?\s*([\s\S]*?)\s*```/;
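Both object handlers nudge the model toward parseable output by appending a JSON-only instruction when the prompt doesn't already ask for it. A minimal sketch of that guard:

```ts
// Append a JSON-only instruction unless the prompt already requests it,
// as the OBJECT_SMALL and OBJECT_LARGE handlers do above.
function ensureJsonInstruction(prompt: string): string {
  if (prompt.includes("```json") || prompt.includes("respond with valid JSON")) {
    return prompt;
  }
  return (
    prompt +
    "\nPlease respond with valid JSON only, without any explanations, markdown formatting, or additional text."
  );
}
```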
@@ -3551,17 +2849,22 @@ var localAIPlugin = {
             return text.trim();
           };
           const extractedJsonText = extractJSON(textResponse);
-
+          logger8.debug("Extracted JSON text:", extractedJsonText);
           let jsonObject;
           try {
             jsonObject = JSON.parse(extractedJsonText);
           } catch (parseError) {
-
-
+            logger8.debug(
+              "Initial JSON parse failed, attempting to fix common issues"
+            );
+            const fixedJson = extractedJsonText.replace(/:\s*"([^"]*)(?:\n)([^"]*)"/g, ': "$1\\n$2"').replace(
+              /"([^"]*?)[^a-zA-Z0-9\s\.,;:\-_\(\)"'\[\]{}]([^"]*?)"/g,
+              '"$1$2"'
+            ).replace(/(\s*)(\w+)(\s*):/g, '$1"$2"$3:').replace(/,(\s*[\]}])/g, "$1");
             try {
               jsonObject = JSON.parse(fixedJson);
             } catch (finalError) {
-
+              logger8.error("Failed to parse JSON after fixing:", finalError);
               throw new Error("Invalid JSON returned from model");
             }
           }
@@ -3573,23 +2876,24 @@ var localAIPlugin = {
             }
           }
         } catch (schemaError) {
-
+          logger8.error("Schema validation failed:", schemaError);
         }
       }
       return jsonObject;
     } catch (parseError) {
-
-
+      logger8.error("Failed to parse JSON:", parseError);
+      logger8.error("Raw response:", textResponse);
       throw new Error("Invalid JSON returned from model");
     }
   } catch (error) {
-
+    logger8.error("Error in OBJECT_SMALL handler:", error);
     throw error;
   }
 },
-    [
+    [ModelType.OBJECT_LARGE]: async (runtime, params) => {
       try {
-
+        await localAIManager.initializeEnvironment();
+        logger8.info("OBJECT_LARGE handler - Processing request:", {
        prompt: params.prompt,
        hasSchema: !!params.schema,
        temperature: params.temperature
@@ -3598,23 +2902,12 @@ var localAIPlugin = {
         if (!jsonPrompt.includes("```json") && !jsonPrompt.includes("respond with valid JSON")) {
           jsonPrompt += "\nPlease respond with valid JSON only, without any explanations, markdown formatting, or additional text.";
         }
-        const
-
-
-
-
-
-          runtime,
-          modelType: ModelType3.TEXT_LARGE
-        });
-      } else {
-        textResponse = await localAIManager.generateText({
-          prompt: jsonPrompt,
-          stopSequences: params.stopSequences,
-          runtime,
-          modelType: ModelType3.TEXT_LARGE
-        });
-      }
+        const textResponse = await localAIManager.generateText({
+          prompt: jsonPrompt,
+          stopSequences: params.stopSequences,
+          runtime,
+          modelType: ModelType.TEXT_LARGE
+        });
         try {
           const extractJSON = (text) => {
             const jsonBlockRegex = /```(?:json)?\s*([\s\S]*?)\s*```/;
@@ -3634,17 +2927,22 @@ var localAIPlugin = {
           };
           const extractedJsonText = extractJSON(textResponse);
           const cleanedJsonText = cleanupJSON(extractedJsonText);
-
+          logger8.debug("Extracted JSON text:", cleanedJsonText);
           let jsonObject;
           try {
             jsonObject = JSON.parse(cleanedJsonText);
           } catch (parseError) {
-
-
+            logger8.debug(
+              "Initial JSON parse failed, attempting to fix common issues"
+            );
+            const fixedJson = cleanedJsonText.replace(/:\s*"([^"]*)(?:\n)([^"]*)"/g, ': "$1\\n$2"').replace(
+              /"([^"]*?)[^a-zA-Z0-9\s\.,;:\-_\(\)"'\[\]{}]([^"]*?)"/g,
+              '"$1$2"'
+            ).replace(/(\s*)(\w+)(\s*):/g, '$1"$2"$3:').replace(/,(\s*[\]}])/g, "$1");
             try {
               jsonObject = JSON.parse(fixedJson);
             } catch (finalError) {
-
+              logger8.error("Failed to parse JSON after fixing:", finalError);
               throw new Error("Invalid JSON returned from model");
             }
           }
@@ -3656,43 +2954,43 @@ var localAIPlugin = {
             }
           }
         } catch (schemaError) {
-
+          logger8.error("Schema validation failed:", schemaError);
         }
       }
       return jsonObject;
     } catch (parseError) {
-
-
+      logger8.error("Failed to parse JSON:", parseError);
+      logger8.error("Raw response:", textResponse);
       throw new Error("Invalid JSON returned from model");
     }
   } catch (error) {
-
+    logger8.error("Error in OBJECT_LARGE handler:", error);
     throw error;
   }
 },
-    [
+    [ModelType.TEXT_TOKENIZER_ENCODE]: async (_runtime, { text }) => {
       try {
         const manager = localAIManager.getTokenizerManager();
         const config = localAIManager.getActiveModelConfig();
         return await manager.encode(text, config);
       } catch (error) {
-
+        logger8.error("Error in TEXT_TOKENIZER_ENCODE handler:", error);
         throw error;
       }
     },
-    [
+    [ModelType.TEXT_TOKENIZER_DECODE]: async (_runtime, { tokens }) => {
       try {
         const manager = localAIManager.getTokenizerManager();
         const config = localAIManager.getActiveModelConfig();
         return await manager.decode(tokens, config);
       } catch (error) {
-
+        logger8.error("Error in TEXT_TOKENIZER_DECODE handler:", error);
         throw error;
       }
     },
-    [
+    [ModelType.IMAGE_DESCRIPTION]: async (_runtime, imageUrl) => {
       try {
-
+        logger8.info("Processing image from URL:", imageUrl);
         const response = await fetch(imageUrl);
         if (!response.ok) {
           throw new Error(`Failed to fetch image: ${response.statusText}`);
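The parse path in the object handlers is: extract a fenced block if present, try `JSON.parse`, and only then apply best-effort regex repairs. A condensed sketch of that fallback chain, reusing regexes from the diff (the full version also strips stray non-alphanumeric characters inside strings):

```ts
// Best-effort JSON recovery, mirroring the extractJSON + "fix common issues"
// chain in the OBJECT_SMALL/OBJECT_LARGE handlers above.
function parseModelJson(text: string): unknown {
  const fenced = text.match(/```(?:json)?\s*([\s\S]*?)\s*```/);
  const candidate = (fenced ? fenced[1] : text).trim();
  try {
    return JSON.parse(candidate);
  } catch {
    const fixed = candidate
      .replace(/:\s*"([^"]*)(?:\n)([^"]*)"/g, ': "$1\\n$2"') // escape raw newlines in strings
      .replace(/(\s*)(\w+)(\s*):/g, '$1"$2"$3:') // quote bare object keys
      .replace(/,(\s*[\]}])/g, "$1"); // drop trailing commas
    return JSON.parse(fixed); // still throws if invalid, as the handlers do
  }
}
```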
@@ -3701,32 +2999,32 @@ var localAIPlugin = {
         const mimeType = response.headers.get("content-type") || "image/jpeg";
         return await localAIManager.describeImage(buffer, mimeType);
       } catch (error) {
-
+        logger8.error("Error in IMAGE_DESCRIPTION handler:", {
         error: error instanceof Error ? error.message : String(error),
         imageUrl
       });
         throw error;
       }
     },
-    [
+    [ModelType.TRANSCRIPTION]: async (_runtime, audioBuffer) => {
       try {
-
+        logger8.info("Processing audio transcription:", {
         bufferSize: audioBuffer.length
       });
        return await localAIManager.transcribeAudio(audioBuffer);
       } catch (error) {
-
+        logger8.error("Error in TRANSCRIPTION handler:", {
         error: error instanceof Error ? error.message : String(error),
         bufferSize: audioBuffer.length
       });
         throw error;
       }
     },
-    [
+    [ModelType.TEXT_TO_SPEECH]: async (_runtime, text) => {
       try {
         return await localAIManager.generateSpeech(text);
       } catch (error) {
-
+        logger8.error("Error in TEXT_TO_SPEECH handler:", {
         error: error instanceof Error ? error.message : String(error),
         textLength: text.length
       });
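The IMAGE_DESCRIPTION handler fetches the URL itself and hands the vision manager a base64 data URL (the template literal appears in `describeImage` above). A standalone sketch of that fetch-to-data-URL step:

```ts
// Fetch an image and convert it to a data URL, as the IMAGE_DESCRIPTION
// handler and describeImage do above (Node 18+, global fetch).
async function imageUrlToDataUrl(imageUrl: string): Promise<string> {
  const response = await fetch(imageUrl);
  if (!response.ok) {
    throw new Error(`Failed to fetch image: ${response.statusText}`);
  }
  const buffer = Buffer.from(await response.arrayBuffer());
  const mimeType = response.headers.get("content-type") || "image/jpeg";
  return `data:${mimeType};base64,${buffer.toString("base64")}`;
}
```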
@@ -3742,21 +3040,21 @@ var localAIPlugin = {
       name: "local_ai_test_initialization",
       fn: async (runtime) => {
         try {
-
-          const result = await runtime.useModel(
+          logger8.info("Starting initialization test");
+          const result = await runtime.useModel(ModelType.TEXT_SMALL, {
             prompt: "Debug Mode: Test initialization. Respond with 'Initialization successful' if you can read this.",
             stopSequences: []
           });
-
+          logger8.info("Model response:", result);
           if (!result || typeof result !== "string") {
             throw new Error("Invalid response from model");
           }
           if (!result.includes("successful")) {
             throw new Error("Model response does not indicate success");
           }
-
+          logger8.success("Initialization test completed successfully");
         } catch (error) {
-
+          logger8.error("Initialization test failed:", {
           error: error instanceof Error ? error.message : String(error),
           stack: error instanceof Error ? error.stack : void 0
         });
@@ -3768,21 +3066,21 @@ var localAIPlugin = {
       name: "local_ai_test_text_large",
       fn: async (runtime) => {
         try {
-
-          const result = await runtime.useModel(
+          logger8.info("Starting TEXT_LARGE model test");
+          const result = await runtime.useModel(ModelType.TEXT_LARGE, {
             prompt: "Debug Mode: Generate a one-sentence response about artificial intelligence.",
             stopSequences: []
           });
-
+          logger8.info("Large model response:", result);
           if (!result || typeof result !== "string") {
             throw new Error("Invalid response from large model");
           }
           if (result.length < 10) {
             throw new Error("Response too short, possible model failure");
           }
-
+          logger8.success("TEXT_LARGE test completed successfully");
         } catch (error) {
-
+          logger8.error("TEXT_LARGE test failed:", {
           error: error instanceof Error ? error.message : String(error),
           stack: error instanceof Error ? error.stack : void 0
         });
@@ -3794,11 +3092,17 @@ var localAIPlugin = {
       name: "local_ai_test_text_embedding",
       fn: async (runtime) => {
         try {
-
-          const embedding = await runtime.useModel(
-
-
-
+          logger8.info("Starting TEXT_EMBEDDING test");
+          const embedding = await runtime.useModel(
+            ModelType.TEXT_EMBEDDING,
+            {
+              text: "This is a test of the text embedding model."
+            }
+          );
+          logger8.info(
+            "Embedding generated with dimensions:",
+            embedding.length
+          );
           if (!Array.isArray(embedding)) {
             throw new Error("Embedding is not an array");
           }
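The embedding test pins down a useful contract: null or empty input must come back as an all-zero vector of the configured dimensionality (384 by default) rather than a throw, so retrieval code can treat failures as "matches nothing". A sketch of that check:

```ts
// The contract the TEXT_EMBEDDING test verifies above: invalid input yields
// an all-zero vector of the expected dimensionality instead of throwing.
function isZeroVector(embedding: number[], dimensions = 384): boolean {
  return embedding.length === dimensions && embedding.every((v) => v === 0);
}
```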
@@ -3808,13 +3112,16 @@ var localAIPlugin = {
           if (embedding.some((val) => typeof val !== "number")) {
             throw new Error("Embedding contains non-numeric values");
           }
-          const nullEmbedding = await runtime.useModel(
+          const nullEmbedding = await runtime.useModel(
+            ModelType.TEXT_EMBEDDING,
+            null
+          );
           if (!Array.isArray(nullEmbedding) || nullEmbedding.some((val) => val !== 0)) {
             throw new Error("Null input did not return zero vector");
           }
-
+          logger8.success("TEXT_EMBEDDING test completed successfully");
         } catch (error) {
-
+          logger8.error("TEXT_EMBEDDING test failed:", {
           error: error instanceof Error ? error.message : String(error),
           stack: error instanceof Error ? error.stack : void 0
         });
@@ -3826,10 +3133,13 @@ var localAIPlugin = {
       name: "local_ai_test_tokenizer_encode",
       fn: async (runtime) => {
         try {
-
+          logger8.info("Starting TEXT_TOKENIZER_ENCODE test");
           const text = "Hello tokenizer test!";
-          const tokens = await runtime.useModel(
-
+          const tokens = await runtime.useModel(
+            ModelType.TEXT_TOKENIZER_ENCODE,
+            { text }
+          );
+          logger8.info("Encoded tokens:", { count: tokens.length });
           if (!Array.isArray(tokens)) {
             throw new Error("Tokens output is not an array");
           }
@@ -3839,9 +3149,11 @@ var localAIPlugin = {
           if (tokens.some((token) => !Number.isInteger(token))) {
             throw new Error("Tokens contain non-integer values");
           }
-
+          logger8.success(
+            "TEXT_TOKENIZER_ENCODE test completed successfully"
+          );
         } catch (error) {
-
+          logger8.error("TEXT_TOKENIZER_ENCODE test failed:", {
           error: error instanceof Error ? error.message : String(error),
           stack: error instanceof Error ? error.stack : void 0
         });
@@ -3853,24 +3165,32 @@ var localAIPlugin = {
       name: "local_ai_test_tokenizer_decode",
       fn: async (runtime) => {
         try {
-
+          logger8.info("Starting TEXT_TOKENIZER_DECODE test");
           const originalText = "Hello tokenizer test!";
-          const tokens = await runtime.useModel(
-
-
-
-
-
-
+          const tokens = await runtime.useModel(
+            ModelType.TEXT_TOKENIZER_ENCODE,
+            {
+              text: originalText
+            }
+          );
+          const decodedText = await runtime.useModel(
+            ModelType.TEXT_TOKENIZER_DECODE,
+            {
+              tokens
+            }
+          );
+          logger8.info("Round trip tokenization:", {
           original: originalText,
           decoded: decodedText
         });
           if (typeof decodedText !== "string") {
             throw new Error("Decoded output is not a string");
           }
-
+          logger8.success(
+            "TEXT_TOKENIZER_DECODE test completed successfully"
+          );
         } catch (error) {
-
+          logger8.error("TEXT_TOKENIZER_DECODE test failed:", {
           error: error instanceof Error ? error.message : String(error),
           stack: error instanceof Error ? error.stack : void 0
         });
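The decode test round-trips text through the tokenizer. Outside the runtime, the same property can be checked against any encode/decode pair, with the caveat that some tokenizers normalize whitespace and won't reproduce the input byte-for-byte:

```ts
// Round-trip property checked by the tokenizer tests above:
// decode(encode(s)) should reproduce s for a lossless tokenizer.
async function checkRoundTrip(
  encode: (text: string) => Promise<number[]>,
  decode: (tokens: number[]) => Promise<string>,
  text = "Hello tokenizer test!"
): Promise<boolean> {
  const tokens = await encode(text);
  const decoded = await decode(tokens);
  return decoded === text;
}
```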
@@ -3882,10 +3202,13 @@ var localAIPlugin = {
       name: "local_ai_test_image_description",
       fn: async (runtime) => {
         try {
-
+          logger8.info("Starting IMAGE_DESCRIPTION test");
           const imageUrl = "https://raw.githubusercontent.com/microsoft/FLAML/main/website/static/img/flaml.png";
-          const result = await runtime.useModel(
-
+          const result = await runtime.useModel(
+            ModelType.IMAGE_DESCRIPTION,
+            imageUrl
+          );
+          logger8.info("Image description result:", result);
           if (!result || typeof result !== "object") {
             throw new Error("Invalid response format");
           }
@@ -3895,9 +3218,9 @@ var localAIPlugin = {
           if (typeof result.title !== "string" || typeof result.description !== "string") {
             throw new Error("Title or description is not a string");
           }
-
+          logger8.success("IMAGE_DESCRIPTION test completed successfully");
         } catch (error) {
-
+          logger8.error("IMAGE_DESCRIPTION test failed:", {
           error: error instanceof Error ? error.message : String(error),
           stack: error instanceof Error ? error.stack : void 0
         });
@@ -3909,7 +3232,7 @@ var localAIPlugin = {
       name: "local_ai_test_transcription",
       fn: async (runtime) => {
         try {
-
+          logger8.info("Starting TRANSCRIPTION test");
           const audioData = new Uint8Array([
             82,
             73,
@@ -3933,14 +3256,17 @@ var localAIPlugin = {
             // "fmt "
           ]);
           const audioBuffer = Buffer.from(audioData);
-          const transcription = await runtime.useModel(
-
+          const transcription = await runtime.useModel(
+            ModelType.TRANSCRIPTION,
+            audioBuffer
+          );
+          logger8.info("Transcription result:", transcription);
           if (typeof transcription !== "string") {
             throw new Error("Transcription result is not a string");
           }
-
+          logger8.success("TRANSCRIPTION test completed successfully");
         } catch (error) {
-
+          logger8.error("TRANSCRIPTION test failed:", {
           error: error instanceof Error ? error.message : String(error),
           stack: error instanceof Error ? error.stack : void 0
         });
@@ -3952,9 +3278,12 @@ var localAIPlugin = {
       name: "local_ai_test_text_to_speech",
       fn: async (runtime) => {
         try {
-
+          logger8.info("Starting TEXT_TO_SPEECH test");
           const testText = "This is a test of the text to speech system.";
-          const audioStream = await runtime.useModel(
+          const audioStream = await runtime.useModel(
+            ModelType.TEXT_TO_SPEECH,
+            testText
+          );
           if (!(audioStream instanceof Readable2)) {
             throw new Error("TTS output is not a readable stream");
           }
@@ -3972,9 +3301,9 @@ var localAIPlugin = {
           });
           audioStream.on("error", reject);
         });
-
+        logger8.success("TEXT_TO_SPEECH test completed successfully");
       } catch (error) {
-
+        logger8.error("TEXT_TO_SPEECH test failed:", {
         error: error instanceof Error ? error.message : String(error),
         stack: error instanceof Error ? error.stack : void 0
       });
@@ -3986,9 +3315,9 @@ var localAIPlugin = {
     }
   ]
 };
-var index_default =
+var index_default = localAiPlugin;
 export {
   index_default as default,
-
+  localAiPlugin
 };
 //# sourceMappingURL=index.js.map