@elizaos/plugin-local-ai 1.0.0-beta.4 → 1.0.0-beta.41
This diff shows the contents of publicly released package versions as they appear in a supported public registry. It is provided for informational purposes only.
- package/LICENSE +1 -1
- package/README.md +0 -39
- package/dist/index.js +419 -767
- package/dist/index.js.map +1 -1
- package/package.json +3 -4
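The headline change in dist/index.js is the removal of the Ollama integration (its config options, the OllamaManager class, and the StudioLM/Ollama mutual-exclusion check) in favor of StudioLM plus a bundled BGE embedding model. As a reading aid, here is a minimal sketch of the beta.41 configuration surface, reassembled from the configSchema hunks below; the standalone zod usage and the example environment value are assumptions for illustration, not part of the package:

import { z } from "zod";

// Mirrors the beta.41 configSchema shown in the diff: every Ollama option is
// gone and a default embedding model is introduced.
const configSchema = z.object({
  USE_LOCAL_AI: z.boolean().default(true),
  USE_STUDIOLM_TEXT_MODELS: z.boolean().default(false),
  STUDIOLM_SERVER_URL: z.string().default("http://localhost:1234"),
  STUDIOLM_SMALL_MODEL: z.string().default("lmstudio-community/deepseek-r1-distill-qwen-1.5b"),
  STUDIOLM_MEDIUM_MODEL: z.string().default("deepseek-r1-distill-qwen-7b"),
  STUDIOLM_EMBEDDING_MODEL: z.string().default("BAAI/bge-small-en-v1.5")
});

// Environment flags arrive as strings, so the plugin coerces them to booleans
// before parsing, and USE_LOCAL_AI is always forced on (hypothetical usage):
const parsed = configSchema.parse({
  USE_LOCAL_AI: true,
  USE_STUDIOLM_TEXT_MODELS: process.env.USE_STUDIOLM_TEXT_MODELS === "true"
});
console.log(parsed.STUDIOLM_EMBEDDING_MODEL); // "BAAI/bge-small-en-v1.5"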
package/dist/index.js CHANGED
|
@@ -1,10 +1,10 @@
|
|
|
1
1
|
// src/index.ts
|
|
2
2
|
import fs5 from "node:fs";
|
|
3
|
+
import os3 from "node:os";
|
|
3
4
|
import path5 from "node:path";
|
|
4
5
|
import { Readable as Readable2 } from "node:stream";
|
|
5
6
|
import { fileURLToPath } from "node:url";
|
|
6
|
-
import { ModelType as
|
|
7
|
-
import { EmbeddingModel, FlagEmbedding } from "fastembed";
|
|
7
|
+
import { ModelType as ModelType2, logger as logger9 } from "@elizaos/core";
|
|
8
8
|
import {
|
|
9
9
|
LlamaChatSession,
|
|
10
10
|
getLlama as getLlama2
|
|
@@ -16,34 +16,21 @@ import { z } from "zod";
|
|
|
16
16
|
var configSchema = z.object({
|
|
17
17
|
USE_LOCAL_AI: z.boolean().default(true),
|
|
18
18
|
USE_STUDIOLM_TEXT_MODELS: z.boolean().default(false),
|
|
19
|
-
USE_OLLAMA_TEXT_MODELS: z.boolean().default(false),
|
|
20
|
-
// Ollama Configuration
|
|
21
|
-
OLLAMA_SERVER_URL: z.string().default("http://localhost:11434"),
|
|
22
|
-
OLLAMA_MODEL: z.string().default("deepseek-r1-distill-qwen-7b"),
|
|
23
|
-
USE_OLLAMA_EMBEDDING: z.boolean().default(false),
|
|
24
|
-
OLLAMA_EMBEDDING_MODEL: z.string().default(""),
|
|
25
|
-
SMALL_OLLAMA_MODEL: z.string().default("deepseek-r1:1.5b"),
|
|
26
|
-
MEDIUM_OLLAMA_MODEL: z.string().default("deepseek-r1:7b"),
|
|
27
|
-
LARGE_OLLAMA_MODEL: z.string().default("deepseek-r1:7b"),
|
|
28
19
|
// StudioLM Configuration
|
|
29
20
|
STUDIOLM_SERVER_URL: z.string().default("http://localhost:1234"),
|
|
30
21
|
STUDIOLM_SMALL_MODEL: z.string().default("lmstudio-community/deepseek-r1-distill-qwen-1.5b"),
|
|
31
22
|
STUDIOLM_MEDIUM_MODEL: z.string().default("deepseek-r1-distill-qwen-7b"),
|
|
32
|
-
STUDIOLM_EMBEDDING_MODEL: z.
|
|
23
|
+
STUDIOLM_EMBEDDING_MODEL: z.string().default("BAAI/bge-small-en-v1.5")
|
|
33
24
|
});
|
|
34
25
|
function validateModelConfig(config) {
|
|
35
26
|
logger.info("Validating model configuration with values:", {
|
|
36
27
|
USE_LOCAL_AI: config.USE_LOCAL_AI,
|
|
37
|
-
USE_STUDIOLM_TEXT_MODELS: config.USE_STUDIOLM_TEXT_MODELS
|
|
38
|
-
USE_OLLAMA_TEXT_MODELS: config.USE_OLLAMA_TEXT_MODELS
|
|
28
|
+
USE_STUDIOLM_TEXT_MODELS: config.USE_STUDIOLM_TEXT_MODELS
|
|
39
29
|
});
|
|
40
30
|
if (!config.USE_LOCAL_AI) {
|
|
41
31
|
config.USE_LOCAL_AI = true;
|
|
42
32
|
logger.info("Setting USE_LOCAL_AI to true as it's required");
|
|
43
33
|
}
|
|
44
|
-
if (config.USE_STUDIOLM_TEXT_MODELS && config.USE_OLLAMA_TEXT_MODELS) {
|
|
45
|
-
throw new Error("StudioLM and Ollama text models cannot be enabled simultaneously");
|
|
46
|
-
}
|
|
47
34
|
logger.info("Configuration is valid");
|
|
48
35
|
}
|
|
49
36
|
async function validateConfig(config) {
|
|
@@ -51,23 +38,15 @@ async function validateConfig(config) {
|
|
|
51
38
|
const booleanConfig = {
|
|
52
39
|
USE_LOCAL_AI: true,
|
|
53
40
|
// Always true
|
|
54
|
-
USE_STUDIOLM_TEXT_MODELS: config.USE_STUDIOLM_TEXT_MODELS === "true"
|
|
55
|
-
USE_OLLAMA_TEXT_MODELS: config.USE_OLLAMA_TEXT_MODELS === "true",
|
|
56
|
-
USE_OLLAMA_EMBEDDING: config.USE_OLLAMA_EMBEDDING === "true"
|
|
41
|
+
USE_STUDIOLM_TEXT_MODELS: config.USE_STUDIOLM_TEXT_MODELS === "true"
|
|
57
42
|
};
|
|
58
43
|
validateModelConfig(booleanConfig);
|
|
59
44
|
const fullConfig = {
|
|
60
45
|
...booleanConfig,
|
|
61
|
-
OLLAMA_SERVER_URL: config.OLLAMA_SERVER_URL || "http://localhost:11434",
|
|
62
|
-
OLLAMA_MODEL: config.OLLAMA_MODEL || "deepseek-r1-distill-qwen-7b",
|
|
63
|
-
OLLAMA_EMBEDDING_MODEL: config.OLLAMA_EMBEDDING_MODEL || "",
|
|
64
|
-
SMALL_OLLAMA_MODEL: config.SMALL_OLLAMA_MODEL || "deepseek-r1:1.5b",
|
|
65
|
-
MEDIUM_OLLAMA_MODEL: config.MEDIUM_OLLAMA_MODEL || "deepseek-r1:7b",
|
|
66
|
-
LARGE_OLLAMA_MODEL: config.LARGE_OLLAMA_MODEL || "deepseek-r1:7b",
|
|
67
46
|
STUDIOLM_SERVER_URL: config.STUDIOLM_SERVER_URL || "http://localhost:1234",
|
|
68
47
|
STUDIOLM_SMALL_MODEL: config.STUDIOLM_SMALL_MODEL || "lmstudio-community/deepseek-r1-distill-qwen-1.5b",
|
|
69
48
|
STUDIOLM_MEDIUM_MODEL: config.STUDIOLM_MEDIUM_MODEL || "deepseek-r1-distill-qwen-7b",
|
|
70
|
-
STUDIOLM_EMBEDDING_MODEL: config.STUDIOLM_EMBEDDING_MODEL ||
|
|
49
|
+
STUDIOLM_EMBEDDING_MODEL: config.STUDIOLM_EMBEDDING_MODEL || "BAAI/bge-small-en-v1.5"
|
|
71
50
|
};
|
|
72
51
|
const validatedConfig = configSchema.parse(fullConfig);
|
|
73
52
|
return validatedConfig;
|
|
@@ -110,6 +89,18 @@ var MODEL_SPECS = {
|
|
|
110
89
|
type: "llama"
|
|
111
90
|
}
|
|
112
91
|
},
|
|
92
|
+
embedding: {
|
|
93
|
+
name: "bge-small-en-v1.5.Q4_K_M.gguf",
|
|
94
|
+
repo: "ChristianAzinn/bge-small-en-v1.5-gguf",
|
|
95
|
+
size: "133 MB",
|
|
96
|
+
quantization: "Q4_K_M",
|
|
97
|
+
contextSize: 512,
|
|
98
|
+
dimensions: 384,
|
|
99
|
+
tokenizer: {
|
|
100
|
+
name: "ChristianAzinn/bge-small-en-v1.5-gguf",
|
|
101
|
+
type: "llama"
|
|
102
|
+
}
|
|
103
|
+
},
|
|
113
104
|
vision: {
|
|
114
105
|
name: "Florence-2-base-ft",
|
|
115
106
|
repo: "onnx-community/Florence-2-base-ft",
|
|
@@ -598,288 +589,11 @@ var DownloadManager = class _DownloadManager {
|
|
|
598
589
|
}
|
|
599
590
|
};
|
|
600
591
|
|
|
601
|
-
// src/utils/ollamaManager.ts
|
|
602
|
-
import { ModelType, logger as logger3 } from "@elizaos/core";
|
|
603
|
-
var OllamaManager = class _OllamaManager {
|
|
604
|
-
static instance = null;
|
|
605
|
-
serverUrl;
|
|
606
|
-
initialized = false;
|
|
607
|
-
availableModels = [];
|
|
608
|
-
configuredModels = {
|
|
609
|
-
small: process.env.SMALL_OLLAMA_MODEL || "deepseek-r1:1.5b",
|
|
610
|
-
medium: process.env.MEDIUM_OLLAMA_MODEL || "deepseek-r1:7b"
|
|
611
|
-
};
|
|
612
|
-
/**
|
|
613
|
-
* Private constructor for initializing OllamaManager.
|
|
614
|
-
*/
|
|
615
|
-
constructor() {
|
|
616
|
-
this.serverUrl = process.env.OLLAMA_SERVER_URL || "http://localhost:11434";
|
|
617
|
-
logger3.info("OllamaManager initialized with configuration:", {
|
|
618
|
-
serverUrl: this.serverUrl,
|
|
619
|
-
configuredModels: this.configuredModels,
|
|
620
|
-
timestamp: (/* @__PURE__ */ new Date()).toISOString()
|
|
621
|
-
});
|
|
622
|
-
}
|
|
623
|
-
/**
|
|
624
|
-
* Returns an instance of the OllamaManager class.
|
|
625
|
-
* If an instance does not already exist, a new instance is created and returned.
|
|
626
|
-
* @returns {OllamaManager} The instance of the OllamaManager class.
|
|
627
|
-
*/
|
|
628
|
-
static getInstance() {
|
|
629
|
-
if (!_OllamaManager.instance) {
|
|
630
|
-
_OllamaManager.instance = new _OllamaManager();
|
|
631
|
-
}
|
|
632
|
-
return _OllamaManager.instance;
|
|
633
|
-
}
|
|
634
|
-
/**
|
|
635
|
-
* Asynchronously checks the status of the server by attempting to fetch the "/api/tags" endpoint.
|
|
636
|
-
* @returns A Promise that resolves to a boolean indicating if the server is reachable and responding with a successful status.
|
|
637
|
-
*/
|
|
638
|
-
async checkServerStatus() {
|
|
639
|
-
try {
|
|
640
|
-
const response = await fetch(`${this.serverUrl}/api/tags`);
|
|
641
|
-
if (!response.ok) {
|
|
642
|
-
throw new Error(`Server responded with status: ${response.status}`);
|
|
643
|
-
}
|
|
644
|
-
return true;
|
|
645
|
-
} catch (error) {
|
|
646
|
-
logger3.error("Ollama server check failed:", {
|
|
647
|
-
error: error instanceof Error ? error.message : String(error),
|
|
648
|
-
serverUrl: this.serverUrl,
|
|
649
|
-
timestamp: (/* @__PURE__ */ new Date()).toISOString()
|
|
650
|
-
});
|
|
651
|
-
return false;
|
|
652
|
-
}
|
|
653
|
-
}
|
|
654
|
-
/**
|
|
655
|
-
* Fetches the available Ollama models from the specified server URL.
|
|
656
|
-
*
|
|
657
|
-
* @returns {Promise<void>} A Promise that resolves when the available models are successfully fetched.
|
|
658
|
-
*/
|
|
659
|
-
async fetchAvailableModels() {
|
|
660
|
-
try {
|
|
661
|
-
const response = await fetch(`${this.serverUrl}/api/tags`);
|
|
662
|
-
if (!response.ok) {
|
|
663
|
-
throw new Error(`Failed to fetch models: ${response.status}`);
|
|
664
|
-
}
|
|
665
|
-
const data = await response.json();
|
|
666
|
-
this.availableModels = data.models;
|
|
667
|
-
logger3.info("Ollama available models:", {
|
|
668
|
-
count: this.availableModels.length,
|
|
669
|
-
models: this.availableModels.map((m) => m.name),
|
|
670
|
-
timestamp: (/* @__PURE__ */ new Date()).toISOString()
|
|
671
|
-
});
|
|
672
|
-
} catch (error) {
|
|
673
|
-
logger3.error("Failed to fetch Ollama models:", {
|
|
674
|
-
error: error instanceof Error ? error.message : String(error),
|
|
675
|
-
serverUrl: this.serverUrl,
|
|
676
|
-
timestamp: (/* @__PURE__ */ new Date()).toISOString()
|
|
677
|
-
});
|
|
678
|
-
throw error;
|
|
679
|
-
}
|
|
680
|
-
}
|
|
681
|
-
/**
|
|
682
|
-
* Asynchronously tests a model specified by the given modelId.
|
|
683
|
-
*
|
|
684
|
-
* @param {string} modelId - The ID of the model to be tested.
|
|
685
|
-
* @returns {Promise<boolean>} - A promise that resolves to true if the model test is successful, false otherwise.
|
|
686
|
-
*/
|
|
687
|
-
async testModel(modelId) {
|
|
688
|
-
try {
|
|
689
|
-
const testRequest = {
|
|
690
|
-
model: modelId,
|
|
691
|
-
prompt: "Debug Mode: Test initialization. Respond with 'Initialization successful' if you can read this.",
|
|
692
|
-
stream: false,
|
|
693
|
-
options: {
|
|
694
|
-
temperature: 0.7,
|
|
695
|
-
num_predict: 100
|
|
696
|
-
}
|
|
697
|
-
};
|
|
698
|
-
logger3.info(`Testing model ${modelId}...`);
|
|
699
|
-
const response = await fetch(`${this.serverUrl}/api/generate`, {
|
|
700
|
-
method: "POST",
|
|
701
|
-
headers: {
|
|
702
|
-
"Content-Type": "application/json"
|
|
703
|
-
},
|
|
704
|
-
body: JSON.stringify(testRequest)
|
|
705
|
-
});
|
|
706
|
-
if (!response.ok) {
|
|
707
|
-
throw new Error(`Model test failed with status: ${response.status}`);
|
|
708
|
-
}
|
|
709
|
-
const result = await response.json();
|
|
710
|
-
if (!result.response) {
|
|
711
|
-
throw new Error("No valid response content received");
|
|
712
|
-
}
|
|
713
|
-
logger3.info(`Model ${modelId} test response:`, {
|
|
714
|
-
content: result.response,
|
|
715
|
-
model: result.model,
|
|
716
|
-
timestamp: (/* @__PURE__ */ new Date()).toISOString()
|
|
717
|
-
});
|
|
718
|
-
return true;
|
|
719
|
-
} catch (error) {
|
|
720
|
-
logger3.error(`Model ${modelId} test failed:`, {
|
|
721
|
-
error: error instanceof Error ? error.message : String(error),
|
|
722
|
-
stack: error instanceof Error ? error.stack : void 0,
|
|
723
|
-
timestamp: (/* @__PURE__ */ new Date()).toISOString()
|
|
724
|
-
});
|
|
725
|
-
return false;
|
|
726
|
-
}
|
|
727
|
-
}
|
|
728
|
-
/**
|
|
729
|
-
* Asynchronously tests the configured text models to ensure they are working properly.
|
|
730
|
-
* Logs the test results for each model and outputs a warning if any models fail the test.
|
|
731
|
-
* @returns {Promise<void>} A Promise that resolves when all configured models have been tested.
|
|
732
|
-
*/
|
|
733
|
-
async testTextModels() {
|
|
734
|
-
logger3.info("Testing configured text models...");
|
|
735
|
-
const results = await Promise.all([
|
|
736
|
-
this.testModel(this.configuredModels.small),
|
|
737
|
-
this.testModel(this.configuredModels.medium)
|
|
738
|
-
]);
|
|
739
|
-
const [smallWorking, mediumWorking] = results;
|
|
740
|
-
if (!smallWorking || !mediumWorking) {
|
|
741
|
-
const failedModels = [];
|
|
742
|
-
if (!smallWorking) failedModels.push("small");
|
|
743
|
-
if (!mediumWorking) failedModels.push("medium");
|
|
744
|
-
logger3.warn("Some models failed the test:", {
|
|
745
|
-
failedModels,
|
|
746
|
-
small: this.configuredModels.small,
|
|
747
|
-
medium: this.configuredModels.medium
|
|
748
|
-
});
|
|
749
|
-
} else {
|
|
750
|
-
logger3.success("All configured models passed the test");
|
|
751
|
-
}
|
|
752
|
-
}
|
|
753
|
-
/**
|
|
754
|
-
* Asynchronously initializes the Ollama service by checking server status,
|
|
755
|
-
* fetching available models, and testing text models.
|
|
756
|
-
*
|
|
757
|
-
* @returns A Promise that resolves when initialization is complete
|
|
758
|
-
*/
|
|
759
|
-
async initialize() {
|
|
760
|
-
try {
|
|
761
|
-
if (this.initialized) {
|
|
762
|
-
logger3.info("Ollama already initialized, skipping initialization");
|
|
763
|
-
return;
|
|
764
|
-
}
|
|
765
|
-
logger3.info("Starting Ollama initialization...");
|
|
766
|
-
const serverAvailable = await this.checkServerStatus();
|
|
767
|
-
if (!serverAvailable) {
|
|
768
|
-
throw new Error("Ollama server is not available");
|
|
769
|
-
}
|
|
770
|
-
await this.fetchAvailableModels();
|
|
771
|
-
await this.testTextModels();
|
|
772
|
-
this.initialized = true;
|
|
773
|
-
logger3.success("Ollama initialization complete");
|
|
774
|
-
} catch (error) {
|
|
775
|
-
logger3.error("Ollama initialization failed:", {
|
|
776
|
-
error: error instanceof Error ? error.message : String(error),
|
|
777
|
-
stack: error instanceof Error ? error.stack : void 0
|
|
778
|
-
});
|
|
779
|
-
throw error;
|
|
780
|
-
}
|
|
781
|
-
}
|
|
782
|
-
/**
|
|
783
|
-
* Retrieves the available Ollama models.
|
|
784
|
-
*
|
|
785
|
-
* @returns {OllamaModel[]} An array of OllamaModel objects representing the available models.
|
|
786
|
-
*/
|
|
787
|
-
getAvailableModels() {
|
|
788
|
-
return this.availableModels;
|
|
789
|
-
}
|
|
790
|
-
/**
|
|
791
|
-
* Check if the object is initialized.
|
|
792
|
-
* @returns {boolean} True if the object is initialized, false otherwise.
|
|
793
|
-
*/
|
|
794
|
-
isInitialized() {
|
|
795
|
-
return this.initialized;
|
|
796
|
-
}
|
|
797
|
-
/**
|
|
798
|
-
* Generates text using the Ollama AI model.
|
|
799
|
-
*
|
|
800
|
-
* @param {GenerateTextParams} params - The parameters for generating text.
|
|
801
|
-
* @param {boolean} [isInitialized=false] - Flag indicating if Ollama is already initialized.
|
|
802
|
-
* @returns {Promise<string>} - A promise that resolves with the generated text.
|
|
803
|
-
*/
|
|
804
|
-
async generateText(params, isInitialized = false) {
|
|
805
|
-
try {
|
|
806
|
-
logger3.info("Ollama generateText entry:", {
|
|
807
|
-
isInitialized,
|
|
808
|
-
currentInitState: this.initialized,
|
|
809
|
-
managerInitState: this.isInitialized(),
|
|
810
|
-
modelType: params.modelType,
|
|
811
|
-
contextLength: params.prompt?.length,
|
|
812
|
-
timestamp: (/* @__PURE__ */ new Date()).toISOString()
|
|
813
|
-
});
|
|
814
|
-
if (!this.initialized && !isInitialized) {
|
|
815
|
-
throw new Error("Ollama not initialized. Please initialize before generating text.");
|
|
816
|
-
}
|
|
817
|
-
logger3.info("Ollama preparing request:", {
|
|
818
|
-
model: params.modelType === ModelType.TEXT_LARGE ? this.configuredModels.medium : this.configuredModels.small,
|
|
819
|
-
contextLength: params.prompt.length,
|
|
820
|
-
timestamp: (/* @__PURE__ */ new Date()).toISOString()
|
|
821
|
-
});
|
|
822
|
-
const request = {
|
|
823
|
-
model: params.modelType === ModelType.TEXT_LARGE ? this.configuredModels.medium : this.configuredModels.small,
|
|
824
|
-
prompt: params.prompt,
|
|
825
|
-
stream: false,
|
|
826
|
-
options: {
|
|
827
|
-
temperature: 0.7,
|
|
828
|
-
top_p: 0.9,
|
|
829
|
-
num_predict: 8192,
|
|
830
|
-
repeat_penalty: 1.2,
|
|
831
|
-
frequency_penalty: 0.7,
|
|
832
|
-
presence_penalty: 0.7
|
|
833
|
-
}
|
|
834
|
-
};
|
|
835
|
-
const response = await fetch(`${this.serverUrl}/api/generate`, {
|
|
836
|
-
method: "POST",
|
|
837
|
-
headers: {
|
|
838
|
-
"Content-Type": "application/json"
|
|
839
|
-
},
|
|
840
|
-
body: JSON.stringify(request)
|
|
841
|
-
});
|
|
842
|
-
if (!response.ok) {
|
|
843
|
-
throw new Error(`Ollama request failed: ${response.status}`);
|
|
844
|
-
}
|
|
845
|
-
const result = await response.json();
|
|
846
|
-
if (!result.response) {
|
|
847
|
-
throw new Error("No valid response content received from Ollama");
|
|
848
|
-
}
|
|
849
|
-
let responseText = result.response;
|
|
850
|
-
logger3.info("Raw response structure:", {
|
|
851
|
-
responseLength: responseText.length,
|
|
852
|
-
hasAction: responseText.includes("action"),
|
|
853
|
-
hasThinkTag: responseText.includes("<think>")
|
|
854
|
-
});
|
|
855
|
-
if (responseText.includes("<think>")) {
|
|
856
|
-
logger3.info("Cleaning think tags from response");
|
|
857
|
-
responseText = responseText.replace(/<think>[\s\S]*?<\/think>\n?/g, "");
|
|
858
|
-
logger3.info("Think tags removed from response");
|
|
859
|
-
}
|
|
860
|
-
logger3.info("Ollama request completed successfully:", {
|
|
861
|
-
responseLength: responseText.length,
|
|
862
|
-
hasThinkTags: responseText.includes("<think>"),
|
|
863
|
-
timestamp: (/* @__PURE__ */ new Date()).toISOString()
|
|
864
|
-
});
|
|
865
|
-
return responseText;
|
|
866
|
-
} catch (error) {
|
|
867
|
-
logger3.error("Ollama text generation error:", {
|
|
868
|
-
error: error instanceof Error ? error.message : String(error),
|
|
869
|
-
stack: error instanceof Error ? error.stack : void 0,
|
|
870
|
-
phase: "text generation",
|
|
871
|
-
timestamp: (/* @__PURE__ */ new Date()).toISOString()
|
|
872
|
-
});
|
|
873
|
-
throw error;
|
|
874
|
-
}
|
|
875
|
-
}
|
|
876
|
-
};
|
|
877
|
-
|
|
878
592
|
// src/utils/platform.ts
|
|
879
593
|
import { exec } from "node:child_process";
|
|
880
594
|
import os from "node:os";
|
|
881
595
|
import { promisify } from "node:util";
|
|
882
|
-
import { logger as
|
|
596
|
+
import { logger as logger3 } from "@elizaos/core";
|
|
883
597
|
var execAsync = promisify(exec);
|
|
884
598
|
var PlatformManager = class _PlatformManager {
|
|
885
599
|
static instance;
|
|
@@ -906,10 +620,10 @@ var PlatformManager = class _PlatformManager {
|
|
|
906
620
|
*/
|
|
907
621
|
async initialize() {
|
|
908
622
|
try {
|
|
909
|
-
|
|
623
|
+
logger3.info("Initializing platform detection...");
|
|
910
624
|
this.capabilities = await this.detectSystemCapabilities();
|
|
911
625
|
} catch (error) {
|
|
912
|
-
|
|
626
|
+
logger3.error("Platform detection failed", { error });
|
|
913
627
|
throw error;
|
|
914
628
|
}
|
|
915
629
|
}
|
|
@@ -971,7 +685,7 @@ var PlatformManager = class _PlatformManager {
|
|
|
971
685
|
return null;
|
|
972
686
|
}
|
|
973
687
|
} catch (error) {
|
|
974
|
-
|
|
688
|
+
logger3.error("GPU detection failed", { error });
|
|
975
689
|
return null;
|
|
976
690
|
}
|
|
977
691
|
}
|
|
@@ -997,7 +711,7 @@ var PlatformManager = class _PlatformManager {
|
|
|
997
711
|
isAppleSilicon: false
|
|
998
712
|
};
|
|
999
713
|
} catch (error) {
|
|
1000
|
-
|
|
714
|
+
logger3.error("Mac GPU detection failed", { error });
|
|
1001
715
|
return {
|
|
1002
716
|
name: "Unknown Mac GPU",
|
|
1003
717
|
type: "metal",
|
|
@@ -1032,7 +746,7 @@ var PlatformManager = class _PlatformManager {
|
|
|
1032
746
|
type: "directml"
|
|
1033
747
|
};
|
|
1034
748
|
} catch (error) {
|
|
1035
|
-
|
|
749
|
+
logger3.error("Windows GPU detection failed", { error });
|
|
1036
750
|
return null;
|
|
1037
751
|
}
|
|
1038
752
|
}
|
|
@@ -1068,7 +782,7 @@ var PlatformManager = class _PlatformManager {
|
|
|
1068
782
|
type: "none"
|
|
1069
783
|
};
|
|
1070
784
|
} catch (error) {
|
|
1071
|
-
|
|
785
|
+
logger3.error("Linux GPU detection failed", { error });
|
|
1072
786
|
return null;
|
|
1073
787
|
}
|
|
1074
788
|
}
|
|
@@ -1205,7 +919,7 @@ var getPlatformManager = () => {
|
|
|
1205
919
|
};
|
|
1206
920
|
|
|
1207
921
|
// src/utils/studiolmManager.ts
|
|
1208
|
-
import { ModelType
|
|
922
|
+
import { ModelType, logger as logger4 } from "@elizaos/core";
|
|
1209
923
|
var StudioLMManager = class _StudioLMManager {
|
|
1210
924
|
static instance = null;
|
|
1211
925
|
serverUrl;
|
|
@@ -1222,7 +936,7 @@ var StudioLMManager = class _StudioLMManager {
|
|
|
1222
936
|
*/
|
|
1223
937
|
constructor() {
|
|
1224
938
|
this.serverUrl = process.env.STUDIOLM_SERVER_URL || "http://localhost:1234";
|
|
1225
|
-
|
|
939
|
+
logger4.info("StudioLMManager initialized with configuration:", {
|
|
1226
940
|
serverUrl: this.serverUrl,
|
|
1227
941
|
configuredModels: this.configuredModels,
|
|
1228
942
|
timestamp: (/* @__PURE__ */ new Date()).toISOString()
|
|
@@ -1250,7 +964,7 @@ var StudioLMManager = class _StudioLMManager {
|
|
|
1250
964
|
}
|
|
1251
965
|
return true;
|
|
1252
966
|
} catch (error) {
|
|
1253
|
-
|
|
967
|
+
logger4.error("LM Studio server check failed:", {
|
|
1254
968
|
error: error instanceof Error ? error.message : String(error),
|
|
1255
969
|
serverUrl: this.serverUrl,
|
|
1256
970
|
timestamp: (/* @__PURE__ */ new Date()).toISOString()
|
|
@@ -1271,13 +985,13 @@ var StudioLMManager = class _StudioLMManager {
|
|
|
1271
985
|
}
|
|
1272
986
|
const data = await response.json();
|
|
1273
987
|
this.availableModels = data.data;
|
|
1274
|
-
|
|
988
|
+
logger4.info("LM Studio available models:", {
|
|
1275
989
|
count: this.availableModels.length,
|
|
1276
990
|
models: this.availableModels.map((m) => m.id),
|
|
1277
991
|
timestamp: (/* @__PURE__ */ new Date()).toISOString()
|
|
1278
992
|
});
|
|
1279
993
|
} catch (error) {
|
|
1280
|
-
|
|
994
|
+
logger4.error("Failed to fetch LM Studio models:", {
|
|
1281
995
|
error: error instanceof Error ? error.message : String(error),
|
|
1282
996
|
serverUrl: this.serverUrl,
|
|
1283
997
|
timestamp: (/* @__PURE__ */ new Date()).toISOString()
|
|
@@ -1305,7 +1019,7 @@ var StudioLMManager = class _StudioLMManager {
|
|
|
1305
1019
|
max_tokens: -1,
|
|
1306
1020
|
stream: false
|
|
1307
1021
|
};
|
|
1308
|
-
|
|
1022
|
+
logger4.info(`Testing model ${modelId}...`);
|
|
1309
1023
|
const response = await fetch(`${this.serverUrl}/v1/chat/completions`, {
|
|
1310
1024
|
method: "POST",
|
|
1311
1025
|
headers: {
|
|
@@ -1320,14 +1034,14 @@ var StudioLMManager = class _StudioLMManager {
|
|
|
1320
1034
|
if (!result.choices?.[0]?.message?.content) {
|
|
1321
1035
|
throw new Error("No valid response content received");
|
|
1322
1036
|
}
|
|
1323
|
-
|
|
1037
|
+
logger4.info(`Model ${modelId} test response:`, {
|
|
1324
1038
|
content: result.choices[0].message.content,
|
|
1325
1039
|
model: result.model,
|
|
1326
1040
|
timestamp: (/* @__PURE__ */ new Date()).toISOString()
|
|
1327
1041
|
});
|
|
1328
1042
|
return true;
|
|
1329
1043
|
} catch (error) {
|
|
1330
|
-
|
|
1044
|
+
logger4.error(`Model ${modelId} test failed:`, {
|
|
1331
1045
|
error: error instanceof Error ? error.message : String(error),
|
|
1332
1046
|
stack: error instanceof Error ? error.stack : void 0,
|
|
1333
1047
|
timestamp: (/* @__PURE__ */ new Date()).toISOString()
|
|
@@ -1341,7 +1055,7 @@ var StudioLMManager = class _StudioLMManager {
|
|
|
1341
1055
|
* @returns {Promise<void>} A promise that resolves when the test is complete.
|
|
1342
1056
|
*/
|
|
1343
1057
|
async testTextModels() {
|
|
1344
|
-
|
|
1058
|
+
logger4.info("Testing configured text models...");
|
|
1345
1059
|
const results = await Promise.all([
|
|
1346
1060
|
this.testModel(this.configuredModels.small),
|
|
1347
1061
|
this.testModel(this.configuredModels.medium)
|
|
@@ -1351,13 +1065,13 @@ var StudioLMManager = class _StudioLMManager {
|
|
|
1351
1065
|
const failedModels = [];
|
|
1352
1066
|
if (!smallWorking) failedModels.push("small");
|
|
1353
1067
|
if (!mediumWorking) failedModels.push("medium");
|
|
1354
|
-
|
|
1068
|
+
logger4.warn("Some models failed the test:", {
|
|
1355
1069
|
failedModels,
|
|
1356
1070
|
small: this.configuredModels.small,
|
|
1357
1071
|
medium: this.configuredModels.medium
|
|
1358
1072
|
});
|
|
1359
1073
|
} else {
|
|
1360
|
-
|
|
1074
|
+
logger4.success("All configured models passed the test");
|
|
1361
1075
|
}
|
|
1362
1076
|
}
|
|
1363
1077
|
/**
|
|
@@ -1369,10 +1083,10 @@ var StudioLMManager = class _StudioLMManager {
|
|
|
1369
1083
|
async initialize() {
|
|
1370
1084
|
try {
|
|
1371
1085
|
if (this.initialized) {
|
|
1372
|
-
|
|
1086
|
+
logger4.info("StudioLM already initialized, skipping initialization");
|
|
1373
1087
|
return;
|
|
1374
1088
|
}
|
|
1375
|
-
|
|
1089
|
+
logger4.info("Starting StudioLM initialization...");
|
|
1376
1090
|
const serverAvailable = await this.checkServerStatus();
|
|
1377
1091
|
if (!serverAvailable) {
|
|
1378
1092
|
throw new Error("LM Studio server is not available");
|
|
@@ -1380,9 +1094,9 @@ var StudioLMManager = class _StudioLMManager {
|
|
|
1380
1094
|
await this.fetchAvailableModels();
|
|
1381
1095
|
await this.testTextModels();
|
|
1382
1096
|
this.initialized = true;
|
|
1383
|
-
|
|
1097
|
+
logger4.success("StudioLM initialization complete");
|
|
1384
1098
|
} catch (error) {
|
|
1385
|
-
|
|
1099
|
+
logger4.error("StudioLM initialization failed:", {
|
|
1386
1100
|
error: error instanceof Error ? error.message : String(error),
|
|
1387
1101
|
stack: error instanceof Error ? error.stack : void 0
|
|
1388
1102
|
});
|
|
@@ -1414,7 +1128,7 @@ var StudioLMManager = class _StudioLMManager {
|
|
|
1414
1128
|
*/
|
|
1415
1129
|
async generateText(params, isInitialized = false) {
|
|
1416
1130
|
try {
|
|
1417
|
-
|
|
1131
|
+
logger4.info("StudioLM generateText entry:", {
|
|
1418
1132
|
isInitialized,
|
|
1419
1133
|
currentInitState: this.initialized,
|
|
1420
1134
|
managerInitState: this.isInitialized(),
|
|
@@ -1432,19 +1146,19 @@ var StudioLMManager = class _StudioLMManager {
|
|
|
1432
1146
|
},
|
|
1433
1147
|
{ role: "user", content: params.prompt }
|
|
1434
1148
|
];
|
|
1435
|
-
|
|
1436
|
-
model: params.modelType ===
|
|
1149
|
+
logger4.info("StudioLM preparing request:", {
|
|
1150
|
+
model: params.modelType === ModelType.TEXT_LARGE ? this.configuredModels.medium : this.configuredModels.small,
|
|
1437
1151
|
messageCount: messages.length,
|
|
1438
1152
|
timestamp: (/* @__PURE__ */ new Date()).toISOString()
|
|
1439
1153
|
});
|
|
1440
|
-
|
|
1154
|
+
logger4.info("Incoming context structure:", {
|
|
1441
1155
|
contextLength: params.prompt.length,
|
|
1442
1156
|
hasAction: params.prompt.includes("action"),
|
|
1443
1157
|
runtime: !!params.runtime,
|
|
1444
1158
|
stopSequences: params.stopSequences
|
|
1445
1159
|
});
|
|
1446
1160
|
const request = {
|
|
1447
|
-
model: params.modelType ===
|
|
1161
|
+
model: params.modelType === ModelType.TEXT_LARGE ? this.configuredModels.medium : this.configuredModels.small,
|
|
1448
1162
|
messages,
|
|
1449
1163
|
temperature: 0.7,
|
|
1450
1164
|
max_tokens: 8192,
|
|
@@ -1465,24 +1179,24 @@ var StudioLMManager = class _StudioLMManager {
|
|
|
1465
1179
|
throw new Error("No valid response content received from StudioLM");
|
|
1466
1180
|
}
|
|
1467
1181
|
let responseText = result.choices[0].message.content;
|
|
1468
|
-
|
|
1182
|
+
logger4.info("Raw response structure:", {
|
|
1469
1183
|
responseLength: responseText.length,
|
|
1470
1184
|
hasAction: responseText.includes("action"),
|
|
1471
1185
|
hasThinkTag: responseText.includes("<think>")
|
|
1472
1186
|
});
|
|
1473
1187
|
if (responseText.includes("<think>")) {
|
|
1474
|
-
|
|
1188
|
+
logger4.info("Cleaning think tags from response");
|
|
1475
1189
|
responseText = responseText.replace(/<think>[\s\S]*?<\/think>\n?/g, "");
|
|
1476
|
-
|
|
1190
|
+
logger4.info("Think tags removed from response");
|
|
1477
1191
|
}
|
|
1478
|
-
|
|
1192
|
+
logger4.info("StudioLM request completed successfully:", {
|
|
1479
1193
|
responseLength: responseText.length,
|
|
1480
1194
|
hasThinkTags: responseText.includes("<think>"),
|
|
1481
1195
|
timestamp: (/* @__PURE__ */ new Date()).toISOString()
|
|
1482
1196
|
});
|
|
1483
1197
|
return responseText;
|
|
1484
1198
|
} catch (error) {
|
|
1485
|
-
|
|
1199
|
+
logger4.error("StudioLM text generation error:", {
|
|
1486
1200
|
error: error instanceof Error ? error.message : String(error),
|
|
1487
1201
|
stack: error instanceof Error ? error.stack : void 0,
|
|
1488
1202
|
phase: "text generation",
|
|
@@ -1494,7 +1208,7 @@ var StudioLMManager = class _StudioLMManager {
|
|
|
1494
1208
|
};
|
|
1495
1209
|
|
|
1496
1210
|
// src/utils/tokenizerManager.ts
|
|
1497
|
-
import { logger as
|
|
1211
|
+
import { logger as logger5 } from "@elizaos/core";
|
|
1498
1212
|
import { AutoTokenizer } from "@huggingface/transformers";
|
|
1499
1213
|
var TokenizerManager = class _TokenizerManager {
|
|
1500
1214
|
static instance = null;
|
|
@@ -1534,7 +1248,7 @@ var TokenizerManager = class _TokenizerManager {
|
|
|
1534
1248
|
async loadTokenizer(modelConfig) {
|
|
1535
1249
|
try {
|
|
1536
1250
|
const tokenizerKey = `${modelConfig.tokenizer.type}-${modelConfig.tokenizer.name}`;
|
|
1537
|
-
|
|
1251
|
+
logger5.info("Loading tokenizer:", {
|
|
1538
1252
|
key: tokenizerKey,
|
|
1539
1253
|
name: modelConfig.tokenizer.name,
|
|
1540
1254
|
type: modelConfig.tokenizer.type,
|
|
@@ -1542,7 +1256,7 @@ var TokenizerManager = class _TokenizerManager {
|
|
|
1542
1256
|
cacheDir: this.cacheDir
|
|
1543
1257
|
});
|
|
1544
1258
|
if (this.tokenizers.has(tokenizerKey)) {
|
|
1545
|
-
|
|
1259
|
+
logger5.info("Using cached tokenizer:", { key: tokenizerKey });
|
|
1546
1260
|
const cachedTokenizer = this.tokenizers.get(tokenizerKey);
|
|
1547
1261
|
if (!cachedTokenizer) {
|
|
1548
1262
|
throw new Error(`Tokenizer ${tokenizerKey} exists in map but returned undefined`);
|
|
@@ -1551,10 +1265,10 @@ var TokenizerManager = class _TokenizerManager {
|
|
|
1551
1265
|
}
|
|
1552
1266
|
const fs6 = await import("node:fs");
|
|
1553
1267
|
if (!fs6.existsSync(this.modelsDir)) {
|
|
1554
|
-
|
|
1268
|
+
logger5.warn("Models directory does not exist, creating it:", this.modelsDir);
|
|
1555
1269
|
fs6.mkdirSync(this.modelsDir, { recursive: true });
|
|
1556
1270
|
}
|
|
1557
|
-
|
|
1271
|
+
logger5.info(
|
|
1558
1272
|
"Initializing new tokenizer from HuggingFace with models directory:",
|
|
1559
1273
|
this.modelsDir
|
|
1560
1274
|
);
|
|
@@ -1564,28 +1278,28 @@ var TokenizerManager = class _TokenizerManager {
|
|
|
1564
1278
|
local_files_only: false
|
|
1565
1279
|
});
|
|
1566
1280
|
this.tokenizers.set(tokenizerKey, tokenizer);
|
|
1567
|
-
|
|
1281
|
+
logger5.success("Tokenizer loaded successfully:", { key: tokenizerKey });
|
|
1568
1282
|
return tokenizer;
|
|
1569
1283
|
} catch (tokenizeError) {
|
|
1570
|
-
|
|
1284
|
+
logger5.error("Failed to load tokenizer from HuggingFace:", {
|
|
1571
1285
|
error: tokenizeError instanceof Error ? tokenizeError.message : String(tokenizeError),
|
|
1572
1286
|
stack: tokenizeError instanceof Error ? tokenizeError.stack : void 0,
|
|
1573
1287
|
tokenizer: modelConfig.tokenizer.name,
|
|
1574
1288
|
modelsDir: this.modelsDir
|
|
1575
1289
|
});
|
|
1576
|
-
|
|
1290
|
+
logger5.info("Retrying tokenizer loading...");
|
|
1577
1291
|
const tokenizer = await AutoTokenizer.from_pretrained(modelConfig.tokenizer.name, {
|
|
1578
1292
|
cache_dir: this.modelsDir,
|
|
1579
1293
|
local_files_only: false
|
|
1580
1294
|
});
|
|
1581
1295
|
this.tokenizers.set(tokenizerKey, tokenizer);
|
|
1582
|
-
|
|
1296
|
+
logger5.success("Tokenizer loaded successfully on retry:", {
|
|
1583
1297
|
key: tokenizerKey
|
|
1584
1298
|
});
|
|
1585
1299
|
return tokenizer;
|
|
1586
1300
|
}
|
|
1587
1301
|
} catch (error) {
|
|
1588
|
-
|
|
1302
|
+
logger5.error("Failed to load tokenizer:", {
|
|
1589
1303
|
error: error instanceof Error ? error.message : String(error),
|
|
1590
1304
|
stack: error instanceof Error ? error.stack : void 0,
|
|
1591
1305
|
model: modelConfig.name,
|
|
@@ -1605,23 +1319,23 @@ var TokenizerManager = class _TokenizerManager {
|
|
|
1605
1319
|
*/
|
|
1606
1320
|
async encode(text, modelConfig) {
|
|
1607
1321
|
try {
|
|
1608
|
-
|
|
1322
|
+
logger5.info("Encoding text with tokenizer:", {
|
|
1609
1323
|
length: text.length,
|
|
1610
1324
|
tokenizer: modelConfig.tokenizer.name
|
|
1611
1325
|
});
|
|
1612
1326
|
const tokenizer = await this.loadTokenizer(modelConfig);
|
|
1613
|
-
|
|
1327
|
+
logger5.info("Tokenizer loaded, encoding text...");
|
|
1614
1328
|
const encoded = await tokenizer.encode(text, {
|
|
1615
1329
|
add_special_tokens: true,
|
|
1616
1330
|
return_token_type_ids: false
|
|
1617
1331
|
});
|
|
1618
|
-
|
|
1332
|
+
logger5.info("Text encoded successfully:", {
|
|
1619
1333
|
tokenCount: encoded.length,
|
|
1620
1334
|
tokenizer: modelConfig.tokenizer.name
|
|
1621
1335
|
});
|
|
1622
1336
|
return encoded;
|
|
1623
1337
|
} catch (error) {
|
|
1624
|
-
|
|
1338
|
+
logger5.error("Text encoding failed:", {
|
|
1625
1339
|
error: error instanceof Error ? error.message : String(error),
|
|
1626
1340
|
stack: error instanceof Error ? error.stack : void 0,
|
|
1627
1341
|
textLength: text.length,
|
|
@@ -1641,23 +1355,23 @@ var TokenizerManager = class _TokenizerManager {
|
|
|
1641
1355
|
*/
|
|
1642
1356
|
async decode(tokens, modelConfig) {
|
|
1643
1357
|
try {
|
|
1644
|
-
|
|
1358
|
+
logger5.info("Decoding tokens with tokenizer:", {
|
|
1645
1359
|
count: tokens.length,
|
|
1646
1360
|
tokenizer: modelConfig.tokenizer.name
|
|
1647
1361
|
});
|
|
1648
1362
|
const tokenizer = await this.loadTokenizer(modelConfig);
|
|
1649
|
-
|
|
1363
|
+
logger5.info("Tokenizer loaded, decoding tokens...");
|
|
1650
1364
|
const decoded = await tokenizer.decode(tokens, {
|
|
1651
1365
|
skip_special_tokens: true,
|
|
1652
1366
|
clean_up_tokenization_spaces: true
|
|
1653
1367
|
});
|
|
1654
|
-
|
|
1368
|
+
logger5.info("Tokens decoded successfully:", {
|
|
1655
1369
|
textLength: decoded.length,
|
|
1656
1370
|
tokenizer: modelConfig.tokenizer.name
|
|
1657
1371
|
});
|
|
1658
1372
|
return decoded;
|
|
1659
1373
|
} catch (error) {
|
|
1660
|
-
|
|
1374
|
+
logger5.error("Token decoding failed:", {
|
|
1661
1375
|
error: error instanceof Error ? error.message : String(error),
|
|
1662
1376
|
stack: error instanceof Error ? error.stack : void 0,
|
|
1663
1377
|
tokenCount: tokens.length,
|
|
@@ -1674,7 +1388,7 @@ import { exec as exec2 } from "node:child_process";
|
|
|
1674
1388
|
import fs2 from "node:fs";
|
|
1675
1389
|
import path2 from "node:path";
|
|
1676
1390
|
import { promisify as promisify2 } from "node:util";
|
|
1677
|
-
import { logger as
|
|
1391
|
+
import { logger as logger6 } from "@elizaos/core";
|
|
1678
1392
|
import { nodewhisper } from "nodejs-whisper";
|
|
1679
1393
|
var execAsync2 = promisify2(exec2);
|
|
1680
1394
|
var TranscribeManager = class _TranscribeManager {
|
|
@@ -1691,7 +1405,7 @@ var TranscribeManager = class _TranscribeManager {
|
|
|
1691
1405
|
*/
|
|
1692
1406
|
constructor(cacheDir) {
|
|
1693
1407
|
this.cacheDir = path2.join(cacheDir, "whisper");
|
|
1694
|
-
|
|
1408
|
+
logger6.debug("Initializing TranscribeManager", {
|
|
1695
1409
|
cacheDir: this.cacheDir,
|
|
1696
1410
|
timestamp: (/* @__PURE__ */ new Date()).toISOString()
|
|
1697
1411
|
});
|
|
@@ -1707,7 +1421,7 @@ var TranscribeManager = class _TranscribeManager {
|
|
|
1707
1421
|
await this.initializeFFmpeg();
|
|
1708
1422
|
this.ffmpegInitialized = true;
|
|
1709
1423
|
} catch (error) {
|
|
1710
|
-
|
|
1424
|
+
logger6.error("FFmpeg initialization failed:", {
|
|
1711
1425
|
error: error instanceof Error ? error.message : String(error),
|
|
1712
1426
|
stack: error instanceof Error ? error.stack : void 0,
|
|
1713
1427
|
timestamp: (/* @__PURE__ */ new Date()).toISOString()
|
|
@@ -1745,13 +1459,13 @@ var TranscribeManager = class _TranscribeManager {
|
|
|
1745
1459
|
try {
|
|
1746
1460
|
const { stdout } = await execAsync2("ffmpeg -version");
|
|
1747
1461
|
this.ffmpegVersion = stdout.split("\n")[0];
|
|
1748
|
-
|
|
1462
|
+
logger6.info("FFmpeg version:", {
|
|
1749
1463
|
version: this.ffmpegVersion,
|
|
1750
1464
|
timestamp: (/* @__PURE__ */ new Date()).toISOString()
|
|
1751
1465
|
});
|
|
1752
1466
|
} catch (error) {
|
|
1753
1467
|
this.ffmpegVersion = null;
|
|
1754
|
-
|
|
1468
|
+
logger6.error("Failed to get FFmpeg version:", {
|
|
1755
1469
|
error: error instanceof Error ? error.message : String(error),
|
|
1756
1470
|
timestamp: (/* @__PURE__ */ new Date()).toISOString()
|
|
1757
1471
|
});
|
|
@@ -1779,7 +1493,7 @@ var TranscribeManager = class _TranscribeManager {
|
|
|
1779
1493
|
}
|
|
1780
1494
|
} catch (error) {
|
|
1781
1495
|
this.ffmpegAvailable = false;
|
|
1782
|
-
|
|
1496
|
+
logger6.error("FFmpeg initialization failed:", {
|
|
1783
1497
|
error: error instanceof Error ? error.message : String(error),
|
|
1784
1498
|
stack: error instanceof Error ? error.stack : void 0,
|
|
1785
1499
|
timestamp: (/* @__PURE__ */ new Date()).toISOString()
|
|
@@ -1799,7 +1513,7 @@ var TranscribeManager = class _TranscribeManager {
|
|
|
1799
1513
|
const { stdout, stderr } = await execAsync2("which ffmpeg || where ffmpeg");
|
|
1800
1514
|
this.ffmpegPath = stdout.trim();
|
|
1801
1515
|
this.ffmpegAvailable = true;
|
|
1802
|
-
|
|
1516
|
+
logger6.info("FFmpeg found at:", {
|
|
1803
1517
|
path: this.ffmpegPath,
|
|
1804
1518
|
stderr: stderr ? stderr.trim() : void 0,
|
|
1805
1519
|
timestamp: (/* @__PURE__ */ new Date()).toISOString()
|
|
@@ -1807,7 +1521,7 @@ var TranscribeManager = class _TranscribeManager {
|
|
|
1807
1521
|
} catch (error) {
|
|
1808
1522
|
this.ffmpegAvailable = false;
|
|
1809
1523
|
this.ffmpegPath = null;
|
|
1810
|
-
|
|
1524
|
+
logger6.error("FFmpeg not found in PATH:", {
|
|
1811
1525
|
error: error instanceof Error ? error.message : String(error),
|
|
1812
1526
|
stderr: error instanceof Error && "stderr" in error ? error.stderr : void 0,
|
|
1813
1527
|
timestamp: (/* @__PURE__ */ new Date()).toISOString()
|
|
@@ -1827,7 +1541,7 @@ var TranscribeManager = class _TranscribeManager {
|
|
|
1827
1541
|
throw new Error("FFmpeg installation missing required codecs (pcm_s16le, wav)");
|
|
1828
1542
|
}
|
|
1829
1543
|
} catch (error) {
|
|
1830
|
-
|
|
1544
|
+
logger6.error("FFmpeg capabilities verification failed:", {
|
|
1831
1545
|
error: error instanceof Error ? error.message : String(error),
|
|
1832
1546
|
timestamp: (/* @__PURE__ */ new Date()).toISOString()
|
|
1833
1547
|
});
|
|
@@ -1838,7 +1552,7 @@ var TranscribeManager = class _TranscribeManager {
|
|
|
1838
1552
|
* Logs instructions on how to install FFmpeg if it is not properly installed.
|
|
1839
1553
|
*/
|
|
1840
1554
|
logFFmpegInstallInstructions() {
|
|
1841
|
-
|
|
1555
|
+
logger6.warn("FFmpeg is required but not properly installed. Please install FFmpeg:", {
|
|
1842
1556
|
instructions: {
|
|
1843
1557
|
mac: "brew install ffmpeg",
|
|
1844
1558
|
ubuntu: "sudo apt-get install ffmpeg",
|
|
@@ -1891,7 +1605,7 @@ var TranscribeManager = class _TranscribeManager {
|
|
|
1891
1605
|
`ffmpeg -y -loglevel error -i "${inputPath}" -acodec pcm_s16le -ar 16000 -ac 1 "${outputPath}"`
|
|
1892
1606
|
);
|
|
1893
1607
|
if (stderr) {
|
|
1894
|
-
|
|
1608
|
+
logger6.warn("FFmpeg conversion error:", {
|
|
1895
1609
|
stderr,
|
|
1896
1610
|
inputPath,
|
|
1897
1611
|
outputPath,
|
|
@@ -1902,7 +1616,7 @@ var TranscribeManager = class _TranscribeManager {
|
|
|
1902
1616
|
throw new Error("WAV file was not created successfully");
|
|
1903
1617
|
}
|
|
1904
1618
|
} catch (error) {
|
|
1905
|
-
|
|
1619
|
+
logger6.error("Audio conversion failed:", {
|
|
1906
1620
|
error: error instanceof Error ? error.message : String(error),
|
|
1907
1621
|
stack: error instanceof Error ? error.stack : void 0,
|
|
1908
1622
|
command: `ffmpeg -y -loglevel error -i "${inputPath}" -acodec pcm_s16le -ar 16000 -ac 1 "${outputPath}"`,
|
|
@@ -1938,7 +1652,7 @@ var TranscribeManager = class _TranscribeManager {
|
|
|
1938
1652
|
}
|
|
1939
1653
|
return tempWavFile;
|
|
1940
1654
|
} catch (error) {
|
|
1941
|
-
|
|
1655
|
+
logger6.error("Audio preprocessing failed:", {
|
|
1942
1656
|
error: error instanceof Error ? error.message : String(error),
|
|
1943
1657
|
stack: error instanceof Error ? error.stack : void 0,
|
|
1944
1658
|
ffmpegAvailable: this.ffmpegAvailable,
|
|
@@ -1965,7 +1679,7 @@ var TranscribeManager = class _TranscribeManager {
|
|
|
1965
1679
|
}
|
|
1966
1680
|
try {
|
|
1967
1681
|
const wavFile = await this.preprocessAudio(audioBuffer);
|
|
1968
|
-
|
|
1682
|
+
logger6.info("Starting transcription with whisper...");
|
|
1969
1683
|
const originalStdoutWrite = process.stdout.write;
|
|
1970
1684
|
const originalStderrWrite = process.stderr.write;
|
|
1971
1685
|
const noopWrite = () => true;
|
|
@@ -1988,19 +1702,19 @@ var TranscribeManager = class _TranscribeManager {
|
|
|
1988
1702
|
}
|
|
1989
1703
|
if (fs2.existsSync(wavFile)) {
|
|
1990
1704
|
fs2.unlinkSync(wavFile);
|
|
1991
|
-
|
|
1705
|
+
logger6.info("Temporary WAV file cleaned up");
|
|
1992
1706
|
}
|
|
1993
1707
|
const cleanText = output.split("\n").map((line) => {
|
|
1994
1708
|
const textMatch = line.match(/](.+)$/);
|
|
1995
1709
|
return textMatch ? textMatch[1].trim() : line.trim();
|
|
1996
1710
|
}).filter((line) => line).join(" ");
|
|
1997
|
-
|
|
1711
|
+
logger6.success("Transcription complete:", {
|
|
1998
1712
|
textLength: cleanText.length,
|
|
1999
1713
|
timestamp: (/* @__PURE__ */ new Date()).toISOString()
|
|
2000
1714
|
});
|
|
2001
1715
|
return { text: cleanText };
|
|
2002
1716
|
} catch (error) {
|
|
2003
|
-
|
|
1717
|
+
logger6.error("Transcription failed:", {
|
|
2004
1718
|
error: error instanceof Error ? error.message : String(error),
|
|
2005
1719
|
stack: error instanceof Error ? error.stack : void 0,
|
|
2006
1720
|
ffmpegAvailable: this.ffmpegAvailable
|
|
@@ -2014,48 +1728,10 @@ var TranscribeManager = class _TranscribeManager {
|
|
|
2014
1728
|
import fs3 from "node:fs";
|
|
2015
1729
|
import path3 from "node:path";
|
|
2016
1730
|
import { Readable } from "node:stream";
|
|
2017
|
-
import { logger as
|
|
1731
|
+
import { logger as logger7, prependWavHeader } from "@elizaos/core";
|
|
2018
1732
|
import {
|
|
2019
1733
|
getLlama
|
|
2020
1734
|
} from "node-llama-cpp";
|
|
2021
|
-
|
|
2022
|
-
// src/utils/audioUtils.ts
|
|
2023
|
-
import { PassThrough } from "node:stream";
|
|
2024
|
-
function getWavHeader(audioLength, sampleRate, channelCount = 1, bitsPerSample = 16) {
|
|
2025
|
-
const wavHeader = Buffer.alloc(44);
|
|
2026
|
-
wavHeader.write("RIFF", 0);
|
|
2027
|
-
wavHeader.writeUInt32LE(36 + audioLength, 4);
|
|
2028
|
-
wavHeader.write("WAVE", 8);
|
|
2029
|
-
wavHeader.write("fmt ", 12);
|
|
2030
|
-
wavHeader.writeUInt32LE(16, 16);
|
|
2031
|
-
wavHeader.writeUInt16LE(1, 20);
|
|
2032
|
-
wavHeader.writeUInt16LE(channelCount, 22);
|
|
2033
|
-
wavHeader.writeUInt32LE(sampleRate, 24);
|
|
2034
|
-
wavHeader.writeUInt32LE(sampleRate * bitsPerSample * channelCount / 8, 28);
|
|
2035
|
-
wavHeader.writeUInt16LE(bitsPerSample * channelCount / 8, 32);
|
|
2036
|
-
wavHeader.writeUInt16LE(bitsPerSample, 34);
|
|
2037
|
-
wavHeader.write("data", 36);
|
|
2038
|
-
wavHeader.writeUInt32LE(audioLength, 40);
|
|
2039
|
-
return wavHeader;
|
|
2040
|
-
}
|
|
2041
|
-
function prependWavHeader(readable, audioLength, sampleRate, channelCount = 1, bitsPerSample = 16) {
|
|
2042
|
-
const wavHeader = getWavHeader(audioLength, sampleRate, channelCount, bitsPerSample);
|
|
2043
|
-
let pushedHeader = false;
|
|
2044
|
-
const passThrough = new PassThrough();
|
|
2045
|
-
readable.on("data", (data) => {
|
|
2046
|
-
if (!pushedHeader) {
|
|
2047
|
-
passThrough.push(wavHeader);
|
|
2048
|
-
pushedHeader = true;
|
|
2049
|
-
}
|
|
2050
|
-
passThrough.push(data);
|
|
2051
|
-
});
|
|
2052
|
-
readable.on("end", () => {
|
|
2053
|
-
passThrough.end();
|
|
2054
|
-
});
|
|
2055
|
-
return passThrough;
|
|
2056
|
-
}
|
|
2057
|
-
|
|
2058
|
-
// src/utils/ttsManager.ts
|
|
2059
1735
|
var TTSManager = class _TTSManager {
|
|
2060
1736
|
static instance = null;
|
|
2061
1737
|
cacheDir;
|
|
@@ -2075,7 +1751,7 @@ var TTSManager = class _TTSManager {
|
|
|
2075
1751
|
this.modelsDir = process.env.LLAMALOCAL_PATH?.trim() ? path3.resolve(process.env.LLAMALOCAL_PATH.trim()) : path3.join(process.cwd(), "models");
|
|
2076
1752
|
this.downloadManager = DownloadManager.getInstance(this.cacheDir, this.modelsDir);
|
|
2077
1753
|
this.ensureCacheDirectory();
|
|
2078
|
-
|
|
1754
|
+
logger7.debug("TTSManager initialized");
|
|
2079
1755
|
}
|
|
2080
1756
|
/**
|
|
2081
1757
|
* Returns an instance of TTSManager, creating a new one if none exist.
|
|
@@ -2095,7 +1771,7 @@ var TTSManager = class _TTSManager {
|
|
|
2095
1771
|
ensureCacheDirectory() {
|
|
2096
1772
|
if (!fs3.existsSync(this.cacheDir)) {
|
|
2097
1773
|
fs3.mkdirSync(this.cacheDir, { recursive: true });
|
|
2098
|
-
|
|
1774
|
+
logger7.debug("Created TTS cache directory:", this.cacheDir);
|
|
2099
1775
|
}
|
|
2100
1776
|
}
|
|
2101
1777
|
/**
|
|
@@ -2112,7 +1788,7 @@ var TTSManager = class _TTSManager {
|
|
|
2112
1788
|
if (this.initialized && this.model && this.ctx) {
|
|
2113
1789
|
return;
|
|
2114
1790
|
}
|
|
2115
|
-
|
|
1791
|
+
logger7.info("Initializing TTS with GGUF backend...");
|
|
2116
1792
|
const modelSpec = MODEL_SPECS.tts.base;
|
|
2117
1793
|
const modelPath = path3.join(this.modelsDir, modelSpec.name);
|
|
2118
1794
|
if (!fs3.existsSync(modelPath)) {
|
|
@@ -2136,7 +1812,7 @@ var TTSManager = class _TTSManager {
|
|
|
2136
1812
|
let lastError = null;
|
|
2137
1813
|
for (const attempt of attempts) {
|
|
2138
1814
|
try {
|
|
2139
|
-
|
|
1815
|
+
logger7.info("Attempting TTS model download:", {
|
|
2140
1816
|
description: attempt.description,
|
|
2141
1817
|
repo: attempt.spec.repo,
|
|
2142
1818
|
name: attempt.spec.name,
|
|
@@ -2145,15 +1821,15 @@ var TTSManager = class _TTSManager {
|
|
|
2145
1821
|
});
|
|
2146
1822
|
const barLength = 30;
|
|
2147
1823
|
const emptyBar = "\u25B1".repeat(barLength);
|
|
2148
|
-
|
|
1824
|
+
logger7.info(`Downloading TTS model: ${emptyBar} 0%`);
|
|
2149
1825
|
await this.downloadManager.downloadFromUrl(attempt.url, modelPath);
|
|
2150
1826
|
const completedBar = "\u25B0".repeat(barLength);
|
|
2151
|
-
|
|
2152
|
-
|
|
1827
|
+
logger7.info(`Downloading TTS model: ${completedBar} 100%`);
|
|
1828
|
+
logger7.success("TTS model download successful with:", attempt.description);
|
|
2153
1829
|
break;
|
|
2154
1830
|
} catch (error) {
|
|
2155
1831
|
lastError = error;
|
|
2156
|
-
|
|
1832
|
+
logger7.warn("TTS model download attempt failed:", {
|
|
2157
1833
|
description: attempt.description,
|
|
2158
1834
|
error: error instanceof Error ? error.message : String(error),
|
|
2159
1835
|
timestamp: (/* @__PURE__ */ new Date()).toISOString()
|
|
@@ -2164,7 +1840,7 @@ var TTSManager = class _TTSManager {
|
|
|
2164
1840
|
throw lastError || new Error("All download attempts failed");
|
|
2165
1841
|
}
|
|
2166
1842
|
}
|
|
2167
|
-
|
|
1843
|
+
logger7.info("Loading TTS model...");
|
|
2168
1844
|
const llama = await getLlama();
|
|
2169
1845
|
this.model = await llama.loadModel({
|
|
2170
1846
|
modelPath,
|
|
@@ -2175,14 +1851,14 @@ var TTSManager = class _TTSManager {
|
|
|
2175
1851
|
contextSize: modelSpec.contextSize
|
|
2176
1852
|
});
|
|
2177
1853
|
this.sequence = this.ctx.getSequence();
|
|
2178
|
-
|
|
1854
|
+
logger7.success("TTS initialization complete", {
|
|
2179
1855
|
modelPath,
|
|
2180
1856
|
contextSize: modelSpec.contextSize,
|
|
2181
1857
|
timestamp: (/* @__PURE__ */ new Date()).toISOString()
|
|
2182
1858
|
});
|
|
2183
1859
|
this.initialized = true;
|
|
2184
1860
|
} catch (error) {
|
|
2185
|
-
|
|
1861
|
+
logger7.error("TTS initialization failed:", {
|
|
2186
1862
|
error: error instanceof Error ? error.message : String(error),
|
|
2187
1863
|
model: MODEL_SPECS.tts.base.name,
|
|
2188
1864
|
timestamp: (/* @__PURE__ */ new Date()).toISOString()
|
|
@@ -2202,14 +1878,14 @@ var TTSManager = class _TTSManager {
|
|
|
2202
1878
|
if (!this.model || !this.ctx || !this.sequence) {
|
|
2203
1879
|
throw new Error("TTS model not initialized");
|
|
2204
1880
|
}
|
|
2205
|
-
|
|
1881
|
+
logger7.info("Starting speech generation for text:", { text });
|
|
2206
1882
|
const prompt = `[SPEAKER=female_1][LANGUAGE=en]${text}`;
|
|
2207
|
-
|
|
2208
|
-
|
|
1883
|
+
logger7.info("Formatted prompt:", { prompt });
|
|
1884
|
+
logger7.info("Tokenizing input...");
|
|
2209
1885
|
const inputTokens = this.model.tokenize(prompt);
|
|
2210
|
-
|
|
1886
|
+
logger7.info("Input tokenized:", { tokenCount: inputTokens.length });
|
|
2211
1887
|
const maxTokens = inputTokens.length * 2;
|
|
2212
|
-
|
|
1888
|
+
logger7.info("Starting token generation with optimized limit:", {
|
|
2213
1889
|
maxTokens
|
|
2214
1890
|
});
|
|
2215
1891
|
const responseTokens = [];
|
|
@@ -2223,26 +1899,26 @@ var TTSManager = class _TTSManager {
|
|
|
2223
1899
|
const barLength = 30;
|
|
2224
1900
|
const filledLength = Math.floor(responseTokens.length / maxTokens * barLength);
|
|
2225
1901
|
const progressBar = "\u25B0".repeat(filledLength) + "\u25B1".repeat(barLength - filledLength);
|
|
2226
|
-
|
|
1902
|
+
logger7.info(
|
|
2227
1903
|
`Token generation: ${progressBar} ${percent}% (${responseTokens.length}/${maxTokens})`
|
|
2228
1904
|
);
|
|
2229
1905
|
if (responseTokens.length >= maxTokens) {
|
|
2230
|
-
|
|
1906
|
+
logger7.info("Token generation complete");
|
|
2231
1907
|
break;
|
|
2232
1908
|
}
|
|
2233
1909
|
}
|
|
2234
1910
|
} catch (error) {
|
|
2235
|
-
|
|
1911
|
+
logger7.error("Token generation error:", error);
|
|
2236
1912
|
throw error;
|
|
2237
1913
|
}
|
|
2238
1914
|
if (responseTokens.length === 0) {
|
|
2239
1915
|
throw new Error("No audio tokens generated");
|
|
2240
1916
|
}
|
|
2241
|
-
|
|
1917
|
+
logger7.info("Converting tokens to audio data...");
|
|
2242
1918
|
const audioData = this.processAudioResponse({
|
|
2243
1919
|
tokens: responseTokens.map((t) => Number.parseInt(this.model.detokenize([t]), 10))
|
|
2244
1920
|
});
|
|
2245
|
-
|
|
1921
|
+
logger7.info("Audio data generated:", {
|
|
2246
1922
|
byteLength: audioData.length,
|
|
2247
1923
|
sampleRate: MODEL_SPECS.tts.base.sampleRate
|
|
2248
1924
|
});
|
|
@@ -2253,10 +1929,10 @@ var TTSManager = class _TTSManager {
|
|
|
2253
1929
|
1,
|
|
2254
1930
|
16
|
|
2255
1931
|
);
|
|
2256
|
-
|
|
1932
|
+
logger7.success("Speech generation complete");
|
|
2257
1933
|
return audioStream;
|
|
2258
1934
|
} catch (error) {
|
|
2259
|
-
|
|
1935
|
+
logger7.error("Speech generation failed:", {
|
|
2260
1936
|
error: error instanceof Error ? error.message : String(error),
|
|
2261
1937
|
text
|
|
2262
1938
|
});
|
|
@@ -2298,7 +1974,7 @@ import fs4 from "node:fs";
|
|
|
2298
1974
|
import os2 from "node:os";
|
|
2299
1975
|
import path4 from "node:path";
|
|
2300
1976
|
import process2 from "node:process";
|
|
2301
|
-
import { logger as
|
|
1977
|
+
import { logger as logger8 } from "@elizaos/core";
|
|
2302
1978
|
import {
|
|
2303
1979
|
AutoProcessor,
|
|
2304
1980
|
AutoTokenizer as AutoTokenizer2,
|
|
@@ -2336,7 +2012,7 @@ var VisionManager = class _VisionManager {
|
|
|
2336
2012
|
this.ensureModelsDirExists();
|
|
2337
2013
|
this.downloadManager = DownloadManager.getInstance(this.cacheDir, this.modelsDir);
|
|
2338
2014
|
this.platformConfig = this.getPlatformConfig();
|
|
2339
|
-
|
|
2015
|
+
logger8.debug("VisionManager initialized");
|
|
2340
2016
|
}
|
|
2341
2017
|
/**
|
|
2342
2018
|
* Retrieves the platform configuration based on the operating system and architecture.
|
|
@@ -2373,7 +2049,7 @@ var VisionManager = class _VisionManager {
|
|
|
2373
2049
|
*/
|
|
2374
2050
|
ensureModelsDirExists() {
|
|
2375
2051
|
if (!existsSync(this.modelsDir)) {
|
|
2376
|
-
|
|
2052
|
+
logger8.debug(`Creating models directory at: ${this.modelsDir}`);
|
|
2377
2053
|
fs4.mkdirSync(this.modelsDir, { recursive: true });
|
|
2378
2054
|
}
|
|
2379
2055
|
}
|
|
@@ -2400,7 +2076,7 @@ var VisionManager = class _VisionManager {
|
|
|
2400
2076
|
checkCacheExists(modelId, type) {
|
|
2401
2077
|
const modelPath = path4.join(this.modelsDir, modelId.replace("/", "--"), type);
|
|
2402
2078
|
if (existsSync(modelPath)) {
|
|
2403
|
-
|
|
2079
|
+
logger8.info(`${type} found at: ${modelPath}`);
|
|
2404
2080
|
return true;
|
|
2405
2081
|
}
|
|
2406
2082
|
return false;
|
|
@@ -2423,7 +2099,7 @@ var VisionManager = class _VisionManager {
|
|
|
2423
2099
|
...component,
|
|
2424
2100
|
dtype: defaultDtype
|
|
2425
2101
|
}));
|
|
2426
|
-
|
|
2102
|
+
logger8.info("Model components configured with dtype:", {
|
|
2427
2103
|
platform,
|
|
2428
2104
|
arch,
|
|
2429
2105
|
defaultDtype,
|
|
@@ -2452,19 +2128,19 @@ var VisionManager = class _VisionManager {
|
|
|
2452
2128
|
async initialize() {
|
|
2453
2129
|
try {
|
|
2454
2130
|
if (this.initialized) {
|
|
2455
|
-
|
|
2131
|
+
logger8.info("Vision model already initialized, skipping initialization");
|
|
2456
2132
|
return;
|
|
2457
2133
|
}
|
|
2458
|
-
|
|
2134
|
+
logger8.info("Starting vision model initialization...");
|
|
2459
2135
|
const modelSpec = MODEL_SPECS.vision;
|
|
2460
|
-
|
|
2136
|
+
logger8.info("Configuring environment for vision model...");
|
|
2461
2137
|
env.allowLocalModels = true;
|
|
2462
2138
|
env.allowRemoteModels = true;
|
|
2463
2139
|
if (this.platformConfig.useOnnx) {
|
|
2464
2140
|
env.backends.onnx.enabled = true;
|
|
2465
2141
|
env.backends.onnx.logLevel = "info";
|
|
2466
2142
|
}
|
|
2467
|
-
|
|
2143
|
+
logger8.info("Loading Florence2 model...");
|
|
2468
2144
|
try {
|
|
2469
2145
|
let lastProgress = -1;
|
|
2470
2146
|
const modelCached = this.checkCacheExists(modelSpec.modelId, "model");
|
|
@@ -2482,22 +2158,22 @@ var VisionManager = class _VisionManager {
|
|
|
2482
2158
|
const barLength = 30;
|
|
2483
2159
|
const filledLength = Math.floor(currentProgress / 100 * barLength);
|
|
2484
2160
|
const progressBar = "\u25B0".repeat(filledLength) + "\u25B1".repeat(barLength - filledLength);
|
|
2485
|
-
|
|
2161
|
+
logger8.info(`Downloading vision model: ${progressBar} ${currentProgress}%`);
|
|
2486
2162
|
if (currentProgress === 100) this.modelDownloaded = true;
|
|
2487
2163
|
}
|
|
2488
2164
|
}
|
|
2489
2165
|
});
|
|
2490
2166
|
this.model = model;
|
|
2491
|
-
|
|
2167
|
+
logger8.success("Florence2 model loaded successfully");
|
|
2492
2168
|
} catch (error) {
|
|
2493
|
-
|
|
2169
|
+
logger8.error("Failed to load Florence2 model:", {
|
|
2494
2170
|
error: error instanceof Error ? error.message : String(error),
|
|
2495
2171
|
stack: error instanceof Error ? error.stack : void 0,
|
|
2496
2172
|
modelId: modelSpec.modelId
|
|
2497
2173
|
});
|
|
2498
2174
|
throw error;
|
|
2499
2175
|
}
|
|
2500
|
-
|
|
2176
|
+
logger8.info("Loading vision tokenizer...");
|
|
2501
2177
|
try {
|
|
2502
2178
|
const tokenizerCached = this.checkCacheExists(modelSpec.modelId, "tokenizer");
|
|
2503
2179
|
let tokenizerProgress = -1;
|
|
@@ -2513,21 +2189,21 @@ var VisionManager = class _VisionManager {
|
|
|
2513
2189
|
const barLength = 30;
|
|
2514
2190
|
const filledLength = Math.floor(currentProgress / 100 * barLength);
|
|
2515
2191
|
const progressBar = "\u25B0".repeat(filledLength) + "\u25B1".repeat(barLength - filledLength);
|
|
2516
|
-
|
|
2192
|
+
logger8.info(`Downloading vision tokenizer: ${progressBar} ${currentProgress}%`);
|
|
2517
2193
|
if (currentProgress === 100) this.tokenizerDownloaded = true;
|
|
2518
2194
|
}
|
|
2519
2195
|
}
|
|
2520
2196
|
});
|
|
2521
|
-
|
|
2197
|
+
logger8.success("Vision tokenizer loaded successfully");
|
|
2522
2198
|
} catch (error) {
|
|
2523
|
-
|
|
2199
|
+
logger8.error("Failed to load tokenizer:", {
|
|
2524
2200
|
error: error instanceof Error ? error.message : String(error),
|
|
2525
2201
|
stack: error instanceof Error ? error.stack : void 0,
|
|
2526
2202
|
modelId: modelSpec.modelId
|
|
2527
2203
|
});
|
|
2528
2204
|
throw error;
|
|
2529
2205
|
}
|
|
2530
|
-
|
|
2206
|
+
logger8.info("Loading vision processor...");
|
|
2531
2207
|
try {
|
|
2532
2208
|
const processorCached = this.checkCacheExists(modelSpec.modelId, "processor");
|
|
2533
2209
|
let processorProgress = -1;
|
|
@@ -2544,14 +2220,14 @@ var VisionManager = class _VisionManager {
|
|
|
2544
2220
|
const barLength = 30;
|
|
2545
2221
|
const filledLength = Math.floor(currentProgress / 100 * barLength);
|
|
2546
2222
|
const progressBar = "\u25B0".repeat(filledLength) + "\u25B1".repeat(barLength - filledLength);
|
|
2547
|
-
|
|
2223
|
+
logger8.info(`Downloading vision processor: ${progressBar} ${currentProgress}%`);
|
|
2548
2224
|
if (currentProgress === 100) this.processorDownloaded = true;
|
|
2549
2225
|
}
|
|
2550
2226
|
}
|
|
2551
2227
|
});
|
|
2552
|
-
|
|
2228
|
+
logger8.success("Vision processor loaded successfully");
|
|
2553
2229
|
} catch (error) {
|
|
2554
|
-
|
|
2230
|
+
logger8.error("Failed to load vision processor:", {
|
|
2555
2231
|
error: error instanceof Error ? error.message : String(error),
|
|
2556
2232
|
stack: error instanceof Error ? error.stack : void 0,
|
|
2557
2233
|
modelId: modelSpec.modelId
|
|
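
The three download callbacks above (model, tokenizer, processor) share the same 30-segment progress bar built from the U+25B0/U+25B1 block characters. A minimal sketch of that rendering; renderProgressBar is a name introduced here for illustration, since the bundle inlines this logic in each callback:

    // Render a fixed-width download progress bar, as inlined in the callbacks above.
    // `percent` is an integer 0-100, as reported by the download progress events.
    function renderProgressBar(percent: number, barLength = 30): string {
      const filledLength = Math.floor((percent / 100) * barLength);
      // "\u25B0" = filled segment, "\u25B1" = empty segment
      return "\u25B0".repeat(filledLength) + "\u25B1".repeat(barLength - filledLength);
    }

    console.info(`Downloading vision model: ${renderProgressBar(40)} 40%`); // 12 filled, 18 empty
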
@@ -2559,9 +2235,9 @@ var VisionManager = class _VisionManager {
|
|
|
2559
2235
|
throw error;
|
|
2560
2236
|
}
|
|
2561
2237
|
this.initialized = true;
|
|
2562
|
-
|
|
2238
|
+
logger8.success("Vision model initialization complete");
|
|
2563
2239
|
} catch (error) {
|
|
2564
|
-
|
|
2240
|
+
logger8.error("Vision model initialization failed:", {
|
|
2565
2241
|
error: error instanceof Error ? error.message : String(error),
|
|
2566
2242
|
stack: error instanceof Error ? error.stack : void 0,
|
|
2567
2243
|
modelsDir: this.modelsDir
|
|
@@ -2577,13 +2253,13 @@ var VisionManager = class _VisionManager {
|
|
|
2577
2253
|
*/
|
|
2578
2254
|
async fetchImage(url) {
|
|
2579
2255
|
try {
|
|
2580
|
-
|
|
2256
|
+
logger8.info(`Fetching image from URL: ${url.slice(0, 100)}...`);
|
|
2581
2257
|
if (url.startsWith("data:")) {
|
|
2582
|
-
|
|
2258
|
+
logger8.info("Processing data URL...");
|
|
2583
2259
|
const [header, base64Data] = url.split(",");
|
|
2584
2260
|
const mimeType2 = header.split(";")[0].split(":")[1];
|
|
2585
2261
|
const buffer2 = Buffer.from(base64Data, "base64");
|
|
2586
|
-
|
|
2262
|
+
logger8.info("Data URL processed successfully");
|
|
2587
2263
|
return { buffer: buffer2, mimeType: mimeType2 };
|
|
2588
2264
|
}
|
|
2589
2265
|
const response = await fetch(url);
|
|
@@ -2592,14 +2268,14 @@ var VisionManager = class _VisionManager {
|
|
|
2592
2268
|
}
|
|
2593
2269
|
const buffer = Buffer.from(await response.arrayBuffer());
|
|
2594
2270
|
const mimeType = response.headers.get("content-type") || "image/jpeg";
|
|
2595
|
-
|
|
2271
|
+
logger8.info("Image fetched successfully:", {
|
|
2596
2272
|
mimeType,
|
|
2597
2273
|
bufferSize: buffer.length,
|
|
2598
2274
|
status: response.status
|
|
2599
2275
|
});
|
|
2600
2276
|
return { buffer, mimeType };
|
|
2601
2277
|
} catch (error) {
|
|
2602
|
-
|
|
2278
|
+
logger8.error("Failed to fetch image:", {
|
|
2603
2279
|
error: error instanceof Error ? error.message : String(error),
|
|
2604
2280
|
stack: error instanceof Error ? error.stack : void 0,
|
|
2605
2281
|
url
|
|
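
fetchImage above special-cases data: URLs so base64-encoded images are decoded locally and never hit the network. A sketch of just that branch; decodeDataUrl is a hypothetical helper name, the parsing is the same as in the diff:

    // Decode a "data:<mime>;base64,<payload>" URL locally instead of fetching it.
    function decodeDataUrl(url: string): { buffer: Buffer; mimeType: string } {
      const [header, base64Data] = url.split(",");
      const mimeType = header.split(";")[0].split(":")[1]; // "data:image/png;base64" -> "image/png"
      return { buffer: Buffer.from(base64Data, "base64"), mimeType };
    }
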
@@ -2614,37 +2290,37 @@ var VisionManager = class _VisionManager {
|
|
|
2614
2290
|
*/
|
|
2615
2291
|
async processImage(imageUrl) {
|
|
2616
2292
|
try {
|
|
2617
|
-
|
|
2293
|
+
logger8.info("Starting image processing...");
|
|
2618
2294
|
if (!this.initialized) {
|
|
2619
|
-
|
|
2295
|
+
logger8.info("Vision model not initialized, initializing now...");
|
|
2620
2296
|
await this.initialize();
|
|
2621
2297
|
}
|
|
2622
2298
|
if (!this.model || !this.processor || !this.tokenizer) {
|
|
2623
2299
|
throw new Error("Vision model components not properly initialized");
|
|
2624
2300
|
}
|
|
2625
|
-
|
|
2301
|
+
logger8.info("Fetching image...");
|
|
2626
2302
|
const { buffer, mimeType } = await this.fetchImage(imageUrl);
|
|
2627
|
-
|
|
2303
|
+
logger8.info("Creating image blob...");
|
|
2628
2304
|
const blob = new Blob([buffer], { type: mimeType });
|
|
2629
|
-
|
|
2305
|
+
logger8.info("Converting blob to RawImage...");
|
|
2630
2306
|
const image = await RawImage.fromBlob(blob);
|
|
2631
|
-
|
|
2307
|
+
logger8.info("Processing image with vision processor...");
|
|
2632
2308
|
const visionInputs = await this.processor(image);
|
|
2633
|
-
|
|
2309
|
+
logger8.info("Constructing prompts...");
|
|
2634
2310
|
const prompts = this.processor.construct_prompts("<DETAILED_CAPTION>");
|
|
2635
|
-
|
|
2311
|
+
logger8.info("Tokenizing prompts...");
|
|
2636
2312
|
const textInputs = this.tokenizer(prompts);
|
|
2637
|
-
|
|
2313
|
+
logger8.info("Generating image description...");
|
|
2638
2314
|
const generatedIds = await this.model.generate({
|
|
2639
2315
|
...textInputs,
|
|
2640
2316
|
...visionInputs,
|
|
2641
2317
|
max_new_tokens: MODEL_SPECS.vision.maxTokens
|
|
2642
2318
|
});
|
|
2643
|
-
|
|
2319
|
+
logger8.info("Decoding generated text...");
|
|
2644
2320
|
const generatedText = this.tokenizer.batch_decode(generatedIds, {
|
|
2645
2321
|
skip_special_tokens: false
|
|
2646
2322
|
})[0];
|
|
2647
|
-
|
|
2323
|
+
logger8.info("Post-processing generation...");
|
|
2648
2324
|
const result = this.processor.post_process_generation(
|
|
2649
2325
|
generatedText,
|
|
2650
2326
|
"<DETAILED_CAPTION>",
|
|
@@ -2655,13 +2331,13 @@ var VisionManager = class _VisionManager {
|
|
|
2655
2331
|
title: `${detailedCaption.split(".")[0]}.`,
|
|
2656
2332
|
description: detailedCaption
|
|
2657
2333
|
};
|
|
2658
|
-
|
|
2334
|
+
logger8.success("Image processing complete:", {
|
|
2659
2335
|
titleLength: response.title.length,
|
|
2660
2336
|
descriptionLength: response.description.length
|
|
2661
2337
|
});
|
|
2662
2338
|
return response;
|
|
2663
2339
|
} catch (error) {
|
|
2664
|
-
|
|
2340
|
+
logger8.error("Image processing failed:", {
|
|
2665
2341
|
error: error instanceof Error ? error.message : String(error),
|
|
2666
2342
|
stack: error instanceof Error ? error.stack : void 0,
|
|
2667
2343
|
imageUrl,
|
|
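
Across the hunks above, processImage runs Florence2 as a caption pipeline: bytes to RawImage, vision processor, task prompt, tokenizer, generate, decode, post-process. A condensed sketch using only the calls the bundle itself makes; the components are passed in loosely typed because their imports are elided here, and the final argument to post_process_generation is cut off at the hunk boundary above, so image.size is an assumption based on common Florence2 usage in transformers.js:

    // Condensed sketch of the Florence2 caption pipeline. `RawImage`, `model`,
    // `processor`, and `tokenizer` are the transformers.js objects loaded earlier.
    async function caption(
      RawImage: any, model: any, processor: any, tokenizer: any,
      buffer: Buffer, mimeType: string, maxTokens: number
    ): Promise<string> {
      const blob = new Blob([buffer], { type: mimeType });
      const image = await RawImage.fromBlob(blob); // decode the bytes into a RawImage
      const visionInputs = await processor(image); // pixel features for the vision encoder
      const prompts = processor.construct_prompts("<DETAILED_CAPTION>");
      const textInputs = tokenizer(prompts);       // tokenized task prompt
      const generatedIds = await model.generate({
        ...textInputs,
        ...visionInputs,
        max_new_tokens: maxTokens,
      });
      const generatedText = tokenizer.batch_decode(generatedIds, { skip_special_tokens: false })[0];
      // image.size here is an assumption; the real final argument is elided above.
      const result = processor.post_process_generation(generatedText, "<DETAILED_CAPTION>", image.size);
      return result["<DETAILED_CAPTION>"];
    }
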
@@ -2733,23 +2409,24 @@ var LocalAIManager = class _LocalAIManager {
|
|
|
2733
2409
|
llama;
|
|
2734
2410
|
smallModel;
|
|
2735
2411
|
mediumModel;
|
|
2412
|
+
embeddingModel;
|
|
2413
|
+
embeddingContext;
|
|
2736
2414
|
ctx;
|
|
2737
2415
|
sequence;
|
|
2738
2416
|
chatSession;
|
|
2739
2417
|
modelPath;
|
|
2740
2418
|
mediumModelPath;
|
|
2419
|
+
embeddingModelPath;
|
|
2741
2420
|
cacheDir;
|
|
2742
|
-
embeddingModel = null;
|
|
2743
2421
|
tokenizerManager;
|
|
2744
2422
|
downloadManager;
|
|
2745
2423
|
visionManager;
|
|
2746
2424
|
activeModelConfig;
|
|
2425
|
+
embeddingModelConfig;
|
|
2747
2426
|
transcribeManager;
|
|
2748
2427
|
ttsManager;
|
|
2749
2428
|
studioLMManager;
|
|
2750
|
-
|
|
2751
|
-
// Initialization state flags
|
|
2752
|
-
ollamaInitialized = false;
|
|
2429
|
+
// Initialization state flags

|
|
2753
2430
|
studioLMInitialized = false;
|
|
2754
2431
|
smallModelInitialized = false;
|
|
2755
2432
|
mediumModelInitialized = false;
|
|
@@ -2764,7 +2441,6 @@ var LocalAIManager = class _LocalAIManager {
|
|
|
2764
2441
|
visionInitializingPromise = null;
|
|
2765
2442
|
transcriptionInitializingPromise = null;
|
|
2766
2443
|
ttsInitializingPromise = null;
|
|
2767
|
-
ollamaInitializingPromise = null;
|
|
2768
2444
|
studioLMInitializingPromise = null;
|
|
2769
2445
|
modelsDir;
|
|
2770
2446
|
/**
|
|
@@ -2772,26 +2448,27 @@ var LocalAIManager = class _LocalAIManager {
|
|
|
2772
2448
|
* This now only sets up the basic infrastructure without loading any models.
|
|
2773
2449
|
*/
|
|
2774
2450
|
constructor() {
|
|
2775
|
-
const modelsDir = path5.join(
|
|
2451
|
+
const modelsDir = path5.join(os3.homedir(), ".eliza", "models");
|
|
2776
2452
|
if (process.env.LLAMALOCAL_PATH?.trim()) {
|
|
2777
2453
|
this.modelsDir = path5.resolve(process.env.LLAMALOCAL_PATH.trim());
|
|
2778
2454
|
} else {
|
|
2779
2455
|
if (!fs5.existsSync(modelsDir)) {
|
|
2780
2456
|
fs5.mkdirSync(modelsDir, { recursive: true });
|
|
2781
|
-
|
|
2457
|
+
logger9.debug("Created models directory");
|
|
2782
2458
|
}
|
|
2783
2459
|
this.modelsDir = modelsDir;
|
|
2784
2460
|
}
|
|
2785
2461
|
this.modelPath = path5.join(this.modelsDir, "DeepHermes-3-Llama-3-3B-Preview-q4.gguf");
|
|
2786
2462
|
this.mediumModelPath = path5.join(this.modelsDir, "DeepHermes-3-Llama-3-8B-q4.gguf");
|
|
2463
|
+
this.embeddingModelPath = path5.join(this.modelsDir, "bge-small-en-v1.5.Q4_K_M.gguf");
|
|
2787
2464
|
const cacheDirEnv = process.env.CACHE_DIR?.trim();
|
|
2788
2465
|
if (cacheDirEnv) {
|
|
2789
2466
|
this.cacheDir = path5.resolve(cacheDirEnv);
|
|
2790
2467
|
} else {
|
|
2791
|
-
const cacheDir = path5.join(
|
|
2468
|
+
const cacheDir = path5.join(os3.homedir(), ".eliza", "cache");
|
|
2792
2469
|
if (!fs5.existsSync(cacheDir)) {
|
|
2793
2470
|
fs5.mkdirSync(cacheDir, { recursive: true });
|
|
2794
|
-
|
|
2471
|
+
logger9.debug("Ensuring cache directory exists:", cacheDir);
|
|
2795
2472
|
}
|
|
2796
2473
|
this.cacheDir = cacheDir;
|
|
2797
2474
|
}
|
|
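
The constructor now derives both directories from os.homedir() instead of a path relative to the bundle. The shared pattern, sketched with a hypothetical resolveDir helper (the bundle writes it out twice inline): an env override wins and is used as-is; otherwise a per-user directory under ~/.eliza is created on demand.

    import fs from "node:fs";
    import os from "node:os";
    import path from "node:path";

    // Hypothetical helper capturing the constructor's directory resolution above.
    function resolveDir(envValue: string | undefined, segment: string): string {
      if (envValue?.trim()) return path.resolve(envValue.trim()); // env override, used as-is
      const dir = path.join(os.homedir(), ".eliza", segment);     // e.g. ~/.eliza/models
      if (!fs.existsSync(dir)) fs.mkdirSync(dir, { recursive: true });
      return dir;
    }

    const modelsDir = resolveDir(process.env.LLAMALOCAL_PATH, "models");
    const cacheDir = resolveDir(process.env.CACHE_DIR, "cache");
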
@@ -2803,10 +2480,8 @@ var LocalAIManager = class _LocalAIManager {
|
|
|
2803
2480
|
if (process.env.USE_STUDIOLM_TEXT_MODELS === "true") {
|
|
2804
2481
|
this.studioLMManager = StudioLMManager.getInstance();
|
|
2805
2482
|
}
|
|
2806
|
-
if (process.env.USE_OLLAMA_TEXT_MODELS === "true") {
|
|
2807
|
-
this.ollamaManager = OllamaManager.getInstance();
|
|
2808
|
-
}
|
|
2809
2483
|
this.activeModelConfig = MODEL_SPECS.small;
|
|
2484
|
+
this.embeddingModelConfig = MODEL_SPECS.embedding;
|
|
2810
2485
|
}
|
|
2811
2486
|
/**
|
|
2812
2487
|
* Retrieves the singleton instance of LocalAIManager. If an instance does not already exist, a new one is created and returned.
|
|
@@ -2825,52 +2500,24 @@ var LocalAIManager = class _LocalAIManager {
|
|
|
2825
2500
|
*/
|
|
2826
2501
|
async initializeEnvironment() {
|
|
2827
2502
|
try {
|
|
2828
|
-
|
|
2503
|
+
logger9.info("Validating environment configuration...");
|
|
2829
2504
|
const config = {
|
|
2830
2505
|
USE_LOCAL_AI: process.env.USE_LOCAL_AI,
|
|
2831
|
-
USE_STUDIOLM_TEXT_MODELS: process.env.USE_STUDIOLM_TEXT_MODELS
|
|
2832
|
-
USE_OLLAMA_TEXT_MODELS: process.env.USE_OLLAMA_TEXT_MODELS
|
|
2506
|
+
USE_STUDIOLM_TEXT_MODELS: process.env.USE_STUDIOLM_TEXT_MODELS
|
|
2833
2507
|
};
|
|
2834
2508
|
const validatedConfig = await validateConfig(config);
|
|
2835
|
-
|
|
2509
|
+
logger9.info("Environment configuration validated");
|
|
2836
2510
|
process.env.USE_LOCAL_AI = String(validatedConfig.USE_LOCAL_AI);
|
|
2837
2511
|
process.env.USE_STUDIOLM_TEXT_MODELS = String(validatedConfig.USE_STUDIOLM_TEXT_MODELS);
|
|
2838
|
-
|
|
2839
|
-
logger10.success("Environment initialization complete");
|
|
2512
|
+
logger9.success("Environment initialization complete");
|
|
2840
2513
|
} catch (error) {
|
|
2841
|
-
|
|
2514
|
+
logger9.error("Environment validation failed:", {
|
|
2842
2515
|
error: error instanceof Error ? error.message : String(error),
|
|
2843
2516
|
stack: error instanceof Error ? error.stack : void 0
|
|
2844
2517
|
});
|
|
2845
2518
|
throw error;
|
|
2846
2519
|
}
|
|
2847
2520
|
}
|
|
2848
|
-
/**
|
|
2849
|
-
* Asynchronously initializes the Ollama model.
|
|
2850
|
-
*
|
|
2851
|
-
* @returns {Promise<void>} A Promise that resolves when the initialization is complete.
|
|
2852
|
-
* @throws {Error} If the Ollama manager is not created, or if initialization of Ollama models fails.
|
|
2853
|
-
*/
|
|
2854
|
-
async initializeOllama() {
|
|
2855
|
-
try {
|
|
2856
|
-
logger10.info("Initializing Ollama models...");
|
|
2857
|
-
if (!this.ollamaManager) {
|
|
2858
|
-
throw new Error("Ollama manager not created - cannot initialize");
|
|
2859
|
-
}
|
|
2860
|
-
await this.ollamaManager.initialize();
|
|
2861
|
-
if (!this.ollamaManager.isInitialized()) {
|
|
2862
|
-
throw new Error("Ollama initialization failed - models not properly loaded");
|
|
2863
|
-
}
|
|
2864
|
-
logger10.success("Ollama initialization complete");
|
|
2865
|
-
} catch (error) {
|
|
2866
|
-
logger10.error("Ollama initialization failed:", {
|
|
2867
|
-
error: error instanceof Error ? error.message : String(error),
|
|
2868
|
-
stack: error instanceof Error ? error.stack : void 0,
|
|
2869
|
-
timestamp: (/* @__PURE__ */ new Date()).toISOString()
|
|
2870
|
-
});
|
|
2871
|
-
throw error;
|
|
2872
|
-
}
|
|
2873
|
-
}
|
|
2874
2521
|
/**
|
|
2875
2522
|
* Initializes StudioLM model with error handling.
|
|
2876
2523
|
* @returns A Promise that resolves when the initialization is complete.
|
|
@@ -2878,7 +2525,7 @@ var LocalAIManager = class _LocalAIManager {
|
|
|
2878
2525
|
*/
|
|
2879
2526
|
async initializeStudioLM() {
|
|
2880
2527
|
try {
|
|
2881
|
-
|
|
2528
|
+
logger9.info("Initializing StudioLM models...");
|
|
2882
2529
|
if (!this.studioLMManager) {
|
|
2883
2530
|
throw new Error("StudioLM manager not created - cannot initialize");
|
|
2884
2531
|
}
|
|
@@ -2887,9 +2534,9 @@ var LocalAIManager = class _LocalAIManager {
|
|
|
2887
2534
|
throw new Error("StudioLM initialization failed - models not properly loaded");
|
|
2888
2535
|
}
|
|
2889
2536
|
this.studioLMInitialized = true;
|
|
2890
|
-
|
|
2537
|
+
logger9.success("StudioLM initialization complete");
|
|
2891
2538
|
} catch (error) {
|
|
2892
|
-
|
|
2539
|
+
logger9.error("StudioLM initialization failed:", {
|
|
2893
2540
|
error: error instanceof Error ? error.message : String(error),
|
|
2894
2541
|
stack: error instanceof Error ? error.stack : void 0,
|
|
2895
2542
|
timestamp: (/* @__PURE__ */ new Date()).toISOString()
|
|
@@ -2899,18 +2546,31 @@ var LocalAIManager = class _LocalAIManager {
|
|
|
2899
2546
|
}
|
|
2900
2547
|
/**
|
|
2901
2548
|
* Downloads the model based on the modelPath provided.
|
|
2902
|
-
* Determines
|
|
2549
|
+
* Determines the model spec and path based on the model type.
|
|
2903
2550
|
*
|
|
2551
|
+
* @param {ModelTypeName} modelType - The type of model to download
|
|
2552
|
+
* @param {ModelSpec} [customModelSpec] - Optional custom model spec to use instead of the default
|
|
2904
2553
|
* @returns A Promise that resolves to a boolean indicating whether the model download was successful.
|
|
2905
2554
|
*/
|
|
2906
|
-
async downloadModel(modelType) {
|
|
2907
|
-
|
|
2908
|
-
|
|
2555
|
+
async downloadModel(modelType, customModelSpec) {
|
|
2556
|
+
let modelSpec;
|
|
2557
|
+
let modelPath;
|
|
2558
|
+
if (customModelSpec) {
|
|
2559
|
+
modelSpec = customModelSpec;
|
|
2560
|
+
modelPath = modelType === ModelType2.TEXT_EMBEDDING ? this.embeddingModelPath : modelType === ModelType2.TEXT_LARGE ? this.mediumModelPath : this.modelPath;
|
|
2561
|
+
} else if (modelType === ModelType2.TEXT_EMBEDDING) {
|
|
2562
|
+
modelSpec = MODEL_SPECS.embedding;
|
|
2563
|
+
modelPath = this.embeddingModelPath;
|
|
2564
|
+
} else {
|
|
2565
|
+
modelSpec = modelType === ModelType2.TEXT_LARGE ? MODEL_SPECS.medium : MODEL_SPECS.small;
|
|
2566
|
+
modelPath = modelType === ModelType2.TEXT_LARGE ? this.mediumModelPath : this.modelPath;
|
|
2567
|
+
}
|
|
2909
2568
|
try {
|
|
2910
2569
|
return await this.downloadManager.downloadModel(modelSpec, modelPath);
|
|
2911
2570
|
} catch (error) {
|
|
2912
|
-
|
|
2571
|
+
logger9.error("Model download failed:", {
|
|
2913
2572
|
error: error instanceof Error ? error.message : String(error),
|
|
2573
|
+
modelType,
|
|
2914
2574
|
modelPath
|
|
2915
2575
|
});
|
|
2916
2576
|
throw error;
|
|
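
downloadModel now takes the model type plus an optional custom spec. The sketch below makes the asymmetry explicit: a custom spec overrides what gets downloaded, but the on-disk path is still chosen purely by model type. The names here stand in for MODEL_SPECS and the manager's *ModelPath fields:

    // Sketch of the spec/path dispatch in downloadModel above.
    type Kind = "TEXT_SMALL" | "TEXT_LARGE" | "TEXT_EMBEDDING";
    type Slot = "small" | "medium" | "embedding";

    function selectModel<S>(
      kind: Kind,
      specs: Record<Slot, S>,
      paths: Record<Slot, string>,
      customSpec?: S
    ): { spec: S; modelPath: string } {
      const slot: Slot =
        kind === "TEXT_EMBEDDING" ? "embedding" : kind === "TEXT_LARGE" ? "medium" : "small";
      // A custom spec overrides what is downloaded, but never where it is stored.
      return { spec: customSpec ?? specs[slot], modelPath: paths[slot] };
    }
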
@@ -2926,14 +2586,14 @@ var LocalAIManager = class _LocalAIManager {
|
|
|
2926
2586
|
const platformManager = getPlatformManager();
|
|
2927
2587
|
await platformManager.initialize();
|
|
2928
2588
|
const capabilities = platformManager.getCapabilities();
|
|
2929
|
-
|
|
2589
|
+
logger9.info("Platform capabilities detected:", {
|
|
2930
2590
|
platform: capabilities.platform,
|
|
2931
2591
|
gpu: capabilities.gpu?.type || "none",
|
|
2932
2592
|
recommendedModel: capabilities.recommendedModelSize,
|
|
2933
2593
|
supportedBackends: capabilities.supportedBackends
|
|
2934
2594
|
});
|
|
2935
2595
|
} catch (error) {
|
|
2936
|
-
|
|
2596
|
+
logger9.warn("Platform detection failed:", error);
|
|
2937
2597
|
}
|
|
2938
2598
|
}
|
|
2939
2599
|
/**
|
|
@@ -2942,8 +2602,8 @@ var LocalAIManager = class _LocalAIManager {
|
|
|
2942
2602
|
* @param {ModelTypeName} modelType - The type of model to initialize (default: ModelType.TEXT_SMALL)
|
|
2943
2603
|
* @returns {Promise<void>} A promise that resolves when initialization is complete or rejects if an error occurs
|
|
2944
2604
|
*/
|
|
2945
|
-
async initialize(modelType =
|
|
2946
|
-
if (modelType ===
|
|
2605
|
+
async initialize(modelType = ModelType2.TEXT_SMALL) {
|
|
2606
|
+
if (modelType === ModelType2.TEXT_LARGE) {
|
|
2947
2607
|
await this.lazyInitMediumModel();
|
|
2948
2608
|
} else {
|
|
2949
2609
|
await this.lazyInitSmallModel();
|
|
@@ -2956,90 +2616,148 @@ var LocalAIManager = class _LocalAIManager {
|
|
|
2956
2616
|
*/
|
|
2957
2617
|
async initializeEmbedding() {
|
|
2958
2618
|
try {
|
|
2959
|
-
|
|
2960
|
-
|
|
2619
|
+
logger9.info("Initializing embedding model...");
|
|
2620
|
+
logger9.info("Models directory:", this.modelsDir);
|
|
2961
2621
|
if (!fs5.existsSync(this.modelsDir)) {
|
|
2962
|
-
|
|
2622
|
+
logger9.warn("Models directory does not exist, creating it:", this.modelsDir);
|
|
2963
2623
|
fs5.mkdirSync(this.modelsDir, { recursive: true });
|
|
2964
2624
|
}
|
|
2625
|
+
await this.downloadModel(ModelType2.TEXT_EMBEDDING);
|
|
2626
|
+
if (!this.llama) {
|
|
2627
|
+
this.llama = await getLlama2();
|
|
2628
|
+
}
|
|
2965
2629
|
if (!this.embeddingModel) {
|
|
2966
|
-
|
|
2967
|
-
|
|
2968
|
-
|
|
2969
|
-
|
|
2970
|
-
|
|
2971
|
-
|
|
2972
|
-
|
|
2973
|
-
|
|
2974
|
-
|
|
2630
|
+
logger9.info("Loading embedding model:", this.embeddingModelPath);
|
|
2631
|
+
this.embeddingModel = await this.llama.loadModel({
|
|
2632
|
+
modelPath: this.embeddingModelPath,
|
|
2633
|
+
gpuLayers: 0,
|
|
2634
|
+
// Embedding models are typically small enough to run on CPU
|
|
2635
|
+
vocabOnly: false
|
|
2636
|
+
});
|
|
2637
|
+
this.embeddingContext = await this.embeddingModel.createEmbeddingContext({
|
|
2638
|
+
contextSize: this.embeddingModelConfig.contextSize,
|
|
2639
|
+
batchSize: 512
|
|
2975
2640
|
});
|
|
2976
|
-
|
|
2977
|
-
logger10.info(`Downloading embedding model: ${completedBar} 100%`);
|
|
2978
|
-
logger10.success("FlagEmbedding instance created successfully");
|
|
2641
|
+
logger9.success("Embedding model initialized successfully");
|
|
2979
2642
|
}
|
|
2980
2643
|
} catch (error) {
|
|
2981
|
-
|
|
2644
|
+
logger9.error("Embedding initialization failed with details:", {
|
|
2982
2645
|
error: error instanceof Error ? error.message : String(error),
|
|
2983
2646
|
stack: error instanceof Error ? error.stack : void 0,
|
|
2984
2647
|
modelsDir: this.modelsDir,
|
|
2985
|
-
|
|
2648
|
+
embeddingModelPath: this.embeddingModelPath
|
|
2986
2649
|
});
|
|
2987
2650
|
throw error;
|
|
2988
2651
|
}
|
|
2989
2652
|
}
|
|
2990
2653
|
/**
|
|
2991
|
-
*
|
|
2654
|
+
* Generate embeddings via the embedding context's getEmbeddingFor method.
|
|
2655
|
+
*/
|
|
2656
|
+
async generateEmbedding(text) {
|
|
2657
|
+
try {
|
|
2658
|
+
await this.lazyInitEmbedding();
|
|
2659
|
+
if (!this.embeddingModel || !this.embeddingContext) {
|
|
2660
|
+
throw new Error("Failed to initialize embedding model");
|
|
2661
|
+
}
|
|
2662
|
+
logger9.info("Generating embedding for text", { textLength: text.length });
|
|
2663
|
+
const embeddingResult = await this.embeddingContext.getEmbeddingFor(text);
|
|
2664
|
+
const mutableEmbedding = [...embeddingResult.vector];
|
|
2665
|
+
const normalizedEmbedding = this.normalizeEmbedding(mutableEmbedding);
|
|
2666
|
+
logger9.info("Embedding generation complete", { dimensions: normalizedEmbedding.length });
|
|
2667
|
+
return normalizedEmbedding;
|
|
2668
|
+
} catch (error) {
|
|
2669
|
+
logger9.error("Embedding generation failed:", {
|
|
2670
|
+
error: error instanceof Error ? error.message : String(error),
|
|
2671
|
+
stack: error instanceof Error ? error.stack : void 0,
|
|
2672
|
+
textLength: text?.length ?? "text is null"
|
|
2673
|
+
});
|
|
2674
|
+
const zeroDimensions = process.env.LOCAL_EMBEDDING_DIMENSIONS ? parseInt(process.env.LOCAL_EMBEDDING_DIMENSIONS, 10) : this.embeddingModelConfig.dimensions;
|
|
2675
|
+
return new Array(zeroDimensions).fill(0);
|
|
2676
|
+
}
|
|
2677
|
+
}
|
|
2678
|
+
/**
|
|
2679
|
+
* Normalizes an embedding vector using L2 normalization
|
|
2680
|
+
*
|
|
2681
|
+
* @param {number[]} embedding - The embedding vector to normalize
|
|
2682
|
+
* @returns {number[]} - The normalized embedding vector
|
|
2683
|
+
*/
|
|
2684
|
+
normalizeEmbedding(embedding) {
|
|
2685
|
+
const squareSum = embedding.reduce((sum, val) => sum + val * val, 0);
|
|
2686
|
+
const norm = Math.sqrt(squareSum);
|
|
2687
|
+
if (norm === 0) {
|
|
2688
|
+
return embedding;
|
|
2689
|
+
}
|
|
2690
|
+
return embedding.map((val) => val / norm);
|
|
2691
|
+
}
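
normalizeEmbedding scales the raw vector to unit length (L2 norm 1), presumably so downstream similarity search can treat dot products as cosine similarities; the zero-vector guard avoids a division by zero. The same logic with a worked example:

    // L2 normalization as implemented above.
    function normalizeEmbedding(embedding: number[]): number[] {
      const norm = Math.sqrt(embedding.reduce((sum, val) => sum + val * val, 0));
      return norm === 0 ? embedding : embedding.map((val) => val / norm);
    }

    normalizeEmbedding([3, 4]); // norm is 5, so the result is [0.6, 0.8]
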
|
|
2692
|
+
/**
|
|
2693
|
+
* Lazy initialize the embedding model
|
|
2694
|
+
*/
|
|
2695
|
+
async lazyInitEmbedding() {
|
|
2696
|
+
if (this.embeddingInitialized) return;
|
|
2697
|
+
if (!this.embeddingInitializingPromise) {
|
|
2698
|
+
this.embeddingInitializingPromise = (async () => {
|
|
2699
|
+
try {
|
|
2700
|
+
await this.initializeEnvironment();
|
|
2701
|
+
await this.downloadModel(ModelType2.TEXT_EMBEDDING);
|
|
2702
|
+
if (!this.llama) {
|
|
2703
|
+
this.llama = await getLlama2();
|
|
2704
|
+
}
|
|
2705
|
+
this.embeddingModel = await this.llama.loadModel({
|
|
2706
|
+
modelPath: this.embeddingModelPath,
|
|
2707
|
+
gpuLayers: 0,
|
|
2708
|
+
// Embedding models are typically small enough to run on CPU
|
|
2709
|
+
vocabOnly: false
|
|
2710
|
+
});
|
|
2711
|
+
this.embeddingContext = await this.embeddingModel.createEmbeddingContext({
|
|
2712
|
+
contextSize: this.embeddingModelConfig.contextSize,
|
|
2713
|
+
batchSize: 512
|
|
2714
|
+
});
|
|
2715
|
+
this.embeddingInitialized = true;
|
|
2716
|
+
logger9.info("Embedding model initialized successfully");
|
|
2717
|
+
} catch (error) {
|
|
2718
|
+
logger9.error("Failed to initialize embedding model:", error);
|
|
2719
|
+
this.embeddingInitializingPromise = null;
|
|
2720
|
+
throw error;
|
|
2721
|
+
}
|
|
2722
|
+
})();
|
|
2723
|
+
}
|
|
2724
|
+
await this.embeddingInitializingPromise;
|
|
2725
|
+
}
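
lazyInitEmbedding above replaces the previous fastembed/FlagEmbedding path with node-llama-cpp's own embedding support: load a GGUF embedding model on the CPU, then wrap it in an embedding context. A standalone sketch of those calls; the file name, context size, and batch size are the values visible in the diff, not requirements:

    import os from "node:os";
    import path from "node:path";
    import { getLlama } from "node-llama-cpp";

    const llama = await getLlama();
    const embeddingModel = await llama.loadModel({
      modelPath: path.join(os.homedir(), ".eliza", "models", "bge-small-en-v1.5.Q4_K_M.gguf"),
      gpuLayers: 0, // embedding models are small enough to run on CPU
    });
    const embeddingContext = await embeddingModel.createEmbeddingContext({
      contextSize: 512, // assumed value; the bundle reads this from MODEL_SPECS.embedding
      batchSize: 512,
    });
    const { vector } = await embeddingContext.getEmbeddingFor("hello world"); // readonly number[]
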
|
|
2726
|
+
/**
|
|
2727
|
+
* Asynchronously generates text using StudioLM models based on the specified parameters.
|
|
2992
2728
|
*
|
|
2993
2729
|
* @param {GenerateTextParams} params - The parameters for generating the text.
|
|
2994
2730
|
* @returns {Promise<string>} - A promise that resolves to the generated text.
|
|
2995
2731
|
*/
|
|
2996
|
-
async
|
|
2732
|
+
async generateTextLMStudio(params) {
|
|
2997
2733
|
try {
|
|
2998
2734
|
const modelConfig = this.getTextModelSource();
|
|
2999
|
-
|
|
2735
|
+
logger9.info("generateTextLMStudio called with:", {
|
|
3000
2736
|
modelSource: modelConfig.source,
|
|
3001
2737
|
modelType: params.modelType,
|
|
3002
2738
|
studioLMInitialized: this.studioLMInitialized,
|
|
3003
|
-
|
|
3004
|
-
studioLMEnabled: process.env.USE_STUDIOLM_TEXT_MODELS === "true",
|
|
3005
|
-
ollamaEnabled: process.env.USE_OLLAMA_TEXT_MODELS === "true"
|
|
2739
|
+
studioLMEnabled: process.env.USE_STUDIOLM_TEXT_MODELS === "true"
|
|
3006
2740
|
});
|
|
3007
2741
|
if (modelConfig.source === "studiolm") {
|
|
3008
2742
|
if (process.env.USE_STUDIOLM_TEXT_MODELS !== "true") {
|
|
3009
|
-
|
|
2743
|
+
logger9.warn(
|
|
3010
2744
|
"StudioLM requested but disabled in environment, falling back to local models"
|
|
3011
2745
|
);
|
|
3012
2746
|
return this.generateText(params);
|
|
3013
2747
|
}
|
|
3014
2748
|
if (!this.studioLMManager) {
|
|
3015
|
-
|
|
2749
|
+
logger9.warn("StudioLM manager not initialized, falling back to local models");
|
|
3016
2750
|
return this.generateText(params);
|
|
3017
2751
|
}
|
|
3018
2752
|
if (!this.studioLMInitialized) {
|
|
3019
|
-
|
|
2753
|
+
logger9.info("StudioLM not initialized, initializing now...");
|
|
3020
2754
|
await this.initializeStudioLM();
|
|
3021
2755
|
}
|
|
3022
2756
|
return await this.studioLMManager.generateText(params, this.studioLMInitialized);
|
|
3023
2757
|
}
|
|
3024
|
-
if (modelConfig.source === "ollama") {
|
|
3025
|
-
if (process.env.USE_OLLAMA_TEXT_MODELS !== "true") {
|
|
3026
|
-
logger10.warn("Ollama requested but disabled in environment, falling back to local models");
|
|
3027
|
-
return this.generateText(params);
|
|
3028
|
-
}
|
|
3029
|
-
if (!this.ollamaManager) {
|
|
3030
|
-
logger10.warn("Ollama manager not initialized, falling back to local models");
|
|
3031
|
-
return this.generateText(params);
|
|
3032
|
-
}
|
|
3033
|
-
if (!this.ollamaInitialized && !this.ollamaManager.isInitialized()) {
|
|
3034
|
-
logger10.info("Initializing Ollama in generateTextOllamaStudio");
|
|
3035
|
-
await this.ollamaManager.initialize();
|
|
3036
|
-
this.ollamaInitialized = true;
|
|
3037
|
-
}
|
|
3038
|
-
return await this.ollamaManager.generateText(params, this.ollamaInitialized);
|
|
3039
|
-
}
|
|
3040
2758
|
return this.generateText(params);
|
|
3041
2759
|
} catch (error) {
|
|
3042
|
-
|
|
2760
|
+
logger9.error("Text generation with StudioLM failed:", {
|
|
3043
2761
|
error: error instanceof Error ? error.message : String(error),
|
|
3044
2762
|
stack: error instanceof Error ? error.stack : void 0,
|
|
3045
2763
|
modelSource: this.getTextModelSource().source
|
|
@@ -3053,7 +2771,7 @@ var LocalAIManager = class _LocalAIManager {
|
|
|
3053
2771
|
*/
|
|
3054
2772
|
async generateText(params) {
|
|
3055
2773
|
try {
|
|
3056
|
-
if (params.modelType ===
|
|
2774
|
+
if (params.modelType === ModelType2.TEXT_LARGE) {
|
|
3057
2775
|
await this.lazyInitMediumModel();
|
|
3058
2776
|
if (!this.mediumModel) {
|
|
3059
2777
|
throw new Error("Medium model initialization failed");
|
|
@@ -3084,15 +2802,15 @@ var LocalAIManager = class _LocalAIManager {
|
|
|
3084
2802
|
if (!this.chatSession) {
|
|
3085
2803
|
throw new Error("Failed to create chat session");
|
|
3086
2804
|
}
|
|
3087
|
-
|
|
3088
|
-
|
|
2805
|
+
logger9.info("Created new chat session for model:", params.modelType);
|
|
2806
|
+
logger9.info("Incoming prompt structure:", {
|
|
3089
2807
|
contextLength: params.prompt.length,
|
|
3090
2808
|
hasAction: params.prompt.includes("action"),
|
|
3091
2809
|
runtime: !!params.runtime,
|
|
3092
2810
|
stopSequences: params.stopSequences
|
|
3093
2811
|
});
|
|
3094
2812
|
const tokens = await this.tokenizerManager.encode(params.prompt, this.activeModelConfig);
|
|
3095
|
-
|
|
2813
|
+
logger9.info("Input tokens:", { count: tokens.length });
|
|
3096
2814
|
const systemMessage = "You are a helpful AI assistant. Respond to the current request only.";
|
|
3097
2815
|
await this.chatSession.prompt(systemMessage, {
|
|
3098
2816
|
maxTokens: 1,
|
|
@@ -3104,49 +2822,25 @@ var LocalAIManager = class _LocalAIManager {
|
|
|
3104
2822
|
temperature: 0.7,
|
|
3105
2823
|
topP: 0.9,
|
|
3106
2824
|
repeatPenalty: {
|
|
3107
|
-
punishTokensFilter: () => this.smallModel.tokenize(wordsToPunish.join(" ")),
|
|
2825
|
+
punishTokensFilter: () => this.smallModel ? this.smallModel.tokenize(wordsToPunish.join(" ")) : [],
|
|
3108
2826
|
penalty: 1.2,
|
|
3109
2827
|
frequencyPenalty: 0.7,
|
|
3110
2828
|
presencePenalty: 0.7
|
|
3111
2829
|
}
|
|
3112
2830
|
});
|
|
3113
|
-
|
|
2831
|
+
logger9.info("Raw response structure:", {
|
|
3114
2832
|
responseLength: response.length,
|
|
3115
2833
|
hasAction: response.includes("action"),
|
|
3116
2834
|
hasThinkTag: response.includes("<think>")
|
|
3117
2835
|
});
|
|
3118
2836
|
if (response.includes("<think>")) {
|
|
3119
|
-
|
|
2837
|
+
logger9.info("Cleaning think tags from response");
|
|
3120
2838
|
response = response.replace(/<think>[\s\S]*?<\/think>\n?/g, "");
|
|
3121
|
-
|
|
2839
|
+
logger9.info("Think tags removed from response");
|
|
3122
2840
|
}
|
|
3123
2841
|
return response;
|
|
3124
2842
|
} catch (error) {
|
|
3125
|
-
|
|
3126
|
-
throw error;
|
|
3127
|
-
}
|
|
3128
|
-
}
|
|
3129
|
-
/**
|
|
3130
|
-
* Generate embeddings - now with lazy initialization
|
|
3131
|
-
*/
|
|
3132
|
-
async generateEmbedding(text) {
|
|
3133
|
-
try {
|
|
3134
|
-
await this.lazyInitEmbedding();
|
|
3135
|
-
if (!this.embeddingModel) {
|
|
3136
|
-
throw new Error("Failed to initialize embedding model");
|
|
3137
|
-
}
|
|
3138
|
-
logger10.info("Generating query embedding...");
|
|
3139
|
-
const embedding = await this.embeddingModel.queryEmbed(text);
|
|
3140
|
-
const dimensions = embedding.length;
|
|
3141
|
-
logger10.info("Embedding generation complete", { dimensions });
|
|
3142
|
-
return Array.from(embedding);
|
|
3143
|
-
} catch (error) {
|
|
3144
|
-
logger10.error("Embedding generation failed:", {
|
|
3145
|
-
error: error instanceof Error ? error.message : String(error),
|
|
3146
|
-
stack: error instanceof Error ? error.stack : void 0,
|
|
3147
|
-
// Only access text.length if text exists
|
|
3148
|
-
textLength: text?.length ?? "text is null"
|
|
3149
|
-
});
|
|
2843
|
+
logger9.error("Text generation failed:", error);
|
|
3150
2844
|
throw error;
|
|
3151
2845
|
}
|
|
3152
2846
|
}
|
|
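
generateText strips <think> blocks because the DeepSeek-R1-style defaults in this plugin emit their chain of thought inside them before the actual reply. The non-greedy regex removes every such block plus one trailing newline:

    // Sketch of the cleanup above.
    function stripThinkTags(response: string): string {
      return response.replace(/<think>[\s\S]*?<\/think>\n?/g, "");
    }

    stripThinkTags("<think>working it out...</think>\nHello!"); // => "Hello!"
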
@@ -3160,7 +2854,7 @@ var LocalAIManager = class _LocalAIManager {
|
|
|
3160
2854
|
const dataUrl = `data:${mimeType};base64,${base64}`;
|
|
3161
2855
|
return await this.visionManager.processImage(dataUrl);
|
|
3162
2856
|
} catch (error) {
|
|
3163
|
-
|
|
2857
|
+
logger9.error("Image description failed:", error);
|
|
3164
2858
|
throw error;
|
|
3165
2859
|
}
|
|
3166
2860
|
}
|
|
@@ -3173,7 +2867,7 @@ var LocalAIManager = class _LocalAIManager {
|
|
|
3173
2867
|
const result = await this.transcribeManager.transcribe(audioBuffer);
|
|
3174
2868
|
return result.text;
|
|
3175
2869
|
} catch (error) {
|
|
3176
|
-
|
|
2870
|
+
logger9.error("Audio transcription failed:", {
|
|
3177
2871
|
error: error instanceof Error ? error.message : String(error),
|
|
3178
2872
|
bufferSize: audioBuffer.length
|
|
3179
2873
|
});
|
|
@@ -3188,7 +2882,7 @@ var LocalAIManager = class _LocalAIManager {
|
|
|
3188
2882
|
await this.lazyInitTTS();
|
|
3189
2883
|
return await this.ttsManager.generateSpeech(text);
|
|
3190
2884
|
} catch (error) {
|
|
3191
|
-
|
|
2885
|
+
logger9.error("Speech generation failed:", {
|
|
3192
2886
|
error: error instanceof Error ? error.message : String(error),
|
|
3193
2887
|
textLength: text.length
|
|
3194
2888
|
});
|
|
@@ -3219,18 +2913,16 @@ var LocalAIManager = class _LocalAIManager {
|
|
|
3219
2913
|
try {
|
|
3220
2914
|
const config = {
|
|
3221
2915
|
source: "local",
|
|
3222
|
-
modelType:
|
|
2916
|
+
modelType: ModelType2.TEXT_SMALL
|
|
3223
2917
|
};
|
|
3224
2918
|
if (process.env.USE_STUDIOLM_TEXT_MODELS === "true" && this.studioLMManager) {
|
|
3225
2919
|
config.source = "studiolm";
|
|
3226
|
-
} else if (process.env.USE_OLLAMA_TEXT_MODELS === "true" && this.ollamaManager) {
|
|
3227
|
-
config.source = "ollama";
|
|
3228
2920
|
}
|
|
3229
|
-
|
|
2921
|
+
logger9.info("Selected text model source:", config);
|
|
3230
2922
|
return config;
|
|
3231
2923
|
} catch (error) {
|
|
3232
|
-
|
|
3233
|
-
return { source: "local", modelType:
|
|
2924
|
+
logger9.error("Error determining text model source:", error);
|
|
2925
|
+
return { source: "local", modelType: ModelType2.TEXT_SMALL };
|
|
3234
2926
|
}
|
|
3235
2927
|
}
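
With the Ollama branch gone, getTextModelSource reduces to a two-way choice, and every failure path converges on the bundled local models. The decision, condensed into a sketch:

    // Condensed decision logic of getTextModelSource above.
    function selectSource(hasStudioLMManager: boolean): "studiolm" | "local" {
      return process.env.USE_STUDIOLM_TEXT_MODELS === "true" && hasStudioLMManager
        ? "studiolm"
        : "local";
    }
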
|
|
3236
2928
|
/**
|
|
@@ -3241,11 +2933,11 @@ var LocalAIManager = class _LocalAIManager {
|
|
|
3241
2933
|
return Promise.resolve(null);
|
|
3242
2934
|
}
|
|
3243
2935
|
if (initPromise) {
|
|
3244
|
-
|
|
2936
|
+
logger9.info(`Waiting for ${modelType} initialization to complete...`);
|
|
3245
2937
|
await initPromise;
|
|
3246
2938
|
return Promise.resolve(null);
|
|
3247
2939
|
}
|
|
3248
|
-
|
|
2940
|
+
logger9.info(`Lazy initializing ${modelType}...`);
|
|
3249
2941
|
return initFunction();
|
|
3250
2942
|
}
|
|
3251
2943
|
/**
|
|
@@ -3257,7 +2949,7 @@ var LocalAIManager = class _LocalAIManager {
|
|
|
3257
2949
|
this.smallModelInitializingPromise = (async () => {
|
|
3258
2950
|
await this.initializeEnvironment();
|
|
3259
2951
|
await this.checkPlatformCapabilities();
|
|
3260
|
-
await this.downloadModel(
|
|
2952
|
+
await this.downloadModel(ModelType2.TEXT_SMALL);
|
|
3261
2953
|
try {
|
|
3262
2954
|
this.llama = await getLlama2();
|
|
3263
2955
|
const smallModel = await this.llama.loadModel({
|
|
@@ -3272,9 +2964,9 @@ var LocalAIManager = class _LocalAIManager {
|
|
|
3272
2964
|
this.ctx = ctx;
|
|
3273
2965
|
this.sequence = void 0;
|
|
3274
2966
|
this.smallModelInitialized = true;
|
|
3275
|
-
|
|
2967
|
+
logger9.info("Small model initialized successfully");
|
|
3276
2968
|
} catch (error) {
|
|
3277
|
-
|
|
2969
|
+
logger9.error("Failed to initialize small model:", error);
|
|
3278
2970
|
this.smallModelInitializingPromise = null;
|
|
3279
2971
|
throw error;
|
|
3280
2972
|
}
|
|
@@ -3292,7 +2984,7 @@ var LocalAIManager = class _LocalAIManager {
|
|
|
3292
2984
|
if (!this.llama) {
|
|
3293
2985
|
await this.lazyInitSmallModel();
|
|
3294
2986
|
}
|
|
3295
|
-
await this.downloadModel(
|
|
2987
|
+
await this.downloadModel(ModelType2.TEXT_LARGE);
|
|
3296
2988
|
try {
|
|
3297
2989
|
const mediumModel = await this.llama.loadModel({
|
|
3298
2990
|
gpuLayers: 43,
|
|
@@ -3301,9 +2993,9 @@ var LocalAIManager = class _LocalAIManager {
|
|
|
3301
2993
|
});
|
|
3302
2994
|
this.mediumModel = mediumModel;
|
|
3303
2995
|
this.mediumModelInitialized = true;
|
|
3304
|
-
|
|
2996
|
+
logger9.info("Medium model initialized successfully");
|
|
3305
2997
|
} catch (error) {
|
|
3306
|
-
|
|
2998
|
+
logger9.error("Failed to initialize medium model:", error);
|
|
3307
2999
|
this.mediumModelInitializingPromise = null;
|
|
3308
3000
|
throw error;
|
|
3309
3001
|
}
|
|
@@ -3311,26 +3003,6 @@ var LocalAIManager = class _LocalAIManager {
|
|
|
3311
3003
|
}
|
|
3312
3004
|
await this.mediumModelInitializingPromise;
|
|
3313
3005
|
}
|
|
3314
|
-
/**
|
|
3315
|
-
* Lazy initialize the embedding model
|
|
3316
|
-
*/
|
|
3317
|
-
async lazyInitEmbedding() {
|
|
3318
|
-
if (this.embeddingInitialized) return;
|
|
3319
|
-
if (!this.embeddingInitializingPromise) {
|
|
3320
|
-
this.embeddingInitializingPromise = (async () => {
|
|
3321
|
-
try {
|
|
3322
|
-
await this.initializeEmbedding();
|
|
3323
|
-
this.embeddingInitialized = true;
|
|
3324
|
-
logger10.info("Embedding model initialized successfully");
|
|
3325
|
-
} catch (error) {
|
|
3326
|
-
logger10.error("Failed to initialize embedding model:", error);
|
|
3327
|
-
this.embeddingInitializingPromise = null;
|
|
3328
|
-
throw error;
|
|
3329
|
-
}
|
|
3330
|
-
})();
|
|
3331
|
-
}
|
|
3332
|
-
await this.embeddingInitializingPromise;
|
|
3333
|
-
}
|
|
3334
3006
|
/**
|
|
3335
3007
|
* Lazy initialize the vision model
|
|
3336
3008
|
*/
|
|
@@ -3340,9 +3012,9 @@ var LocalAIManager = class _LocalAIManager {
|
|
|
3340
3012
|
this.visionInitializingPromise = (async () => {
|
|
3341
3013
|
try {
|
|
3342
3014
|
this.visionInitialized = true;
|
|
3343
|
-
|
|
3015
|
+
logger9.info("Vision model initialized successfully");
|
|
3344
3016
|
} catch (error) {
|
|
3345
|
-
|
|
3017
|
+
logger9.error("Failed to initialize vision model:", error);
|
|
3346
3018
|
this.visionInitializingPromise = null;
|
|
3347
3019
|
throw error;
|
|
3348
3020
|
}
|
|
@@ -3359,9 +3031,9 @@ var LocalAIManager = class _LocalAIManager {
|
|
|
3359
3031
|
this.transcriptionInitializingPromise = (async () => {
|
|
3360
3032
|
try {
|
|
3361
3033
|
this.transcriptionInitialized = true;
|
|
3362
|
-
|
|
3034
|
+
logger9.info("Transcription model initialized successfully");
|
|
3363
3035
|
} catch (error) {
|
|
3364
|
-
|
|
3036
|
+
logger9.error("Failed to initialize transcription model:", error);
|
|
3365
3037
|
this.transcriptionInitializingPromise = null;
|
|
3366
3038
|
throw error;
|
|
3367
3039
|
}
|
|
@@ -3378,9 +3050,9 @@ var LocalAIManager = class _LocalAIManager {
|
|
|
3378
3050
|
this.ttsInitializingPromise = (async () => {
|
|
3379
3051
|
try {
|
|
3380
3052
|
this.ttsInitialized = true;
|
|
3381
|
-
|
|
3053
|
+
logger9.info("TTS model initialized successfully");
|
|
3382
3054
|
} catch (error) {
|
|
3383
|
-
|
|
3055
|
+
logger9.error("Failed to initialize TTS model:", error);
|
|
3384
3056
|
this.ttsInitializingPromise = null;
|
|
3385
3057
|
throw error;
|
|
3386
3058
|
}
|
|
@@ -3388,26 +3060,6 @@ var LocalAIManager = class _LocalAIManager {
|
|
|
3388
3060
|
}
|
|
3389
3061
|
await this.ttsInitializingPromise;
|
|
3390
3062
|
}
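
Every lazyInit* method above (embedding, small and medium model, vision, transcription, TTS, StudioLM) repeats one idiom: memoize the in-flight initialization promise so concurrent callers share a single initialization, and clear it on failure so a later call can retry. The generic shape, extracted as a sketch; LazyOnce is a name introduced here:

    // Generic form of the lazy-initialization idiom used by the methods above.
    class LazyOnce {
      private initialized = false;
      private initializing: Promise<void> | null = null;

      constructor(private readonly init: () => Promise<void>) {}

      async ensure(): Promise<void> {
        if (this.initialized) return;
        if (!this.initializing) {
          this.initializing = (async () => {
            try {
              await this.init();
              this.initialized = true;
            } catch (error) {
              this.initializing = null; // clear so the next call can retry
              throw error;
            }
          })();
        }
        await this.initializing; // concurrent callers all await the same promise
      }
    }
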
|
|
3391
|
-
/**
|
|
3392
|
-
* Lazy initialize the Ollama integration
|
|
3393
|
-
*/
|
|
3394
|
-
async lazyInitOllama() {
|
|
3395
|
-
if (this.ollamaInitialized) return;
|
|
3396
|
-
if (!this.ollamaInitializingPromise) {
|
|
3397
|
-
this.ollamaInitializingPromise = (async () => {
|
|
3398
|
-
try {
|
|
3399
|
-
await this.initializeOllama();
|
|
3400
|
-
this.ollamaInitialized = true;
|
|
3401
|
-
logger10.info("Ollama initialized successfully");
|
|
3402
|
-
} catch (error) {
|
|
3403
|
-
logger10.error("Failed to initialize Ollama:", error);
|
|
3404
|
-
this.ollamaInitializingPromise = null;
|
|
3405
|
-
throw error;
|
|
3406
|
-
}
|
|
3407
|
-
})();
|
|
3408
|
-
}
|
|
3409
|
-
await this.ollamaInitializingPromise;
|
|
3410
|
-
}
|
|
3411
3063
|
/**
|
|
3412
3064
|
* Lazy initialize the StudioLM integration
|
|
3413
3065
|
*/
|
|
@@ -3418,9 +3070,9 @@ var LocalAIManager = class _LocalAIManager {
|
|
|
3418
3070
|
try {
|
|
3419
3071
|
await this.initializeStudioLM();
|
|
3420
3072
|
this.studioLMInitialized = true;
|
|
3421
|
-
|
|
3073
|
+
logger9.info("StudioLM initialized successfully");
|
|
3422
3074
|
} catch (error) {
|
|
3423
|
-
|
|
3075
|
+
logger9.error("Failed to initialize StudioLM:", error);
|
|
3424
3076
|
this.studioLMInitializingPromise = null;
|
|
3425
3077
|
throw error;
|
|
3426
3078
|
}
|
|
@@ -3430,15 +3082,15 @@ var LocalAIManager = class _LocalAIManager {
|
|
|
3430
3082
|
}
|
|
3431
3083
|
};
|
|
3432
3084
|
var localAIManager = LocalAIManager.getInstance();
|
|
3433
|
-
var
|
|
3085
|
+
var localAiPlugin = {
|
|
3434
3086
|
name: "local-ai",
|
|
3435
3087
|
description: "Local AI plugin using LLaMA models",
|
|
3436
3088
|
async init() {
|
|
3437
3089
|
try {
|
|
3438
|
-
|
|
3439
|
-
|
|
3090
|
+
logger9.debug("Initializing local-ai plugin...");
|
|
3091
|
+
logger9.success("Local AI plugin configuration validated and initialized");
|
|
3440
3092
|
} catch (error) {
|
|
3441
|
-
|
|
3093
|
+
logger9.error("Plugin initialization failed:", {
|
|
3442
3094
|
error: error instanceof Error ? error.message : String(error),
|
|
3443
3095
|
stack: error instanceof Error ? error.stack : void 0
|
|
3444
3096
|
});
|
|
@@ -3446,60 +3098,60 @@ var localAIPlugin = {
|
|
|
3446
3098
|
}
|
|
3447
3099
|
},
|
|
3448
3100
|
models: {
|
|
3449
|
-
[
|
|
3101
|
+
[ModelType2.TEXT_SMALL]: async (runtime, { prompt, stopSequences = [] }) => {
|
|
3450
3102
|
try {
|
|
3451
3103
|
const modelConfig = localAIManager.getTextModelSource();
|
|
3452
3104
|
if (modelConfig.source !== "local") {
|
|
3453
|
-
return await localAIManager.
|
|
3105
|
+
return await localAIManager.generateTextLMStudio({
|
|
3454
3106
|
prompt,
|
|
3455
3107
|
stopSequences,
|
|
3456
3108
|
runtime,
|
|
3457
|
-
modelType:
|
|
3109
|
+
modelType: ModelType2.TEXT_SMALL
|
|
3458
3110
|
});
|
|
3459
3111
|
}
|
|
3460
3112
|
return await localAIManager.generateText({
|
|
3461
3113
|
prompt,
|
|
3462
3114
|
stopSequences,
|
|
3463
3115
|
runtime,
|
|
3464
|
-
modelType:
|
|
3116
|
+
modelType: ModelType2.TEXT_SMALL
|
|
3465
3117
|
});
|
|
3466
3118
|
} catch (error) {
|
|
3467
|
-
|
|
3119
|
+
logger9.error("Error in TEXT_SMALL handler:", error);
|
|
3468
3120
|
throw error;
|
|
3469
3121
|
}
|
|
3470
3122
|
},
|
|
3471
|
-
[
|
|
3123
|
+
[ModelType2.TEXT_LARGE]: async (runtime, { prompt, stopSequences = [] }) => {
|
|
3472
3124
|
try {
|
|
3473
3125
|
const modelConfig = localAIManager.getTextModelSource();
|
|
3474
3126
|
if (modelConfig.source !== "local") {
|
|
3475
|
-
return await localAIManager.
|
|
3127
|
+
return await localAIManager.generateTextLMStudio({
|
|
3476
3128
|
prompt,
|
|
3477
3129
|
stopSequences,
|
|
3478
3130
|
runtime,
|
|
3479
|
-
modelType:
|
|
3131
|
+
modelType: ModelType2.TEXT_LARGE
|
|
3480
3132
|
});
|
|
3481
3133
|
}
|
|
3482
3134
|
return await localAIManager.generateText({
|
|
3483
3135
|
prompt,
|
|
3484
3136
|
stopSequences,
|
|
3485
3137
|
runtime,
|
|
3486
|
-
modelType:
|
|
3138
|
+
modelType: ModelType2.TEXT_LARGE
|
|
3487
3139
|
});
|
|
3488
3140
|
} catch (error) {
|
|
3489
|
-
|
|
3141
|
+
logger9.error("Error in TEXT_LARGE handler:", error);
|
|
3490
3142
|
throw error;
|
|
3491
3143
|
}
|
|
3492
3144
|
},
|
|
3493
|
-
[
|
|
3145
|
+
[ModelType2.TEXT_EMBEDDING]: async (_runtime, params) => {
|
|
3494
3146
|
const text = params?.text;
|
|
3495
3147
|
try {
|
|
3496
3148
|
if (!text) {
|
|
3497
|
-
|
|
3149
|
+
logger9.debug("Null or empty text input for embedding, returning zero vector");
|
|
3498
3150
|
return new Array(384).fill(0);
|
|
3499
3151
|
}
|
|
3500
3152
|
return await localAIManager.generateEmbedding(text);
|
|
3501
3153
|
} catch (error) {
|
|
3502
|
-
|
|
3154
|
+
logger9.error("Error in TEXT_EMBEDDING handler:", {
|
|
3503
3155
|
error: error instanceof Error ? error.message : String(error),
|
|
3504
3156
|
fullText: text,
|
|
3505
3157
|
textType: typeof text,
|
|
@@ -3508,9 +3160,9 @@ var localAIPlugin = {
|
|
|
3508
3160
|
return new Array(384).fill(0);
|
|
3509
3161
|
}
|
|
3510
3162
|
},
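
Unlike the text handlers, the TEXT_EMBEDDING handler above never rethrows: null input and failures both yield a zero vector, here hard-coded to 384 dimensions (the output size of bge-small-en-v1.5). The policy as a sketch; safeEmbed is a hypothetical wrapper name:

    // Sketch of the embedding handler's failure policy above.
    async function safeEmbed(text: string | undefined, dims = 384): Promise<number[]> {
      if (!text) return new Array(dims).fill(0); // null/empty input -> zero vector
      try {
        return await localAIManager.generateEmbedding(text);
      } catch {
        return new Array(dims).fill(0); // failures also degrade to a zero vector
      }
    }
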
|
|
3511
|
-
[
|
|
3163
|
+
[ModelType2.OBJECT_SMALL]: async (runtime, params) => {
|
|
3512
3164
|
try {
|
|
3513
|
-
|
|
3165
|
+
logger9.info("OBJECT_SMALL handler - Processing request:", {
|
|
3514
3166
|
prompt: params.prompt,
|
|
3515
3167
|
hasSchema: !!params.schema,
|
|
3516
3168
|
temperature: params.temperature
|
|
@@ -3522,18 +3174,18 @@ var localAIPlugin = {
|
|
|
3522
3174
|
const modelConfig = localAIManager.getTextModelSource();
|
|
3523
3175
|
let textResponse;
|
|
3524
3176
|
if (modelConfig.source !== "local") {
|
|
3525
|
-
textResponse = await localAIManager.
|
|
3177
|
+
textResponse = await localAIManager.generateTextLMStudio({
|
|
3526
3178
|
prompt: jsonPrompt,
|
|
3527
3179
|
stopSequences: params.stopSequences,
|
|
3528
3180
|
runtime,
|
|
3529
|
-
modelType:
|
|
3181
|
+
modelType: ModelType2.TEXT_SMALL
|
|
3530
3182
|
});
|
|
3531
3183
|
} else {
|
|
3532
3184
|
textResponse = await localAIManager.generateText({
|
|
3533
3185
|
prompt: jsonPrompt,
|
|
3534
3186
|
stopSequences: params.stopSequences,
|
|
3535
3187
|
runtime,
|
|
3536
|
-
modelType:
|
|
3188
|
+
modelType: ModelType2.TEXT_SMALL
|
|
3537
3189
|
});
|
|
3538
3190
|
}
|
|
3539
3191
|
try {
|
|
@@ -3551,17 +3203,17 @@ var localAIPlugin = {
|
|
|
3551
3203
|
return text.trim();
|
|
3552
3204
|
};
|
|
3553
3205
|
const extractedJsonText = extractJSON(textResponse);
|
|
3554
|
-
|
|
3206
|
+
logger9.debug("Extracted JSON text:", extractedJsonText);
|
|
3555
3207
|
let jsonObject;
|
|
3556
3208
|
try {
|
|
3557
3209
|
jsonObject = JSON.parse(extractedJsonText);
|
|
3558
3210
|
} catch (parseError) {
|
|
3559
|
-
|
|
3211
|
+
logger9.debug("Initial JSON parse failed, attempting to fix common issues");
|
|
3560
3212
|
const fixedJson = extractedJsonText.replace(/:\s*"([^"]*)(?:\n)([^"]*)"/g, ': "$1\\n$2"').replace(/"([^"]*?)[^a-zA-Z0-9\s\.,;:\-_\(\)"'\[\]{}]([^"]*?)"/g, '"$1$2"').replace(/(\s*)(\w+)(\s*):/g, '$1"$2"$3:').replace(/,(\s*[\]}])/g, "$1");
|
|
3561
3213
|
try {
|
|
3562
3214
|
jsonObject = JSON.parse(fixedJson);
|
|
3563
3215
|
} catch (finalError) {
|
|
3564
|
-
|
|
3216
|
+
logger9.error("Failed to parse JSON after fixing:", finalError);
|
|
3565
3217
|
throw new Error("Invalid JSON returned from model");
|
|
3566
3218
|
}
|
|
3567
3219
|
}
|
|
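
When JSON.parse fails on the extracted text, the handler applies a chain of heuristic regex repairs before parsing once more. The same chain as a standalone function; it is best-effort string surgery, not a real parser, and the bare-key rule in particular can misfire on string values containing "word:" sequences:

    // The heuristic JSON repair chain used above, with the regexes copied verbatim.
    function repairJson(text: string): string {
      return text
        .replace(/:\s*"([^"]*)(?:\n)([^"]*)"/g, ': "$1\\n$2"') // escape a raw newline inside a string value
        .replace(/"([^"]*?)[^a-zA-Z0-9\s\.,;:\-_\(\)"'\[\]{}]([^"]*?)"/g, '"$1$2"') // drop an odd character inside a string
        .replace(/(\s*)(\w+)(\s*):/g, '$1"$2"$3:') // quote bare object keys
        .replace(/,(\s*[\]}])/g, "$1"); // remove trailing commas before ] or }
    }
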
@@ -3573,23 +3225,23 @@ var localAIPlugin = {
|
|
|
3573
3225
|
}
|
|
3574
3226
|
}
|
|
3575
3227
|
} catch (schemaError) {
|
|
3576
|
-
|
|
3228
|
+
logger9.error("Schema validation failed:", schemaError);
|
|
3577
3229
|
}
|
|
3578
3230
|
}
|
|
3579
3231
|
return jsonObject;
|
|
3580
3232
|
} catch (parseError) {
|
|
3581
|
-
|
|
3582
|
-
|
|
3233
|
+
logger9.error("Failed to parse JSON:", parseError);
|
|
3234
|
+
logger9.error("Raw response:", textResponse);
|
|
3583
3235
|
throw new Error("Invalid JSON returned from model");
|
|
3584
3236
|
}
|
|
3585
3237
|
} catch (error) {
|
|
3586
|
-
|
|
3238
|
+
logger9.error("Error in OBJECT_SMALL handler:", error);
|
|
3587
3239
|
throw error;
|
|
3588
3240
|
}
|
|
3589
3241
|
},
|
|
3590
|
-
[
|
|
3242
|
+
[ModelType2.OBJECT_LARGE]: async (runtime, params) => {
|
|
3591
3243
|
try {
|
|
3592
|
-
|
|
3244
|
+
logger9.info("OBJECT_LARGE handler - Processing request:", {
|
|
3593
3245
|
prompt: params.prompt,
|
|
3594
3246
|
hasSchema: !!params.schema,
|
|
3595
3247
|
temperature: params.temperature
|
|
@@ -3601,18 +3253,18 @@ var localAIPlugin = {
|
|
|
3601
3253
|
const modelConfig = localAIManager.getTextModelSource();
|
|
3602
3254
|
let textResponse;
|
|
3603
3255
|
if (modelConfig.source !== "local") {
|
|
3604
|
-
textResponse = await localAIManager.
|
|
3256
|
+
textResponse = await localAIManager.generateTextLMStudio({
|
|
3605
3257
|
prompt: jsonPrompt,
|
|
3606
3258
|
stopSequences: params.stopSequences,
|
|
3607
3259
|
runtime,
|
|
3608
|
-
modelType:
|
|
3260
|
+
modelType: ModelType2.TEXT_LARGE
|
|
3609
3261
|
});
|
|
3610
3262
|
} else {
|
|
3611
3263
|
textResponse = await localAIManager.generateText({
|
|
3612
3264
|
prompt: jsonPrompt,
|
|
3613
3265
|
stopSequences: params.stopSequences,
|
|
3614
3266
|
runtime,
|
|
3615
|
-
modelType:
|
|
3267
|
+
modelType: ModelType2.TEXT_LARGE
|
|
3616
3268
|
});
|
|
3617
3269
|
}
|
|
3618
3270
|
try {
|
|
@@ -3634,17 +3286,17 @@ var localAIPlugin = {
|
|
|
3634
3286
|
};
|
|
3635
3287
|
const extractedJsonText = extractJSON(textResponse);
|
|
3636
3288
|
const cleanedJsonText = cleanupJSON(extractedJsonText);
|
|
3637
|
-
|
|
3289
|
+
logger9.debug("Extracted JSON text:", cleanedJsonText);
|
|
3638
3290
|
let jsonObject;
|
|
3639
3291
|
try {
|
|
3640
3292
|
jsonObject = JSON.parse(cleanedJsonText);
|
|
3641
3293
|
} catch (parseError) {
|
|
3642
|
-
|
|
3294
|
+
logger9.debug("Initial JSON parse failed, attempting to fix common issues");
|
|
3643
3295
|
const fixedJson = cleanedJsonText.replace(/:\s*"([^"]*)(?:\n)([^"]*)"/g, ': "$1\\n$2"').replace(/"([^"]*?)[^a-zA-Z0-9\s\.,;:\-_\(\)"'\[\]{}]([^"]*?)"/g, '"$1$2"').replace(/(\s*)(\w+)(\s*):/g, '$1"$2"$3:').replace(/,(\s*[\]}])/g, "$1");
|
|
3644
3296
|
try {
|
|
3645
3297
|
jsonObject = JSON.parse(fixedJson);
|
|
3646
3298
|
} catch (finalError) {
|
|
3647
|
-
|
|
3299
|
+
logger9.error("Failed to parse JSON after fixing:", finalError);
|
|
3648
3300
|
throw new Error("Invalid JSON returned from model");
|
|
3649
3301
|
}
|
|
3650
3302
|
}
|
|
@@ -3656,43 +3308,43 @@ var localAIPlugin = {
|
|
|
3656
3308
|
}
|
|
3657
3309
|
}
|
|
3658
3310
|
} catch (schemaError) {
|
|
3659
|
-
|
|
3311
|
+
logger9.error("Schema validation failed:", schemaError);
|
|
3660
3312
|
}
|
|
3661
3313
|
}
|
|
3662
3314
|
return jsonObject;
|
|
3663
3315
|
} catch (parseError) {
|
|
3664
|
-
|
|
3665
|
-
|
|
3316
|
+
logger9.error("Failed to parse JSON:", parseError);
|
|
3317
|
+
logger9.error("Raw response:", textResponse);
|
|
3666
3318
|
throw new Error("Invalid JSON returned from model");
|
|
3667
3319
|
}
|
|
3668
3320
|
} catch (error) {
|
|
3669
|
-
|
|
3321
|
+
logger9.error("Error in OBJECT_LARGE handler:", error);
|
|
3670
3322
|
throw error;
|
|
3671
3323
|
}
|
|
3672
3324
|
},
|
|
3673
|
-
[
|
|
3325
|
+
[ModelType2.TEXT_TOKENIZER_ENCODE]: async (_runtime, { text }) => {
|
|
3674
3326
|
try {
|
|
3675
3327
|
const manager = localAIManager.getTokenizerManager();
|
|
3676
3328
|
const config = localAIManager.getActiveModelConfig();
|
|
3677
3329
|
return await manager.encode(text, config);
|
|
3678
3330
|
} catch (error) {
|
|
3679
|
-
|
|
3331
|
+
logger9.error("Error in TEXT_TOKENIZER_ENCODE handler:", error);
|
|
3680
3332
|
throw error;
|
|
3681
3333
|
}
|
|
3682
3334
|
},
|
|
3683
|
-
[
|
|
3335
|
+
[ModelType2.TEXT_TOKENIZER_DECODE]: async (_runtime, { tokens }) => {
|
|
3684
3336
|
try {
|
|
3685
3337
|
const manager = localAIManager.getTokenizerManager();
|
|
3686
3338
|
const config = localAIManager.getActiveModelConfig();
|
|
3687
3339
|
return await manager.decode(tokens, config);
|
|
3688
3340
|
} catch (error) {
|
|
3689
|
-
|
|
3341
|
+
logger9.error("Error in TEXT_TOKENIZER_DECODE handler:", error);
|
|
3690
3342
|
throw error;
|
|
3691
3343
|
}
|
|
3692
3344
|
},
|
|
3693
|
-
[
|
|
3345
|
+
[ModelType2.IMAGE_DESCRIPTION]: async (_runtime, imageUrl) => {
|
|
3694
3346
|
try {
|
|
3695
|
-
|
|
3347
|
+
logger9.info("Processing image from URL:", imageUrl);
|
|
3696
3348
|
const response = await fetch(imageUrl);
|
|
3697
3349
|
if (!response.ok) {
|
|
3698
3350
|
throw new Error(`Failed to fetch image: ${response.statusText}`);
|
|
@@ -3701,32 +3353,32 @@ var localAIPlugin = {
|
|
|
3701
3353
|
const mimeType = response.headers.get("content-type") || "image/jpeg";
|
|
3702
3354
|
return await localAIManager.describeImage(buffer, mimeType);
|
|
3703
3355
|
} catch (error) {
|
|
3704
|
-
|
|
3356
|
+
logger9.error("Error in IMAGE_DESCRIPTION handler:", {
|
|
3705
3357
|
error: error instanceof Error ? error.message : String(error),
|
|
3706
3358
|
imageUrl
|
|
3707
3359
|
});
|
|
3708
3360
|
throw error;
|
|
3709
3361
|
}
|
|
3710
3362
|
},
|
|
3711
|
-
[
|
|
3363
|
+
[ModelType2.TRANSCRIPTION]: async (_runtime, audioBuffer) => {
|
|
3712
3364
|
try {
|
|
3713
|
-
|
|
3365
|
+
logger9.info("Processing audio transcription:", {
|
|
3714
3366
|
bufferSize: audioBuffer.length
|
|
3715
3367
|
});
|
|
3716
3368
|
return await localAIManager.transcribeAudio(audioBuffer);
|
|
3717
3369
|
} catch (error) {
|
|
3718
|
-
|
|
3370
|
+
logger9.error("Error in TRANSCRIPTION handler:", {
|
|
3719
3371
|
error: error instanceof Error ? error.message : String(error),
|
|
3720
3372
|
bufferSize: audioBuffer.length
|
|
3721
3373
|
});
|
|
3722
3374
|
throw error;
|
|
3723
3375
|
}
|
|
3724
3376
|
},
|
|
3725
|
-
[
|
|
3377
|
+
[ModelType2.TEXT_TO_SPEECH]: async (_runtime, text) => {
|
|
3726
3378
|
try {
|
|
3727
3379
|
return await localAIManager.generateSpeech(text);
|
|
3728
3380
|
} catch (error) {
|
|
3729
|
-
|
|
3381
|
+
logger9.error("Error in TEXT_TO_SPEECH handler:", {
|
|
3730
3382
|
error: error instanceof Error ? error.message : String(error),
|
|
3731
3383
|
textLength: text.length
|
|
3732
3384
|
});
|
|
@@ -3742,21 +3394,21 @@ var localAIPlugin = {
|
|
|
3742
3394
|
name: "local_ai_test_initialization",
|
|
3743
3395
|
fn: async (runtime) => {
|
|
3744
3396
|
try {
|
|
3745
|
-
|
|
3746
|
-
const result = await runtime.useModel(
|
|
3397
|
+
logger9.info("Starting initialization test");
|
|
3398
|
+
const result = await runtime.useModel(ModelType2.TEXT_SMALL, {
|
|
3747
3399
|
prompt: "Debug Mode: Test initialization. Respond with 'Initialization successful' if you can read this.",
|
|
3748
3400
|
stopSequences: []
|
|
3749
3401
|
});
|
|
3750
|
-
|
|
3402
|
+
logger9.info("Model response:", result);
|
|
3751
3403
|
if (!result || typeof result !== "string") {
|
|
3752
3404
|
throw new Error("Invalid response from model");
|
|
3753
3405
|
}
|
|
3754
3406
|
if (!result.includes("successful")) {
|
|
3755
3407
|
throw new Error("Model response does not indicate success");
|
|
3756
3408
|
}
|
|
3757
|
-
|
|
3409
|
+
logger9.success("Initialization test completed successfully");
|
|
3758
3410
|
} catch (error) {
|
|
3759
|
-
|
|
3411
|
+
logger9.error("Initialization test failed:", {
|
|
3760
3412
|
error: error instanceof Error ? error.message : String(error),
|
|
3761
3413
|
stack: error instanceof Error ? error.stack : void 0
|
|
3762
3414
|
});
|
|
@@ -3768,21 +3420,21 @@ var localAIPlugin = {
|
|
|
3768
3420
|
name: "local_ai_test_text_large",
|
|
3769
3421
|
fn: async (runtime) => {
|
|
3770
3422
|
try {
|
|
3771
|
-
|
|
3772
|
-
const result = await runtime.useModel(
|
|
3423
|
+
logger9.info("Starting TEXT_LARGE model test");
|
|
3424
|
+
const result = await runtime.useModel(ModelType2.TEXT_LARGE, {
|
|
3773
3425
|
prompt: "Debug Mode: Generate a one-sentence response about artificial intelligence.",
|
|
3774
3426
|
stopSequences: []
|
|
3775
3427
|
});
|
|
3776
|
-
|
|
3428
|
+
logger9.info("Large model response:", result);
|
|
3777
3429
|
if (!result || typeof result !== "string") {
|
|
3778
3430
|
throw new Error("Invalid response from large model");
|
|
3779
3431
|
}
|
|
3780
3432
|
if (result.length < 10) {
|
|
3781
3433
|
throw new Error("Response too short, possible model failure");
|
|
3782
3434
|
}
|
|
3783
|
-
|
|
3435
|
+
logger9.success("TEXT_LARGE test completed successfully");
|
|
3784
3436
|
} catch (error) {
|
|
3785
|
-
|
|
3437
|
+
logger9.error("TEXT_LARGE test failed:", {
|
|
3786
3438
|
error: error instanceof Error ? error.message : String(error),
|
|
3787
3439
|
stack: error instanceof Error ? error.stack : void 0
|
|
3788
3440
|
});
|
|
@@ -3794,11 +3446,11 @@ var localAIPlugin = {
|
|
|
3794
3446
|
name: "local_ai_test_text_embedding",
|
|
3795
3447
|
fn: async (runtime) => {
|
|
3796
3448
|
try {
|
|
3797
|
-
|
|
3798
|
-
const embedding = await runtime.useModel(
|
|
3449
|
+
logger9.info("Starting TEXT_EMBEDDING test");
|
|
3450
|
+
const embedding = await runtime.useModel(ModelType2.TEXT_EMBEDDING, {
|
|
3799
3451
|
text: "This is a test of the text embedding model."
|
|
3800
3452
|
});
|
|
3801
|
-
|
|
3453
|
+
logger9.info("Embedding generated with dimensions:", embedding.length);
|
|
3802
3454
|
if (!Array.isArray(embedding)) {
|
|
3803
3455
|
throw new Error("Embedding is not an array");
|
|
3804
3456
|
}
|
|
@@ -3808,13 +3460,13 @@ var localAIPlugin = {
|
|
|
3808
3460
|
if (embedding.some((val) => typeof val !== "number")) {
|
|
3809
3461
|
throw new Error("Embedding contains non-numeric values");
|
|
3810
3462
|
}
|
|
3811
|
-
const nullEmbedding = await runtime.useModel(
|
|
3463
|
+
const nullEmbedding = await runtime.useModel(ModelType2.TEXT_EMBEDDING, null);
|
|
3812
3464
|
if (!Array.isArray(nullEmbedding) || nullEmbedding.some((val) => val !== 0)) {
|
|
3813
3465
|
throw new Error("Null input did not return zero vector");
|
|
3814
3466
|
}
|
|
3815
|
-
|
|
3467
|
+
logger9.success("TEXT_EMBEDDING test completed successfully");
|
|
3816
3468
|
} catch (error) {
|
|
3817
|
-
|
|
3469
|
+
logger9.error("TEXT_EMBEDDING test failed:", {
|
|
3818
3470
|
error: error instanceof Error ? error.message : String(error),
|
|
3819
3471
|
stack: error instanceof Error ? error.stack : void 0
|
|
3820
3472
|
});
|
|
@@ -3826,10 +3478,10 @@ var localAIPlugin = {
|
|
|
3826
3478
|
name: "local_ai_test_tokenizer_encode",
|
|
3827
3479
|
fn: async (runtime) => {
|
|
3828
3480
|
try {
|
|
3829
|
-
|
|
3481
|
+
logger9.info("Starting TEXT_TOKENIZER_ENCODE test");
|
|
3830
3482
|
const text = "Hello tokenizer test!";
|
|
3831
|
-
const tokens = await runtime.useModel(
|
|
3832
|
-
|
|
3483
|
+
const tokens = await runtime.useModel(ModelType2.TEXT_TOKENIZER_ENCODE, { text });
|
|
3484
|
+
logger9.info("Encoded tokens:", { count: tokens.length });
|
|
3833
3485
|
if (!Array.isArray(tokens)) {
|
|
3834
3486
|
throw new Error("Tokens output is not an array");
|
|
3835
3487
|
}
|
|
@@ -3839,9 +3491,9 @@ var localAIPlugin = {
|
|
|
3839
3491
|
if (tokens.some((token) => !Number.isInteger(token))) {
|
|
3840
3492
|
throw new Error("Tokens contain non-integer values");
|
|
3841
3493
|
}
|
|
3842
|
-
|
|
3494
|
+
logger9.success("TEXT_TOKENIZER_ENCODE test completed successfully");
|
|
3843
3495
|
} catch (error) {
|
|
3844
|
-
|
|
3496
|
+
logger9.error("TEXT_TOKENIZER_ENCODE test failed:", {
|
|
3845
3497
|
error: error instanceof Error ? error.message : String(error),
|
|
3846
3498
|
stack: error instanceof Error ? error.stack : void 0
|
|
3847
3499
|
});
|
|
@@ -3853,24 +3505,24 @@ var localAIPlugin = {
|
|
|
3853
3505
|
name: "local_ai_test_tokenizer_decode",
|
|
3854
3506
|
fn: async (runtime) => {
|
|
3855
3507
|
try {
|
|
3856
|
-
|
|
3508
|
+
logger9.info("Starting TEXT_TOKENIZER_DECODE test");
|
|
3857
3509
|
const originalText = "Hello tokenizer test!";
|
|
3858
|
-
const tokens = await runtime.useModel(
|
|
3510
|
+
const tokens = await runtime.useModel(ModelType2.TEXT_TOKENIZER_ENCODE, {
|
|
3859
3511
|
text: originalText
|
|
3860
3512
|
});
|
|
3861
|
-
const decodedText = await runtime.useModel(
|
|
3513
|
+
const decodedText = await runtime.useModel(ModelType2.TEXT_TOKENIZER_DECODE, {
|
|
3862
3514
|
tokens
|
|
3863
3515
|
});
|
|
3864
|
-
|
|
3516
|
+
logger9.info("Round trip tokenization:", {
|
|
3865
3517
|
original: originalText,
|
|
3866
3518
|
decoded: decodedText
|
|
3867
3519
|
});
|
|
3868
3520
|
if (typeof decodedText !== "string") {
|
|
3869
3521
|
throw new Error("Decoded output is not a string");
|
|
3870
3522
|
}
|
|
3871
|
-
|
|
3523
|
+
logger9.success("TEXT_TOKENIZER_DECODE test completed successfully");
|
|
3872
3524
|
} catch (error) {
|
|
3873
|
-
|
|
3525
|
+
logger9.error("TEXT_TOKENIZER_DECODE test failed:", {
|
|
3874
3526
|
error: error instanceof Error ? error.message : String(error),
|
|
3875
3527
|
stack: error instanceof Error ? error.stack : void 0
|
|
3876
3528
|
});
|
|
@@ -3882,10 +3534,10 @@ var localAIPlugin = {
3882 3534 |   name: "local_ai_test_image_description",
3883 3535 |   fn: async (runtime) => {
3884 3536 |   try {
3885      | -
     3537 | + logger9.info("Starting IMAGE_DESCRIPTION test");
3886 3538 |   const imageUrl = "https://raw.githubusercontent.com/microsoft/FLAML/main/website/static/img/flaml.png";
3887      | - const result = await runtime.useModel(
3888      | -
     3539 | + const result = await runtime.useModel(ModelType2.IMAGE_DESCRIPTION, imageUrl);
     3540 | + logger9.info("Image description result:", result);
3889 3541 |   if (!result || typeof result !== "object") {
3890 3542 |   throw new Error("Invalid response format");
3891 3543 |   }
@@ -3895,9 +3547,9 @@ var localAIPlugin = {
3895 3547 |   if (typeof result.title !== "string" || typeof result.description !== "string") {
3896 3548 |   throw new Error("Title or description is not a string");
3897 3549 |   }
3898      | -
     3550 | + logger9.success("IMAGE_DESCRIPTION test completed successfully");
3899 3551 |   } catch (error) {
3900      | -
     3552 | + logger9.error("IMAGE_DESCRIPTION test failed:", {
3901 3553 |   error: error instanceof Error ? error.message : String(error),
3902 3554 |   stack: error instanceof Error ? error.stack : void 0
3903 3555 |   });
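The IMAGE_DESCRIPTION test passes a bare URL rather than an options object, and expects a `{ title, description }` result with both fields as strings. A sketch of that contract as a typed helper; the `ImageDescription` interface name is invented here for illustration:

    import { ModelType, type IAgentRuntime } from "@elizaos/core";

    interface ImageDescription {
      title: string;
      description: string;
    }

    // Request a caption for an image URL and narrow the result to the shape
    // the test asserts: a non-null object with string title and description.
    async function describeImage(runtime: IAgentRuntime, imageUrl: string): Promise<ImageDescription> {
      const result = await runtime.useModel(ModelType.IMAGE_DESCRIPTION, imageUrl);
      if (!result || typeof result !== "object") {
        throw new Error("Invalid response format");
      }
      const { title, description } = result as Partial<ImageDescription>;
      if (typeof title !== "string" || typeof description !== "string") {
        throw new Error("Title or description is not a string");
      }
      return { title, description };
    }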
@@ -3909,7 +3561,7 @@ var localAIPlugin = {
3909 3561 |   name: "local_ai_test_transcription",
3910 3562 |   fn: async (runtime) => {
3911 3563 |   try {
3912      | -
     3564 | + logger9.info("Starting TRANSCRIPTION test");
3913 3565 |   const audioData = new Uint8Array([
3914 3566 |   82,
3915 3567 |   73,
@@ -3933,14 +3585,14 @@ var localAIPlugin = {
3933 3585 |   // "fmt "
3934 3586 |   ]);
3935 3587 |   const audioBuffer = Buffer.from(audioData);
3936      | - const transcription = await runtime.useModel(
3937      | -
     3588 | + const transcription = await runtime.useModel(ModelType2.TRANSCRIPTION, audioBuffer);
     3589 | + logger9.info("Transcription result:", transcription);
3938 3590 |   if (typeof transcription !== "string") {
3939 3591 |   throw new Error("Transcription result is not a string");
3940 3592 |   }
3941      | -
     3593 | + logger9.success("TRANSCRIPTION test completed successfully");
3942 3594 |   } catch (error) {
3943      | -
     3595 | + logger9.error("TRANSCRIPTION test failed:", {
3944 3596 |   error: error instanceof Error ? error.message : String(error),
3945 3597 |   stack: error instanceof Error ? error.stack : void 0
3946 3598 |   });
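The byte literals at the top of the transcription test (82, 73, ...) are ASCII for "RI", and the `// "fmt "` comment suggests the Uint8Array is a stub RIFF/WAVE header rather than real audio. A sketch of how such a stub could be built more legibly; the size fields and data chunk of a real WAV file are deliberately omitted here, so this is header-shaped bytes for a smoke test, not playable audio:

    import { ModelType, type IAgentRuntime } from "@elizaos/core";

    // "RIFF" is bytes 82, 73, 70, 70; "WAVE" and "fmt " follow in a real file.
    // Chunk sizes and sample data are omitted, as in the test's stub.
    const stubWav = Buffer.concat([
      Buffer.from("RIFF"),
      Buffer.from("WAVE"),
      Buffer.from("fmt "),
    ]);

    // Mirrors the test: pass a Buffer, expect a string back.
    async function transcribe(runtime: IAgentRuntime, audio: Buffer): Promise<string> {
      const transcription = await runtime.useModel(ModelType.TRANSCRIPTION, audio);
      if (typeof transcription !== "string") {
        throw new Error("Transcription result is not a string");
      }
      return transcription;
    }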
@@ -3952,9 +3604,9 @@ var localAIPlugin = {
3952 3604 |   name: "local_ai_test_text_to_speech",
3953 3605 |   fn: async (runtime) => {
3954 3606 |   try {
3955      | -
     3607 | + logger9.info("Starting TEXT_TO_SPEECH test");
3956 3608 |   const testText = "This is a test of the text to speech system.";
3957      | - const audioStream = await runtime.useModel(
     3609 | + const audioStream = await runtime.useModel(ModelType2.TEXT_TO_SPEECH, testText);
3958 3610 |   if (!(audioStream instanceof Readable2)) {
3959 3611 |   throw new Error("TTS output is not a readable stream");
3960 3612 |   }
@@ -3972,9 +3624,9 @@ var localAIPlugin = {
3972 3624 |   });
3973 3625 |   audioStream.on("error", reject);
3974 3626 |   });
3975      | -
     3627 | + logger9.success("TEXT_TO_SPEECH test completed successfully");
3976 3628 |   } catch (error) {
3977      | -
     3629 | + logger9.error("TEXT_TO_SPEECH test failed:", {
3978 3630 |   error: error instanceof Error ? error.message : String(error),
3979 3631 |   stack: error instanceof Error ? error.stack : void 0
3980 3632 |   });
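The TTS test wraps stream consumption in a Promise; the visible `audioStream.on("error", reject)` line is the tail of that wrapper. A sketch of the full pattern, draining the Readable into a single buffer (same assumed @elizaos/core names as above):

    import { Readable } from "node:stream";
    import { ModelType, type IAgentRuntime } from "@elizaos/core";

    // Synthesize speech, verify a stream came back, then collect it into a Buffer.
    async function synthesize(runtime: IAgentRuntime, text: string): Promise<Buffer> {
      const audioStream = await runtime.useModel(ModelType.TEXT_TO_SPEECH, text);
      if (!(audioStream instanceof Readable)) {
        throw new Error("TTS output is not a readable stream");
      }
      const chunks: Buffer[] = [];
      return new Promise<Buffer>((resolve, reject) => {
        audioStream.on("data", (chunk: Buffer) => chunks.push(chunk));
        audioStream.on("end", () => resolve(Buffer.concat(chunks)));
        audioStream.on("error", reject);
      });
    }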
@@ -3986,9 +3638,9 @@ var localAIPlugin = {
3986 3638 |   }
3987 3639 |   ]
3988 3640 |   };
3989      | - var index_default =
     3641 | + var index_default = localAiPlugin;
3990 3642 |   export {
3991 3643 |   index_default as default,
3992      | -
     3644 | + localAiPlugin
3993 3645 |   };
3994 3646 |   //# sourceMappingURL=index.js.map
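The final hunk rewrites the module's exports: `index_default` is now assigned from `localAiPlugin`, and the named export changes to match (the hunk context line still shows the old `localAIPlugin` spelling, which suggests the bundled variable was renamed between versions). Since the default export is just an alias for the named one, either import form resolves to the same object:

    // Default and named imports point at the same plugin object,
    // per the export block above.
    import localAiPlugin from "@elizaos/plugin-local-ai";
    import { localAiPlugin as namedExport } from "@elizaos/plugin-local-ai";

    console.log(localAiPlugin === namedExport); // true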