@elizaos/plugin-local-ai 1.0.0-beta.7 → 1.0.0-beta.8
This diff shows the published contents of two package versions as released to a supported public registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in the registry.
- package/LICENSE +1 -1
- package/README.md +12 -12
- package/dist/index.js +299 -635
- package/dist/index.js.map +1 -1
- package/package.json +3 -3
package/dist/index.js
CHANGED
@@ -1,9 +1,10 @@
 // src/index.ts
 import fs5 from "node:fs";
+import os3 from "node:os";
 import path5 from "node:path";
 import { Readable as Readable2 } from "node:stream";
 import { fileURLToPath } from "node:url";
-import { ModelType as
+import { ModelType as ModelType2, logger as logger9 } from "@elizaos/core";
 import { EmbeddingModel, FlagEmbedding } from "fastembed";
 import {
   LlamaChatSession,
@@ -16,15 +17,6 @@ import { z } from "zod";
 var configSchema = z.object({
   USE_LOCAL_AI: z.boolean().default(true),
   USE_STUDIOLM_TEXT_MODELS: z.boolean().default(false),
-  USE_OLLAMA_TEXT_MODELS: z.boolean().default(false),
-  // Ollama Configuration
-  OLLAMA_SERVER_URL: z.string().default("http://localhost:11434"),
-  OLLAMA_MODEL: z.string().default("deepseek-r1-distill-qwen-7b"),
-  USE_OLLAMA_EMBEDDING: z.boolean().default(false),
-  OLLAMA_EMBEDDING_MODEL: z.string().default(""),
-  SMALL_OLLAMA_MODEL: z.string().default("deepseek-r1:1.5b"),
-  MEDIUM_OLLAMA_MODEL: z.string().default("deepseek-r1:7b"),
-  LARGE_OLLAMA_MODEL: z.string().default("deepseek-r1:7b"),
   // StudioLM Configuration
   STUDIOLM_SERVER_URL: z.string().default("http://localhost:1234"),
   STUDIOLM_SMALL_MODEL: z.string().default("lmstudio-community/deepseek-r1-distill-qwen-1.5b"),
@@ -34,16 +26,12 @@ var configSchema = z.object({
 function validateModelConfig(config) {
   logger.info("Validating model configuration with values:", {
     USE_LOCAL_AI: config.USE_LOCAL_AI,
-    USE_STUDIOLM_TEXT_MODELS: config.USE_STUDIOLM_TEXT_MODELS,
-    USE_OLLAMA_TEXT_MODELS: config.USE_OLLAMA_TEXT_MODELS
+    USE_STUDIOLM_TEXT_MODELS: config.USE_STUDIOLM_TEXT_MODELS
   });
   if (!config.USE_LOCAL_AI) {
     config.USE_LOCAL_AI = true;
     logger.info("Setting USE_LOCAL_AI to true as it's required");
   }
-  if (config.USE_STUDIOLM_TEXT_MODELS && config.USE_OLLAMA_TEXT_MODELS) {
-    throw new Error("StudioLM and Ollama text models cannot be enabled simultaneously");
-  }
   logger.info("Configuration is valid");
 }
 async function validateConfig(config) {
@@ -51,19 +39,11 @@ async function validateConfig(config) {
   const booleanConfig = {
     USE_LOCAL_AI: true,
     // Always true
-    USE_STUDIOLM_TEXT_MODELS: config.USE_STUDIOLM_TEXT_MODELS === "true",
-    USE_OLLAMA_TEXT_MODELS: config.USE_OLLAMA_TEXT_MODELS === "true",
-    USE_OLLAMA_EMBEDDING: config.USE_OLLAMA_EMBEDDING === "true"
+    USE_STUDIOLM_TEXT_MODELS: config.USE_STUDIOLM_TEXT_MODELS === "true"
   };
   validateModelConfig(booleanConfig);
   const fullConfig = {
     ...booleanConfig,
-    OLLAMA_SERVER_URL: config.OLLAMA_SERVER_URL || "http://localhost:11434",
-    OLLAMA_MODEL: config.OLLAMA_MODEL || "deepseek-r1-distill-qwen-7b",
-    OLLAMA_EMBEDDING_MODEL: config.OLLAMA_EMBEDDING_MODEL || "",
-    SMALL_OLLAMA_MODEL: config.SMALL_OLLAMA_MODEL || "deepseek-r1:1.5b",
-    MEDIUM_OLLAMA_MODEL: config.MEDIUM_OLLAMA_MODEL || "deepseek-r1:7b",
-    LARGE_OLLAMA_MODEL: config.LARGE_OLLAMA_MODEL || "deepseek-r1:7b",
     STUDIOLM_SERVER_URL: config.STUDIOLM_SERVER_URL || "http://localhost:1234",
    STUDIOLM_SMALL_MODEL: config.STUDIOLM_SMALL_MODEL || "lmstudio-community/deepseek-r1-distill-qwen-1.5b",
    STUDIOLM_MEDIUM_MODEL: config.STUDIOLM_MEDIUM_MODEL || "deepseek-r1-distill-qwen-7b",
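
With the Ollama keys gone, configuration narrows to the local-AI flag plus StudioLM settings. A minimal sketch of how the remaining schema parses environment strings (keys and defaults taken from the hunks above; the standalone loadConfig helper is hypothetical, the plugin does this inside validateConfig):

import { z } from "zod";

// The beta.8 keys that survive the Ollama removal, per the diff above.
const configSchema = z.object({
  USE_LOCAL_AI: z.boolean().default(true),
  USE_STUDIOLM_TEXT_MODELS: z.boolean().default(false),
  STUDIOLM_SERVER_URL: z.string().default("http://localhost:1234"),
  STUDIOLM_SMALL_MODEL: z
    .string()
    .default("lmstudio-community/deepseek-r1-distill-qwen-1.5b"),
});

// Hypothetical helper: env vars arrive as strings, so booleans are coerced
// by comparing against "true", mirroring the diff's validateConfig.
function loadConfig(env: NodeJS.ProcessEnv) {
  return configSchema.parse({
    USE_LOCAL_AI: true, // always forced on, per validateModelConfig
    USE_STUDIOLM_TEXT_MODELS: env.USE_STUDIOLM_TEXT_MODELS === "true",
    STUDIOLM_SERVER_URL: env.STUDIOLM_SERVER_URL,
    STUDIOLM_SMALL_MODEL: env.STUDIOLM_SMALL_MODEL,
  });
}
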
@@ -598,288 +578,11 @@ var DownloadManager = class _DownloadManager {
   }
 };

-// src/utils/ollamaManager.ts
-import { ModelType, logger as logger3 } from "@elizaos/core";
-var OllamaManager = class _OllamaManager {
-  static instance = null;
-  serverUrl;
-  initialized = false;
-  availableModels = [];
-  configuredModels = {
-    small: process.env.SMALL_OLLAMA_MODEL || "deepseek-r1:1.5b",
-    medium: process.env.MEDIUM_OLLAMA_MODEL || "deepseek-r1:7b"
-  };
-  /**
-   * Private constructor for initializing OllamaManager.
-   */
-  constructor() {
-    this.serverUrl = process.env.OLLAMA_SERVER_URL || "http://localhost:11434";
-    logger3.info("OllamaManager initialized with configuration:", {
-      serverUrl: this.serverUrl,
-      configuredModels: this.configuredModels,
-      timestamp: (/* @__PURE__ */ new Date()).toISOString()
-    });
-  }
-  /**
-   * Returns an instance of the OllamaManager class.
-   * If an instance does not already exist, a new instance is created and returned.
-   * @returns {OllamaManager} The instance of the OllamaManager class.
-   */
-  static getInstance() {
-    if (!_OllamaManager.instance) {
-      _OllamaManager.instance = new _OllamaManager();
-    }
-    return _OllamaManager.instance;
-  }
-  /**
-   * Asynchronously checks the status of the server by attempting to fetch the "/api/tags" endpoint.
-   * @returns A Promise that resolves to a boolean indicating if the server is reachable and responding with a successful status.
-   */
-  async checkServerStatus() {
-    try {
-      const response = await fetch(`${this.serverUrl}/api/tags`);
-      if (!response.ok) {
-        throw new Error(`Server responded with status: ${response.status}`);
-      }
-      return true;
-    } catch (error) {
-      logger3.error("Ollama server check failed:", {
-        error: error instanceof Error ? error.message : String(error),
-        serverUrl: this.serverUrl,
-        timestamp: (/* @__PURE__ */ new Date()).toISOString()
-      });
-      return false;
-    }
-  }
-  /**
-   * Fetches the available Ollama models from the specified server URL.
-   *
-   * @returns {Promise<void>} A Promise that resolves when the available models are successfully fetched.
-   */
-  async fetchAvailableModels() {
-    try {
-      const response = await fetch(`${this.serverUrl}/api/tags`);
-      if (!response.ok) {
-        throw new Error(`Failed to fetch models: ${response.status}`);
-      }
-      const data = await response.json();
-      this.availableModels = data.models;
-      logger3.info("Ollama available models:", {
-        count: this.availableModels.length,
-        models: this.availableModels.map((m) => m.name),
-        timestamp: (/* @__PURE__ */ new Date()).toISOString()
-      });
-    } catch (error) {
-      logger3.error("Failed to fetch Ollama models:", {
-        error: error instanceof Error ? error.message : String(error),
-        serverUrl: this.serverUrl,
-        timestamp: (/* @__PURE__ */ new Date()).toISOString()
-      });
-      throw error;
-    }
-  }
-  /**
-   * Asynchronously tests a model specified by the given modelId.
-   *
-   * @param {string} modelId - The ID of the model to be tested.
-   * @returns {Promise<boolean>} - A promise that resolves to true if the model test is successful, false otherwise.
-   */
-  async testModel(modelId) {
-    try {
-      const testRequest = {
-        model: modelId,
-        prompt: "Debug Mode: Test initialization. Respond with 'Initialization successful' if you can read this.",
-        stream: false,
-        options: {
-          temperature: 0.7,
-          num_predict: 100
-        }
-      };
-      logger3.info(`Testing model ${modelId}...`);
-      const response = await fetch(`${this.serverUrl}/api/generate`, {
-        method: "POST",
-        headers: {
-          "Content-Type": "application/json"
-        },
-        body: JSON.stringify(testRequest)
-      });
-      if (!response.ok) {
-        throw new Error(`Model test failed with status: ${response.status}`);
-      }
-      const result = await response.json();
-      if (!result.response) {
-        throw new Error("No valid response content received");
-      }
-      logger3.info(`Model ${modelId} test response:`, {
-        content: result.response,
-        model: result.model,
-        timestamp: (/* @__PURE__ */ new Date()).toISOString()
-      });
-      return true;
-    } catch (error) {
-      logger3.error(`Model ${modelId} test failed:`, {
-        error: error instanceof Error ? error.message : String(error),
-        stack: error instanceof Error ? error.stack : void 0,
-        timestamp: (/* @__PURE__ */ new Date()).toISOString()
-      });
-      return false;
-    }
-  }
-  /**
-   * Asynchronously tests the configured text models to ensure they are working properly.
-   * Logs the test results for each model and outputs a warning if any models fail the test.
-   * @returns {Promise<void>} A Promise that resolves when all configured models have been tested.
-   */
-  async testTextModels() {
-    logger3.info("Testing configured text models...");
-    const results = await Promise.all([
-      this.testModel(this.configuredModels.small),
-      this.testModel(this.configuredModels.medium)
-    ]);
-    const [smallWorking, mediumWorking] = results;
-    if (!smallWorking || !mediumWorking) {
-      const failedModels = [];
-      if (!smallWorking) failedModels.push("small");
-      if (!mediumWorking) failedModels.push("medium");
-      logger3.warn("Some models failed the test:", {
-        failedModels,
-        small: this.configuredModels.small,
-        medium: this.configuredModels.medium
-      });
-    } else {
-      logger3.success("All configured models passed the test");
-    }
-  }
-  /**
-   * Asynchronously initializes the Ollama service by checking server status,
-   * fetching available models, and testing text models.
-   *
-   * @returns A Promise that resolves when initialization is complete
-   */
-  async initialize() {
-    try {
-      if (this.initialized) {
-        logger3.info("Ollama already initialized, skipping initialization");
-        return;
-      }
-      logger3.info("Starting Ollama initialization...");
-      const serverAvailable = await this.checkServerStatus();
-      if (!serverAvailable) {
-        throw new Error("Ollama server is not available");
-      }
-      await this.fetchAvailableModels();
-      await this.testTextModels();
-      this.initialized = true;
-      logger3.success("Ollama initialization complete");
-    } catch (error) {
-      logger3.error("Ollama initialization failed:", {
-        error: error instanceof Error ? error.message : String(error),
-        stack: error instanceof Error ? error.stack : void 0
-      });
-      throw error;
-    }
-  }
-  /**
-   * Retrieves the available Ollama models.
-   *
-   * @returns {OllamaModel[]} An array of OllamaModel objects representing the available models.
-   */
-  getAvailableModels() {
-    return this.availableModels;
-  }
-  /**
-   * Check if the object is initialized.
-   * @returns {boolean} True if the object is initialized, false otherwise.
-   */
-  isInitialized() {
-    return this.initialized;
-  }
-  /**
-   * Generates text using the Ollama AI model.
-   *
-   * @param {GenerateTextParams} params - The parameters for generating text.
-   * @param {boolean} [isInitialized=false] - Flag indicating if Ollama is already initialized.
-   * @returns {Promise<string>} - A promise that resolves with the generated text.
-   */
-  async generateText(params, isInitialized = false) {
-    try {
-      logger3.info("Ollama generateText entry:", {
-        isInitialized,
-        currentInitState: this.initialized,
-        managerInitState: this.isInitialized(),
-        modelType: params.modelType,
-        contextLength: params.prompt?.length,
-        timestamp: (/* @__PURE__ */ new Date()).toISOString()
-      });
-      if (!this.initialized && !isInitialized) {
-        throw new Error("Ollama not initialized. Please initialize before generating text.");
-      }
-      logger3.info("Ollama preparing request:", {
-        model: params.modelType === ModelType.TEXT_LARGE ? this.configuredModels.medium : this.configuredModels.small,
-        contextLength: params.prompt.length,
-        timestamp: (/* @__PURE__ */ new Date()).toISOString()
-      });
-      const request = {
-        model: params.modelType === ModelType.TEXT_LARGE ? this.configuredModels.medium : this.configuredModels.small,
-        prompt: params.prompt,
-        stream: false,
-        options: {
-          temperature: 0.7,
-          top_p: 0.9,
-          num_predict: 8192,
-          repeat_penalty: 1.2,
-          frequency_penalty: 0.7,
-          presence_penalty: 0.7
-        }
-      };
-      const response = await fetch(`${this.serverUrl}/api/generate`, {
-        method: "POST",
-        headers: {
-          "Content-Type": "application/json"
-        },
-        body: JSON.stringify(request)
-      });
-      if (!response.ok) {
-        throw new Error(`Ollama request failed: ${response.status}`);
-      }
-      const result = await response.json();
-      if (!result.response) {
-        throw new Error("No valid response content received from Ollama");
-      }
-      let responseText = result.response;
-      logger3.info("Raw response structure:", {
-        responseLength: responseText.length,
-        hasAction: responseText.includes("action"),
-        hasThinkTag: responseText.includes("<think>")
-      });
-      if (responseText.includes("<think>")) {
-        logger3.info("Cleaning think tags from response");
-        responseText = responseText.replace(/<think>[\s\S]*?<\/think>\n?/g, "");
-        logger3.info("Think tags removed from response");
-      }
-      logger3.info("Ollama request completed successfully:", {
-        responseLength: responseText.length,
-        hasThinkTags: responseText.includes("<think>"),
-        timestamp: (/* @__PURE__ */ new Date()).toISOString()
-      });
-      return responseText;
-    } catch (error) {
-      logger3.error("Ollama text generation error:", {
-        error: error instanceof Error ? error.message : String(error),
-        stack: error instanceof Error ? error.stack : void 0,
-        phase: "text generation",
-        timestamp: (/* @__PURE__ */ new Date()).toISOString()
-      });
-      throw error;
-    }
-  }
-};
-
 // src/utils/platform.ts
 import { exec } from "node:child_process";
 import os from "node:os";
 import { promisify } from "node:util";
-import { logger as
+import { logger as logger3 } from "@elizaos/core";
 var execAsync = promisify(exec);
 var PlatformManager = class _PlatformManager {
   static instance;
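
The deleted manager wrapped Ollama's REST API. For readers tracking what functionality disappeared, a minimal standalone sketch of the same non-streaming /api/generate call (request and response shapes taken from the removed code; the ollamaGenerate helper name is hypothetical):

// Minimal sketch of the call the removed OllamaManager made.
async function ollamaGenerate(
  serverUrl: string,
  model: string,
  prompt: string
): Promise<string> {
  const res = await fetch(`${serverUrl}/api/generate`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({
      model,
      prompt,
      stream: false,
      options: { temperature: 0.7, num_predict: 100 },
    }),
  });
  if (!res.ok) throw new Error(`Ollama request failed: ${res.status}`);
  const result = await res.json();
  if (!result.response) {
    throw new Error("No valid response content received from Ollama");
  }
  // The removed code stripped DeepSeek-R1 style reasoning tags before returning.
  return result.response.replace(/<think>[\s\S]*?<\/think>\n?/g, "");
}
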
@@ -906,10 +609,10 @@ var PlatformManager = class _PlatformManager {
    */
   async initialize() {
     try {
-
+      logger3.info("Initializing platform detection...");
       this.capabilities = await this.detectSystemCapabilities();
     } catch (error) {
-
+      logger3.error("Platform detection failed", { error });
       throw error;
     }
   }
@@ -971,7 +674,7 @@ var PlatformManager = class _PlatformManager {
         return null;
       }
     } catch (error) {
-
+      logger3.error("GPU detection failed", { error });
       return null;
     }
   }
@@ -997,7 +700,7 @@ var PlatformManager = class _PlatformManager {
         isAppleSilicon: false
       };
     } catch (error) {
-
+      logger3.error("Mac GPU detection failed", { error });
       return {
         name: "Unknown Mac GPU",
         type: "metal",
@@ -1032,7 +735,7 @@ var PlatformManager = class _PlatformManager {
         type: "directml"
       };
     } catch (error) {
-
+      logger3.error("Windows GPU detection failed", { error });
       return null;
     }
   }
@@ -1068,7 +771,7 @@ var PlatformManager = class _PlatformManager {
         type: "none"
       };
     } catch (error) {
-
+      logger3.error("Linux GPU detection failed", { error });
      return null;
    }
  }
@@ -1205,7 +908,7 @@ var getPlatformManager = () => {
 };

 // src/utils/studiolmManager.ts
-import { ModelType
+import { ModelType, logger as logger4 } from "@elizaos/core";
 var StudioLMManager = class _StudioLMManager {
   static instance = null;
   serverUrl;
@@ -1222,7 +925,7 @@ var StudioLMManager = class _StudioLMManager {
    */
   constructor() {
     this.serverUrl = process.env.STUDIOLM_SERVER_URL || "http://localhost:1234";
-
+    logger4.info("StudioLMManager initialized with configuration:", {
       serverUrl: this.serverUrl,
       configuredModels: this.configuredModels,
       timestamp: (/* @__PURE__ */ new Date()).toISOString()
@@ -1250,7 +953,7 @@ var StudioLMManager = class _StudioLMManager {
       }
       return true;
     } catch (error) {
-
+      logger4.error("LM Studio server check failed:", {
        error: error instanceof Error ? error.message : String(error),
        serverUrl: this.serverUrl,
        timestamp: (/* @__PURE__ */ new Date()).toISOString()
@@ -1271,13 +974,13 @@ var StudioLMManager = class _StudioLMManager {
       }
       const data = await response.json();
       this.availableModels = data.data;
-
+      logger4.info("LM Studio available models:", {
         count: this.availableModels.length,
         models: this.availableModels.map((m) => m.id),
         timestamp: (/* @__PURE__ */ new Date()).toISOString()
       });
     } catch (error) {
-
+      logger4.error("Failed to fetch LM Studio models:", {
        error: error instanceof Error ? error.message : String(error),
        serverUrl: this.serverUrl,
        timestamp: (/* @__PURE__ */ new Date()).toISOString()
@@ -1305,7 +1008,7 @@ var StudioLMManager = class _StudioLMManager {
         max_tokens: -1,
         stream: false
       };
-
+      logger4.info(`Testing model ${modelId}...`);
       const response = await fetch(`${this.serverUrl}/v1/chat/completions`, {
         method: "POST",
         headers: {
@@ -1320,14 +1023,14 @@ var StudioLMManager = class _StudioLMManager {
       if (!result.choices?.[0]?.message?.content) {
         throw new Error("No valid response content received");
       }
-
+      logger4.info(`Model ${modelId} test response:`, {
         content: result.choices[0].message.content,
         model: result.model,
         timestamp: (/* @__PURE__ */ new Date()).toISOString()
       });
       return true;
     } catch (error) {
-
+      logger4.error(`Model ${modelId} test failed:`, {
        error: error instanceof Error ? error.message : String(error),
        stack: error instanceof Error ? error.stack : void 0,
        timestamp: (/* @__PURE__ */ new Date()).toISOString()
@@ -1341,7 +1044,7 @@ var StudioLMManager = class _StudioLMManager {
    * @returns {Promise<void>} A promise that resolves when the test is complete.
    */
   async testTextModels() {
-
+    logger4.info("Testing configured text models...");
     const results = await Promise.all([
       this.testModel(this.configuredModels.small),
       this.testModel(this.configuredModels.medium)
@@ -1351,13 +1054,13 @@ var StudioLMManager = class _StudioLMManager {
       const failedModels = [];
       if (!smallWorking) failedModels.push("small");
       if (!mediumWorking) failedModels.push("medium");
-
+      logger4.warn("Some models failed the test:", {
         failedModels,
         small: this.configuredModels.small,
         medium: this.configuredModels.medium
       });
     } else {
-
+      logger4.success("All configured models passed the test");
     }
   }
   /**
@@ -1369,10 +1072,10 @@ var StudioLMManager = class _StudioLMManager {
   async initialize() {
     try {
       if (this.initialized) {
-
+        logger4.info("StudioLM already initialized, skipping initialization");
         return;
       }
-
+      logger4.info("Starting StudioLM initialization...");
       const serverAvailable = await this.checkServerStatus();
       if (!serverAvailable) {
         throw new Error("LM Studio server is not available");
@@ -1380,9 +1083,9 @@ var StudioLMManager = class _StudioLMManager {
       await this.fetchAvailableModels();
       await this.testTextModels();
       this.initialized = true;
-
+      logger4.success("StudioLM initialization complete");
     } catch (error) {
-
+      logger4.error("StudioLM initialization failed:", {
        error: error instanceof Error ? error.message : String(error),
        stack: error instanceof Error ? error.stack : void 0
      });
@@ -1414,7 +1117,7 @@ var StudioLMManager = class _StudioLMManager {
    */
   async generateText(params, isInitialized = false) {
     try {
-
+      logger4.info("StudioLM generateText entry:", {
        isInitialized,
        currentInitState: this.initialized,
        managerInitState: this.isInitialized(),
@@ -1432,19 +1135,19 @@ var StudioLMManager = class _StudioLMManager {
         },
         { role: "user", content: params.prompt }
       ];
-
-        model: params.modelType ===
+      logger4.info("StudioLM preparing request:", {
+        model: params.modelType === ModelType.TEXT_LARGE ? this.configuredModels.medium : this.configuredModels.small,
         messageCount: messages.length,
         timestamp: (/* @__PURE__ */ new Date()).toISOString()
       });
-
+      logger4.info("Incoming context structure:", {
         contextLength: params.prompt.length,
         hasAction: params.prompt.includes("action"),
         runtime: !!params.runtime,
         stopSequences: params.stopSequences
       });
       const request = {
-        model: params.modelType ===
+        model: params.modelType === ModelType.TEXT_LARGE ? this.configuredModels.medium : this.configuredModels.small,
         messages,
         temperature: 0.7,
         max_tokens: 8192,
@@ -1465,24 +1168,24 @@ var StudioLMManager = class _StudioLMManager {
         throw new Error("No valid response content received from StudioLM");
       }
       let responseText = result.choices[0].message.content;
-
+      logger4.info("Raw response structure:", {
         responseLength: responseText.length,
         hasAction: responseText.includes("action"),
         hasThinkTag: responseText.includes("<think>")
       });
       if (responseText.includes("<think>")) {
-
+        logger4.info("Cleaning think tags from response");
         responseText = responseText.replace(/<think>[\s\S]*?<\/think>\n?/g, "");
-
+        logger4.info("Think tags removed from response");
       }
-
+      logger4.info("StudioLM request completed successfully:", {
         responseLength: responseText.length,
         hasThinkTags: responseText.includes("<think>"),
         timestamp: (/* @__PURE__ */ new Date()).toISOString()
       });
       return responseText;
     } catch (error) {
-
+      logger4.error("StudioLM text generation error:", {
        error: error instanceof Error ? error.message : String(error),
        stack: error instanceof Error ? error.stack : void 0,
        phase: "text generation",
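
StudioLM (LM Studio) speaks the OpenAI-style chat-completions protocol, as the request and response handling above shows. A minimal standalone sketch of the same call (endpoint, payload fields, and <think>-tag cleanup taken from the diff; the studioLmChat helper name is hypothetical):

// Hypothetical helper mirroring StudioLMManager.generateText above.
async function studioLmChat(
  serverUrl: string,
  model: string,
  prompt: string
): Promise<string> {
  const res = await fetch(`${serverUrl}/v1/chat/completions`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({
      model,
      messages: [{ role: "user", content: prompt }],
      temperature: 0.7,
      max_tokens: 8192,
      stream: false,
    }),
  });
  if (!res.ok) throw new Error(`StudioLM request failed: ${res.status}`);
  const result = await res.json();
  const text: string | undefined = result.choices?.[0]?.message?.content;
  if (!text) throw new Error("No valid response content received from StudioLM");
  // DeepSeek-R1 distills emit <think>...</think> reasoning; strip it as the plugin does.
  return text.replace(/<think>[\s\S]*?<\/think>\n?/g, "");
}
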
@@ -1494,7 +1197,7 @@ var StudioLMManager = class _StudioLMManager {
 };

 // src/utils/tokenizerManager.ts
-import { logger as
+import { logger as logger5 } from "@elizaos/core";
 import { AutoTokenizer } from "@huggingface/transformers";
 var TokenizerManager = class _TokenizerManager {
   static instance = null;
@@ -1534,7 +1237,7 @@ var TokenizerManager = class _TokenizerManager {
   async loadTokenizer(modelConfig) {
     try {
       const tokenizerKey = `${modelConfig.tokenizer.type}-${modelConfig.tokenizer.name}`;
-
+      logger5.info("Loading tokenizer:", {
        key: tokenizerKey,
        name: modelConfig.tokenizer.name,
        type: modelConfig.tokenizer.type,
@@ -1542,7 +1245,7 @@ var TokenizerManager = class _TokenizerManager {
         cacheDir: this.cacheDir
       });
       if (this.tokenizers.has(tokenizerKey)) {
-
+        logger5.info("Using cached tokenizer:", { key: tokenizerKey });
         const cachedTokenizer = this.tokenizers.get(tokenizerKey);
         if (!cachedTokenizer) {
           throw new Error(`Tokenizer ${tokenizerKey} exists in map but returned undefined`);
@@ -1551,10 +1254,10 @@ var TokenizerManager = class _TokenizerManager {
       }
       const fs6 = await import("node:fs");
       if (!fs6.existsSync(this.modelsDir)) {
-
+        logger5.warn("Models directory does not exist, creating it:", this.modelsDir);
         fs6.mkdirSync(this.modelsDir, { recursive: true });
       }
-
+      logger5.info(
         "Initializing new tokenizer from HuggingFace with models directory:",
         this.modelsDir
       );
@@ -1564,28 +1267,28 @@ var TokenizerManager = class _TokenizerManager {
         local_files_only: false
       });
       this.tokenizers.set(tokenizerKey, tokenizer);
-
+      logger5.success("Tokenizer loaded successfully:", { key: tokenizerKey });
       return tokenizer;
     } catch (tokenizeError) {
-
+      logger5.error("Failed to load tokenizer from HuggingFace:", {
        error: tokenizeError instanceof Error ? tokenizeError.message : String(tokenizeError),
        stack: tokenizeError instanceof Error ? tokenizeError.stack : void 0,
        tokenizer: modelConfig.tokenizer.name,
        modelsDir: this.modelsDir
      });
-
+      logger5.info("Retrying tokenizer loading...");
      const tokenizer = await AutoTokenizer.from_pretrained(modelConfig.tokenizer.name, {
        cache_dir: this.modelsDir,
        local_files_only: false
      });
      this.tokenizers.set(tokenizerKey, tokenizer);
-
+      logger5.success("Tokenizer loaded successfully on retry:", {
        key: tokenizerKey
      });
      return tokenizer;
    }
  } catch (error) {
-
+    logger5.error("Failed to load tokenizer:", {
      error: error instanceof Error ? error.message : String(error),
      stack: error instanceof Error ? error.stack : void 0,
      model: modelConfig.name,
@@ -1605,23 +1308,23 @@ var TokenizerManager = class _TokenizerManager {
    */
   async encode(text, modelConfig) {
     try {
-
+      logger5.info("Encoding text with tokenizer:", {
        length: text.length,
        tokenizer: modelConfig.tokenizer.name
      });
      const tokenizer = await this.loadTokenizer(modelConfig);
-
+      logger5.info("Tokenizer loaded, encoding text...");
      const encoded = await tokenizer.encode(text, {
        add_special_tokens: true,
        return_token_type_ids: false
      });
-
+      logger5.info("Text encoded successfully:", {
        tokenCount: encoded.length,
        tokenizer: modelConfig.tokenizer.name
      });
      return encoded;
    } catch (error) {
-
+      logger5.error("Text encoding failed:", {
        error: error instanceof Error ? error.message : String(error),
        stack: error instanceof Error ? error.stack : void 0,
        textLength: text.length,
@@ -1641,23 +1344,23 @@ var TokenizerManager = class _TokenizerManager {
    */
   async decode(tokens, modelConfig) {
     try {
-
+      logger5.info("Decoding tokens with tokenizer:", {
        count: tokens.length,
        tokenizer: modelConfig.tokenizer.name
      });
      const tokenizer = await this.loadTokenizer(modelConfig);
-
+      logger5.info("Tokenizer loaded, decoding tokens...");
      const decoded = await tokenizer.decode(tokens, {
        skip_special_tokens: true,
        clean_up_tokenization_spaces: true
      });
-
+      logger5.info("Tokens decoded successfully:", {
        textLength: decoded.length,
        tokenizer: modelConfig.tokenizer.name
      });
      return decoded;
    } catch (error) {
-
+      logger5.error("Token decoding failed:", {
        error: error instanceof Error ? error.message : String(error),
        stack: error instanceof Error ? error.stack : void 0,
        tokenCount: tokens.length,
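
The tokenizer manager is a thin cache over AutoTokenizer from @huggingface/transformers, keyed by tokenizer type and name. A minimal sketch of the load/encode/decode round trip it wraps (API calls as they appear in the diff; the model id and cache path are illustrative placeholders):

import { AutoTokenizer } from "@huggingface/transformers";

// Placeholder model id; the plugin takes this from modelConfig.tokenizer.name.
const tokenizer = await AutoTokenizer.from_pretrained("Xenova/gpt-4", {
  cache_dir: "./models",   // mirrors the manager's modelsDir
  local_files_only: false, // allow a network download on cache miss
});

const ids = tokenizer.encode("hello local ai");
const text = tokenizer.decode(ids, { skip_special_tokens: true });
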
@@ -1674,7 +1377,7 @@ import { exec as exec2 } from "node:child_process";
 import fs2 from "node:fs";
 import path2 from "node:path";
 import { promisify as promisify2 } from "node:util";
-import { logger as
+import { logger as logger6 } from "@elizaos/core";
 import { nodewhisper } from "nodejs-whisper";
 var execAsync2 = promisify2(exec2);
 var TranscribeManager = class _TranscribeManager {
@@ -1691,7 +1394,7 @@ var TranscribeManager = class _TranscribeManager {
    */
   constructor(cacheDir) {
     this.cacheDir = path2.join(cacheDir, "whisper");
-
+    logger6.debug("Initializing TranscribeManager", {
       cacheDir: this.cacheDir,
       timestamp: (/* @__PURE__ */ new Date()).toISOString()
     });
@@ -1707,7 +1410,7 @@ var TranscribeManager = class _TranscribeManager {
       await this.initializeFFmpeg();
       this.ffmpegInitialized = true;
     } catch (error) {
-
+      logger6.error("FFmpeg initialization failed:", {
        error: error instanceof Error ? error.message : String(error),
        stack: error instanceof Error ? error.stack : void 0,
        timestamp: (/* @__PURE__ */ new Date()).toISOString()
@@ -1745,13 +1448,13 @@ var TranscribeManager = class _TranscribeManager {
     try {
       const { stdout } = await execAsync2("ffmpeg -version");
       this.ffmpegVersion = stdout.split("\n")[0];
-
+      logger6.info("FFmpeg version:", {
         version: this.ffmpegVersion,
         timestamp: (/* @__PURE__ */ new Date()).toISOString()
       });
     } catch (error) {
       this.ffmpegVersion = null;
-
+      logger6.error("Failed to get FFmpeg version:", {
        error: error instanceof Error ? error.message : String(error),
        timestamp: (/* @__PURE__ */ new Date()).toISOString()
      });
@@ -1779,7 +1482,7 @@ var TranscribeManager = class _TranscribeManager {
       }
     } catch (error) {
       this.ffmpegAvailable = false;
-
+      logger6.error("FFmpeg initialization failed:", {
        error: error instanceof Error ? error.message : String(error),
        stack: error instanceof Error ? error.stack : void 0,
        timestamp: (/* @__PURE__ */ new Date()).toISOString()
@@ -1799,7 +1502,7 @@ var TranscribeManager = class _TranscribeManager {
       const { stdout, stderr } = await execAsync2("which ffmpeg || where ffmpeg");
       this.ffmpegPath = stdout.trim();
       this.ffmpegAvailable = true;
-
+      logger6.info("FFmpeg found at:", {
        path: this.ffmpegPath,
        stderr: stderr ? stderr.trim() : void 0,
        timestamp: (/* @__PURE__ */ new Date()).toISOString()
@@ -1807,7 +1510,7 @@ var TranscribeManager = class _TranscribeManager {
     } catch (error) {
       this.ffmpegAvailable = false;
       this.ffmpegPath = null;
-
+      logger6.error("FFmpeg not found in PATH:", {
        error: error instanceof Error ? error.message : String(error),
        stderr: error instanceof Error && "stderr" in error ? error.stderr : void 0,
        timestamp: (/* @__PURE__ */ new Date()).toISOString()
@@ -1827,7 +1530,7 @@ var TranscribeManager = class _TranscribeManager {
         throw new Error("FFmpeg installation missing required codecs (pcm_s16le, wav)");
       }
     } catch (error) {
-
+      logger6.error("FFmpeg capabilities verification failed:", {
        error: error instanceof Error ? error.message : String(error),
        timestamp: (/* @__PURE__ */ new Date()).toISOString()
      });
@@ -1838,7 +1541,7 @@ var TranscribeManager = class _TranscribeManager {
    * Logs instructions on how to install FFmpeg if it is not properly installed.
    */
   logFFmpegInstallInstructions() {
-
+    logger6.warn("FFmpeg is required but not properly installed. Please install FFmpeg:", {
       instructions: {
         mac: "brew install ffmpeg",
         ubuntu: "sudo apt-get install ffmpeg",
@@ -1891,7 +1594,7 @@ var TranscribeManager = class _TranscribeManager {
         `ffmpeg -y -loglevel error -i "${inputPath}" -acodec pcm_s16le -ar 16000 -ac 1 "${outputPath}"`
       );
       if (stderr) {
-
+        logger6.warn("FFmpeg conversion error:", {
          stderr,
          inputPath,
          outputPath,
@@ -1902,7 +1605,7 @@ var TranscribeManager = class _TranscribeManager {
         throw new Error("WAV file was not created successfully");
       }
     } catch (error) {
-
+      logger6.error("Audio conversion failed:", {
        error: error instanceof Error ? error.message : String(error),
        stack: error instanceof Error ? error.stack : void 0,
        command: `ffmpeg -y -loglevel error -i "${inputPath}" -acodec pcm_s16le -ar 16000 -ac 1 "${outputPath}"`,
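
Whisper expects 16 kHz mono 16-bit PCM, which is exactly what the ffmpeg invocation above produces. A minimal standalone sketch of that conversion step (the command string is taken verbatim from the diff; the toWhisperWav helper name is hypothetical):

import { exec } from "node:child_process";
import { promisify } from "node:util";

const execAsync = promisify(exec);

// Hypothetical helper: convert any ffmpeg-readable input into the
// 16 kHz mono 16-bit PCM WAV layout that whisper expects.
async function toWhisperWav(inputPath: string, outputPath: string): Promise<void> {
  await execAsync(
    `ffmpeg -y -loglevel error -i "${inputPath}" -acodec pcm_s16le -ar 16000 -ac 1 "${outputPath}"`
  );
}
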
@@ -1938,7 +1641,7 @@ var TranscribeManager = class _TranscribeManager {
       }
       return tempWavFile;
     } catch (error) {
-
+      logger6.error("Audio preprocessing failed:", {
        error: error instanceof Error ? error.message : String(error),
        stack: error instanceof Error ? error.stack : void 0,
        ffmpegAvailable: this.ffmpegAvailable,
@@ -1965,7 +1668,7 @@ var TranscribeManager = class _TranscribeManager {
     }
     try {
       const wavFile = await this.preprocessAudio(audioBuffer);
-
+      logger6.info("Starting transcription with whisper...");
       const originalStdoutWrite = process.stdout.write;
       const originalStderrWrite = process.stderr.write;
       const noopWrite = () => true;
@@ -1988,19 +1691,19 @@ var TranscribeManager = class _TranscribeManager {
       }
       if (fs2.existsSync(wavFile)) {
         fs2.unlinkSync(wavFile);
-
+        logger6.info("Temporary WAV file cleaned up");
       }
       const cleanText = output.split("\n").map((line) => {
         const textMatch = line.match(/](.+)$/);
         return textMatch ? textMatch[1].trim() : line.trim();
       }).filter((line) => line).join(" ");
-
+      logger6.success("Transcription complete:", {
         textLength: cleanText.length,
         timestamp: (/* @__PURE__ */ new Date()).toISOString()
       });
       return { text: cleanText };
     } catch (error) {
-
+      logger6.error("Transcription failed:", {
        error: error instanceof Error ? error.message : String(error),
        stack: error instanceof Error ? error.stack : void 0,
        ffmpegAvailable: this.ffmpegAvailable
@@ -2014,48 +1717,10 @@ var TranscribeManager = class _TranscribeManager {
 import fs3 from "node:fs";
 import path3 from "node:path";
 import { Readable } from "node:stream";
-import { logger as
+import { logger as logger7, prependWavHeader } from "@elizaos/core";
 import {
   getLlama
 } from "node-llama-cpp";
-
-// src/utils/audioUtils.ts
-import { PassThrough } from "node:stream";
-function getWavHeader(audioLength, sampleRate, channelCount = 1, bitsPerSample = 16) {
-  const wavHeader = Buffer.alloc(44);
-  wavHeader.write("RIFF", 0);
-  wavHeader.writeUInt32LE(36 + audioLength, 4);
-  wavHeader.write("WAVE", 8);
-  wavHeader.write("fmt ", 12);
-  wavHeader.writeUInt32LE(16, 16);
-  wavHeader.writeUInt16LE(1, 20);
-  wavHeader.writeUInt16LE(channelCount, 22);
-  wavHeader.writeUInt32LE(sampleRate, 24);
-  wavHeader.writeUInt32LE(sampleRate * bitsPerSample * channelCount / 8, 28);
-  wavHeader.writeUInt16LE(bitsPerSample * channelCount / 8, 32);
-  wavHeader.writeUInt16LE(bitsPerSample, 34);
-  wavHeader.write("data", 36);
-  wavHeader.writeUInt32LE(audioLength, 40);
-  return wavHeader;
-}
-function prependWavHeader(readable, audioLength, sampleRate, channelCount = 1, bitsPerSample = 16) {
-  const wavHeader = getWavHeader(audioLength, sampleRate, channelCount, bitsPerSample);
-  let pushedHeader = false;
-  const passThrough = new PassThrough();
-  readable.on("data", (data) => {
-    if (!pushedHeader) {
-      passThrough.push(wavHeader);
-      pushedHeader = true;
-    }
-    passThrough.push(data);
-  });
-  readable.on("end", () => {
-    passThrough.end();
-  });
-  return passThrough;
-}
-
-// src/utils/ttsManager.ts
 var TTSManager = class _TTSManager {
   static instance = null;
   cacheDir;
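
The local audioUtils module is gone: prependWavHeader is now imported from @elizaos/core, and the deleted getWavHeader shows what it builds, a standard 44-byte RIFF/WAVE header (PCM format tag 1, byte rate = sampleRate * bitsPerSample * channels / 8). Assuming the core export keeps the removed local signature, a usage sketch:

import { Readable } from "node:stream";
import { prependWavHeader } from "@elizaos/core";

// One second of 16-bit mono silence at 16 kHz (2 bytes per sample).
const pcmBytes = 16000 * 2;
const pcm = Readable.from([Buffer.alloc(pcmBytes)]);

// Signature assumed to match the removed local implementation:
// (readable, audioLength, sampleRate, channelCount = 1, bitsPerSample = 16)
const wavStream = prependWavHeader(pcm, pcmBytes, 16000, 1, 16);
wavStream.pipe(process.stdout); // or pipe into a .wav file
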
@@ -2075,7 +1740,7 @@ var TTSManager = class _TTSManager {
     this.modelsDir = process.env.LLAMALOCAL_PATH?.trim() ? path3.resolve(process.env.LLAMALOCAL_PATH.trim()) : path3.join(process.cwd(), "models");
     this.downloadManager = DownloadManager.getInstance(this.cacheDir, this.modelsDir);
     this.ensureCacheDirectory();
-
+    logger7.debug("TTSManager initialized");
   }
   /**
    * Returns an instance of TTSManager, creating a new one if none exist.
@@ -2095,7 +1760,7 @@ var TTSManager = class _TTSManager {
   ensureCacheDirectory() {
     if (!fs3.existsSync(this.cacheDir)) {
       fs3.mkdirSync(this.cacheDir, { recursive: true });
-
+      logger7.debug("Created TTS cache directory:", this.cacheDir);
     }
   }
   /**
@@ -2112,7 +1777,7 @@ var TTSManager = class _TTSManager {
     if (this.initialized && this.model && this.ctx) {
       return;
     }
-
+    logger7.info("Initializing TTS with GGUF backend...");
     const modelSpec = MODEL_SPECS.tts.base;
     const modelPath = path3.join(this.modelsDir, modelSpec.name);
     if (!fs3.existsSync(modelPath)) {
@@ -2136,7 +1801,7 @@ var TTSManager = class _TTSManager {
       let lastError = null;
       for (const attempt of attempts) {
         try {
-
+          logger7.info("Attempting TTS model download:", {
            description: attempt.description,
            repo: attempt.spec.repo,
            name: attempt.spec.name,
@@ -2145,15 +1810,15 @@ var TTSManager = class _TTSManager {
           });
           const barLength = 30;
           const emptyBar = "\u25B1".repeat(barLength);
-
+          logger7.info(`Downloading TTS model: ${emptyBar} 0%`);
           await this.downloadManager.downloadFromUrl(attempt.url, modelPath);
           const completedBar = "\u25B0".repeat(barLength);
-
-
+          logger7.info(`Downloading TTS model: ${completedBar} 100%`);
+          logger7.success("TTS model download successful with:", attempt.description);
           break;
         } catch (error) {
           lastError = error;
-
+          logger7.warn("TTS model download attempt failed:", {
            description: attempt.description,
            error: error instanceof Error ? error.message : String(error),
            timestamp: (/* @__PURE__ */ new Date()).toISOString()
@@ -2164,7 +1829,7 @@ var TTSManager = class _TTSManager {
         throw lastError || new Error("All download attempts failed");
       }
     }
-
+    logger7.info("Loading TTS model...");
     const llama = await getLlama();
     this.model = await llama.loadModel({
       modelPath,
@@ -2175,14 +1840,14 @@ var TTSManager = class _TTSManager {
       contextSize: modelSpec.contextSize
     });
     this.sequence = this.ctx.getSequence();
-
+    logger7.success("TTS initialization complete", {
      modelPath,
      contextSize: modelSpec.contextSize,
      timestamp: (/* @__PURE__ */ new Date()).toISOString()
    });
    this.initialized = true;
  } catch (error) {
-
+    logger7.error("TTS initialization failed:", {
      error: error instanceof Error ? error.message : String(error),
      model: MODEL_SPECS.tts.base.name,
      timestamp: (/* @__PURE__ */ new Date()).toISOString()
@@ -2202,14 +1867,14 @@ var TTSManager = class _TTSManager {
     if (!this.model || !this.ctx || !this.sequence) {
       throw new Error("TTS model not initialized");
     }
-
+    logger7.info("Starting speech generation for text:", { text });
     const prompt = `[SPEAKER=female_1][LANGUAGE=en]${text}`;
-
-
+    logger7.info("Formatted prompt:", { prompt });
+    logger7.info("Tokenizing input...");
     const inputTokens = this.model.tokenize(prompt);
-
+    logger7.info("Input tokenized:", { tokenCount: inputTokens.length });
     const maxTokens = inputTokens.length * 2;
-
+    logger7.info("Starting token generation with optimized limit:", {
       maxTokens
     });
     const responseTokens = [];
@@ -2223,26 +1888,26 @@ var TTSManager = class _TTSManager {
         const barLength = 30;
         const filledLength = Math.floor(responseTokens.length / maxTokens * barLength);
         const progressBar = "\u25B0".repeat(filledLength) + "\u25B1".repeat(barLength - filledLength);
-
+        logger7.info(
           `Token generation: ${progressBar} ${percent}% (${responseTokens.length}/${maxTokens})`
         );
         if (responseTokens.length >= maxTokens) {
-
+          logger7.info("Token generation complete");
           break;
         }
       }
     } catch (error) {
-
+      logger7.error("Token generation error:", error);
       throw error;
     }
     if (responseTokens.length === 0) {
       throw new Error("No audio tokens generated");
     }
-
+    logger7.info("Converting tokens to audio data...");
     const audioData = this.processAudioResponse({
       tokens: responseTokens.map((t) => Number.parseInt(this.model.detokenize([t]), 10))
     });
-
+    logger7.info("Audio data generated:", {
      byteLength: audioData.length,
      sampleRate: MODEL_SPECS.tts.base.sampleRate
    });
@@ -2253,10 +1918,10 @@ var TTSManager = class _TTSManager {
       1,
       16
     );
-
+    logger7.success("Speech generation complete");
     return audioStream;
   } catch (error) {
-
+    logger7.error("Speech generation failed:", {
      error: error instanceof Error ? error.message : String(error),
      text
    });
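
The download and generation loops above all render the same fixed-width text progress bar from U+25B0/U+25B1 glyphs. A minimal sketch of that computation (constants taken from the diff; the renderProgress helper name is hypothetical):

// Hypothetical helper reproducing the 30-slot bar used throughout the diff.
function renderProgress(done: number, total: number): string {
  const barLength = 30;
  const percent = Math.floor((done / total) * 100);
  const filledLength = Math.floor((done / total) * barLength);
  // "\u25B0" is the filled block, "\u25B1" the empty one.
  const bar = "\u25B0".repeat(filledLength) + "\u25B1".repeat(barLength - filledLength);
  return `${bar} ${percent}% (${done}/${total})`;
}

console.log(renderProgress(15, 30)); // 15 filled + 15 empty blocks, "50% (15/30)"
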
@@ -2298,7 +1963,7 @@ import fs4 from "node:fs";
 import os2 from "node:os";
 import path4 from "node:path";
 import process2 from "node:process";
-import { logger as
+import { logger as logger8 } from "@elizaos/core";
 import {
   AutoProcessor,
   AutoTokenizer as AutoTokenizer2,
@@ -2336,7 +2001,7 @@ var VisionManager = class _VisionManager {
     this.ensureModelsDirExists();
     this.downloadManager = DownloadManager.getInstance(this.cacheDir, this.modelsDir);
     this.platformConfig = this.getPlatformConfig();
-
+    logger8.debug("VisionManager initialized");
   }
   /**
    * Retrieves the platform configuration based on the operating system and architecture.
@@ -2373,7 +2038,7 @@ var VisionManager = class _VisionManager {
    */
   ensureModelsDirExists() {
     if (!existsSync(this.modelsDir)) {
-
+      logger8.debug(`Creating models directory at: ${this.modelsDir}`);
       fs4.mkdirSync(this.modelsDir, { recursive: true });
     }
   }
@@ -2400,7 +2065,7 @@ var VisionManager = class _VisionManager {
   checkCacheExists(modelId, type) {
     const modelPath = path4.join(this.modelsDir, modelId.replace("/", "--"), type);
     if (existsSync(modelPath)) {
-
+      logger8.info(`${type} found at: ${modelPath}`);
       return true;
     }
     return false;
@@ -2423,7 +2088,7 @@ var VisionManager = class _VisionManager {
       ...component,
       dtype: defaultDtype
     }));
-
+    logger8.info("Model components configured with dtype:", {
      platform,
      arch,
      defaultDtype,
@@ -2452,19 +2117,19 @@ var VisionManager = class _VisionManager {
   async initialize() {
     try {
       if (this.initialized) {
-
+        logger8.info("Vision model already initialized, skipping initialization");
         return;
       }
-
+      logger8.info("Starting vision model initialization...");
       const modelSpec = MODEL_SPECS.vision;
-
+      logger8.info("Configuring environment for vision model...");
       env.allowLocalModels = true;
       env.allowRemoteModels = true;
       if (this.platformConfig.useOnnx) {
         env.backends.onnx.enabled = true;
         env.backends.onnx.logLevel = "info";
       }
-
+      logger8.info("Loading Florence2 model...");
       try {
         let lastProgress = -1;
         const modelCached = this.checkCacheExists(modelSpec.modelId, "model");
@@ -2482,22 +2147,22 @@ var VisionManager = class _VisionManager {
             const barLength = 30;
             const filledLength = Math.floor(currentProgress / 100 * barLength);
             const progressBar = "\u25B0".repeat(filledLength) + "\u25B1".repeat(barLength - filledLength);
-
+            logger8.info(`Downloading vision model: ${progressBar} ${currentProgress}%`);
             if (currentProgress === 100) this.modelDownloaded = true;
           }
         }
       });
       this.model = model;
-
+      logger8.success("Florence2 model loaded successfully");
     } catch (error) {
-
+      logger8.error("Failed to load Florence2 model:", {
        error: error instanceof Error ? error.message : String(error),
        stack: error instanceof Error ? error.stack : void 0,
        modelId: modelSpec.modelId
      });
      throw error;
    }
-
+    logger8.info("Loading vision tokenizer...");
    try {
      const tokenizerCached = this.checkCacheExists(modelSpec.modelId, "tokenizer");
      let tokenizerProgress = -1;
@@ -2513,21 +2178,21 @@ var VisionManager = class _VisionManager {
             const barLength = 30;
             const filledLength = Math.floor(currentProgress / 100 * barLength);
             const progressBar = "\u25B0".repeat(filledLength) + "\u25B1".repeat(barLength - filledLength);
-
+            logger8.info(`Downloading vision tokenizer: ${progressBar} ${currentProgress}%`);
             if (currentProgress === 100) this.tokenizerDownloaded = true;
           }
         }
       });
-
+      logger8.success("Vision tokenizer loaded successfully");
     } catch (error) {
-
+      logger8.error("Failed to load tokenizer:", {
        error: error instanceof Error ? error.message : String(error),
        stack: error instanceof Error ? error.stack : void 0,
        modelId: modelSpec.modelId
      });
      throw error;
    }
-
+    logger8.info("Loading vision processor...");
    try {
      const processorCached = this.checkCacheExists(modelSpec.modelId, "processor");
      let processorProgress = -1;
@@ -2544,14 +2209,14 @@ var VisionManager = class _VisionManager {
             const barLength = 30;
             const filledLength = Math.floor(currentProgress / 100 * barLength);
             const progressBar = "\u25B0".repeat(filledLength) + "\u25B1".repeat(barLength - filledLength);
-
+            logger8.info(`Downloading vision processor: ${progressBar} ${currentProgress}%`);
             if (currentProgress === 100) this.processorDownloaded = true;
           }
         }
       });
-
+      logger8.success("Vision processor loaded successfully");
     } catch (error) {
-
+      logger8.error("Failed to load vision processor:", {
        error: error instanceof Error ? error.message : String(error),
        stack: error instanceof Error ? error.stack : void 0,
        modelId: modelSpec.modelId
@@ -2559,9 +2224,9 @@ var VisionManager = class _VisionManager {
       throw error;
     }
     this.initialized = true;
-
+    logger8.success("Vision model initialization complete");
   } catch (error) {
-
+    logger8.error("Vision model initialization failed:", {
      error: error instanceof Error ? error.message : String(error),
      stack: error instanceof Error ? error.stack : void 0,
      modelsDir: this.modelsDir
@@ -2577,13 +2242,13 @@ var VisionManager = class _VisionManager {
    */
   async fetchImage(url) {
     try {
-
+      logger8.info(`Fetching image from URL: ${url.slice(0, 100)}...`);
       if (url.startsWith("data:")) {
-
+        logger8.info("Processing data URL...");
         const [header, base64Data] = url.split(",");
         const mimeType2 = header.split(";")[0].split(":")[1];
         const buffer2 = Buffer.from(base64Data, "base64");
-
+        logger8.info("Data URL processed successfully");
         return { buffer: buffer2, mimeType: mimeType2 };
       }
       const response = await fetch(url);
@@ -2592,14 +2257,14 @@ var VisionManager = class _VisionManager {
       }
       const buffer = Buffer.from(await response.arrayBuffer());
       const mimeType = response.headers.get("content-type") || "image/jpeg";
-
+      logger8.info("Image fetched successfully:", {
        mimeType,
        bufferSize: buffer.length,
        status: response.status
      });
      return { buffer, mimeType };
    } catch (error) {
-
+      logger8.error("Failed to fetch image:", {
        error: error instanceof Error ? error.message : String(error),
        stack: error instanceof Error ? error.stack : void 0,
        url
@@ -2614,37 +2279,37 @@ var VisionManager = class _VisionManager {
    */
   async processImage(imageUrl) {
     try {
-
+      logger8.info("Starting image processing...");
       if (!this.initialized) {
-
+        logger8.info("Vision model not initialized, initializing now...");
         await this.initialize();
       }
       if (!this.model || !this.processor || !this.tokenizer) {
         throw new Error("Vision model components not properly initialized");
       }
-
+      logger8.info("Fetching image...");
       const { buffer, mimeType } = await this.fetchImage(imageUrl);
-
+      logger8.info("Creating image blob...");
       const blob = new Blob([buffer], { type: mimeType });
-
+      logger8.info("Converting blob to RawImage...");
       const image = await RawImage.fromBlob(blob);
-
+      logger8.info("Processing image with vision processor...");
      const visionInputs = await this.processor(image);
-
+      logger8.info("Constructing prompts...");
      const prompts = this.processor.construct_prompts("<DETAILED_CAPTION>");
-
+      logger8.info("Tokenizing prompts...");
      const textInputs = this.tokenizer(prompts);
-
+      logger8.info("Generating image description...");
      const generatedIds = await this.model.generate({
        ...textInputs,
        ...visionInputs,
        max_new_tokens: MODEL_SPECS.vision.maxTokens
      });
-
+      logger8.info("Decoding generated text...");
      const generatedText = this.tokenizer.batch_decode(generatedIds, {
        skip_special_tokens: false
      })[0];
-
+      logger8.info("Post-processing generation...");
      const result = this.processor.post_process_generation(
        generatedText,
        "<DETAILED_CAPTION>",
@@ -2655,13 +2320,13 @@ var VisionManager = class _VisionManager {
|
|
|
2655
2320
|
title: `${detailedCaption.split(".")[0]}.`,
|
|
2656
2321
|
description: detailedCaption
|
|
2657
2322
|
};
|
|
2658
|
-
|
|
2323
|
+
logger8.success("Image processing complete:", {
|
|
2659
2324
|
titleLength: response.title.length,
|
|
2660
2325
|
descriptionLength: response.description.length
|
|
2661
2326
|
});
|
|
2662
2327
|
return response;
|
|
2663
2328
|
} catch (error) {
|
|
2664
|
-
|
|
2329
|
+
logger8.error("Image processing failed:", {
|
|
2665
2330
|
error: error instanceof Error ? error.message : String(error),
|
|
2666
2331
|
stack: error instanceof Error ? error.stack : void 0,
|
|
2667
2332
|
imageUrl,
|
|
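
`fetchImage` above branches on `data:` URLs and parses them by hand before falling back to a network fetch. A standalone sketch of that parsing step; `parseDataUrl` is a hypothetical name, not part of the plugin:

```ts
// Sketch of the data-URL branch in fetchImage above.
// parseDataUrl is a hypothetical helper name, not part of the plugin's API.
function parseDataUrl(url: string): { buffer: Buffer; mimeType: string } {
  // A data URL looks like: data:image/png;base64,iVBORw0KGgo...
  const [header, base64Data] = url.split(",");
  // "data:image/png;base64" -> take "data:image/png" -> take "image/png"
  const mimeType = header.split(";")[0].split(":")[1];
  return { buffer: Buffer.from(base64Data, "base64"), mimeType };
}

const { mimeType } = parseDataUrl("data:image/png;base64,iVBORw0KGgo=");
console.log(mimeType); // "image/png"
```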
@@ -2747,7 +2412,6 @@ var LocalAIManager = class _LocalAIManager {
 transcribeManager;
 ttsManager;
 studioLMManager;
-ollamaManager;
 // Initialization state flags
 ollamaInitialized = false;
 studioLMInitialized = false;
@@ -2772,13 +2436,13 @@ var LocalAIManager = class _LocalAIManager {
 * This now only sets up the basic infrastructure without loading any models.
 */
 constructor() {
-const modelsDir = path5.join(
+const modelsDir = path5.join(os3.homedir(), ".eliza", "models");
 if (process.env.LLAMALOCAL_PATH?.trim()) {
 this.modelsDir = path5.resolve(process.env.LLAMALOCAL_PATH.trim());
 } else {
 if (!fs5.existsSync(modelsDir)) {
 fs5.mkdirSync(modelsDir, { recursive: true });
-
+logger9.debug("Created models directory");
 }
 this.modelsDir = modelsDir;
 }
@@ -2788,10 +2452,10 @@ var LocalAIManager = class _LocalAIManager {
 if (cacheDirEnv) {
 this.cacheDir = path5.resolve(cacheDirEnv);
 } else {
-const cacheDir = path5.join(
+const cacheDir = path5.join(os3.homedir(), ".eliza", "cache");
 if (!fs5.existsSync(cacheDir)) {
 fs5.mkdirSync(cacheDir, { recursive: true });
-
+logger9.debug("Ensuring cache directory exists:", cacheDir);
 }
 this.cacheDir = cacheDir;
 }
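
The constructor now defaults both directories to `~/.eliza` via `os.homedir()` (hence the new `os3` import) instead of a path relative to the source tree, with an environment variable still taking precedence. A self-contained sketch of the same resolution order; `resolveDir` is a hypothetical name:

```ts
import fs from "node:fs";
import os from "node:os";
import path from "node:path";

// Sketch of the directory resolution order used by the constructor above:
// an env override wins; otherwise fall back to ~/.eliza/<name> and create it.
// resolveDir is a hypothetical helper name, not part of the plugin's API.
function resolveDir(envValue: string | undefined, name: string): string {
  if (envValue?.trim()) return path.resolve(envValue.trim());
  const dir = path.join(os.homedir(), ".eliza", name);
  if (!fs.existsSync(dir)) fs.mkdirSync(dir, { recursive: true });
  return dir;
}

const modelsDir = resolveDir(process.env.LLAMALOCAL_PATH, "models");
console.log(modelsDir); // e.g. /home/user/.eliza/models
```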
@@ -2825,20 +2489,20 @@ var LocalAIManager = class _LocalAIManager {
 */
 async initializeEnvironment() {
 try {
-
+logger9.info("Validating environment configuration...");
 const config = {
 USE_LOCAL_AI: process.env.USE_LOCAL_AI,
 USE_STUDIOLM_TEXT_MODELS: process.env.USE_STUDIOLM_TEXT_MODELS,
 USE_OLLAMA_TEXT_MODELS: process.env.USE_OLLAMA_TEXT_MODELS
 };
 const validatedConfig = await validateConfig(config);
-
+logger9.info("Environment configuration validated");
 process.env.USE_LOCAL_AI = String(validatedConfig.USE_LOCAL_AI);
 process.env.USE_STUDIOLM_TEXT_MODELS = String(validatedConfig.USE_STUDIOLM_TEXT_MODELS);
 process.env.USE_OLLAMA_TEXT_MODELS = String(validatedConfig.USE_OLLAMA_TEXT_MODELS);
-
+logger9.success("Environment initialization complete");
 } catch (error) {
-
+logger9.error("Environment validation failed:", {
 error: error instanceof Error ? error.message : String(error),
 stack: error instanceof Error ? error.stack : void 0
 });
@@ -2853,7 +2517,7 @@ var LocalAIManager = class _LocalAIManager {
 */
 async initializeOllama() {
 try {
-
+logger9.info("Initializing Ollama models...");
 if (!this.ollamaManager) {
 throw new Error("Ollama manager not created - cannot initialize");
 }
@@ -2861,9 +2525,9 @@ var LocalAIManager = class _LocalAIManager {
 if (!this.ollamaManager.isInitialized()) {
 throw new Error("Ollama initialization failed - models not properly loaded");
 }
-
+logger9.success("Ollama initialization complete");
 } catch (error) {
-
+logger9.error("Ollama initialization failed:", {
 error: error instanceof Error ? error.message : String(error),
 stack: error instanceof Error ? error.stack : void 0,
 timestamp: (/* @__PURE__ */ new Date()).toISOString()
@@ -2878,7 +2542,7 @@ var LocalAIManager = class _LocalAIManager {
 */
 async initializeStudioLM() {
 try {
-
+logger9.info("Initializing StudioLM models...");
 if (!this.studioLMManager) {
 throw new Error("StudioLM manager not created - cannot initialize");
 }
@@ -2887,9 +2551,9 @@ var LocalAIManager = class _LocalAIManager {
 throw new Error("StudioLM initialization failed - models not properly loaded");
 }
 this.studioLMInitialized = true;
-
+logger9.success("StudioLM initialization complete");
 } catch (error) {
-
+logger9.error("StudioLM initialization failed:", {
 error: error instanceof Error ? error.message : String(error),
 stack: error instanceof Error ? error.stack : void 0,
 timestamp: (/* @__PURE__ */ new Date()).toISOString()
@@ -2904,12 +2568,12 @@ var LocalAIManager = class _LocalAIManager {
 * @returns A Promise that resolves to a boolean indicating whether the model download was successful.
 */
 async downloadModel(modelType) {
-const modelSpec = modelType ===
-const modelPath = modelType ===
+const modelSpec = modelType === ModelType2.TEXT_LARGE ? MODEL_SPECS.medium : MODEL_SPECS.small;
+const modelPath = modelType === ModelType2.TEXT_LARGE ? this.mediumModelPath : this.modelPath;
 try {
 return await this.downloadManager.downloadModel(modelSpec, modelPath);
 } catch (error) {
-
+logger9.error("Model download failed:", {
 error: error instanceof Error ? error.message : String(error),
 modelPath
 });
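
`downloadModel` now selects its spec and target path with inline ternaries on the imported `ModelType2` enum: TEXT_LARGE maps to the "medium" spec, everything else to "small". A minimal sketch of that selection; the spec objects and paths here are hypothetical placeholders:

```ts
// Sketch of the spec/path selection in downloadModel above.
// The spec values and paths are hypothetical placeholders, not the real ones.
type ModelTypeName = "TEXT_SMALL" | "TEXT_LARGE";

interface ModelSpec { repo: string; file: string }

const MODEL_SPECS: Record<"small" | "medium", ModelSpec> = {
  small: { repo: "example/small-model", file: "small.gguf" },
  medium: { repo: "example/medium-model", file: "medium.gguf" },
};

function selectModel(modelType: ModelTypeName) {
  // TEXT_LARGE gets the "medium" spec; every other type gets "small".
  const spec = modelType === "TEXT_LARGE" ? MODEL_SPECS.medium : MODEL_SPECS.small;
  const modelPath = modelType === "TEXT_LARGE" ? "/models/medium.gguf" : "/models/small.gguf";
  return { spec, modelPath };
}

console.log(selectModel("TEXT_LARGE").spec.repo); // "example/medium-model"
```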
@@ -2926,14 +2590,14 @@ var LocalAIManager = class _LocalAIManager {
 const platformManager = getPlatformManager();
 await platformManager.initialize();
 const capabilities = platformManager.getCapabilities();
-
+logger9.info("Platform capabilities detected:", {
 platform: capabilities.platform,
 gpu: capabilities.gpu?.type || "none",
 recommendedModel: capabilities.recommendedModelSize,
 supportedBackends: capabilities.supportedBackends
 });
 } catch (error) {
-
+logger9.warn("Platform detection failed:", error);
 }
 }
 /**
@@ -2942,8 +2606,8 @@ var LocalAIManager = class _LocalAIManager {
 * @param {ModelTypeName} modelType - The type of model to initialize (default: ModelType.TEXT_SMALL)
 * @returns {Promise<void>} A promise that resolves when initialization is complete or rejects if an error occurs
 */
-async initialize(modelType =
-if (modelType ===
+async initialize(modelType = ModelType2.TEXT_SMALL) {
+if (modelType === ModelType2.TEXT_LARGE) {
 await this.lazyInitMediumModel();
 } else {
 await this.lazyInitSmallModel();
@@ -2956,17 +2620,17 @@ var LocalAIManager = class _LocalAIManager {
 */
 async initializeEmbedding() {
 try {
-
-
+logger9.info("Initializing embedding model...");
+logger9.info("Models directory:", this.modelsDir);
 if (!fs5.existsSync(this.modelsDir)) {
-
+logger9.warn("Models directory does not exist, creating it:", this.modelsDir);
 fs5.mkdirSync(this.modelsDir, { recursive: true });
 }
 if (!this.embeddingModel) {
-
+logger9.info("Creating new FlagEmbedding instance with BGESmallENV15 model");
 const barLength = 30;
 const emptyBar = "\u25B1".repeat(barLength);
-
+logger9.info(`Downloading embedding model: ${emptyBar} 0%`);
 this.embeddingModel = await FlagEmbedding.init({
 cacheDir: this.modelsDir,
 model: EmbeddingModel.BGESmallENV15,
@@ -2974,11 +2638,11 @@ var LocalAIManager = class _LocalAIManager {
 showDownloadProgress: false
 });
 const completedBar = "\u25B0".repeat(barLength);
-
-
+logger9.info(`Downloading embedding model: ${completedBar} 100%`);
+logger9.success("FlagEmbedding instance created successfully");
 }
 } catch (error) {
-
+logger9.error("Embedding initialization failed with details:", {
 error: error instanceof Error ? error.message : String(error),
 stack: error instanceof Error ? error.stack : void 0,
 modelsDir: this.modelsDir,
@@ -2996,7 +2660,7 @@ var LocalAIManager = class _LocalAIManager {
 async generateTextOllamaStudio(params) {
 try {
 const modelConfig = this.getTextModelSource();
-
+logger9.info("generateTextOllamaStudio called with:", {
 modelSource: modelConfig.source,
 modelType: params.modelType,
 studioLMInitialized: this.studioLMInitialized,
@@ -3006,32 +2670,32 @@ var LocalAIManager = class _LocalAIManager {
 });
 if (modelConfig.source === "studiolm") {
 if (process.env.USE_STUDIOLM_TEXT_MODELS !== "true") {
-
+logger9.warn(
 "StudioLM requested but disabled in environment, falling back to local models"
 );
 return this.generateText(params);
 }
 if (!this.studioLMManager) {
-
+logger9.warn("StudioLM manager not initialized, falling back to local models");
 return this.generateText(params);
 }
 if (!this.studioLMInitialized) {
-
+logger9.info("StudioLM not initialized, initializing now...");
 await this.initializeStudioLM();
 }
 return await this.studioLMManager.generateText(params, this.studioLMInitialized);
 }
 if (modelConfig.source === "ollama") {
 if (process.env.USE_OLLAMA_TEXT_MODELS !== "true") {
-
+logger9.warn("Ollama requested but disabled in environment, falling back to local models");
 return this.generateText(params);
 }
 if (!this.ollamaManager) {
-
+logger9.warn("Ollama manager not initialized, falling back to local models");
 return this.generateText(params);
 }
 if (!this.ollamaInitialized && !this.ollamaManager.isInitialized()) {
-
+logger9.info("Initializing Ollama in generateTextOllamaStudio");
 await this.ollamaManager.initialize();
 this.ollamaInitialized = true;
 }
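
The routing above degrades gracefully: an external backend (StudioLM or Ollama) is used only when its env flag is the literal string "true" and its manager exists; otherwise the call falls back to the bundled local model. A condensed sketch of that decision; the `Backend` shape is a hypothetical simplification:

```ts
// Condensed sketch of the fallback routing in generateTextOllamaStudio above.
// The Backend shape is a hypothetical simplification of the real managers.
interface Backend {
  name: "studiolm" | "ollama";
  envFlag: string;
  manager?: { isInitialized(): boolean };
}

function pickRoute(backend: Backend): "external" | "local" {
  // The env flag must be exactly "true" and a manager must be wired up,
  // otherwise the request falls back to the bundled local model.
  if (process.env[backend.envFlag] !== "true") return "local";
  if (!backend.manager) return "local";
  return "external";
}

const route = pickRoute({ name: "ollama", envFlag: "USE_OLLAMA_TEXT_MODELS" });
console.log(route); // "local" unless USE_OLLAMA_TEXT_MODELS=true and a manager exists
```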
@@ -3039,7 +2703,7 @@ var LocalAIManager = class _LocalAIManager {
 }
 return this.generateText(params);
 } catch (error) {
-
+logger9.error("Text generation with Ollama/StudioLM failed:", {
 error: error instanceof Error ? error.message : String(error),
 stack: error instanceof Error ? error.stack : void 0,
 modelSource: this.getTextModelSource().source
@@ -3053,7 +2717,7 @@ var LocalAIManager = class _LocalAIManager {
 */
 async generateText(params) {
 try {
-if (params.modelType ===
+if (params.modelType === ModelType2.TEXT_LARGE) {
 await this.lazyInitMediumModel();
 if (!this.mediumModel) {
 throw new Error("Medium model initialization failed");
@@ -3084,15 +2748,15 @@ var LocalAIManager = class _LocalAIManager {
 if (!this.chatSession) {
 throw new Error("Failed to create chat session");
 }
-
-
+logger9.info("Created new chat session for model:", params.modelType);
+logger9.info("Incoming prompt structure:", {
 contextLength: params.prompt.length,
 hasAction: params.prompt.includes("action"),
 runtime: !!params.runtime,
 stopSequences: params.stopSequences
 });
 const tokens = await this.tokenizerManager.encode(params.prompt, this.activeModelConfig);
-
+logger9.info("Input tokens:", { count: tokens.length });
 const systemMessage = "You are a helpful AI assistant. Respond to the current request only.";
 await this.chatSession.prompt(systemMessage, {
 maxTokens: 1,
@@ -3110,19 +2774,19 @@ var LocalAIManager = class _LocalAIManager {
 presencePenalty: 0.7
 }
 });
-
+logger9.info("Raw response structure:", {
 responseLength: response.length,
 hasAction: response.includes("action"),
 hasThinkTag: response.includes("<think>")
 });
 if (response.includes("<think>")) {
-
+logger9.info("Cleaning think tags from response");
 response = response.replace(/<think>[\s\S]*?<\/think>\n?/g, "");
-
+logger9.info("Think tags removed from response");
 }
 return response;
 } catch (error) {
-
+logger9.error("Text generation failed:", error);
 throw error;
 }
 }
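
DeepSeek-R1-style models wrap their chain of thought in `<think>...</think>` tags; `generateText` strips them before returning. The same regex in isolation:

```ts
// The think-tag cleanup used by generateText above, in isolation.
// The /g flag plus the non-greedy [\s\S]*? removes every tag pair, including
// multi-line reasoning blocks; the trailing \n? eats one newline after each.
function stripThinkTags(response: string): string {
  return response.replace(/<think>[\s\S]*?<\/think>\n?/g, "");
}

const raw = "<think>\nLet me reason about this...\n</think>\nFinal answer.";
console.log(stripThinkTags(raw)); // "Final answer."
```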
@@ -3135,13 +2799,13 @@ var LocalAIManager = class _LocalAIManager {
 if (!this.embeddingModel) {
 throw new Error("Failed to initialize embedding model");
 }
-
+logger9.info("Generating query embedding...");
 const embedding = await this.embeddingModel.queryEmbed(text);
 const dimensions = embedding.length;
-
+logger9.info("Embedding generation complete", { dimensions });
 return Array.from(embedding);
 } catch (error) {
-
+logger9.error("Embedding generation failed:", {
 error: error instanceof Error ? error.message : String(error),
 stack: error instanceof Error ? error.stack : void 0,
 // Only access text.length if text exists
@@ -3160,7 +2824,7 @@ var LocalAIManager = class _LocalAIManager {
 const dataUrl = `data:${mimeType};base64,${base64}`;
 return await this.visionManager.processImage(dataUrl);
 } catch (error) {
-
+logger9.error("Image description failed:", error);
 throw error;
 }
 }
@@ -3173,7 +2837,7 @@ var LocalAIManager = class _LocalAIManager {
 const result = await this.transcribeManager.transcribe(audioBuffer);
 return result.text;
 } catch (error) {
-
+logger9.error("Audio transcription failed:", {
 error: error instanceof Error ? error.message : String(error),
 bufferSize: audioBuffer.length
 });
@@ -3188,7 +2852,7 @@ var LocalAIManager = class _LocalAIManager {
 await this.lazyInitTTS();
 return await this.ttsManager.generateSpeech(text);
 } catch (error) {
-
+logger9.error("Speech generation failed:", {
 error: error instanceof Error ? error.message : String(error),
 textLength: text.length
 });
@@ -3219,18 +2883,18 @@ var LocalAIManager = class _LocalAIManager {
 try {
 const config = {
 source: "local",
-modelType:
+modelType: ModelType2.TEXT_SMALL
 };
 if (process.env.USE_STUDIOLM_TEXT_MODELS === "true" && this.studioLMManager) {
 config.source = "studiolm";
 } else if (process.env.USE_OLLAMA_TEXT_MODELS === "true" && this.ollamaManager) {
 config.source = "ollama";
 }
-
+logger9.info("Selected text model source:", config);
 return config;
 } catch (error) {
-
-return { source: "local", modelType:
+logger9.error("Error determining text model source:", error);
+return { source: "local", modelType: ModelType2.TEXT_SMALL };
 }
 }
 /**
@@ -3241,11 +2905,11 @@ var LocalAIManager = class _LocalAIManager {
 return Promise.resolve(null);
 }
 if (initPromise) {
-
+logger9.info(`Waiting for ${modelType} initialization to complete...`);
 await initPromise;
 return Promise.resolve(null);
 }
-
+logger9.info(`Lazy initializing ${modelType}...`);
 return initFunction();
 }
 /**
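
Every model in the hunks that follow uses the same lazy-initialization pattern: an `initialized` flag short-circuits repeat calls, an in-flight promise is awaited rather than re-run, and a failed attempt clears the promise so a later call can retry. A self-contained sketch of the pattern; `LazyResource` is a hypothetical illustration, not the plugin's class:

```ts
// Sketch of the promise-guarded lazy initialization used per model above.
// LazyResource is a hypothetical illustration, not the plugin's class.
class LazyResource {
  private initialized = false;
  private initializing: Promise<void> | null = null;

  async ensureInitialized(load: () => Promise<void>): Promise<void> {
    if (this.initialized) return;                     // already done
    if (this.initializing) return this.initializing;  // join the in-flight attempt
    this.initializing = (async () => {
      try {
        await load();
        this.initialized = true;
      } catch (error) {
        this.initializing = null;                     // allow a retry after failure
        throw error;
      }
    })();
    return this.initializing;
  }
}

// Concurrent callers share one load; only the first call triggers it.
const resource = new LazyResource();
await Promise.all([
  resource.ensureInitialized(async () => {}),
  resource.ensureInitialized(async () => {}),
]);
```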
@@ -3257,7 +2921,7 @@ var LocalAIManager = class _LocalAIManager {
 this.smallModelInitializingPromise = (async () => {
 await this.initializeEnvironment();
 await this.checkPlatformCapabilities();
-await this.downloadModel(
+await this.downloadModel(ModelType2.TEXT_SMALL);
 try {
 this.llama = await getLlama2();
 const smallModel = await this.llama.loadModel({
@@ -3272,9 +2936,9 @@ var LocalAIManager = class _LocalAIManager {
 this.ctx = ctx;
 this.sequence = void 0;
 this.smallModelInitialized = true;
-
+logger9.info("Small model initialized successfully");
 } catch (error) {
-
+logger9.error("Failed to initialize small model:", error);
 this.smallModelInitializingPromise = null;
 throw error;
 }
@@ -3292,7 +2956,7 @@ var LocalAIManager = class _LocalAIManager {
 if (!this.llama) {
 await this.lazyInitSmallModel();
 }
-await this.downloadModel(
+await this.downloadModel(ModelType2.TEXT_LARGE);
 try {
 const mediumModel = await this.llama.loadModel({
 gpuLayers: 43,
@@ -3301,9 +2965,9 @@ var LocalAIManager = class _LocalAIManager {
 });
 this.mediumModel = mediumModel;
 this.mediumModelInitialized = true;
-
+logger9.info("Medium model initialized successfully");
 } catch (error) {
-
+logger9.error("Failed to initialize medium model:", error);
 this.mediumModelInitializingPromise = null;
 throw error;
 }
@@ -3321,9 +2985,9 @@ var LocalAIManager = class _LocalAIManager {
 try {
 await this.initializeEmbedding();
 this.embeddingInitialized = true;
-
+logger9.info("Embedding model initialized successfully");
 } catch (error) {
-
+logger9.error("Failed to initialize embedding model:", error);
 this.embeddingInitializingPromise = null;
 throw error;
 }
@@ -3340,9 +3004,9 @@ var LocalAIManager = class _LocalAIManager {
 this.visionInitializingPromise = (async () => {
 try {
 this.visionInitialized = true;
-
+logger9.info("Vision model initialized successfully");
 } catch (error) {
-
+logger9.error("Failed to initialize vision model:", error);
 this.visionInitializingPromise = null;
 throw error;
 }
@@ -3359,9 +3023,9 @@ var LocalAIManager = class _LocalAIManager {
 this.transcriptionInitializingPromise = (async () => {
 try {
 this.transcriptionInitialized = true;
-
+logger9.info("Transcription model initialized successfully");
 } catch (error) {
-
+logger9.error("Failed to initialize transcription model:", error);
 this.transcriptionInitializingPromise = null;
 throw error;
 }
@@ -3378,9 +3042,9 @@ var LocalAIManager = class _LocalAIManager {
 this.ttsInitializingPromise = (async () => {
 try {
 this.ttsInitialized = true;
-
+logger9.info("TTS model initialized successfully");
 } catch (error) {
-
+logger9.error("Failed to initialize TTS model:", error);
 this.ttsInitializingPromise = null;
 throw error;
 }
@@ -3398,9 +3062,9 @@ var LocalAIManager = class _LocalAIManager {
 try {
 await this.initializeOllama();
 this.ollamaInitialized = true;
-
+logger9.info("Ollama initialized successfully");
 } catch (error) {
-
+logger9.error("Failed to initialize Ollama:", error);
 this.ollamaInitializingPromise = null;
 throw error;
 }
@@ -3418,9 +3082,9 @@ var LocalAIManager = class _LocalAIManager {
 try {
 await this.initializeStudioLM();
 this.studioLMInitialized = true;
-
+logger9.info("StudioLM initialized successfully");
 } catch (error) {
-
+logger9.error("Failed to initialize StudioLM:", error);
 this.studioLMInitializingPromise = null;
 throw error;
 }
@@ -3435,10 +3099,10 @@ var localAIPlugin = {
 description: "Local AI plugin using LLaMA models",
 async init() {
 try {
-
-
+logger9.debug("Initializing local-ai plugin...");
+logger9.success("Local AI plugin configuration validated and initialized");
 } catch (error) {
-
+logger9.error("Plugin initialization failed:", {
 error: error instanceof Error ? error.message : String(error),
 stack: error instanceof Error ? error.stack : void 0
 });
@@ -3446,7 +3110,7 @@ var localAIPlugin = {
 }
 },
 models: {
-[
+[ModelType2.TEXT_SMALL]: async (runtime, { prompt, stopSequences = [] }) => {
 try {
 const modelConfig = localAIManager.getTextModelSource();
 if (modelConfig.source !== "local") {
@@ -3454,21 +3118,21 @@ var localAIPlugin = {
 prompt,
 stopSequences,
 runtime,
-modelType:
+modelType: ModelType2.TEXT_SMALL
 });
 }
 return await localAIManager.generateText({
 prompt,
 stopSequences,
 runtime,
-modelType:
+modelType: ModelType2.TEXT_SMALL
 });
 } catch (error) {
-
+logger9.error("Error in TEXT_SMALL handler:", error);
 throw error;
 }
 },
-[
+[ModelType2.TEXT_LARGE]: async (runtime, { prompt, stopSequences = [] }) => {
 try {
 const modelConfig = localAIManager.getTextModelSource();
 if (modelConfig.source !== "local") {
@@ -3476,30 +3140,30 @@ var localAIPlugin = {
 prompt,
 stopSequences,
 runtime,
-modelType:
+modelType: ModelType2.TEXT_LARGE
 });
 }
 return await localAIManager.generateText({
 prompt,
 stopSequences,
 runtime,
-modelType:
+modelType: ModelType2.TEXT_LARGE
 });
 } catch (error) {
-
+logger9.error("Error in TEXT_LARGE handler:", error);
 throw error;
 }
 },
-[
+[ModelType2.TEXT_EMBEDDING]: async (_runtime, params) => {
 const text = params?.text;
 try {
 if (!text) {
-
+logger9.debug("Null or empty text input for embedding, returning zero vector");
 return new Array(384).fill(0);
 }
 return await localAIManager.generateEmbedding(text);
 } catch (error) {
-
+logger9.error("Error in TEXT_EMBEDDING handler:", {
 error: error instanceof Error ? error.message : String(error),
 fullText: text,
 textType: typeof text,
@@ -3508,9 +3172,9 @@ var localAIPlugin = {
 return new Array(384).fill(0);
 }
 },
-[
+[ModelType2.OBJECT_SMALL]: async (runtime, params) => {
 try {
-
+logger9.info("OBJECT_SMALL handler - Processing request:", {
 prompt: params.prompt,
 hasSchema: !!params.schema,
 temperature: params.temperature
@@ -3526,14 +3190,14 @@ var localAIPlugin = {
 prompt: jsonPrompt,
 stopSequences: params.stopSequences,
 runtime,
-modelType:
+modelType: ModelType2.TEXT_SMALL
 });
 } else {
 textResponse = await localAIManager.generateText({
 prompt: jsonPrompt,
 stopSequences: params.stopSequences,
 runtime,
-modelType:
+modelType: ModelType2.TEXT_SMALL
 });
 }
 try {
@@ -3551,17 +3215,17 @@ var localAIPlugin = {
 return text.trim();
 };
 const extractedJsonText = extractJSON(textResponse);
-
+logger9.debug("Extracted JSON text:", extractedJsonText);
 let jsonObject;
 try {
 jsonObject = JSON.parse(extractedJsonText);
 } catch (parseError) {
-
+logger9.debug("Initial JSON parse failed, attempting to fix common issues");
 const fixedJson = extractedJsonText.replace(/:\s*"([^"]*)(?:\n)([^"]*)"/g, ': "$1\\n$2"').replace(/"([^"]*?)[^a-zA-Z0-9\s\.,;:\-_\(\)"'\[\]{}]([^"]*?)"/g, '"$1$2"').replace(/(\s*)(\w+)(\s*):/g, '$1"$2"$3:').replace(/,(\s*[\]}])/g, "$1");
 try {
 jsonObject = JSON.parse(fixedJson);
 } catch (finalError) {
-
+logger9.error("Failed to parse JSON after fixing:", finalError);
 throw new Error("Invalid JSON returned from model");
 }
 }
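
The `fixedJson` chain above attempts four common LLM-output repairs: escaping raw newlines inside string values, stripping stray non-ASCII characters from strings, quoting bare object keys, and removing trailing commas. A readable sketch of the last two repairs, which are the safest of the four; note these are heuristics, applied only after `JSON.parse` has already failed:

```ts
// Readable sketch of two of the JSON repairs chained above.
// These regex fixes are heuristics and can mangle valid JSON in edge cases,
// which is why the code only applies them after JSON.parse has failed once.
function repairJson(text: string): string {
  return text
    .replace(/(\s*)(\w+)(\s*):/g, '$1"$2"$3:') // quote bare keys: {a: 1} -> {"a": 1}
    .replace(/,(\s*[\]}])/g, "$1");            // drop trailing commas: [1, 2,] -> [1, 2]
}

console.log(JSON.parse(repairJson('{name: "eliza", tags: ["local",],}')));
// => { name: 'eliza', tags: [ 'local' ] }
```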
@@ -3573,23 +3237,23 @@ var localAIPlugin = {
 }
 }
 } catch (schemaError) {
-
+logger9.error("Schema validation failed:", schemaError);
 }
 }
 return jsonObject;
 } catch (parseError) {
-
-
+logger9.error("Failed to parse JSON:", parseError);
+logger9.error("Raw response:", textResponse);
 throw new Error("Invalid JSON returned from model");
 }
 } catch (error) {
-
+logger9.error("Error in OBJECT_SMALL handler:", error);
 throw error;
 }
 },
-[
+[ModelType2.OBJECT_LARGE]: async (runtime, params) => {
 try {
-
+logger9.info("OBJECT_LARGE handler - Processing request:", {
 prompt: params.prompt,
 hasSchema: !!params.schema,
 temperature: params.temperature
@@ -3605,14 +3269,14 @@ var localAIPlugin = {
 prompt: jsonPrompt,
 stopSequences: params.stopSequences,
 runtime,
-modelType:
+modelType: ModelType2.TEXT_LARGE
 });
 } else {
 textResponse = await localAIManager.generateText({
 prompt: jsonPrompt,
 stopSequences: params.stopSequences,
 runtime,
-modelType:
+modelType: ModelType2.TEXT_LARGE
 });
 }
 try {
@@ -3634,17 +3298,17 @@ var localAIPlugin = {
 };
 const extractedJsonText = extractJSON(textResponse);
 const cleanedJsonText = cleanupJSON(extractedJsonText);
-
+logger9.debug("Extracted JSON text:", cleanedJsonText);
 let jsonObject;
 try {
 jsonObject = JSON.parse(cleanedJsonText);
 } catch (parseError) {
-
+logger9.debug("Initial JSON parse failed, attempting to fix common issues");
 const fixedJson = cleanedJsonText.replace(/:\s*"([^"]*)(?:\n)([^"]*)"/g, ': "$1\\n$2"').replace(/"([^"]*?)[^a-zA-Z0-9\s\.,;:\-_\(\)"'\[\]{}]([^"]*?)"/g, '"$1$2"').replace(/(\s*)(\w+)(\s*):/g, '$1"$2"$3:').replace(/,(\s*[\]}])/g, "$1");
 try {
 jsonObject = JSON.parse(fixedJson);
 } catch (finalError) {
-
+logger9.error("Failed to parse JSON after fixing:", finalError);
 throw new Error("Invalid JSON returned from model");
 }
 }
@@ -3656,43 +3320,43 @@ var localAIPlugin = {
 }
 }
 } catch (schemaError) {
-
+logger9.error("Schema validation failed:", schemaError);
 }
 }
 return jsonObject;
 } catch (parseError) {
-
-
+logger9.error("Failed to parse JSON:", parseError);
+logger9.error("Raw response:", textResponse);
 throw new Error("Invalid JSON returned from model");
 }
 } catch (error) {
-
+logger9.error("Error in OBJECT_LARGE handler:", error);
 throw error;
 }
 },
-[
+[ModelType2.TEXT_TOKENIZER_ENCODE]: async (_runtime, { text }) => {
 try {
 const manager = localAIManager.getTokenizerManager();
 const config = localAIManager.getActiveModelConfig();
 return await manager.encode(text, config);
 } catch (error) {
-
+logger9.error("Error in TEXT_TOKENIZER_ENCODE handler:", error);
 throw error;
 }
 },
-[
+[ModelType2.TEXT_TOKENIZER_DECODE]: async (_runtime, { tokens }) => {
 try {
 const manager = localAIManager.getTokenizerManager();
 const config = localAIManager.getActiveModelConfig();
 return await manager.decode(tokens, config);
 } catch (error) {
-
+logger9.error("Error in TEXT_TOKENIZER_DECODE handler:", error);
 throw error;
 }
 },
-[
+[ModelType2.IMAGE_DESCRIPTION]: async (_runtime, imageUrl) => {
 try {
-
+logger9.info("Processing image from URL:", imageUrl);
 const response = await fetch(imageUrl);
 if (!response.ok) {
 throw new Error(`Failed to fetch image: ${response.statusText}`);
@@ -3701,32 +3365,32 @@ var localAIPlugin = {
 const mimeType = response.headers.get("content-type") || "image/jpeg";
 return await localAIManager.describeImage(buffer, mimeType);
 } catch (error) {
-
+logger9.error("Error in IMAGE_DESCRIPTION handler:", {
 error: error instanceof Error ? error.message : String(error),
 imageUrl
 });
 throw error;
 }
 },
-[
+[ModelType2.TRANSCRIPTION]: async (_runtime, audioBuffer) => {
 try {
-
+logger9.info("Processing audio transcription:", {
 bufferSize: audioBuffer.length
 });
 return await localAIManager.transcribeAudio(audioBuffer);
 } catch (error) {
-
+logger9.error("Error in TRANSCRIPTION handler:", {
 error: error instanceof Error ? error.message : String(error),
 bufferSize: audioBuffer.length
 });
 throw error;
 }
 },
-[
+[ModelType2.TEXT_TO_SPEECH]: async (_runtime, text) => {
 try {
 return await localAIManager.generateSpeech(text);
 } catch (error) {
-
+logger9.error("Error in TEXT_TO_SPEECH handler:", {
 error: error instanceof Error ? error.message : String(error),
 textLength: text.length
 });
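
The plugin registers its capabilities as a map from `ModelType` keys to async handlers, which the runtime dispatches through `runtime.useModel` (exercised by the test suite in the hunks that follow). A minimal sketch of that dispatch shape; the registry type here is a simplification, not the `@elizaos/core` definition:

```ts
// Minimal sketch of the handler-map dispatch pattern used above.
// The registry type is a simplification, not the @elizaos/core definition.
type Handler = (runtime: unknown, params: unknown) => Promise<unknown>;

const models: Record<string, Handler> = {
  TEXT_SMALL: async (_runtime, params) => `small-model output for ${JSON.stringify(params)}`,
  TEXT_EMBEDDING: async (_runtime, params) =>
    params == null ? new Array(384).fill(0) : [0.1, 0.2 /* ... */],
};

// The runtime resolves the handler by model type and awaits it.
async function useModel(type: string, params: unknown): Promise<unknown> {
  const handler = models[type];
  if (!handler) throw new Error(`No handler registered for ${type}`);
  return handler(undefined, params);
}

console.log(await useModel("TEXT_EMBEDDING", null)); // zero vector of length 384
```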
@@ -3742,21 +3406,21 @@ var localAIPlugin = {
 name: "local_ai_test_initialization",
 fn: async (runtime) => {
 try {
-
-const result = await runtime.useModel(
+logger9.info("Starting initialization test");
+const result = await runtime.useModel(ModelType2.TEXT_SMALL, {
 prompt: "Debug Mode: Test initialization. Respond with 'Initialization successful' if you can read this.",
 stopSequences: []
 });
-
+logger9.info("Model response:", result);
 if (!result || typeof result !== "string") {
 throw new Error("Invalid response from model");
 }
 if (!result.includes("successful")) {
 throw new Error("Model response does not indicate success");
 }
-
+logger9.success("Initialization test completed successfully");
 } catch (error) {
-
+logger9.error("Initialization test failed:", {
 error: error instanceof Error ? error.message : String(error),
 stack: error instanceof Error ? error.stack : void 0
 });
@@ -3768,21 +3432,21 @@ var localAIPlugin = {
 name: "local_ai_test_text_large",
 fn: async (runtime) => {
 try {
-
-const result = await runtime.useModel(
+logger9.info("Starting TEXT_LARGE model test");
+const result = await runtime.useModel(ModelType2.TEXT_LARGE, {
 prompt: "Debug Mode: Generate a one-sentence response about artificial intelligence.",
 stopSequences: []
 });
-
+logger9.info("Large model response:", result);
 if (!result || typeof result !== "string") {
 throw new Error("Invalid response from large model");
 }
 if (result.length < 10) {
 throw new Error("Response too short, possible model failure");
 }
-
+logger9.success("TEXT_LARGE test completed successfully");
 } catch (error) {
-
+logger9.error("TEXT_LARGE test failed:", {
 error: error instanceof Error ? error.message : String(error),
 stack: error instanceof Error ? error.stack : void 0
 });
@@ -3794,11 +3458,11 @@ var localAIPlugin = {
 name: "local_ai_test_text_embedding",
 fn: async (runtime) => {
 try {
-
-const embedding = await runtime.useModel(
+logger9.info("Starting TEXT_EMBEDDING test");
+const embedding = await runtime.useModel(ModelType2.TEXT_EMBEDDING, {
 text: "This is a test of the text embedding model."
 });
-
+logger9.info("Embedding generated with dimensions:", embedding.length);
 if (!Array.isArray(embedding)) {
 throw new Error("Embedding is not an array");
 }
@@ -3808,13 +3472,13 @@ var localAIPlugin = {
 if (embedding.some((val) => typeof val !== "number")) {
 throw new Error("Embedding contains non-numeric values");
 }
-const nullEmbedding = await runtime.useModel(
+const nullEmbedding = await runtime.useModel(ModelType2.TEXT_EMBEDDING, null);
 if (!Array.isArray(nullEmbedding) || nullEmbedding.some((val) => val !== 0)) {
 throw new Error("Null input did not return zero vector");
 }
-
+logger9.success("TEXT_EMBEDDING test completed successfully");
 } catch (error) {
-
+logger9.error("TEXT_EMBEDDING test failed:", {
 error: error instanceof Error ? error.message : String(error),
 stack: error instanceof Error ? error.stack : void 0
 });
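
The embedding test exercises the zero-vector contract seen in the TEXT_EMBEDDING handler: null input must return a 384-dimension vector of zeros (the BGE-small-en-v1.5 embedding size) rather than throwing. A sketch of an equivalent check:

```ts
// Sketch of the zero-vector contract checked by the embedding test above.
// Null/empty input yields a 384-dimension zero vector (the BGE-small-en-v1.5
// embedding size) so downstream similarity code never sees undefined.
function isZeroVector(embedding: unknown): boolean {
  return Array.isArray(embedding) &&
    embedding.length === 384 &&
    embedding.every((val) => val === 0);
}

console.log(isZeroVector(new Array(384).fill(0))); // true
console.log(isZeroVector([0.12, -0.03]));          // false
```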
@@ -3826,10 +3490,10 @@ var localAIPlugin = {
 name: "local_ai_test_tokenizer_encode",
 fn: async (runtime) => {
 try {
-
+logger9.info("Starting TEXT_TOKENIZER_ENCODE test");
 const text = "Hello tokenizer test!";
-const tokens = await runtime.useModel(
-
+const tokens = await runtime.useModel(ModelType2.TEXT_TOKENIZER_ENCODE, { text });
+logger9.info("Encoded tokens:", { count: tokens.length });
 if (!Array.isArray(tokens)) {
 throw new Error("Tokens output is not an array");
 }
@@ -3839,9 +3503,9 @@ var localAIPlugin = {
 if (tokens.some((token) => !Number.isInteger(token))) {
 throw new Error("Tokens contain non-integer values");
 }
-
+logger9.success("TEXT_TOKENIZER_ENCODE test completed successfully");
 } catch (error) {
-
+logger9.error("TEXT_TOKENIZER_ENCODE test failed:", {
 error: error instanceof Error ? error.message : String(error),
 stack: error instanceof Error ? error.stack : void 0
 });
@@ -3853,24 +3517,24 @@ var localAIPlugin = {
 name: "local_ai_test_tokenizer_decode",
 fn: async (runtime) => {
 try {
-
+logger9.info("Starting TEXT_TOKENIZER_DECODE test");
 const originalText = "Hello tokenizer test!";
-const tokens = await runtime.useModel(
+const tokens = await runtime.useModel(ModelType2.TEXT_TOKENIZER_ENCODE, {
 text: originalText
 });
-const decodedText = await runtime.useModel(
+const decodedText = await runtime.useModel(ModelType2.TEXT_TOKENIZER_DECODE, {
 tokens
 });
-
+logger9.info("Round trip tokenization:", {
 original: originalText,
 decoded: decodedText
 });
 if (typeof decodedText !== "string") {
 throw new Error("Decoded output is not a string");
 }
-
+logger9.success("TEXT_TOKENIZER_DECODE test completed successfully");
 } catch (error) {
-
+logger9.error("TEXT_TOKENIZER_DECODE test failed:", {
 error: error instanceof Error ? error.message : String(error),
 stack: error instanceof Error ? error.stack : void 0
 });
@@ -3882,10 +3546,10 @@ var localAIPlugin = {
 name: "local_ai_test_image_description",
 fn: async (runtime) => {
 try {
-
+logger9.info("Starting IMAGE_DESCRIPTION test");
 const imageUrl = "https://raw.githubusercontent.com/microsoft/FLAML/main/website/static/img/flaml.png";
-const result = await runtime.useModel(
-
+const result = await runtime.useModel(ModelType2.IMAGE_DESCRIPTION, imageUrl);
+logger9.info("Image description result:", result);
 if (!result || typeof result !== "object") {
 throw new Error("Invalid response format");
 }
@@ -3895,9 +3559,9 @@ var localAIPlugin = {
 if (typeof result.title !== "string" || typeof result.description !== "string") {
 throw new Error("Title or description is not a string");
 }
-
+logger9.success("IMAGE_DESCRIPTION test completed successfully");
 } catch (error) {
-
+logger9.error("IMAGE_DESCRIPTION test failed:", {
 error: error instanceof Error ? error.message : String(error),
 stack: error instanceof Error ? error.stack : void 0
 });
@@ -3909,7 +3573,7 @@ var localAIPlugin = {
 name: "local_ai_test_transcription",
 fn: async (runtime) => {
 try {
-
+logger9.info("Starting TRANSCRIPTION test");
 const audioData = new Uint8Array([
 82,
 73,
@@ -3933,14 +3597,14 @@ var localAIPlugin = {
 // "fmt "
 ]);
 const audioBuffer = Buffer.from(audioData);
-const transcription = await runtime.useModel(
-
+const transcription = await runtime.useModel(ModelType2.TRANSCRIPTION, audioBuffer);
+logger9.info("Transcription result:", transcription);
 if (typeof transcription !== "string") {
 throw new Error("Transcription result is not a string");
 }
-
+logger9.success("TRANSCRIPTION test completed successfully");
 } catch (error) {
-
+logger9.error("TRANSCRIPTION test failed:", {
 error: error instanceof Error ? error.message : String(error),
 stack: error instanceof Error ? error.stack : void 0
 });
@@ -3952,9 +3616,9 @@ var localAIPlugin = {
 name: "local_ai_test_text_to_speech",
 fn: async (runtime) => {
 try {
-
+logger9.info("Starting TEXT_TO_SPEECH test");
 const testText = "This is a test of the text to speech system.";
-const audioStream = await runtime.useModel(
+const audioStream = await runtime.useModel(ModelType2.TEXT_TO_SPEECH, testText);
 if (!(audioStream instanceof Readable2)) {
 throw new Error("TTS output is not a readable stream");
 }
@@ -3972,9 +3636,9 @@ var localAIPlugin = {
 });
 audioStream.on("error", reject);
 });
-
+logger9.success("TEXT_TO_SPEECH test completed successfully");
 } catch (error) {
-
+logger9.error("TEXT_TO_SPEECH test failed:", {
 error: error instanceof Error ? error.message : String(error),
 stack: error instanceof Error ? error.stack : void 0
 });