@elizaos/plugin-knowledge 1.0.6 → 1.0.7
- package/README.md +156 -100
- package/dist/.vite/manifest.json +2 -2
- package/dist/assets/{index-DlJcnv-R.js → index-BMDX6vvo.js} +24 -24
- package/dist/assets/index-o1rKIvUo.css +1 -0
- package/dist/index.d.ts +3 -3
- package/dist/index.html +2 -2
- package/dist/index.js +531 -164
- package/dist/index.js.map +1 -1
- package/package.json +1 -1
- package/dist/assets/index-DiVvUMt0.css +0 -1
package/dist/index.js
CHANGED
@@ -9,7 +9,7 @@ import {
 } from "./chunk-536BD2UA.js";

 // src/index.ts
-import { logger as
+import { logger as logger7 } from "@elizaos/core";

 // src/types.ts
 import z from "zod";
@@ -18,7 +18,7 @@ var ModelConfigSchema = z.object({
   // NOTE: If EMBEDDING_PROVIDER is not specified, the plugin automatically assumes
   // plugin-openai is being used and will use OPENAI_EMBEDDING_MODEL and
   // OPENAI_EMBEDDING_DIMENSIONS for configuration
-  EMBEDDING_PROVIDER: z.enum(["openai", "google"]),
+  EMBEDDING_PROVIDER: z.enum(["openai", "google"]).optional(),
   TEXT_PROVIDER: z.enum(["openai", "anthropic", "openrouter", "google"]).optional(),
   // API keys
   OPENAI_API_KEY: z.string().optional(),
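The substantive change in this hunk is the `.optional()` on `EMBEDDING_PROVIDER`: the schema no longer rejects a config that leaves the provider unset and instead lets an already-loaded plugin supply embeddings. A minimal sketch of the difference in plain zod (object shapes illustrative):

    import z from "zod";

    // Before 1.0.7: an unset provider fails validation.
    const strict = z.object({ EMBEDDING_PROVIDER: z.enum(["openai", "google"]) });
    strict.safeParse({}).success; // false

    // From 1.0.7: an unset provider parses as undefined, deferring embeddings
    // to the runtime (e.g. plugin-openai or plugin-google-genai).
    const relaxed = z.object({ EMBEDDING_PROVIDER: z.enum(["openai", "google"]).optional() });
    relaxed.safeParse({}).success; // true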
@@ -66,14 +66,14 @@ function validateModelConfig(runtime) {
     const openaiApiKey2 = getSetting("OPENAI_API_KEY");
     const openaiEmbeddingModel = getSetting("OPENAI_EMBEDDING_MODEL");
     if (openaiApiKey2 && openaiEmbeddingModel) {
-      logger.
+      logger.debug("EMBEDDING_PROVIDER not specified, using configuration from plugin-openai");
     } else {
-      logger.
-      "EMBEDDING_PROVIDER not specified
+      logger.debug(
+        "EMBEDDING_PROVIDER not specified. Assuming embeddings are provided by another plugin (e.g., plugin-google-genai)."
       );
     }
   }
-  const finalEmbeddingProvider = embeddingProvider
+  const finalEmbeddingProvider = embeddingProvider;
   const textEmbeddingModel = getSetting("TEXT_EMBEDDING_MODEL") || getSetting("OPENAI_EMBEDDING_MODEL") || "text-embedding-3-small";
   const embeddingDimension = getSetting("EMBEDDING_DIMENSION") || getSetting("OPENAI_EMBEDDING_DIMENSIONS") || "1536";
   const openaiApiKey = getSetting("OPENAI_API_KEY");
@@ -106,23 +106,21 @@ function validateModelConfig(runtime) {
   }
 }
 function validateConfigRequirements(config, assumePluginOpenAI) {
-
-
-
-
-
-
-
-
-
-
-
-
-    throw new Error("OPENAI_EMBEDDING_MODEL is required when using plugin-openai configuration");
-  }
+  const embeddingProvider = config.EMBEDDING_PROVIDER;
+  if (embeddingProvider === "openai" && !config.OPENAI_API_KEY) {
+    throw new Error('OPENAI_API_KEY is required when EMBEDDING_PROVIDER is set to "openai"');
+  }
+  if (embeddingProvider === "google" && !config.GOOGLE_API_KEY) {
+    throw new Error('GOOGLE_API_KEY is required when EMBEDDING_PROVIDER is set to "google"');
+  }
+  if (!embeddingProvider) {
+    logger.debug("No EMBEDDING_PROVIDER specified. Embeddings will be handled by the runtime.");
+  }
+  if (assumePluginOpenAI && config.OPENAI_API_KEY && !config.TEXT_EMBEDDING_MODEL) {
+    throw new Error("OPENAI_EMBEDDING_MODEL is required when using plugin-openai configuration");
   }
   if (config.CTX_KNOWLEDGE_ENABLED) {
-    logger.
+    logger.debug("Contextual Knowledge is enabled. Validating text generation settings...");
     if (config.TEXT_PROVIDER === "openai" && !config.OPENAI_API_KEY) {
       throw new Error('OPENAI_API_KEY is required when TEXT_PROVIDER is set to "openai"');
     }
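Read together, the rewritten checks make a missing `EMBEDDING_PROVIDER` a soft case (a debug log) instead of a hard failure; only a provider named without its matching key still throws. A sketch of the outcomes (config objects illustrative):

    validateConfigRequirements({}, false);
    // ok: no provider set, embeddings deferred to the runtime

    validateConfigRequirements({ EMBEDDING_PROVIDER: "openai" }, false);
    // throws: OPENAI_API_KEY is required when EMBEDDING_PROVIDER is set to "openai"

    validateConfigRequirements({ EMBEDDING_PROVIDER: "google", GOOGLE_API_KEY: "g-key" }, false);
    // ok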
@@ -138,18 +136,18 @@ function validateConfigRequirements(config, assumePluginOpenAI) {
     if (config.TEXT_PROVIDER === "openrouter") {
       const modelName = config.TEXT_MODEL?.toLowerCase() || "";
       if (modelName.includes("claude") || modelName.includes("gemini")) {
-        logger.
+        logger.debug(
           `Using ${modelName} with OpenRouter. This configuration supports document caching for improved performance.`
         );
       }
     }
   } else {
     if (assumePluginOpenAI) {
-      logger.
-      "Contextual Knowledge is disabled.
+      logger.debug(
+        "Contextual Knowledge is disabled. Embeddings will be handled by the runtime (e.g., plugin-openai, plugin-google-genai)."
       );
     } else {
-      logger.
+      logger.debug("Contextual Knowledge is disabled. Using configured embedding provider.");
     }
   }
 }
@@ -192,7 +190,7 @@ async function getProviderRateLimits(runtime) {
 // src/service.ts
 import {
   createUniqueUuid,
-  logger as
+  logger as logger4,
   MemoryType as MemoryType2,
   ModelType as ModelType2,
   Semaphore,
@@ -204,7 +202,7 @@ import {
 import {
   MemoryType,
   ModelType,
-  logger as
+  logger as logger3,
   splitChunks
 } from "@elizaos/core";

@@ -687,12 +685,342 @@ ${chunkContent}`;
   return generatedContext.trim();
 }

+// src/llm.ts
+import { generateText as aiGenerateText, embed } from "ai";
+import { createOpenAI } from "@ai-sdk/openai";
+import { createAnthropic } from "@ai-sdk/anthropic";
+import { createOpenRouter } from "@openrouter/ai-sdk-provider";
+import { google } from "@ai-sdk/google";
+import { logger as logger2 } from "@elizaos/core";
+async function generateText(prompt, system, overrideConfig) {
+  const config = validateModelConfig();
+  const provider = overrideConfig?.provider || config.TEXT_PROVIDER;
+  const modelName = overrideConfig?.modelName || config.TEXT_MODEL;
+  const maxTokens = overrideConfig?.maxTokens || config.MAX_OUTPUT_TOKENS;
+  const autoCacheContextualRetrieval = overrideConfig?.autoCacheContextualRetrieval !== false;
+  try {
+    switch (provider) {
+      case "anthropic":
+        return await generateAnthropicText(prompt, system, modelName, maxTokens);
+      case "openai":
+        return await generateOpenAIText(prompt, system, modelName, maxTokens);
+      case "openrouter":
+        return await generateOpenRouterText(
+          prompt,
+          system,
+          modelName,
+          maxTokens,
+          overrideConfig?.cacheDocument,
+          overrideConfig?.cacheOptions,
+          autoCacheContextualRetrieval
+        );
+      case "google":
+        return await generateGoogleText(prompt, system, modelName, maxTokens, config);
+      default:
+        throw new Error(`Unsupported text provider: ${provider}`);
+    }
+  } catch (error) {
+    logger2.error(`[LLM Service - ${provider}] Error generating text with ${modelName}:`, error);
+    throw error;
+  }
+}
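`generateText` is the single entry point of the new module: it resolves provider, model, and token budget from `validateModelConfig()` unless an override is passed, then dispatches to a provider-specific helper. A hypothetical internal call (override field names are taken from the dispatcher above; the model id is illustrative):

    const result = await generateText(
      "Summarize the attached document.",
      "You are a concise technical writer.",
      {
        provider: "openrouter",            // falls back to config.TEXT_PROVIDER
        modelName: "anthropic/claude-3.5-sonnet",
        maxTokens: 512,
        autoCacheContextualRetrieval: true // default; false skips <document> auto-caching
      }
    );
    console.log(result.text, result.usage);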
+async function generateAnthropicText(prompt, system, modelName, maxTokens) {
+  const config = validateModelConfig();
+  const anthropic = createAnthropic({
+    apiKey: config.ANTHROPIC_API_KEY,
+    baseURL: config.ANTHROPIC_BASE_URL
+  });
+  const modelInstance = anthropic(modelName);
+  const result = await aiGenerateText({
+    model: modelInstance,
+    prompt,
+    system,
+    temperature: 0.3,
+    maxTokens
+  });
+  logger2.debug(
+    `[LLM Service - Anthropic] Text generated with ${modelName}. Usage: ${result.usage.promptTokens} prompt tokens, ${result.usage.completionTokens} completion tokens.`
+  );
+  return result;
+}
+async function generateOpenAIText(prompt, system, modelName, maxTokens) {
+  const config = validateModelConfig();
+  const openai = createOpenAI({
+    apiKey: config.OPENAI_API_KEY,
+    baseURL: config.OPENAI_BASE_URL
+  });
+  const modelInstance = openai.chat(modelName);
+  const result = await aiGenerateText({
+    model: modelInstance,
+    prompt,
+    system,
+    temperature: 0.3,
+    maxTokens
+  });
+  logger2.debug(
+    `[LLM Service - OpenAI] Text generated with ${modelName}. Usage: ${result.usage.promptTokens} prompt tokens, ${result.usage.completionTokens} completion tokens.`
+  );
+  return result;
+}
+async function generateGoogleText(prompt, system, modelName, maxTokens, config) {
+  const googleProvider = google;
+  if (config.GOOGLE_API_KEY) {
+    process.env.GOOGLE_GENERATIVE_AI_API_KEY = config.GOOGLE_API_KEY;
+  }
+  const modelInstance = googleProvider(modelName);
+  const result = await aiGenerateText({
+    model: modelInstance,
+    prompt,
+    system,
+    temperature: 0.3,
+    maxTokens
+  });
+  logger2.debug(
+    `[LLM Service - Google] Text generated with ${modelName}. Usage: ${result.usage.promptTokens} prompt tokens, ${result.usage.completionTokens} completion tokens.`
+  );
+  return result;
+}
+async function generateOpenRouterText(prompt, system, modelName, maxTokens, cacheDocument, cacheOptions, autoCacheContextualRetrieval = true) {
+  const config = validateModelConfig();
+  const openrouter = createOpenRouter({
+    apiKey: config.OPENROUTER_API_KEY,
+    baseURL: config.OPENROUTER_BASE_URL
+  });
+  const modelInstance = openrouter.chat(modelName);
+  const isClaudeModel = modelName.toLowerCase().includes("claude");
+  const isGeminiModel = modelName.toLowerCase().includes("gemini");
+  const isGemini25Model = modelName.toLowerCase().includes("gemini-2.5");
+  const supportsCaching = isClaudeModel || isGeminiModel;
+  let documentForCaching = cacheDocument;
+  if (!documentForCaching && autoCacheContextualRetrieval && supportsCaching) {
+    const docMatch = prompt.match(/<document>([\s\S]*?)<\/document>/);
+    if (docMatch && docMatch[1]) {
+      documentForCaching = docMatch[1].trim();
+      logger2.debug(
+        `[LLM Service - OpenRouter] Auto-detected document for caching (${documentForCaching.length} chars)`
+      );
+    }
+  }
+  if (documentForCaching && supportsCaching) {
+    const effectiveCacheOptions = cacheOptions || { type: "ephemeral" };
+    let promptText = prompt;
+    if (promptText.includes("<document>")) {
+      promptText = promptText.replace(/<document>[\s\S]*?<\/document>/, "").trim();
+    }
+    if (isClaudeModel) {
+      return await generateClaudeWithCaching(
+        promptText,
+        system,
+        modelInstance,
+        modelName,
+        maxTokens,
+        documentForCaching
+      );
+    } else if (isGeminiModel) {
+      return await generateGeminiWithCaching(
+        promptText,
+        system,
+        modelInstance,
+        modelName,
+        maxTokens,
+        documentForCaching,
+        isGemini25Model
+      );
+    }
+  }
+  logger2.debug("[LLM Service - OpenRouter] Using standard request without caching");
+  return await generateStandardOpenRouterText(prompt, system, modelInstance, modelName, maxTokens);
+}
+async function generateClaudeWithCaching(promptText, system, modelInstance, modelName, maxTokens, documentForCaching) {
+  logger2.debug(
+    `[LLM Service - OpenRouter] Using explicit prompt caching with Claude model ${modelName}`
+  );
+  const messages = [
+    // System message with cached document (if system is provided)
+    system ? {
+      role: "system",
+      content: [
+        {
+          type: "text",
+          text: system
+        },
+        {
+          type: "text",
+          text: documentForCaching,
+          cache_control: {
+            type: "ephemeral"
+          }
+        }
+      ]
+    } : (
+      // User message with cached document (if no system message)
+      {
+        role: "user",
+        content: [
+          {
+            type: "text",
+            text: "Document for context:"
+          },
+          {
+            type: "text",
+            text: documentForCaching,
+            cache_control: {
+              type: "ephemeral"
+            }
+          },
+          {
+            type: "text",
+            text: promptText
+          }
+        ]
+      }
+    ),
+    // Only add user message if system was provided (otherwise we included user above)
+    system ? {
+      role: "user",
+      content: [
+        {
+          type: "text",
+          text: promptText
+        }
+      ]
+    } : null
+  ].filter(Boolean);
+  logger2.debug("[LLM Service - OpenRouter] Using Claude-specific caching structure");
+  const result = await aiGenerateText({
+    model: modelInstance,
+    messages,
+    temperature: 0.3,
+    maxTokens,
+    providerOptions: {
+      openrouter: {
+        usage: {
+          include: true
+        }
+      }
+    }
+  });
+  logCacheMetrics(result);
+  logger2.debug(
+    `[LLM Service - OpenRouter] Text generated with ${modelName} using Claude caching. Usage: ${result.usage.promptTokens} prompt tokens, ${result.usage.completionTokens} completion tokens.`
+  );
+  return result;
+}
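Note that only the document block carries `cache_control: { type: "ephemeral" }`, so the large, reusable part of the prompt is what gets cached across chunk requests. For a call with a system prompt the builder reduces to this shape (placeholder strings, shown only to make the cache boundary visible):

    const messages = [
      {
        role: "system",
        content: [
          { type: "text", text: "You are a contextualizer." },
          { type: "text", text: "<full document text>", cache_control: { type: "ephemeral" } }
        ]
      },
      { role: "user", content: [{ type: "text", text: "Situate this chunk..." }] }
    ];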
+async function generateGeminiWithCaching(promptText, system, modelInstance, modelName, maxTokens, documentForCaching, isGemini25Model) {
+  const usingImplicitCaching = isGemini25Model;
+  const estimatedDocTokens = Math.ceil(documentForCaching.length / 4);
+  const minTokensForImplicitCache = modelName.toLowerCase().includes("flash") ? 1028 : 2048;
+  const likelyTriggersCaching = estimatedDocTokens >= minTokensForImplicitCache;
+  if (usingImplicitCaching) {
+    logger2.debug(
+      `[LLM Service - OpenRouter] Using Gemini 2.5 implicit caching with model ${modelName}`
+    );
+    logger2.debug(
+      `[LLM Service - OpenRouter] Gemini 2.5 models automatically cache large prompts (no cache_control needed)`
+    );
+    if (likelyTriggersCaching) {
+      logger2.debug(
+        `[LLM Service - OpenRouter] Document size ~${estimatedDocTokens} tokens exceeds minimum ${minTokensForImplicitCache} tokens for implicit caching`
+      );
+    } else {
+      logger2.debug(
+        `[LLM Service - OpenRouter] Warning: Document size ~${estimatedDocTokens} tokens may not meet minimum ${minTokensForImplicitCache} token threshold for implicit caching`
+      );
+    }
+  } else {
+    logger2.debug(
+      `[LLM Service - OpenRouter] Using standard prompt format with Gemini model ${modelName}`
+    );
+    logger2.debug(
+      `[LLM Service - OpenRouter] Note: Only Gemini 2.5 models support automatic implicit caching`
+    );
+  }
+  const geminiSystemPrefix = system ? `${system}
+
+` : "";
+  const geminiPrompt = `${geminiSystemPrefix}${documentForCaching}
+
+${promptText}`;
+  const result = await aiGenerateText({
+    model: modelInstance,
+    prompt: geminiPrompt,
+    temperature: 0.3,
+    maxTokens,
+    providerOptions: {
+      openrouter: {
+        usage: {
+          include: true
+          // Include usage info to see cache metrics
+        }
+      }
+    }
+  });
+  logCacheMetrics(result);
+  logger2.debug(
+    `[LLM Service - OpenRouter] Text generated with ${modelName} using ${usingImplicitCaching ? "implicit" : "standard"} caching. Usage: ${result.usage.promptTokens} prompt tokens, ${result.usage.completionTokens} completion tokens.`
+  );
+  return result;
+}
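The implicit-cache check is a plain length heuristic: roughly four characters per token, compared against the model's minimum cacheable prompt size. A worked instance (document size illustrative):

    const documentForCaching = "x".repeat(6000);                         // ~6,000 chars
    const estimatedDocTokens = Math.ceil(documentForCaching.length / 4); // 1500
    // gemini-2.5-flash: minimum 1028 tokens -> 1500 >= 1028, implicit caching likely
    // gemini-2.5-pro:   minimum 2048 tokens -> 1500 <  2048, warning logged instead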
+async function generateStandardOpenRouterText(prompt, system, modelInstance, modelName, maxTokens) {
+  const result = await aiGenerateText({
+    model: modelInstance,
+    prompt,
+    system,
+    temperature: 0.3,
+    maxTokens,
+    providerOptions: {
+      openrouter: {
+        usage: {
+          include: true
+          // Include usage info to see cache metrics
+        }
+      }
+    }
+  });
+  logger2.debug(
+    `[LLM Service - OpenRouter] Text generated with ${modelName}. Usage: ${result.usage.promptTokens} prompt tokens, ${result.usage.completionTokens} completion tokens.`
+  );
+  return result;
+}
+function logCacheMetrics(result) {
+  if (result.usage && result.usage.cacheTokens) {
+    logger2.debug(
+      `[LLM Service - OpenRouter] Cache metrics - Cached tokens: ${result.usage.cacheTokens}, Cache discount: ${result.usage.cacheDiscount}`
+    );
+  }
+}
+
 // src/document-processor.ts
 var ctxKnowledgeEnabled = process.env.CTX_KNOWLEDGE_ENABLED === "true" || process.env.CTX_KNOWLEDGE_ENABLED === "True";
+function shouldUseCustomLLM() {
+  const textProvider = process.env.TEXT_PROVIDER;
+  const textModel = process.env.TEXT_MODEL;
+  if (!textProvider || !textModel) {
+    return false;
+  }
+  switch (textProvider.toLowerCase()) {
+    case "openrouter":
+      return !!process.env.OPENROUTER_API_KEY;
+    case "openai":
+      return !!process.env.OPENAI_API_KEY;
+    case "anthropic":
+      return !!process.env.ANTHROPIC_API_KEY;
+    case "google":
+      return !!process.env.GOOGLE_API_KEY;
+    default:
+      return false;
+  }
+}
+var useCustomLLM = shouldUseCustomLLM();
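`useCustomLLM` is decided once at module load, purely from environment variables: a provider, a model, and the provider's matching API key must all be present. A hypothetical environment that flips the document processor onto the custom-LLM path (model id and key are placeholders):

    process.env.CTX_KNOWLEDGE_ENABLED = "true";
    process.env.TEXT_PROVIDER = "openrouter";
    process.env.TEXT_MODEL = "google/gemini-2.5-flash";
    process.env.OPENROUTER_API_KEY = "sk-or-...";
    shouldUseCustomLLM(); // -> true; without the key it returns false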
 if (ctxKnowledgeEnabled) {
-
+  logger3.info(`Document processor starting with Contextual Knowledge ENABLED`);
+  if (useCustomLLM) {
+    logger3.info(`Using Custom LLM with provider: ${process.env.TEXT_PROVIDER}, model: ${process.env.TEXT_MODEL}`);
+  } else {
+    logger3.info(`Using ElizaOS Runtime LLM (default behavior)`);
+  }
 } else {
-
+  logger3.info(`Document processor starting with Contextual Knowledge DISABLED`);
 }
 async function processFragmentsSynchronously({
   runtime,
@@ -705,15 +1033,15 @@ async function processFragmentsSynchronously({
   worldId
 }) {
   if (!fullDocumentText || fullDocumentText.trim() === "") {
-
+    logger3.warn(`No text content available to chunk for document ${documentId}.`);
     return 0;
   }
   const chunks = await splitDocumentIntoChunks(fullDocumentText);
   if (chunks.length === 0) {
-
+    logger3.warn(`No chunks generated from text for ${documentId}. No fragments to save.`);
     return 0;
   }
-
+  logger3.info(`Split content into ${chunks.length} chunks for document ${documentId}`);
   const providerLimits = await getProviderRateLimits();
   const CONCURRENCY_LIMIT = Math.min(30, providerLimits.maxConcurrentRequests || 30);
   const rateLimiter = createRateLimiter(providerLimits.requestsPerMinute || 60);
@@ -731,11 +1059,11 @@ async function processFragmentsSynchronously({
     rateLimiter
   });
   if (failedCount > 0) {
-
+    logger3.warn(
       `Failed to process ${failedCount} chunks out of ${chunks.length} for document ${documentId}`
     );
   }
-
+  logger3.info(`Finished saving ${savedCount} fragments for document ${documentId}.`);
   return savedCount;
 }
 async function extractTextFromDocument(fileBuffer, contentType, originalFilename) {
@@ -744,15 +1072,15 @@ async function extractTextFromDocument(fileBuffer, contentType, originalFilename
   }
   try {
     if (contentType === "application/pdf") {
-
+      logger3.debug(`Extracting text from PDF: ${originalFilename}`);
       return await convertPdfToTextFromBuffer(fileBuffer, originalFilename);
     } else {
-
+      logger3.debug(`Extracting text from non-PDF: ${originalFilename} (Type: ${contentType})`);
       if (contentType.includes("text/") || contentType.includes("application/json") || contentType.includes("application/xml")) {
         try {
           return fileBuffer.toString("utf8");
         } catch (textError) {
-
+          logger3.warn(
             `Failed to decode ${originalFilename} as UTF-8, falling back to binary extraction`
           );
         }
@@ -760,7 +1088,7 @@ async function extractTextFromDocument(fileBuffer, contentType, originalFilename
       return await extractTextFromFileBuffer(fileBuffer, contentType, originalFilename);
     }
   } catch (error) {
-
+    logger3.error(`Error extracting text from ${originalFilename}: ${error.message}`);
     throw new Error(`Failed to extract text from ${originalFilename}: ${error.message}`);
   }
 }
@@ -805,7 +1133,7 @@ async function splitDocumentIntoChunks(documentText) {
   const tokenChunkOverlap = DEFAULT_CHUNK_OVERLAP_TOKENS;
   const targetCharChunkSize = Math.round(tokenChunkSize * DEFAULT_CHARS_PER_TOKEN);
   const targetCharChunkOverlap = Math.round(tokenChunkOverlap * DEFAULT_CHARS_PER_TOKEN);
-
+  logger3.debug(
     `Using core splitChunks with settings: tokenChunkSize=${tokenChunkSize}, tokenChunkOverlap=${tokenChunkOverlap}, charChunkSize=${targetCharChunkSize}, charChunkOverlap=${targetCharChunkOverlap}`
   );
   return await splitChunks(documentText, tokenChunkSize, tokenChunkOverlap);
@@ -829,7 +1157,7 @@ async function processAndSaveFragments({
   for (let i = 0; i < chunks.length; i += concurrencyLimit) {
     const batchChunks = chunks.slice(i, i + concurrencyLimit);
     const batchOriginalIndices = Array.from({ length: batchChunks.length }, (_, k) => i + k);
-
+    logger3.debug(
       `Processing batch of ${batchChunks.length} chunks for document ${documentId}. Starting original index: ${batchOriginalIndices[0]}, batch ${Math.floor(i / concurrencyLimit) + 1}/${Math.ceil(chunks.length / concurrencyLimit)}`
     );
     const contextualizedChunks = await getContextualizedChunks(
@@ -849,13 +1177,13 @@ async function processAndSaveFragments({
     if (!result.success) {
       failedCount++;
       failedChunks.push(originalChunkIndex);
-
+      logger3.warn(`Failed to process chunk ${originalChunkIndex} for document ${documentId}`);
       continue;
     }
     const contextualizedChunkText = result.text;
     const embedding = result.embedding;
     if (!embedding || embedding.length === 0) {
-
+      logger3.warn(
        `Zero vector detected for chunk ${originalChunkIndex} (document ${documentId}). Embedding: ${JSON.stringify(result.embedding)}`
       );
       failedCount++;
@@ -880,12 +1208,12 @@ async function processAndSaveFragments({
       }
     };
     await runtime.createMemory(fragmentMemory, "knowledge");
-
+    logger3.debug(
       `Saved fragment ${originalChunkIndex + 1} for document ${documentId} (Fragment ID: ${fragmentMemory.id})`
     );
     savedCount++;
   } catch (saveError) {
-
+    logger3.error(
       `Error saving chunk ${originalChunkIndex} to database: ${saveError.message}`,
       saveError.stack
     );
@@ -900,8 +1228,26 @@ async function processAndSaveFragments({
   return { savedCount, failedCount, failedChunks };
 }
 async function generateEmbeddingsForChunks(runtime, contextualizedChunks, rateLimiter) {
+  const validChunks = contextualizedChunks.filter((chunk) => chunk.success);
+  const failedChunks = contextualizedChunks.filter((chunk) => !chunk.success);
+  if (validChunks.length === 0) {
+    return failedChunks.map((chunk) => ({
+      success: false,
+      index: chunk.index,
+      error: new Error("Chunk processing failed"),
+      text: chunk.contextualizedText
+    }));
+  }
   return await Promise.all(
     contextualizedChunks.map(async (contextualizedChunk) => {
+      if (!contextualizedChunk.success) {
+        return {
+          success: false,
+          index: contextualizedChunk.index,
+          error: new Error("Chunk processing failed"),
+          text: contextualizedChunk.contextualizedText
+        };
+      }
       await rateLimiter();
       try {
         const generateEmbeddingOperation = async () => {
@@ -929,7 +1275,7 @@ async function generateEmbeddingsForChunks(runtime, contextualizedChunks, rateLi
           text: contextualizedChunk.contextualizedText
         };
       } catch (error) {
-
+        logger3.error(
           `Error generating embedding for chunk ${contextualizedChunk.index}: ${error.message}`
         );
         return {
@@ -944,7 +1290,7 @@ async function generateEmbeddingsForChunks(runtime, contextualizedChunks, rateLi
 }
 async function getContextualizedChunks(runtime, fullDocumentText, chunks, contentType, batchOriginalIndices) {
   if (ctxKnowledgeEnabled && fullDocumentText) {
-
+    logger3.debug(`Generating contexts for ${chunks.length} chunks`);
     return await generateContextsInBatch(
       runtime,
       fullDocumentText,
@@ -989,17 +1335,31 @@ async function generateContextsInBatch(runtime, fullDocumentText, chunks, conten
   try {
     let llmResponse;
     const generateTextOperation = async () => {
-      if (
-
-
-
-
-
-
+      if (useCustomLLM) {
+        if (item.usesCaching) {
+          return await generateText(
+            item.promptText,
+            item.systemPrompt,
+            {
+              cacheDocument: item.fullDocumentTextForContext,
+              cacheOptions: { type: "ephemeral" },
+              autoCacheContextualRetrieval: true
+            }
+          );
+        } else {
+          return await generateText(item.prompt);
+        }
       } else {
-
-
-
+        if (item.usesCaching) {
+          return await runtime.useModel(ModelType.TEXT_LARGE, {
+            prompt: item.promptText,
+            system: item.systemPrompt
+          });
+        } else {
+          return await runtime.useModel(ModelType.TEXT_LARGE, {
+            prompt: item.prompt
+          });
+        }
       }
     };
     llmResponse = await withRateLimitRetry(
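The net effect of this hunk is a per-chunk dispatch: the new `src/llm.ts` path when `useCustomLLM` is set, the agent runtime's model otherwise, with `item.usesCaching` deciding whether the prompt is split into `promptText` plus `systemPrompt`. Condensed (names as in the hunk above; the response's `text` field is read the same way on both paths, as the next hunk shows):

    const llmResponse = useCustomLLM
      ? await generateText(item.prompt)                                        // custom LLM
      : await runtime.useModel(ModelType.TEXT_LARGE, { prompt: item.prompt }); // runtime LLM
    const generatedContext = llmResponse.text;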
@@ -1008,7 +1368,7 @@ async function generateContextsInBatch(runtime, fullDocumentText, chunks, conten
     );
     const generatedContext = llmResponse.text;
     const contextualizedText = getChunkWithContext(item.chunkText, generatedContext);
-
+    logger3.debug(
       `Context added for chunk ${item.originalIndex}. New length: ${contextualizedText.length}`
     );
     return {
@@ -1017,7 +1377,7 @@ async function generateContextsInBatch(runtime, fullDocumentText, chunks, conten
       index: item.originalIndex
     };
   } catch (error) {
-
+    logger3.error(
       `Error generating context for chunk ${item.originalIndex}: ${error.message}`,
       error.stack
     );
@@ -1038,7 +1398,7 @@ function prepareContextPrompts(chunks, fullDocumentText, contentType, batchIndic
   if (isUsingCacheCapableModel) {
     const cachingPromptInfo = contentType ? getCachingPromptForMimeType(contentType, chunkText) : getCachingContextualizationPrompt(chunkText);
     if (cachingPromptInfo.prompt.startsWith("Error:")) {
-
+      logger3.warn(
         `Skipping contextualization for chunk ${originalIndex} due to: ${cachingPromptInfo.prompt}`
       );
       return {
@@ -1060,7 +1420,7 @@ function prepareContextPrompts(chunks, fullDocumentText, contentType, batchIndic
   } else {
     const prompt = contentType ? getPromptForMimeType(contentType, fullDocumentText, chunkText) : getContextualizationPrompt(fullDocumentText, chunkText);
     if (prompt.startsWith("Error:")) {
-
+      logger3.warn(`Skipping contextualization for chunk ${originalIndex} due to: ${prompt}`);
       return {
         prompt: null,
         originalIndex,
@@ -1078,7 +1438,7 @@ function prepareContextPrompts(chunks, fullDocumentText, contentType, batchIndic
     };
   }
 } catch (error) {
-
+  logger3.error(
     `Error preparing prompt for chunk ${originalIndex}: ${error.message}`,
     error.stack
   );
@@ -1099,7 +1459,7 @@ async function generateEmbeddingWithValidation(runtime, text) {
   });
   const embedding = Array.isArray(embeddingResult) ? embeddingResult : embeddingResult?.embedding;
   if (!embedding || embedding.length === 0) {
-
+    logger3.warn(`Zero vector detected. Embedding result: ${JSON.stringify(embedding)}`);
     return {
       embedding: null,
       success: false,
@@ -1117,12 +1477,12 @@ async function withRateLimitRetry(operation, errorContext, retryDelay) {
   } catch (error) {
     if (error.status === 429) {
       const delay = retryDelay || error.headers?.["retry-after"] || 5;
-
+      logger3.warn(`Rate limit hit for ${errorContext}. Retrying after ${delay}s`);
       await new Promise((resolve) => setTimeout(resolve, delay * 1e3));
       try {
         return await operation();
       } catch (retryError) {
-
+        logger3.error(`Failed after retry for ${errorContext}: ${retryError.message}`);
         throw retryError;
       }
     }
@@ -1141,7 +1501,7 @@ function createRateLimiter(requestsPerMinute) {
   const oldestRequest = requestTimes[0];
   const timeToWait = Math.max(0, oldestRequest + intervalMs - now);
   if (timeToWait > 0) {
-
+    logger3.debug(`Rate limiting applied, waiting ${timeToWait}ms before next request`);
     await new Promise((resolve) => setTimeout(resolve, timeToWait));
   }
 }
@@ -1168,9 +1528,10 @@ var KnowledgeService = class _KnowledgeService extends Service {
       if (typeof value === "string") return value.toLowerCase() === "true";
       return false;
     };
+    const loadDocsOnStartup = parseBooleanEnv(config?.LOAD_DOCS_ON_STARTUP) || process.env.LOAD_DOCS_ON_STARTUP === "true";
     this.knowledgeConfig = {
       CTX_KNOWLEDGE_ENABLED: parseBooleanEnv(config?.CTX_KNOWLEDGE_ENABLED),
-      LOAD_DOCS_ON_STARTUP:
+      LOAD_DOCS_ON_STARTUP: loadDocsOnStartup,
       MAX_INPUT_TOKENS: config?.MAX_INPUT_TOKENS,
       MAX_OUTPUT_TOKENS: config?.MAX_OUTPUT_TOKENS,
       EMBEDDING_PROVIDER: config?.EMBEDDING_PROVIDER,
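`LOAD_DOCS_ON_STARTUP` now has an environment fallback in addition to the service config. A sketch (values illustrative):

    // Before 1.0.7 only the service config was consulted; the raw env var now works too.
    process.env.LOAD_DOCS_ON_STARTUP = "true";
    const loadDocsOnStartup =
      parseBooleanEnv(config?.LOAD_DOCS_ON_STARTUP)   // undefined here
      || process.env.LOAD_DOCS_ON_STARTUP === "true"; // -> true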
@@ -1178,34 +1539,37 @@ var KnowledgeService = class _KnowledgeService extends Service {
       TEXT_EMBEDDING_MODEL: config?.TEXT_EMBEDDING_MODEL
     };
     this.config = { ...this.knowledgeConfig };
-
+    logger4.info(
       `KnowledgeService initialized for agent ${this.runtime.agentId} with config:`,
       this.knowledgeConfig
     );
     if (this.knowledgeConfig.LOAD_DOCS_ON_STARTUP) {
+      logger4.info("LOAD_DOCS_ON_STARTUP is enabled. Loading documents from docs folder...");
       this.loadInitialDocuments().catch((error) => {
-
+        logger4.error("Error during initial document loading in KnowledgeService:", error);
       });
+    } else {
+      logger4.info("LOAD_DOCS_ON_STARTUP is disabled. Skipping automatic document loading.");
     }
   }
   async loadInitialDocuments() {
-
+    logger4.info(
      `KnowledgeService: Checking for documents to load on startup for agent ${this.runtime.agentId}`
    );
    try {
      await new Promise((resolve) => setTimeout(resolve, 1e3));
      const result = await loadDocsFromPath(this, this.runtime.agentId);
      if (result.successful > 0) {
-
+        logger4.info(
          `KnowledgeService: Loaded ${result.successful} documents from docs folder on startup for agent ${this.runtime.agentId}`
        );
      } else {
-
+        logger4.info(
          `KnowledgeService: No new documents found to load on startup for agent ${this.runtime.agentId}`
        );
      }
    } catch (error) {
-
+      logger4.error(
        `KnowledgeService: Error loading documents on startup for agent ${this.runtime.agentId}:`,
        error
      );
@@ -1217,23 +1581,23 @@ var KnowledgeService = class _KnowledgeService extends Service {
    * @returns Initialized Knowledge service
    */
   static async start(runtime) {
-
+    logger4.info(`Starting Knowledge service for agent: ${runtime.agentId}`);
     const service = new _KnowledgeService(runtime);
     if (service.runtime.character?.knowledge && service.runtime.character.knowledge.length > 0) {
-
+      logger4.info(
         `KnowledgeService: Processing ${service.runtime.character.knowledge.length} character knowledge items.`
       );
       const stringKnowledge = service.runtime.character.knowledge.filter(
         (item) => typeof item === "string"
       );
       await service.processCharacterKnowledge(stringKnowledge).catch((err) => {
-
+        logger4.error(
           `KnowledgeService: Error processing character knowledge during startup: ${err.message}`,
           err
         );
       });
     } else {
-
+      logger4.info(
         `KnowledgeService: No character knowledge to process for agent ${runtime.agentId}.`
       );
     }
@@ -1244,10 +1608,10 @@ var KnowledgeService = class _KnowledgeService extends Service {
    * @param runtime Agent runtime
    */
   static async stop(runtime) {
-
+    logger4.info(`Stopping Knowledge service for agent: ${runtime.agentId}`);
     const service = runtime.getService(_KnowledgeService.serviceType);
     if (!service) {
-
+      logger4.warn(`KnowledgeService not found for agent ${runtime.agentId} during stop.`);
     }
     if (service instanceof _KnowledgeService) {
       await service.stop();
@@ -1257,7 +1621,7 @@ var KnowledgeService = class _KnowledgeService extends Service {
    * Stop the service
    */
   async stop() {
-
+    logger4.info(`Knowledge service stopping for agent: ${this.runtime.agentId}`);
   }
   /**
    * Add knowledge to the system
@@ -1266,13 +1630,13 @@ var KnowledgeService = class _KnowledgeService extends Service {
    */
   async addKnowledge(options) {
     const agentId = options.agentId || this.runtime.agentId;
-
+    logger4.info(
       `KnowledgeService processing document for agent: ${agentId}, file: ${options.originalFilename}, type: ${options.contentType}`
     );
     try {
       const existingDocument = await this.runtime.getMemoryById(options.clientDocumentId);
       if (existingDocument && existingDocument.metadata?.type === MemoryType2.DOCUMENT) {
-
+        logger4.info(
           `Document ${options.originalFilename} with ID ${options.clientDocumentId} already exists. Skipping processing.`
         );
         const fragments = await this.runtime.getMemories({
@@ -1291,7 +1655,7 @@ var KnowledgeService = class _KnowledgeService extends Service {
       };
     }
   } catch (error) {
-
+    logger4.debug(
       `Document ${options.clientDocumentId} not found or error checking existence, proceeding with processing: ${error instanceof Error ? error.message : String(error)}`
     );
   }
@@ -1315,7 +1679,7 @@ var KnowledgeService = class _KnowledgeService extends Service {
   }) {
     const agentId = passedAgentId || this.runtime.agentId;
     try {
-
+      logger4.debug(
         `KnowledgeService: Processing document ${originalFilename} (type: ${contentType}) via processDocument for agent: ${agentId}`
       );
       let fileBuffer = null;
@@ -1326,7 +1690,7 @@ var KnowledgeService = class _KnowledgeService extends Service {
       try {
         fileBuffer = Buffer.from(content, "base64");
       } catch (e) {
-
+        logger4.error(
           `KnowledgeService: Failed to convert base64 to buffer for ${originalFilename}: ${e.message}`
         );
         throw new Error(`Invalid base64 content for PDF file ${originalFilename}`);
@@ -1337,7 +1701,7 @@ var KnowledgeService = class _KnowledgeService extends Service {
       try {
         fileBuffer = Buffer.from(content, "base64");
       } catch (e) {
-
+        logger4.error(
           `KnowledgeService: Failed to convert base64 to buffer for ${originalFilename}: ${e.message}`
         );
         throw new Error(`Invalid base64 content for binary file ${originalFilename}`);
@@ -1354,11 +1718,11 @@ var KnowledgeService = class _KnowledgeService extends Service {
       if (invalidCharCount > 0 && invalidCharCount / textLength > 0.1) {
         throw new Error("Decoded content contains too many invalid characters");
       }
-
+      logger4.debug(`Successfully decoded base64 content for text file: ${originalFilename}`);
       extractedText = decodedText;
       documentContentToStore = decodedText;
     } catch (e) {
-
+      logger4.error(
         `Failed to decode base64 for ${originalFilename}: ${e instanceof Error ? e.message : String(e)}`
       );
       throw new Error(
@@ -1366,7 +1730,7 @@ var KnowledgeService = class _KnowledgeService extends Service {
       );
     }
   } else {
-
+    logger4.debug(`Treating content as plain text for file: ${originalFilename}`);
     extractedText = content;
     documentContentToStore = content;
   }
@@ -1375,7 +1739,7 @@ var KnowledgeService = class _KnowledgeService extends Service {
   const noTextError = new Error(
     `KnowledgeService: No text content extracted from ${originalFilename} (type: ${contentType}).`
   );
-
+  logger4.warn(noTextError.message);
   throw noTextError;
 }
 const documentMemory = createDocumentMemory({
@@ -1401,14 +1765,14 @@ var KnowledgeService = class _KnowledgeService extends Service {
   roomId: roomId || agentId,
   entityId: entityId || agentId
 };
-
+logger4.debug(
   `KnowledgeService: Creating memory with agentId=${agentId}, entityId=${entityId}, roomId=${roomId}, this.runtime.agentId=${this.runtime.agentId}`
 );
-
+logger4.debug(
   `KnowledgeService: memoryWithScope agentId=${memoryWithScope.agentId}, entityId=${memoryWithScope.entityId}`
 );
 await this.runtime.createMemory(memoryWithScope, "documents");
-
+logger4.debug(
   `KnowledgeService: Stored document ${originalFilename} (Memory ID: ${memoryWithScope.id})`
 );
 const fragmentCount = await processFragmentsSynchronously({
@@ -1422,7 +1786,7 @@ var KnowledgeService = class _KnowledgeService extends Service {
   entityId: entityId || agentId,
   worldId: worldId || agentId
 });
-
+logger4.info(
   `KnowledgeService: Document ${originalFilename} processed with ${fragmentCount} fragments for agent ${agentId}`
 );
 return {
@@ -1431,7 +1795,7 @@ var KnowledgeService = class _KnowledgeService extends Service {
     fragmentCount
   };
 } catch (error) {
-
+  logger4.error(
     `KnowledgeService: Error processing document ${originalFilename}: ${error.message}`,
     error.stack
   );
@@ -1440,7 +1804,7 @@ var KnowledgeService = class _KnowledgeService extends Service {
 }
 // --- Knowledge methods moved from AgentRuntime ---
 async handleProcessingError(error, context) {
-
+  logger4.error(`KnowledgeService: Error ${context}:`, error?.message || error || "Unknown error");
   throw error;
 }
 async checkExistingKnowledge(knowledgeId) {
@@ -1448,9 +1812,9 @@ var KnowledgeService = class _KnowledgeService extends Service {
   return !!existingDocument;
 }
 async getKnowledge(message, scope) {
-
+  logger4.debug("KnowledgeService: getKnowledge called for message id: " + message.id);
   if (!message?.content?.text || message?.content?.text.trim().length === 0) {
-
+    logger4.warn("KnowledgeService: Invalid or empty message content for knowledge query.");
     return [];
   }
   const embedding = await this.runtime.useModel(ModelType2.TEXT_EMBEDDING, {
@@ -1481,7 +1845,7 @@ var KnowledgeService = class _KnowledgeService extends Service {
 }
 async processCharacterKnowledge(items) {
   await new Promise((resolve) => setTimeout(resolve, 1e3));
-
+  logger4.info(
     `KnowledgeService: Processing ${items.length} character knowledge items for agent ${this.runtime.agentId}`
   );
   const processingPromises = items.map(async (item) => {
@@ -1489,12 +1853,12 @@ var KnowledgeService = class _KnowledgeService extends Service {
   try {
     const knowledgeId = createUniqueUuid(this.runtime.agentId + item, item);
     if (await this.checkExistingKnowledge(knowledgeId)) {
-
+      logger4.debug(
        `KnowledgeService: Character knowledge item with ID ${knowledgeId} already exists. Skipping.`
       );
       return;
     }
-
+    logger4.debug(
      `KnowledgeService: Processing character knowledge for ${this.runtime.character?.name} - ${item.slice(0, 100)}`
     );
     let metadata = {
@@ -1545,7 +1909,7 @@ var KnowledgeService = class _KnowledgeService extends Service {
     }
   });
   await Promise.all(processingPromises);
-
+  logger4.info(
     `KnowledgeService: Finished processing character knowledge for agent ${this.runtime.agentId}.`
   );
 }
@@ -1565,7 +1929,7 @@ var KnowledgeService = class _KnowledgeService extends Service {
   worldId: scope?.worldId ?? this.runtime.agentId,
   entityId: scope?.entityId ?? this.runtime.agentId
 };
-
+logger4.debug(`KnowledgeService: _internalAddKnowledge called for item ID ${item.id}`);
 const documentMemory = {
   id: item.id,
   // This ID should be the unique ID for the document being added.
@@ -1587,7 +1951,7 @@ var KnowledgeService = class _KnowledgeService extends Service {
 };
 const existingDocument = await this.runtime.getMemoryById(item.id);
 if (existingDocument) {
-
+  logger4.debug(
     `KnowledgeService: Document ${item.id} already exists in _internalAddKnowledge, updating...`
   );
   await this.runtime.updateMemory({
@@ -1611,13 +1975,13 @@ var KnowledgeService = class _KnowledgeService extends Service {
     await this.processDocumentFragment(fragment);
     fragmentsProcessed++;
   } catch (error) {
-
+    logger4.error(
       `KnowledgeService: Error processing fragment ${fragment.id} for document ${item.id}:`,
       error
     );
   }
 }
-
+logger4.debug(
   `KnowledgeService: Processed ${fragmentsProcessed}/${fragments.length} fragments for document ${item.id}.`
 );
 }
@@ -1626,7 +1990,7 @@ var KnowledgeService = class _KnowledgeService extends Service {
   await this.runtime.addEmbeddingToMemory(fragment);
   await this.runtime.createMemory(fragment, "knowledge");
 } catch (error) {
-
+  logger4.error(
     `KnowledgeService: Error processing fragment ${fragment.id}:`,
     error instanceof Error ? error.message : String(error)
   );
@@ -1691,7 +2055,7 @@ var KnowledgeService = class _KnowledgeService extends Service {
  */
 async deleteMemory(memoryId) {
   await this.runtime.deleteMemory(memoryId);
-
+  logger4.info(
     `KnowledgeService: Deleted memory ${memoryId} for agent ${this.runtime.agentId}. Assumed it was a document or related fragment.`
   );
 }
@@ -2634,7 +2998,7 @@ var KnowledgeTestSuite = class {
 var tests_default = new KnowledgeTestSuite();

 // src/actions.ts
-import { logger as
+import { logger as logger5, stringToUuid } from "@elizaos/core";
 import * as fs2 from "fs";
 import * as path2 from "path";
 var processKnowledgeAction = {
@@ -2694,7 +3058,7 @@ var processKnowledgeAction = {
   const hasPath = pathPattern.test(text);
   const service = runtime.getService(KnowledgeService.serviceType);
   if (!service) {
-
+    logger5.warn(
       "Knowledge service not available for PROCESS_KNOWLEDGE action"
     );
     return false;
@@ -2779,7 +3143,7 @@ var processKnowledgeAction = {
     await callback(response);
   }
 } catch (error) {
-
+  logger5.error("Error in PROCESS_KNOWLEDGE action:", error);
   const errorResponse = {
     text: `I encountered an error while processing the knowledge: ${error instanceof Error ? error.message : "Unknown error"}`
   };
@@ -2890,7 +3254,7 @@ ${formattedResults}`
     await callback(response);
   }
 } catch (error) {
-
+  logger5.error("Error in SEARCH_KNOWLEDGE action:", error);
   const errorResponse = {
     text: `I encountered an error while searching the knowledge base: ${error instanceof Error ? error.message : "Unknown error"}`
   };
@@ -2903,7 +3267,7 @@ ${formattedResults}`
 var knowledgeActions = [processKnowledgeAction, searchKnowledgeAction];

 // src/routes.ts
-import { createUniqueUuid as createUniqueUuid2, logger as
+import { createUniqueUuid as createUniqueUuid2, logger as logger6, ModelType as ModelType4 } from "@elizaos/core";
 import fs3 from "fs";
 import path3 from "path";
 import multer from "multer";
@@ -2954,7 +3318,7 @@ var cleanupFile = (filePath) => {
     try {
       fs3.unlinkSync(filePath);
     } catch (error) {
-
+      logger6.error(`Error cleaning up file ${filePath}:`, error);
     }
   }
 };
@@ -2981,7 +3345,7 @@ async function uploadKnowledgeHandler(req, res, runtime) {
   }
   const agentId = req.body.agentId || req.query.agentId;
   if (!agentId) {
-
+    logger6.error("[KNOWLEDGE UPLOAD HANDLER] No agent ID provided in request");
     return sendError(
       res,
       400,
@@ -2990,13 +3354,13 @@ async function uploadKnowledgeHandler(req, res, runtime) {
   );
 }
 const worldId = req.body.worldId || agentId;
-
+logger6.info(`[KNOWLEDGE UPLOAD HANDLER] Processing upload for agent: ${agentId}`);
 const processingPromises = files.map(async (file, index) => {
   let knowledgeId;
   const originalFilename = file.originalname;
   const filePath = file.path;
   knowledgeId = req.body?.documentIds && req.body.documentIds[index] || req.body?.documentId || createUniqueUuid2(runtime, `knowledge-${originalFilename}-${Date.now()}`);
-
+  logger6.debug(
     `[KNOWLEDGE UPLOAD HANDLER] File: ${originalFilename}, Agent ID: ${agentId}, World ID: ${worldId}, Knowledge ID: ${knowledgeId}`
   );
   try {
@@ -3030,7 +3394,7 @@ async function uploadKnowledgeHandler(req, res, runtime) {
     status: "success"
   };
 } catch (fileError) {
-
+  logger6.error(
     `[KNOWLEDGE UPLOAD HANDLER] Error processing file ${file.originalname}: ${fileError}`
   );
   cleanupFile(filePath);
@@ -3051,7 +3415,7 @@ async function uploadKnowledgeHandler(req, res, runtime) {
 }
 const agentId = req.body.agentId || req.query.agentId;
 if (!agentId) {
-
+  logger6.error("[KNOWLEDGE URL HANDLER] No agent ID provided in request");
   return sendError(
     res,
     400,
@@ -3059,7 +3423,7 @@ async function uploadKnowledgeHandler(req, res, runtime) {
     "Agent ID is required for uploading knowledge from URLs"
   );
 }
-
+logger6.info(`[KNOWLEDGE URL HANDLER] Processing URL upload for agent: ${agentId}`);
 const processingPromises = fileUrls.map(async (fileUrl) => {
   try {
     const normalizedUrl = normalizeS3Url(fileUrl);
@@ -3068,7 +3432,7 @@ async function uploadKnowledgeHandler(req, res, runtime) {
 const pathSegments = urlObject.pathname.split("/");
 const encodedFilename = pathSegments[pathSegments.length - 1] || "document.pdf";
 const originalFilename = decodeURIComponent(encodedFilename);
-
+logger6.info(`[KNOWLEDGE URL HANDLER] Fetching content from URL: ${fileUrl}`);
 const { content, contentType: fetchedContentType } = await fetchUrlContent(fileUrl);
 let contentType = fetchedContentType;
 if (contentType === "application/octet-stream") {
@@ -3107,7 +3471,7 @@ async function uploadKnowledgeHandler(req, res, runtime) {
     url: normalizedUrl
   }
 };
-
+logger6.debug(
   `[KNOWLEDGE URL HANDLER] Processing knowledge from URL: ${fileUrl} (type: ${contentType})`
 );
 const result = await service.addKnowledge(addKnowledgeOpts);
@@ -3121,7 +3485,7 @@ async function uploadKnowledgeHandler(req, res, runtime) {
     status: "success"
   };
 } catch (urlError) {
-
+  logger6.error(`[KNOWLEDGE URL HANDLER] Error processing URL ${fileUrl}: ${urlError}`);
   return {
     fileUrl,
     status: "error_processing",
@@ -3133,7 +3497,7 @@ async function uploadKnowledgeHandler(req, res, runtime) {
     sendSuccess(res, results);
   }
 } catch (error) {
-
+  logger6.error("[KNOWLEDGE HANDLER] Error processing knowledge:", error);
   if (hasUploadedFiles) {
     cleanupFiles(req.files);
   }
@@ -3172,7 +3536,7 @@ async function getKnowledgeDocumentsHandler(req, res, runtime) {
   // Or if the URL is stored in the metadata (check if it exists)
   memory.metadata && "url" in memory.metadata && typeof memory.metadata.url === "string" && normalizedRequestUrls.includes(normalizeS3Url(memory.metadata.url))
 );
-
+logger6.debug(
   `[KNOWLEDGE GET HANDLER] Filtered documents by URLs: ${fileUrls.length} URLs, found ${filteredMemories.length} matching documents`
 );
 }
@@ -3187,12 +3551,12 @@ async function getKnowledgeDocumentsHandler(req, res, runtime) {
       totalRequested: fileUrls ? fileUrls.length : 0
     });
   } catch (error) {
-
+    logger6.error("[KNOWLEDGE GET HANDLER] Error retrieving documents:", error);
     sendError(res, 500, "RETRIEVAL_ERROR", "Failed to retrieve documents", error.message);
   }
 }
 async function deleteKnowledgeDocumentHandler(req, res, runtime) {
-
+  logger6.debug(`[KNOWLEDGE DELETE HANDLER] Received DELETE request:
     - path: ${req.path}
     - params: ${JSON.stringify(req.params)}
   `);
@@ -3207,26 +3571,26 @@ async function deleteKnowledgeDocumentHandler(req, res, runtime) {
   }
   const knowledgeId = req.params.knowledgeId;
   if (!knowledgeId || knowledgeId.length < 36) {
-
+    logger6.error(`[KNOWLEDGE DELETE HANDLER] Invalid knowledge ID format: ${knowledgeId}`);
     return sendError(res, 400, "INVALID_ID", "Invalid Knowledge ID format");
   }
   try {
     const typedKnowledgeId = knowledgeId;
-
+    logger6.debug(
       `[KNOWLEDGE DELETE HANDLER] Attempting to delete document with ID: ${typedKnowledgeId}`
     );
     await service.deleteMemory(typedKnowledgeId);
-
+    logger6.info(
      `[KNOWLEDGE DELETE HANDLER] Successfully deleted document with ID: ${typedKnowledgeId}`
     );
     sendSuccess(res, null, 204);
   } catch (error) {
-
+    logger6.error(`[KNOWLEDGE DELETE HANDLER] Error deleting document ${knowledgeId}:`, error);
     sendError(res, 500, "DELETE_ERROR", "Failed to delete document", error.message);
   }
 }
 async function getKnowledgeByIdHandler(req, res, runtime) {
-
+  logger6.debug(`[KNOWLEDGE GET BY ID HANDLER] Received GET request:
     - path: ${req.path}
     - params: ${JSON.stringify(req.params)}
   `);
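The delete handler (and the get-by-ID handler in the next hunk) gates on `knowledgeId.length < 36`, which only checks that the ID is at least as long as a canonical UUID string (36 characters, e.g. `123e4567-e89b-12d3-a456-426614174000`). A stricter shape check is sketched below; this validator is hypothetical, not what the bundle ships:

```ts
// Hypothetical stricter validation than the shipped `length < 36` guard:
// accept only the canonical 8-4-4-4-12 hex layout.
const CANONICAL_UUID =
  /^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/i;

function isCanonicalUuid(id: unknown): id is string {
  return typeof id === "string" && CANONICAL_UUID.test(id);
}
```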
@@ -3241,11 +3605,11 @@ async function getKnowledgeByIdHandler(req, res, runtime) {
   }
   const knowledgeId = req.params.knowledgeId;
   if (!knowledgeId || knowledgeId.length < 36) {
-
+    logger6.error(`[KNOWLEDGE GET BY ID HANDLER] Invalid knowledge ID format: ${knowledgeId}`);
     return sendError(res, 400, "INVALID_ID", "Invalid Knowledge ID format");
   }
   try {
-
+    logger6.debug(`[KNOWLEDGE GET BY ID HANDLER] Retrieving document with ID: ${knowledgeId}`);
     const agentId = req.query.agentId;
     const memories = await service.getMemories({
       tableName: "documents",
@@ -3262,17 +3626,17 @@ async function getKnowledgeByIdHandler(req, res, runtime) {
     };
     sendSuccess(res, { document: cleanDocument });
   } catch (error) {
-
+    logger6.error(`[KNOWLEDGE GET BY ID HANDLER] Error retrieving document ${knowledgeId}:`, error);
     sendError(res, 500, "RETRIEVAL_ERROR", "Failed to retrieve document", error.message);
   }
 }
 async function knowledgePanelHandler(req, res, runtime) {
   const agentId = runtime.agentId;
-
+  logger6.debug(`[KNOWLEDGE PANEL] Serving panel for agent ${agentId}, request path: ${req.path}`);
   try {
     const currentDir = path3.dirname(new URL(import.meta.url).pathname);
     const frontendPath = path3.join(currentDir, "../dist/index.html");
-
+    logger6.debug(`[KNOWLEDGE PANEL] Looking for frontend at: ${frontendPath}`);
     if (fs3.existsSync(frontendPath)) {
       const html = await fs3.promises.readFile(frontendPath, "utf8");
       const injectedHtml = html.replace(
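`knowledgePanelHandler` locates its bundled frontend relative to the module itself via `new URL(import.meta.url).pathname`, which works because `import.meta.url` is a `file://` URL in Node ESM. A self-contained sketch of the same resolution; note that `fileURLToPath` from `node:url` is the more portable variant, since the raw `pathname` form breaks on Windows drive letters:

```ts
import path from "node:path";
import { fileURLToPath } from "node:url";

// Same idea as the handler above, but Windows-safe: convert the module's
// file:// URL to a filesystem path before joining relative segments.
const currentDir = path.dirname(fileURLToPath(import.meta.url));
const frontendPath = path.join(currentDir, "../dist/index.html");
```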
@@ -3306,10 +3670,10 @@ async function knowledgePanelHandler(req, res, runtime) {
         }
       }
     } catch (manifestError) {
-
+      logger6.error("[KNOWLEDGE PANEL] Error reading manifest:", manifestError);
     }
   }
-
+  logger6.debug(`[KNOWLEDGE PANEL] Using fallback with CSS: ${cssFile}, JS: ${jsFile}`);
   const html = `
 <!DOCTYPE html>
 <html lang="en">
@@ -3343,13 +3707,13 @@ async function knowledgePanelHandler(req, res, runtime) {
       res.end(html);
     }
   } catch (error) {
-
+    logger6.error("[KNOWLEDGE PANEL] Error serving frontend:", error);
     sendError(res, 500, "FRONTEND_ERROR", "Failed to load knowledge panel", error.message);
   }
 }
 async function frontendAssetHandler(req, res, runtime) {
   try {
-
+    logger6.debug(
       `[KNOWLEDGE ASSET HANDLER] Called with req.path: ${req.path}, req.originalUrl: ${req.originalUrl}, req.params: ${JSON.stringify(req.params)}`
     );
     const currentDir = path3.dirname(new URL(import.meta.url).pathname);
@@ -3369,7 +3733,7 @@ async function frontendAssetHandler(req, res, runtime) {
       );
     }
     const assetPath = path3.join(currentDir, "../dist/assets", assetName);
-
+    logger6.debug(`[KNOWLEDGE ASSET HANDLER] Attempting to serve asset: ${assetPath}`);
     if (fs3.existsSync(assetPath)) {
       const fileStream = fs3.createReadStream(assetPath);
       let contentType = "application/octet-stream";
@@ -3384,7 +3748,7 @@ async function frontendAssetHandler(req, res, runtime) {
       sendError(res, 404, "NOT_FOUND", `Asset not found: ${req.url}`);
     }
   } catch (error) {
-
+    logger6.error(`[KNOWLEDGE ASSET HANDLER] Error serving asset ${req.url}:`, error);
     sendError(res, 500, "ASSET_ERROR", `Failed to load asset ${req.url}`, error.message);
   }
 }
@@ -3408,7 +3772,7 @@ async function getKnowledgeChunksHandler(req, res, runtime) {
     ) : chunks;
     sendSuccess(res, { chunks: filteredChunks });
   } catch (error) {
-
+    logger6.error("[KNOWLEDGE CHUNKS GET HANDLER] Error retrieving chunks:", error);
     sendError(res, 500, "RETRIEVAL_ERROR", "Failed to retrieve knowledge chunks", error.message);
   }
 }
@@ -3430,14 +3794,14 @@ async function searchKnowledgeHandler(req, res, runtime) {
     return sendError(res, 400, "INVALID_QUERY", "Search query cannot be empty");
   }
   if (req.query.threshold && (parsedThreshold < 0 || parsedThreshold > 1)) {
-
+    logger6.debug(
       `[KNOWLEDGE SEARCH] Threshold value ${parsedThreshold} was clamped to ${matchThreshold}`
     );
   }
   if (req.query.limit && (parsedLimit < 1 || parsedLimit > 100)) {
-
+    logger6.debug(`[KNOWLEDGE SEARCH] Limit value ${parsedLimit} was clamped to ${limit}`);
   }
-
+  logger6.debug(
     `[KNOWLEDGE SEARCH] Searching for: "${searchText}" with threshold: ${matchThreshold}, limit: ${limit}`
   );
   const embedding = await runtime.useModel(ModelType4.TEXT_EMBEDDING, {
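The two debug messages above report clamping rather than rejection: out-of-range thresholds are pulled into [0, 1] and limits into [1, 100] before the search runs. A minimal sketch of that behavior; only the two ranges come from the logs, and the defaults here are placeholders rather than the plugin's actual defaults:

```ts
// Clamp helper matching the ranges named in the debug logs above.
const clamp = (value: number, min: number, max: number): number =>
  Math.min(Math.max(value, min), max);

// Placeholder defaults; only the [0, 1] and [1, 100] ranges are documented.
function parseSearchParams(query: { threshold?: string; limit?: string }) {
  const parsedThreshold = Number(query.threshold ?? "0.5");
  const parsedLimit = Number(query.limit ?? "10");
  return {
    matchThreshold: clamp(parsedThreshold, 0, 1),
    limit: clamp(parsedLimit, 1, 100),
  };
}
```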
@@ -3464,7 +3828,7 @@ async function searchKnowledgeHandler(req, res, runtime) {
           documentFilename = document.metadata.filename || documentFilename;
         }
       } catch (e) {
-
+        logger6.debug(`Could not fetch document ${documentId} for fragment`);
       }
     }
     return {
@@ -3479,7 +3843,7 @@ async function searchKnowledgeHandler(req, res, runtime) {
       };
     })
   );
-
+  logger6.info(
     `[KNOWLEDGE SEARCH] Found ${enhancedResults.length} results for query: "${searchText}"`
   );
   sendSuccess(res, {
@@ -3489,7 +3853,7 @@ async function searchKnowledgeHandler(req, res, runtime) {
     count: enhancedResults.length
   });
 } catch (error) {
-
+    logger6.error("[KNOWLEDGE SEARCH] Error searching knowledge:", error);
     sendError(res, 500, "SEARCH_ERROR", "Failed to search knowledge", error.message);
   }
 }
@@ -3501,7 +3865,7 @@ async function uploadKnowledgeWithMulter(req, res, runtime) {
   );
   uploadArray(req, res, (err) => {
     if (err) {
-
+      logger6.error("[KNOWLEDGE UPLOAD] Multer error:", err);
       return sendError(res, 400, "UPLOAD_ERROR", err.message);
     }
     uploadKnowledgeHandler(req, res, runtime);
@@ -3564,35 +3928,36 @@ var knowledgePlugin = {
     CTX_KNOWLEDGE_ENABLED: "false"
   },
   async init(config, runtime) {
-
+    logger7.info("Initializing Knowledge Plugin...");
     try {
-
+      logger7.info("Validating model configuration for Knowledge plugin...");
       const validatedConfig = validateModelConfig(runtime);
       if (validatedConfig.CTX_KNOWLEDGE_ENABLED) {
-
-
+        logger7.info("Running in Contextual Knowledge mode with text generation capabilities.");
+        logger7.info(
           `Using ${validatedConfig.EMBEDDING_PROVIDER} for embeddings and ${validatedConfig.TEXT_PROVIDER} for text generation.`
         );
       } else {
         const usingPluginOpenAI = !process.env.EMBEDDING_PROVIDER;
         if (usingPluginOpenAI) {
-
+          logger7.info(
             "Running in Basic Embedding mode with auto-detected configuration from plugin-openai."
           );
         } else {
-
+          logger7.info(
             "Running in Basic Embedding mode (CTX_KNOWLEDGE_ENABLED=false). TEXT_PROVIDER and TEXT_MODEL not required."
          );
        }
-
+        logger7.info(
          `Using ${validatedConfig.EMBEDDING_PROVIDER} for embeddings with ${validatedConfig.TEXT_EMBEDDING_MODEL}.`
        );
      }
-
+      logger7.info("Model configuration validated successfully.");
      if (runtime) {
-
-        const loadDocsOnStartup = config.LOAD_DOCS_ON_STARTUP
+        logger7.info(`Knowledge Plugin initialized for agent: ${runtime.agentId}`);
+        const loadDocsOnStartup = config.LOAD_DOCS_ON_STARTUP === "true" || process.env.LOAD_DOCS_ON_STARTUP === "true";
        if (loadDocsOnStartup) {
+          logger7.info("LOAD_DOCS_ON_STARTUP is enabled. Scheduling document loading...");
          setTimeout(async () => {
            try {
              const service = runtime.getService(KnowledgeService.serviceType);
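The init branches above distinguish two runtime modes: Contextual Knowledge mode (`CTX_KNOWLEDGE_ENABLED` truthy, with both an embedding and a text-generation provider) and Basic Embedding mode (embeddings only, with `TEXT_PROVIDER`/`TEXT_MODEL` not required, or auto-detected from plugin-openai when `EMBEDDING_PROVIDER` is unset). A hedged sketch of the settings each branch implies; the concrete values are placeholders, and the full set of accepted providers is defined by the plugin's config schema, not by this sketch:

```ts
// Placeholder settings for the two logged modes.
const contextualMode = {
  CTX_KNOWLEDGE_ENABLED: "true",
  EMBEDDING_PROVIDER: "openai",
  TEXT_PROVIDER: "anthropic",
  TEXT_MODEL: "<model-name>", // placeholder; depends on your text provider
};

const basicEmbeddingMode = {
  // EMBEDDING_PROVIDER left unset: configuration is auto-detected
  // from plugin-openai, per the "Basic Embedding mode" log above.
  CTX_KNOWLEDGE_ENABLED: "false",
};
```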
@@ -3600,20 +3965,22 @@ var knowledgePlugin = {
              const { loadDocsFromPath: loadDocsFromPath2 } = await import("./docs-loader-IBTEOAYT.js");
              const result = await loadDocsFromPath2(service, runtime.agentId);
              if (result.successful > 0) {
-
+                logger7.info(`Loaded ${result.successful} documents from docs folder on startup`);
              }
            }
          } catch (error) {
-
+            logger7.error("Error loading documents on startup:", error);
          }
        }, 5e3);
+        } else {
+          logger7.info("LOAD_DOCS_ON_STARTUP is not enabled. Skipping automatic document loading.");
        }
      }
-
+      logger7.info(
        "Knowledge Plugin initialized. Frontend panel should be discoverable via its public route."
      );
    } catch (error) {
-
+      logger7.error("Failed to initialize Knowledge plugin:", error);
      throw error;
    }
  },
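Two details of the startup loader are easy to miss in the hunk above: only the literal string "true" (from the plugin config or `process.env`) enables it, and the load itself is deferred by `5e3` ms (5 seconds) before `runtime.getService(KnowledgeService.serviceType)` is called. A usage sketch based on those observations:

```ts
// Only the exact string "true" enables startup loading; "1" or "TRUE" will not
// match the strict === comparison in init().
process.env.LOAD_DOCS_ON_STARTUP = "true";

// Equivalent per-plugin config, passed as the `config` argument to
// init(config, runtime):
const pluginConfig = { LOAD_DOCS_ON_STARTUP: "true" };
```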