@elizaos/plugin-knowledge 1.0.5 → 1.0.7

This diff compares the contents of publicly available package versions as released to a supported public registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in that registry.
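In practical terms, the changes below make EMBEDDING_PROVIDER optional in the config schema (when unset, embeddings fall back to whichever embedding plugin the runtime has loaded), add a dedicated LLM layer (src/llm.ts) that document processing uses for contextual enrichment only when TEXT_PROVIDER and TEXT_MODEL are set together with the matching provider API key (see shouldUseCustomLLM), and require an explicit agentId on the knowledge upload routes. As a minimal sketch of an environment that would activate the new custom-LLM path (the model id and key value are illustrative placeholders, not values taken from this diff):

CTX_KNOWLEDGE_ENABLED=true
TEXT_PROVIDER=openrouter                # one of: openai, anthropic, openrouter, google
TEXT_MODEL=anthropic/claude-sonnet-4    # hypothetical model id
OPENROUTER_API_KEY=...                  # key for the chosen provider (placeholder)

If TEXT_PROVIDER or TEXT_MODEL is unset, shouldUseCustomLLM() returns false and contextual generation keeps routing through the ElizaOS runtime via runtime.useModel(ModelType.TEXT_LARGE, ...), matching the pre-1.0.7 behavior.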
package/dist/index.js CHANGED
@@ -4,11 +4,12 @@ import {
  fetchUrlContent,
  isBinaryContentType,
  loadDocsFromPath,
+ looksLikeBase64,
  normalizeS3Url
- } from "./chunk-MFXNKYBS.js";
+ } from "./chunk-536BD2UA.js";

  // src/index.ts
- import { logger as logger6 } from "@elizaos/core";
+ import { logger as logger7 } from "@elizaos/core";

  // src/types.ts
  import z from "zod";
@@ -17,7 +18,7 @@ var ModelConfigSchema = z.object({
  // NOTE: If EMBEDDING_PROVIDER is not specified, the plugin automatically assumes
  // plugin-openai is being used and will use OPENAI_EMBEDDING_MODEL and
  // OPENAI_EMBEDDING_DIMENSIONS for configuration
- EMBEDDING_PROVIDER: z.enum(["openai", "google"]),
+ EMBEDDING_PROVIDER: z.enum(["openai", "google"]).optional(),
  TEXT_PROVIDER: z.enum(["openai", "anthropic", "openrouter", "google"]).optional(),
  // API keys
  OPENAI_API_KEY: z.string().optional(),
@@ -65,14 +66,14 @@ function validateModelConfig(runtime) {
  const openaiApiKey2 = getSetting("OPENAI_API_KEY");
  const openaiEmbeddingModel = getSetting("OPENAI_EMBEDDING_MODEL");
  if (openaiApiKey2 && openaiEmbeddingModel) {
- logger.info("EMBEDDING_PROVIDER not specified, using configuration from plugin-openai");
+ logger.debug("EMBEDDING_PROVIDER not specified, using configuration from plugin-openai");
  } else {
- logger.warn(
- "EMBEDDING_PROVIDER not specified, but plugin-openai configuration incomplete. Check OPENAI_API_KEY and OPENAI_EMBEDDING_MODEL."
+ logger.debug(
+ "EMBEDDING_PROVIDER not specified. Assuming embeddings are provided by another plugin (e.g., plugin-google-genai)."
  );
  }
  }
- const finalEmbeddingProvider = embeddingProvider || "openai";
+ const finalEmbeddingProvider = embeddingProvider;
  const textEmbeddingModel = getSetting("TEXT_EMBEDDING_MODEL") || getSetting("OPENAI_EMBEDDING_MODEL") || "text-embedding-3-small";
  const embeddingDimension = getSetting("EMBEDDING_DIMENSION") || getSetting("OPENAI_EMBEDDING_DIMENSIONS") || "1536";
  const openaiApiKey = getSetting("OPENAI_API_KEY");
@@ -105,23 +106,21 @@ function validateModelConfig(runtime) {
  }
  }
  function validateConfigRequirements(config, assumePluginOpenAI) {
- if (!assumePluginOpenAI) {
- if (config.EMBEDDING_PROVIDER === "openai" && !config.OPENAI_API_KEY) {
- throw new Error('OPENAI_API_KEY is required when EMBEDDING_PROVIDER is set to "openai"');
- }
- if (config.EMBEDDING_PROVIDER === "google" && !config.GOOGLE_API_KEY) {
- throw new Error('GOOGLE_API_KEY is required when EMBEDDING_PROVIDER is set to "google"');
- }
- } else {
- if (!config.OPENAI_API_KEY) {
- throw new Error("OPENAI_API_KEY is required when using plugin-openai configuration");
- }
- if (!config.TEXT_EMBEDDING_MODEL) {
- throw new Error("OPENAI_EMBEDDING_MODEL is required when using plugin-openai configuration");
- }
+ const embeddingProvider = config.EMBEDDING_PROVIDER;
+ if (embeddingProvider === "openai" && !config.OPENAI_API_KEY) {
+ throw new Error('OPENAI_API_KEY is required when EMBEDDING_PROVIDER is set to "openai"');
+ }
+ if (embeddingProvider === "google" && !config.GOOGLE_API_KEY) {
+ throw new Error('GOOGLE_API_KEY is required when EMBEDDING_PROVIDER is set to "google"');
+ }
+ if (!embeddingProvider) {
+ logger.debug("No EMBEDDING_PROVIDER specified. Embeddings will be handled by the runtime.");
+ }
+ if (assumePluginOpenAI && config.OPENAI_API_KEY && !config.TEXT_EMBEDDING_MODEL) {
+ throw new Error("OPENAI_EMBEDDING_MODEL is required when using plugin-openai configuration");
  }
  if (config.CTX_KNOWLEDGE_ENABLED) {
- logger.info("Contextual Knowledge is enabled. Validating text generation settings...");
+ logger.debug("Contextual Knowledge is enabled. Validating text generation settings...");
  if (config.TEXT_PROVIDER === "openai" && !config.OPENAI_API_KEY) {
  throw new Error('OPENAI_API_KEY is required when TEXT_PROVIDER is set to "openai"');
  }
@@ -137,18 +136,18 @@ function validateConfigRequirements(config, assumePluginOpenAI) {
  if (config.TEXT_PROVIDER === "openrouter") {
  const modelName = config.TEXT_MODEL?.toLowerCase() || "";
  if (modelName.includes("claude") || modelName.includes("gemini")) {
- logger.info(
+ logger.debug(
  `Using ${modelName} with OpenRouter. This configuration supports document caching for improved performance.`
  );
  }
  }
  } else {
  if (assumePluginOpenAI) {
- logger.info(
- "Contextual Knowledge is disabled. Using embedding configuration from plugin-openai."
+ logger.debug(
+ "Contextual Knowledge is disabled. Embeddings will be handled by the runtime (e.g., plugin-openai, plugin-google-genai)."
  );
  } else {
- logger.info("Contextual Knowledge is disabled. Using basic embedding-only configuration.");
+ logger.debug("Contextual Knowledge is disabled. Using configured embedding provider.");
  }
  }
  }
@@ -191,7 +190,7 @@ async function getProviderRateLimits(runtime) {
  // src/service.ts
  import {
  createUniqueUuid,
- logger as logger3,
+ logger as logger4,
  MemoryType as MemoryType2,
  ModelType as ModelType2,
  Semaphore,
@@ -203,7 +202,7 @@ import {
  import {
  MemoryType,
  ModelType,
- logger as logger2,
+ logger as logger3,
  splitChunks
  } from "@elizaos/core";

@@ -686,12 +685,342 @@ ${chunkContent}`;
  return generatedContext.trim();
  }

+ // src/llm.ts
+ import { generateText as aiGenerateText, embed } from "ai";
+ import { createOpenAI } from "@ai-sdk/openai";
+ import { createAnthropic } from "@ai-sdk/anthropic";
+ import { createOpenRouter } from "@openrouter/ai-sdk-provider";
+ import { google } from "@ai-sdk/google";
+ import { logger as logger2 } from "@elizaos/core";
+ async function generateText(prompt, system, overrideConfig) {
+ const config = validateModelConfig();
+ const provider = overrideConfig?.provider || config.TEXT_PROVIDER;
+ const modelName = overrideConfig?.modelName || config.TEXT_MODEL;
+ const maxTokens = overrideConfig?.maxTokens || config.MAX_OUTPUT_TOKENS;
+ const autoCacheContextualRetrieval = overrideConfig?.autoCacheContextualRetrieval !== false;
+ try {
+ switch (provider) {
+ case "anthropic":
+ return await generateAnthropicText(prompt, system, modelName, maxTokens);
+ case "openai":
+ return await generateOpenAIText(prompt, system, modelName, maxTokens);
+ case "openrouter":
+ return await generateOpenRouterText(
+ prompt,
+ system,
+ modelName,
+ maxTokens,
+ overrideConfig?.cacheDocument,
+ overrideConfig?.cacheOptions,
+ autoCacheContextualRetrieval
+ );
+ case "google":
+ return await generateGoogleText(prompt, system, modelName, maxTokens, config);
+ default:
+ throw new Error(`Unsupported text provider: ${provider}`);
+ }
+ } catch (error) {
+ logger2.error(`[LLM Service - ${provider}] Error generating text with ${modelName}:`, error);
+ throw error;
+ }
+ }
+ async function generateAnthropicText(prompt, system, modelName, maxTokens) {
+ const config = validateModelConfig();
+ const anthropic = createAnthropic({
+ apiKey: config.ANTHROPIC_API_KEY,
+ baseURL: config.ANTHROPIC_BASE_URL
+ });
+ const modelInstance = anthropic(modelName);
+ const result = await aiGenerateText({
+ model: modelInstance,
+ prompt,
+ system,
+ temperature: 0.3,
+ maxTokens
+ });
+ logger2.debug(
+ `[LLM Service - Anthropic] Text generated with ${modelName}. Usage: ${result.usage.promptTokens} prompt tokens, ${result.usage.completionTokens} completion tokens.`
+ );
+ return result;
+ }
+ async function generateOpenAIText(prompt, system, modelName, maxTokens) {
+ const config = validateModelConfig();
+ const openai = createOpenAI({
+ apiKey: config.OPENAI_API_KEY,
+ baseURL: config.OPENAI_BASE_URL
+ });
+ const modelInstance = openai.chat(modelName);
+ const result = await aiGenerateText({
+ model: modelInstance,
+ prompt,
+ system,
+ temperature: 0.3,
+ maxTokens
+ });
+ logger2.debug(
+ `[LLM Service - OpenAI] Text generated with ${modelName}. Usage: ${result.usage.promptTokens} prompt tokens, ${result.usage.completionTokens} completion tokens.`
+ );
+ return result;
+ }
+ async function generateGoogleText(prompt, system, modelName, maxTokens, config) {
+ const googleProvider = google;
+ if (config.GOOGLE_API_KEY) {
+ process.env.GOOGLE_GENERATIVE_AI_API_KEY = config.GOOGLE_API_KEY;
+ }
+ const modelInstance = googleProvider(modelName);
+ const result = await aiGenerateText({
+ model: modelInstance,
+ prompt,
+ system,
+ temperature: 0.3,
+ maxTokens
+ });
+ logger2.debug(
+ `[LLM Service - Google] Text generated with ${modelName}. Usage: ${result.usage.promptTokens} prompt tokens, ${result.usage.completionTokens} completion tokens.`
+ );
+ return result;
+ }
+ async function generateOpenRouterText(prompt, system, modelName, maxTokens, cacheDocument, cacheOptions, autoCacheContextualRetrieval = true) {
+ const config = validateModelConfig();
+ const openrouter = createOpenRouter({
+ apiKey: config.OPENROUTER_API_KEY,
+ baseURL: config.OPENROUTER_BASE_URL
+ });
+ const modelInstance = openrouter.chat(modelName);
+ const isClaudeModel = modelName.toLowerCase().includes("claude");
+ const isGeminiModel = modelName.toLowerCase().includes("gemini");
+ const isGemini25Model = modelName.toLowerCase().includes("gemini-2.5");
+ const supportsCaching = isClaudeModel || isGeminiModel;
+ let documentForCaching = cacheDocument;
+ if (!documentForCaching && autoCacheContextualRetrieval && supportsCaching) {
+ const docMatch = prompt.match(/<document>([\s\S]*?)<\/document>/);
+ if (docMatch && docMatch[1]) {
+ documentForCaching = docMatch[1].trim();
+ logger2.debug(
+ `[LLM Service - OpenRouter] Auto-detected document for caching (${documentForCaching.length} chars)`
+ );
+ }
+ }
+ if (documentForCaching && supportsCaching) {
+ const effectiveCacheOptions = cacheOptions || { type: "ephemeral" };
+ let promptText = prompt;
+ if (promptText.includes("<document>")) {
+ promptText = promptText.replace(/<document>[\s\S]*?<\/document>/, "").trim();
+ }
+ if (isClaudeModel) {
+ return await generateClaudeWithCaching(
+ promptText,
+ system,
+ modelInstance,
+ modelName,
+ maxTokens,
+ documentForCaching
+ );
+ } else if (isGeminiModel) {
+ return await generateGeminiWithCaching(
+ promptText,
+ system,
+ modelInstance,
+ modelName,
+ maxTokens,
+ documentForCaching,
+ isGemini25Model
+ );
+ }
+ }
+ logger2.debug("[LLM Service - OpenRouter] Using standard request without caching");
+ return await generateStandardOpenRouterText(prompt, system, modelInstance, modelName, maxTokens);
+ }
+ async function generateClaudeWithCaching(promptText, system, modelInstance, modelName, maxTokens, documentForCaching) {
+ logger2.debug(
+ `[LLM Service - OpenRouter] Using explicit prompt caching with Claude model ${modelName}`
+ );
+ const messages = [
+ // System message with cached document (if system is provided)
+ system ? {
+ role: "system",
+ content: [
+ {
+ type: "text",
+ text: system
+ },
+ {
+ type: "text",
+ text: documentForCaching,
+ cache_control: {
+ type: "ephemeral"
+ }
+ }
+ ]
+ } : (
+ // User message with cached document (if no system message)
+ {
+ role: "user",
+ content: [
+ {
+ type: "text",
+ text: "Document for context:"
+ },
+ {
+ type: "text",
+ text: documentForCaching,
+ cache_control: {
+ type: "ephemeral"
+ }
+ },
+ {
+ type: "text",
+ text: promptText
+ }
+ ]
+ }
+ ),
+ // Only add user message if system was provided (otherwise we included user above)
+ system ? {
+ role: "user",
+ content: [
+ {
+ type: "text",
+ text: promptText
+ }
+ ]
+ } : null
+ ].filter(Boolean);
+ logger2.debug("[LLM Service - OpenRouter] Using Claude-specific caching structure");
+ const result = await aiGenerateText({
+ model: modelInstance,
+ messages,
+ temperature: 0.3,
+ maxTokens,
+ providerOptions: {
+ openrouter: {
+ usage: {
+ include: true
+ }
+ }
+ }
+ });
+ logCacheMetrics(result);
+ logger2.debug(
+ `[LLM Service - OpenRouter] Text generated with ${modelName} using Claude caching. Usage: ${result.usage.promptTokens} prompt tokens, ${result.usage.completionTokens} completion tokens.`
+ );
+ return result;
+ }
+ async function generateGeminiWithCaching(promptText, system, modelInstance, modelName, maxTokens, documentForCaching, isGemini25Model) {
+ const usingImplicitCaching = isGemini25Model;
+ const estimatedDocTokens = Math.ceil(documentForCaching.length / 4);
+ const minTokensForImplicitCache = modelName.toLowerCase().includes("flash") ? 1028 : 2048;
+ const likelyTriggersCaching = estimatedDocTokens >= minTokensForImplicitCache;
+ if (usingImplicitCaching) {
+ logger2.debug(
+ `[LLM Service - OpenRouter] Using Gemini 2.5 implicit caching with model ${modelName}`
+ );
+ logger2.debug(
+ `[LLM Service - OpenRouter] Gemini 2.5 models automatically cache large prompts (no cache_control needed)`
+ );
+ if (likelyTriggersCaching) {
+ logger2.debug(
+ `[LLM Service - OpenRouter] Document size ~${estimatedDocTokens} tokens exceeds minimum ${minTokensForImplicitCache} tokens for implicit caching`
+ );
+ } else {
+ logger2.debug(
+ `[LLM Service - OpenRouter] Warning: Document size ~${estimatedDocTokens} tokens may not meet minimum ${minTokensForImplicitCache} token threshold for implicit caching`
+ );
+ }
+ } else {
+ logger2.debug(
+ `[LLM Service - OpenRouter] Using standard prompt format with Gemini model ${modelName}`
+ );
+ logger2.debug(
+ `[LLM Service - OpenRouter] Note: Only Gemini 2.5 models support automatic implicit caching`
+ );
+ }
+ const geminiSystemPrefix = system ? `${system}
+
+ ` : "";
+ const geminiPrompt = `${geminiSystemPrefix}${documentForCaching}
+
+ ${promptText}`;
+ const result = await aiGenerateText({
+ model: modelInstance,
+ prompt: geminiPrompt,
+ temperature: 0.3,
+ maxTokens,
+ providerOptions: {
+ openrouter: {
+ usage: {
+ include: true
+ // Include usage info to see cache metrics
+ }
+ }
+ }
+ });
+ logCacheMetrics(result);
+ logger2.debug(
+ `[LLM Service - OpenRouter] Text generated with ${modelName} using ${usingImplicitCaching ? "implicit" : "standard"} caching. Usage: ${result.usage.promptTokens} prompt tokens, ${result.usage.completionTokens} completion tokens.`
+ );
+ return result;
+ }
+ async function generateStandardOpenRouterText(prompt, system, modelInstance, modelName, maxTokens) {
+ const result = await aiGenerateText({
+ model: modelInstance,
+ prompt,
+ system,
+ temperature: 0.3,
+ maxTokens,
+ providerOptions: {
+ openrouter: {
+ usage: {
+ include: true
+ // Include usage info to see cache metrics
+ }
+ }
+ }
+ });
+ logger2.debug(
+ `[LLM Service - OpenRouter] Text generated with ${modelName}. Usage: ${result.usage.promptTokens} prompt tokens, ${result.usage.completionTokens} completion tokens.`
+ );
+ return result;
+ }
+ function logCacheMetrics(result) {
+ if (result.usage && result.usage.cacheTokens) {
+ logger2.debug(
+ `[LLM Service - OpenRouter] Cache metrics - Cached tokens: ${result.usage.cacheTokens}, Cache discount: ${result.usage.cacheDiscount}`
+ );
+ }
+ }
+
  // src/document-processor.ts
  var ctxKnowledgeEnabled = process.env.CTX_KNOWLEDGE_ENABLED === "true" || process.env.CTX_KNOWLEDGE_ENABLED === "True";
+ function shouldUseCustomLLM() {
+ const textProvider = process.env.TEXT_PROVIDER;
+ const textModel = process.env.TEXT_MODEL;
+ if (!textProvider || !textModel) {
+ return false;
+ }
+ switch (textProvider.toLowerCase()) {
+ case "openrouter":
+ return !!process.env.OPENROUTER_API_KEY;
+ case "openai":
+ return !!process.env.OPENAI_API_KEY;
+ case "anthropic":
+ return !!process.env.ANTHROPIC_API_KEY;
+ case "google":
+ return !!process.env.GOOGLE_API_KEY;
+ default:
+ return false;
+ }
+ }
+ var useCustomLLM = shouldUseCustomLLM();
  if (ctxKnowledgeEnabled) {
- logger2.info(`Document processor starting with Contextual Knowledge ENABLED`);
+ logger3.info(`Document processor starting with Contextual Knowledge ENABLED`);
+ if (useCustomLLM) {
+ logger3.info(`Using Custom LLM with provider: ${process.env.TEXT_PROVIDER}, model: ${process.env.TEXT_MODEL}`);
+ } else {
+ logger3.info(`Using ElizaOS Runtime LLM (default behavior)`);
+ }
  } else {
- logger2.info(`Document processor starting with Contextual Knowledge DISABLED`);
+ logger3.info(`Document processor starting with Contextual Knowledge DISABLED`);
  }
  async function processFragmentsSynchronously({
  runtime,
@@ -704,15 +1033,15 @@ async function processFragmentsSynchronously({
  worldId
  }) {
  if (!fullDocumentText || fullDocumentText.trim() === "") {
- logger2.warn(`No text content available to chunk for document ${documentId}.`);
+ logger3.warn(`No text content available to chunk for document ${documentId}.`);
  return 0;
  }
  const chunks = await splitDocumentIntoChunks(fullDocumentText);
  if (chunks.length === 0) {
- logger2.warn(`No chunks generated from text for ${documentId}. No fragments to save.`);
+ logger3.warn(`No chunks generated from text for ${documentId}. No fragments to save.`);
  return 0;
  }
- logger2.info(`Split content into ${chunks.length} chunks for document ${documentId}`);
+ logger3.info(`Split content into ${chunks.length} chunks for document ${documentId}`);
  const providerLimits = await getProviderRateLimits();
  const CONCURRENCY_LIMIT = Math.min(30, providerLimits.maxConcurrentRequests || 30);
  const rateLimiter = createRateLimiter(providerLimits.requestsPerMinute || 60);
@@ -730,11 +1059,11 @@ async function processFragmentsSynchronously({
  rateLimiter
  });
  if (failedCount > 0) {
- logger2.warn(
+ logger3.warn(
  `Failed to process ${failedCount} chunks out of ${chunks.length} for document ${documentId}`
  );
  }
- logger2.info(`Finished saving ${savedCount} fragments for document ${documentId}.`);
+ logger3.info(`Finished saving ${savedCount} fragments for document ${documentId}.`);
  return savedCount;
  }
  async function extractTextFromDocument(fileBuffer, contentType, originalFilename) {
@@ -743,15 +1072,15 @@ async function extractTextFromDocument(fileBuffer, contentType, originalFilename
  }
  try {
  if (contentType === "application/pdf") {
- logger2.debug(`Extracting text from PDF: ${originalFilename}`);
+ logger3.debug(`Extracting text from PDF: ${originalFilename}`);
  return await convertPdfToTextFromBuffer(fileBuffer, originalFilename);
  } else {
- logger2.debug(`Extracting text from non-PDF: ${originalFilename} (Type: ${contentType})`);
+ logger3.debug(`Extracting text from non-PDF: ${originalFilename} (Type: ${contentType})`);
  if (contentType.includes("text/") || contentType.includes("application/json") || contentType.includes("application/xml")) {
  try {
  return fileBuffer.toString("utf8");
  } catch (textError) {
- logger2.warn(
+ logger3.warn(
  `Failed to decode ${originalFilename} as UTF-8, falling back to binary extraction`
  );
  }
@@ -759,7 +1088,7 @@ async function extractTextFromDocument(fileBuffer, contentType, originalFilename
  return await extractTextFromFileBuffer(fileBuffer, contentType, originalFilename);
  }
  } catch (error) {
- logger2.error(`Error extracting text from ${originalFilename}: ${error.message}`);
+ logger3.error(`Error extracting text from ${originalFilename}: ${error.message}`);
  throw new Error(`Failed to extract text from ${originalFilename}: ${error.message}`);
  }
  }
@@ -804,7 +1133,7 @@ async function splitDocumentIntoChunks(documentText) {
  const tokenChunkOverlap = DEFAULT_CHUNK_OVERLAP_TOKENS;
  const targetCharChunkSize = Math.round(tokenChunkSize * DEFAULT_CHARS_PER_TOKEN);
  const targetCharChunkOverlap = Math.round(tokenChunkOverlap * DEFAULT_CHARS_PER_TOKEN);
- logger2.debug(
+ logger3.debug(
  `Using core splitChunks with settings: tokenChunkSize=${tokenChunkSize}, tokenChunkOverlap=${tokenChunkOverlap}, charChunkSize=${targetCharChunkSize}, charChunkOverlap=${targetCharChunkOverlap}`
  );
  return await splitChunks(documentText, tokenChunkSize, tokenChunkOverlap);
@@ -828,7 +1157,7 @@ async function processAndSaveFragments({
  for (let i = 0; i < chunks.length; i += concurrencyLimit) {
  const batchChunks = chunks.slice(i, i + concurrencyLimit);
  const batchOriginalIndices = Array.from({ length: batchChunks.length }, (_, k) => i + k);
- logger2.debug(
+ logger3.debug(
  `Processing batch of ${batchChunks.length} chunks for document ${documentId}. Starting original index: ${batchOriginalIndices[0]}, batch ${Math.floor(i / concurrencyLimit) + 1}/${Math.ceil(chunks.length / concurrencyLimit)}`
  );
  const contextualizedChunks = await getContextualizedChunks(
@@ -848,13 +1177,13 @@ async function processAndSaveFragments({
  if (!result.success) {
  failedCount++;
  failedChunks.push(originalChunkIndex);
- logger2.warn(`Failed to process chunk ${originalChunkIndex} for document ${documentId}`);
+ logger3.warn(`Failed to process chunk ${originalChunkIndex} for document ${documentId}`);
  continue;
  }
  const contextualizedChunkText = result.text;
  const embedding = result.embedding;
  if (!embedding || embedding.length === 0) {
- logger2.warn(
+ logger3.warn(
  `Zero vector detected for chunk ${originalChunkIndex} (document ${documentId}). Embedding: ${JSON.stringify(result.embedding)}`
  );
  failedCount++;
@@ -879,12 +1208,12 @@ async function processAndSaveFragments({
  }
  };
  await runtime.createMemory(fragmentMemory, "knowledge");
- logger2.debug(
+ logger3.debug(
  `Saved fragment ${originalChunkIndex + 1} for document ${documentId} (Fragment ID: ${fragmentMemory.id})`
  );
  savedCount++;
  } catch (saveError) {
- logger2.error(
+ logger3.error(
  `Error saving chunk ${originalChunkIndex} to database: ${saveError.message}`,
  saveError.stack
  );
@@ -899,8 +1228,26 @@ async function processAndSaveFragments({
  return { savedCount, failedCount, failedChunks };
  }
  async function generateEmbeddingsForChunks(runtime, contextualizedChunks, rateLimiter) {
+ const validChunks = contextualizedChunks.filter((chunk) => chunk.success);
+ const failedChunks = contextualizedChunks.filter((chunk) => !chunk.success);
+ if (validChunks.length === 0) {
+ return failedChunks.map((chunk) => ({
+ success: false,
+ index: chunk.index,
+ error: new Error("Chunk processing failed"),
+ text: chunk.contextualizedText
+ }));
+ }
  return await Promise.all(
  contextualizedChunks.map(async (contextualizedChunk) => {
+ if (!contextualizedChunk.success) {
+ return {
+ success: false,
+ index: contextualizedChunk.index,
+ error: new Error("Chunk processing failed"),
+ text: contextualizedChunk.contextualizedText
+ };
+ }
  await rateLimiter();
  try {
  const generateEmbeddingOperation = async () => {
@@ -928,7 +1275,7 @@ async function generateEmbeddingsForChunks(runtime, contextualizedChunks, rateLi
  text: contextualizedChunk.contextualizedText
  };
  } catch (error) {
- logger2.error(
+ logger3.error(
  `Error generating embedding for chunk ${contextualizedChunk.index}: ${error.message}`
  );
  return {
@@ -943,7 +1290,7 @@ async function generateEmbeddingsForChunks(runtime, contextualizedChunks, rateLi
  }
  async function getContextualizedChunks(runtime, fullDocumentText, chunks, contentType, batchOriginalIndices) {
  if (ctxKnowledgeEnabled && fullDocumentText) {
- logger2.debug(`Generating contexts for ${chunks.length} chunks`);
+ logger3.debug(`Generating contexts for ${chunks.length} chunks`);
  return await generateContextsInBatch(
  runtime,
  fullDocumentText,
@@ -988,17 +1335,31 @@ async function generateContextsInBatch(runtime, fullDocumentText, chunks, conten
  try {
  let llmResponse;
  const generateTextOperation = async () => {
- if (item.usesCaching) {
- return await runtime.useModel(ModelType.TEXT_LARGE, {
- prompt: item.promptText,
- system: item.systemPrompt
- // cacheDocument: item.fullDocumentTextForContext, // Not directly supported by useModel
- // cacheOptions: { type: 'ephemeral' }, // Not directly supported by useModel
- });
+ if (useCustomLLM) {
+ if (item.usesCaching) {
+ return await generateText(
+ item.promptText,
+ item.systemPrompt,
+ {
+ cacheDocument: item.fullDocumentTextForContext,
+ cacheOptions: { type: "ephemeral" },
+ autoCacheContextualRetrieval: true
+ }
+ );
+ } else {
+ return await generateText(item.prompt);
+ }
  } else {
- return await runtime.useModel(ModelType.TEXT_LARGE, {
- prompt: item.prompt
- });
+ if (item.usesCaching) {
+ return await runtime.useModel(ModelType.TEXT_LARGE, {
+ prompt: item.promptText,
+ system: item.systemPrompt
+ });
+ } else {
+ return await runtime.useModel(ModelType.TEXT_LARGE, {
+ prompt: item.prompt
+ });
+ }
  }
  };
  llmResponse = await withRateLimitRetry(
@@ -1007,7 +1368,7 @@ async function generateContextsInBatch(runtime, fullDocumentText, chunks, conten
  );
  const generatedContext = llmResponse.text;
  const contextualizedText = getChunkWithContext(item.chunkText, generatedContext);
- logger2.debug(
+ logger3.debug(
  `Context added for chunk ${item.originalIndex}. New length: ${contextualizedText.length}`
  );
  return {
@@ -1016,7 +1377,7 @@ async function generateContextsInBatch(runtime, fullDocumentText, chunks, conten
  index: item.originalIndex
  };
  } catch (error) {
- logger2.error(
+ logger3.error(
  `Error generating context for chunk ${item.originalIndex}: ${error.message}`,
  error.stack
  );
@@ -1037,7 +1398,7 @@ function prepareContextPrompts(chunks, fullDocumentText, contentType, batchIndic
  if (isUsingCacheCapableModel) {
  const cachingPromptInfo = contentType ? getCachingPromptForMimeType(contentType, chunkText) : getCachingContextualizationPrompt(chunkText);
  if (cachingPromptInfo.prompt.startsWith("Error:")) {
- logger2.warn(
+ logger3.warn(
  `Skipping contextualization for chunk ${originalIndex} due to: ${cachingPromptInfo.prompt}`
  );
  return {
@@ -1059,7 +1420,7 @@ function prepareContextPrompts(chunks, fullDocumentText, contentType, batchIndic
  } else {
  const prompt = contentType ? getPromptForMimeType(contentType, fullDocumentText, chunkText) : getContextualizationPrompt(fullDocumentText, chunkText);
  if (prompt.startsWith("Error:")) {
- logger2.warn(`Skipping contextualization for chunk ${originalIndex} due to: ${prompt}`);
+ logger3.warn(`Skipping contextualization for chunk ${originalIndex} due to: ${prompt}`);
  return {
  prompt: null,
  originalIndex,
@@ -1077,7 +1438,7 @@ function prepareContextPrompts(chunks, fullDocumentText, contentType, batchIndic
  };
  }
  } catch (error) {
- logger2.error(
+ logger3.error(
  `Error preparing prompt for chunk ${originalIndex}: ${error.message}`,
  error.stack
  );
@@ -1098,7 +1459,7 @@ async function generateEmbeddingWithValidation(runtime, text) {
  });
  const embedding = Array.isArray(embeddingResult) ? embeddingResult : embeddingResult?.embedding;
  if (!embedding || embedding.length === 0) {
- logger2.warn(`Zero vector detected. Embedding result: ${JSON.stringify(embeddingResult)}`);
+ logger3.warn(`Zero vector detected. Embedding result: ${JSON.stringify(embedding)}`);
  return {
  embedding: null,
  success: false,
@@ -1116,12 +1477,12 @@ async function withRateLimitRetry(operation, errorContext, retryDelay) {
  } catch (error) {
  if (error.status === 429) {
  const delay = retryDelay || error.headers?.["retry-after"] || 5;
- logger2.warn(`Rate limit hit for ${errorContext}. Retrying after ${delay}s`);
+ logger3.warn(`Rate limit hit for ${errorContext}. Retrying after ${delay}s`);
  await new Promise((resolve) => setTimeout(resolve, delay * 1e3));
  try {
  return await operation();
  } catch (retryError) {
- logger2.error(`Failed after retry for ${errorContext}: ${retryError.message}`);
+ logger3.error(`Failed after retry for ${errorContext}: ${retryError.message}`);
  throw retryError;
  }
  }
@@ -1140,7 +1501,7 @@ function createRateLimiter(requestsPerMinute) {
  const oldestRequest = requestTimes[0];
  const timeToWait = Math.max(0, oldestRequest + intervalMs - now);
  if (timeToWait > 0) {
- logger2.debug(`Rate limiting applied, waiting ${timeToWait}ms before next request`);
+ logger3.debug(`Rate limiting applied, waiting ${timeToWait}ms before next request`);
  await new Promise((resolve) => setTimeout(resolve, timeToWait));
  }
  }
@@ -1167,9 +1528,10 @@ var KnowledgeService = class _KnowledgeService extends Service {
  if (typeof value === "string") return value.toLowerCase() === "true";
  return false;
  };
+ const loadDocsOnStartup = parseBooleanEnv(config?.LOAD_DOCS_ON_STARTUP) || process.env.LOAD_DOCS_ON_STARTUP === "true";
  this.knowledgeConfig = {
  CTX_KNOWLEDGE_ENABLED: parseBooleanEnv(config?.CTX_KNOWLEDGE_ENABLED),
- LOAD_DOCS_ON_STARTUP: parseBooleanEnv(config?.LOAD_DOCS_ON_STARTUP),
+ LOAD_DOCS_ON_STARTUP: loadDocsOnStartup,
  MAX_INPUT_TOKENS: config?.MAX_INPUT_TOKENS,
  MAX_OUTPUT_TOKENS: config?.MAX_OUTPUT_TOKENS,
  EMBEDDING_PROVIDER: config?.EMBEDDING_PROVIDER,
@@ -1177,34 +1539,37 @@ var KnowledgeService = class _KnowledgeService extends Service {
  TEXT_EMBEDDING_MODEL: config?.TEXT_EMBEDDING_MODEL
  };
  this.config = { ...this.knowledgeConfig };
- logger3.info(
+ logger4.info(
  `KnowledgeService initialized for agent ${this.runtime.agentId} with config:`,
  this.knowledgeConfig
  );
  if (this.knowledgeConfig.LOAD_DOCS_ON_STARTUP) {
+ logger4.info("LOAD_DOCS_ON_STARTUP is enabled. Loading documents from docs folder...");
  this.loadInitialDocuments().catch((error) => {
- logger3.error("Error during initial document loading in KnowledgeService:", error);
+ logger4.error("Error during initial document loading in KnowledgeService:", error);
  });
+ } else {
+ logger4.info("LOAD_DOCS_ON_STARTUP is disabled. Skipping automatic document loading.");
  }
  }
  async loadInitialDocuments() {
- logger3.info(
+ logger4.info(
  `KnowledgeService: Checking for documents to load on startup for agent ${this.runtime.agentId}`
  );
  try {
  await new Promise((resolve) => setTimeout(resolve, 1e3));
  const result = await loadDocsFromPath(this, this.runtime.agentId);
  if (result.successful > 0) {
- logger3.info(
+ logger4.info(
  `KnowledgeService: Loaded ${result.successful} documents from docs folder on startup for agent ${this.runtime.agentId}`
  );
  } else {
- logger3.info(
+ logger4.info(
  `KnowledgeService: No new documents found to load on startup for agent ${this.runtime.agentId}`
  );
  }
  } catch (error) {
- logger3.error(
+ logger4.error(
  `KnowledgeService: Error loading documents on startup for agent ${this.runtime.agentId}:`,
  error
  );
@@ -1216,23 +1581,23 @@ var KnowledgeService = class _KnowledgeService extends Service {
  * @returns Initialized Knowledge service
  */
  static async start(runtime) {
- logger3.info(`Starting Knowledge service for agent: ${runtime.agentId}`);
+ logger4.info(`Starting Knowledge service for agent: ${runtime.agentId}`);
  const service = new _KnowledgeService(runtime);
  if (service.runtime.character?.knowledge && service.runtime.character.knowledge.length > 0) {
- logger3.info(
+ logger4.info(
  `KnowledgeService: Processing ${service.runtime.character.knowledge.length} character knowledge items.`
  );
  const stringKnowledge = service.runtime.character.knowledge.filter(
  (item) => typeof item === "string"
  );
  await service.processCharacterKnowledge(stringKnowledge).catch((err) => {
- logger3.error(
+ logger4.error(
  `KnowledgeService: Error processing character knowledge during startup: ${err.message}`,
  err
  );
  });
  } else {
- logger3.info(
+ logger4.info(
  `KnowledgeService: No character knowledge to process for agent ${runtime.agentId}.`
  );
  }
@@ -1243,10 +1608,10 @@ var KnowledgeService = class _KnowledgeService extends Service {
  * @param runtime Agent runtime
  */
  static async stop(runtime) {
- logger3.info(`Stopping Knowledge service for agent: ${runtime.agentId}`);
+ logger4.info(`Stopping Knowledge service for agent: ${runtime.agentId}`);
  const service = runtime.getService(_KnowledgeService.serviceType);
  if (!service) {
- logger3.warn(`KnowledgeService not found for agent ${runtime.agentId} during stop.`);
+ logger4.warn(`KnowledgeService not found for agent ${runtime.agentId} during stop.`);
  }
  if (service instanceof _KnowledgeService) {
  await service.stop();
@@ -1256,7 +1621,7 @@ var KnowledgeService = class _KnowledgeService extends Service {
  * Stop the service
  */
  async stop() {
- logger3.info(`Knowledge service stopping for agent: ${this.runtime.agentId}`);
+ logger4.info(`Knowledge service stopping for agent: ${this.runtime.agentId}`);
  }
  /**
  * Add knowledge to the system
@@ -1264,14 +1629,14 @@ var KnowledgeService = class _KnowledgeService extends Service {
  * @returns Promise with document processing result
  */
  async addKnowledge(options) {
- const agentId = this.runtime.agentId;
- logger3.info(
- `KnowledgeService (agent: ${agentId}) processing document for public addKnowledge: ${options.originalFilename}, type: ${options.contentType}`
+ const agentId = options.agentId || this.runtime.agentId;
+ logger4.info(
+ `KnowledgeService processing document for agent: ${agentId}, file: ${options.originalFilename}, type: ${options.contentType}`
  );
  try {
  const existingDocument = await this.runtime.getMemoryById(options.clientDocumentId);
  if (existingDocument && existingDocument.metadata?.type === MemoryType2.DOCUMENT) {
- logger3.info(
+ logger4.info(
  `Document ${options.originalFilename} with ID ${options.clientDocumentId} already exists. Skipping processing.`
  );
  const fragments = await this.runtime.getMemories({
@@ -1290,7 +1655,7 @@ var KnowledgeService = class _KnowledgeService extends Service {
  };
  }
  } catch (error) {
- logger3.debug(
+ logger4.debug(
  `Document ${options.clientDocumentId} not found or error checking existence, proceeding with processing: ${error instanceof Error ? error.message : String(error)}`
  );
  }
@@ -1302,6 +1667,7 @@ var KnowledgeService = class _KnowledgeService extends Service {
  * @returns Promise with document processing result
  */
  async processDocument({
+ agentId: passedAgentId,
  clientDocumentId,
  contentType,
  originalFilename,
@@ -1311,10 +1677,10 @@ var KnowledgeService = class _KnowledgeService extends Service {
  entityId,
  metadata
  }) {
- const agentId = this.runtime.agentId;
+ const agentId = passedAgentId || this.runtime.agentId;
  try {
- logger3.debug(
- `KnowledgeService: Processing document ${originalFilename} (type: ${contentType}) via processDocument`
+ logger4.debug(
+ `KnowledgeService: Processing document ${originalFilename} (type: ${contentType}) via processDocument for agent: ${agentId}`
  );
  let fileBuffer = null;
  let extractedText;
@@ -1324,7 +1690,7 @@ var KnowledgeService = class _KnowledgeService extends Service {
  try {
  fileBuffer = Buffer.from(content, "base64");
  } catch (e) {
- logger3.error(
+ logger4.error(
  `KnowledgeService: Failed to convert base64 to buffer for ${originalFilename}: ${e.message}`
  );
  throw new Error(`Invalid base64 content for PDF file ${originalFilename}`);
@@ -1335,7 +1701,7 @@ var KnowledgeService = class _KnowledgeService extends Service {
  try {
  fileBuffer = Buffer.from(content, "base64");
  } catch (e) {
- logger3.error(
+ logger4.error(
  `KnowledgeService: Failed to convert base64 to buffer for ${originalFilename}: ${e.message}`
  );
  throw new Error(`Invalid base64 content for binary file ${originalFilename}`);
@@ -1343,9 +1709,7 @@ var KnowledgeService = class _KnowledgeService extends Service {
  extractedText = await extractTextFromDocument(fileBuffer, contentType, originalFilename);
  documentContentToStore = extractedText;
  } else {
- const base64Regex = /^[A-Za-z0-9+/]+=*$/;
- const looksLikeBase64 = content && content.length > 0 && base64Regex.test(content.replace(/\s/g, ""));
- if (looksLikeBase64) {
+ if (looksLikeBase64(content)) {
  try {
  const decodedBuffer = Buffer.from(content, "base64");
  const decodedText = decodedBuffer.toString("utf8");
@@ -1354,11 +1718,11 @@ var KnowledgeService = class _KnowledgeService extends Service {
  if (invalidCharCount > 0 && invalidCharCount / textLength > 0.1) {
  throw new Error("Decoded content contains too many invalid characters");
  }
- logger3.debug(`Successfully decoded base64 content for text file: ${originalFilename}`);
+ logger4.debug(`Successfully decoded base64 content for text file: ${originalFilename}`);
  extractedText = decodedText;
  documentContentToStore = decodedText;
  } catch (e) {
- logger3.error(
+ logger4.error(
  `Failed to decode base64 for ${originalFilename}: ${e instanceof Error ? e.message : String(e)}`
  );
  throw new Error(
@@ -1366,7 +1730,7 @@ var KnowledgeService = class _KnowledgeService extends Service {
  );
  }
  } else {
- logger3.debug(`Treating content as plain text for file: ${originalFilename}`);
+ logger4.debug(`Treating content as plain text for file: ${originalFilename}`);
  extractedText = content;
  documentContentToStore = content;
  }
@@ -1375,7 +1739,7 @@ var KnowledgeService = class _KnowledgeService extends Service {
  const noTextError = new Error(
  `KnowledgeService: No text content extracted from ${originalFilename} (type: ${contentType}).`
  );
- logger3.warn(noTextError.message);
+ logger4.warn(noTextError.message);
  throw noTextError;
  }
  const documentMemory = createDocumentMemory({
@@ -1401,14 +1765,14 @@ var KnowledgeService = class _KnowledgeService extends Service {
  roomId: roomId || agentId,
  entityId: entityId || agentId
  };
- logger3.debug(
+ logger4.debug(
  `KnowledgeService: Creating memory with agentId=${agentId}, entityId=${entityId}, roomId=${roomId}, this.runtime.agentId=${this.runtime.agentId}`
  );
- logger3.debug(
+ logger4.debug(
  `KnowledgeService: memoryWithScope agentId=${memoryWithScope.agentId}, entityId=${memoryWithScope.entityId}`
  );
  await this.runtime.createMemory(memoryWithScope, "documents");
- logger3.debug(
+ logger4.debug(
  `KnowledgeService: Stored document ${originalFilename} (Memory ID: ${memoryWithScope.id})`
  );
  const fragmentCount = await processFragmentsSynchronously({
@@ -1422,7 +1786,7 @@ var KnowledgeService = class _KnowledgeService extends Service {
  entityId: entityId || agentId,
  worldId: worldId || agentId
  });
- logger3.info(
+ logger4.info(
  `KnowledgeService: Document ${originalFilename} processed with ${fragmentCount} fragments for agent ${agentId}`
  );
  return {
@@ -1431,7 +1795,7 @@ var KnowledgeService = class _KnowledgeService extends Service {
  fragmentCount
  };
  } catch (error) {
- logger3.error(
+ logger4.error(
  `KnowledgeService: Error processing document ${originalFilename}: ${error.message}`,
  error.stack
  );
@@ -1440,7 +1804,7 @@ var KnowledgeService = class _KnowledgeService extends Service {
  }
  // --- Knowledge methods moved from AgentRuntime ---
  async handleProcessingError(error, context) {
- logger3.error(`KnowledgeService: Error ${context}:`, error?.message || error || "Unknown error");
+ logger4.error(`KnowledgeService: Error ${context}:`, error?.message || error || "Unknown error");
  throw error;
  }
  async checkExistingKnowledge(knowledgeId) {
@@ -1448,9 +1812,9 @@ var KnowledgeService = class _KnowledgeService extends Service {
  return !!existingDocument;
  }
  async getKnowledge(message, scope) {
- logger3.debug("KnowledgeService: getKnowledge called for message id: " + message.id);
+ logger4.debug("KnowledgeService: getKnowledge called for message id: " + message.id);
  if (!message?.content?.text || message?.content?.text.trim().length === 0) {
- logger3.warn("KnowledgeService: Invalid or empty message content for knowledge query.");
+ logger4.warn("KnowledgeService: Invalid or empty message content for knowledge query.");
  return [];
  }
  const embedding = await this.runtime.useModel(ModelType2.TEXT_EMBEDDING, {
@@ -1481,7 +1845,7 @@ var KnowledgeService = class _KnowledgeService extends Service {
  }
  async processCharacterKnowledge(items) {
  await new Promise((resolve) => setTimeout(resolve, 1e3));
- logger3.info(
+ logger4.info(
  `KnowledgeService: Processing ${items.length} character knowledge items for agent ${this.runtime.agentId}`
  );
  const processingPromises = items.map(async (item) => {
@@ -1489,12 +1853,12 @@ var KnowledgeService = class _KnowledgeService extends Service {
  try {
  const knowledgeId = createUniqueUuid(this.runtime.agentId + item, item);
  if (await this.checkExistingKnowledge(knowledgeId)) {
- logger3.debug(
+ logger4.debug(
  `KnowledgeService: Character knowledge item with ID ${knowledgeId} already exists. Skipping.`
  );
  return;
  }
- logger3.debug(
+ logger4.debug(
  `KnowledgeService: Processing character knowledge for ${this.runtime.character?.name} - ${item.slice(0, 100)}`
  );
  let metadata = {
@@ -1545,7 +1909,7 @@ var KnowledgeService = class _KnowledgeService extends Service {
  }
  });
  await Promise.all(processingPromises);
- logger3.info(
+ logger4.info(
  `KnowledgeService: Finished processing character knowledge for agent ${this.runtime.agentId}.`
  );
  }
@@ -1565,7 +1929,7 @@ var KnowledgeService = class _KnowledgeService extends Service {
  worldId: scope?.worldId ?? this.runtime.agentId,
  entityId: scope?.entityId ?? this.runtime.agentId
  };
- logger3.debug(`KnowledgeService: _internalAddKnowledge called for item ID ${item.id}`);
+ logger4.debug(`KnowledgeService: _internalAddKnowledge called for item ID ${item.id}`);
  const documentMemory = {
  id: item.id,
  // This ID should be the unique ID for the document being added.
@@ -1587,7 +1951,7 @@ var KnowledgeService = class _KnowledgeService extends Service {
  };
  const existingDocument = await this.runtime.getMemoryById(item.id);
  if (existingDocument) {
- logger3.debug(
+ logger4.debug(
  `KnowledgeService: Document ${item.id} already exists in _internalAddKnowledge, updating...`
  );
  await this.runtime.updateMemory({
@@ -1611,13 +1975,13 @@ var KnowledgeService = class _KnowledgeService extends Service {
  await this.processDocumentFragment(fragment);
  fragmentsProcessed++;
  } catch (error) {
- logger3.error(
+ logger4.error(
  `KnowledgeService: Error processing fragment ${fragment.id} for document ${item.id}:`,
  error
  );
  }
  }
- logger3.debug(
+ logger4.debug(
  `KnowledgeService: Processed ${fragmentsProcessed}/${fragments.length} fragments for document ${item.id}.`
  );
  }
@@ -1626,7 +1990,7 @@ var KnowledgeService = class _KnowledgeService extends Service {
  await this.runtime.addEmbeddingToMemory(fragment);
  await this.runtime.createMemory(fragment, "knowledge");
  } catch (error) {
- logger3.error(
+ logger4.error(
  `KnowledgeService: Error processing fragment ${fragment.id}:`,
  error instanceof Error ? error.message : String(error)
  );
@@ -1678,8 +2042,9 @@ var KnowledgeService = class _KnowledgeService extends Service {
  */
  async getMemories(params) {
  return this.runtime.getMemories({
- ...params
+ ...params,
  // includes tableName, roomId, count, end
+ agentId: this.runtime.agentId
  });
  }
  /**
@@ -1690,7 +2055,7 @@ var KnowledgeService = class _KnowledgeService extends Service {
  */
  async deleteMemory(memoryId) {
  await this.runtime.deleteMemory(memoryId);
- logger3.info(
+ logger4.info(
  `KnowledgeService: Deleted memory ${memoryId} for agent ${this.runtime.agentId}. Assumed it was a document or related fragment.`
  );
  }
@@ -2633,7 +2998,7 @@ var KnowledgeTestSuite = class {
  var tests_default = new KnowledgeTestSuite();

  // src/actions.ts
- import { logger as logger4, createUniqueUuid as createUniqueUuid2 } from "@elizaos/core";
+ import { logger as logger5, stringToUuid } from "@elizaos/core";
  import * as fs2 from "fs";
  import * as path2 from "path";
  var processKnowledgeAction = {
@@ -2693,7 +3058,7 @@ var processKnowledgeAction = {
  const hasPath = pathPattern.test(text);
  const service = runtime.getService(KnowledgeService.serviceType);
  if (!service) {
- logger4.warn(
+ logger5.warn(
  "Knowledge service not available for PROCESS_KNOWLEDGE action"
  );
  return false;
@@ -2734,7 +3099,7 @@ var processKnowledgeAction = {
  else if ([".txt", ".md", ".tson", ".xml", ".csv"].includes(fileExt))
  contentType = "text/plain";
  const knowledgeOptions = {
- clientDocumentId: createUniqueUuid2(runtime.agentId + fileName + Date.now(), fileName),
+ clientDocumentId: stringToUuid(runtime.agentId + fileName + Date.now()),
  contentType,
  originalFilename: fileName,
  worldId: runtime.agentId,
@@ -2761,7 +3126,7 @@ var processKnowledgeAction = {
  return;
  }
  const knowledgeOptions = {
- clientDocumentId: createUniqueUuid2(runtime.agentId + "text" + Date.now(), "user-knowledge"),
+ clientDocumentId: stringToUuid(runtime.agentId + "text" + Date.now() + "user-knowledge"),
  contentType: "text/plain",
  originalFilename: "user-knowledge.txt",
  worldId: runtime.agentId,
@@ -2778,7 +3143,7 @@ var processKnowledgeAction = {
  await callback(response);
  }
  } catch (error) {
- logger4.error("Error in PROCESS_KNOWLEDGE action:", error);
+ logger5.error("Error in PROCESS_KNOWLEDGE action:", error);
  const errorResponse = {
  text: `I encountered an error while processing the knowledge: ${error instanceof Error ? error.message : "Unknown error"}`
  };
@@ -2889,7 +3254,7 @@ ${formattedResults}`
  await callback(response);
  }
  } catch (error) {
- logger4.error("Error in SEARCH_KNOWLEDGE action:", error);
+ logger5.error("Error in SEARCH_KNOWLEDGE action:", error);
  const errorResponse = {
  text: `I encountered an error while searching the knowledge base: ${error instanceof Error ? error.message : "Unknown error"}`
  };
@@ -2902,7 +3267,7 @@ ${formattedResults}`
  var knowledgeActions = [processKnowledgeAction, searchKnowledgeAction];

  // src/routes.ts
- import { MemoryType as MemoryType4, createUniqueUuid as createUniqueUuid3, logger as logger5 } from "@elizaos/core";
+ import { createUniqueUuid as createUniqueUuid2, logger as logger6, ModelType as ModelType4 } from "@elizaos/core";
  import fs3 from "fs";
  import path3 from "path";
  import multer from "multer";
@@ -2953,7 +3318,7 @@ var cleanupFile = (filePath) => {
  try {
  fs3.unlinkSync(filePath);
  } catch (error) {
- logger5.error(`Error cleaning up file ${filePath}:`, error);
+ logger6.error(`Error cleaning up file ${filePath}:`, error);
  }
  }
  };
@@ -2978,37 +3343,32 @@ async function uploadKnowledgeHandler(req, res, runtime) {
  if (!files || files.length === 0) {
  return sendError(res, 400, "NO_FILES", "No files uploaded");
  }
+ const agentId = req.body.agentId || req.query.agentId;
+ if (!agentId) {
+ logger6.error("[KNOWLEDGE UPLOAD HANDLER] No agent ID provided in request");
+ return sendError(
+ res,
+ 400,
+ "MISSING_AGENT_ID",
+ "Agent ID is required for uploading knowledge"
+ );
+ }
+ const worldId = req.body.worldId || agentId;
+ logger6.info(`[KNOWLEDGE UPLOAD HANDLER] Processing upload for agent: ${agentId}`);
  const processingPromises = files.map(async (file, index) => {
  let knowledgeId;
  const originalFilename = file.originalname;
- const agentId = req.body.agentId || req.query.agentId || runtime.agentId;
- const worldId = req.body.worldId || agentId;
  const filePath = file.path;
- knowledgeId = req.body?.documentIds && req.body.documentIds[index] || req.body?.documentId || createUniqueUuid3(runtime, `knowledge-${originalFilename}-${Date.now()}`);
+ knowledgeId = req.body?.documentIds && req.body.documentIds[index] || req.body?.documentId || createUniqueUuid2(runtime, `knowledge-${originalFilename}-${Date.now()}`);
+ logger6.debug(
+ `[KNOWLEDGE UPLOAD HANDLER] File: ${originalFilename}, Agent ID: ${agentId}, World ID: ${worldId}, Knowledge ID: ${knowledgeId}`
+ );
  try {
  const fileBuffer = await fs3.promises.readFile(filePath);
- const fileExt = file.originalname.split(".").pop()?.toLowerCase() || "";
- const filename = file.originalname;
- const title = filename.replace(`.${fileExt}`, "");
  const base64Content = fileBuffer.toString("base64");
- const knowledgeItem = {
- id: knowledgeId,
- content: {
- text: base64Content
- },
- metadata: {
- type: MemoryType4.DOCUMENT,
- timestamp: Date.now(),
- source: "upload",
- filename,
- fileExt,
- title,
- path: originalFilename,
- fileType: file.mimetype,
- fileSize: file.size
- }
- };
  const addKnowledgeOpts = {
+ agentId,
+ // Pass the agent ID from frontend
  clientDocumentId: knowledgeId,
  // This is knowledgeItem.id
  contentType: file.mimetype,
@@ -3034,7 +3394,7 @@ async function uploadKnowledgeHandler(req, res, runtime) {
  status: "success"
  };
  } catch (fileError) {
- logger5.error(
+ logger6.error(
  `[KNOWLEDGE UPLOAD HANDLER] Error processing file ${file.originalname}: ${fileError}`
  );
  cleanupFile(filePath);
@@ -3053,16 +3413,26 @@ async function uploadKnowledgeHandler(req, res, runtime) {
  if (fileUrls.length === 0) {
  return sendError(res, 400, "MISSING_URL", "File URL is required");
  }
- const agentId = req.body.agentId || req.query.agentId || runtime.agentId;
+ const agentId = req.body.agentId || req.query.agentId;
+ if (!agentId) {
+ logger6.error("[KNOWLEDGE URL HANDLER] No agent ID provided in request");
+ return sendError(
+ res,
+ 400,
+ "MISSING_AGENT_ID",
+ "Agent ID is required for uploading knowledge from URLs"
+ );
+ }
+ logger6.info(`[KNOWLEDGE URL HANDLER] Processing URL upload for agent: ${agentId}`);
  const processingPromises = fileUrls.map(async (fileUrl) => {
  try {
  const normalizedUrl = normalizeS3Url(fileUrl);
- const knowledgeId = createUniqueUuid3(runtime, normalizedUrl);
+ const knowledgeId = createUniqueUuid2(runtime, normalizedUrl);
  const urlObject = new URL(fileUrl);
  const pathSegments = urlObject.pathname.split("/");
  const encodedFilename = pathSegments[pathSegments.length - 1] || "document.pdf";
  const originalFilename = decodeURIComponent(encodedFilename);
- logger5.info(`[KNOWLEDGE URL HANDLER] Fetching content from URL: ${fileUrl}`);
+ logger6.info(`[KNOWLEDGE URL HANDLER] Fetching content from URL: ${fileUrl}`);
  const { content, contentType: fetchedContentType } = await fetchUrlContent(fileUrl);
  let contentType = fetchedContentType;
  if (contentType === "application/octet-stream") {
@@ -3086,6 +3456,8 @@ async function uploadKnowledgeHandler(req, res, runtime) {
  }
  }
  const addKnowledgeOpts = {
+ agentId,
+ // Pass the agent ID from frontend
  clientDocumentId: knowledgeId,
  contentType,
  originalFilename,
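
The URL-based ingestion path gains the same requirement: agentId must accompany fileUrls, and addKnowledgeOpts now carries the agent ID explicitly. A sketch of the request body, assuming the same handler is reached via the same hypothetical route as the multipart upload (the diff shows one uploadKnowledgeHandler serving both shapes):

    // Hypothetical client call; the route path is an assumption,
    // but the body fields mirror the handler above.
    await fetch("http://localhost:3000/knowledge/documents", {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({
        agentId: "your-agent-id", // required as of 1.0.7
        fileUrls: ["https://example.com/docs/guide.pdf"],
      }),
    });
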
@@ -3099,7 +3471,7 @@ async function uploadKnowledgeHandler(req, res, runtime) {
  url: normalizedUrl
  }
  };
- logger5.debug(
+ logger6.debug(
  `[KNOWLEDGE URL HANDLER] Processing knowledge from URL: ${fileUrl} (type: ${contentType})`
  );
  const result = await service.addKnowledge(addKnowledgeOpts);
@@ -3113,7 +3485,7 @@ async function uploadKnowledgeHandler(req, res, runtime) {
  status: "success"
  };
  } catch (urlError) {
- logger5.error(`[KNOWLEDGE URL HANDLER] Error processing URL ${fileUrl}: ${urlError}`);
+ logger6.error(`[KNOWLEDGE URL HANDLER] Error processing URL ${fileUrl}: ${urlError}`);
  return {
  fileUrl,
  status: "error_processing",
@@ -3125,7 +3497,7 @@ async function uploadKnowledgeHandler(req, res, runtime) {
  sendSuccess(res, results);
  }
  } catch (error) {
- logger5.error("[KNOWLEDGE HANDLER] Error processing knowledge:", error);
+ logger6.error("[KNOWLEDGE HANDLER] Error processing knowledge:", error);
  if (hasUploadedFiles) {
  cleanupFiles(req.files);
  }
@@ -3157,14 +3529,14 @@ async function getKnowledgeDocumentsHandler(req, res, runtime) {
  if (fileUrls && fileUrls.length > 0) {
  const normalizedRequestUrls = fileUrls.map((url) => normalizeS3Url(url));
  const urlBasedIds = normalizedRequestUrls.map(
- (url) => createUniqueUuid3(runtime, url)
+ (url) => createUniqueUuid2(runtime, url)
  );
  filteredMemories = memories.filter(
  (memory) => urlBasedIds.includes(memory.id) || // If the ID corresponds directly
  // Or if the URL is stored in the metadata (check if it exists)
  memory.metadata && "url" in memory.metadata && typeof memory.metadata.url === "string" && normalizedRequestUrls.includes(normalizeS3Url(memory.metadata.url))
  );
- logger5.debug(
+ logger6.debug(
  `[KNOWLEDGE GET HANDLER] Filtered documents by URLs: ${fileUrls.length} URLs, found ${filteredMemories.length} matching documents`
  );
  }
@@ -3179,12 +3551,12 @@ async function getKnowledgeDocumentsHandler(req, res, runtime) {
  totalRequested: fileUrls ? fileUrls.length : 0
  });
  } catch (error) {
- logger5.error("[KNOWLEDGE GET HANDLER] Error retrieving documents:", error);
+ logger6.error("[KNOWLEDGE GET HANDLER] Error retrieving documents:", error);
  sendError(res, 500, "RETRIEVAL_ERROR", "Failed to retrieve documents", error.message);
  }
  }
  async function deleteKnowledgeDocumentHandler(req, res, runtime) {
- logger5.debug(`[KNOWLEDGE DELETE HANDLER] Received DELETE request:
+ logger6.debug(`[KNOWLEDGE DELETE HANDLER] Received DELETE request:
  - path: ${req.path}
  - params: ${JSON.stringify(req.params)}
  `);
@@ -3199,26 +3571,26 @@ async function deleteKnowledgeDocumentHandler(req, res, runtime) {
  }
  const knowledgeId = req.params.knowledgeId;
  if (!knowledgeId || knowledgeId.length < 36) {
- logger5.error(`[KNOWLEDGE DELETE HANDLER] Invalid knowledge ID format: ${knowledgeId}`);
+ logger6.error(`[KNOWLEDGE DELETE HANDLER] Invalid knowledge ID format: ${knowledgeId}`);
  return sendError(res, 400, "INVALID_ID", "Invalid Knowledge ID format");
  }
  try {
  const typedKnowledgeId = knowledgeId;
- logger5.debug(
+ logger6.debug(
  `[KNOWLEDGE DELETE HANDLER] Attempting to delete document with ID: ${typedKnowledgeId}`
  );
  await service.deleteMemory(typedKnowledgeId);
- logger5.info(
+ logger6.info(
  `[KNOWLEDGE DELETE HANDLER] Successfully deleted document with ID: ${typedKnowledgeId}`
  );
  sendSuccess(res, null, 204);
  } catch (error) {
- logger5.error(`[KNOWLEDGE DELETE HANDLER] Error deleting document ${knowledgeId}:`, error);
+ logger6.error(`[KNOWLEDGE DELETE HANDLER] Error deleting document ${knowledgeId}:`, error);
  sendError(res, 500, "DELETE_ERROR", "Failed to delete document", error.message);
  }
  }
  async function getKnowledgeByIdHandler(req, res, runtime) {
- logger5.debug(`[KNOWLEDGE GET BY ID HANDLER] Received GET request:
+ logger6.debug(`[KNOWLEDGE GET BY ID HANDLER] Received GET request:
  - path: ${req.path}
  - params: ${JSON.stringify(req.params)}
  `);
@@ -3233,11 +3605,11 @@ async function getKnowledgeByIdHandler(req, res, runtime) {
  }
  const knowledgeId = req.params.knowledgeId;
  if (!knowledgeId || knowledgeId.length < 36) {
- logger5.error(`[KNOWLEDGE GET BY ID HANDLER] Invalid knowledge ID format: ${knowledgeId}`);
+ logger6.error(`[KNOWLEDGE GET BY ID HANDLER] Invalid knowledge ID format: ${knowledgeId}`);
  return sendError(res, 400, "INVALID_ID", "Invalid Knowledge ID format");
  }
  try {
- logger5.debug(`[KNOWLEDGE GET BY ID HANDLER] Retrieving document with ID: ${knowledgeId}`);
+ logger6.debug(`[KNOWLEDGE GET BY ID HANDLER] Retrieving document with ID: ${knowledgeId}`);
  const agentId = req.query.agentId;
  const memories = await service.getMemories({
  tableName: "documents",
@@ -3254,17 +3626,17 @@ async function getKnowledgeByIdHandler(req, res, runtime) {
  };
  sendSuccess(res, { document: cleanDocument });
  } catch (error) {
- logger5.error(`[KNOWLEDGE GET BY ID HANDLER] Error retrieving document ${knowledgeId}:`, error);
+ logger6.error(`[KNOWLEDGE GET BY ID HANDLER] Error retrieving document ${knowledgeId}:`, error);
  sendError(res, 500, "RETRIEVAL_ERROR", "Failed to retrieve document", error.message);
  }
  }
  async function knowledgePanelHandler(req, res, runtime) {
  const agentId = runtime.agentId;
- logger5.debug(`[KNOWLEDGE PANEL] Serving panel for agent ${agentId}, request path: ${req.path}`);
+ logger6.debug(`[KNOWLEDGE PANEL] Serving panel for agent ${agentId}, request path: ${req.path}`);
  try {
  const currentDir = path3.dirname(new URL(import.meta.url).pathname);
  const frontendPath = path3.join(currentDir, "../dist/index.html");
- logger5.debug(`[KNOWLEDGE PANEL] Looking for frontend at: ${frontendPath}`);
+ logger6.debug(`[KNOWLEDGE PANEL] Looking for frontend at: ${frontendPath}`);
  if (fs3.existsSync(frontendPath)) {
  const html = await fs3.promises.readFile(frontendPath, "utf8");
  const injectedHtml = html.replace(
@@ -3298,10 +3670,10 @@ async function knowledgePanelHandler(req, res, runtime) {
  }
  }
  } catch (manifestError) {
- logger5.error("[KNOWLEDGE PANEL] Error reading manifest:", manifestError);
+ logger6.error("[KNOWLEDGE PANEL] Error reading manifest:", manifestError);
  }
  }
- logger5.debug(`[KNOWLEDGE PANEL] Using fallback with CSS: ${cssFile}, JS: ${jsFile}`);
+ logger6.debug(`[KNOWLEDGE PANEL] Using fallback with CSS: ${cssFile}, JS: ${jsFile}`);
  const html = `
  <!DOCTYPE html>
  <html lang="en">
@@ -3335,13 +3707,13 @@ async function knowledgePanelHandler(req, res, runtime) {
  res.end(html);
  }
  } catch (error) {
- logger5.error("[KNOWLEDGE PANEL] Error serving frontend:", error);
+ logger6.error("[KNOWLEDGE PANEL] Error serving frontend:", error);
  sendError(res, 500, "FRONTEND_ERROR", "Failed to load knowledge panel", error.message);
  }
  }
  async function frontendAssetHandler(req, res, runtime) {
  try {
- logger5.debug(
+ logger6.debug(
  `[KNOWLEDGE ASSET HANDLER] Called with req.path: ${req.path}, req.originalUrl: ${req.originalUrl}, req.params: ${JSON.stringify(req.params)}`
  );
  const currentDir = path3.dirname(new URL(import.meta.url).pathname);
@@ -3361,7 +3733,7 @@ async function frontendAssetHandler(req, res, runtime) {
  );
  }
  const assetPath = path3.join(currentDir, "../dist/assets", assetName);
- logger5.debug(`[KNOWLEDGE ASSET HANDLER] Attempting to serve asset: ${assetPath}`);
+ logger6.debug(`[KNOWLEDGE ASSET HANDLER] Attempting to serve asset: ${assetPath}`);
  if (fs3.existsSync(assetPath)) {
  const fileStream = fs3.createReadStream(assetPath);
  let contentType = "application/octet-stream";
@@ -3376,7 +3748,7 @@ async function frontendAssetHandler(req, res, runtime) {
  sendError(res, 404, "NOT_FOUND", `Asset not found: ${req.url}`);
  }
  } catch (error) {
- logger5.error(`[KNOWLEDGE ASSET HANDLER] Error serving asset ${req.url}:`, error);
+ logger6.error(`[KNOWLEDGE ASSET HANDLER] Error serving asset ${req.url}:`, error);
  sendError(res, 500, "ASSET_ERROR", `Failed to load asset ${req.url}`, error.message);
  }
  }
@@ -3400,10 +3772,91 @@ async function getKnowledgeChunksHandler(req, res, runtime) {
  ) : chunks;
  sendSuccess(res, { chunks: filteredChunks });
  } catch (error) {
- logger5.error("[KNOWLEDGE CHUNKS GET HANDLER] Error retrieving chunks:", error);
+ logger6.error("[KNOWLEDGE CHUNKS GET HANDLER] Error retrieving chunks:", error);
  sendError(res, 500, "RETRIEVAL_ERROR", "Failed to retrieve knowledge chunks", error.message);
  }
  }
+ async function searchKnowledgeHandler(req, res, runtime) {
+ const service = runtime.getService(KnowledgeService.serviceType);
+ if (!service) {
+ return sendError(res, 500, "SERVICE_NOT_FOUND", "KnowledgeService not found");
+ }
+ try {
+ const searchText = req.query.q;
+ const parsedThreshold = req.query.threshold ? Number.parseFloat(req.query.threshold) : NaN;
+ let matchThreshold = Number.isNaN(parsedThreshold) ? 0.5 : parsedThreshold;
+ matchThreshold = Math.max(0, Math.min(1, matchThreshold));
+ const parsedLimit = req.query.limit ? Number.parseInt(req.query.limit, 10) : NaN;
+ let limit = Number.isNaN(parsedLimit) ? 20 : parsedLimit;
+ limit = Math.max(1, Math.min(100, limit));
+ const agentId = req.query.agentId || runtime.agentId;
+ if (!searchText || searchText.trim().length === 0) {
+ return sendError(res, 400, "INVALID_QUERY", "Search query cannot be empty");
+ }
+ if (req.query.threshold && (parsedThreshold < 0 || parsedThreshold > 1)) {
+ logger6.debug(
+ `[KNOWLEDGE SEARCH] Threshold value ${parsedThreshold} was clamped to ${matchThreshold}`
+ );
+ }
+ if (req.query.limit && (parsedLimit < 1 || parsedLimit > 100)) {
+ logger6.debug(`[KNOWLEDGE SEARCH] Limit value ${parsedLimit} was clamped to ${limit}`);
+ }
+ logger6.debug(
+ `[KNOWLEDGE SEARCH] Searching for: "${searchText}" with threshold: ${matchThreshold}, limit: ${limit}`
+ );
+ const embedding = await runtime.useModel(ModelType4.TEXT_EMBEDDING, {
+ text: searchText
+ });
+ const results = await runtime.searchMemories({
+ tableName: "knowledge",
+ embedding,
+ query: searchText,
+ count: limit,
+ match_threshold: matchThreshold,
+ roomId: agentId
+ });
+ const enhancedResults = await Promise.all(
+ results.map(async (fragment) => {
+ let documentTitle = "Unknown Document";
+ let documentFilename = "unknown";
+ if (fragment.metadata && typeof fragment.metadata === "object" && "documentId" in fragment.metadata) {
+ const documentId = fragment.metadata.documentId;
+ try {
+ const document = await runtime.getMemoryById(documentId);
+ if (document && document.metadata) {
+ documentTitle = document.metadata.title || document.metadata.filename || documentTitle;
+ documentFilename = document.metadata.filename || documentFilename;
+ }
+ } catch (e) {
+ logger6.debug(`Could not fetch document ${documentId} for fragment`);
+ }
+ }
+ return {
+ id: fragment.id,
+ content: fragment.content,
+ similarity: fragment.similarity || 0,
+ metadata: {
+ ...fragment.metadata || {},
+ documentTitle,
+ documentFilename
+ }
+ };
+ })
+ );
+ logger6.info(
+ `[KNOWLEDGE SEARCH] Found ${enhancedResults.length} results for query: "${searchText}"`
+ );
+ sendSuccess(res, {
+ query: searchText,
+ threshold: matchThreshold,
+ results: enhancedResults,
+ count: enhancedResults.length
+ });
+ } catch (error) {
+ logger6.error("[KNOWLEDGE SEARCH] Error searching knowledge:", error);
+ sendError(res, 500, "SEARCH_ERROR", "Failed to search knowledge", error.message);
+ }
+ }
  async function uploadKnowledgeWithMulter(req, res, runtime) {
  const upload = createUploadMiddleware(runtime);
  const uploadArray = upload.array(
@@ -3412,7 +3865,7 @@ async function uploadKnowledgeWithMulter(req, res, runtime) {
  );
  uploadArray(req, res, (err) => {
  if (err) {
- logger5.error("[KNOWLEDGE UPLOAD] Multer error:", err);
+ logger6.error("[KNOWLEDGE UPLOAD] Multer error:", err);
  return sendError(res, 400, "UPLOAD_ERROR", err.message);
  }
  uploadKnowledgeHandler(req, res, runtime);
@@ -3455,6 +3908,11 @@ var knowledgeRoutes = [
  type: "GET",
  path: "/knowledges",
  handler: getKnowledgeChunksHandler
+ },
+ {
+ type: "GET",
+ path: "/search",
+ handler: searchKnowledgeHandler
  }
  ];
 
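The new GET /search route registered above is served by searchKnowledgeHandler: q is required, threshold is clamped to [0, 1] (default 0.5), limit is clamped to [1, 100] (default 20), and agentId falls back to the runtime's own agent; for example, threshold=1.5 is clamped to 1 and limit=500 to 100. A usage sketch, assuming the knowledge routes are mounted under /knowledge and that sendSuccess wraps the payload in a data envelope (both assumptions; neither is visible in this diff):

    // Hypothetical client call against the new search endpoint.
    const params = new URLSearchParams({ q: "vector databases", threshold: "0.7", limit: "10" });
    const res = await fetch(`http://localhost:3000/knowledge/search?${params}`);
    const body = await res.json();
    // Assumed envelope: { data: { query, threshold, results, count } }, where each
    // result's metadata is enriched with documentTitle and documentFilename.
    for (const r of body.data?.results ?? []) {
      console.log(r.similarity, r.metadata.documentTitle);
    }
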
@@ -3470,56 +3928,59 @@ var knowledgePlugin = {
  CTX_KNOWLEDGE_ENABLED: "false"
  },
  async init(config, runtime) {
- logger6.info("Initializing Knowledge Plugin...");
+ logger7.info("Initializing Knowledge Plugin...");
  try {
- logger6.info("Validating model configuration for Knowledge plugin...");
+ logger7.info("Validating model configuration for Knowledge plugin...");
  const validatedConfig = validateModelConfig(runtime);
  if (validatedConfig.CTX_KNOWLEDGE_ENABLED) {
- logger6.info("Running in Contextual Knowledge mode with text generation capabilities.");
- logger6.info(
+ logger7.info("Running in Contextual Knowledge mode with text generation capabilities.");
+ logger7.info(
  `Using ${validatedConfig.EMBEDDING_PROVIDER} for embeddings and ${validatedConfig.TEXT_PROVIDER} for text generation.`
  );
  } else {
  const usingPluginOpenAI = !process.env.EMBEDDING_PROVIDER;
  if (usingPluginOpenAI) {
- logger6.info(
+ logger7.info(
  "Running in Basic Embedding mode with auto-detected configuration from plugin-openai."
  );
  } else {
- logger6.info(
+ logger7.info(
  "Running in Basic Embedding mode (CTX_KNOWLEDGE_ENABLED=false). TEXT_PROVIDER and TEXT_MODEL not required."
  );
  }
- logger6.info(
+ logger7.info(
  `Using ${validatedConfig.EMBEDDING_PROVIDER} for embeddings with ${validatedConfig.TEXT_EMBEDDING_MODEL}.`
  );
  }
- logger6.info("Model configuration validated successfully.");
+ logger7.info("Model configuration validated successfully.");
  if (runtime) {
- logger6.info(`Knowledge Plugin initialized for agent: ${runtime.agentId}`);
- const loadDocsOnStartup = config.LOAD_DOCS_ON_STARTUP !== "false" && process.env.LOAD_DOCS_ON_STARTUP !== "false";
+ logger7.info(`Knowledge Plugin initialized for agent: ${runtime.agentId}`);
+ const loadDocsOnStartup = config.LOAD_DOCS_ON_STARTUP === "true" || process.env.LOAD_DOCS_ON_STARTUP === "true";
  if (loadDocsOnStartup) {
+ logger7.info("LOAD_DOCS_ON_STARTUP is enabled. Scheduling document loading...");
  setTimeout(async () => {
  try {
  const service = runtime.getService(KnowledgeService.serviceType);
  if (service instanceof KnowledgeService) {
- const { loadDocsFromPath: loadDocsFromPath2 } = await import("./docs-loader-AEQHIBO4.js");
+ const { loadDocsFromPath: loadDocsFromPath2 } = await import("./docs-loader-IBTEOAYT.js");
  const result = await loadDocsFromPath2(service, runtime.agentId);
  if (result.successful > 0) {
- logger6.info(`Loaded ${result.successful} documents from docs folder on startup`);
+ logger7.info(`Loaded ${result.successful} documents from docs folder on startup`);
  }
  }
  } catch (error) {
- logger6.error("Error loading documents on startup:", error);
+ logger7.error("Error loading documents on startup:", error);
  }
  }, 5e3);
+ } else {
+ logger7.info("LOAD_DOCS_ON_STARTUP is not enabled. Skipping automatic document loading.");
  }
  }
- logger6.info(
+ logger7.info(
  "Knowledge Plugin initialized. Frontend panel should be discoverable via its public route."
  );
  } catch (error) {
- logger6.error("Failed to initialize Knowledge plugin:", error);
+ logger7.error("Failed to initialize Knowledge plugin:", error);
  throw error;
  }
  },
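
The loadDocsOnStartup change in this hunk is a behavioral flip, not just a rename: 1.0.5 loaded the docs folder unless the flag was explicitly "false", while 1.0.7 loads it only when the flag is explicitly "true". A minimal sketch of the two checks as reconstructed from the diff above:

    // `flag` stands for the LOAD_DOCS_ON_STARTUP value from config or process.env.
    function loadedIn105(flag?: string): boolean {
      return flag !== "false"; // opt-out: unset meant "load docs"
    }
    function loadedIn107(flag?: string): boolean {
      return flag === "true"; // opt-in: unset now means "skip"
    }
    // loadedIn105(undefined) === true
    // loadedIn107(undefined) === false

Deployments that relied on the old default must now set LOAD_DOCS_ON_STARTUP=true to keep automatic document loading.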