@elizaos/plugin-knowledge 1.0.6 → 1.0.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -9,7 +9,7 @@ import {
  } from "./chunk-536BD2UA.js";

  // src/index.ts
- import { logger as logger6 } from "@elizaos/core";
+ import { logger as logger7 } from "@elizaos/core";

  // src/types.ts
  import z from "zod";
@@ -18,7 +18,7 @@ var ModelConfigSchema = z.object({
  // NOTE: If EMBEDDING_PROVIDER is not specified, the plugin automatically assumes
  // plugin-openai is being used and will use OPENAI_EMBEDDING_MODEL and
  // OPENAI_EMBEDDING_DIMENSIONS for configuration
- EMBEDDING_PROVIDER: z.enum(["openai", "google"]),
+ EMBEDDING_PROVIDER: z.enum(["openai", "google"]).optional(),
  TEXT_PROVIDER: z.enum(["openai", "anthropic", "openrouter", "google"]).optional(),
  // API keys
  OPENAI_API_KEY: z.string().optional(),
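
With `EMBEDDING_PROVIDER` now optional, a configuration that names no embedding provider passes schema validation instead of failing at startup. A minimal sketch of the effect (abbreviated schema, not the plugin's full `ModelConfigSchema`):

```ts
import z from "zod";

// Abbreviated sketch; field names mirror the diff above.
const schema = z.object({
  EMBEDDING_PROVIDER: z.enum(["openai", "google"]).optional(),
  OPENAI_API_KEY: z.string().optional(),
});

schema.parse({}); // throws in 1.0.6 (EMBEDDING_PROVIDER was required); passes in 1.0.8
```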
@@ -66,14 +66,14 @@ function validateModelConfig(runtime) {
  const openaiApiKey2 = getSetting("OPENAI_API_KEY");
  const openaiEmbeddingModel = getSetting("OPENAI_EMBEDDING_MODEL");
  if (openaiApiKey2 && openaiEmbeddingModel) {
- logger.info("EMBEDDING_PROVIDER not specified, using configuration from plugin-openai");
+ logger.debug("EMBEDDING_PROVIDER not specified, using configuration from plugin-openai");
  } else {
- logger.warn(
- "EMBEDDING_PROVIDER not specified, but plugin-openai configuration incomplete. Check OPENAI_API_KEY and OPENAI_EMBEDDING_MODEL."
+ logger.debug(
+ "EMBEDDING_PROVIDER not specified. Assuming embeddings are provided by another plugin (e.g., plugin-google-genai)."
  );
  }
  }
- const finalEmbeddingProvider = embeddingProvider || "openai";
+ const finalEmbeddingProvider = embeddingProvider;
  const textEmbeddingModel = getSetting("TEXT_EMBEDDING_MODEL") || getSetting("OPENAI_EMBEDDING_MODEL") || "text-embedding-3-small";
  const embeddingDimension = getSetting("EMBEDDING_DIMENSION") || getSetting("OPENAI_EMBEDDING_DIMENSIONS") || "1536";
  const openaiApiKey = getSetting("OPENAI_API_KEY");
@@ -106,23 +106,21 @@ function validateModelConfig(runtime) {
  }
  }
  function validateConfigRequirements(config, assumePluginOpenAI) {
- if (!assumePluginOpenAI) {
- if (config.EMBEDDING_PROVIDER === "openai" && !config.OPENAI_API_KEY) {
- throw new Error('OPENAI_API_KEY is required when EMBEDDING_PROVIDER is set to "openai"');
- }
- if (config.EMBEDDING_PROVIDER === "google" && !config.GOOGLE_API_KEY) {
- throw new Error('GOOGLE_API_KEY is required when EMBEDDING_PROVIDER is set to "google"');
- }
- } else {
- if (!config.OPENAI_API_KEY) {
- throw new Error("OPENAI_API_KEY is required when using plugin-openai configuration");
- }
- if (!config.TEXT_EMBEDDING_MODEL) {
- throw new Error("OPENAI_EMBEDDING_MODEL is required when using plugin-openai configuration");
- }
+ const embeddingProvider = config.EMBEDDING_PROVIDER;
+ if (embeddingProvider === "openai" && !config.OPENAI_API_KEY) {
+ throw new Error('OPENAI_API_KEY is required when EMBEDDING_PROVIDER is set to "openai"');
+ }
+ if (embeddingProvider === "google" && !config.GOOGLE_API_KEY) {
+ throw new Error('GOOGLE_API_KEY is required when EMBEDDING_PROVIDER is set to "google"');
+ }
+ if (!embeddingProvider) {
+ logger.debug("No EMBEDDING_PROVIDER specified. Embeddings will be handled by the runtime.");
+ }
+ if (assumePluginOpenAI && config.OPENAI_API_KEY && !config.TEXT_EMBEDDING_MODEL) {
+ throw new Error("OPENAI_EMBEDDING_MODEL is required when using plugin-openai configuration");
  }
  if (config.CTX_KNOWLEDGE_ENABLED) {
- logger.info("Contextual Knowledge is enabled. Validating text generation settings...");
+ logger.debug("Contextual Knowledge is enabled. Validating text generation settings...");
  if (config.TEXT_PROVIDER === "openai" && !config.OPENAI_API_KEY) {
  throw new Error('OPENAI_API_KEY is required when TEXT_PROVIDER is set to "openai"');
  }
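
Net effect: the explicit provider checks now run unconditionally, an unset provider merely logs at debug level, and the plugin-openai model check only fires when an OpenAI key is actually present. A condensed sketch of the 1.0.8 rules (hypothetical standalone function, simplified types):

```ts
// Hypothetical condensation of validateConfigRequirements' embedding checks.
function checkEmbeddingConfig(
  cfg: {
    EMBEDDING_PROVIDER?: "openai" | "google";
    OPENAI_API_KEY?: string;
    GOOGLE_API_KEY?: string;
    TEXT_EMBEDDING_MODEL?: string;
  },
  assumePluginOpenAI: boolean,
): void {
  if (cfg.EMBEDDING_PROVIDER === "openai" && !cfg.OPENAI_API_KEY)
    throw new Error('OPENAI_API_KEY is required for "openai"');
  if (cfg.EMBEDDING_PROVIDER === "google" && !cfg.GOOGLE_API_KEY)
    throw new Error('GOOGLE_API_KEY is required for "google"');
  // No provider set: no longer an error; embeddings fall through to the runtime.
  if (assumePluginOpenAI && cfg.OPENAI_API_KEY && !cfg.TEXT_EMBEDDING_MODEL)
    throw new Error("OPENAI_EMBEDDING_MODEL is required with plugin-openai");
}

checkEmbeddingConfig({}, true); // threw in 1.0.6; passes in 1.0.8
```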
@@ -138,18 +136,18 @@ function validateConfigRequirements(config, assumePluginOpenAI) {
  if (config.TEXT_PROVIDER === "openrouter") {
  const modelName = config.TEXT_MODEL?.toLowerCase() || "";
  if (modelName.includes("claude") || modelName.includes("gemini")) {
- logger.info(
+ logger.debug(
  `Using ${modelName} with OpenRouter. This configuration supports document caching for improved performance.`
  );
  }
  }
  } else {
  if (assumePluginOpenAI) {
- logger.info(
- "Contextual Knowledge is disabled. Using embedding configuration from plugin-openai."
+ logger.debug(
+ "Contextual Knowledge is disabled. Embeddings will be handled by the runtime (e.g., plugin-openai, plugin-google-genai)."
  );
  } else {
- logger.info("Contextual Knowledge is disabled. Using basic embedding-only configuration.");
+ logger.debug("Contextual Knowledge is disabled. Using configured embedding provider.");
  }
  }
  }
@@ -192,7 +190,7 @@ async function getProviderRateLimits(runtime) {
  // src/service.ts
  import {
  createUniqueUuid,
- logger as logger3,
+ logger as logger4,
  MemoryType as MemoryType2,
  ModelType as ModelType2,
  Semaphore,
@@ -204,7 +202,7 @@ import {
  import {
  MemoryType,
  ModelType,
- logger as logger2,
+ logger as logger3,
  splitChunks
  } from "@elizaos/core";

@@ -687,12 +685,342 @@ ${chunkContent}`;
  return generatedContext.trim();
  }

+ // src/llm.ts
+ import { generateText as aiGenerateText, embed } from "ai";
+ import { createOpenAI } from "@ai-sdk/openai";
+ import { createAnthropic } from "@ai-sdk/anthropic";
+ import { createOpenRouter } from "@openrouter/ai-sdk-provider";
+ import { google } from "@ai-sdk/google";
+ import { logger as logger2 } from "@elizaos/core";
+ async function generateText(prompt, system, overrideConfig) {
+ const config = validateModelConfig();
+ const provider = overrideConfig?.provider || config.TEXT_PROVIDER;
+ const modelName = overrideConfig?.modelName || config.TEXT_MODEL;
+ const maxTokens = overrideConfig?.maxTokens || config.MAX_OUTPUT_TOKENS;
+ const autoCacheContextualRetrieval = overrideConfig?.autoCacheContextualRetrieval !== false;
+ try {
+ switch (provider) {
+ case "anthropic":
+ return await generateAnthropicText(prompt, system, modelName, maxTokens);
+ case "openai":
+ return await generateOpenAIText(prompt, system, modelName, maxTokens);
+ case "openrouter":
+ return await generateOpenRouterText(
+ prompt,
+ system,
+ modelName,
+ maxTokens,
+ overrideConfig?.cacheDocument,
+ overrideConfig?.cacheOptions,
+ autoCacheContextualRetrieval
+ );
+ case "google":
+ return await generateGoogleText(prompt, system, modelName, maxTokens, config);
+ default:
+ throw new Error(`Unsupported text provider: ${provider}`);
+ }
+ } catch (error) {
+ logger2.error(`[LLM Service - ${provider}] Error generating text with ${modelName}:`, error);
+ throw error;
+ }
+ }
+ async function generateAnthropicText(prompt, system, modelName, maxTokens) {
+ const config = validateModelConfig();
+ const anthropic = createAnthropic({
+ apiKey: config.ANTHROPIC_API_KEY,
+ baseURL: config.ANTHROPIC_BASE_URL
+ });
+ const modelInstance = anthropic(modelName);
+ const result = await aiGenerateText({
+ model: modelInstance,
+ prompt,
+ system,
+ temperature: 0.3,
+ maxTokens
+ });
+ logger2.debug(
+ `[LLM Service - Anthropic] Text generated with ${modelName}. Usage: ${result.usage.promptTokens} prompt tokens, ${result.usage.completionTokens} completion tokens.`
+ );
+ return result;
+ }
+ async function generateOpenAIText(prompt, system, modelName, maxTokens) {
+ const config = validateModelConfig();
+ const openai = createOpenAI({
+ apiKey: config.OPENAI_API_KEY,
+ baseURL: config.OPENAI_BASE_URL
+ });
+ const modelInstance = openai.chat(modelName);
+ const result = await aiGenerateText({
+ model: modelInstance,
+ prompt,
+ system,
+ temperature: 0.3,
+ maxTokens
+ });
+ logger2.debug(
+ `[LLM Service - OpenAI] Text generated with ${modelName}. Usage: ${result.usage.promptTokens} prompt tokens, ${result.usage.completionTokens} completion tokens.`
+ );
+ return result;
+ }
+ async function generateGoogleText(prompt, system, modelName, maxTokens, config) {
+ const googleProvider = google;
+ if (config.GOOGLE_API_KEY) {
+ process.env.GOOGLE_GENERATIVE_AI_API_KEY = config.GOOGLE_API_KEY;
+ }
+ const modelInstance = googleProvider(modelName);
+ const result = await aiGenerateText({
+ model: modelInstance,
+ prompt,
+ system,
+ temperature: 0.3,
+ maxTokens
+ });
+ logger2.debug(
+ `[LLM Service - Google] Text generated with ${modelName}. Usage: ${result.usage.promptTokens} prompt tokens, ${result.usage.completionTokens} completion tokens.`
+ );
+ return result;
+ }
+ async function generateOpenRouterText(prompt, system, modelName, maxTokens, cacheDocument, cacheOptions, autoCacheContextualRetrieval = true) {
+ const config = validateModelConfig();
+ const openrouter = createOpenRouter({
+ apiKey: config.OPENROUTER_API_KEY,
+ baseURL: config.OPENROUTER_BASE_URL
+ });
+ const modelInstance = openrouter.chat(modelName);
+ const isClaudeModel = modelName.toLowerCase().includes("claude");
+ const isGeminiModel = modelName.toLowerCase().includes("gemini");
+ const isGemini25Model = modelName.toLowerCase().includes("gemini-2.5");
+ const supportsCaching = isClaudeModel || isGeminiModel;
+ let documentForCaching = cacheDocument;
+ if (!documentForCaching && autoCacheContextualRetrieval && supportsCaching) {
+ const docMatch = prompt.match(/<document>([\s\S]*?)<\/document>/);
+ if (docMatch && docMatch[1]) {
+ documentForCaching = docMatch[1].trim();
+ logger2.debug(
+ `[LLM Service - OpenRouter] Auto-detected document for caching (${documentForCaching.length} chars)`
+ );
+ }
+ }
+ if (documentForCaching && supportsCaching) {
+ const effectiveCacheOptions = cacheOptions || { type: "ephemeral" };
+ let promptText = prompt;
+ if (promptText.includes("<document>")) {
+ promptText = promptText.replace(/<document>[\s\S]*?<\/document>/, "").trim();
+ }
+ if (isClaudeModel) {
+ return await generateClaudeWithCaching(
+ promptText,
+ system,
+ modelInstance,
+ modelName,
+ maxTokens,
+ documentForCaching
+ );
+ } else if (isGeminiModel) {
+ return await generateGeminiWithCaching(
+ promptText,
+ system,
+ modelInstance,
+ modelName,
+ maxTokens,
+ documentForCaching,
+ isGemini25Model
+ );
+ }
+ }
+ logger2.debug("[LLM Service - OpenRouter] Using standard request without caching");
+ return await generateStandardOpenRouterText(prompt, system, modelInstance, modelName, maxTokens);
+ }
+ async function generateClaudeWithCaching(promptText, system, modelInstance, modelName, maxTokens, documentForCaching) {
+ logger2.debug(
+ `[LLM Service - OpenRouter] Using explicit prompt caching with Claude model ${modelName}`
+ );
+ const messages = [
+ // System message with cached document (if system is provided)
+ system ? {
+ role: "system",
+ content: [
+ {
+ type: "text",
+ text: system
+ },
+ {
+ type: "text",
+ text: documentForCaching,
+ cache_control: {
+ type: "ephemeral"
+ }
+ }
+ ]
+ } : (
+ // User message with cached document (if no system message)
+ {
+ role: "user",
+ content: [
+ {
+ type: "text",
+ text: "Document for context:"
+ },
+ {
+ type: "text",
+ text: documentForCaching,
+ cache_control: {
+ type: "ephemeral"
+ }
+ },
+ {
+ type: "text",
+ text: promptText
+ }
+ ]
+ }
+ ),
+ // Only add user message if system was provided (otherwise we included user above)
+ system ? {
+ role: "user",
+ content: [
+ {
+ type: "text",
+ text: promptText
+ }
+ ]
+ } : null
+ ].filter(Boolean);
+ logger2.debug("[LLM Service - OpenRouter] Using Claude-specific caching structure");
+ const result = await aiGenerateText({
+ model: modelInstance,
+ messages,
+ temperature: 0.3,
+ maxTokens,
+ providerOptions: {
+ openrouter: {
+ usage: {
+ include: true
+ }
+ }
+ }
+ });
+ logCacheMetrics(result);
+ logger2.debug(
+ `[LLM Service - OpenRouter] Text generated with ${modelName} using Claude caching. Usage: ${result.usage.promptTokens} prompt tokens, ${result.usage.completionTokens} completion tokens.`
+ );
+ return result;
+ }
+ async function generateGeminiWithCaching(promptText, system, modelInstance, modelName, maxTokens, documentForCaching, isGemini25Model) {
+ const usingImplicitCaching = isGemini25Model;
+ const estimatedDocTokens = Math.ceil(documentForCaching.length / 4);
+ const minTokensForImplicitCache = modelName.toLowerCase().includes("flash") ? 1028 : 2048;
+ const likelyTriggersCaching = estimatedDocTokens >= minTokensForImplicitCache;
+ if (usingImplicitCaching) {
+ logger2.debug(
+ `[LLM Service - OpenRouter] Using Gemini 2.5 implicit caching with model ${modelName}`
+ );
+ logger2.debug(
+ `[LLM Service - OpenRouter] Gemini 2.5 models automatically cache large prompts (no cache_control needed)`
+ );
+ if (likelyTriggersCaching) {
+ logger2.debug(
+ `[LLM Service - OpenRouter] Document size ~${estimatedDocTokens} tokens exceeds minimum ${minTokensForImplicitCache} tokens for implicit caching`
+ );
+ } else {
+ logger2.debug(
+ `[LLM Service - OpenRouter] Warning: Document size ~${estimatedDocTokens} tokens may not meet minimum ${minTokensForImplicitCache} token threshold for implicit caching`
+ );
+ }
+ } else {
+ logger2.debug(
+ `[LLM Service - OpenRouter] Using standard prompt format with Gemini model ${modelName}`
+ );
+ logger2.debug(
+ `[LLM Service - OpenRouter] Note: Only Gemini 2.5 models support automatic implicit caching`
+ );
+ }
+ const geminiSystemPrefix = system ? `${system}
+
+ ` : "";
+ const geminiPrompt = `${geminiSystemPrefix}${documentForCaching}
+
+ ${promptText}`;
+ const result = await aiGenerateText({
+ model: modelInstance,
+ prompt: geminiPrompt,
+ temperature: 0.3,
+ maxTokens,
+ providerOptions: {
+ openrouter: {
+ usage: {
+ include: true
+ // Include usage info to see cache metrics
+ }
+ }
+ }
+ });
+ logCacheMetrics(result);
+ logger2.debug(
+ `[LLM Service - OpenRouter] Text generated with ${modelName} using ${usingImplicitCaching ? "implicit" : "standard"} caching. Usage: ${result.usage.promptTokens} prompt tokens, ${result.usage.completionTokens} completion tokens.`
+ );
+ return result;
+ }
+ async function generateStandardOpenRouterText(prompt, system, modelInstance, modelName, maxTokens) {
+ const result = await aiGenerateText({
+ model: modelInstance,
+ prompt,
+ system,
+ temperature: 0.3,
+ maxTokens,
+ providerOptions: {
+ openrouter: {
+ usage: {
+ include: true
+ // Include usage info to see cache metrics
+ }
+ }
+ }
+ });
+ logger2.debug(
+ `[LLM Service - OpenRouter] Text generated with ${modelName}. Usage: ${result.usage.promptTokens} prompt tokens, ${result.usage.completionTokens} completion tokens.`
+ );
+ return result;
+ }
+ function logCacheMetrics(result) {
+ if (result.usage && result.usage.cacheTokens) {
+ logger2.debug(
+ `[LLM Service - OpenRouter] Cache metrics - Cached tokens: ${result.usage.cacheTokens}, Cache discount: ${result.usage.cacheDiscount}`
+ );
+ }
+ }
+
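
`generateText` is internal to the bundle (its real call sites are in `generateContextsInBatch` below), but the call shape is worth spelling out. A hedged usage sketch, model id hypothetical:

```ts
// Sketch only: generateText is bundle-internal, not a public export.
const fullDocumentText = "...the whole source document...";

const result = await generateText(
  "Situate this chunk within the overall document.", // prompt
  "You are a contextualizer.",                       // system
  {
    provider: "openrouter",                   // defaults to TEXT_PROVIDER
    modelName: "anthropic/claude-3.5-sonnet", // hypothetical model id
    cacheDocument: fullDocumentText,          // sent with cache_control for Claude
    cacheOptions: { type: "ephemeral" },
    autoCacheContextualRetrieval: true,       // also auto-extracts <document>…</document>
  },
);
console.log(result.text, result.usage.promptTokens);
```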
  // src/document-processor.ts
  var ctxKnowledgeEnabled = process.env.CTX_KNOWLEDGE_ENABLED === "true" || process.env.CTX_KNOWLEDGE_ENABLED === "True";
+ function shouldUseCustomLLM() {
+ const textProvider = process.env.TEXT_PROVIDER;
+ const textModel = process.env.TEXT_MODEL;
+ if (!textProvider || !textModel) {
+ return false;
+ }
+ switch (textProvider.toLowerCase()) {
+ case "openrouter":
+ return !!process.env.OPENROUTER_API_KEY;
+ case "openai":
+ return !!process.env.OPENAI_API_KEY;
+ case "anthropic":
+ return !!process.env.ANTHROPIC_API_KEY;
+ case "google":
+ return !!process.env.GOOGLE_API_KEY;
+ default:
+ return false;
+ }
+ }
+ var useCustomLLM = shouldUseCustomLLM();
  if (ctxKnowledgeEnabled) {
- logger2.info(`Document processor starting with Contextual Knowledge ENABLED`);
+ logger3.info(`Document processor starting with Contextual Knowledge ENABLED`);
+ if (useCustomLLM) {
+ logger3.info(`Using Custom LLM with provider: ${process.env.TEXT_PROVIDER}, model: ${process.env.TEXT_MODEL}`);
+ } else {
+ logger3.info(`Using ElizaOS Runtime LLM (default behavior)`);
+ }
  } else {
- logger2.info(`Document processor starting with Contextual Knowledge DISABLED`);
+ logger3.info(`Document processor starting with Contextual Knowledge DISABLED`);
  }
  async function processFragmentsSynchronously({
  runtime,
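
`shouldUseCustomLLM` gates the new path on a provider/model pair plus the matching API key, so a partially configured environment silently falls back to the runtime LLM. For example (values hypothetical):

```ts
// All three must be set before the module loads, since useCustomLLM is
// computed once at import time.
process.env.TEXT_PROVIDER = "openrouter";
process.env.TEXT_MODEL = "google/gemini-2.5-flash"; // hypothetical model id
process.env.OPENROUTER_API_KEY = "sk-or-...";       // key matching the provider
// shouldUseCustomLLM() -> true; drop the key and it returns false, with no warning.
```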
@@ -705,15 +1033,15 @@ async function processFragmentsSynchronously({
  worldId
  }) {
  if (!fullDocumentText || fullDocumentText.trim() === "") {
- logger2.warn(`No text content available to chunk for document ${documentId}.`);
+ logger3.warn(`No text content available to chunk for document ${documentId}.`);
  return 0;
  }
  const chunks = await splitDocumentIntoChunks(fullDocumentText);
  if (chunks.length === 0) {
- logger2.warn(`No chunks generated from text for ${documentId}. No fragments to save.`);
+ logger3.warn(`No chunks generated from text for ${documentId}. No fragments to save.`);
  return 0;
  }
- logger2.info(`Split content into ${chunks.length} chunks for document ${documentId}`);
+ logger3.info(`Split content into ${chunks.length} chunks for document ${documentId}`);
  const providerLimits = await getProviderRateLimits();
  const CONCURRENCY_LIMIT = Math.min(30, providerLimits.maxConcurrentRequests || 30);
  const rateLimiter = createRateLimiter(providerLimits.requestsPerMinute || 60);
@@ -731,11 +1059,11 @@ async function processFragmentsSynchronously({
  rateLimiter
  });
  if (failedCount > 0) {
- logger2.warn(
+ logger3.warn(
  `Failed to process ${failedCount} chunks out of ${chunks.length} for document ${documentId}`
  );
  }
- logger2.info(`Finished saving ${savedCount} fragments for document ${documentId}.`);
+ logger3.info(`Finished saving ${savedCount} fragments for document ${documentId}.`);
  return savedCount;
  }
  async function extractTextFromDocument(fileBuffer, contentType, originalFilename) {
@@ -744,15 +1072,15 @@ async function extractTextFromDocument(fileBuffer, contentType, originalFilename
  }
  try {
  if (contentType === "application/pdf") {
- logger2.debug(`Extracting text from PDF: ${originalFilename}`);
+ logger3.debug(`Extracting text from PDF: ${originalFilename}`);
  return await convertPdfToTextFromBuffer(fileBuffer, originalFilename);
  } else {
- logger2.debug(`Extracting text from non-PDF: ${originalFilename} (Type: ${contentType})`);
+ logger3.debug(`Extracting text from non-PDF: ${originalFilename} (Type: ${contentType})`);
  if (contentType.includes("text/") || contentType.includes("application/json") || contentType.includes("application/xml")) {
  try {
  return fileBuffer.toString("utf8");
  } catch (textError) {
- logger2.warn(
+ logger3.warn(
  `Failed to decode ${originalFilename} as UTF-8, falling back to binary extraction`
  );
  }
@@ -760,7 +1088,7 @@ async function extractTextFromDocument(fileBuffer, contentType, originalFilename
  return await extractTextFromFileBuffer(fileBuffer, contentType, originalFilename);
  }
  } catch (error) {
- logger2.error(`Error extracting text from ${originalFilename}: ${error.message}`);
+ logger3.error(`Error extracting text from ${originalFilename}: ${error.message}`);
  throw new Error(`Failed to extract text from ${originalFilename}: ${error.message}`);
  }
  }
@@ -805,7 +1133,7 @@ async function splitDocumentIntoChunks(documentText) {
  const tokenChunkOverlap = DEFAULT_CHUNK_OVERLAP_TOKENS;
  const targetCharChunkSize = Math.round(tokenChunkSize * DEFAULT_CHARS_PER_TOKEN);
  const targetCharChunkOverlap = Math.round(tokenChunkOverlap * DEFAULT_CHARS_PER_TOKEN);
- logger2.debug(
+ logger3.debug(
  `Using core splitChunks with settings: tokenChunkSize=${tokenChunkSize}, tokenChunkOverlap=${tokenChunkOverlap}, charChunkSize=${targetCharChunkSize}, charChunkOverlap=${targetCharChunkOverlap}`
  );
  return await splitChunks(documentText, tokenChunkSize, tokenChunkOverlap);
@@ -829,7 +1157,7 @@ async function processAndSaveFragments({
  for (let i = 0; i < chunks.length; i += concurrencyLimit) {
  const batchChunks = chunks.slice(i, i + concurrencyLimit);
  const batchOriginalIndices = Array.from({ length: batchChunks.length }, (_, k) => i + k);
- logger2.debug(
+ logger3.debug(
  `Processing batch of ${batchChunks.length} chunks for document ${documentId}. Starting original index: ${batchOriginalIndices[0]}, batch ${Math.floor(i / concurrencyLimit) + 1}/${Math.ceil(chunks.length / concurrencyLimit)}`
  );
  const contextualizedChunks = await getContextualizedChunks(
@@ -849,13 +1177,13 @@ async function processAndSaveFragments({
  if (!result.success) {
  failedCount++;
  failedChunks.push(originalChunkIndex);
- logger2.warn(`Failed to process chunk ${originalChunkIndex} for document ${documentId}`);
+ logger3.warn(`Failed to process chunk ${originalChunkIndex} for document ${documentId}`);
  continue;
  }
  const contextualizedChunkText = result.text;
  const embedding = result.embedding;
  if (!embedding || embedding.length === 0) {
- logger2.warn(
+ logger3.warn(
  `Zero vector detected for chunk ${originalChunkIndex} (document ${documentId}). Embedding: ${JSON.stringify(result.embedding)}`
  );
  failedCount++;
@@ -880,12 +1208,12 @@ async function processAndSaveFragments({
  }
  };
  await runtime.createMemory(fragmentMemory, "knowledge");
- logger2.debug(
+ logger3.debug(
  `Saved fragment ${originalChunkIndex + 1} for document ${documentId} (Fragment ID: ${fragmentMemory.id})`
  );
  savedCount++;
  } catch (saveError) {
- logger2.error(
+ logger3.error(
  `Error saving chunk ${originalChunkIndex} to database: ${saveError.message}`,
  saveError.stack
  );
@@ -900,8 +1228,26 @@ async function processAndSaveFragments({
  return { savedCount, failedCount, failedChunks };
  }
  async function generateEmbeddingsForChunks(runtime, contextualizedChunks, rateLimiter) {
+ const validChunks = contextualizedChunks.filter((chunk) => chunk.success);
+ const failedChunks = contextualizedChunks.filter((chunk) => !chunk.success);
+ if (validChunks.length === 0) {
+ return failedChunks.map((chunk) => ({
+ success: false,
+ index: chunk.index,
+ error: new Error("Chunk processing failed"),
+ text: chunk.contextualizedText
+ }));
+ }
  return await Promise.all(
  contextualizedChunks.map(async (contextualizedChunk) => {
+ if (!contextualizedChunk.success) {
+ return {
+ success: false,
+ index: contextualizedChunk.index,
+ error: new Error("Chunk processing failed"),
+ text: contextualizedChunk.contextualizedText
+ };
+ }
  await rateLimiter();
  try {
  const generateEmbeddingOperation = async () => {
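
The embedding pass now carries contextualization failures through instead of embedding whatever text a failed chunk happened to hold. The added guard behaves roughly like this (sketch, simplified types):

```ts
type Ctxd = { success: boolean; index: number; contextualizedText: string };

// Sketch of the new short-circuit: failed chunks come back as failures,
// and if nothing succeeded the batch skips embedding entirely.
function preflight(chunks: Ctxd[]) {
  const failed = chunks
    .filter((c) => !c.success)
    .map((c) => ({
      success: false as const,
      index: c.index,
      error: new Error("Chunk processing failed"),
      text: c.contextualizedText,
    }));
  const anyValid = chunks.some((c) => c.success);
  return { failed, anyValid };
}
```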
@@ -929,7 +1275,7 @@ async function generateEmbeddingsForChunks(runtime, contextualizedChunks, rateLi
  text: contextualizedChunk.contextualizedText
  };
  } catch (error) {
- logger2.error(
+ logger3.error(
  `Error generating embedding for chunk ${contextualizedChunk.index}: ${error.message}`
  );
  return {
@@ -944,7 +1290,7 @@ async function generateEmbeddingsForChunks(runtime, contextualizedChunks, rateLi
  }
  async function getContextualizedChunks(runtime, fullDocumentText, chunks, contentType, batchOriginalIndices) {
  if (ctxKnowledgeEnabled && fullDocumentText) {
- logger2.debug(`Generating contexts for ${chunks.length} chunks`);
+ logger3.debug(`Generating contexts for ${chunks.length} chunks`);
  return await generateContextsInBatch(
  runtime,
  fullDocumentText,
@@ -989,17 +1335,31 @@ async function generateContextsInBatch(runtime, fullDocumentText, chunks, conten
  try {
  let llmResponse;
  const generateTextOperation = async () => {
- if (item.usesCaching) {
- return await runtime.useModel(ModelType.TEXT_LARGE, {
- prompt: item.promptText,
- system: item.systemPrompt
- // cacheDocument: item.fullDocumentTextForContext, // Not directly supported by useModel
- // cacheOptions: { type: 'ephemeral' }, // Not directly supported by useModel
- });
+ if (useCustomLLM) {
+ if (item.usesCaching) {
+ return await generateText(
+ item.promptText,
+ item.systemPrompt,
+ {
+ cacheDocument: item.fullDocumentTextForContext,
+ cacheOptions: { type: "ephemeral" },
+ autoCacheContextualRetrieval: true
+ }
+ );
+ } else {
+ return await generateText(item.prompt);
+ }
  } else {
- return await runtime.useModel(ModelType.TEXT_LARGE, {
- prompt: item.prompt
- });
+ if (item.usesCaching) {
+ return await runtime.useModel(ModelType.TEXT_LARGE, {
+ prompt: item.promptText,
+ system: item.systemPrompt
+ });
+ } else {
+ return await runtime.useModel(ModelType.TEXT_LARGE, {
+ prompt: item.prompt
+ });
+ }
  }
  };
  llmResponse = await withRateLimitRetry(
@@ -1008,7 +1368,7 @@ async function generateContextsInBatch(runtime, fullDocumentText, chunks, conten
  );
  const generatedContext = llmResponse.text;
  const contextualizedText = getChunkWithContext(item.chunkText, generatedContext);
- logger2.debug(
+ logger3.debug(
  `Context added for chunk ${item.originalIndex}. New length: ${contextualizedText.length}`
  );
  return {
@@ -1017,7 +1377,7 @@ async function generateContextsInBatch(runtime, fullDocumentText, chunks, conten
  index: item.originalIndex
  };
  } catch (error) {
- logger2.error(
+ logger3.error(
  `Error generating context for chunk ${item.originalIndex}: ${error.message}`,
  error.stack
  );
@@ -1038,7 +1398,7 @@ function prepareContextPrompts(chunks, fullDocumentText, contentType, batchIndic
  if (isUsingCacheCapableModel) {
  const cachingPromptInfo = contentType ? getCachingPromptForMimeType(contentType, chunkText) : getCachingContextualizationPrompt(chunkText);
  if (cachingPromptInfo.prompt.startsWith("Error:")) {
- logger2.warn(
+ logger3.warn(
  `Skipping contextualization for chunk ${originalIndex} due to: ${cachingPromptInfo.prompt}`
  );
  return {
@@ -1060,7 +1420,7 @@ function prepareContextPrompts(chunks, fullDocumentText, contentType, batchIndic
  } else {
  const prompt = contentType ? getPromptForMimeType(contentType, fullDocumentText, chunkText) : getContextualizationPrompt(fullDocumentText, chunkText);
  if (prompt.startsWith("Error:")) {
- logger2.warn(`Skipping contextualization for chunk ${originalIndex} due to: ${prompt}`);
+ logger3.warn(`Skipping contextualization for chunk ${originalIndex} due to: ${prompt}`);
  return {
  prompt: null,
  originalIndex,
@@ -1078,7 +1438,7 @@ function prepareContextPrompts(chunks, fullDocumentText, contentType, batchIndic
  };
  }
  } catch (error) {
- logger2.error(
+ logger3.error(
  `Error preparing prompt for chunk ${originalIndex}: ${error.message}`,
  error.stack
  );
@@ -1099,7 +1459,7 @@ async function generateEmbeddingWithValidation(runtime, text) {
  });
  const embedding = Array.isArray(embeddingResult) ? embeddingResult : embeddingResult?.embedding;
  if (!embedding || embedding.length === 0) {
- logger2.warn(`Zero vector detected. Embedding result: ${JSON.stringify(embeddingResult)}`);
+ logger3.warn(`Zero vector detected. Embedding result: ${JSON.stringify(embedding)}`);
  return {
  embedding: null,
  success: false,
@@ -1117,12 +1477,12 @@ async function withRateLimitRetry(operation, errorContext, retryDelay) {
  } catch (error) {
  if (error.status === 429) {
  const delay = retryDelay || error.headers?.["retry-after"] || 5;
- logger2.warn(`Rate limit hit for ${errorContext}. Retrying after ${delay}s`);
+ logger3.warn(`Rate limit hit for ${errorContext}. Retrying after ${delay}s`);
  await new Promise((resolve) => setTimeout(resolve, delay * 1e3));
  try {
  return await operation();
  } catch (retryError) {
- logger2.error(`Failed after retry for ${errorContext}: ${retryError.message}`);
+ logger3.error(`Failed after retry for ${errorContext}: ${retryError.message}`);
  throw retryError;
  }
  }
@@ -1141,7 +1501,7 @@ function createRateLimiter(requestsPerMinute) {
  const oldestRequest = requestTimes[0];
  const timeToWait = Math.max(0, oldestRequest + intervalMs - now);
  if (timeToWait > 0) {
- logger2.debug(`Rate limiting applied, waiting ${timeToWait}ms before next request`);
+ logger3.debug(`Rate limiting applied, waiting ${timeToWait}ms before next request`);
  await new Promise((resolve) => setTimeout(resolve, timeToWait));
  }
  }
@@ -1168,9 +1528,10 @@ var KnowledgeService = class _KnowledgeService extends Service {
  if (typeof value === "string") return value.toLowerCase() === "true";
  return false;
  };
+ const loadDocsOnStartup = parseBooleanEnv(config?.LOAD_DOCS_ON_STARTUP) || process.env.LOAD_DOCS_ON_STARTUP === "true";
  this.knowledgeConfig = {
  CTX_KNOWLEDGE_ENABLED: parseBooleanEnv(config?.CTX_KNOWLEDGE_ENABLED),
- LOAD_DOCS_ON_STARTUP: parseBooleanEnv(config?.LOAD_DOCS_ON_STARTUP),
+ LOAD_DOCS_ON_STARTUP: loadDocsOnStartup,
  MAX_INPUT_TOKENS: config?.MAX_INPUT_TOKENS,
  MAX_OUTPUT_TOKENS: config?.MAX_OUTPUT_TOKENS,
  EMBEDDING_PROVIDER: config?.EMBEDDING_PROVIDER,
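
`LOAD_DOCS_ON_STARTUP` is now honored from the process environment as well as the service config, so either of these enables the startup load (sketch; the service is normally constructed by the runtime, not by hand):

```ts
// Option 1: via the config object handed to the service, as before
// (hypothetical construction for illustration):
// new KnowledgeService(runtime, { LOAD_DOCS_ON_STARTUP: "true" });

// Option 2: new in 1.0.8 — the process env is checked independently,
// so a config object that omits the flag no longer disables it.
process.env.LOAD_DOCS_ON_STARTUP = "true";
```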
@@ -1178,34 +1539,37 @@ var KnowledgeService = class _KnowledgeService extends Service {
  TEXT_EMBEDDING_MODEL: config?.TEXT_EMBEDDING_MODEL
  };
  this.config = { ...this.knowledgeConfig };
- logger3.info(
+ logger4.info(
  `KnowledgeService initialized for agent ${this.runtime.agentId} with config:`,
  this.knowledgeConfig
  );
  if (this.knowledgeConfig.LOAD_DOCS_ON_STARTUP) {
+ logger4.info("LOAD_DOCS_ON_STARTUP is enabled. Loading documents from docs folder...");
  this.loadInitialDocuments().catch((error) => {
- logger3.error("Error during initial document loading in KnowledgeService:", error);
+ logger4.error("Error during initial document loading in KnowledgeService:", error);
  });
+ } else {
+ logger4.info("LOAD_DOCS_ON_STARTUP is disabled. Skipping automatic document loading.");
  }
  }
  async loadInitialDocuments() {
- logger3.info(
+ logger4.info(
  `KnowledgeService: Checking for documents to load on startup for agent ${this.runtime.agentId}`
  );
  try {
  await new Promise((resolve) => setTimeout(resolve, 1e3));
  const result = await loadDocsFromPath(this, this.runtime.agentId);
  if (result.successful > 0) {
- logger3.info(
+ logger4.info(
  `KnowledgeService: Loaded ${result.successful} documents from docs folder on startup for agent ${this.runtime.agentId}`
  );
  } else {
- logger3.info(
+ logger4.info(
  `KnowledgeService: No new documents found to load on startup for agent ${this.runtime.agentId}`
  );
  }
  } catch (error) {
- logger3.error(
+ logger4.error(
  `KnowledgeService: Error loading documents on startup for agent ${this.runtime.agentId}:`,
  error
  );
@@ -1217,23 +1581,23 @@ var KnowledgeService = class _KnowledgeService extends Service {
  * @returns Initialized Knowledge service
  */
  static async start(runtime) {
- logger3.info(`Starting Knowledge service for agent: ${runtime.agentId}`);
+ logger4.info(`Starting Knowledge service for agent: ${runtime.agentId}`);
  const service = new _KnowledgeService(runtime);
  if (service.runtime.character?.knowledge && service.runtime.character.knowledge.length > 0) {
- logger3.info(
+ logger4.info(
  `KnowledgeService: Processing ${service.runtime.character.knowledge.length} character knowledge items.`
  );
  const stringKnowledge = service.runtime.character.knowledge.filter(
  (item) => typeof item === "string"
  );
  await service.processCharacterKnowledge(stringKnowledge).catch((err) => {
- logger3.error(
+ logger4.error(
  `KnowledgeService: Error processing character knowledge during startup: ${err.message}`,
  err
  );
  });
  } else {
- logger3.info(
+ logger4.info(
  `KnowledgeService: No character knowledge to process for agent ${runtime.agentId}.`
  );
  }
@@ -1244,10 +1608,10 @@ var KnowledgeService = class _KnowledgeService extends Service {
  * @param runtime Agent runtime
  */
  static async stop(runtime) {
- logger3.info(`Stopping Knowledge service for agent: ${runtime.agentId}`);
+ logger4.info(`Stopping Knowledge service for agent: ${runtime.agentId}`);
  const service = runtime.getService(_KnowledgeService.serviceType);
  if (!service) {
- logger3.warn(`KnowledgeService not found for agent ${runtime.agentId} during stop.`);
+ logger4.warn(`KnowledgeService not found for agent ${runtime.agentId} during stop.`);
  }
  if (service instanceof _KnowledgeService) {
  await service.stop();
@@ -1257,7 +1621,7 @@ var KnowledgeService = class _KnowledgeService extends Service {
  * Stop the service
  */
  async stop() {
- logger3.info(`Knowledge service stopping for agent: ${this.runtime.agentId}`);
+ logger4.info(`Knowledge service stopping for agent: ${this.runtime.agentId}`);
  }
  /**
  * Add knowledge to the system
@@ -1266,13 +1630,13 @@ var KnowledgeService = class _KnowledgeService extends Service {
  */
  async addKnowledge(options) {
  const agentId = options.agentId || this.runtime.agentId;
- logger3.info(
+ logger4.info(
  `KnowledgeService processing document for agent: ${agentId}, file: ${options.originalFilename}, type: ${options.contentType}`
  );
  try {
  const existingDocument = await this.runtime.getMemoryById(options.clientDocumentId);
  if (existingDocument && existingDocument.metadata?.type === MemoryType2.DOCUMENT) {
- logger3.info(
+ logger4.info(
  `Document ${options.originalFilename} with ID ${options.clientDocumentId} already exists. Skipping processing.`
  );
  const fragments = await this.runtime.getMemories({
@@ -1291,7 +1655,7 @@ var KnowledgeService = class _KnowledgeService extends Service {
  };
  }
  } catch (error) {
- logger3.debug(
+ logger4.debug(
  `Document ${options.clientDocumentId} not found or error checking existence, proceeding with processing: ${error instanceof Error ? error.message : String(error)}`
  );
  }
@@ -1315,7 +1679,7 @@ var KnowledgeService = class _KnowledgeService extends Service {
  }) {
  const agentId = passedAgentId || this.runtime.agentId;
  try {
- logger3.debug(
+ logger4.debug(
  `KnowledgeService: Processing document ${originalFilename} (type: ${contentType}) via processDocument for agent: ${agentId}`
  );
  let fileBuffer = null;
@@ -1326,7 +1690,7 @@ var KnowledgeService = class _KnowledgeService extends Service {
  try {
  fileBuffer = Buffer.from(content, "base64");
  } catch (e) {
- logger3.error(
+ logger4.error(
  `KnowledgeService: Failed to convert base64 to buffer for ${originalFilename}: ${e.message}`
  );
  throw new Error(`Invalid base64 content for PDF file ${originalFilename}`);
@@ -1337,7 +1701,7 @@ var KnowledgeService = class _KnowledgeService extends Service {
  try {
  fileBuffer = Buffer.from(content, "base64");
  } catch (e) {
- logger3.error(
+ logger4.error(
  `KnowledgeService: Failed to convert base64 to buffer for ${originalFilename}: ${e.message}`
  );
  throw new Error(`Invalid base64 content for binary file ${originalFilename}`);
@@ -1354,11 +1718,11 @@ var KnowledgeService = class _KnowledgeService extends Service {
  if (invalidCharCount > 0 && invalidCharCount / textLength > 0.1) {
  throw new Error("Decoded content contains too many invalid characters");
  }
- logger3.debug(`Successfully decoded base64 content for text file: ${originalFilename}`);
+ logger4.debug(`Successfully decoded base64 content for text file: ${originalFilename}`);
  extractedText = decodedText;
  documentContentToStore = decodedText;
  } catch (e) {
- logger3.error(
+ logger4.error(
  `Failed to decode base64 for ${originalFilename}: ${e instanceof Error ? e.message : String(e)}`
  );
  throw new Error(
@@ -1366,7 +1730,7 @@ var KnowledgeService = class _KnowledgeService extends Service {
  );
  }
  } else {
- logger3.debug(`Treating content as plain text for file: ${originalFilename}`);
+ logger4.debug(`Treating content as plain text for file: ${originalFilename}`);
  extractedText = content;
  documentContentToStore = content;
  }
@@ -1375,7 +1739,7 @@ var KnowledgeService = class _KnowledgeService extends Service {
  const noTextError = new Error(
  `KnowledgeService: No text content extracted from ${originalFilename} (type: ${contentType}).`
  );
- logger3.warn(noTextError.message);
+ logger4.warn(noTextError.message);
  throw noTextError;
  }
  const documentMemory = createDocumentMemory({
@@ -1401,14 +1765,14 @@ var KnowledgeService = class _KnowledgeService extends Service {
  roomId: roomId || agentId,
  entityId: entityId || agentId
  };
- logger3.debug(
+ logger4.debug(
  `KnowledgeService: Creating memory with agentId=${agentId}, entityId=${entityId}, roomId=${roomId}, this.runtime.agentId=${this.runtime.agentId}`
  );
- logger3.debug(
+ logger4.debug(
  `KnowledgeService: memoryWithScope agentId=${memoryWithScope.agentId}, entityId=${memoryWithScope.entityId}`
  );
  await this.runtime.createMemory(memoryWithScope, "documents");
- logger3.debug(
+ logger4.debug(
  `KnowledgeService: Stored document ${originalFilename} (Memory ID: ${memoryWithScope.id})`
  );
  const fragmentCount = await processFragmentsSynchronously({
@@ -1422,7 +1786,7 @@ var KnowledgeService = class _KnowledgeService extends Service {
  entityId: entityId || agentId,
  worldId: worldId || agentId
  });
- logger3.info(
+ logger4.info(
  `KnowledgeService: Document ${originalFilename} processed with ${fragmentCount} fragments for agent ${agentId}`
  );
  return {
@@ -1431,7 +1795,7 @@ var KnowledgeService = class _KnowledgeService extends Service {
  fragmentCount
  };
  } catch (error) {
- logger3.error(
+ logger4.error(
  `KnowledgeService: Error processing document ${originalFilename}: ${error.message}`,
  error.stack
  );
@@ -1440,7 +1804,7 @@ var KnowledgeService = class _KnowledgeService extends Service {
  }
  // --- Knowledge methods moved from AgentRuntime ---
  async handleProcessingError(error, context) {
- logger3.error(`KnowledgeService: Error ${context}:`, error?.message || error || "Unknown error");
+ logger4.error(`KnowledgeService: Error ${context}:`, error?.message || error || "Unknown error");
  throw error;
  }
  async checkExistingKnowledge(knowledgeId) {
@@ -1448,9 +1812,9 @@ var KnowledgeService = class _KnowledgeService extends Service {
  return !!existingDocument;
  }
  async getKnowledge(message, scope) {
- logger3.debug("KnowledgeService: getKnowledge called for message id: " + message.id);
+ logger4.debug("KnowledgeService: getKnowledge called for message id: " + message.id);
  if (!message?.content?.text || message?.content?.text.trim().length === 0) {
- logger3.warn("KnowledgeService: Invalid or empty message content for knowledge query.");
+ logger4.warn("KnowledgeService: Invalid or empty message content for knowledge query.");
  return [];
  }
  const embedding = await this.runtime.useModel(ModelType2.TEXT_EMBEDDING, {
@@ -1481,7 +1845,7 @@ var KnowledgeService = class _KnowledgeService extends Service {
  }
  async processCharacterKnowledge(items) {
  await new Promise((resolve) => setTimeout(resolve, 1e3));
- logger3.info(
+ logger4.info(
  `KnowledgeService: Processing ${items.length} character knowledge items for agent ${this.runtime.agentId}`
  );
  const processingPromises = items.map(async (item) => {
@@ -1489,12 +1853,12 @@ var KnowledgeService = class _KnowledgeService extends Service {
  try {
  const knowledgeId = createUniqueUuid(this.runtime.agentId + item, item);
  if (await this.checkExistingKnowledge(knowledgeId)) {
- logger3.debug(
+ logger4.debug(
  `KnowledgeService: Character knowledge item with ID ${knowledgeId} already exists. Skipping.`
  );
  return;
  }
- logger3.debug(
+ logger4.debug(
  `KnowledgeService: Processing character knowledge for ${this.runtime.character?.name} - ${item.slice(0, 100)}`
  );
  let metadata = {
@@ -1545,7 +1909,7 @@ var KnowledgeService = class _KnowledgeService extends Service {
  }
  });
  await Promise.all(processingPromises);
- logger3.info(
+ logger4.info(
  `KnowledgeService: Finished processing character knowledge for agent ${this.runtime.agentId}.`
  );
  }
@@ -1565,7 +1929,7 @@ var KnowledgeService = class _KnowledgeService extends Service {
  worldId: scope?.worldId ?? this.runtime.agentId,
  entityId: scope?.entityId ?? this.runtime.agentId
  };
- logger3.debug(`KnowledgeService: _internalAddKnowledge called for item ID ${item.id}`);
+ logger4.debug(`KnowledgeService: _internalAddKnowledge called for item ID ${item.id}`);
  const documentMemory = {
  id: item.id,
  // This ID should be the unique ID for the document being added.
@@ -1587,7 +1951,7 @@ var KnowledgeService = class _KnowledgeService extends Service {
  };
  const existingDocument = await this.runtime.getMemoryById(item.id);
  if (existingDocument) {
- logger3.debug(
+ logger4.debug(
  `KnowledgeService: Document ${item.id} already exists in _internalAddKnowledge, updating...`
  );
  await this.runtime.updateMemory({
@@ -1611,13 +1975,13 @@ var KnowledgeService = class _KnowledgeService extends Service {
  await this.processDocumentFragment(fragment);
  fragmentsProcessed++;
  } catch (error) {
- logger3.error(
+ logger4.error(
  `KnowledgeService: Error processing fragment ${fragment.id} for document ${item.id}:`,
  error
  );
  }
  }
- logger3.debug(
+ logger4.debug(
  `KnowledgeService: Processed ${fragmentsProcessed}/${fragments.length} fragments for document ${item.id}.`
  );
  }
@@ -1626,7 +1990,7 @@ var KnowledgeService = class _KnowledgeService extends Service {
  await this.runtime.addEmbeddingToMemory(fragment);
  await this.runtime.createMemory(fragment, "knowledge");
  } catch (error) {
- logger3.error(
+ logger4.error(
  `KnowledgeService: Error processing fragment ${fragment.id}:`,
  error instanceof Error ? error.message : String(error)
  );
@@ -1691,7 +2055,7 @@ var KnowledgeService = class _KnowledgeService extends Service {
  */
  async deleteMemory(memoryId) {
  await this.runtime.deleteMemory(memoryId);
- logger3.info(
+ logger4.info(
  `KnowledgeService: Deleted memory ${memoryId} for agent ${this.runtime.agentId}. Assumed it was a document or related fragment.`
  );
  }
@@ -2634,7 +2998,7 @@ var KnowledgeTestSuite = class {
  var tests_default = new KnowledgeTestSuite();

  // src/actions.ts
- import { logger as logger4, stringToUuid } from "@elizaos/core";
+ import { logger as logger5, stringToUuid } from "@elizaos/core";
  import * as fs2 from "fs";
  import * as path2 from "path";
  var processKnowledgeAction = {
@@ -2694,7 +3058,7 @@ var processKnowledgeAction = {
  const hasPath = pathPattern.test(text);
  const service = runtime.getService(KnowledgeService.serviceType);
  if (!service) {
- logger4.warn(
+ logger5.warn(
  "Knowledge service not available for PROCESS_KNOWLEDGE action"
  );
  return false;
@@ -2779,7 +3143,7 @@ var processKnowledgeAction = {
  await callback(response);
  }
  } catch (error) {
- logger4.error("Error in PROCESS_KNOWLEDGE action:", error);
+ logger5.error("Error in PROCESS_KNOWLEDGE action:", error);
  const errorResponse = {
  text: `I encountered an error while processing the knowledge: ${error instanceof Error ? error.message : "Unknown error"}`
  };
@@ -2890,7 +3254,7 @@ ${formattedResults}`
  await callback(response);
  }
  } catch (error) {
- logger4.error("Error in SEARCH_KNOWLEDGE action:", error);
+ logger5.error("Error in SEARCH_KNOWLEDGE action:", error);
  const errorResponse = {
  text: `I encountered an error while searching the knowledge base: ${error instanceof Error ? error.message : "Unknown error"}`
  };
@@ -2903,44 +3267,9 @@ ${formattedResults}`
  var knowledgeActions = [processKnowledgeAction, searchKnowledgeAction];

  // src/routes.ts
- import { createUniqueUuid as createUniqueUuid2, logger as logger5, ModelType as ModelType4 } from "@elizaos/core";
+ import { createUniqueUuid as createUniqueUuid2, logger as logger6, ModelType as ModelType4 } from "@elizaos/core";
  import fs3 from "fs";
  import path3 from "path";
- import multer from "multer";
- var createUploadMiddleware = (runtime) => {
- const uploadDir = runtime.getSetting("KNOWLEDGE_UPLOAD_DIR") || "/tmp/uploads/";
- const maxFileSize = parseInt(runtime.getSetting("KNOWLEDGE_MAX_FILE_SIZE") || "52428800");
- const maxFiles = parseInt(runtime.getSetting("KNOWLEDGE_MAX_FILES") || "10");
- const allowedMimeTypes = runtime.getSetting("KNOWLEDGE_ALLOWED_MIME_TYPES")?.split(",") || [
- "text/plain",
- "text/markdown",
- "application/pdf",
- "application/msword",
- "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
- "text/html",
- "application/json",
- "application/xml",
- "text/csv"
- ];
- return multer({
- dest: uploadDir,
- limits: {
- fileSize: maxFileSize,
- files: maxFiles
- },
- fileFilter: (req, file, cb) => {
- if (allowedMimeTypes.includes(file.mimetype)) {
- cb(null, true);
- } else {
- cb(
- new Error(
- `File type ${file.mimetype} not allowed. Allowed types: ${allowedMimeTypes.join(", ")}`
- )
- );
- }
- }
- });
- };
  function sendSuccess(res, data, status = 200) {
  res.writeHead(status, { "Content-Type": "application/json" });
  res.end(JSON.stringify({ success: true, data }));
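
Dropping `multer` removes the package's own upload middleware and, with it, the `KNOWLEDGE_UPLOAD_DIR`, `KNOWLEDGE_MAX_FILE_SIZE`, `KNOWLEDGE_MAX_FILES`, and `KNOWLEDGE_ALLOWED_MIME_TYPES` settings it read. The handlers below instead consume `req.files` as populated by `express-fileupload`. A sketch of a compatible host-side setup (assumed wiring, not shown in this diff):

```ts
import express from "express";
import fileUpload from "express-fileupload";

const app = express();
// Assumption: the host server registers express-fileupload itself.
// Temp-file mode matches the handlers' primary path (file.tempFilePath);
// without it they fall back to the in-memory file.data buffer.
app.use(
  fileUpload({
    useTempFiles: true,
    tempFileDir: "/tmp/uploads/",
    limits: { fileSize: 50 * 1024 * 1024 }, // mirrors the old 52428800-byte default
  }),
);
```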
@@ -2954,13 +3283,17 @@ var cleanupFile = (filePath) => {
  try {
  fs3.unlinkSync(filePath);
  } catch (error) {
- logger5.error(`Error cleaning up file ${filePath}:`, error);
+ logger6.error(`Error cleaning up file ${filePath}:`, error);
  }
  }
  };
  var cleanupFiles = (files) => {
  if (files) {
- files.forEach((file) => cleanupFile(file.path));
+ files.forEach((file) => {
+ if (file.tempFilePath) {
+ cleanupFile(file.tempFilePath);
+ }
+ });
  }
  };
  async function uploadKnowledgeHandler(req, res, runtime) {
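
For reference, the subset of the `express-fileupload` file object that the rewritten handlers rely on (field names per that library; the old multer equivalents noted in comments):

```ts
// Subset of express-fileupload's UploadedFile used by the handlers below.
interface UploadedFileSubset {
  name: string;         // was multer's file.originalname
  mimetype: string;
  size: number;
  truncated: boolean;   // true if the upload hit a size limit mid-stream
  data: Buffer;         // populated when useTempFiles is false
  tempFilePath: string; // was multer's file.path; set when useTempFiles is true
}
```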
@@ -2968,20 +3301,56 @@ async function uploadKnowledgeHandler(req, res, runtime) {
  if (!service) {
  return sendError(res, 500, "SERVICE_NOT_FOUND", "KnowledgeService not found");
  }
- const hasUploadedFiles = req.files && req.files.length > 0;
+ const hasUploadedFiles = req.files && Object.keys(req.files).length > 0;
  const isJsonRequest = !hasUploadedFiles && req.body && (req.body.fileUrl || req.body.fileUrls);
  if (!hasUploadedFiles && !isJsonRequest) {
  return sendError(res, 400, "INVALID_REQUEST", "Request must contain either files or URLs");
  }
  try {
  if (hasUploadedFiles) {
- const files = req.files;
+ let files = [];
+ if (req.files.files) {
+ if (Array.isArray(req.files.files)) {
+ files = req.files.files;
+ } else {
+ files = [req.files.files];
+ }
+ } else if (req.files.file) {
+ files = [req.files.file];
+ } else {
+ files = Object.values(req.files).flat();
+ }
  if (!files || files.length === 0) {
  return sendError(res, 400, "NO_FILES", "No files uploaded");
  }
+ const invalidFiles = files.filter((file) => {
+ if (file.truncated) {
+ logger6.warn(`File ${file.name} was truncated during upload`);
+ return true;
+ }
+ if (file.size === 0) {
+ logger6.warn(`File ${file.name} is empty`);
+ return true;
+ }
+ if (!file.name || file.name.trim() === "") {
+ logger6.warn(`File has no name`);
+ return true;
+ }
+ if (!file.data && !file.tempFilePath) {
+ logger6.warn(`File ${file.name} has no data or temp file path`);
+ return true;
+ }
+ return false;
+ });
+ if (invalidFiles.length > 0) {
+ cleanupFiles(files);
+ const invalidFileNames = invalidFiles.map((f) => f.name || "unnamed").join(", ");
+ return sendError(res, 400, "INVALID_FILES", `Invalid or corrupted files: ${invalidFileNames}`);
+ }
  const agentId = req.body.agentId || req.query.agentId;
  if (!agentId) {
- logger5.error("[KNOWLEDGE UPLOAD HANDLER] No agent ID provided in request");
+ logger6.error("[KNOWLEDGE UPLOAD HANDLER] No agent ID provided in request");
+ cleanupFiles(files);
  return sendError(
  res,
  400,
@@ -2990,27 +3359,53 @@ async function uploadKnowledgeHandler(req, res, runtime) {
2990
3359
  );
2991
3360
  }
2992
3361
  const worldId = req.body.worldId || agentId;
2993
- logger5.info(`[KNOWLEDGE UPLOAD HANDLER] Processing upload for agent: ${agentId}`);
3362
+ logger6.info(`[KNOWLEDGE UPLOAD HANDLER] Processing upload for agent: ${agentId}`);
2994
3363
  const processingPromises = files.map(async (file, index) => {
2995
3364
  let knowledgeId;
2996
- const originalFilename = file.originalname;
2997
- const filePath = file.path;
3365
+ const originalFilename = file.name;
3366
+ const filePath = file.tempFilePath;
2998
3367
  knowledgeId = req.body?.documentIds && req.body.documentIds[index] || req.body?.documentId || createUniqueUuid2(runtime, `knowledge-${originalFilename}-${Date.now()}`);
2999
- logger5.debug(
3368
+ logger6.debug(
3000
3369
  `[KNOWLEDGE UPLOAD HANDLER] File: ${originalFilename}, Agent ID: ${agentId}, World ID: ${worldId}, Knowledge ID: ${knowledgeId}`
3001
3370
  );
3002
3371
  try {
3003
- const fileBuffer = await fs3.promises.readFile(filePath);
3372
+ let fileBuffer;
3373
+ if (filePath && fs3.existsSync(filePath)) {
3374
+ try {
3375
+ const stats = await fs3.promises.stat(filePath);
3376
+ if (stats.size === 0) {
3377
+ throw new Error("Temporary file is empty");
3378
+ }
3379
+ fileBuffer = await fs3.promises.readFile(filePath);
3380
+ logger6.debug(`[KNOWLEDGE UPLOAD] Read ${fileBuffer.length} bytes from temp file: ${filePath}`);
3381
+ } catch (fsError) {
3382
+ throw new Error(`Failed to read temporary file: ${fsError.message}`);
3383
+ }
3384
+ } else if (file.data && Buffer.isBuffer(file.data)) {
3385
+ fileBuffer = file.data;
3386
+ logger6.debug(`[KNOWLEDGE UPLOAD] Using in-memory buffer of ${fileBuffer.length} bytes`);
3387
+ } else {
3388
+ throw new Error("No file data available - neither temp file nor buffer found");
3389
+ }
3390
+ if (!Buffer.isBuffer(fileBuffer) || fileBuffer.length === 0) {
3391
+ throw new Error("Invalid or empty file buffer");
3392
+ }
3393
+ if (fileBuffer.length !== file.size) {
3394
+ logger6.warn(`File size mismatch for ${originalFilename}: expected ${file.size}, got ${fileBuffer.length}`);
3395
+ }
3004
3396
  const base64Content = fileBuffer.toString("base64");
3397
+ if (!base64Content || base64Content.length === 0) {
3398
+ throw new Error("Failed to convert file to base64");
3399
+ }
3005
3400
  const addKnowledgeOpts = {
3006
3401
  agentId,
3007
3402
  // Pass the agent ID from frontend
3008
3403
  clientDocumentId: knowledgeId,
3009
3404
  // This is knowledgeItem.id
3010
3405
  contentType: file.mimetype,
3011
- // Directly from multer file object
3406
+ // Directly from express-fileupload file object
3012
3407
  originalFilename,
3013
- // Directly from multer file object
3408
+ // Directly from express-fileupload file object
3014
3409
  content: base64Content,
3015
3410
  // The base64 string of the file
3016
3411
  worldId,
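
The hunk above also completes the multer-to-express-fileupload field mapping (file.originalname becomes file.name, file.path becomes file.tempFilePath) and, because express-fileupload may hold bytes either on disk (useTempFiles) or in memory, reads from whichever source exists. A condensed sketch of that dual-source read with the same integrity checks:

    import fs from "node:fs";

    // Sketch: resolve an upload's bytes from a temp file when present,
    // falling back to the in-memory buffer, as in the handler above.
    async function readUploadBytes(file) {
      if (file.tempFilePath && fs.existsSync(file.tempFilePath)) {
        const stats = await fs.promises.stat(file.tempFilePath);
        if (stats.size === 0) throw new Error("Temporary file is empty");
        return fs.promises.readFile(file.tempFilePath);
      }
      if (Buffer.isBuffer(file.data) && file.data.length > 0) {
        return file.data;
      }
      throw new Error("No file data available - neither temp file nor buffer found");
    }
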
@@ -3020,7 +3415,9 @@ async function uploadKnowledgeHandler(req, res, runtime) {
3020
3415
  // Use the correct agent ID
3021
3416
  };
3022
3417
  await service.addKnowledge(addKnowledgeOpts);
3023
- cleanupFile(filePath);
3418
+ if (filePath) {
3419
+ cleanupFile(filePath);
3420
+ }
3024
3421
  return {
3025
3422
  id: knowledgeId,
3026
3423
  filename: originalFilename,
@@ -3030,10 +3427,12 @@ async function uploadKnowledgeHandler(req, res, runtime) {
3030
3427
  status: "success"
3031
3428
  };
3032
3429
  } catch (fileError) {
3033
- logger5.error(
3034
- `[KNOWLEDGE UPLOAD HANDLER] Error processing file ${file.originalname}: ${fileError}`
3430
+ logger6.error(
3431
+ `[KNOWLEDGE UPLOAD HANDLER] Error processing file ${file.name}: ${fileError}`
3035
3432
  );
3036
- cleanupFile(filePath);
3433
+ if (filePath) {
3434
+ cleanupFile(filePath);
3435
+ }
3037
3436
  return {
3038
3437
  id: knowledgeId,
3039
3438
  filename: originalFilename,
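
Note the error strategy here: each file's promise resolves to a success or error record instead of rejecting, so one corrupt upload cannot abort the rest of the batch. A generic sketch of the pattern (the record shape is illustrative):

    // Sketch: per-item error isolation so Promise.all never rejects.
    async function processBatch(items, processOne) {
      return Promise.all(
        items.map(async (item) => {
          try {
            return { item, status: "success", value: await processOne(item) };
          } catch (err) {
            return { item, status: "error_processing", error: String(err) };
          }
        })
      );
    }
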
@@ -3051,7 +3450,7 @@ async function uploadKnowledgeHandler(req, res, runtime) {
3051
3450
  }
3052
3451
  const agentId = req.body.agentId || req.query.agentId;
3053
3452
  if (!agentId) {
3054
- logger5.error("[KNOWLEDGE URL HANDLER] No agent ID provided in request");
3453
+ logger6.error("[KNOWLEDGE URL HANDLER] No agent ID provided in request");
3055
3454
  return sendError(
3056
3455
  res,
3057
3456
  400,
@@ -3059,7 +3458,7 @@ async function uploadKnowledgeHandler(req, res, runtime) {
3059
3458
  "Agent ID is required for uploading knowledge from URLs"
3060
3459
  );
3061
3460
  }
3062
- logger5.info(`[KNOWLEDGE URL HANDLER] Processing URL upload for agent: ${agentId}`);
3461
+ logger6.info(`[KNOWLEDGE URL HANDLER] Processing URL upload for agent: ${agentId}`);
3063
3462
  const processingPromises = fileUrls.map(async (fileUrl) => {
3064
3463
  try {
3065
3464
  const normalizedUrl = normalizeS3Url(fileUrl);
@@ -3068,7 +3467,7 @@ async function uploadKnowledgeHandler(req, res, runtime) {
3068
3467
  const pathSegments = urlObject.pathname.split("/");
3069
3468
  const encodedFilename = pathSegments[pathSegments.length - 1] || "document.pdf";
3070
3469
  const originalFilename = decodeURIComponent(encodedFilename);
3071
- logger5.info(`[KNOWLEDGE URL HANDLER] Fetching content from URL: ${fileUrl}`);
3470
+ logger6.info(`[KNOWLEDGE URL HANDLER] Fetching content from URL: ${fileUrl}`);
3072
3471
  const { content, contentType: fetchedContentType } = await fetchUrlContent(fileUrl);
3073
3472
  let contentType = fetchedContentType;
3074
3473
  if (contentType === "application/octet-stream") {
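
The URL branch derives a display filename from the last path segment, percent-decodes it, and falls back to a generic name; when the server reports application/octet-stream, the type is then re-inferred from that filename. A small sketch of the extraction using only the WHATWG URL API:

    // Sketch: pull a human-readable filename out of a URL's path.
    function filenameFromUrl(fileUrl, fallback = "document.pdf") {
      const { pathname } = new URL(fileUrl);
      const segments = pathname.split("/");
      return decodeURIComponent(segments[segments.length - 1] || fallback);
    }

    // filenameFromUrl("https://bucket.s3.amazonaws.com/docs/My%20Report.pdf")
    //   -> "My Report.pdf"
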
@@ -3107,7 +3506,7 @@ async function uploadKnowledgeHandler(req, res, runtime) {
3107
3506
  url: normalizedUrl
3108
3507
  }
3109
3508
  };
3110
- logger5.debug(
3509
+ logger6.debug(
3111
3510
  `[KNOWLEDGE URL HANDLER] Processing knowledge from URL: ${fileUrl} (type: ${contentType})`
3112
3511
  );
3113
3512
  const result = await service.addKnowledge(addKnowledgeOpts);
@@ -3121,7 +3520,7 @@ async function uploadKnowledgeHandler(req, res, runtime) {
3121
3520
  status: "success"
3122
3521
  };
3123
3522
  } catch (urlError) {
3124
- logger5.error(`[KNOWLEDGE URL HANDLER] Error processing URL ${fileUrl}: ${urlError}`);
3523
+ logger6.error(`[KNOWLEDGE URL HANDLER] Error processing URL ${fileUrl}: ${urlError}`);
3125
3524
  return {
3126
3525
  fileUrl,
3127
3526
  status: "error_processing",
@@ -3133,9 +3532,10 @@ async function uploadKnowledgeHandler(req, res, runtime) {
3133
3532
  sendSuccess(res, results);
3134
3533
  }
3135
3534
  } catch (error) {
3136
- logger5.error("[KNOWLEDGE HANDLER] Error processing knowledge:", error);
3137
- if (hasUploadedFiles) {
3138
- cleanupFiles(req.files);
3535
+ logger6.error("[KNOWLEDGE HANDLER] Error processing knowledge:", error);
3536
+ if (hasUploadedFiles && req.files) {
3537
+ const allFiles = Object.values(req.files).flat();
3538
+ cleanupFiles(allFiles);
3139
3539
  }
3140
3540
  sendError(res, 500, "PROCESSING_ERROR", "Failed to process knowledge", error.message);
3141
3541
  }
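
The top-level catch changes with the middleware too: req.files is now a map, so it is flattened with Object.values(...).flat() before cleanup. cleanupFiles itself is defined outside this diff; a plausible, hypothetical sketch, assuming it only needs to unlink temp files that still exist (the package's real helper may differ):

    import fs from "node:fs";

    // Hypothetical sketch of a temp-file cleanup helper.
    function cleanupTempFiles(files) {
      for (const file of files ?? []) {
        const p = file.tempFilePath;
        if (!p || !fs.existsSync(p)) continue;
        try {
          fs.unlinkSync(p);
        } catch (err) {
          console.warn(`Failed to remove temp file ${p}: ${err}`);
        }
      }
    }
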
@@ -3172,7 +3572,7 @@ async function getKnowledgeDocumentsHandler(req, res, runtime) {
3172
3572
  // Or if the URL is stored in the metadata (check if it exists)
3173
3573
  memory.metadata && "url" in memory.metadata && typeof memory.metadata.url === "string" && normalizedRequestUrls.includes(normalizeS3Url(memory.metadata.url))
3174
3574
  );
3175
- logger5.debug(
3575
+ logger6.debug(
3176
3576
  `[KNOWLEDGE GET HANDLER] Filtered documents by URLs: ${fileUrls.length} URLs, found ${filteredMemories.length} matching documents`
3177
3577
  );
3178
3578
  }
@@ -3187,12 +3587,12 @@ async function getKnowledgeDocumentsHandler(req, res, runtime) {
3187
3587
  totalRequested: fileUrls ? fileUrls.length : 0
3188
3588
  });
3189
3589
  } catch (error) {
3190
- logger5.error("[KNOWLEDGE GET HANDLER] Error retrieving documents:", error);
3590
+ logger6.error("[KNOWLEDGE GET HANDLER] Error retrieving documents:", error);
3191
3591
  sendError(res, 500, "RETRIEVAL_ERROR", "Failed to retrieve documents", error.message);
3192
3592
  }
3193
3593
  }
3194
3594
  async function deleteKnowledgeDocumentHandler(req, res, runtime) {
3195
- logger5.debug(`[KNOWLEDGE DELETE HANDLER] Received DELETE request:
3595
+ logger6.debug(`[KNOWLEDGE DELETE HANDLER] Received DELETE request:
3196
3596
  - path: ${req.path}
3197
3597
  - params: ${JSON.stringify(req.params)}
3198
3598
  `);
@@ -3207,26 +3607,26 @@ async function deleteKnowledgeDocumentHandler(req, res, runtime) {
3207
3607
  }
3208
3608
  const knowledgeId = req.params.knowledgeId;
3209
3609
  if (!knowledgeId || knowledgeId.length < 36) {
3210
- logger5.error(`[KNOWLEDGE DELETE HANDLER] Invalid knowledge ID format: ${knowledgeId}`);
3610
+ logger6.error(`[KNOWLEDGE DELETE HANDLER] Invalid knowledge ID format: ${knowledgeId}`);
3211
3611
  return sendError(res, 400, "INVALID_ID", "Invalid Knowledge ID format");
3212
3612
  }
3213
3613
  try {
3214
3614
  const typedKnowledgeId = knowledgeId;
3215
- logger5.debug(
3615
+ logger6.debug(
3216
3616
  `[KNOWLEDGE DELETE HANDLER] Attempting to delete document with ID: ${typedKnowledgeId}`
3217
3617
  );
3218
3618
  await service.deleteMemory(typedKnowledgeId);
3219
- logger5.info(
3619
+ logger6.info(
3220
3620
  `[KNOWLEDGE DELETE HANDLER] Successfully deleted document with ID: ${typedKnowledgeId}`
3221
3621
  );
3222
3622
  sendSuccess(res, null, 204);
3223
3623
  } catch (error) {
3224
- logger5.error(`[KNOWLEDGE DELETE HANDLER] Error deleting document ${knowledgeId}:`, error);
3624
+ logger6.error(`[KNOWLEDGE DELETE HANDLER] Error deleting document ${knowledgeId}:`, error);
3225
3625
  sendError(res, 500, "DELETE_ERROR", "Failed to delete document", error.message);
3226
3626
  }
3227
3627
  }
3228
3628
  async function getKnowledgeByIdHandler(req, res, runtime) {
3229
- logger5.debug(`[KNOWLEDGE GET BY ID HANDLER] Received GET request:
3629
+ logger6.debug(`[KNOWLEDGE GET BY ID HANDLER] Received GET request:
3230
3630
  - path: ${req.path}
3231
3631
  - params: ${JSON.stringify(req.params)}
3232
3632
  `);
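
Both ID-scoped handlers gate on knowledgeId.length < 36, a cheap shape check (36 is the length of a canonical hyphenated UUID) rather than real validation. If stricter checking were wanted, a sketch:

    // Sketch: canonical hyphenated-UUID shape test, versus the
    // cheaper length >= 36 guard the handlers use.
    const UUID_RE =
      /^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/i;

    function isCanonicalUuid(id) {
      return typeof id === "string" && UUID_RE.test(id);
    }
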
@@ -3241,11 +3641,11 @@ async function getKnowledgeByIdHandler(req, res, runtime) {
3241
3641
  }
3242
3642
  const knowledgeId = req.params.knowledgeId;
3243
3643
  if (!knowledgeId || knowledgeId.length < 36) {
3244
- logger5.error(`[KNOWLEDGE GET BY ID HANDLER] Invalid knowledge ID format: ${knowledgeId}`);
3644
+ logger6.error(`[KNOWLEDGE GET BY ID HANDLER] Invalid knowledge ID format: ${knowledgeId}`);
3245
3645
  return sendError(res, 400, "INVALID_ID", "Invalid Knowledge ID format");
3246
3646
  }
3247
3647
  try {
3248
- logger5.debug(`[KNOWLEDGE GET BY ID HANDLER] Retrieving document with ID: ${knowledgeId}`);
3648
+ logger6.debug(`[KNOWLEDGE GET BY ID HANDLER] Retrieving document with ID: ${knowledgeId}`);
3249
3649
  const agentId = req.query.agentId;
3250
3650
  const memories = await service.getMemories({
3251
3651
  tableName: "documents",
@@ -3262,17 +3662,17 @@ async function getKnowledgeByIdHandler(req, res, runtime) {
3262
3662
  };
3263
3663
  sendSuccess(res, { document: cleanDocument });
3264
3664
  } catch (error) {
3265
- logger5.error(`[KNOWLEDGE GET BY ID HANDLER] Error retrieving document ${knowledgeId}:`, error);
3665
+ logger6.error(`[KNOWLEDGE GET BY ID HANDLER] Error retrieving document ${knowledgeId}:`, error);
3266
3666
  sendError(res, 500, "RETRIEVAL_ERROR", "Failed to retrieve document", error.message);
3267
3667
  }
3268
3668
  }
3269
3669
  async function knowledgePanelHandler(req, res, runtime) {
3270
3670
  const agentId = runtime.agentId;
3271
- logger5.debug(`[KNOWLEDGE PANEL] Serving panel for agent ${agentId}, request path: ${req.path}`);
3671
+ logger6.debug(`[KNOWLEDGE PANEL] Serving panel for agent ${agentId}, request path: ${req.path}`);
3272
3672
  try {
3273
3673
  const currentDir = path3.dirname(new URL(import.meta.url).pathname);
3274
3674
  const frontendPath = path3.join(currentDir, "../dist/index.html");
3275
- logger5.debug(`[KNOWLEDGE PANEL] Looking for frontend at: ${frontendPath}`);
3675
+ logger6.debug(`[KNOWLEDGE PANEL] Looking for frontend at: ${frontendPath}`);
3276
3676
  if (fs3.existsSync(frontendPath)) {
3277
3677
  const html = await fs3.promises.readFile(frontendPath, "utf8");
3278
3678
  const injectedHtml = html.replace(
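
The panel handler locates its bundled frontend relative to the compiled module via new URL(import.meta.url).pathname, so the lookup works wherever the package is installed. A minimal ESM sketch of that resolution (note that node:url's fileURLToPath is the stricter conversion; pathname can mis-handle percent-encoded or Windows paths):

    import path from "node:path";
    import fs from "node:fs";

    // Sketch: resolve a file shipped next to the compiled module (ESM).
    function resolveBundledFile(relative) {
      const currentDir = path.dirname(new URL(import.meta.url).pathname);
      return path.join(currentDir, relative);
    }

    async function loadPanelHtml() {
      const frontendPath = resolveBundledFile("../dist/index.html");
      if (!fs.existsSync(frontendPath)) return null;
      return fs.promises.readFile(frontendPath, "utf8");
    }
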
@@ -3306,10 +3706,10 @@ async function knowledgePanelHandler(req, res, runtime) {
3306
3706
  }
3307
3707
  }
3308
3708
  } catch (manifestError) {
3309
- logger5.error("[KNOWLEDGE PANEL] Error reading manifest:", manifestError);
3709
+ logger6.error("[KNOWLEDGE PANEL] Error reading manifest:", manifestError);
3310
3710
  }
3311
3711
  }
3312
- logger5.debug(`[KNOWLEDGE PANEL] Using fallback with CSS: ${cssFile}, JS: ${jsFile}`);
3712
+ logger6.debug(`[KNOWLEDGE PANEL] Using fallback with CSS: ${cssFile}, JS: ${jsFile}`);
3313
3713
  const html = `
3314
3714
  <!DOCTYPE html>
3315
3715
  <html lang="en">
@@ -3343,13 +3743,13 @@ async function knowledgePanelHandler(req, res, runtime) {
3343
3743
  res.end(html);
3344
3744
  }
3345
3745
  } catch (error) {
3346
- logger5.error("[KNOWLEDGE PANEL] Error serving frontend:", error);
3746
+ logger6.error("[KNOWLEDGE PANEL] Error serving frontend:", error);
3347
3747
  sendError(res, 500, "FRONTEND_ERROR", "Failed to load knowledge panel", error.message);
3348
3748
  }
3349
3749
  }
3350
3750
  async function frontendAssetHandler(req, res, runtime) {
3351
3751
  try {
3352
- logger5.debug(
3752
+ logger6.debug(
3353
3753
  `[KNOWLEDGE ASSET HANDLER] Called with req.path: ${req.path}, req.originalUrl: ${req.originalUrl}, req.params: ${JSON.stringify(req.params)}`
3354
3754
  );
3355
3755
  const currentDir = path3.dirname(new URL(import.meta.url).pathname);
@@ -3369,7 +3769,7 @@ async function frontendAssetHandler(req, res, runtime) {
3369
3769
  );
3370
3770
  }
3371
3771
  const assetPath = path3.join(currentDir, "../dist/assets", assetName);
3372
- logger5.debug(`[KNOWLEDGE ASSET HANDLER] Attempting to serve asset: ${assetPath}`);
3772
+ logger6.debug(`[KNOWLEDGE ASSET HANDLER] Attempting to serve asset: ${assetPath}`);
3373
3773
  if (fs3.existsSync(assetPath)) {
3374
3774
  const fileStream = fs3.createReadStream(assetPath);
3375
3775
  let contentType = "application/octet-stream";
@@ -3384,7 +3784,7 @@ async function frontendAssetHandler(req, res, runtime) {
3384
3784
  sendError(res, 404, "NOT_FOUND", `Asset not found: ${req.url}`);
3385
3785
  }
3386
3786
  } catch (error) {
3387
- logger5.error(`[KNOWLEDGE ASSET HANDLER] Error serving asset ${req.url}:`, error);
3787
+ logger6.error(`[KNOWLEDGE ASSET HANDLER] Error serving asset ${req.url}:`, error);
3388
3788
  sendError(res, 500, "ASSET_ERROR", `Failed to load asset ${req.url}`, error.message);
3389
3789
  }
3390
3790
  }
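
The asset handler streams files out of ../dist/assets with application/octet-stream as the default Content-Type. A condensed sketch of the same idea; the extension map is illustrative, and the path.basename confinement is an extra traversal guard added here, not something this hunk shows:

    import path from "node:path";
    import fs from "node:fs";

    const MIME = { ".js": "application/javascript", ".css": "text/css" };

    // Sketch: stream a bundled asset with a best-effort Content-Type.
    function serveAsset(res, assetsDir, assetName) {
      const assetPath = path.join(assetsDir, path.basename(assetName));
      if (!fs.existsSync(assetPath)) {
        res.statusCode = 404;
        return res.end("Not found");
      }
      res.setHeader(
        "Content-Type",
        MIME[path.extname(assetPath)] || "application/octet-stream"
      );
      fs.createReadStream(assetPath).pipe(res);
    }
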
@@ -3408,7 +3808,7 @@ async function getKnowledgeChunksHandler(req, res, runtime) {
3408
3808
  ) : chunks;
3409
3809
  sendSuccess(res, { chunks: filteredChunks });
3410
3810
  } catch (error) {
3411
- logger5.error("[KNOWLEDGE CHUNKS GET HANDLER] Error retrieving chunks:", error);
3811
+ logger6.error("[KNOWLEDGE CHUNKS GET HANDLER] Error retrieving chunks:", error);
3412
3812
  sendError(res, 500, "RETRIEVAL_ERROR", "Failed to retrieve knowledge chunks", error.message);
3413
3813
  }
3414
3814
  }
@@ -3430,14 +3830,14 @@ async function searchKnowledgeHandler(req, res, runtime) {
3430
3830
  return sendError(res, 400, "INVALID_QUERY", "Search query cannot be empty");
3431
3831
  }
3432
3832
  if (req.query.threshold && (parsedThreshold < 0 || parsedThreshold > 1)) {
3433
- logger5.debug(
3833
+ logger6.debug(
3434
3834
  `[KNOWLEDGE SEARCH] Threshold value ${parsedThreshold} was clamped to ${matchThreshold}`
3435
3835
  );
3436
3836
  }
3437
3837
  if (req.query.limit && (parsedLimit < 1 || parsedLimit > 100)) {
3438
- logger5.debug(`[KNOWLEDGE SEARCH] Limit value ${parsedLimit} was clamped to ${limit}`);
3838
+ logger6.debug(`[KNOWLEDGE SEARCH] Limit value ${parsedLimit} was clamped to ${limit}`);
3439
3839
  }
3440
- logger5.debug(
3840
+ logger6.debug(
3441
3841
  `[KNOWLEDGE SEARCH] Searching for: "${searchText}" with threshold: ${matchThreshold}, limit: ${limit}`
3442
3842
  );
3443
3843
  const embedding = await runtime.useModel(ModelType4.TEXT_EMBEDDING, {
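
Judging by the log messages, the search endpoint clamps a user-supplied threshold into [0, 1] and limit into [1, 100] instead of rejecting out-of-range requests. A sketch of such a clamp; the fallback values are illustrative, since the defaults are set outside this hunk:

    // Sketch: clamp a numeric query parameter into a closed range,
    // using a fallback when the input does not parse.
    function clampParam(raw, { min, max, fallback }) {
      const parsed = Number.parseFloat(raw);
      if (Number.isNaN(parsed)) return fallback;
      return Math.min(max, Math.max(min, parsed));
    }

    // const matchThreshold = clampParam(req.query.threshold, { min: 0, max: 1, fallback: 0.5 });
    // const limit = clampParam(req.query.limit, { min: 1, max: 100, fallback: 20 });
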
@@ -3464,7 +3864,7 @@ async function searchKnowledgeHandler(req, res, runtime) {
3464
3864
  documentFilename = document.metadata.filename || documentFilename;
3465
3865
  }
3466
3866
  } catch (e) {
3467
- logger5.debug(`Could not fetch document ${documentId} for fragment`);
3867
+ logger6.debug(`Could not fetch document ${documentId} for fragment`);
3468
3868
  }
3469
3869
  }
3470
3870
  return {
@@ -3479,7 +3879,7 @@ async function searchKnowledgeHandler(req, res, runtime) {
3479
3879
  };
3480
3880
  })
3481
3881
  );
3482
- logger5.info(
3882
+ logger6.info(
3483
3883
  `[KNOWLEDGE SEARCH] Found ${enhancedResults.length} results for query: "${searchText}"`
3484
3884
  );
3485
3885
  sendSuccess(res, {
@@ -3489,23 +3889,29 @@ async function searchKnowledgeHandler(req, res, runtime) {
3489
3889
  count: enhancedResults.length
3490
3890
  });
3491
3891
  } catch (error) {
3492
- logger5.error("[KNOWLEDGE SEARCH] Error searching knowledge:", error);
3892
+ logger6.error("[KNOWLEDGE SEARCH] Error searching knowledge:", error);
3493
3893
  sendError(res, 500, "SEARCH_ERROR", "Failed to search knowledge", error.message);
3494
3894
  }
3495
3895
  }
3496
- async function uploadKnowledgeWithMulter(req, res, runtime) {
3497
- const upload = createUploadMiddleware(runtime);
3498
- const uploadArray = upload.array(
3499
- "files",
3500
- parseInt(runtime.getSetting("KNOWLEDGE_MAX_FILES") || "10")
3501
- );
3502
- uploadArray(req, res, (err) => {
3503
- if (err) {
3504
- logger5.error("[KNOWLEDGE UPLOAD] Multer error:", err);
3505
- return sendError(res, 400, "UPLOAD_ERROR", err.message);
3506
- }
3507
- uploadKnowledgeHandler(req, res, runtime);
3896
+ async function handleKnowledgeUpload(req, res, runtime) {
3897
+ logger6.debug("[KNOWLEDGE UPLOAD] Starting upload handler");
3898
+ logger6.debug("[KNOWLEDGE UPLOAD] Request details:", {
3899
+ method: req.method,
3900
+ url: req.url,
3901
+ contentType: req.headers["content-type"],
3902
+ contentLength: req.headers["content-length"],
3903
+ hasFiles: req.files ? Object.keys(req.files).length : 0,
3904
+ hasBody: req.body ? Object.keys(req.body).length : 0
3508
3905
  });
3906
+ try {
3907
+ logger6.debug("[KNOWLEDGE UPLOAD] Using files parsed by global middleware");
3908
+ await uploadKnowledgeHandler(req, res, runtime);
3909
+ } catch (handlerError) {
3910
+ logger6.error("[KNOWLEDGE UPLOAD] Handler error:", handlerError);
3911
+ if (!res.headersSent) {
3912
+ sendError(res, 500, "HANDLER_ERROR", "Failed to process upload");
3913
+ }
3914
+ }
3509
3915
  }
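
This is the pivotal change of the release: the per-route multer wrapper uploadKnowledgeWithMulter is deleted, and handleKnowledgeUpload assumes a global middleware has already populated req.files. That registration is not part of this diff; a hypothetical sketch of what an app-level express-fileupload parser looks like (field names, limits, and temp directory are illustrative):

    import express from "express";
    import fileUpload from "express-fileupload";

    const app = express();

    // Hypothetical: one app-level parser replaces per-route multer.
    app.use(
      fileUpload({
        useTempFiles: true, // spill uploads to disk instead of RAM
        tempFileDir: "/tmp/",
        limits: { fileSize: 50 * 1024 * 1024 },
      })
    );

    app.post("/documents", (req, res) => {
      // req.files is already parsed here, as handleKnowledgeUpload expects.
      res.json({ fields: Object.keys(req.files ?? {}) });
    });
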
3510
3916
  var knowledgeRoutes = [
3511
3917
  {
@@ -3523,7 +3929,7 @@ var knowledgeRoutes = [
3523
3929
  {
3524
3930
  type: "POST",
3525
3931
  path: "/documents",
3526
- handler: uploadKnowledgeWithMulter
3932
+ handler: handleKnowledgeUpload
3527
3933
  },
3528
3934
  {
3529
3935
  type: "GET",
@@ -3564,35 +3970,36 @@ var knowledgePlugin = {
3564
3970
  CTX_KNOWLEDGE_ENABLED: "false"
3565
3971
  },
3566
3972
  async init(config, runtime) {
3567
- logger6.info("Initializing Knowledge Plugin...");
3973
+ logger7.info("Initializing Knowledge Plugin...");
3568
3974
  try {
3569
- logger6.info("Validating model configuration for Knowledge plugin...");
3975
+ logger7.info("Validating model configuration for Knowledge plugin...");
3570
3976
  const validatedConfig = validateModelConfig(runtime);
3571
3977
  if (validatedConfig.CTX_KNOWLEDGE_ENABLED) {
3572
- logger6.info("Running in Contextual Knowledge mode with text generation capabilities.");
3573
- logger6.info(
3978
+ logger7.info("Running in Contextual Knowledge mode with text generation capabilities.");
3979
+ logger7.info(
3574
3980
  `Using ${validatedConfig.EMBEDDING_PROVIDER} for embeddings and ${validatedConfig.TEXT_PROVIDER} for text generation.`
3575
3981
  );
3576
3982
  } else {
3577
3983
  const usingPluginOpenAI = !process.env.EMBEDDING_PROVIDER;
3578
3984
  if (usingPluginOpenAI) {
3579
- logger6.info(
3985
+ logger7.info(
3580
3986
  "Running in Basic Embedding mode with auto-detected configuration from plugin-openai."
3581
3987
  );
3582
3988
  } else {
3583
- logger6.info(
3989
+ logger7.info(
3584
3990
  "Running in Basic Embedding mode (CTX_KNOWLEDGE_ENABLED=false). TEXT_PROVIDER and TEXT_MODEL not required."
3585
3991
  );
3586
3992
  }
3587
- logger6.info(
3993
+ logger7.info(
3588
3994
  `Using ${validatedConfig.EMBEDDING_PROVIDER} for embeddings with ${validatedConfig.TEXT_EMBEDDING_MODEL}.`
3589
3995
  );
3590
3996
  }
3591
- logger6.info("Model configuration validated successfully.");
3997
+ logger7.info("Model configuration validated successfully.");
3592
3998
  if (runtime) {
3593
- logger6.info(`Knowledge Plugin initialized for agent: ${runtime.agentId}`);
3594
- const loadDocsOnStartup = config.LOAD_DOCS_ON_STARTUP !== "false" && process.env.LOAD_DOCS_ON_STARTUP !== "false";
3999
+ logger7.info(`Knowledge Plugin initialized for agent: ${runtime.agentId}`);
4000
+ const loadDocsOnStartup = config.LOAD_DOCS_ON_STARTUP === "true" || process.env.LOAD_DOCS_ON_STARTUP === "true";
3595
4001
  if (loadDocsOnStartup) {
4002
+ logger7.info("LOAD_DOCS_ON_STARTUP is enabled. Scheduling document loading...");
3596
4003
  setTimeout(async () => {
3597
4004
  try {
3598
4005
  const service = runtime.getService(KnowledgeService.serviceType);
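
A subtle behavioral break hides in init: document auto-loading was opt-out in 1.0.6 (any value other than "false" enabled it, including unset) and is strictly opt-in as of 1.0.8 (only the literal string "true" enables it). Side by side:

    // Sketch of the two predicates; `config` holds string settings.
    // 1.0.6: opt-out - enabled unless explicitly disabled (unset => true).
    function loadDocsOptOut(config) {
      return config.LOAD_DOCS_ON_STARTUP !== "false" &&
             process.env.LOAD_DOCS_ON_STARTUP !== "false";
    }

    // 1.0.8: opt-in - enabled only when explicitly requested (unset => false).
    function loadDocsOptIn(config) {
      return config.LOAD_DOCS_ON_STARTUP === "true" ||
             process.env.LOAD_DOCS_ON_STARTUP === "true";
    }

Deployments that relied on the old default now need LOAD_DOCS_ON_STARTUP=true to keep loading the docs folder.
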
@@ -3600,20 +4007,22 @@ var knowledgePlugin = {
3600
4007
  const { loadDocsFromPath: loadDocsFromPath2 } = await import("./docs-loader-IBTEOAYT.js");
3601
4008
  const result = await loadDocsFromPath2(service, runtime.agentId);
3602
4009
  if (result.successful > 0) {
3603
- logger6.info(`Loaded ${result.successful} documents from docs folder on startup`);
4010
+ logger7.info(`Loaded ${result.successful} documents from docs folder on startup`);
3604
4011
  }
3605
4012
  }
3606
4013
  } catch (error) {
3607
- logger6.error("Error loading documents on startup:", error);
4014
+ logger7.error("Error loading documents on startup:", error);
3608
4015
  }
3609
4016
  }, 5e3);
4017
+ } else {
4018
+ logger7.info("LOAD_DOCS_ON_STARTUP is not enabled. Skipping automatic document loading.");
3610
4019
  }
3611
4020
  }
3612
- logger6.info(
4021
+ logger7.info(
3613
4022
  "Knowledge Plugin initialized. Frontend panel should be discoverable via its public route."
3614
4023
  );
3615
4024
  } catch (error) {
3616
- logger6.error("Failed to initialize Knowledge plugin:", error);
4025
+ logger7.error("Failed to initialize Knowledge plugin:", error);
3617
4026
  throw error;
3618
4027
  }
3619
4028
  },
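
Startup loading stays deferred: the docs loader is dynamically imported inside a five-second setTimeout, which keeps the heavy chunk out of the plugin's initial import graph and gives the runtime time to register the KnowledgeService. A sketch of the pattern as used above (the missing-service early return is assumed, since that line falls between hunks):

    // Sketch: defer optional, heavyweight work past service registration.
    function scheduleStartupLoad(runtime, serviceType) {
      setTimeout(async () => {
        try {
          const service = runtime.getService(serviceType);
          if (!service) return; // assumed guard; not shown in this diff
          const { loadDocsFromPath } = await import("./docs-loader-IBTEOAYT.js");
          const result = await loadDocsFromPath(service, runtime.agentId);
          if (result.successful > 0) {
            console.info(`Loaded ${result.successful} documents from docs folder on startup`);
          }
        } catch (error) {
          console.error("Error loading documents on startup:", error);
        }
      }, 5000);
    }
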