powermem-0.1.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- powermem/__init__.py +103 -0
- powermem/agent/__init__.py +35 -0
- powermem/agent/abstract/__init__.py +22 -0
- powermem/agent/abstract/collaboration.py +259 -0
- powermem/agent/abstract/context.py +187 -0
- powermem/agent/abstract/manager.py +232 -0
- powermem/agent/abstract/permission.py +217 -0
- powermem/agent/abstract/privacy.py +267 -0
- powermem/agent/abstract/scope.py +199 -0
- powermem/agent/agent.py +791 -0
- powermem/agent/components/__init__.py +18 -0
- powermem/agent/components/collaboration_coordinator.py +645 -0
- powermem/agent/components/permission_controller.py +586 -0
- powermem/agent/components/privacy_protector.py +767 -0
- powermem/agent/components/scope_controller.py +685 -0
- powermem/agent/factories/__init__.py +16 -0
- powermem/agent/factories/agent_factory.py +266 -0
- powermem/agent/factories/config_factory.py +308 -0
- powermem/agent/factories/memory_factory.py +229 -0
- powermem/agent/implementations/__init__.py +16 -0
- powermem/agent/implementations/hybrid.py +728 -0
- powermem/agent/implementations/multi_agent.py +1040 -0
- powermem/agent/implementations/multi_user.py +1020 -0
- powermem/agent/types.py +53 -0
- powermem/agent/wrappers/__init__.py +14 -0
- powermem/agent/wrappers/agent_memory_wrapper.py +427 -0
- powermem/agent/wrappers/compatibility_wrapper.py +520 -0
- powermem/config_loader.py +318 -0
- powermem/configs.py +249 -0
- powermem/core/__init__.py +19 -0
- powermem/core/async_memory.py +1493 -0
- powermem/core/audit.py +258 -0
- powermem/core/base.py +165 -0
- powermem/core/memory.py +1567 -0
- powermem/core/setup.py +162 -0
- powermem/core/telemetry.py +215 -0
- powermem/integrations/__init__.py +17 -0
- powermem/integrations/embeddings/__init__.py +13 -0
- powermem/integrations/embeddings/aws_bedrock.py +100 -0
- powermem/integrations/embeddings/azure_openai.py +55 -0
- powermem/integrations/embeddings/base.py +31 -0
- powermem/integrations/embeddings/config/base.py +132 -0
- powermem/integrations/embeddings/configs.py +31 -0
- powermem/integrations/embeddings/factory.py +48 -0
- powermem/integrations/embeddings/gemini.py +39 -0
- powermem/integrations/embeddings/huggingface.py +41 -0
- powermem/integrations/embeddings/langchain.py +35 -0
- powermem/integrations/embeddings/lmstudio.py +29 -0
- powermem/integrations/embeddings/mock.py +11 -0
- powermem/integrations/embeddings/ollama.py +53 -0
- powermem/integrations/embeddings/openai.py +49 -0
- powermem/integrations/embeddings/qwen.py +102 -0
- powermem/integrations/embeddings/together.py +31 -0
- powermem/integrations/embeddings/vertexai.py +54 -0
- powermem/integrations/llm/__init__.py +18 -0
- powermem/integrations/llm/anthropic.py +87 -0
- powermem/integrations/llm/base.py +132 -0
- powermem/integrations/llm/config/anthropic.py +56 -0
- powermem/integrations/llm/config/azure.py +56 -0
- powermem/integrations/llm/config/base.py +62 -0
- powermem/integrations/llm/config/deepseek.py +56 -0
- powermem/integrations/llm/config/ollama.py +56 -0
- powermem/integrations/llm/config/openai.py +79 -0
- powermem/integrations/llm/config/qwen.py +68 -0
- powermem/integrations/llm/config/qwen_asr.py +46 -0
- powermem/integrations/llm/config/vllm.py +56 -0
- powermem/integrations/llm/configs.py +26 -0
- powermem/integrations/llm/deepseek.py +106 -0
- powermem/integrations/llm/factory.py +118 -0
- powermem/integrations/llm/gemini.py +201 -0
- powermem/integrations/llm/langchain.py +65 -0
- powermem/integrations/llm/ollama.py +106 -0
- powermem/integrations/llm/openai.py +166 -0
- powermem/integrations/llm/openai_structured.py +80 -0
- powermem/integrations/llm/qwen.py +207 -0
- powermem/integrations/llm/qwen_asr.py +171 -0
- powermem/integrations/llm/vllm.py +106 -0
- powermem/integrations/rerank/__init__.py +20 -0
- powermem/integrations/rerank/base.py +43 -0
- powermem/integrations/rerank/config/__init__.py +7 -0
- powermem/integrations/rerank/config/base.py +27 -0
- powermem/integrations/rerank/configs.py +23 -0
- powermem/integrations/rerank/factory.py +68 -0
- powermem/integrations/rerank/qwen.py +159 -0
- powermem/intelligence/__init__.py +17 -0
- powermem/intelligence/ebbinghaus_algorithm.py +354 -0
- powermem/intelligence/importance_evaluator.py +361 -0
- powermem/intelligence/intelligent_memory_manager.py +284 -0
- powermem/intelligence/manager.py +148 -0
- powermem/intelligence/plugin.py +229 -0
- powermem/prompts/__init__.py +29 -0
- powermem/prompts/graph/graph_prompts.py +217 -0
- powermem/prompts/graph/graph_tools_prompts.py +469 -0
- powermem/prompts/importance_evaluation.py +246 -0
- powermem/prompts/intelligent_memory_prompts.py +163 -0
- powermem/prompts/templates.py +193 -0
- powermem/storage/__init__.py +14 -0
- powermem/storage/adapter.py +896 -0
- powermem/storage/base.py +109 -0
- powermem/storage/config/base.py +13 -0
- powermem/storage/config/oceanbase.py +58 -0
- powermem/storage/config/pgvector.py +52 -0
- powermem/storage/config/sqlite.py +27 -0
- powermem/storage/configs.py +159 -0
- powermem/storage/factory.py +59 -0
- powermem/storage/migration_manager.py +438 -0
- powermem/storage/oceanbase/__init__.py +8 -0
- powermem/storage/oceanbase/constants.py +162 -0
- powermem/storage/oceanbase/oceanbase.py +1384 -0
- powermem/storage/oceanbase/oceanbase_graph.py +1441 -0
- powermem/storage/pgvector/__init__.py +7 -0
- powermem/storage/pgvector/pgvector.py +420 -0
- powermem/storage/sqlite/__init__.py +0 -0
- powermem/storage/sqlite/sqlite.py +218 -0
- powermem/storage/sqlite/sqlite_vector_store.py +311 -0
- powermem/utils/__init__.py +35 -0
- powermem/utils/utils.py +605 -0
- powermem/version.py +23 -0
- powermem-0.1.0.dist-info/METADATA +187 -0
- powermem-0.1.0.dist-info/RECORD +123 -0
- powermem-0.1.0.dist-info/WHEEL +5 -0
- powermem-0.1.0.dist-info/licenses/LICENSE +206 -0
- powermem-0.1.0.dist-info/top_level.txt +1 -0
powermem/intelligence/importance_evaluator.py
@@ -0,0 +1,361 @@
"""
Importance evaluator for memory content

This module evaluates the importance of memory content using LLM.
"""

import logging
from typing import Any, Dict, Optional
import json
from ..prompts.importance_evaluation import ImportanceEvaluationPrompts

logger = logging.getLogger(__name__)


class ImportanceEvaluator:
    """
    Evaluates the importance of memory content.
    """

    def __init__(self, config: Dict[str, Any], llm_config: Dict[str, Any]):
        """
        Initialize importance evaluator.

        Args:
            config: Importance evaluator configuration
            llm_config: LLM configuration
        """
        self.config = config
        self.llm_config = llm_config
        self.llm = None  # Will be initialized by the parent manager

        # Initialize prompts
        self.prompts = ImportanceEvaluationPrompts(config)

        # Importance criteria weights
        self.criteria_weights = {
            "relevance": 0.3,
            "novelty": 0.2,
            "emotional_impact": 0.15,
            "actionable": 0.15,
            "factual": 0.1,
            "personal": 0.1
        }

        logger.info("ImportanceEvaluator initialized")

    def set_llm(self, llm):
        """
        Set the LLM instance for evaluation.

        Args:
            llm: LLM instance
        """
        self.llm = llm
        logger.info("LLM instance set for importance evaluation")

    def evaluate_importance(
        self,
        content: str,
        metadata: Optional[Dict[str, Any]] = None,
        context: Optional[Dict[str, Any]] = None
    ) -> float:
        """
        Evaluate the importance of content.

        Args:
            content: Content to evaluate
            metadata: Additional metadata
            context: Additional context

        Returns:
            Importance score between 0 and 1
        """
        try:
            # Use LLM-based evaluation if available, otherwise fall back to rule-based
            if self.llm:
                importance_score = self._llm_based_evaluation(content, metadata, context)
            else:
                importance_score = self._rule_based_evaluation(content, metadata, context)

            logger.debug(f"Evaluated importance: {importance_score}")

            return importance_score

        except Exception as e:
            logger.error(f"Failed to evaluate importance: {e}")
            return 0.5  # Default medium importance

    def _rule_based_evaluation(
        self,
        content: str,
        metadata: Optional[Dict[str, Any]] = None,
        context: Optional[Dict[str, Any]] = None
    ) -> float:
        """
        Rule-based importance evaluation.

        Args:
            content: Content to evaluate
            metadata: Additional metadata
            context: Additional context

        Returns:
            Importance score between 0 and 1
        """
        score = 0.0

        # Length factor
        if len(content) > 100:
            score += 0.1
        elif len(content) > 50:
            score += 0.05

        # Keyword importance
        important_keywords = [
            "important", "critical", "urgent", "remember", "note",
            "preference", "like", "dislike", "hate", "love",
            "password", "secret", "private", "confidential"
        ]

        content_lower = content.lower()
        for keyword in important_keywords:
            if keyword in content_lower:
                score += 0.1

        # Question factor
        if "?" in content:
            score += 0.05

        # Exclamation factor
        if "!" in content:
            score += 0.05

        # Metadata factors
        if metadata:
            if metadata.get("priority") == "high":
                score += 0.2
            elif metadata.get("priority") == "medium":
                score += 0.1

            if metadata.get("tags"):
                score += 0.05

        # Context factors
        if context:
            if context.get("user_engagement") == "high":
                score += 0.1
            elif context.get("user_engagement") == "medium":
                score += 0.05

        # Cap the score at 1.0
        return min(score, 1.0)

    def _llm_based_evaluation(
        self,
        content: str,
        metadata: Optional[Dict[str, Any]] = None,
        context: Optional[Dict[str, Any]] = None
    ) -> float:
        """
        LLM-based importance evaluation.

        Args:
            content: Content to evaluate
            metadata: Additional metadata
            context: Additional context

        Returns:
            Importance score between 0 and 1
        """
        if not self.llm:
            logger.warning("LLM not initialized, falling back to rule-based evaluation")
            return self._rule_based_evaluation(content, metadata, context)

        try:
            # Prepare evaluation prompt
            prompt = self.prompts.get_importance_evaluation_prompt(content, metadata, context)

            # Format prompt as messages for LLM
            messages = [
                {"role": "system", "content": self.prompts.get_system_prompt()},
                {"role": "user", "content": prompt}
            ]

            # Call LLM for evaluation
            response = self.llm.generate_response(messages)

            # Parse the response to extract importance score
            importance_score = self._parse_importance_response(response)

            logger.debug(f"LLM evaluated importance: {importance_score}")

            return importance_score

        except Exception as e:
            logger.error(f"LLM-based evaluation failed: {e}, falling back to rule-based")
            return self._rule_based_evaluation(content, metadata, context)

    def get_importance_breakdown(
        self,
        content: str,
        metadata: Optional[Dict[str, Any]] = None,
        context: Optional[Dict[str, Any]] = None
    ) -> Dict[str, float]:
        """
        Get detailed importance breakdown.

        Args:
            content: Content to evaluate
            metadata: Additional metadata
            context: Additional context

        Returns:
            Dictionary with importance breakdown
        """
        breakdown = {}

        for criterion, weight in self.criteria_weights.items():
            # Calculate score for each criterion
            if criterion == "relevance":
                breakdown[criterion] = self._evaluate_relevance(content, context)
            elif criterion == "novelty":
                breakdown[criterion] = self._evaluate_novelty(content, metadata)
            elif criterion == "emotional_impact":
                breakdown[criterion] = self._evaluate_emotional_impact(content)
            elif criterion == "actionable":
                breakdown[criterion] = self._evaluate_actionable(content)
            elif criterion == "factual":
                breakdown[criterion] = self._evaluate_factual(content)
            elif criterion == "personal":
                breakdown[criterion] = self._evaluate_personal(content, metadata)

        return breakdown

    def _evaluate_relevance(self, content: str, context: Optional[Dict[str, Any]]) -> float:
        """Evaluate relevance of content."""
        # Simple keyword-based relevance
        relevance_keywords = ["relevant", "related", "connected", "associated"]
        content_lower = content.lower()

        score = 0.0
        for keyword in relevance_keywords:
            if keyword in content_lower:
                score += 0.25

        return min(score, 1.0)

    def _evaluate_novelty(self, content: str, metadata: Optional[Dict[str, Any]]) -> float:
        """Evaluate novelty of content."""
        # Check for new information indicators
        novelty_indicators = ["new", "first", "never", "unprecedented", "unique"]
        content_lower = content.lower()

        score = 0.0
        for indicator in novelty_indicators:
            if indicator in content_lower:
                score += 0.2

        return min(score, 1.0)

    def _evaluate_emotional_impact(self, content: str) -> float:
        """Evaluate emotional impact of content."""
        # Check for emotional words
        emotional_words = [
            "happy", "sad", "angry", "excited", "worried", "scared",
            "love", "hate", "fear", "joy", "sorrow", "anger"
        ]
        content_lower = content.lower()

        score = 0.0
        for word in emotional_words:
            if word in content_lower:
                score += 0.1

        return min(score, 1.0)

    def _evaluate_actionable(self, content: str) -> float:
        """Evaluate if content is actionable."""
        # Check for action words
        action_words = [
            "do", "make", "create", "build", "fix", "solve",
            "implement", "execute", "perform", "complete"
        ]
        content_lower = content.lower()

        score = 0.0
        for word in action_words:
            if word in content_lower:
                score += 0.1

        return min(score, 1.0)

    def _evaluate_factual(self, content: str) -> float:
        """Evaluate if content contains factual information."""
        # Check for factual indicators
        factual_indicators = [
            "fact", "data", "statistic", "research", "study",
            "evidence", "proof", "confirmed", "verified"
        ]
        content_lower = content.lower()

        score = 0.0
        for indicator in factual_indicators:
            if indicator in content_lower:
                score += 0.15

        return min(score, 1.0)

    def _parse_importance_response(self, response: str) -> float:
        """
        Parse LLM response to extract importance score.

        Args:
            response: LLM response string

        Returns:
            Importance score between 0 and 1
        """
        try:
            # Try to extract JSON from response
            if "{" in response and "}" in response:
                start = response.find("{")
                end = response.rfind("}") + 1
                json_str = response[start:end]

                result = json.loads(json_str)

                if "importance_score" in result:
                    score = float(result["importance_score"])
                    # Ensure score is within valid range
                    return max(0.0, min(1.0, score))

            # Fallback: try to extract number from response
            import re
            numbers = re.findall(r'\d+\.?\d*', response)
            if numbers:
                score = float(numbers[0])
                return max(0.0, min(1.0, score))

            logger.warning(f"Could not parse importance score from response: {response}")
            return 0.5  # Default medium importance

        except Exception as e:
            logger.error(f"Failed to parse importance response: {e}")
            return 0.5  # Default medium importance

    def _evaluate_personal(self, content: str, metadata: Optional[Dict[str, Any]]) -> float:
        """Evaluate if content is personal."""
        # Check for personal indicators
        personal_indicators = [
            "i", "me", "my", "mine", "myself",
            "personal", "private", "confidential"
        ]
        content_lower = content.lower()

        score = 0.0
        for indicator in personal_indicators:
            if indicator in content_lower:
                score += 0.1

        return min(score, 1.0)
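A minimal usage sketch (an illustration, not part of the package diff): when no LLM has been attached via set_llm(), evaluate_importance() takes the rule-based path and sums the length, keyword, punctuation, metadata, and context heuristics shown above, capping the result at 1.0. The empty config dicts below are placeholder assumptions; whether ImportanceEvaluationPrompts accepts an empty config is not confirmed by this diff.

    from powermem.intelligence.importance_evaluator import ImportanceEvaluator

    # Assumed config values; no LLM is set, so evaluation is rule-based.
    evaluator = ImportanceEvaluator(config={}, llm_config={})

    # By the rules above: length > 50 (+0.05), keywords "important",
    # "remember", "password" (+0.3), "!" (+0.05), priority "high" (+0.2) = 0.6
    score = evaluator.evaluate_importance(
        "Important: please remember that my new database password is rotated every month!",
        metadata={"priority": "high"},
    )
    print(score)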
powermem/intelligence/intelligent_memory_manager.py
@@ -0,0 +1,284 @@
"""
Intelligent memory manager

This module implements the main intelligent memory management system.
"""

import logging
from typing import Any, Dict, List, Optional
from datetime import datetime

from .importance_evaluator import ImportanceEvaluator
from .ebbinghaus_algorithm import EbbinghausAlgorithm
from powermem.integrations.llm.factory import LLMFactory

logger = logging.getLogger(__name__)


class IntelligentMemoryManager:
    """
    Intelligent Memory Manager

    Implements complete memory management process:
    1. New information input -> Working memory
    2. Importance evaluation -> Determine storage type
    3. Decay and reinforcement based on Ebbinghaus curve
    4. Automatic cleanup and optimization
    """

    def __init__(self, config: Optional[Dict[str, Any]] = None):
        """
        Initialize intelligent memory manager.

        Args:
            config: Configuration dictionary
        """
        self.config = config or {}
        self.intelligent_config = self.config.get("intelligent_memory", {})

        # Merge top-level custom_importance_evaluation_prompt into intelligent_config
        # so it can be passed to ImportanceEvaluator
        if "custom_importance_evaluation_prompt" in self.config:
            self.intelligent_config = self.intelligent_config.copy() if isinstance(self.intelligent_config, dict) else {}
            self.intelligent_config["custom_importance_evaluation_prompt"] = self.config["custom_importance_evaluation_prompt"]

        # Initialize components
        self.importance_evaluator = ImportanceEvaluator(
            self.intelligent_config,
            self.config.get("llm", {})
        )
        self.ebbinghaus_algorithm = EbbinghausAlgorithm(self.intelligent_config)

        # Initialize LLM for importance evaluation
        self._initialize_llm()

        # Memory storage
        self.working_memories: Dict[str, Dict] = {}
        self.short_term_memories: Dict[str, Dict] = {}
        self.long_term_memories: Dict[str, Dict] = {}

        logger.info("IntelligentMemoryManager initialized")

    def _initialize_llm(self):
        """
        Initialize LLM for importance evaluation.
        """
        try:
            llm_config = self.config.get("llm", {})
            if llm_config:
                llm_provider = llm_config.get("provider", "openai")
                llm_instance = LLMFactory.create(llm_provider, llm_config.get("config", {}))
                self.importance_evaluator.set_llm(llm_instance)
                logger.info(f"LLM initialized for importance evaluation: {llm_provider}")
            else:
                logger.warning("No LLM configuration found, using rule-based evaluation only")
        except Exception as e:
            logger.error(f"Failed to initialize LLM for importance evaluation: {e}")
            logger.warning("Falling back to rule-based evaluation only")

    def process_metadata(
        self,
        content: str,
        metadata: Optional[Dict[str, Any]] = None,
        context: Optional[Dict[str, Any]] = None
    ) -> Dict[str, Any]:
        """
        Process metadata with intelligent memory management.

        Args:
            content: Content to analyze
            metadata: Additional metadata
            context: Additional context

        Returns:
            Enhanced metadata with intelligence analysis
        """
        try:
            # Initialize metadata if None
            if metadata is None:
                metadata = {}

            # Evaluate importance
            importance_score = self.importance_evaluator.evaluate_importance(
                content, metadata, context
            )

            # Determine memory type based on importance
            if importance_score >= 0.8:
                memory_type = "long_term"
            elif importance_score >= 0.5:
                memory_type = "short_term"
            else:
                memory_type = "working"

            # Process with Ebbinghaus algorithm to get intelligence metadata
            intelligence_metadata = self.ebbinghaus_algorithm.process_memory_metadata(
                content, importance_score, memory_type
            )

            # Merge intelligence metadata into existing metadata
            enhanced_metadata = metadata.copy()
            enhanced_metadata.update(intelligence_metadata)

            logger.debug(f"Processed metadata with importance: {importance_score}, type: {memory_type}")

            return enhanced_metadata

        except Exception as e:
            logger.error(f"Failed to process metadata: {e}")
            return metadata or {}

    async def process_metadata_async(
        self,
        content: str,
        metadata: Optional[Dict[str, Any]] = None,
        context: Optional[Dict[str, Any]] = None
    ) -> Dict[str, Any]:
        """
        Process metadata with intelligent memory management asynchronously.

        Args:
            content: Content to analyze
            metadata: Additional metadata
            context: Additional context

        Returns:
            Enhanced metadata with intelligence analysis
        """
        # For now, just call the sync version
        # In a real implementation, this would use async LLM calls
        return self.process_metadata(content, metadata, context)

    def process_search_results(
        self,
        results: List[Dict[str, Any]],
        query: str
    ) -> List[Dict[str, Any]]:
        """
        Process search results with intelligent ranking.

        Args:
            results: Search results
            query: Original query

        Returns:
            Processed and ranked results
        """
        try:
            # Apply Ebbinghaus decay to results
            processed_results = []
            for result in results:
                # Calculate relevance score
                relevance_score = self.ebbinghaus_algorithm.calculate_relevance(
                    result, query
                )

                # Apply decay based on age
                decay_factor = self.ebbinghaus_algorithm.calculate_decay(
                    result.get("created_at", datetime.utcnow())
                )

                # Update result with processed information
                processed_result = result.copy()
                processed_result["relevance_score"] = relevance_score
                processed_result["decay_factor"] = decay_factor
                processed_result["final_score"] = relevance_score * decay_factor

                processed_results.append(processed_result)

            # Sort by final score
            processed_results.sort(key=lambda x: x["final_score"], reverse=True)

            logger.debug(f"Processed {len(processed_results)} search results")

            return processed_results

        except Exception as e:
            logger.error(f"Failed to process search results: {e}")
            return results

    async def process_search_results_async(
        self,
        results: List[Dict[str, Any]],
        query: str
    ) -> List[Dict[str, Any]]:
        """
        Process search results with intelligent ranking asynchronously.

        Args:
            results: Search results
            query: Original query

        Returns:
            Processed and ranked results
        """
        # For now, just call the sync version
        return self.process_search_results(results, query)

    def optimize_memories(self) -> Dict[str, Any]:
        """
        Optimize memory storage based on usage patterns.

        Returns:
            Optimization results
        """
        try:
            optimization_results = {
                "working_to_short": 0,
                "short_to_long": 0,
                "long_to_archive": 0,
                "deleted": 0
            }

            # Process working memories
            for memory_id, memory in list(self.working_memories.items()):
                if self.ebbinghaus_algorithm.should_promote(memory):
                    # Promote to short-term
                    self.short_term_memories[memory_id] = memory
                    del self.working_memories[memory_id]
                    optimization_results["working_to_short"] += 1

            # Process short-term memories
            for memory_id, memory in list(self.short_term_memories.items()):
                if self.ebbinghaus_algorithm.should_promote(memory):
                    # Promote to long-term
                    self.long_term_memories[memory_id] = memory
                    del self.short_term_memories[memory_id]
                    optimization_results["short_to_long"] += 1
                elif self.ebbinghaus_algorithm.should_forget(memory):
                    # Delete forgotten memory
                    del self.short_term_memories[memory_id]
                    optimization_results["deleted"] += 1

            # Process long-term memories
            for memory_id, memory in list(self.long_term_memories.items()):
                if self.ebbinghaus_algorithm.should_archive(memory):
                    # Archive old memory
                    del self.long_term_memories[memory_id]
                    optimization_results["long_to_archive"] += 1

            logger.info(f"Memory optimization completed: {optimization_results}")

            return optimization_results

        except Exception as e:
            logger.error(f"Failed to optimize memories: {e}")
            return {}

    def get_memory_stats(self) -> Dict[str, Any]:
        """
        Get memory statistics.

        Returns:
            Memory statistics
        """
        return {
            "working_memories": len(self.working_memories),
            "short_term_memories": len(self.short_term_memories),
            "long_term_memories": len(self.long_term_memories),
            "total_memories": (
                len(self.working_memories) +
                len(self.short_term_memories) +
                len(self.long_term_memories)
            )
        }