AbstractMemory 0.0.1__py3-none-any.whl → 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- abstractmemory/__init__.py +744 -31
- abstractmemory/cognitive/__init__.py +1 -0
- abstractmemory/components/__init__.py +1 -0
- abstractmemory/components/core.py +112 -0
- abstractmemory/components/episodic.py +68 -0
- abstractmemory/components/semantic.py +102 -0
- abstractmemory/components/working.py +50 -0
- abstractmemory/core/__init__.py +1 -0
- abstractmemory/core/interfaces.py +95 -0
- abstractmemory/core/temporal.py +100 -0
- abstractmemory/graph/__init__.py +1 -0
- abstractmemory/graph/knowledge_graph.py +178 -0
- abstractmemory/simple.py +151 -0
- abstractmemory/storage/__init__.py +16 -0
- abstractmemory/storage/dual_manager.py +278 -0
- abstractmemory/storage/lancedb_storage.py +425 -0
- abstractmemory/storage/markdown_storage.py +447 -0
- abstractmemory-0.1.0.dist-info/METADATA +331 -0
- abstractmemory-0.1.0.dist-info/RECORD +22 -0
- {abstractmemory-0.0.1.dist-info → abstractmemory-0.1.0.dist-info}/licenses/LICENSE +4 -1
- abstractmemory-0.0.1.dist-info/METADATA +0 -94
- abstractmemory-0.0.1.dist-info/RECORD +0 -6
- {abstractmemory-0.0.1.dist-info → abstractmemory-0.1.0.dist-info}/WHEEL +0 -0
- {abstractmemory-0.0.1.dist-info → abstractmemory-0.1.0.dist-info}/top_level.txt +0 -0
abstractmemory/__init__.py
CHANGED
|
@@ -1,41 +1,754 @@
|
|
|
1
1
|
"""
|
|
2
|
-
AbstractMemory -
|
|
2
|
+
AbstractMemory - Two-tier memory strategy for different agent types.
|
|
3
3
|
|
|
4
|
-
|
|
4
|
+
Simple agents use ScratchpadMemory or BufferMemory.
|
|
5
|
+
Complex agents use full GroundedMemory.
|
|
6
|
+
"""
|
|
5
7
|
|
|
6
|
-
|
|
7
|
-
|
|
8
|
+
from typing import Dict, List, Optional, Any, Union, Literal
|
|
9
|
+
from datetime import datetime
|
|
10
|
+
import uuid
|
|
8
11
|
|
|
9
|
-
|
|
10
|
-
|
|
11
|
-
|
|
12
|
-
|
|
13
|
-
|
|
12
|
+
from .simple import ScratchpadMemory, BufferMemory
|
|
13
|
+
from .core.interfaces import MemoryItem
|
|
14
|
+
from .core.temporal import RelationalContext
|
|
15
|
+
from .components.core import CoreMemory
|
|
16
|
+
from .components.working import WorkingMemory
|
|
17
|
+
from .components.semantic import SemanticMemory
|
|
18
|
+
from .components.episodic import EpisodicMemory
|
|
19
|
+
from .graph.knowledge_graph import TemporalKnowledgeGraph
|
|
14
20
|
|
|
15
|
-
WARNING: This is a placeholder. Do not use in production.
|
|
16
|
-
"""
|
|
17
21
|
|
|
18
|
-
|
|
19
|
-
|
|
20
|
-
|
|
22
|
+
def create_memory(
    memory_type: Literal["scratchpad", "buffer", "grounded"] = "scratchpad",
    **kwargs
) -> Union[ScratchpadMemory, BufferMemory, 'GroundedMemory']:
    """
    Factory: build the memory implementation matching an agent's complexity.

    Args:
        memory_type: Which implementation to build.
            - "scratchpad": ReAct agents and task tools (ScratchpadMemory)
            - "buffer": simple chatbots (BufferMemory)
            - "grounded": autonomous agents with multi-dimensional memory
              (GroundedMemory)
        **kwargs: Forwarded unchanged to the chosen constructor. For
            "grounded" this includes storage options such as
            ``storage_backend`` ("markdown", "lancedb", "dual", or None),
            ``storage_path``, ``storage_uri`` and ``embedding_provider``.

    Returns:
        A freshly constructed memory instance of the requested type.

    Raises:
        ValueError: If ``memory_type`` is not one of the supported names.

    Examples:
        memory = create_memory("scratchpad", max_entries=50)
        memory = create_memory("buffer", max_messages=100)
        memory = create_memory("grounded", working_capacity=10, enable_kg=True)
        memory = create_memory("grounded",
                               storage_backend="dual",
                               storage_path="./memory",
                               storage_uri="./lance.db",
                               embedding_provider=provider)
    """
    # Resolve the constructor first, then build once at a single call site.
    if memory_type == "scratchpad":
        memory_cls = ScratchpadMemory
    elif memory_type == "buffer":
        memory_cls = BufferMemory
    elif memory_type == "grounded":
        memory_cls = GroundedMemory
    else:
        raise ValueError(f"Unknown memory type: {memory_type}")
    return memory_cls(**kwargs)
|
|
83
|
+
|
|
84
|
+
|
|
85
|
+
class GroundedMemory:
|
|
33
86
|
"""
|
|
34
|
-
|
|
35
|
-
|
|
36
|
-
|
|
37
|
-
|
|
38
|
-
)
|
|
39
|
-
|
|
40
|
-
|
|
41
|
-
|
|
87
|
+
Multi-dimensionally grounded memory for autonomous agents.
|
|
88
|
+
Grounds memory in WHO (relational), WHEN (temporal), and WHERE (spatial).
|
|
89
|
+
|
|
90
|
+
Memory Architecture:
|
|
91
|
+
- Core: Agent identity and persona (rarely changes)
|
|
92
|
+
- Semantic: Validated facts and concepts (requires recurrence)
|
|
93
|
+
- Working: Current context (transient)
|
|
94
|
+
- Episodic: Event archive (long-term)
|
|
95
|
+
"""
|
|
96
|
+
|
|
97
|
+
    def __init__(self,
                 working_capacity: int = 10,
                 enable_kg: bool = True,
                 storage_backend: Optional[str] = None,
                 storage_path: Optional[str] = None,
                 storage_uri: Optional[str] = None,
                 embedding_provider: Optional[Any] = None,
                 default_user_id: str = "default",
                 semantic_threshold: int = 3):
        """Initialize grounded memory system.

        Args:
            working_capacity: Max items held in working (transient) memory.
            enable_kg: When True, build a TemporalKnowledgeGraph for fact storage.
            storage_backend: "markdown", "lancedb", "dual", or None (no persistence).
            storage_path: Filesystem path for markdown storage.
            storage_uri: URI for LanceDB storage.
            embedding_provider: Provider used for embeddings by the storage layer.
            default_user_id: User assumed until set_current_user() is called.
            semantic_threshold: Recurrences required before a fact is validated
                into semantic memory.
        """

        # Initialize memory components (Four-tier architecture)
        self.core = CoreMemory()  # Agent identity (rarely updated)
        self.semantic = SemanticMemory(validation_threshold=semantic_threshold)  # Validated facts
        self.working = WorkingMemory(capacity=working_capacity)  # Transient context
        self.episodic = EpisodicMemory()  # Event archive

        # Initialize knowledge graph if enabled
        self.kg = TemporalKnowledgeGraph() if enable_kg else None

        # Relational tracking: who we're talking to and what we know about them.
        self.current_user = default_user_id
        self.user_profiles: Dict[str, Dict] = {}  # User-specific profiles
        self.user_memories: Dict[str, List] = {}  # User-specific memory indices

        # Learning tracking: repeated outcomes feed semantic memory later.
        self.failure_patterns: Dict[str, int] = {}  # Track repeated failures
        self.success_patterns: Dict[str, int] = {}  # Track successful patterns

        # Core memory update tracking — identity changes require recurrence.
        self.core_update_candidates: Dict[str, int] = {}  # Track potential core updates
        self.core_update_threshold = 5  # Require 5 occurrences before core update

        # Initialize new storage manager (markdown / lancedb / dual).
        self.storage_manager = self._init_storage_manager(
            storage_backend, storage_path, storage_uri, embedding_provider
        )

        # Legacy storage backend for compatibility
        self.storage = self._init_storage(storage_backend)
|
|
137
|
+
|
|
138
|
+
def set_current_user(self, user_id: str, relationship: Optional[str] = None):
|
|
139
|
+
"""Set the current user for relational context"""
|
|
140
|
+
self.current_user = user_id
|
|
141
|
+
|
|
142
|
+
# Initialize user profile if new
|
|
143
|
+
if user_id not in self.user_profiles:
|
|
144
|
+
self.user_profiles[user_id] = {
|
|
145
|
+
"first_seen": datetime.now(),
|
|
146
|
+
"relationship": relationship or "unknown",
|
|
147
|
+
"interaction_count": 0,
|
|
148
|
+
"preferences": {},
|
|
149
|
+
"facts": []
|
|
150
|
+
}
|
|
151
|
+
self.user_memories[user_id] = []
|
|
152
|
+
|
|
153
|
+
    def add_interaction(self, user_input: str, agent_response: str,
                        user_id: Optional[str] = None):
        """Record one user/agent exchange across every memory tier.

        Args:
            user_input: The user's message.
            agent_response: The agent's reply.
            user_id: Who spoke; defaults to the current user.

        Side effects: adds the user turn to working memory, archives the full
        exchange in episodic memory, bumps the user's interaction count,
        extracts facts into the knowledge graph (if enabled), and — when
        storage is enabled — persists the interaction and possibly an
        experiential reflection note linked to it.
        """
        now = datetime.now()
        user_id = user_id or self.current_user

        # Create relational context (the WHO grounding for this exchange).
        relational = RelationalContext(
            user_id=user_id,
            agent_id="main",
            relationship=self.user_profiles.get(user_id, {}).get("relationship"),
            session_id=str(uuid.uuid4())[:8]  # short random session tag
        )

        # Add to working memory with relational context
        user_item = MemoryItem(
            content={
                'role': 'user',
                'text': user_input,
                'user_id': user_id  # Track who said it
            },
            event_time=now,
            ingestion_time=now,
            metadata={'relational': relational.__dict__}
        )
        item_id = self.working.add(user_item)

        # Track in user-specific memory index (only for known users).
        if user_id in self.user_memories:
            self.user_memories[user_id].append(item_id)

        # Update user profile interaction count.
        if user_id in self.user_profiles:
            self.user_profiles[user_id]["interaction_count"] += 1

        # Add to episodic memory with full context (both sides of the exchange).
        episode = MemoryItem(
            content={
                'interaction': {
                    'user': user_input,
                    'agent': agent_response,
                    'user_id': user_id
                }
            },
            event_time=now,
            ingestion_time=now,
            metadata={'relational': relational.__dict__}
        )
        self.episodic.add(episode)

        # Extract facts if KG enabled
        if self.kg:
            self._extract_facts_to_kg(agent_response, now)

        # Save interaction and generate experiential note if storage enabled.
        # hasattr guard keeps this safe for instances created before
        # storage_manager existed (e.g. loaded from an older pickle).
        if hasattr(self, 'storage_manager') and self.storage_manager and self.storage_manager.is_enabled():
            # Extract topic for the interaction
            topic = self._extract_topic(user_input, agent_response)

            # Save verbatim interaction
            interaction_id = self.storage_manager.save_interaction(
                user_id=user_id,
                timestamp=now,
                user_input=user_input,
                agent_response=agent_response,
                topic=topic,
                metadata={
                    'relational': relational.__dict__,
                    'session_id': relational.session_id,
                    # NOTE(review): assumes MemoryItem defines a `confidence`
                    # attribute with a default — confirm against core.interfaces.
                    'confidence': episode.confidence
                }
            )

            # Generate experiential note if conditions met
            if self._should_reflect(user_input, agent_response, user_id):
                reflection = self._generate_reflection(user_input, agent_response, user_id, relational)
                if reflection:
                    note_id = self.storage_manager.save_experiential_note(
                        timestamp=now,
                        reflection=reflection,
                        # Fallback id when the backend didn't return one.
                        interaction_id=interaction_id or f"int_{now.timestamp()}",
                        note_type="interaction_reflection",
                        metadata={
                            'user_id': user_id,
                            'trigger': 'interaction',
                            'confidence_change': self._calculate_confidence_change(user_input, agent_response)
                        }
                    )

                    # Create bidirectional link between the stored interaction
                    # and its reflection note.
                    if interaction_id and note_id:
                        self.storage_manager.link_interaction_to_note(interaction_id, note_id)
|
|
245
|
+
|
|
246
|
+
    def _extract_facts_to_kg(self, text: str, event_time: datetime):
        """Extract simple subject-predicate-object facts from text into the KG.

        Args:
            text: Free text to mine for facts (typically the agent response).
            event_time: Timestamp the facts are grounded to in the temporal KG.
        """
        # Simplified extraction - would use NLP/LLM in production
        # Look for patterns like "X is Y" or "X has Y"
        import re

        # One regex per supported predicate; \w+ restricts both sides to
        # single words, so multi-word entities are not captured.
        patterns = [
            r'(\w+)\s+is\s+(\w+)',
            r'(\w+)\s+has\s+(\w+)',
            r'(\w+)\s+can\s+(\w+)',
        ]

        for pattern in patterns:
            matches = re.findall(pattern, text, re.IGNORECASE)
            for match in matches:
                if len(match) == 2:
                    self.kg.add_fact(
                        subject=match[0],
                        # Recover the predicate from which pattern matched;
                        # checked in the order is/has/can.
                        predicate='is' if 'is' in pattern else 'has' if 'has' in pattern else 'can',
                        object=match[1],
                        event_time=event_time
                    )
|
|
268
|
+
|
|
269
|
+
    def _should_reflect(self, user_input: str, agent_response: str, user_id: str) -> bool:
        """Decide whether this exchange deserves an experiential note.

        Returns True on: user self-disclosure, outcome-pattern language,
        a topic shift, a large confidence swing (>0.3 absolute), or every
        10th interaction with a known user.
        """

        # Always reflect on learning about users
        if self._contains_user_learning(user_input, agent_response):
            return True

        # Reflect on pattern recognition (failures/successes)
        if self._contains_pattern_learning(user_input, agent_response):
            return True

        # Reflect on significant topic shifts
        if self._is_significant_topic_shift(user_input):
            return True

        # Reflect on high-confidence interactions
        confidence_change = self._calculate_confidence_change(user_input, agent_response)
        if abs(confidence_change) > 0.3:
            return True

        # Periodic reflection (every 10th interaction)
        if user_id in self.user_profiles:
            interaction_count = self.user_profiles[user_id]["interaction_count"]
            if interaction_count % 10 == 0:
                return True

        return False
|
|
296
|
+
|
|
297
|
+
    def _generate_reflection(self, user_input: str, agent_response: str,
                             user_id: str, relational: RelationalContext) -> str:
        """Generate AI experiential note (markdown) about the interaction.

        Args:
            user_input: The user's message.
            agent_response: The agent's reply.
            user_id: Who spoke.
            relational: Relational context for this exchange (WHO grounding).

        Returns:
            A markdown document with sections: Interaction Analysis,
            Key Observations, Memory Impact, Future Considerations.
        """

        # Analyze interaction patterns — each detected pattern becomes one
        # bullet in "Key Observations".
        patterns = []

        if self._contains_user_learning(user_input, agent_response):
            patterns.append("🧠 **User Learning Detected**: New information about user preferences or characteristics")

        if self._contains_pattern_learning(user_input, agent_response):
            patterns.append("📊 **Pattern Recognition**: Identified recurring behavior or outcome patterns")

        confidence_change = self._calculate_confidence_change(user_input, agent_response)
        if confidence_change > 0.2:
            patterns.append(f"⬆️ **Confidence Boost**: Interaction increased confidence by {confidence_change:.2f}")
        elif confidence_change < -0.2:
            patterns.append(f"⬇️ **Uncertainty Introduced**: Interaction decreased confidence by {abs(confidence_change):.2f}")

        # Generate reflection content; user_input is truncated to 100 chars.
        reflection_parts = [
            f"## Interaction Analysis",
            f"**User**: {user_id} ({relational.relationship})",
            f"**Context**: {user_input[:100]}..." if len(user_input) > 100 else f"**Context**: {user_input}",
            "",
            "## Key Observations"
        ]

        if patterns:
            reflection_parts.extend(patterns)
        else:
            reflection_parts.append("📝 **Routine Interaction**: Standard conversational exchange with no significant patterns detected")

        # Add learning insights
        reflection_parts.extend([
            "",
            "## Memory Impact",
            f"- **Working Memory**: Added interaction to recent context",
            f"- **Episodic Memory**: Stored as complete interaction episode"
        ])

        if self._contains_facts(agent_response):
            reflection_parts.append("- **Semantic Memory**: Potential facts identified for validation")

        if self.kg:
            reflection_parts.append("- **Knowledge Graph**: Updated entity relationships")

        # Future considerations
        reflection_parts.extend([
            "",
            "## Future Considerations",
            self._generate_future_considerations(user_input, agent_response, user_id)
        ])

        return "\n".join(reflection_parts)
|
|
352
|
+
|
|
353
|
+
def _contains_user_learning(self, user_input: str, agent_response: str) -> bool:
|
|
354
|
+
"""Check if interaction contains learning about the user"""
|
|
355
|
+
user_indicators = [
|
|
356
|
+
"i am", "i'm", "my", "i like", "i prefer", "i work", "i live",
|
|
357
|
+
"i think", "i believe", "i usually", "i tend to"
|
|
358
|
+
]
|
|
359
|
+
return any(indicator in user_input.lower() for indicator in user_indicators)
|
|
360
|
+
|
|
361
|
+
def _contains_pattern_learning(self, user_input: str, agent_response: str) -> bool:
|
|
362
|
+
"""Check if interaction contains pattern learning"""
|
|
363
|
+
pattern_indicators = [
|
|
364
|
+
"failed", "error", "worked", "success", "usually", "often",
|
|
365
|
+
"always", "never", "typically", "tends to"
|
|
366
|
+
]
|
|
367
|
+
combined_text = f"{user_input} {agent_response}".lower()
|
|
368
|
+
return any(indicator in combined_text for indicator in pattern_indicators)
|
|
369
|
+
|
|
370
|
+
def _is_significant_topic_shift(self, user_input: str) -> bool:
|
|
371
|
+
"""Check if this represents a significant topic shift"""
|
|
372
|
+
# Simple heuristic: check for topic transition words
|
|
373
|
+
transition_words = [
|
|
374
|
+
"by the way", "actually", "also", "now", "next", "moving on",
|
|
375
|
+
"switching topics", "changing subject"
|
|
376
|
+
]
|
|
377
|
+
return any(word in user_input.lower() for word in transition_words)
|
|
378
|
+
|
|
379
|
+
def _calculate_confidence_change(self, user_input: str, agent_response: str) -> float:
|
|
380
|
+
"""Calculate the confidence change from this interaction"""
|
|
381
|
+
# Simple heuristic based on certainty indicators
|
|
382
|
+
confidence_boost = [
|
|
383
|
+
"exactly", "definitely", "certainly", "absolutely", "confirmed",
|
|
384
|
+
"correct", "right", "yes", "perfect"
|
|
385
|
+
]
|
|
386
|
+
|
|
387
|
+
confidence_reduction = [
|
|
388
|
+
"maybe", "perhaps", "might", "could be", "not sure",
|
|
389
|
+
"uncertain", "unclear", "confused", "don't know"
|
|
390
|
+
]
|
|
391
|
+
|
|
392
|
+
response_lower = agent_response.lower()
|
|
393
|
+
|
|
394
|
+
boost_count = sum(1 for word in confidence_boost if word in response_lower)
|
|
395
|
+
reduction_count = sum(1 for word in confidence_reduction if word in response_lower)
|
|
396
|
+
|
|
397
|
+
# Scale to reasonable range
|
|
398
|
+
return (boost_count - reduction_count) * 0.1
|
|
399
|
+
|
|
400
|
+
def _contains_facts(self, text: str) -> bool:
|
|
401
|
+
"""Check if text contains factual statements"""
|
|
402
|
+
fact_patterns = [
|
|
403
|
+
r'\w+ is \w+', r'\w+ has \w+', r'\w+ can \w+',
|
|
404
|
+
r'\w+ means \w+', r'\w+ equals \w+'
|
|
405
|
+
]
|
|
406
|
+
|
|
407
|
+
import re
|
|
408
|
+
for pattern in fact_patterns:
|
|
409
|
+
if re.search(pattern, text, re.IGNORECASE):
|
|
410
|
+
return True
|
|
411
|
+
return False
|
|
412
|
+
|
|
413
|
+
def _generate_future_considerations(self, user_input: str, agent_response: str, user_id: str) -> str:
|
|
414
|
+
"""Generate considerations for future interactions"""
|
|
415
|
+
considerations = []
|
|
416
|
+
|
|
417
|
+
# User-specific considerations
|
|
418
|
+
if user_id in self.user_profiles:
|
|
419
|
+
profile = self.user_profiles[user_id]
|
|
420
|
+
if profile["interaction_count"] < 5:
|
|
421
|
+
considerations.append("👋 Early interaction - continue building user profile")
|
|
422
|
+
elif len(profile.get("facts", [])) < 3:
|
|
423
|
+
considerations.append("🔍 Learn more about user preferences and background")
|
|
424
|
+
|
|
425
|
+
# Topic-specific considerations
|
|
426
|
+
if "help" in user_input.lower():
|
|
427
|
+
considerations.append("🤝 User seeking assistance - prioritize helpful, clear responses")
|
|
428
|
+
|
|
429
|
+
if "learn" in user_input.lower():
|
|
430
|
+
considerations.append("📚 User in learning mode - provide educational content")
|
|
431
|
+
|
|
432
|
+
# Default consideration
|
|
433
|
+
if not considerations:
|
|
434
|
+
considerations.append("💭 Monitor for patterns and user preference indicators")
|
|
435
|
+
|
|
436
|
+
return " • ".join(considerations)
|
|
437
|
+
|
|
438
|
+
def _extract_topic(self, user_input: str, agent_response: str) -> str:
|
|
439
|
+
"""Extract main topic from interaction"""
|
|
440
|
+
# Simple topic extraction - could be enhanced with NLP
|
|
441
|
+
text = f"{user_input} {agent_response}".lower()
|
|
442
|
+
|
|
443
|
+
# Look for key terms
|
|
444
|
+
topics = []
|
|
445
|
+
if "python" in text:
|
|
446
|
+
topics.append("python")
|
|
447
|
+
if "code" in text or "programming" in text:
|
|
448
|
+
topics.append("coding")
|
|
449
|
+
if "learn" in text or "teach" in text:
|
|
450
|
+
topics.append("learning")
|
|
451
|
+
if "help" in text or "assist" in text:
|
|
452
|
+
topics.append("assistance")
|
|
453
|
+
if "memory" in text or "remember" in text:
|
|
454
|
+
topics.append("memory")
|
|
455
|
+
|
|
456
|
+
# Default topic from first few words of user input
|
|
457
|
+
if not topics:
|
|
458
|
+
words = user_input.split()[:3]
|
|
459
|
+
topic = "_".join(word.lower().strip(".,!?") for word in words if word.isalpha())
|
|
460
|
+
topics.append(topic or "general")
|
|
461
|
+
|
|
462
|
+
return topics[0]
|
|
463
|
+
|
|
464
|
+
    def get_full_context(self, query: str, max_items: int = 5,
                         user_id: Optional[str] = None) -> str:
        """Assemble user-specific context from every memory tier.

        Args:
            query: Retrieval query used against semantic, working, episodic
                memory and the knowledge graph.
            max_items: Cap per section (semantic memory gets max_items // 2).
            user_id: Whose lens to view through; defaults to the current user.

        Returns:
            A multi-section text block (profile, core identity, learned facts,
            failure warnings, recent context, episodes, KG facts), or
            "No relevant context found." when every section is empty.
        """
        user_id = user_id or self.current_user
        context_parts = []

        # Include user profile if known
        if user_id in self.user_profiles:
            profile = self.user_profiles[user_id]
            context_parts.append(f"=== User Profile: {user_id} ===")
            context_parts.append(f"Relationship: {profile['relationship']}")
            context_parts.append(f"Known for: {profile['interaction_count']} interactions")
            if profile.get('facts'):
                # Only the first three known facts are surfaced.
                context_parts.append(f"Known facts: {', '.join(profile['facts'][:3])}")

        # Always include core memory (agent identity)
        core_context = self.core.get_context()
        if core_context:
            context_parts.append("\n=== Core Memory (Identity) ===")
            context_parts.append(core_context)

        # Include relevant semantic memory (validated facts).
        # NOTE(review): max_items // 2 is 0 when max_items == 1 — confirm
        # SemanticMemory.retrieve treats limit=0 as "nothing" intentionally.
        semantic_facts = self.semantic.retrieve(query, limit=max_items//2)
        if semantic_facts:
            context_parts.append("\n=== Learned Facts ===")
            for fact in semantic_facts:
                context_parts.append(f"- {fact.content} (confidence: {fact.confidence:.2f})")

        # Check for learned failures/successes relevant to query (first hit only).
        for pattern, count in self.failure_patterns.items():
            if query.lower() in pattern.lower() and count >= 2:
                context_parts.append(f"\n⚠️ Warning: Previous failures with similar action ({count} times)")
                break

        # Get from working memory (recent context)
        working_items = self.working.retrieve(query, limit=max_items)
        if working_items:
            context_parts.append("\n=== Recent Context ===")
            for item in working_items:
                if isinstance(item.content, dict):
                    context_parts.append(f"- {item.content.get('text', str(item.content))}")

        # Get from episodic memory (retrieved as needed); entries truncated.
        episodes = self.episodic.retrieve(query, limit=max_items)
        if episodes:
            context_parts.append("\n=== Relevant Episodes ===")
            for episode in episodes:
                context_parts.append(f"- {str(episode.content)[:100]}...")

        # Get from knowledge graph, as of now.
        if self.kg:
            facts = self.kg.query_at_time(query, datetime.now())
            if facts:
                context_parts.append("\n=== Known Facts ===")
                for fact in facts[:max_items]:
                    context_parts.append(
                        f"- {fact['subject']} {fact['predicate']} {fact['object']}"
                    )

        return "\n\n".join(context_parts) if context_parts else "No relevant context found."
|
|
524
|
+
|
|
525
|
+
def retrieve_context(self, query: str, max_items: int = 5) -> str:
|
|
526
|
+
"""Backward compatibility wrapper"""
|
|
527
|
+
return self.get_full_context(query, max_items)
|
|
528
|
+
|
|
529
|
+
    def _init_storage_manager(self, backend: Optional[str], storage_path: Optional[str],
                              storage_uri: Optional[str], embedding_provider: Optional[Any]):
        """Initialize dual storage manager.

        Args:
            backend: Storage mode ("markdown", "lancedb", "dual") or None to disable.
            storage_path: Filesystem path for markdown storage.
            storage_uri: URI for LanceDB storage.
            embedding_provider: Provider used by the vector store for embeddings.

        Returns:
            A DualStorageManager, or None when storage is disabled or its
            optional dependencies are missing (degrades gracefully).
        """
        if backend is None:
            return None

        try:
            from .storage.dual_manager import DualStorageManager
            return DualStorageManager(
                mode=backend,
                markdown_path=storage_path,
                lancedb_uri=storage_uri,
                embedding_provider=embedding_provider
            )
        except ImportError as e:
            # Optional storage deps missing — warn and run without persistence.
            import logging
            logging.warning(f"Failed to initialize storage manager: {e}")
            return None
|
|
547
|
+
|
|
548
|
+
    def _init_storage(self, backend: Optional[str]):
        """Initialize storage backend (legacy compatibility).

        Args:
            backend: "lancedb", "file", or anything else (-> None).

        Returns:
            A legacy storage object, or None when unsupported/unavailable.
        """
        if backend == 'lancedb':
            try:
                from .storage.lancedb_storage import LanceDBStorage
                # Hard-coded default URI; the new storage manager is the
                # configurable path.
                return LanceDBStorage("./lance.db")
            except ImportError:
                return None
        elif backend == 'file':
            try:
                # NOTE(review): storage/file_storage.py is not in this wheel's
                # file list, so this import always fails and the branch
                # effectively returns None — confirm whether 'file' support
                # was dropped intentionally.
                from .storage.file_storage import FileStorage
                return FileStorage()
            except ImportError:
                return None
        return None
|
|
563
|
+
|
|
564
|
+
    def save(self, path: str):
        """Save memory to disk.

        Prefers the new storage manager (which ignores *path* and uses its own
        configured locations); falls back to the legacy per-component storage
        rooted at *path*. No-op when neither backend is available.

        Args:
            path: Root directory for the legacy storage layout only.
        """
        # Use new storage manager if available
        if self.storage_manager and self.storage_manager.is_enabled():
            # Save each component to storage manager
            self.storage_manager.save_memory_component("core", self.core)
            self.storage_manager.save_memory_component("semantic", self.semantic)
            self.storage_manager.save_memory_component("working", self.working)
            self.storage_manager.save_memory_component("episodic", self.episodic)
            if self.kg:
                self.storage_manager.save_memory_component("knowledge_graph", self.kg)

            # Save user profiles and patterns
            self.storage_manager.save_memory_component("user_profiles", self.user_profiles)
            self.storage_manager.save_memory_component("failure_patterns", self.failure_patterns)
            self.storage_manager.save_memory_component("success_patterns", self.success_patterns)

        # Fallback to legacy storage
        elif self.storage:
            # Save each component (four-tier architecture)
            self.storage.save(f"{path}/core", self.core)
            self.storage.save(f"{path}/semantic", self.semantic)
            self.storage.save(f"{path}/working", self.working)
            self.storage.save(f"{path}/episodic", self.episodic)
            if self.kg:
                self.storage.save(f"{path}/kg", self.kg)
|
|
590
|
+
|
|
591
|
+
    def load(self, path: str):
        """Load memory from disk.

        Mirrors :meth:`save`: prefers the storage manager, falls back to the
        legacy layout rooted at *path*. Core/semantic reconstruction from the
        storage manager is NOT implemented yet (explicit `pass` placeholders);
        only user profiles and success/failure patterns are restored there.

        Args:
            path: Root directory for the legacy storage layout only.
        """
        # Use new storage manager if available
        if self.storage_manager and self.storage_manager.is_enabled():
            # Load each component from storage manager
            core_data = self.storage_manager.load_memory_component("core")
            if core_data:
                # Reconstruct core memory from data
                pass  # Would need to implement reconstruction logic

            semantic_data = self.storage_manager.load_memory_component("semantic")
            if semantic_data:
                # Reconstruct semantic memory from data
                pass  # Would need to implement reconstruction logic

            # Load user profiles and patterns
            user_profiles = self.storage_manager.load_memory_component("user_profiles")
            if user_profiles:
                self.user_profiles = user_profiles

            failure_patterns = self.storage_manager.load_memory_component("failure_patterns")
            if failure_patterns:
                self.failure_patterns = failure_patterns

            success_patterns = self.storage_manager.load_memory_component("success_patterns")
            if success_patterns:
                self.success_patterns = success_patterns

        # Fallback to legacy storage
        elif self.storage and self.storage.exists(path):
            # Load components (four-tier architecture)
            if self.storage.exists(f"{path}/core"):
                self.core = self.storage.load(f"{path}/core")
            if self.storage.exists(f"{path}/semantic"):
                self.semantic = self.storage.load(f"{path}/semantic")
            # NOTE(review): working/episodic are loaded without an exists()
            # guard, unlike core/semantic/kg — confirm intentional.
            self.working = self.storage.load(f"{path}/working")
            self.episodic = self.storage.load(f"{path}/episodic")
            if self.storage.exists(f"{path}/kg"):
                self.kg = self.storage.load(f"{path}/kg")
|
|
630
|
+
|
|
631
|
+
def learn_about_user(self, fact: str, user_id: Optional[str] = None):
|
|
632
|
+
"""Learn and remember a fact about a specific user"""
|
|
633
|
+
user_id = user_id or self.current_user
|
|
634
|
+
|
|
635
|
+
if user_id in self.user_profiles:
|
|
636
|
+
# Add to user's facts
|
|
637
|
+
if 'facts' not in self.user_profiles[user_id]:
|
|
638
|
+
self.user_profiles[user_id]['facts'] = []
|
|
639
|
+
|
|
640
|
+
# Track for potential core memory update (requires recurrence)
|
|
641
|
+
core_key = f"user:{user_id}:{fact}"
|
|
642
|
+
self.core_update_candidates[core_key] = self.core_update_candidates.get(core_key, 0) + 1
|
|
643
|
+
|
|
644
|
+
# Add to user's facts if not already there
|
|
645
|
+
if fact not in self.user_profiles[user_id]['facts']:
|
|
646
|
+
self.user_profiles[user_id]['facts'].append(fact)
|
|
647
|
+
|
|
648
|
+
# Only update core memory after threshold met
|
|
649
|
+
if self.core_update_candidates[core_key] >= self.core_update_threshold:
|
|
650
|
+
current_info = self.core.blocks.get("user_info").content
|
|
651
|
+
updated_info = f"{current_info}\n- {fact}"
|
|
652
|
+
self.core.update_block("user_info", updated_info,
|
|
653
|
+
f"Validated through recurrence: {fact}")
|
|
654
|
+
del self.core_update_candidates[core_key]
|
|
655
|
+
|
|
656
|
+
    def track_failure(self, action: str, context: str):
        """Track a failed action to learn from mistakes.

        Increments the failure counter for (action, context). At 3+ failures,
        records a learned constraint in semantic memory — added repeatedly so
        it immediately clears the semantic validation threshold.

        Args:
            action: What was attempted.
            context: Situation in which it failed.
        """
        failure_key = f"{action}:{context}"
        self.failure_patterns[failure_key] = self.failure_patterns.get(failure_key, 0) + 1

        # After repeated failures, add to semantic memory as a learned constraint
        if self.failure_patterns[failure_key] >= 3:
            fact = f"Action '{action}' tends to fail in context: {context}"
            fact_item = MemoryItem(
                content=fact,
                event_time=datetime.now(),
                ingestion_time=datetime.now(),
                confidence=0.9,
                metadata={'type': 'learned_constraint', 'failure_count': self.failure_patterns[failure_key]}
            )
            # Add multiple times to reach semantic validation threshold
            for _ in range(self.semantic.validation_threshold):
                self.semantic.add(fact_item)
|
|
674
|
+
|
|
675
|
+
def track_success(self, action: str, context: str):
    """Record a successful action and, after repeated successes, learn a strategy.

    Each (action, context) pair is counted; once it has succeeded 3 or more
    times, a 'learned_strategy' fact is pushed into semantic memory so the
    agent can reuse what works.
    """
    key = f"{action}:{context}"
    count = self.success_patterns.get(key, 0) + 1
    self.success_patterns[key] = count

    # A pattern that keeps working is worth remembering as a strategy.
    if count >= 3:
        strategy = MemoryItem(
            content=f"Action '{action}' works well in context: {context}",
            event_time=datetime.now(),
            ingestion_time=datetime.now(),
            confidence=0.9,
            metadata={'type': 'learned_strategy', 'success_count': count},
        )
        # Semantic memory only accepts facts seen repeatedly; feed the item
        # enough times to clear its validation threshold immediately.
        for _ in range(self.semantic.validation_threshold):
            self.semantic.add(strategy)
|
|
693
|
+
|
|
694
|
+
def consolidate_memories(self):
    """Promote working-memory items into semantic/episodic stores.

    Items whose text contains a definitional word ("is", "are", "means",
    "equals") as a whole word are treated as candidate facts for semantic
    memory; high-confidence (> 0.7) or explicitly-important items are
    archived to episodic memory. Finally, semantic memory consolidates
    its concepts.
    """
    fact_markers = {'is', 'are', 'means', 'equals'}

    for item in self.working.get_context_window():
        # Heuristic fact detection. Match marker words as whole tokens:
        # the previous substring test ("'is' in text") falsely fired on
        # words like "this", "fish" or "care". Whitespace tokenization
        # still misses punctuation-attached forms ("is.") — acceptable
        # for a heuristic.
        if isinstance(item.content, dict):
            tokens = item.content.get('text', '').lower().split()
            if fact_markers.intersection(tokens):
                self.semantic.add(item)

        # High-confidence or explicitly flagged items become episodes.
        if item.confidence > 0.7 or (item.metadata and item.metadata.get('important')):
            self.episodic.add(item)

    # Merge/refresh semantic concepts after ingestion.
    self.semantic.consolidate()
|
|
713
|
+
|
|
714
|
+
def get_user_context(self, user_id: str) -> Optional[Dict]:
    """Return the stored profile for *user_id*, or None if unknown."""
    try:
        return self.user_profiles[user_id]
    except KeyError:
        return None
|
|
717
|
+
|
|
718
|
+
def search_stored_interactions(self, query: str, user_id: Optional[str] = None,
                               start_date: Optional[datetime] = None,
                               end_date: Optional[datetime] = None) -> List[Dict]:
    """Search persisted interactions and experiential notes.

    Returns an empty list when no storage manager is configured or
    storage is disabled.
    """
    manager = self.storage_manager
    if not (manager and manager.is_enabled()):
        return []
    return manager.search_interactions(query, user_id, start_date, end_date)
|
|
725
|
+
|
|
726
|
+
def get_storage_stats(self) -> Dict[str, Any]:
    """Return storage-layer statistics, or a disabled marker when storage is off."""
    manager = self.storage_manager
    if manager and manager.is_enabled():
        return manager.get_storage_stats()
    return {"storage_enabled": False}
|
|
731
|
+
|
|
732
|
+
def update_core_memory(self, block_id: str, content: str, reasoning: str = "") -> bool:
    """Self-edit a core memory block, delegating to the core store.

    Returns the core store's success flag.
    """
    outcome = self.core.update_block(block_id, content, reasoning)
    return outcome
|
|
735
|
+
|
|
736
|
+
def get_core_memory_context(self) -> str:
    """Render the always-accessible core memory (identity, user info) as context."""
    context_text = self.core.get_context()
    return context_text
|
|
739
|
+
|
|
740
|
+
|
|
741
|
+
# Public API surface: the factory function plus every class callers may
# import directly from `abstractmemory`.
__all__ = [
    'create_memory',  # Factory function — builds the right memory type per agent
    'ScratchpadMemory',  # Simple memory for task agents
    'BufferMemory',  # Simple buffer for chatbots
    'GroundedMemory',  # Multi-dimensional memory for autonomous agents
    'MemoryItem',  # Data structure shared by all memory components
    'CoreMemory',  # Core memory component (identity, always in context)
    'SemanticMemory',  # Semantic memory component (validated facts)
    'WorkingMemory',  # Working memory component (transient context window)
    'EpisodicMemory',  # Episodic memory component (events)
    'TemporalKnowledgeGraph',  # Knowledge graph with temporal grounding
    'RelationalContext'  # For tracking who (relational grounding)
]
|