AbstractMemory 0.0.1-py3-none-any.whl → 0.1.0-py3-none-any.whl
This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between versions as they appear in the public registry.
- abstractmemory/__init__.py +744 -31
- abstractmemory/cognitive/__init__.py +1 -0
- abstractmemory/components/__init__.py +1 -0
- abstractmemory/components/core.py +112 -0
- abstractmemory/components/episodic.py +68 -0
- abstractmemory/components/semantic.py +102 -0
- abstractmemory/components/working.py +50 -0
- abstractmemory/core/__init__.py +1 -0
- abstractmemory/core/interfaces.py +95 -0
- abstractmemory/core/temporal.py +100 -0
- abstractmemory/graph/__init__.py +1 -0
- abstractmemory/graph/knowledge_graph.py +178 -0
- abstractmemory/simple.py +151 -0
- abstractmemory/storage/__init__.py +16 -0
- abstractmemory/storage/dual_manager.py +278 -0
- abstractmemory/storage/lancedb_storage.py +425 -0
- abstractmemory/storage/markdown_storage.py +447 -0
- abstractmemory-0.1.0.dist-info/METADATA +331 -0
- abstractmemory-0.1.0.dist-info/RECORD +22 -0
- {abstractmemory-0.0.1.dist-info → abstractmemory-0.1.0.dist-info}/licenses/LICENSE +4 -1
- abstractmemory-0.0.1.dist-info/METADATA +0 -94
- abstractmemory-0.0.1.dist-info/RECORD +0 -6
- {abstractmemory-0.0.1.dist-info → abstractmemory-0.1.0.dist-info}/WHEEL +0 -0
- {abstractmemory-0.0.1.dist-info → abstractmemory-0.1.0.dist-info}/top_level.txt +0 -0
abstractmemory/graph/knowledge_graph.py
ADDED
@@ -0,0 +1,178 @@
+"""
+Temporal knowledge graph implementation.
+"""
+
+import networkx as nx
+from datetime import datetime
+from typing import Dict, List, Optional, Tuple, Any
+
+from abstractmemory.core.temporal import GroundingAnchor, TemporalSpan, RelationalContext
+
+
+class TemporalKnowledgeGraph:
+    """
+    Knowledge graph with bi-temporal modeling.
+    Based on Zep/Graphiti architecture.
+    """
+
+    def __init__(self):
+        self.graph = nx.MultiDiGraph()
+        self._node_counter = 0
+        self._edge_counter = 0
+        self.ontology = {}  # Auto-built ontology
+
+    def add_entity(self, value: str, entity_type: str = 'entity') -> str:
+        """Add or get entity node"""
+        # Check for existing entity (deduplication)
+        for node_id, data in self.graph.nodes(data=True):
+            if data.get('value') == value:
+                # Update access time
+                self.graph.nodes[node_id]['last_accessed'] = datetime.now()
+                return node_id
+
+        # Create new entity
+        node_id = f"entity_{self._node_counter}"
+        self._node_counter += 1
+
+        self.graph.add_node(
+            node_id,
+            value=value,
+            type=entity_type,
+            created_at=datetime.now(),
+            last_accessed=datetime.now(),
+            importance=1.0
+        )
+
+        # Update ontology
+        if entity_type not in self.ontology:
+            self.ontology[entity_type] = []
+        self.ontology[entity_type].append(node_id)
+
+        return node_id
+
+    def add_fact(self, subject: str, predicate: str, object: str,
+                 event_time: datetime, confidence: float = 1.0,
+                 source: Optional[str] = None, ingestion_time: Optional[datetime] = None) -> str:
+        """Add temporally anchored fact"""
+
+        # Get or create nodes
+        subj_id = self.add_entity(subject)
+        obj_id = self.add_entity(object)
+
+        # Create grounding anchor
+        anchor = GroundingAnchor(
+            event_time=event_time,
+            ingestion_time=ingestion_time or datetime.now(),
+            validity_span=TemporalSpan(start=event_time),
+            relational=RelationalContext(user_id="default"),  # Will be updated when used in GroundedMemory
+            confidence=confidence,
+            source=source
+        )
+
+        # Check for contradictions
+        self._handle_contradictions(subj_id, predicate, obj_id, anchor)
+
+        # Add edge with temporal data
+        edge_id = f"edge_{self._edge_counter}"
+        self._edge_counter += 1
+
+        self.graph.add_edge(
+            subj_id, obj_id,
+            key=edge_id,
+            predicate=predicate,
+            anchor=anchor,
+            confidence=confidence,
+            valid=True
+        )
+
+        return edge_id
+
+    def _handle_contradictions(self, subj_id: str, predicate: str,
+                               obj_id: str, new_anchor: GroundingAnchor):
+        """Handle temporal contradictions"""
+        # Check existing edges for contradictions
+        for _, _, key, data in self.graph.edges(subj_id, keys=True, data=True):
+            if data.get('predicate') == predicate and data.get('valid'):
+                old_anchor = data.get('anchor')
+                if old_anchor:
+                    # Check for temporal overlap
+                    if self._has_temporal_overlap(old_anchor, new_anchor):
+                        # Invalidate older fact (new info takes precedence)
+                        if old_anchor.ingestion_time < new_anchor.ingestion_time:
+                            data['valid'] = False
+                            old_anchor.validity_span.end = new_anchor.event_time
+                            old_anchor.validity_span.valid = False
+
+    def _has_temporal_overlap(self, anchor1: GroundingAnchor,
+                              anchor2: GroundingAnchor) -> bool:
+        """Check if two anchors have temporal overlap"""
+        span1 = anchor1.validity_span
+        span2 = anchor2.validity_span
+
+        # If either span has no end, check if starts overlap
+        if span1.end is None or span2.end is None:
+            return True  # Conservative: assume overlap
+
+        # Check for actual overlap
+        return not (span1.end < span2.start or span2.end < span1.start)
+
+    def query_at_time(self, query: str, point_in_time: datetime) -> List[Dict[str, Any]]:
+        """Query knowledge state at specific time"""
+        results = []
+
+        for u, v, key, data in self.graph.edges(keys=True, data=True):
+            anchor = data.get('anchor')
+            if not anchor:
+                continue
+
+            # Check if fact was known and valid at this time
+            if (anchor.ingestion_time <= point_in_time and
+                anchor.event_time <= point_in_time and
+                data.get('valid', True)):  # Default to True if not explicitly set
+
+                # Check if still valid at query time
+                if (anchor.validity_span.end is None or
+                    anchor.validity_span.end > point_in_time):
+
+                    # Check if matches query
+                    if query.lower() in data.get('predicate', '').lower():
+                        results.append({
+                            'subject': self.graph.nodes[u]['value'],
+                            'predicate': data['predicate'],
+                            'object': self.graph.nodes[v]['value'],
+                            'confidence': data.get('confidence', 1.0),
+                            'event_time': anchor.event_time,
+                            'source': getattr(anchor, 'source', None)
+                        })
+
+        return results
+
+    def get_entity_evolution(self, entity: str, start: datetime,
+                             end: datetime) -> List[Dict[str, Any]]:
+        """Track how entity's relationships evolved over time"""
+        # Find entity node
+        entity_id = None
+        for node_id, data in self.graph.nodes(data=True):
+            if data.get('value') == entity:
+                entity_id = node_id
+                break
+
+        if not entity_id:
+            return []
+
+        evolution = []
+
+        # Check all edges involving this entity
+        for u, v, key, data in self.graph.edges(keys=True, data=True):
+            if u == entity_id or v == entity_id:
+                anchor = data.get('anchor')
+                if anchor and start <= anchor.event_time <= end:
+                    evolution.append({
+                        'time': anchor.event_time,
+                        'type': 'fact_added' if data.get('valid') else 'fact_invalidated',
+                        'subject': self.graph.nodes[u]['value'],
+                        'predicate': data['predicate'],
+                        'object': self.graph.nodes[v]['value']
+                    })
+
+        return sorted(evolution, key=lambda x: x['time'])
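For orientation, a minimal usage sketch of the class added above; entity names, dates, and printed fields are illustrative, not part of the package:

from datetime import datetime
from abstractmemory.graph.knowledge_graph import TemporalKnowledgeGraph

kg = TemporalKnowledgeGraph()

# A fact recorded (ingested) shortly after it happened.
kg.add_fact("alice", "works_at", "AcmeCorp",
            event_time=datetime(2023, 1, 1),
            ingestion_time=datetime(2023, 1, 2))

# Point-in-time query: matching is a substring test on the predicate,
# so "works" matches "works_at". Only facts already ingested, already
# occurred, and still marked valid at that moment are returned.
print(kg.query_at_time("works", datetime(2023, 6, 1)))

# A contradicting fact ingested later marks the older edge invalid and
# closes its validity span at the new event time.
kg.add_fact("alice", "works_at", "Globex",
            event_time=datetime(2024, 6, 1),
            ingestion_time=datetime(2024, 6, 2))

# Relationship history for the entity across both facts.
for step in kg.get_entity_evolution("alice", datetime(2023, 1, 1), datetime(2025, 1, 1)):
    print(step["time"], step["type"], step["predicate"], step["object"])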
abstractmemory/simple.py
ADDED
@@ -0,0 +1,151 @@
+"""
+Simple, efficient memory for task-specific agents.
+No over-engineering - just what's needed for the job.
+"""
+
+from typing import List, Optional, Dict, Any
+from collections import deque
+from datetime import datetime
+
+
+class ScratchpadMemory:
+    """
+    Lightweight memory for ReAct agents and single-task tools.
+
+    Use this for:
+    - ReAct agent thought-action-observation cycles
+    - Summarizer working memory
+    - Extractor temporary context
+    - Any agent that doesn't need persistence
+
+    Example:
+        # For a ReAct agent
+        scratchpad = ScratchpadMemory(max_entries=20)
+        scratchpad.add_thought("Need to search for Python tutorials")
+        scratchpad.add_action("search", {"query": "Python basics"})
+        scratchpad.add_observation("Found 10 relevant tutorials")
+
+        # Get full context for next iteration
+        context = scratchpad.get_context()
+    """
+
+    def __init__(self, max_entries: int = 100):
+        """Initialize scratchpad with bounded size"""
+        self.entries: deque = deque(maxlen=max_entries)
+        self.thoughts: List[str] = []
+        self.actions: List[Dict[str, Any]] = []
+        self.observations: List[str] = []
+
+    def add(self, content: str, entry_type: str = "note"):
+        """Add any entry to scratchpad"""
+        entry = {
+            "type": entry_type,
+            "content": content,
+            "timestamp": datetime.now().isoformat()
+        }
+        self.entries.append(entry)
+
+    def add_thought(self, thought: str):
+        """Add a thought (for ReAct pattern)"""
+        self.thoughts.append(thought)
+        self.add(thought, "thought")
+
+    def add_action(self, action: str, params: Optional[Dict] = None):
+        """Add an action (for ReAct pattern)"""
+        action_entry = {"action": action, "params": params or {}}
+        self.actions.append(action_entry)
+        self.add(f"Action: {action} with {params}", "action")
+
+    def add_observation(self, observation: str):
+        """Add an observation (for ReAct pattern)"""
+        self.observations.append(observation)
+        self.add(observation, "observation")
+
+    def get_context(self, last_n: Optional[int] = None) -> str:
+        """Get scratchpad context as string"""
+        entries_to_use = list(self.entries)
+        if last_n:
+            entries_to_use = entries_to_use[-last_n:]
+
+        context_lines = []
+        for entry in entries_to_use:
+            if entry["type"] == "thought":
+                context_lines.append(f"Thought: {entry['content']}")
+            elif entry["type"] == "action":
+                context_lines.append(f"Action: {entry['content']}")
+            elif entry["type"] == "observation":
+                context_lines.append(f"Observation: {entry['content']}")
+            else:
+                context_lines.append(entry['content'])
+
+        return "\n".join(context_lines)
+
+    def get_react_history(self) -> Dict[str, List]:
+        """Get structured ReAct history"""
+        return {
+            "thoughts": self.thoughts,
+            "actions": self.actions,
+            "observations": self.observations
+        }
+
+    def clear(self):
+        """Clear the scratchpad"""
+        self.entries.clear()
+        self.thoughts.clear()
+        self.actions.clear()
+        self.observations.clear()
+
+    def __len__(self) -> int:
+        return len(self.entries)
+
+    def __str__(self) -> str:
+        return f"ScratchpadMemory({len(self.entries)} entries)"
+
+
+class BufferMemory:
+    """
+    Simple conversation buffer (wrapper around BasicSession).
+
+    Use this when BasicSession from AbstractLLM Core is sufficient.
+    This is just a thin adapter for compatibility.
+
+    Example:
+        # For a simple chatbot
+        memory = BufferMemory(max_messages=50)
+        memory.add_message("user", "What's the weather?")
+        memory.add_message("assistant", "I don't have weather data")
+        context = memory.get_context()
+    """
+
+    def __init__(self, max_messages: int = 100):
+        """Initialize buffer with size limit"""
+        self.messages: deque = deque(maxlen=max_messages)
+
+    def add_message(self, role: str, content: str):
+        """Add a message to the buffer"""
+        self.messages.append({
+            "role": role,
+            "content": content,
+            "timestamp": datetime.now().isoformat()
+        })
+
+    def get_messages(self) -> List[Dict[str, str]]:
+        """Get messages for LLM context"""
+        return [{"role": m["role"], "content": m["content"]}
+                for m in self.messages]
+
+    def get_context(self, last_n: Optional[int] = None) -> str:
+        """Get conversation as formatted string"""
+        messages = list(self.messages)
+        if last_n:
+            messages = messages[-last_n:]
+
+        lines = []
+        for msg in messages:
+            lines.append(f"{msg['role']}: {msg['content']}")
+
+        return "\n".join(lines)
+
+    def clear(self):
+        """Clear the buffer"""
+        self.messages.clear()
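One behavior worth noting in the module above: the scratchpad's entries deque is bounded by max_entries, while the typed thoughts/actions/observations lists grow without limit. A small sketch of the difference (values illustrative):

from abstractmemory.simple import ScratchpadMemory

pad = ScratchpadMemory(max_entries=3)
for i in range(5):
    pad.add_thought(f"step {i}")

print(len(pad))           # 3 -- the deque evicted the two oldest entries
print(pad.get_context())  # "Thought: step 2" through "Thought: step 4"
print(len(pad.get_react_history()["thoughts"]))  # 5 -- the list kept everything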
|
abstractmemory/storage/__init__.py
ADDED
@@ -0,0 +1,16 @@
+# Storage backends for memory persistence
+
+from .dual_manager import DualStorageManager
+from .markdown_storage import MarkdownStorage
+
+# LanceDB is optional
+try:
+    from .lancedb_storage import LanceDBStorage
+except ImportError:
+    LanceDBStorage = None
+
+__all__ = [
+    'DualStorageManager',
+    'MarkdownStorage',
+    'LanceDBStorage'
+]
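Because of the try/except above, LanceDBStorage is exported as None rather than raising when the optional lancedb dependency is missing, so callers need to check before instantiating. A caller-side sketch (paths are illustrative; the positional constructor arguments follow the usage in dual_manager.py below):

from abstractmemory.storage import LanceDBStorage, MarkdownStorage

if LanceDBStorage is not None:
    storage = LanceDBStorage("./memory.lancedb", None)  # uri, embedding_provider
else:
    storage = MarkdownStorage("./memory")  # always-available fallback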
abstractmemory/storage/dual_manager.py
ADDED
@@ -0,0 +1,278 @@
+"""
+Dual Storage Manager for managing multiple storage backends.
+Supports markdown, lancedb, dual, or no storage modes.
+"""
+
+from typing import Optional, Dict, List, Literal, Any
+from datetime import datetime
+import logging
+
+from ..core.interfaces import IStorage
+
+logger = logging.getLogger(__name__)
+
+
+class DualStorageManager:
+    """
+    Manages multiple storage backends for verbatim interactions and experiential notes.
+
+    Modes:
+    - "markdown": Human-readable, observable, version-controllable
+    - "lancedb": SQL + vector search capabilities via AbstractCore embeddings
+    - "dual": Write to both, read from LanceDB for performance
+    - None: No persistence (default)
+    """
+
+    def __init__(self,
+                 mode: Optional[Literal["markdown", "lancedb", "dual"]] = None,
+                 markdown_path: Optional[str] = None,
+                 lancedb_uri: Optional[str] = None,
+                 embedding_provider: Optional[Any] = None):
+        """
+        Initialize storage manager.
+
+        Args:
+            mode: Storage mode to use
+            markdown_path: Path for markdown storage
+            lancedb_uri: URI for LanceDB storage
+            embedding_provider: AbstractCore instance for embeddings
+        """
+        self.mode = mode
+        self.embedding_provider = embedding_provider
+
+        # Initialize storage backends based on mode
+        self.markdown_storage = None
+        self.lancedb_storage = None
+
+        if mode in ["markdown", "dual"] and markdown_path:
+            try:
+                from .markdown_storage import MarkdownStorage
+                self.markdown_storage = MarkdownStorage(markdown_path)
+                logger.info(f"Initialized markdown storage at {markdown_path}")
+            except (ImportError, OSError, FileNotFoundError) as e:
+                logger.warning(f"Failed to initialize markdown storage: {e}")
+                self.markdown_storage = None
+
+        if mode in ["lancedb", "dual"] and lancedb_uri:
+            try:
+                from .lancedb_storage import LanceDBStorage
+                self.lancedb_storage = LanceDBStorage(lancedb_uri, embedding_provider)
+                logger.info(f"Initialized LanceDB storage at {lancedb_uri}")
+            except (ImportError, OSError, FileNotFoundError) as e:
+                logger.warning(f"Failed to initialize LanceDB storage: {e}")
+                self.lancedb_storage = None
+
+    def is_enabled(self) -> bool:
+        """Check if any storage backend is enabled"""
+        return self.mode is not None and (self.markdown_storage is not None or self.lancedb_storage is not None)
+
+    def save_interaction(self, user_id: str, timestamp: datetime,
+                         user_input: str, agent_response: str,
+                         topic: str, metadata: Optional[Dict] = None) -> Optional[str]:
+        """
+        Save verbatim interaction to enabled storage backends.
+
+        Returns:
+            Interaction ID if successful, None if no storage enabled
+        """
+        if not self.is_enabled():
+            return None
+
+        interaction_id = None
+
+        # Save to markdown storage
+        if self.markdown_storage:
+            try:
+                interaction_id = self.markdown_storage.save_interaction(
+                    user_id, timestamp, user_input, agent_response, topic, metadata
+                )
+                logger.debug(f"Saved interaction {interaction_id} to markdown storage")
+            except Exception as e:
+                logger.error(f"Failed to save interaction to markdown: {e}")
+
+        # Save to LanceDB storage
+        if self.lancedb_storage:
+            try:
+                # Generate embedding if provider available
+                embedding = None
+                if self.embedding_provider:
+                    text = f"{user_input} {agent_response}"
+                    embedding = self.embedding_provider.generate_embedding(text)
+
+                ldb_id = self.lancedb_storage.save_interaction(
+                    user_id, timestamp, user_input, agent_response, topic, metadata
+                )
+                if interaction_id is None:  # Use LanceDB ID if markdown didn't provide one
+                    interaction_id = ldb_id
+
+                logger.debug(f"Saved interaction {ldb_id} to LanceDB storage")
+            except Exception as e:
+                logger.error(f"Failed to save interaction to LanceDB: {e}")
+
+        return interaction_id
+
+    def save_experiential_note(self, timestamp: datetime, reflection: str,
+                               interaction_id: str, note_type: str = "reflection",
+                               metadata: Optional[Dict] = None) -> Optional[str]:
+        """
+        Save AI experiential note to enabled storage backends.
+
+        Returns:
+            Note ID if successful, None if no storage enabled
+        """
+        if not self.is_enabled():
+            return None
+
+        note_id = None
+
+        # Save to markdown storage
+        if self.markdown_storage:
+            try:
+                note_id = self.markdown_storage.save_experiential_note(
+                    timestamp, reflection, interaction_id, note_type, metadata
+                )
+                logger.debug(f"Saved experiential note {note_id} to markdown storage")
+            except Exception as e:
+                logger.error(f"Failed to save experiential note to markdown: {e}")
+
+        # Save to LanceDB storage
+        if self.lancedb_storage:
+            try:
+                # Generate embedding for reflection if provider available
+                embedding = None
+                if self.embedding_provider:
+                    embedding = self.embedding_provider.generate_embedding(reflection)
+
+                ldb_note_id = self.lancedb_storage.save_experiential_note(
+                    timestamp, reflection, interaction_id, note_type, metadata
+                )
+                if note_id is None:  # Use LanceDB ID if markdown didn't provide one
+                    note_id = ldb_note_id
+
+                logger.debug(f"Saved experiential note {ldb_note_id} to LanceDB storage")
+            except Exception as e:
+                logger.error(f"Failed to save experiential note to LanceDB: {e}")
+
+        return note_id
+
+    def link_interaction_to_note(self, interaction_id: str, note_id: str) -> None:
+        """Create bidirectional link between interaction and experiential note"""
+        if not self.is_enabled():
+            return
+
+        # Link in markdown storage
+        if self.markdown_storage:
+            try:
+                self.markdown_storage.link_interaction_to_note(interaction_id, note_id)
+                logger.debug(f"Linked interaction {interaction_id} to note {note_id} in markdown")
+            except Exception as e:
+                logger.error(f"Failed to create link in markdown: {e}")
+
+        # Link in LanceDB storage
+        if self.lancedb_storage:
+            try:
+                self.lancedb_storage.link_interaction_to_note(interaction_id, note_id)
+                logger.debug(f"Linked interaction {interaction_id} to note {note_id} in LanceDB")
+            except Exception as e:
+                logger.error(f"Failed to create link in LanceDB: {e}")
+
+    def search_interactions(self, query: str, user_id: Optional[str] = None,
+                            start_date: Optional[datetime] = None,
+                            end_date: Optional[datetime] = None) -> List[Dict]:
+        """
+        Search interactions. Prefers LanceDB for performance, falls back to markdown.
+
+        Returns:
+            List of matching interactions
+        """
+        if not self.is_enabled():
+            return []
+
+        # Prefer LanceDB for search if available (SQL + vector capabilities)
+        if self.lancedb_storage:
+            try:
+                results = self.lancedb_storage.search_interactions(
+                    query, user_id, start_date, end_date
+                )
+                logger.debug(f"Found {len(results)} interactions via LanceDB search")
+                return results
+            except Exception as e:
+                logger.error(f"LanceDB search failed: {e}")
+
+        # Fallback to markdown search
+        if self.markdown_storage:
+            try:
+                results = self.markdown_storage.search_interactions(
+                    query, user_id, start_date, end_date
+                )
+                logger.debug(f"Found {len(results)} interactions via markdown search")
+                return results
+            except Exception as e:
+                logger.error(f"Markdown search failed: {e}")
+
+        return []
+
+    def save_memory_component(self, component_name: str, component_data: Any) -> None:
+        """Save memory component (core, semantic, working, episodic) for backup"""
+        if not self.is_enabled():
+            return
+
+        # Save to markdown as human-readable snapshot
+        if self.markdown_storage and hasattr(self.markdown_storage, 'save_memory_component'):
+            try:
+                self.markdown_storage.save_memory_component(component_name, component_data)
+                logger.debug(f"Saved {component_name} component to markdown storage")
+            except Exception as e:
+                logger.error(f"Failed to save {component_name} to markdown: {e}")
+
+        # Save to LanceDB for efficient querying
+        if self.lancedb_storage and hasattr(self.lancedb_storage, 'save_memory_component'):
+            try:
+                self.lancedb_storage.save_memory_component(component_name, component_data)
+                logger.debug(f"Saved {component_name} component to LanceDB storage")
+            except Exception as e:
+                logger.error(f"Failed to save {component_name} to LanceDB: {e}")
+
+    def load_memory_component(self, component_name: str) -> Optional[Any]:
+        """Load memory component. Prefers LanceDB for performance."""
+        if not self.is_enabled():
+            return None
+
+        # Try LanceDB first for performance
+        if self.lancedb_storage and hasattr(self.lancedb_storage, 'load_memory_component'):
+            try:
+                component = self.lancedb_storage.load_memory_component(component_name)
+                if component is not None:
+                    logger.debug(f"Loaded {component_name} component from LanceDB storage")
+                    return component
+            except Exception as e:
+                logger.error(f"Failed to load {component_name} from LanceDB: {e}")
+
+        # Fallback to markdown
+        if self.markdown_storage and hasattr(self.markdown_storage, 'load_memory_component'):
+            try:
+                component = self.markdown_storage.load_memory_component(component_name)
+                if component is not None:
+                    logger.debug(f"Loaded {component_name} component from markdown storage")
+                    return component
+            except Exception as e:
+                logger.error(f"Failed to load {component_name} from markdown: {e}")
+
+        return None
+
+    def get_storage_stats(self) -> Dict[str, Any]:
+        """Get statistics about stored data"""
+        stats = {
+            "mode": self.mode,
+            "markdown_enabled": self.markdown_storage is not None,
+            "lancedb_enabled": self.lancedb_storage is not None,
+            "embedding_provider": self.embedding_provider is not None
+        }
+
+        if self.markdown_storage and hasattr(self.markdown_storage, 'get_stats'):
+            stats["markdown_stats"] = self.markdown_storage.get_stats()
+
+        if self.lancedb_storage and hasattr(self.lancedb_storage, 'get_stats'):
+            stats["lancedb_stats"] = self.lancedb_storage.get_stats()
+
+        return stats
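To close, a minimal end-to-end sketch of the manager added above, assuming both backends initialize successfully; paths, user IDs, and message text are illustrative:

from datetime import datetime
from abstractmemory.storage import DualStorageManager

# "dual" mode writes every record to both backends and prefers LanceDB on reads.
manager = DualStorageManager(mode="dual",
                             markdown_path="./memory_md",
                             lancedb_uri="./memory.lancedb")

if manager.is_enabled():
    interaction_id = manager.save_interaction(
        user_id="alice",
        timestamp=datetime.now(),
        user_input="What's a bi-temporal graph?",
        agent_response="One that tracks both event time and ingestion time.",
        topic="memory",
    )
    note_id = manager.save_experiential_note(
        timestamp=datetime.now(),
        reflection="User is exploring temporal modeling concepts.",
        interaction_id=interaction_id,
    )
    manager.link_interaction_to_note(interaction_id, note_id)
    print(manager.get_storage_stats())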