AbstractMemory 0.0.1__py3-none-any.whl → 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1 @@
1
+ # Cognitive memory patterns and learning
@@ -0,0 +1 @@
1
+ # Memory components for four-tier architecture
@@ -0,0 +1,112 @@
1
+ """
2
+ Core memory - always-accessible foundational facts (MemGPT/Letta pattern).
3
+ """
4
+
5
+ from typing import Dict, Optional, List
6
+ from datetime import datetime
7
+ from dataclasses import dataclass
8
+
9
+ from abstractmemory.core.interfaces import IMemoryComponent, MemoryItem
10
+
11
+
12
@dataclass
class CoreMemoryBlock:
    """A single always-in-context block of core memory."""
    block_id: str
    label: str  # "persona" or "user_info"
    content: str  # Max ~200 tokens
    last_updated: datetime
    edit_count: int = 0

    def update(self, new_content: str, agent_reasoning: str):
        """Replace this block's content (agent self-editing).

        ``agent_reasoning`` is accepted for the caller's audit trail but is
        not stored on the block itself.
        """
        self.edit_count = self.edit_count + 1
        self.last_updated = datetime.now()
        self.content = new_content
26
+
27
+
28
class CoreMemory(IMemoryComponent):
    """
    Always-accessible core memory for fundamental facts.

    Based on MemGPT/Letta research - stores agent persona + user information
    as a small set of self-editable blocks that are always included in the
    prompt context.
    """

    def __init__(self, max_blocks: int = 10, max_tokens_per_block: int = 200):
        """
        Args:
            max_blocks: Maximum number of core memory blocks held at once.
            max_tokens_per_block: Soft per-block size limit, in tokens.
        """
        self.blocks: Dict[str, CoreMemoryBlock] = {}
        self.max_blocks = max_blocks
        self.max_tokens_per_block = max_tokens_per_block

        # Initialize default blocks (MemGPT pattern)
        self.blocks["persona"] = CoreMemoryBlock(
            block_id="persona",
            label="persona",
            content="I am an AI assistant with persistent memory capabilities.",
            last_updated=datetime.now()
        )
        self.blocks["user_info"] = CoreMemoryBlock(
            block_id="user_info",
            label="user_info",
            content="User information will be learned over time.",
            last_updated=datetime.now()
        )

    def get_context(self) -> str:
        """Get all core memory as a context string (always included in prompts)."""
        return "\n".join(
            f"[{block.label}] {block.content}" for block in self.blocks.values()
        )

    def update_block(self, block_id: str, content: str, reasoning: str = "") -> bool:
        """Agent updates a core memory block (self-editing capability).

        Returns:
            True on success; False when the block id is unknown or the new
            content exceeds the block's rough token budget.
        """
        if block_id not in self.blocks:
            return False
        # ~4 characters per token is a rough heuristic; avoids pulling in a
        # tokenizer just for a sanity cap.
        if len(content) > self.max_tokens_per_block * 4:
            return False
        self.blocks[block_id].update(content, reasoning)
        return True

    def add_block(self, label: str, content: str) -> Optional[str]:
        """Add a new core memory block; return its id, or None when full."""
        if len(self.blocks) >= self.max_blocks:
            return None
        block_id = f"core_{len(self.blocks)}"
        self.blocks[block_id] = CoreMemoryBlock(
            block_id=block_id,
            label=label,
            content=content,
            last_updated=datetime.now()
        )
        return block_id

    # IMemoryComponent interface implementation
    def add(self, item: MemoryItem) -> str:
        """Route an important fact into the appropriate core block.

        Returns:
            The id of the block that absorbed the fact, or "" when it could
            not be stored (update rejected, or no block slots left).
        """
        content = str(item.content)
        lowered = content.lower()
        # Explicit conditionals replace the original `x and "id" or ""`
        # idiom, which is fragile and hard to read.
        if "user" in lowered:
            return "user_info" if self.update_block("user_info", content) else ""
        if "persona" in lowered or "agent" in lowered:
            return "persona" if self.update_block("persona", content) else ""
        return self.add_block("general", content) or ""

    def retrieve(self, query: str, limit: int = 10) -> List[MemoryItem]:
        """Retrieve core memory blocks whose label or content matches query."""
        results = []
        query_lower = query.lower()

        for block in self.blocks.values():
            if query_lower in block.content.lower() or query_lower in block.label.lower():
                results.append(MemoryItem(
                    content={"label": block.label, "content": block.content},
                    event_time=block.last_updated,
                    ingestion_time=block.last_updated,
                    confidence=1.0,  # Core memory is always high confidence
                    metadata={"block_id": block.block_id, "edit_count": block.edit_count}
                ))
                if len(results) >= limit:
                    break

        return results

    def consolidate(self) -> int:
        """Core memory doesn't consolidate - it's manually curated."""
        return 0
@@ -0,0 +1,68 @@
1
+ """
2
+ Episodic memory for experiences and events.
3
+ """
4
+
5
+ from typing import List, Dict
6
+ from datetime import datetime
7
+
8
+ from abstractmemory.core.interfaces import IMemoryComponent, MemoryItem
9
+ from abstractmemory.core.temporal import GroundingAnchor, TemporalSpan, RelationalContext
10
+
11
+
12
class EpisodicMemory(IMemoryComponent):
    """Long-term episodic memory with temporal organization."""

    def __init__(self):
        # episode id -> {'item': MemoryItem, 'anchor': GroundingAnchor, 'related': [...]}
        self.episodes = {}
        # episode id -> GroundingAnchor, kept separately for temporal queries
        self.temporal_index = {}

    def add(self, item: MemoryItem) -> str:
        """Store one episode, index its temporal anchor, and return its id."""
        episode_id = f"ep_{len(self.episodes)}_{datetime.now().timestamp()}"

        # Grounding anchor with a minimal relational context; the user id is
        # a placeholder that GroundedMemory overwrites when wiring this in.
        anchor = GroundingAnchor(
            event_time=item.event_time,
            ingestion_time=item.ingestion_time,
            validity_span=TemporalSpan(start=item.event_time),
            relational=RelationalContext(user_id="default"),
            confidence=item.confidence
        )

        record = {
            'item': item,
            'anchor': anchor,
            'related': []  # Links to related episodes
        }
        self.episodes[episode_id] = record
        self.temporal_index[episode_id] = anchor

        return episode_id

    def retrieve(self, query: str, limit: int = 10) -> List[MemoryItem]:
        """Substring search over episode contents (embeddings in production)."""
        needle = query.lower()
        hits = []

        for record in self.episodes.values():
            if needle in str(record['item'].content).lower():
                hits.append(record['item'])
                if len(hits) == limit:
                    break

        return hits

    def consolidate(self) -> int:
        """Placeholder: clustering/summarization is not implemented yet."""
        return 0

    def get_episodes_between(self, start: datetime, end: datetime) -> List[MemoryItem]:
        """Episodes whose event time falls within [start, end], oldest first."""
        matching = [
            record['item']
            for record in self.episodes.values()
            if start <= record['anchor'].event_time <= end
        ]
        matching.sort(key=lambda item: item.event_time)
        return matching
@@ -0,0 +1,102 @@
1
+ """
2
+ Semantic memory for facts, concepts, and learned knowledge.
3
+ Separate from Core (identity) and Episodic (events).
4
+ """
5
+
6
+ from typing import List, Dict, Set
7
+ from datetime import datetime
8
+ from collections import defaultdict
9
+
10
+ from abstractmemory.core.interfaces import IMemoryComponent, MemoryItem
11
+
12
+
13
class SemanticMemory(IMemoryComponent):
    """
    Long-term storage of facts and concepts learned over time.
    Only stores validated, recurring knowledge.
    """

    def __init__(self, validation_threshold: int = 3):
        """
        Args:
            validation_threshold: How many times a fact must be observed
                before it is promoted to validated storage.
        """
        self.facts: Dict[str, Dict] = {}  # fact_id -> validated fact record
        self.concepts: Dict[str, Set[str]] = {}  # word -> ids of facts mentioning it
        self.pending_facts: defaultdict = defaultdict(int)  # normalized fact -> count
        self.validation_threshold = validation_threshold

    def add(self, item: MemoryItem) -> str:
        """Record a potential fact - only stored once validated.

        Returns:
            The new fact id when the occurrence threshold is reached,
            otherwise "" (the observation is still counted toward validation).
        """
        fact_key = str(item.content).lower()

        # Count occurrence
        self.pending_facts[fact_key] += 1

        if self.pending_facts[fact_key] < self.validation_threshold:
            return ""  # Not yet validated

        # Promote to validated facts; pop clears the pending counter.
        fact_id = f"fact_{len(self.facts)}_{datetime.now().timestamp()}"
        occurrence_count = self.pending_facts.pop(fact_key)
        self.facts[fact_id] = {
            'content': item.content,
            # Confidence grows with repetition, capped at 1.0
            'confidence': min(1.0, occurrence_count * 0.1 + 0.3),
            'first_seen': item.event_time,
            'validated_at': datetime.now(),
            'occurrence_count': occurrence_count,
            'original_metadata': item.metadata or {}
        }
        return fact_id

    def retrieve(self, query: str, limit: int = 10) -> List[MemoryItem]:
        """Return up to `limit` matching facts, highest confidence first.

        All matches are collected before ranking, so the cutoff keeps the
        MOST confident facts (the original broke out of the scan at `limit`
        and then sorted, which could drop higher-confidence facts found
        later in iteration order).
        """
        query_lower = query.lower()
        results = []

        for fact in self.facts.values():
            if query_lower not in str(fact['content']).lower():
                continue
            # Preserve original metadata and add occurrence count
            metadata = dict(fact.get('original_metadata', {}))
            metadata['occurrence_count'] = fact['occurrence_count']
            results.append(MemoryItem(
                content=fact['content'],
                event_time=fact['first_seen'],
                ingestion_time=fact['validated_at'],
                confidence=fact['confidence'],
                metadata=metadata
            ))

        results.sort(key=lambda x: x.confidence, reverse=True)
        return results[:limit]

    def consolidate(self) -> int:
        """Link facts into word-keyed concept buckets.

        Returns:
            The number of NEW fact->concept links created. Re-running with
            unchanged facts returns 0 (the original counted every pass,
            inflating the total on repeated calls).
        """
        new_links = 0
        for fact_id, fact in self.facts.items():
            for word in str(fact['content']).lower().split():
                if len(word) <= 3:  # Skip short words
                    continue
                bucket = self.concepts.setdefault(word, set())
                if fact_id not in bucket:
                    bucket.add(fact_id)
                    new_links += 1
        return new_links

    def get_concept_network(self, concept: str) -> Dict:
        """Get the facts linked to a concept (case-insensitive lookup).

        Returns a dict {'concept': <name>, 'facts': [contents...]}; the
        facts list is empty for unknown concepts.
        """
        fact_ids = self.concepts.get(concept.lower())
        if fact_ids:
            return {
                'concept': concept,
                'facts': [self.facts[fid]['content'] for fid in fact_ids if fid in self.facts]
            }
        return {'concept': concept, 'facts': []}
@@ -0,0 +1,50 @@
1
+ """
2
+ Working memory with sliding window.
3
+ """
4
+
5
+ from collections import deque
6
+ from typing import List, Optional
7
+ from datetime import datetime
8
+
9
+ from abstractmemory.core.interfaces import IMemoryComponent, MemoryItem
10
+
11
+
12
class WorkingMemory(IMemoryComponent):
    """Short-term working memory: a fixed-capacity sliding window."""

    def __init__(self, capacity: int = 10):
        self.capacity = capacity
        # deque(maxlen=...) silently evicts the oldest entry on overflow
        self.items = deque(maxlen=capacity)

    def add(self, item: MemoryItem) -> str:
        """Append an item; the deque drops the oldest entry when at capacity."""
        item_id = f"wm_{datetime.now().timestamp()}"
        self.items.append((item_id, item))
        return item_id

    def retrieve(self, query: str, limit: int = 10) -> List[MemoryItem]:
        """Return up to `limit` windowed items whose content matches query."""
        needle = query.lower()
        matches = []

        for _, item in self.items:
            if needle in str(item.content).lower():
                matches.append(item)
                if len(matches) == limit:
                    break

        return matches

    def consolidate(self) -> int:
        """Drop the older half of the window.

        A full implementation would move these items into episodic memory
        before discarding them; returns how many were removed.
        """
        evicted = len(self.items) // 2
        for _ in range(evicted):
            self.items.popleft()
        return evicted

    def get_context_window(self) -> List[MemoryItem]:
        """Current window contents, oldest first, without their ids."""
        return [item for _, item in self.items]
@@ -0,0 +1 @@
1
+ # Core memory interfaces and temporal models
@@ -0,0 +1,95 @@
1
+ """
2
+ Core memory interfaces based on SOTA research.
3
+ """
4
+
5
+ from abc import ABC, abstractmethod
6
+ from typing import Any, Dict, List, Optional, Tuple
7
+ from datetime import datetime
8
+ from dataclasses import dataclass
9
+
10
+
11
@dataclass
class MemoryItem:
    """Base unit stored and returned by all memory components."""
    content: Any  # Arbitrary payload (string, dict, ...)
    event_time: datetime  # When it happened
    ingestion_time: datetime  # When we learned it
    confidence: float = 1.0
    # Optional annotation fixed: None is a legitimate construction value,
    # normalized to a fresh {} below so callers can mutate it safely.
    metadata: Optional[Dict[str, Any]] = None

    def __post_init__(self):
        # Give each instance its own dict; a `= {}` default would be a
        # shared mutable default across all instances.
        if self.metadata is None:
            self.metadata = {}
23
+
24
+
25
class IMemoryComponent(ABC):
    """Contract implemented by every memory tier (core, working, episodic, semantic)."""

    @abstractmethod
    def add(self, item: MemoryItem) -> str:
        """Store `item` and return an identifier for it."""

    @abstractmethod
    def retrieve(self, query: str, limit: int = 10) -> List[MemoryItem]:
        """Return up to `limit` items relevant to `query`."""

    @abstractmethod
    def consolidate(self) -> int:
        """Compact/merge stored items; return how many were consolidated."""
42
+
43
+
44
class IRetriever(ABC):
    """Strategy interface for scored retrieval."""

    @abstractmethod
    def search(self, query: str, limit: int = 10) -> List[Tuple[float, Any]]:
        """Return at most `limit` (score, item) pairs for `query`."""
51
+
52
+
53
class IStorage(ABC):
    """Interface for storage backends: generic key/value persistence plus
    verbatim interaction logging with linked experiential notes."""

    @abstractmethod
    def save(self, key: str, value: Any) -> None:
        """Persist `value` under `key`."""

    @abstractmethod
    def load(self, key: str) -> Any:
        """Return the value stored under `key`."""

    @abstractmethod
    def exists(self, key: str) -> bool:
        """Report whether `key` is present in the store."""

    @abstractmethod
    def save_interaction(self, user_id: str, timestamp: datetime,
                         user_input: str, agent_response: str,
                         topic: str, metadata: Optional[Dict] = None) -> str:
        """Persist a verbatim interaction and return its interaction id."""

    @abstractmethod
    def save_experiential_note(self, timestamp: datetime, reflection: str,
                               interaction_id: str, note_type: str = "reflection",
                               metadata: Optional[Dict] = None) -> str:
        """Persist an AI experiential note and return its note id."""

    @abstractmethod
    def link_interaction_to_note(self, interaction_id: str, note_id: str) -> None:
        """Create a bidirectional link between an interaction and a note."""

    @abstractmethod
    def search_interactions(self, query: str, user_id: Optional[str] = None,
                            start_date: Optional[datetime] = None,
                            end_date: Optional[datetime] = None) -> List[Dict]:
        """Search stored interactions, optionally filtered by user and date range."""
@@ -0,0 +1,100 @@
1
+ """
2
+ Bi-temporal data model based on Zep/Graphiti research.
3
+ """
4
+
5
+ from datetime import datetime, timedelta
6
+ from typing import List, Optional, Tuple
7
+ from dataclasses import dataclass, field
8
+
9
+
10
@dataclass
class TemporalSpan:
    """Represents a time span with validity"""
    start: datetime
    end: Optional[datetime] = None  # None means open-ended (still valid now)
    valid: bool = True  # cleared when the span is invalidated/superseded
16
+
17
+
18
@dataclass
class RelationalContext:
    """Who is involved in this memory (relational grounding)"""
    user_id: str  # Primary user/speaker
    agent_id: Optional[str] = None  # Which agent persona
    relationship: Optional[str] = None  # "owner", "colleague", "stranger"
    session_id: Optional[str] = None  # Conversation session
25
+
26
@dataclass
class GroundingAnchor:
    """Multi-dimensional grounding for experiential memory"""
    # Temporal grounding (when) - bi-temporal model: event vs. ingestion time
    event_time: datetime  # When it happened
    ingestion_time: datetime  # When we learned about it
    validity_span: TemporalSpan  # When it was/is valid

    # Relational grounding (who)
    relational: RelationalContext  # Who is involved

    # Additional grounding
    confidence: float = 1.0
    source: Optional[str] = None  # Where the information came from (optional)
    location: Optional[str] = None  # Where (optional)
41
+
42
+
43
class TemporalIndex:
    """Index for efficient temporal queries over grounding anchors."""

    def __init__(self):
        # Sorted orderings kept for future range queries; note the query
        # methods below currently scan `_anchors` directly.
        self._by_event_time = []  # (event_time, anchor_id), sorted
        self._by_ingestion_time = []  # (ingestion_time, anchor_id), sorted
        self._anchors = {}  # anchor_id -> GroundingAnchor

    def add_anchor(self, anchor_id: str, anchor: GroundingAnchor):
        """Register an anchor and keep both temporal orderings sorted."""
        self._anchors[anchor_id] = anchor

        self._insert_sorted(self._by_event_time,
                            (anchor.event_time, anchor_id))
        self._insert_sorted(self._by_ingestion_time,
                            (anchor.ingestion_time, anchor_id))

    def query_at_time(self, point_in_time: datetime,
                      use_event_time: bool = True) -> List[str]:
        """Get ids of anchors that were known AND valid at `point_in_time`.

        Args:
            point_in_time: The reference instant.
            use_event_time: Compare against each anchor's event time when
                True, against its ingestion time when False.

        Bug fix: the original had no code path for use_event_time=False
        (the else branch was missing entirely), so that mode always
        returned an empty list.
        """
        valid_ids = []

        for anchor_id, anchor in self._anchors.items():
            # Bi-temporal check 1: we must already have learned about it.
            if anchor.ingestion_time > point_in_time:
                continue

            # Bi-temporal check 2: the chosen timeline must have reached it.
            reference = anchor.event_time if use_event_time else anchor.ingestion_time
            if reference > point_in_time:
                continue

            # Validity check: not invalidated, and span still open then.
            span = anchor.validity_span
            if span.valid and (span.end is None or span.end > point_in_time):
                valid_ids.append(anchor_id)

        return valid_ids

    def _insert_sorted(self, lst: list, item: tuple):
        """Insert item into an already-sorted list (O(n) due to the shift)."""
        import bisect  # local import mirrors the module's dependency-light style
        bisect.insort(lst, item)

    def get_evolution(self, start: datetime, end: datetime) -> List[Tuple[datetime, str]]:
        """Chronological knowledge changes (additions/invalidations) in [start, end]."""
        changes = []

        for anchor_id, anchor in self._anchors.items():
            # Added: ingested during the window
            if start <= anchor.ingestion_time <= end:
                changes.append((anchor.ingestion_time, f"Added: {anchor_id}"))

            # Invalidated: validity span closed during the window
            end_time = anchor.validity_span.end
            if end_time and start <= end_time <= end:
                changes.append((end_time, f"Invalidated: {anchor_id}"))

        return sorted(changes)
@@ -0,0 +1 @@
1
+ # Temporal knowledge graph implementation