superlocalmemory 2.4.2 → 2.5.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +62 -0
- package/README.md +62 -2
- package/docs/ARCHITECTURE-V2.5.md +190 -0
- package/docs/architecture-diagram.drawio +405 -0
- package/mcp_server.py +115 -14
- package/package.json +4 -1
- package/scripts/generate-thumbnails.py +220 -0
- package/src/agent_registry.py +385 -0
- package/src/db_connection_manager.py +532 -0
- package/src/event_bus.py +555 -0
- package/src/memory_store_v2.py +626 -471
- package/src/provenance_tracker.py +322 -0
- package/src/subscription_manager.py +399 -0
- package/src/trust_scorer.py +456 -0
- package/src/webhook_dispatcher.py +229 -0
- package/ui/app.js +425 -0
- package/ui/index.html +147 -1
- package/ui/js/agents.js +192 -0
- package/ui/js/clusters.js +80 -0
- package/ui/js/core.js +230 -0
- package/ui/js/events.js +178 -0
- package/ui/js/graph.js +32 -0
- package/ui/js/init.js +31 -0
- package/ui/js/memories.js +149 -0
- package/ui/js/modal.js +139 -0
- package/ui/js/patterns.js +93 -0
- package/ui/js/profiles.js +202 -0
- package/ui/js/search.js +59 -0
- package/ui/js/settings.js +167 -0
- package/ui/js/timeline.js +32 -0
- package/ui_server.py +69 -1665
- package/docs/COMPETITIVE-ANALYSIS.md +0 -210
package/src/event_bus.py
ADDED
|
@@ -0,0 +1,555 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
SuperLocalMemory V2 - Event Bus
|
|
4
|
+
Copyright (c) 2026 Varun Pratap Bhardwaj
|
|
5
|
+
Licensed under MIT License
|
|
6
|
+
|
|
7
|
+
Repository: https://github.com/varun369/SuperLocalMemoryV2
|
|
8
|
+
Author: Varun Pratap Bhardwaj (Solution Architect)
|
|
9
|
+
|
|
10
|
+
NOTICE: This software is protected by MIT License.
|
|
11
|
+
Attribution must be preserved in all copies or derivatives.
|
|
12
|
+
"""
|
|
13
|
+
|
|
14
|
+
"""
|
|
15
|
+
EventBus — Real-time event broadcasting for memory operations.
|
|
16
|
+
|
|
17
|
+
Transforms SuperLocalMemory from passive storage (filing cabinet) to active
|
|
18
|
+
coordination layer (nervous system). Every memory write, update, delete, or
|
|
19
|
+
recall triggers an event that subscribed agents and the dashboard receive.
|
|
20
|
+
|
|
21
|
+
Architecture:
|
|
22
|
+
memory_store_v2.py (write) → EventBus.emit()
|
|
23
|
+
├── SQLite memory_events table (persistence)
|
|
24
|
+
├── In-memory listeners (real-time delivery)
|
|
25
|
+
│ ├── SSE endpoint (dashboard, MCP clients)
|
|
26
|
+
│ ├── WebSocket (real-time agents)
|
|
27
|
+
│ └── Webhook dispatcher (external services)
|
|
28
|
+
└── Tiered retention (hot → warm → cold → archive)
|
|
29
|
+
|
|
30
|
+
Event Types:
|
|
31
|
+
memory.created — New memory written
|
|
32
|
+
memory.updated — Existing memory modified
|
|
33
|
+
memory.deleted — Memory removed
|
|
34
|
+
memory.recalled — Memory retrieved by an agent
|
|
35
|
+
graph.updated — Knowledge graph rebuilt
|
|
36
|
+
pattern.learned — New pattern detected
|
|
37
|
+
agent.connected — New agent connects
|
|
38
|
+
agent.disconnected — Agent disconnects
|
|
39
|
+
|
|
40
|
+
Retention Tiers:
|
|
41
|
+
Hot (0-48h, configurable) — Full events, fully queryable
|
|
42
|
+
Warm (2-14d, configurable) — Key events only (importance >= 5)
|
|
43
|
+
Cold (14-30d, configurable) — Daily aggregates only
|
|
44
|
+
Archive (30d+) — Pruned, stats in pattern_learner
|
|
45
|
+
"""
|
|
46
|
+
|
|
47
|
+
import json
|
|
48
|
+
import logging
|
|
49
|
+
import threading
|
|
50
|
+
import time
|
|
51
|
+
from collections import deque
|
|
52
|
+
from datetime import datetime, timedelta
|
|
53
|
+
from pathlib import Path
|
|
54
|
+
from typing import Optional, List, Dict, Any, Callable
|
|
55
|
+
|
|
logger = logging.getLogger("superlocalmemory.events")

# Default retention windows (in hours) for the tiered event store.
DEFAULT_HOT_HOURS = 48
DEFAULT_WARM_HOURS = 14 * 24  # 14 days
DEFAULT_COLD_HOURS = 30 * 24  # 30 days

# Maximum number of events held in the in-memory buffer for real-time delivery.
EVENT_BUFFER_SIZE = 200

# Closed set of event types accepted by EventBus.emit().
VALID_EVENT_TYPES = frozenset([
    "memory.created",
    "memory.updated",
    "memory.deleted",
    "memory.recalled",
    "graph.updated",
    "pattern.learned",
    "agent.connected",
    "agent.disconnected",
])


class EventBus:
    """
    Central event bus for SuperLocalMemory.

    Singleton per database path. Emits events to persistent storage and
    in-memory listeners simultaneously.

    Thread-safe: emit() can be called from any thread.
    Listener callbacks run on the emitter's thread — keep them fast.
    For heavy work, listeners should enqueue to their own async queue.
    """

    _instances: Dict[str, "EventBus"] = {}
    _instances_lock = threading.Lock()

    # DDL shared by the managed and fallback connection paths (previously
    # duplicated verbatim inside _init_schema).
    _SCHEMA_STATEMENTS = (
        '''
        CREATE TABLE IF NOT EXISTS memory_events (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            event_type TEXT NOT NULL,
            memory_id INTEGER,
            source_agent TEXT DEFAULT 'user',
            source_protocol TEXT DEFAULT 'internal',
            payload TEXT,
            importance INTEGER DEFAULT 5,
            tier TEXT DEFAULT 'hot',
            created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
        )
        ''',
        'CREATE INDEX IF NOT EXISTS idx_events_type ON memory_events(event_type)',
        'CREATE INDEX IF NOT EXISTS idx_events_created ON memory_events(created_at)',
        'CREATE INDEX IF NOT EXISTS idx_events_tier ON memory_events(tier)',
    )

    @classmethod
    def get_instance(cls, db_path: Optional[Path] = None) -> "EventBus":
        """Get or create the singleton EventBus for a database path."""
        if db_path is None:
            db_path = Path.home() / ".claude-memory" / "memory.db"

        key = str(db_path)
        with cls._instances_lock:
            if key not in cls._instances:
                cls._instances[key] = cls(db_path)
            return cls._instances[key]

    @classmethod
    def reset_instance(cls, db_path: Optional[Path] = None):
        """Drop a singleton instance (or all of them, if db_path is None). Used for testing."""
        with cls._instances_lock:
            if db_path is None:
                cls._instances.clear()
            else:
                # pop() tolerates a path that was never instantiated.
                cls._instances.pop(str(db_path), None)

    def __init__(self, db_path: Path):
        """Initialize EventBus. Use get_instance() instead of calling directly."""
        self.db_path = Path(db_path)

        # In-memory event buffer for real-time delivery (bounded ring).
        self._buffer: deque = deque(maxlen=EVENT_BUFFER_SIZE)
        self._buffer_lock = threading.Lock()

        # Per-process sequence counter (resets on restart — DB id is authoritative).
        self._event_counter = 0
        self._counter_lock = threading.Lock()

        # Listener callbacks; each is called with the event dict on every emit().
        self._listeners: List[Callable[[dict], None]] = []
        self._listeners_lock = threading.Lock()

        # Create tables/indexes up front; schema errors propagate to the caller.
        self._init_schema()

        logger.info("EventBus initialized: db=%s", self.db_path)

    # =========================================================================
    # Database access helpers
    # =========================================================================

    def _with_write(self, fn: Callable) -> Any:
        """
        Run fn(conn) on a writable connection.

        Prefers the shared DbConnectionManager; falls back to a direct sqlite3
        connection when that module is unavailable. (Previously only
        _init_schema had this fallback, so every other DB operation silently
        failed in standalone deployments.)
        """
        try:
            from db_connection_manager import DbConnectionManager
        except ImportError:
            import sqlite3
            conn = sqlite3.connect(str(self.db_path))
            try:
                return fn(conn)
            finally:
                conn.close()
        mgr = DbConnectionManager.get_instance(self.db_path)
        return mgr.execute_write(fn)

    def _with_read(self, fn: Callable) -> Any:
        """Run fn(conn) on a read connection, with the same fallback as _with_write()."""
        try:
            from db_connection_manager import DbConnectionManager
        except ImportError:
            import sqlite3
            conn = sqlite3.connect(str(self.db_path))
            try:
                return fn(conn)
            finally:
                conn.close()
        mgr = DbConnectionManager.get_instance(self.db_path)
        with mgr.read_connection() as conn:
            return fn(conn)

    def _init_schema(self):
        """Create the memory_events table and its indexes if they don't exist."""
        def _create(conn):
            cursor = conn.cursor()
            for statement in self._SCHEMA_STATEMENTS:
                cursor.execute(statement)
            conn.commit()

        self._with_write(_create)

    # =========================================================================
    # Event Emission
    # =========================================================================

    def emit(
        self,
        event_type: str,
        payload: Optional[Dict[str, Any]] = None,
        memory_id: Optional[int] = None,
        source_agent: str = "user",
        source_protocol: str = "internal",
        importance: int = 5,
    ) -> Optional[int]:
        """
        Emit an event to all subscribers and persist to database.

        Args:
            event_type: One of VALID_EVENT_TYPES (e.g., "memory.created")
            payload: Event-specific data (dict, serialized to JSON)
            memory_id: Associated memory ID (if applicable)
            source_agent: Agent that triggered the event
            source_protocol: Protocol used (mcp, cli, rest, python, a2a)
            importance: Event importance 1-10 (clamped; affects retention)

        Returns:
            Event ID from database, or None if persistence failed

        Raises:
            ValueError: If event_type is not in VALID_EVENT_TYPES
        """
        if event_type not in VALID_EVENT_TYPES:
            raise ValueError(
                f"Invalid event type: {event_type}. "
                f"Valid types: {', '.join(sorted(VALID_EVENT_TYPES))}"
            )

        # Clamp importance to the documented 1-10 range.
        importance = max(1, min(10, importance))

        # NOTE(review): timestamps are naive local time; all retention math in
        # this class uses the same clock and isoformat layout, so the string
        # comparisons below stay internally consistent.
        now = datetime.now().isoformat()
        with self._counter_lock:
            self._event_counter += 1
            seq = self._event_counter

        event = {
            "seq": seq,
            "event_type": event_type,
            "memory_id": memory_id,
            "source_agent": source_agent,
            "source_protocol": source_protocol,
            "payload": payload or {},
            "importance": importance,
            "timestamp": now,
        }

        # 1. Persist to database (failure must never break the core operation).
        event_id = self._persist_event(event)
        if event_id:
            event["id"] = event_id

        # 2. Add to the in-memory buffer for real-time polling.
        with self._buffer_lock:
            self._buffer.append(event)

        # 3. Fan out to listeners (on this thread).
        self._notify_listeners(event)

        logger.debug("Event emitted: type=%s, id=%s, memory_id=%s", event_type, event_id, memory_id)
        return event_id

    def _persist_event(self, event: dict) -> Optional[int]:
        """Persist event to memory_events table. Returns event ID or None."""
        def _insert(conn):
            cursor = conn.cursor()
            cursor.execute('''
                INSERT INTO memory_events
                (event_type, memory_id, source_agent, source_protocol,
                 payload, importance, tier, created_at)
                VALUES (?, ?, ?, ?, ?, ?, 'hot', ?)
            ''', (
                event["event_type"],
                event.get("memory_id"),
                event["source_agent"],
                event["source_protocol"],
                json.dumps(event["payload"]),
                event["importance"],
                event["timestamp"],
            ))
            conn.commit()
            return cursor.lastrowid

        try:
            return self._with_write(_insert)
        except Exception as e:
            # Event persistence failure must NEVER break core operations.
            logger.error("Failed to persist event: %s", e)
            return None

    # =========================================================================
    # Listener Management
    # =========================================================================

    def add_listener(self, callback: Callable[[dict], None]):
        """
        Register a listener that receives every emitted event.

        Callbacks run on the emitter's thread — keep them fast and non-blocking.
        For async/heavy work, the callback should enqueue to its own queue.

        Args:
            callback: Function(event_dict) called on every emit()
        """
        with self._listeners_lock:
            self._listeners.append(callback)

    def remove_listener(self, callback: Callable[[dict], None]):
        """Remove a previously registered listener (no-op if not registered)."""
        with self._listeners_lock:
            try:
                self._listeners.remove(callback)
            except ValueError:
                pass

    def _notify_listeners(self, event: dict):
        """Call all registered listeners. Errors are logged, not raised."""
        # Snapshot under the lock so callbacks can add/remove listeners safely.
        with self._listeners_lock:
            listeners = list(self._listeners)

        for listener in listeners:
            try:
                listener(event)
            except Exception as e:
                logger.error("Event listener failed: %s", e)

    # =========================================================================
    # Event Retrieval (for replay, SSE, polling)
    # =========================================================================

    def get_recent_events(
        self,
        since_id: Optional[int] = None,
        limit: int = 50,
        event_type: Optional[str] = None,
    ) -> List[dict]:
        """
        Get recent events from the database.

        Used for:
        - SSE replay on reconnect (client sends Last-Event-ID)
        - Dashboard polling
        - Subscription replay (durable subscribers reconnecting)

        Args:
            since_id: Return events with ID greater than this (for replay)
            limit: Maximum events to return (default 50, capped at 200)
            event_type: Filter by event type (optional)

        Returns:
            List of event dicts, ordered by ID ascending (empty on error)
        """
        limit = min(limit, 200)

        def _query(conn):
            cursor = conn.cursor()
            sql = ("SELECT id, event_type, memory_id, source_agent, source_protocol, "
                   "payload, importance, tier, created_at FROM memory_events WHERE 1=1")
            params: list = []

            if since_id is not None:
                sql += " AND id > ?"
                params.append(since_id)

            if event_type:
                sql += " AND event_type = ?"
                params.append(event_type)

            sql += " ORDER BY id ASC LIMIT ?"
            params.append(limit)

            cursor.execute(sql, params)

            events = []
            for row in cursor.fetchall():
                # Payload is stored as JSON text; tolerate corrupt/NULL rows.
                try:
                    payload = json.loads(row[5]) if row[5] else {}
                except (json.JSONDecodeError, TypeError):
                    payload = {}

                events.append({
                    "id": row[0],
                    "event_type": row[1],
                    "memory_id": row[2],
                    "source_agent": row[3],
                    "source_protocol": row[4],
                    "payload": payload,
                    "importance": row[6],
                    "tier": row[7],
                    "timestamp": row[8],
                })
            return events

        try:
            return self._with_read(_query)
        except Exception as e:
            logger.error("Failed to get recent events: %s", e)
            return []

    def get_buffered_events(self, since_seq: int = 0) -> List[dict]:
        """
        Get events from the in-memory buffer (fast, no DB hit).

        Used for real-time SSE/WebSocket delivery.

        Args:
            since_seq: Return events with seq > this value

        Returns:
            List of event dicts from the buffer
        """
        with self._buffer_lock:
            return [e for e in self._buffer if e.get("seq", 0) > since_seq]

    def get_event_stats(self) -> dict:
        """Get event system statistics (totals, per-type/tier counts, 24h count)."""
        def _stats(conn):
            cursor = conn.cursor()

            cursor.execute("SELECT COUNT(*) FROM memory_events")
            total = cursor.fetchone()[0]

            cursor.execute("""
                SELECT event_type, COUNT(*) as count
                FROM memory_events
                GROUP BY event_type
                ORDER BY count DESC
            """)
            by_type = dict(cursor.fetchall())

            cursor.execute("""
                SELECT tier, COUNT(*) as count
                FROM memory_events
                GROUP BY tier
            """)
            by_tier = dict(cursor.fetchall())

            # BUGFIX: compute the 24h cutoff in Python using the same naive
            # local isoformat written at insert time. The previous
            # datetime('now', '-24 hours') compares a UTC, space-separated
            # string against stored local, 'T'-separated strings — both the
            # timezone and the string format mismatched, skewing the count.
            cutoff = (datetime.now() - timedelta(hours=24)).isoformat()
            cursor.execute(
                "SELECT COUNT(*) FROM memory_events WHERE created_at >= ?",
                (cutoff,),
            )
            last_24h = cursor.fetchone()[0]

            return total, by_type, by_tier, last_24h

        try:
            total, by_type, by_tier, last_24h = self._with_read(_stats)

            # Read shared state under its locks (previously unsynchronized).
            with self._buffer_lock:
                buffer_size = len(self._buffer)
            with self._listeners_lock:
                listener_count = len(self._listeners)

            return {
                "total_events": total,
                "events_last_24h": last_24h,
                "by_type": by_type,
                "by_tier": by_tier,
                "buffer_size": buffer_size,
                "listener_count": listener_count,
            }

        except Exception as e:
            logger.error("Failed to get event stats: %s", e)
            return {"total_events": 0, "error": str(e)}

    # =========================================================================
    # Tiered Retention (pruning)
    # =========================================================================

    def prune_events(
        self,
        hot_hours: int = DEFAULT_HOT_HOURS,
        warm_hours: int = DEFAULT_WARM_HOURS,
        cold_hours: int = DEFAULT_COLD_HOURS,
    ) -> dict:
        """
        Apply tiered retention policy to events.

        Actual behavior (matching the SQL below):
            Hot → warm:  events older than hot_hours with importance < 5 are
                         demoted to tier 'warm'; important events (>= 5) stay
                         'hot' until the final archive cutoff.
            Warm → cold: warm events older than warm_hours are DELETED — the
                         "warm_to_cold" counter counts deletions. Daily cold
                         aggregates are not built here; long-term stats live
                         in pattern_learner.
            Archive:     anything older than cold_hours is deleted regardless
                         of tier.

        Args:
            hot_hours: Hours to keep all events (default 48)
            warm_hours: Hours before warm events are dropped (default 336 / 14 days)
            cold_hours: Hours before everything is dropped (default 720 / 30 days)

        Returns:
            Dict with counts per transition ("hot_to_warm", "warm_to_cold",
            "archived"), or {"error": ...} on failure.
        """
        stats = {"hot_to_warm": 0, "warm_to_cold": 0, "archived": 0}

        def _do_prune(conn):
            cursor = conn.cursor()
            now = datetime.now()

            # Hot → warm: demote low-importance events past the hot window.
            warm_cutoff = (now - timedelta(hours=hot_hours)).isoformat()
            cursor.execute("""
                UPDATE memory_events
                SET tier = 'warm'
                WHERE tier = 'hot'
                  AND created_at < ?
                  AND importance < 5
            """, (warm_cutoff,))
            stats["hot_to_warm"] = cursor.rowcount

            # Warm events past the warm window are removed outright.
            cold_cutoff = (now - timedelta(hours=warm_hours)).isoformat()
            cursor.execute("""
                DELETE FROM memory_events
                WHERE tier = 'warm'
                  AND created_at < ?
            """, (cold_cutoff,))
            stats["warm_to_cold"] = cursor.rowcount

            # Archive: drop everything past the cold window, any tier.
            archive_cutoff = (now - timedelta(hours=cold_hours)).isoformat()
            cursor.execute("""
                DELETE FROM memory_events
                WHERE created_at < ?
            """, (archive_cutoff,))
            stats["archived"] = cursor.rowcount

            conn.commit()

        try:
            self._with_write(_do_prune)

            logger.info(
                "Event pruning complete: hot→warm=%d, warm→cold=%d, archived=%d",
                stats["hot_to_warm"], stats["warm_to_cold"], stats["archived"]
            )
            return stats

        except Exception as e:
            logger.error("Event pruning failed: %s", e)
            return {"error": str(e)}