superlocalmemory 2.4.2 → 2.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,456 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ SuperLocalMemory V2 - Trust Scorer
4
+ Copyright (c) 2026 Varun Pratap Bhardwaj
5
+ Licensed under MIT License
6
+
7
+ Repository: https://github.com/varun369/SuperLocalMemoryV2
8
+ Author: Varun Pratap Bhardwaj (Solution Architect)
9
+
10
+ NOTICE: This software is protected by MIT License.
11
+ Attribution must be preserved in all copies or derivatives.
12
+ """
13
+
14
+ """
15
+ TrustScorer — Silent trust signal collection for AI agents.
16
+
17
+ v2.5 BEHAVIOR (this version):
18
+ - All agents start at trust 1.0
19
+ - Signals are collected silently (no enforcement, no ranking, no blocking)
20
+ - Trust scores are updated in agent_registry.trust_score
21
+ - Dashboard shows scores but they don't affect recall ordering yet
22
+
23
+ v2.6 BEHAVIOR (future):
24
+ - Trust scores visible in dashboard
25
+ - Recall results ranked by trust (higher trust = higher in results)
26
+
27
+ v3.0 BEHAVIOR (future):
28
+ - Active enforcement: quarantine low-trust memories, rate limiting
29
+ - Admin approval for untrusted agents
30
+
31
+ Trust Signals (all silently collected):
32
+ POSITIVE (increase trust):
33
+ - Memory recalled by other agents (cross-agent validation)
34
+ - Memory updated (shows ongoing relevance)
35
+ - High importance memories (agent writes valuable content)
36
+ - Consistent write patterns (not spam-like)
37
+
38
+ NEGATIVE (decrease trust):
39
+ - Memory deleted shortly after creation (low quality)
40
+ - Very high write volume in short time (potential spam/poisoning)
41
+ - Content flagged or overwritten by user
42
+
43
+ NEUTRAL:
44
+ - Normal read/write patterns
45
+ - Agent disconnects/reconnects
46
+
47
+ Scoring Algorithm:
48
+ Bayesian-inspired moving average. Each signal adjusts the score
49
+ by a small delta. Score is clamped to [0.0, 1.0].
50
+
51
+ new_score = old_score + (delta * decay_factor)
52
+ decay_factor = 1 / (1 + signal_count * 0.01) # Stabilizes over time
53
+
54
+ This means early signals have more impact, and the score converges
55
+ as more data is collected. Similar to MACLA Beta-Binomial approach
56
+ (arXiv:2512.18950) but simplified for local computation.
57
+
58
+ Security (OWASP for Agentic AI):
59
+ - Memory poisoning (#1 threat): Trust scoring is the first defense layer
60
+ - Over-permissioning: Trust scores inform future access control (v3.0)
61
+ - Agent impersonation: Agent ID + protocol tracking detects anomalies
62
+ """
63
+
64
+ import json
65
+ import logging
66
+ import math
67
+ import threading
68
+ from datetime import datetime, timedelta
69
+ from pathlib import Path
70
+ from typing import Optional, Dict, List
71
+
72
logger = logging.getLogger("superlocalmemory.trust")

# Per-signal trust-score adjustments. Positive values raise trust, negative
# values lower it; zero-valued signals are audit-logged without moving the score.
SIGNAL_DELTAS = {
    "memory_recalled_by_others": 0.02,    # cross-agent validation
    "memory_updated": 0.01,               # ongoing relevance
    "high_importance_write": 0.015,       # importance >= 7
    "consistent_pattern": 0.01,           # non-spam-like write cadence
    "quick_delete": -0.03,                # deleted within 1 hour of creation
    "high_volume_burst": -0.02,           # >20 writes in 5 minutes
    "content_overwritten_by_user": -0.01, # user replaced the content
    "normal_write": 0.0,                  # neutral: logged only
    "normal_recall": 0.0,                 # neutral: logged only
}

# Heuristic thresholds backing the negative signals above.
QUICK_DELETE_HOURS = 1      # delete sooner than this = negative signal
BURST_THRESHOLD = 20        # more writes than this inside the window = burst
BURST_WINDOW_MINUTES = 5    # sliding window for burst detection
96
+
97
+
98
class TrustScorer:
    """
    Silent trust signal collector for AI agents.

    v2.5: Collection only, no enforcement. All agents start at 1.0.
    Thread-safe singleton per database path.

    Each signal moves the score by a small delta (see SIGNAL_DELTAS),
    damped by a decay factor so the score stabilizes as more signals
    arrive, and clamped to [0.0, 1.0]. Every signal is persisted to
    the trust_signals table as an audit trail.
    """

    # One instance per database path, guarded by a class-level lock.
    _instances: Dict[str, "TrustScorer"] = {}
    _instances_lock = threading.Lock()

    # DDL shared by the connection-manager path and the sqlite3 fallback
    # (previously duplicated inline in both branches of _init_schema).
    _SCHEMA_STATEMENTS = (
        '''
        CREATE TABLE IF NOT EXISTS trust_signals (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            agent_id TEXT NOT NULL,
            signal_type TEXT NOT NULL,
            delta REAL NOT NULL,
            old_score REAL,
            new_score REAL,
            context TEXT DEFAULT '{}',
            created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
        )
        ''',
        'CREATE INDEX IF NOT EXISTS idx_trust_agent ON trust_signals(agent_id)',
        'CREATE INDEX IF NOT EXISTS idx_trust_created ON trust_signals(created_at)',
    )

    @classmethod
    def get_instance(cls, db_path: Optional[Path] = None) -> "TrustScorer":
        """Get or create the singleton TrustScorer for *db_path*.

        Defaults to ~/.claude-memory/memory.db when no path is given.
        """
        if db_path is None:
            db_path = Path.home() / ".claude-memory" / "memory.db"
        key = str(db_path)
        with cls._instances_lock:
            if key not in cls._instances:
                cls._instances[key] = cls(db_path)
            return cls._instances[key]

    @classmethod
    def reset_instance(cls, db_path: Optional[Path] = None):
        """Remove one singleton (or all when db_path is None). Used for testing."""
        with cls._instances_lock:
            if db_path is None:
                cls._instances.clear()
            else:
                cls._instances.pop(str(db_path), None)

    def __init__(self, db_path: Path):
        self.db_path = Path(db_path)

        # In-memory write timestamps for burst detection (agent_id -> [datetime]).
        self._write_timestamps: Dict[str, list] = {}
        self._timestamps_lock = threading.Lock()

        # Signal count per agent (drives the decay factor). Guarded by its
        # own lock: record_signal() may run on multiple threads, and the
        # previous unlocked read-modify-write could lose increments.
        self._signal_counts: Dict[str, int] = {}
        self._counts_lock = threading.Lock()

        self._init_schema()
        logger.info("TrustScorer initialized (v2.5 — silent collection, no enforcement)")

    def _init_schema(self):
        """Create trust_signals table for audit trail (idempotent DDL)."""
        try:
            from db_connection_manager import DbConnectionManager
            mgr = DbConnectionManager.get_instance(self.db_path)

            def _create(conn):
                for stmt in self._SCHEMA_STATEMENTS:
                    conn.execute(stmt)
                conn.commit()

            mgr.execute_write(_create)
        except ImportError:
            # Standalone fallback: talk to SQLite directly. try/finally fixes
            # a connection leak when the DDL raises.
            import sqlite3
            conn = sqlite3.connect(str(self.db_path))
            try:
                for stmt in self._SCHEMA_STATEMENTS:
                    conn.execute(stmt)
                conn.commit()
            finally:
                conn.close()

    # =========================================================================
    # Signal Recording
    # =========================================================================

    def record_signal(
        self,
        agent_id: str,
        signal_type: str,
        context: Optional[dict] = None,
    ):
        """
        Record a trust signal for an agent.

        Silently adjusts the agent's trust score based on the signal type.
        The signal and score change are logged to trust_signals table.

        Args:
            agent_id: Agent that generated the signal
            signal_type: One of SIGNAL_DELTAS keys
            context: Additional context (memory_id, etc.)
        """
        if signal_type not in SIGNAL_DELTAS:
            logger.warning("Unknown trust signal: %s", signal_type)
            return

        delta = SIGNAL_DELTAS[signal_type]

        # Current trust score from agent registry; unknown agents start at 1.0.
        old_score = self._get_agent_trust(agent_id)
        if old_score is None:
            old_score = 1.0

        # Read and bump the signal count atomically (thread-safety fix: the
        # count feeds the decay factor and must not lose increments).
        with self._counts_lock:
            count = self._signal_counts.get(agent_id, 0)
            self._signal_counts[agent_id] = count + 1

        # Decay factor: early signals move the score more; it converges as
        # more data is collected.
        decay = 1.0 / (1.0 + count * 0.01)
        adjusted_delta = delta * decay

        # Clamp the new score to [0.0, 1.0].
        new_score = max(0.0, min(1.0, old_score + adjusted_delta))

        # Persist signal to audit trail.
        self._persist_signal(agent_id, signal_type, adjusted_delta, old_score, new_score, context)

        # Update agent trust score only when it actually moved.
        if abs(new_score - old_score) > 0.0001:
            self._update_agent_trust(agent_id, new_score)

        logger.debug(
            "Trust signal: agent=%s, type=%s, delta=%.4f, score=%.4f→%.4f",
            agent_id, signal_type, adjusted_delta, old_score, new_score
        )

    def _persist_signal(self, agent_id, signal_type, delta, old_score, new_score, context):
        """Save signal to trust_signals table. Best-effort: failures are logged."""
        try:
            from db_connection_manager import DbConnectionManager
            mgr = DbConnectionManager.get_instance(self.db_path)

            def _insert(conn):
                conn.execute('''
                    INSERT INTO trust_signals (agent_id, signal_type, delta, old_score, new_score, context)
                    VALUES (?, ?, ?, ?, ?, ?)
                ''', (agent_id, signal_type, delta, old_score, new_score, json.dumps(context or {})))
                conn.commit()

            mgr.execute_write(_insert)
        except Exception as e:
            logger.error("Failed to persist trust signal: %s", e)

    def _get_agent_trust(self, agent_id: str) -> Optional[float]:
        """Get current trust score from agent_registry, or None if unavailable."""
        try:
            from db_connection_manager import DbConnectionManager
            mgr = DbConnectionManager.get_instance(self.db_path)

            with mgr.read_connection() as conn:
                cursor = conn.cursor()
                cursor.execute(
                    "SELECT trust_score FROM agent_registry WHERE agent_id = ?",
                    (agent_id,)
                )
                row = cursor.fetchone()
                return row[0] if row else None
        except Exception:
            # Best-effort read: any failure is treated as "unknown agent".
            return None

    def _update_agent_trust(self, agent_id: str, new_score: float):
        """Update trust score in agent_registry. Best-effort: failures are logged."""
        try:
            from db_connection_manager import DbConnectionManager
            mgr = DbConnectionManager.get_instance(self.db_path)

            def _update(conn):
                conn.execute(
                    "UPDATE agent_registry SET trust_score = ? WHERE agent_id = ?",
                    (round(new_score, 4), agent_id)
                )
                conn.commit()

            mgr.execute_write(_update)
        except Exception as e:
            logger.error("Failed to update agent trust: %s", e)

    # =========================================================================
    # High-Level Signal Helpers (called from memory_store_v2 / mcp_server)
    # =========================================================================

    def on_memory_created(self, agent_id: str, memory_id: int, importance: int = 5):
        """Record signals when a memory is created."""
        # Track write timestamp for burst detection.
        self._track_write(agent_id)

        if importance >= 7:
            self.record_signal(agent_id, "high_importance_write",
                               context={"memory_id": memory_id, "importance": importance})
        else:
            self.record_signal(agent_id, "normal_write",
                               context={"memory_id": memory_id})

        # Check for burst pattern.
        if self._is_burst(agent_id):
            self.record_signal(agent_id, "high_volume_burst",
                               context={"memory_id": memory_id})

    def on_memory_deleted(self, agent_id: str, memory_id: int, created_at: Optional[str] = None):
        """Record signals when a memory is deleted.

        A delete shortly after creation (QUICK_DELETE_HOURS) is a negative
        signal; anything else is neutral.
        """
        if created_at:
            try:
                created = datetime.fromisoformat(created_at)
                age_hours = (datetime.now() - created).total_seconds() / 3600
                if age_hours < QUICK_DELETE_HOURS:
                    self.record_signal(agent_id, "quick_delete",
                                       context={"memory_id": memory_id, "age_hours": round(age_hours, 2)})
                    return
            except (ValueError, TypeError):
                # Unparseable timestamp: fall through to the neutral signal.
                pass

        # Normal delete (no negative signal).
        self.record_signal(agent_id, "normal_write",
                           context={"memory_id": memory_id, "action": "delete"})

    def on_memory_recalled(self, agent_id: str, memory_id: int, created_by: Optional[str] = None):
        """Record signals when a memory is recalled.

        Credits the memory's creator when a *different* agent recalls it
        (cross-agent validation), then logs a neutral recall for the reader.
        """
        if created_by and created_by != agent_id:
            self.record_signal(created_by, "memory_recalled_by_others",
                               context={"memory_id": memory_id, "recalled_by": agent_id})

        self.record_signal(agent_id, "normal_recall",
                           context={"memory_id": memory_id})

    # =========================================================================
    # Burst Detection
    # =========================================================================

    def _track_write(self, agent_id: str):
        """Track a write timestamp for burst detection, pruning old entries."""
        now = datetime.now()
        with self._timestamps_lock:
            timestamps = self._write_timestamps.setdefault(agent_id, [])
            timestamps.append(now)
            # Keep only timestamps inside the burst window.
            cutoff = now - timedelta(minutes=BURST_WINDOW_MINUTES)
            self._write_timestamps[agent_id] = [t for t in timestamps if t > cutoff]

    def _is_burst(self, agent_id: str) -> bool:
        """Check if agent exceeded BURST_THRESHOLD writes inside the window."""
        with self._timestamps_lock:
            return len(self._write_timestamps.get(agent_id, [])) > BURST_THRESHOLD

    # =========================================================================
    # Query Trust Data
    # =========================================================================

    def get_trust_score(self, agent_id: str) -> float:
        """Get current trust score for an agent. Returns 1.0 if unknown."""
        score = self._get_agent_trust(agent_id)
        return score if score is not None else 1.0

    def get_signals(self, agent_id: str, limit: int = 50) -> List[dict]:
        """Get up to *limit* most recent trust signals for an agent."""
        try:
            from db_connection_manager import DbConnectionManager
            mgr = DbConnectionManager.get_instance(self.db_path)

            with mgr.read_connection() as conn:
                cursor = conn.cursor()
                cursor.execute("""
                    SELECT signal_type, delta, old_score, new_score, context, created_at
                    FROM trust_signals
                    WHERE agent_id = ?
                    ORDER BY created_at DESC
                    LIMIT ?
                """, (agent_id, limit))

                signals = []
                for row in cursor.fetchall():
                    ctx = {}
                    try:
                        ctx = json.loads(row[4]) if row[4] else {}
                    except (json.JSONDecodeError, TypeError):
                        # Corrupt context blob: return an empty dict rather than fail.
                        pass
                    signals.append({
                        "signal_type": row[0],
                        "delta": row[1],
                        "old_score": row[2],
                        "new_score": row[3],
                        "context": ctx,
                        "created_at": row[5],
                    })
                return signals

        except Exception as e:
            logger.error("Failed to get trust signals: %s", e)
            return []

    def get_trust_stats(self) -> dict:
        """Get trust system statistics (signal counts and average score)."""
        try:
            from db_connection_manager import DbConnectionManager
            mgr = DbConnectionManager.get_instance(self.db_path)

            with mgr.read_connection() as conn:
                cursor = conn.cursor()

                cursor.execute("SELECT COUNT(*) FROM trust_signals")
                total_signals = cursor.fetchone()[0]

                cursor.execute("""
                    SELECT signal_type, COUNT(*) FROM trust_signals
                    GROUP BY signal_type ORDER BY COUNT(*) DESC
                """)
                by_type = dict(cursor.fetchall())

                cursor.execute("""
                    SELECT agent_id, COUNT(*) FROM trust_signals
                    GROUP BY agent_id ORDER BY COUNT(*) DESC LIMIT 10
                """)
                by_agent = dict(cursor.fetchall())

                cursor.execute("""
                    SELECT AVG(trust_score) FROM agent_registry
                    WHERE trust_score IS NOT NULL
                """)
                avg = cursor.fetchone()[0]

                return {
                    "total_signals": total_signals,
                    "by_signal_type": by_type,
                    "by_agent": by_agent,
                    # BUG FIX: was `if avg else 1.0`, which reported 1.0 when
                    # the true average score was 0.0. Only None (no rows) may
                    # fall back to the default.
                    "avg_trust_score": round(avg, 4) if avg is not None else 1.0,
                    "enforcement": "disabled (v2.5 — silent collection only)",
                }

        except Exception as e:
            logger.error("Failed to get trust stats: %s", e)
            return {"total_signals": 0, "error": str(e)}
@@ -0,0 +1,229 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ SuperLocalMemory V2 - Webhook Dispatcher
4
+ Copyright (c) 2026 Varun Pratap Bhardwaj
5
+ Licensed under MIT License
6
+
7
+ Repository: https://github.com/varun369/SuperLocalMemoryV2
8
+ Author: Varun Pratap Bhardwaj (Solution Architect)
9
+
10
+ NOTICE: This software is protected by MIT License.
11
+ Attribution must be preserved in all copies or derivatives.
12
+ """
13
+
14
+ """
15
+ WebhookDispatcher — Delivers events via HTTP POST to configured webhook URLs.
16
+
17
+ Runs on a background thread so webhook delivery never blocks the main event flow.
18
+ Retries failed deliveries with exponential backoff (3 attempts).
19
+
20
+ Security:
21
+ - Only allows http:// and https:// URLs
22
+ - Validates URL format before dispatch
23
+ - 10-second timeout per request
24
+ - No private/internal IP blocking in v2.5 (added in v2.6 with trust enforcement)
25
+ """
26
+
27
import json
import logging
import threading
import time
from datetime import datetime
from queue import Empty, Full, Queue
from typing import Dict, Optional
34
+
35
logger = logging.getLogger("superlocalmemory.webhooks")

# Delivery configuration.
MAX_RETRIES = 3          # total attempts per webhook
RETRY_BACKOFF_BASE = 2   # exponential backoff in seconds: 2, 4, 8
REQUEST_TIMEOUT = 10     # per-request timeout, seconds
MAX_QUEUE_SIZE = 1000    # pending deliveries before new events are dropped

# urllib is stdlib, but guard the import so the module still loads on
# stripped-down runtimes; delivery is simply disabled when unavailable.
try:
    from urllib.request import Request, urlopen
    from urllib.error import URLError, HTTPError
    HTTP_AVAILABLE = True
except ImportError:
    HTTP_AVAILABLE = False
50
+
51
+
52
class WebhookDispatcher:
    """
    Background webhook delivery with retry logic.

    Thread-safe. Enqueues webhook deliveries and processes them on a
    dedicated background thread. Failed deliveries are retried with
    exponential backoff (MAX_RETRIES attempts, RETRY_BACKOFF_BASE ** n
    seconds between them).

    Requires `Full` from the `queue` module (see imports).
    """

    # Named singletons, guarded by a class-level lock.
    _instances: Dict[str, "WebhookDispatcher"] = {}
    _instances_lock = threading.Lock()

    @classmethod
    def get_instance(cls, name: str = "default") -> "WebhookDispatcher":
        """Get or create a singleton WebhookDispatcher under *name*."""
        with cls._instances_lock:
            if name not in cls._instances:
                cls._instances[name] = cls()
            return cls._instances[name]

    @classmethod
    def reset_instance(cls, name: Optional[str] = None):
        """Close and remove one singleton (or all when name is None). Used for testing."""
        with cls._instances_lock:
            if name is None:
                for inst in cls._instances.values():
                    inst.close()
                cls._instances.clear()
            elif name in cls._instances:
                cls._instances[name].close()
                del cls._instances[name]

    def __init__(self):
        # Bounded queue: when full, new events are dropped (see dispatch()).
        self._queue: Queue = Queue(maxsize=MAX_QUEUE_SIZE)
        self._closed = False
        self._stats = {
            "dispatched": 0,
            "succeeded": 0,
            "failed": 0,
            "retries": 0,
        }
        self._stats_lock = threading.Lock()

        # Daemon worker so an un-closed dispatcher never blocks interpreter exit.
        self._worker = threading.Thread(
            target=self._worker_loop,
            name="slm-webhook-worker",
            daemon=True,
        )
        self._worker.start()
        logger.info("WebhookDispatcher started")

    def dispatch(self, event: dict, webhook_url: str):
        """
        Enqueue a webhook delivery.

        Args:
            event: Event dict to send as JSON POST body
            webhook_url: URL to POST to

        Raises:
            ValueError: If webhook_url is invalid
            RuntimeError: If dispatcher is closed
        """
        if self._closed:
            raise RuntimeError("WebhookDispatcher is closed")

        if not webhook_url or not (webhook_url.startswith("http://") or webhook_url.startswith("https://")):
            raise ValueError(f"Invalid webhook URL: {webhook_url}")

        try:
            self._queue.put_nowait({
                "event": event,
                "url": webhook_url,
                "attempt": 0,
                "enqueued_at": datetime.now().isoformat(),
            })
        except Full:
            # BUG FIX: was `except Exception`, which silently swallowed any
            # error as "queue full". Only the expected overflow is dropped;
            # anything else now propagates.
            logger.warning("Webhook queue full, dropping event for %s", webhook_url)
            return
        with self._stats_lock:
            self._stats["dispatched"] += 1

    def _worker_loop(self):
        """Background worker: drains the queue until the shutdown sentinel.

        BUG FIX: the loop condition used to be `while not self._closed`, so
        close() exited the worker without draining queued items, contradicting
        close()'s documented behavior. The worker now keeps draining after
        close() until the queue is empty or the sentinel arrives.
        """
        while True:
            try:
                item = self._queue.get(timeout=1.0)
            except Empty:
                if self._closed:
                    break  # closed and queue drained
                continue

            if item is None:  # Shutdown sentinel
                self._queue.task_done()
                break

            self._deliver(item)
            self._queue.task_done()

    def _deliver(self, item: dict):
        """Attempt to deliver a webhook, retrying with exponential backoff.

        Iterative retry loop (previously implemented via recursion). Stats
        counters are updated under the stats lock.
        """
        event = item["event"]
        url = item["url"]
        attempt = item["attempt"]

        if not HTTP_AVAILABLE:
            logger.error("HTTP library not available, cannot deliver webhook to %s", url)
            with self._stats_lock:
                self._stats["failed"] += 1
            return

        while True:
            try:
                payload = json.dumps({
                    "event": event,
                    "delivered_at": datetime.now().isoformat(),
                    "attempt": attempt + 1,
                    "source": "superlocalmemory",
                    "version": "2.5.0",
                }).encode("utf-8")

                req = Request(
                    url,
                    data=payload,
                    headers={
                        "Content-Type": "application/json",
                        "User-Agent": "SuperLocalMemory/2.5.0",
                        "X-SLM-Event-Type": event.get("event_type", "unknown"),
                    },
                    method="POST",
                )

                with urlopen(req, timeout=REQUEST_TIMEOUT) as resp:
                    status = resp.status
                if 200 <= status < 300:
                    with self._stats_lock:
                        self._stats["succeeded"] += 1
                    logger.debug("Webhook delivered: url=%s, status=%d", url, status)
                    return
                # Non-2xx responses are treated like transport errors below.
                raise HTTPError(url, status, f"HTTP {status}", {}, None)

            except Exception as e:
                logger.warning("Webhook delivery failed (attempt %d/%d): url=%s, error=%s",
                               attempt + 1, MAX_RETRIES, url, e)

                if attempt + 1 >= MAX_RETRIES:
                    with self._stats_lock:
                        self._stats["failed"] += 1
                    logger.error("Webhook permanently failed after %d attempts: url=%s",
                                 MAX_RETRIES, url)
                    return

                # Exponential backoff: 2, 4, 8 seconds.
                time.sleep(RETRY_BACKOFF_BASE ** (attempt + 1))
                with self._stats_lock:
                    self._stats["retries"] += 1
                attempt += 1
                item["attempt"] = attempt

    def get_stats(self) -> dict:
        """Get a snapshot of webhook delivery statistics."""
        with self._stats_lock:
            return dict(self._stats)

    def close(self):
        """Shut down the dispatcher. The worker drains remaining items first."""
        if self._closed:
            return
        self._closed = True
        try:
            # Sentinel wakes the worker promptly. BUG FIX: a blocking put()
            # could hang close() forever on a full queue; if the queue is
            # full, the worker exits via the _closed flag once it drains.
            self._queue.put_nowait(None)
        except Full:
            pass
        if self._worker.is_alive():
            self._worker.join(timeout=5)
        # Read stats under the lock (get_stats) rather than raw _stats.
        logger.info("WebhookDispatcher closed: stats=%s", self.get_stats())

    @property
    def is_closed(self) -> bool:
        # True once close() has been called; dispatch() then raises.
        return self._closed

    @property
    def queue_size(self) -> int:
        # Number of deliveries still waiting in the queue (approximate).
        return self._queue.qsize()