superlocalmemory 2.6.0 → 2.7.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. package/CHANGELOG.md +167 -1803
  2. package/README.md +212 -397
  3. package/bin/slm +179 -3
  4. package/bin/superlocalmemoryv2:learning +4 -0
  5. package/bin/superlocalmemoryv2:patterns +4 -0
  6. package/docs/ACCESSIBILITY.md +291 -0
  7. package/docs/ARCHITECTURE.md +12 -6
  8. package/docs/FRAMEWORK-INTEGRATIONS.md +300 -0
  9. package/docs/MCP-MANUAL-SETUP.md +14 -4
  10. package/install.sh +99 -3
  11. package/mcp_server.py +291 -1
  12. package/package.json +2 -1
  13. package/requirements-learning.txt +12 -0
  14. package/scripts/verify-v27.sh +233 -0
  15. package/skills/slm-show-patterns/SKILL.md +224 -0
  16. package/src/learning/__init__.py +201 -0
  17. package/src/learning/adaptive_ranker.py +826 -0
  18. package/src/learning/cross_project_aggregator.py +866 -0
  19. package/src/learning/engagement_tracker.py +638 -0
  20. package/src/learning/feature_extractor.py +461 -0
  21. package/src/learning/feedback_collector.py +690 -0
  22. package/src/learning/learning_db.py +842 -0
  23. package/src/learning/project_context_manager.py +582 -0
  24. package/src/learning/source_quality_scorer.py +685 -0
  25. package/src/learning/synthetic_bootstrap.py +1047 -0
  26. package/src/learning/tests/__init__.py +0 -0
  27. package/src/learning/tests/test_adaptive_ranker.py +328 -0
  28. package/src/learning/tests/test_aggregator.py +309 -0
  29. package/src/learning/tests/test_feedback_collector.py +295 -0
  30. package/src/learning/tests/test_learning_db.py +606 -0
  31. package/src/learning/tests/test_project_context.py +296 -0
  32. package/src/learning/tests/test_source_quality.py +355 -0
  33. package/src/learning/tests/test_synthetic_bootstrap.py +433 -0
  34. package/src/learning/tests/test_workflow_miner.py +322 -0
  35. package/src/learning/workflow_pattern_miner.py +665 -0
  36. package/ui/index.html +346 -13
  37. package/ui/js/clusters.js +90 -1
  38. package/ui/js/graph-core.js +445 -0
  39. package/ui/js/graph-cytoscape-monolithic-backup.js +1168 -0
  40. package/ui/js/graph-cytoscape.js +1168 -0
  41. package/ui/js/graph-d3-backup.js +32 -0
  42. package/ui/js/graph-filters.js +220 -0
  43. package/ui/js/graph-interactions.js +354 -0
  44. package/ui/js/graph-ui.js +214 -0
  45. package/ui/js/memories.js +52 -0
  46. package/ui/js/modal.js +104 -1
@@ -0,0 +1,638 @@
1
#!/usr/bin/env python3
"""
SuperLocalMemory V2 - Engagement Tracker (v2.7)
Copyright (c) 2026 Varun Pratap Bhardwaj
Licensed under MIT License

Repository: https://github.com/varun369/SuperLocalMemoryV2
Author: Varun Pratap Bhardwaj (Solution Architect)

NOTICE: This software is protected by MIT License.
Attribution must be preserved in all copies or derivatives.

EngagementTracker — Local-only engagement metrics.

Measures how actively the user interacts with the memory system.
All data stays local — NEVER transmitted anywhere.

Capabilities:
- Comprehensive engagement stats (days active, staleness, per-day rates)
- Health status classification (HEALTHY / DECLINING / AT_RISK / INACTIVE)
- Activity recording (delegates to LearningDB.increment_engagement)
- Weekly summary aggregation
- CLI-friendly formatted output for `slm engagement`
- MCP resource exposure (read-only stats)

Data sources:
- memory.db (read-only) — creation dates, total count, source agents
- learning.db (read/write via LearningDB) — feedback counts, patterns,
  engagement_metrics daily rows

Design:
- Thread-safe: each method opens/closes its own connection
- Division-by-zero safe: all ratios default to 0.0 for empty databases
- Graceful degradation: works even if learning.db does not exist yet
"""
# Fix: the original had TWO consecutive module-level string literals; only the
# first becomes __doc__, leaving the descriptive second one as a dead
# expression statement. Both are merged into a single module docstring above.

import json
import logging
import sqlite3
import threading
from datetime import datetime, date, timedelta
from pathlib import Path
from typing import Optional, List, Dict, Any

logger = logging.getLogger("superlocalmemory.learning.engagement")

# Default on-disk locations; overridable per-instance via
# EngagementTracker(memory_db_path=...).
MEMORY_DIR = Path.home() / ".claude-memory"
MEMORY_DB_PATH = MEMORY_DIR / "memory.db"
53
class EngagementTracker:
    """
    Local-only engagement metrics for the SuperLocalMemory system.

    Usage:
        tracker = EngagementTracker()
        stats = tracker.get_engagement_stats()
        print(tracker.format_for_cli())

    Thread-safe: all methods use per-call connections.
    """

    def __init__(
        self,
        memory_db_path: Optional[Path] = None,
        learning_db: Optional[Any] = None,
    ) -> None:
        """
        Initialize EngagementTracker.

        Args:
            memory_db_path: Path to memory.db. Defaults to
                ~/.claude-memory/memory.db. Opened read-only.
            learning_db: A LearningDB instance for reading/writing
                engagement metrics. If None, lazily created on first use.
        """
        self._memory_db_path = (
            Path(memory_db_path) if memory_db_path else MEMORY_DB_PATH
        )
        self._learning_db = learning_db
        # NOTE(review): this lock is never acquired anywhere in this class —
        # presumably reserved for future use; confirm before relying on it
        # for synchronization.
        self._lock = threading.Lock()
        logger.info(
            "EngagementTracker initialized: memory_db=%s",
            self._memory_db_path,
        )

    # ------------------------------------------------------------------
    # LearningDB access (lazy)
    # ------------------------------------------------------------------

    def _get_learning_db(self) -> Optional[Any]:
        """
        Get or lazily create the LearningDB instance.

        Returns None if LearningDB cannot be imported or initialized.
        """
        if self._learning_db is not None:
            return self._learning_db

        try:
            from .learning_db import LearningDB
            self._learning_db = LearningDB()
            return self._learning_db
        except Exception as e:
            # Broad catch is deliberate: missing module, schema error, or
            # unreadable learning.db must all degrade gracefully to None.
            logger.warning("Failed to initialize LearningDB: %s", e)
            return None

    # ------------------------------------------------------------------
    # Memory.db read-only access
    # ------------------------------------------------------------------

    def _open_memory_db(self) -> sqlite3.Connection:
        """
        Open a read-only connection to memory.db.

        Uses URI mode=ro when supported; falls back to regular connection.
        """
        db_str = str(self._memory_db_path)
        try:
            uri = f"file:{db_str}?mode=ro"
            conn = sqlite3.connect(uri, uri=True, timeout=5)
        except (sqlite3.OperationalError, sqlite3.NotSupportedError):
            # NOTE(review): this fallback opens read-write and will CREATE an
            # empty file if the path does not exist; callers currently guard
            # with .exists() first — confirm that invariant holds.
            conn = sqlite3.connect(db_str, timeout=5)
        conn.execute("PRAGMA busy_timeout=3000")
        return conn

    def _get_memory_db_columns(self) -> set:
        """Get available columns in the memories table."""
        if not self._memory_db_path.exists():
            return set()
        try:
            conn = self._open_memory_db()
            try:
                cursor = conn.cursor()
                cursor.execute("PRAGMA table_info(memories)")
                # Column name is field index 1 of each table_info row.
                return {row[1] for row in cursor.fetchall()}
            finally:
                conn.close()
        except sqlite3.Error:
            return set()

    # ------------------------------------------------------------------
    # Core stats
    # ------------------------------------------------------------------

    def get_engagement_stats(self) -> Dict[str, Any]:
        """
        Return a comprehensive engagement report.

        Returns:
            Dict with keys:
                days_active — Days since first memory was created
                days_since_last — Days since most recent activity
                staleness_ratio — days_since_last / days_active (0=active, 1=abandoned)
                total_memories — Total memory count
                memories_per_day — Average memories created per active day
                recalls_per_day — Average recalls per active day (from feedback)
                patterns_learned — Transferable patterns with confidence > 0.6
                feedback_signals — Total feedback count
                health_status — 'HEALTHY', 'DECLINING', 'AT_RISK', 'INACTIVE'
                active_sources — List of tool sources used recently
        """
        # --- Memory.db stats ---
        mem_stats = self._get_memory_stats()

        # --- Learning.db stats ---
        learn_stats = self._get_learning_stats()

        # --- Derived metrics ---
        days_active = mem_stats['days_active']
        days_since_last = mem_stats['days_since_last']
        total_memories = mem_stats['total_memories']

        # Staleness: 0.0 = used today, 1.0 = abandoned
        if days_active > 0:
            staleness_ratio = round(days_since_last / days_active, 4)
        else:
            staleness_ratio = 0.0  # Brand-new user — not stale

        # Cap staleness at 1.0 (can exceed if days_since_last > days_active
        # due to timezone edge cases)
        staleness_ratio = min(staleness_ratio, 1.0)

        # Per-day rates
        if days_active > 0:
            memories_per_day = round(total_memories / days_active, 2)
            # "Recalls" is approximated by feedback-signal count from
            # learning.db, not by actual recall operations.
            recalls_per_day = round(
                learn_stats['feedback_signals'] / days_active, 2
            )
        else:
            memories_per_day = float(total_memories)  # Day 0 — show raw count
            recalls_per_day = 0.0

        health_status = self._compute_health_status(
            staleness_ratio, recalls_per_day
        )

        return {
            'days_active': days_active,
            'days_since_last': days_since_last,
            'staleness_ratio': staleness_ratio,
            'total_memories': total_memories,
            'memories_per_day': memories_per_day,
            'recalls_per_day': recalls_per_day,
            'patterns_learned': learn_stats['patterns_learned'],
            'feedback_signals': learn_stats['feedback_signals'],
            'health_status': health_status,
            'active_sources': mem_stats['active_sources'],
        }

    def _get_memory_stats(self) -> Dict[str, Any]:
        """
        Gather stats from memory.db (read-only).

        Returns dict with: days_active, days_since_last, total_memories,
        active_sources.
        """
        default = {
            'days_active': 0,
            'days_since_last': 0,
            'total_memories': 0,
            'active_sources': [],
        }

        if not self._memory_db_path.exists():
            return default

        available = self._get_memory_db_columns()

        try:
            conn = self._open_memory_db()
            try:
                cursor = conn.cursor()

                # Total memories
                cursor.execute("SELECT COUNT(*) FROM memories")
                total = cursor.fetchone()[0]
                if total == 0:
                    return default

                # Date range
                if 'created_at' in available:
                    cursor.execute(
                        "SELECT MIN(created_at), MAX(created_at) "
                        "FROM memories"
                    )
                    row = cursor.fetchone()
                    first_ts, last_ts = row[0], row[1]

                    first_date = self._parse_date(first_ts)
                    last_date = self._parse_date(last_ts)
                    today = date.today()

                    if first_date and last_date:
                        # Floor days_active at 1 so same-day users don't
                        # produce divide-by-zero rates upstream.
                        days_active = max((today - first_date).days, 1)
                        days_since_last = max((today - last_date).days, 0)
                    else:
                        days_active = 1
                        days_since_last = 0
                else:
                    days_active = 1
                    days_since_last = 0

                # Active sources (created_by field, v2.5+)
                active_sources = []
                if 'created_by' in available:
                    try:
                        cursor.execute(
                            "SELECT DISTINCT created_by FROM memories "
                            "WHERE created_by IS NOT NULL "
                            "AND created_by != '' "
                            "ORDER BY created_by"
                        )
                        active_sources = [
                            row[0] for row in cursor.fetchall()
                        ]
                    except sqlite3.OperationalError:
                        pass  # Column might not be queryable

                return {
                    'days_active': days_active,
                    'days_since_last': days_since_last,
                    'total_memories': total,
                    'active_sources': active_sources,
                }
            finally:
                conn.close()
        except sqlite3.Error as e:
            logger.warning("Failed to read memory stats: %s", e)
            return default

    def _get_learning_stats(self) -> Dict[str, Any]:
        """
        Gather stats from learning.db via LearningDB.

        Returns dict with: patterns_learned, feedback_signals.
        """
        default = {
            'patterns_learned': 0,
            'feedback_signals': 0,
        }

        ldb = self._get_learning_db()
        if ldb is None:
            return default

        try:
            # Feedback signals
            feedback_count = ldb.get_feedback_count()

            # High-confidence patterns
            patterns = ldb.get_transferable_patterns(min_confidence=0.6)
            patterns_count = len(patterns)

            return {
                'patterns_learned': patterns_count,
                'feedback_signals': feedback_count,
            }
        except Exception as e:
            logger.warning("Failed to read learning stats: %s", e)
            return default

    # ------------------------------------------------------------------
    # Health classification
    # ------------------------------------------------------------------

    @staticmethod
    def _compute_health_status(
        staleness_ratio: float,
        recalls_per_day: float,
    ) -> str:
        """
        Classify engagement health.

        Tiers:
            HEALTHY   — staleness < 0.1 AND recalls > 0.5/day
            DECLINING — staleness < 0.3 OR recalls > 0.2/day
            AT_RISK   — staleness < 0.5
            INACTIVE  — staleness >= 0.5

        Args:
            staleness_ratio: 0.0 (active) to 1.0 (abandoned).
            recalls_per_day: Average recall operations per day.

        Returns:
            One of 'HEALTHY', 'DECLINING', 'AT_RISK', 'INACTIVE'.
        """
        if staleness_ratio < 0.1 and recalls_per_day > 0.5:
            return 'HEALTHY'
        # Note the OR: high recall volume keeps a stale user in DECLINING
        # rather than AT_RISK/INACTIVE (intentional per the tier table).
        if staleness_ratio < 0.3 or recalls_per_day > 0.2:
            return 'DECLINING'
        if staleness_ratio < 0.5:
            return 'AT_RISK'
        return 'INACTIVE'

    # ------------------------------------------------------------------
    # Activity recording
    # ------------------------------------------------------------------

    def record_activity(
        self,
        activity_type: str,
        source: Optional[str] = None,
    ) -> None:
        """
        Record an engagement activity event.

        Delegates to LearningDB.increment_engagement() which maintains
        daily engagement_metrics rows.

        Args:
            activity_type: One of 'memory_created', 'recall_performed',
                'feedback_given', 'pattern_updated'.
            source: Source tool identifier (e.g., "claude-desktop",
                "cursor", "cli").
        """
        # Map activity_type to LearningDB metric column names
        metric_map = {
            'memory_created': 'memories_created',
            'recall_performed': 'recalls_performed',
            'feedback_given': 'feedback_signals',
            'pattern_updated': 'patterns_updated',
        }

        metric_type = metric_map.get(activity_type)
        if metric_type is None:
            # Unknown types are logged and dropped, never raised — recording
            # must not break the caller's main flow.
            logger.warning(
                "Unknown activity type: %r (expected one of %s)",
                activity_type,
                list(metric_map.keys()),
            )
            return

        ldb = self._get_learning_db()
        if ldb is None:
            logger.debug(
                "LearningDB unavailable — cannot record activity '%s'",
                activity_type,
            )
            return

        try:
            ldb.increment_engagement(
                metric_type=metric_type,
                count=1,
                source=source,
            )
            logger.debug(
                "Recorded activity: type=%s, source=%s",
                activity_type, source,
            )
        except Exception as e:
            logger.warning("Failed to record activity: %s", e)

    # ------------------------------------------------------------------
    # Weekly summary
    # ------------------------------------------------------------------

    def get_weekly_summary(self) -> Dict[str, Any]:
        """
        Aggregate the last 7 days of engagement_metrics.

        Returns:
            Dict with:
                period_start — ISO date string
                period_end — ISO date string
                days_with_data — Number of days that had engagement rows
                total_memories_created
                total_recalls
                total_feedback
                total_patterns_updated
                avg_memories_per_day
                avg_recalls_per_day
                all_sources — Unique tools used across the week
        """
        ldb = self._get_learning_db()
        default = {
            'period_start': (date.today() - timedelta(days=6)).isoformat(),
            'period_end': date.today().isoformat(),
            'days_with_data': 0,
            'total_memories_created': 0,
            'total_recalls': 0,
            'total_feedback': 0,
            'total_patterns_updated': 0,
            'avg_memories_per_day': 0.0,
            'avg_recalls_per_day': 0.0,
            'all_sources': [],
        }

        if ldb is None:
            return default

        try:
            history = ldb.get_engagement_history(days=7)
        except Exception as e:
            logger.warning("Failed to get engagement history: %s", e)
            return default

        if not history:
            return default

        total_mem = 0
        total_rec = 0
        total_fb = 0
        total_pat = 0
        all_sources: set = set()

        for row in history:
            # `or 0` guards against NULLs stored in the metrics columns.
            total_mem += row.get('memories_created', 0) or 0
            total_rec += row.get('recalls_performed', 0) or 0
            total_fb += row.get('feedback_signals', 0) or 0
            total_pat += row.get('patterns_updated', 0) or 0

            # NOTE(review): active_sources is assumed to be a JSON-encoded
            # list string; a value that is already a list is silently
            # skipped — confirm LearningDB always serializes.
            sources_raw = row.get('active_sources', '[]')
            if isinstance(sources_raw, str):
                try:
                    sources = json.loads(sources_raw)
                    all_sources.update(sources)
                except (json.JSONDecodeError, TypeError):
                    pass

        days_with_data = len(history)

        return {
            'period_start': (date.today() - timedelta(days=6)).isoformat(),
            'period_end': date.today().isoformat(),
            'days_with_data': days_with_data,
            'total_memories_created': total_mem,
            'total_recalls': total_rec,
            'total_feedback': total_fb,
            'total_patterns_updated': total_pat,
            'avg_memories_per_day': (
                round(total_mem / days_with_data, 1)
                if days_with_data > 0 else 0.0
            ),
            'avg_recalls_per_day': (
                round(total_rec / days_with_data, 1)
                if days_with_data > 0 else 0.0
            ),
            'all_sources': sorted(all_sources),
        }

    # ------------------------------------------------------------------
    # CLI formatting
    # ------------------------------------------------------------------

    def format_for_cli(self) -> str:
        """
        Format engagement stats as human-readable CLI output.

        Example:
            Active for: 94 days
            Last activity: 2 days ago
            Memories per day: 3.2
            Recalls per day: 1.8
            Patterns learned: 23
            Engagement: HEALTHY (staleness: 0.02)
        """
        try:
            stats = self.get_engagement_stats()
        except Exception as e:
            # Never raise out of the CLI path — surface the error as text.
            return f"Error computing engagement stats: {e}"

        # Format "last activity" human-friendly
        days_since = stats['days_since_last']
        if days_since == 0:
            last_activity = "today"
        elif days_since == 1:
            last_activity = "yesterday"
        else:
            last_activity = f"{days_since} days ago"

        # Health status with optional color hint for terminals
        health = stats['health_status']
        staleness = stats['staleness_ratio']

        lines = [
            f"Active for: {stats['days_active']} days",
            f"Last activity: {last_activity}",
            f"Total memories: {stats['total_memories']}",
            f"Memories per day: {stats['memories_per_day']}",
            f"Recalls per day: {stats['recalls_per_day']}",
            f"Patterns learned: {stats['patterns_learned']}",
            f"Feedback signals: {stats['feedback_signals']}",
            f"Engagement: {health} (staleness: {staleness:.2f})",
        ]

        if stats['active_sources']:
            lines.append(f"Active sources: {', '.join(stats['active_sources'])}")

        return "\n".join(lines)

    # ------------------------------------------------------------------
    # Utilities
    # ------------------------------------------------------------------

    @staticmethod
    def _parse_date(timestamp: Any) -> Optional[date]:
        """
        Parse a timestamp string into a date object.

        Handles multiple formats from SQLite:
        - '2026-02-16 14:30:00'
        - '2026-02-16T14:30:00'
        - '2026-02-16'
        """
        if timestamp is None:
            return None

        ts = str(timestamp).strip()
        if not ts:
            return None

        # Try ISO formats
        for fmt in (
            "%Y-%m-%d %H:%M:%S",
            "%Y-%m-%dT%H:%M:%S",
            "%Y-%m-%d %H:%M:%S.%f",
            "%Y-%m-%dT%H:%M:%S.%f",
            "%Y-%m-%d",
        ):
            try:
                return datetime.strptime(ts, fmt).date()
            except ValueError:
                continue

        # Last resort: try to parse just the date portion
        try:
            return datetime.strptime(ts[:10], "%Y-%m-%d").date()
        except (ValueError, IndexError):
            logger.debug("Unparseable timestamp: %r", timestamp)
            return None
596
+
597
# ======================================================================
# Standalone testing
# ======================================================================

if __name__ == "__main__":
    # Verbose logging so lazy LearningDB creation and DB access are visible.
    logging.basicConfig(
        level=logging.DEBUG,
        format="%(asctime)s [%(name)s] %(levelname)s: %(message)s",
    )

    tracker = EngagementTracker()

    # Dump the raw stats dict, the human-readable CLI view, and the
    # 7-day aggregate, then sanity-check the health classifier.
    print("=== Engagement Stats ===")
    for key, value in tracker.get_engagement_stats().items():
        print(f"  {key}: {value}")

    print("\n=== CLI Output ===")
    print(tracker.format_for_cli())

    print("\n=== Weekly Summary ===")
    for key, value in tracker.get_weekly_summary().items():
        print(f"  {key}: {value}")

    print("\n=== Health Classification Tests ===")
    checks = (
        (0.02, 1.5, "HEALTHY"),
        (0.05, 0.3, "DECLINING"),
        (0.25, 0.3, "DECLINING"),
        (0.35, 0.1, "AT_RISK"),
        (0.60, 0.0, "INACTIVE"),
        (0.0, 0.0, "DECLINING"),   # Active but no recalls
        (0.99, 5.0, "DECLINING"),  # High staleness but high recall
    )
    for s_ratio, rate, want in checks:
        got = EngagementTracker._compute_health_status(s_ratio, rate)
        verdict = "PASS" if got == want else "FAIL"
        print(
            f"  [{verdict}] staleness={s_ratio:.2f}, recalls={rate:.1f}"
            f" -> {got} (expected {want})"
        )