superlocalmemory 2.7.5 → 2.8.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (174) hide show
  1. package/CHANGELOG.md +120 -155
  2. package/README.md +115 -89
  3. package/api_server.py +2 -12
  4. package/docs/PATTERN-LEARNING.md +64 -199
  5. package/docs/example_graph_usage.py +4 -6
  6. package/install.ps1 +226 -0
  7. package/install.sh +59 -0
  8. package/mcp_server.py +83 -7
  9. package/package.json +3 -10
  10. package/scripts/generate-thumbnails.py +3 -5
  11. package/skills/slm-build-graph/SKILL.md +1 -1
  12. package/skills/slm-list-recent/SKILL.md +1 -1
  13. package/skills/slm-recall/SKILL.md +1 -1
  14. package/skills/slm-remember/SKILL.md +1 -1
  15. package/skills/slm-show-patterns/SKILL.md +1 -1
  16. package/skills/slm-status/SKILL.md +1 -1
  17. package/skills/slm-switch-profile/SKILL.md +1 -1
  18. package/src/agent_registry.py +7 -18
  19. package/src/auth_middleware.py +3 -5
  20. package/src/auto_backup.py +3 -7
  21. package/src/behavioral/__init__.py +49 -0
  22. package/src/behavioral/behavioral_listener.py +203 -0
  23. package/src/behavioral/behavioral_patterns.py +275 -0
  24. package/src/behavioral/cross_project_transfer.py +206 -0
  25. package/src/behavioral/outcome_inference.py +194 -0
  26. package/src/behavioral/outcome_tracker.py +193 -0
  27. package/src/behavioral/tests/__init__.py +4 -0
  28. package/src/behavioral/tests/test_behavioral_integration.py +108 -0
  29. package/src/behavioral/tests/test_behavioral_patterns.py +150 -0
  30. package/src/behavioral/tests/test_cross_project_transfer.py +142 -0
  31. package/src/behavioral/tests/test_mcp_behavioral.py +139 -0
  32. package/src/behavioral/tests/test_mcp_report_outcome.py +117 -0
  33. package/src/behavioral/tests/test_outcome_inference.py +107 -0
  34. package/src/behavioral/tests/test_outcome_tracker.py +96 -0
  35. package/src/cache_manager.py +4 -6
  36. package/src/compliance/__init__.py +48 -0
  37. package/src/compliance/abac_engine.py +149 -0
  38. package/src/compliance/abac_middleware.py +116 -0
  39. package/src/compliance/audit_db.py +215 -0
  40. package/src/compliance/audit_logger.py +148 -0
  41. package/src/compliance/retention_manager.py +289 -0
  42. package/src/compliance/retention_scheduler.py +186 -0
  43. package/src/compliance/tests/__init__.py +4 -0
  44. package/src/compliance/tests/test_abac_enforcement.py +95 -0
  45. package/src/compliance/tests/test_abac_engine.py +124 -0
  46. package/src/compliance/tests/test_abac_mcp_integration.py +118 -0
  47. package/src/compliance/tests/test_audit_db.py +123 -0
  48. package/src/compliance/tests/test_audit_logger.py +98 -0
  49. package/src/compliance/tests/test_mcp_audit.py +128 -0
  50. package/src/compliance/tests/test_mcp_retention_policy.py +125 -0
  51. package/src/compliance/tests/test_retention_manager.py +131 -0
  52. package/src/compliance/tests/test_retention_scheduler.py +99 -0
  53. package/src/db_connection_manager.py +2 -12
  54. package/src/embedding_engine.py +61 -669
  55. package/src/embeddings/__init__.py +47 -0
  56. package/src/embeddings/cache.py +70 -0
  57. package/src/embeddings/cli.py +113 -0
  58. package/src/embeddings/constants.py +47 -0
  59. package/src/embeddings/database.py +91 -0
  60. package/src/embeddings/engine.py +247 -0
  61. package/src/embeddings/model_loader.py +145 -0
  62. package/src/event_bus.py +3 -13
  63. package/src/graph/__init__.py +36 -0
  64. package/src/graph/build_helpers.py +74 -0
  65. package/src/graph/cli.py +87 -0
  66. package/src/graph/cluster_builder.py +188 -0
  67. package/src/graph/cluster_summary.py +148 -0
  68. package/src/graph/constants.py +47 -0
  69. package/src/graph/edge_builder.py +162 -0
  70. package/src/graph/entity_extractor.py +95 -0
  71. package/src/graph/graph_core.py +226 -0
  72. package/src/graph/graph_search.py +231 -0
  73. package/src/graph/hierarchical.py +207 -0
  74. package/src/graph/schema.py +99 -0
  75. package/src/graph_engine.py +45 -1451
  76. package/src/hnsw_index.py +3 -7
  77. package/src/hybrid_search.py +36 -683
  78. package/src/learning/__init__.py +27 -12
  79. package/src/learning/adaptive_ranker.py +50 -12
  80. package/src/learning/cross_project_aggregator.py +2 -12
  81. package/src/learning/engagement_tracker.py +2 -12
  82. package/src/learning/feature_extractor.py +175 -43
  83. package/src/learning/feedback_collector.py +7 -12
  84. package/src/learning/learning_db.py +180 -12
  85. package/src/learning/project_context_manager.py +2 -12
  86. package/src/learning/source_quality_scorer.py +2 -12
  87. package/src/learning/synthetic_bootstrap.py +2 -12
  88. package/src/learning/tests/__init__.py +2 -0
  89. package/src/learning/tests/test_adaptive_ranker.py +2 -6
  90. package/src/learning/tests/test_adaptive_ranker_v28.py +60 -0
  91. package/src/learning/tests/test_aggregator.py +2 -6
  92. package/src/learning/tests/test_auto_retrain_v28.py +35 -0
  93. package/src/learning/tests/test_e2e_ranking_v28.py +82 -0
  94. package/src/learning/tests/test_feature_extractor_v28.py +93 -0
  95. package/src/learning/tests/test_feedback_collector.py +2 -6
  96. package/src/learning/tests/test_learning_db.py +2 -6
  97. package/src/learning/tests/test_learning_db_v28.py +110 -0
  98. package/src/learning/tests/test_learning_init_v28.py +48 -0
  99. package/src/learning/tests/test_outcome_signals.py +48 -0
  100. package/src/learning/tests/test_project_context.py +2 -6
  101. package/src/learning/tests/test_schema_migration.py +319 -0
  102. package/src/learning/tests/test_signal_inference.py +11 -13
  103. package/src/learning/tests/test_source_quality.py +2 -6
  104. package/src/learning/tests/test_synthetic_bootstrap.py +3 -7
  105. package/src/learning/tests/test_workflow_miner.py +2 -6
  106. package/src/learning/workflow_pattern_miner.py +2 -12
  107. package/src/lifecycle/__init__.py +54 -0
  108. package/src/lifecycle/bounded_growth.py +239 -0
  109. package/src/lifecycle/compaction_engine.py +226 -0
  110. package/src/lifecycle/lifecycle_engine.py +302 -0
  111. package/src/lifecycle/lifecycle_evaluator.py +225 -0
  112. package/src/lifecycle/lifecycle_scheduler.py +130 -0
  113. package/src/lifecycle/retention_policy.py +285 -0
  114. package/src/lifecycle/tests/__init__.py +4 -0
  115. package/src/lifecycle/tests/test_bounded_growth.py +193 -0
  116. package/src/lifecycle/tests/test_compaction.py +179 -0
  117. package/src/lifecycle/tests/test_lifecycle_engine.py +137 -0
  118. package/src/lifecycle/tests/test_lifecycle_evaluation.py +177 -0
  119. package/src/lifecycle/tests/test_lifecycle_scheduler.py +127 -0
  120. package/src/lifecycle/tests/test_lifecycle_search.py +109 -0
  121. package/src/lifecycle/tests/test_mcp_compact.py +149 -0
  122. package/src/lifecycle/tests/test_mcp_lifecycle_status.py +114 -0
  123. package/src/lifecycle/tests/test_retention_policy.py +162 -0
  124. package/src/mcp_tools_v28.py +280 -0
  125. package/src/memory-profiles.py +2 -12
  126. package/src/memory-reset.py +2 -12
  127. package/src/memory_compression.py +2 -12
  128. package/src/memory_store_v2.py +76 -20
  129. package/src/migrate_v1_to_v2.py +2 -12
  130. package/src/pattern_learner.py +29 -975
  131. package/src/patterns/__init__.py +24 -0
  132. package/src/patterns/analyzers.py +247 -0
  133. package/src/patterns/learner.py +267 -0
  134. package/src/patterns/scoring.py +167 -0
  135. package/src/patterns/store.py +223 -0
  136. package/src/patterns/terminology.py +138 -0
  137. package/src/provenance_tracker.py +4 -14
  138. package/src/query_optimizer.py +4 -6
  139. package/src/rate_limiter.py +2 -6
  140. package/src/search/__init__.py +20 -0
  141. package/src/search/cli.py +77 -0
  142. package/src/search/constants.py +26 -0
  143. package/src/search/engine.py +239 -0
  144. package/src/search/fusion.py +122 -0
  145. package/src/search/index_loader.py +112 -0
  146. package/src/search/methods.py +162 -0
  147. package/src/search_engine_v2.py +4 -6
  148. package/src/setup_validator.py +7 -13
  149. package/src/subscription_manager.py +2 -12
  150. package/src/tree/__init__.py +59 -0
  151. package/src/tree/builder.py +183 -0
  152. package/src/tree/nodes.py +196 -0
  153. package/src/tree/queries.py +252 -0
  154. package/src/tree/schema.py +76 -0
  155. package/src/tree_manager.py +10 -711
  156. package/src/trust/__init__.py +45 -0
  157. package/src/trust/constants.py +66 -0
  158. package/src/trust/queries.py +157 -0
  159. package/src/trust/schema.py +95 -0
  160. package/src/trust/scorer.py +299 -0
  161. package/src/trust/signals.py +95 -0
  162. package/src/trust_scorer.py +39 -697
  163. package/src/webhook_dispatcher.py +2 -12
  164. package/ui/app.js +1 -1
  165. package/ui/index.html +3 -0
  166. package/ui/js/agents.js +1 -1
  167. package/ui/js/core.js +21 -5
  168. package/ui/js/profiles.js +29 -7
  169. package/ui_server.py +2 -14
  170. package/ATTRIBUTION.md +0 -140
  171. package/docs/ARCHITECTURE-V2.5.md +0 -190
  172. package/docs/GRAPH-ENGINE.md +0 -503
  173. package/docs/architecture-diagram.drawio +0 -405
  174. package/docs/plans/2026-02-13-benchmark-suite.md +0 -1349
@@ -0,0 +1,194 @@
1
+ # SPDX-License-Identifier: MIT
2
+ # Copyright (c) 2026 SuperLocalMemory (superlocalmemory.com)
3
+ """Implicit outcome detection from recall behavior patterns.
4
+
5
+ Pure logic module — no database, no I/O. Takes recall events and
6
+ returns inference results. The caller (EventBus integration) passes
7
+ these to OutcomeTracker for persistence.
8
+
9
+ Inference rules (checked in priority order, first match wins per recall):
10
+ 1. Deletion of recalled memory within 60 min -> failure, confidence 0.0
11
+ 2. Usage signal "mcp_used_high" within 5 min -> success, confidence 0.8
12
+ 3. Usage signal cross-tool within 5 min -> success, confidence 0.7
13
+ 4. Rapid-fire: 3+ recalls in 2 min window -> failure, confidence 0.1
14
+ 5. Different-query recall within 2 min -> failure, confidence 0.2
15
+ 6. No re-query for 10+ min elapsed -> success, confidence 0.6
16
+ 7. Otherwise -> not yet inferrable (keep)
17
+ """
18
+ from datetime import datetime, timedelta
19
+ from typing import Dict, List, Optional
20
+
21
+
22
# ── Thresholds (seconds) ─────────────────────────────────────────────
# Tuning knobs for the rules applied in OutcomeInference._evaluate.
# Values mirror the windows documented in the module docstring above.
_DELETION_WINDOW = 60 * 60  # 60 min — rule 1: deletion after recall
_USAGE_WINDOW = 5 * 60  # 5 min — rules 2/3: usage signal after recall
_RAPID_FIRE_WINDOW = 2 * 60  # 2 min — rule 4: burst-of-recalls window
_RAPID_FIRE_COUNT = 3  # rule 4: recalls (incl. current) to count as a burst
_REQUERY_WINDOW = 2 * 60  # 2 min — rule 5: different-query re-recall
_QUIET_WINDOW = 10 * 60  # 10 min — rule 6: silence implies success
29
+
30
+
31
class OutcomeInference:
    """Infer implicit success/failure from post-recall user behavior.

    Buffers recall, usage, and deletion events in memory; on
    ``infer_outcomes`` it applies the rules listed in the module
    docstring, first match wins per recall. Pure logic: no persistence,
    no I/O. Not synchronized — callers must serialize access if this is
    shared across threads.
    """

    def __init__(self) -> None:
        # Pending events awaiting inference; "ts" values are datetimes.
        self._recalls: List[Dict] = []  # {query, memory_ids, ts}
        self._usages: List[Dict] = []  # {query, signal, ts}
        self._deletions: List[Dict] = []  # {memory_id, ts}

    # ── Recording API ────────────────────────────────────────────────

    def record_recall(
        self,
        query: str,
        memory_ids: List[int],
        timestamp: Optional[datetime] = None,
    ) -> None:
        """Buffer a recall event.

        Args:
            query: Search query that produced the recall.
            memory_ids: IDs of the memories returned (copied defensively).
            timestamp: Event time; defaults to naive local
                ``datetime.now()`` — mixing naive and tz-aware datetimes
                across record_* calls would break the arithmetic in
                ``_evaluate``.
        """
        self._recalls.append({
            "query": query,
            "memory_ids": list(memory_ids),
            "ts": timestamp or datetime.now(),
        })

    def record_usage(
        self,
        query: str,
        signal: str,
        timestamp: Optional[datetime] = None,
    ) -> None:
        """Buffer a post-recall usage signal.

        Args:
            query: Query the usage is associated with (matched exactly
                against recall queries in rules 2 and 3).
            signal: Signal name, e.g. "mcp_used_high" or
                "implicit_positive_cross_tool".
            timestamp: Event time; defaults to ``datetime.now()``.
        """
        self._usages.append({
            "query": query,
            "signal": signal,
            "ts": timestamp or datetime.now(),
        })

    def record_deletion(
        self,
        memory_id: int,
        timestamp: Optional[datetime] = None,
    ) -> None:
        """Buffer a memory deletion event (feeds rule 1).

        Args:
            memory_id: ID of the deleted memory.
            timestamp: Event time; defaults to ``datetime.now()``.
        """
        self._deletions.append({
            "memory_id": memory_id,
            "ts": timestamp or datetime.now(),
        })

    # ── Inference engine ─────────────────────────────────────────────

    def infer_outcomes(self, now: Optional[datetime] = None) -> List[Dict]:
        """Process buffered events, apply rules, return inferences.

        Recalls that match a rule are removed from the buffer; recalls
        that are not yet inferrable remain for a later call. Usage and
        deletion buffers are cleared unconditionally afterwards, so a
        usage/deletion event only gets one chance to match the recalls
        buffered at the time of this call.

        Args:
            now: Reference time for elapsed-time rules; defaults to
                ``datetime.now()``.

        Returns:
            List of dicts with keys: outcome, confidence, memory_ids,
            reason (see ``_result``).
        """
        now = now or datetime.now()
        results: List[Dict] = []
        remaining: List[Dict] = []

        for recall in self._recalls:
            result = self._evaluate(recall, now)
            if result is not None:
                results.append(result)
            else:
                remaining.append(recall)

        # Clear processed; keep un-inferrable recalls
        self._recalls = remaining
        # Consumed usages and deletions are cleared entirely
        self._usages.clear()
        self._deletions.clear()

        return results

    # ── Private rule evaluation ──────────────────────────────────────

    def _evaluate(self, recall: Dict, now: datetime) -> Optional[Dict]:
        """Apply rules in priority order. First match wins.

        Returns a result dict (see ``_result``) or None when no rule
        fires yet (the caller keeps the recall buffered).
        """
        query = recall["query"]
        mem_ids = recall["memory_ids"]
        ts = recall["ts"]
        elapsed = (now - ts).total_seconds()

        # Rule 1: Deletion of recalled memory within 60 min.
        # Strongest negative signal; reported with confidence 0.0 per the
        # module spec. Only deletions at-or-after the recall count.
        for d in self._deletions:
            if d["memory_id"] in mem_ids:
                delta = (d["ts"] - ts).total_seconds()
                if 0 <= delta <= _DELETION_WINDOW:
                    return self._result(
                        "failure", 0.0, mem_ids,
                        "memory_deleted_after_recall",
                    )

        # Rule 2: Usage signal "mcp_used_high" within 5 min.
        # Query string must match the recall's query exactly.
        for u in self._usages:
            if u["query"] == query and u["signal"] == "mcp_used_high":
                delta = (u["ts"] - ts).total_seconds()
                if 0 <= delta <= _USAGE_WINDOW:
                    return self._result(
                        "success", 0.8, mem_ids,
                        "mcp_used_high_after_recall",
                    )

        # Rule 3: Cross-tool usage within 5 min (weaker success signal).
        for u in self._usages:
            if u["query"] == query and u["signal"] == "implicit_positive_cross_tool":
                delta = (u["ts"] - ts).total_seconds()
                if 0 <= delta <= _USAGE_WINDOW:
                    return self._result(
                        "success", 0.7, mem_ids,
                        "cross_tool_access_after_recall",
                    )

        # Rule 4: Rapid-fire — 3+ recalls within 2 min window.
        # NOTE(review): the check spans ts ± _RAPID_FIRE_WINDOW, i.e. an
        # effective 4-minute span, wider than the "2 min window" in the
        # module docstring — confirm which is intended.
        window_start = ts - timedelta(seconds=_RAPID_FIRE_WINDOW)
        nearby = [
            r for r in self._recalls
            if r is not recall and window_start <= r["ts"] <= ts + timedelta(seconds=_RAPID_FIRE_WINDOW)
        ]
        # Count total including this recall
        if len(nearby) + 1 >= _RAPID_FIRE_COUNT:
            return self._result(
                "failure", 0.1, mem_ids,
                "rapid_fire_queries",
            )

        # Rule 5: Different-query recall within 2 min.
        # NOTE(review): abs() makes this fire for a different-query recall
        # either before OR after this one, although the reason string
        # implies a *subsequent* requery — confirm the bidirectional match
        # is intended.
        for r in self._recalls:
            if r is recall:
                continue
            if r["query"] != query:
                delta = abs((r["ts"] - ts).total_seconds())
                if delta <= _REQUERY_WINDOW:
                    return self._result(
                        "failure", 0.2, mem_ids,
                        "immediate_requery_different_terms",
                    )

        # Rule 6: 10+ min elapsed with no subsequent activity.
        # (Reached only if no earlier rule matched, so "quiet" here means
        # no matching usage/deletion and no conflicting recall survived
        # the checks above.)
        if elapsed >= _QUIET_WINDOW:
            return self._result(
                "success", 0.6, mem_ids,
                "no_requery_after_recall",
            )

        # Rule 7: Not yet inferrable — keep the recall buffered.
        return None

    @staticmethod
    def _result(
        outcome: str,
        confidence: float,
        memory_ids: List[int],
        reason: str,
    ) -> Dict:
        """Build the inference result dict consumed by OutcomeTracker."""
        return {
            "outcome": outcome,
            "confidence": confidence,
            "memory_ids": memory_ids,
            "reason": reason,
        }
@@ -0,0 +1,193 @@
1
+ # SPDX-License-Identifier: MIT
2
+ # Copyright (c) 2026 SuperLocalMemory (superlocalmemory.com)
3
+ """Explicit + implicit action outcome recording.
4
+
5
+ Records what happens AFTER memories are recalled — success, failure,
6
+ or partial outcomes. Self-contained: creates its own table via
7
+ CREATE TABLE IF NOT EXISTS so no external migration is needed.
8
+
9
+ Part of SLM v2.8 Behavioral Learning Engine.
10
+ """
11
+ import json
12
+ import sqlite3
13
+ import threading
14
+ from typing import Dict, List, Optional, Any
15
+
16
+
17
class OutcomeTracker:
    """Records action outcomes for behavioral learning.

    Each outcome links one or more memory IDs to an outcome label
    (success / failure / partial) with optional context metadata.
    Confidence defaults to 0.9 for explicit (user-reported) outcomes.

    Self-contained: creates its own ``action_outcomes`` table via
    CREATE TABLE IF NOT EXISTS. Database access is serialized through
    an internal lock; each operation opens a short-lived connection.
    """

    # Valid outcome labels; record_outcome rejects anything else.
    OUTCOMES = ("success", "failure", "partial")

    # Known action categories (informational; not enforced on insert —
    # the schema falls back to 'other' only when the column is omitted).
    ACTION_TYPES = (
        "code_written",
        "decision_made",
        "debug_resolved",
        "architecture_chosen",
        "other",
    )

    _CREATE_TABLE = """
        CREATE TABLE IF NOT EXISTS action_outcomes (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            memory_ids TEXT NOT NULL,
            outcome TEXT NOT NULL,
            action_type TEXT DEFAULT 'other',
            context TEXT DEFAULT '{}',
            confidence REAL DEFAULT 0.9,
            agent_id TEXT DEFAULT 'user',
            project TEXT,
            profile TEXT DEFAULT 'default',
            created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
        )
    """

    def __init__(self, db_path: Optional[str] = None):
        """Create a tracker bound to *db_path*.

        Args:
            db_path: SQLite database file path. If omitted, the tracker
                is inert — any read/write call raises ValueError.
        """
        self._db_path = db_path
        self._lock = threading.Lock()
        if db_path:
            self._ensure_table()

    # ------------------------------------------------------------------
    # Public API
    # ------------------------------------------------------------------

    def record_outcome(
        self,
        memory_ids: List[int],
        outcome: str,
        action_type: str = "other",
        context: Optional[Dict[str, Any]] = None,
        confidence: float = 0.9,
        agent_id: str = "user",
        project: Optional[str] = None,
    ) -> Optional[int]:
        """Record an action outcome against one or more memories.

        Args:
            memory_ids: List of memory IDs involved in this outcome.
            outcome: One of OUTCOMES ("success", "failure", "partial").
            action_type: Category of the action taken.
            context: Arbitrary metadata dict (stored as JSON).
            confidence: Confidence in the outcome label (default 0.9).
            agent_id: Identifier for the reporting agent.
            project: Optional project scope.

        Returns:
            The row ID of the inserted outcome, or None if validation fails.
        """
        if outcome not in self.OUTCOMES:
            return None

        context_json = json.dumps(context or {})
        memory_ids_json = json.dumps(memory_ids)

        with self._lock:
            conn = self._connect()
            try:
                cur = conn.execute(
                    """INSERT INTO action_outcomes
                       (memory_ids, outcome, action_type, context,
                        confidence, agent_id, project)
                       VALUES (?, ?, ?, ?, ?, ?, ?)""",
                    (
                        memory_ids_json,
                        outcome,
                        action_type,
                        context_json,
                        confidence,
                        agent_id,
                        project,
                    ),
                )
                conn.commit()
                return cur.lastrowid
            finally:
                conn.close()

    def get_outcomes(
        self,
        memory_id: Optional[int] = None,
        project: Optional[str] = None,
        limit: int = 100,
    ) -> List[Dict[str, Any]]:
        """Query recorded outcomes with optional filters.

        Args:
            memory_id: If given, return only outcomes that include this
                memory ID in their memory_ids list.
            project: If given, filter by project scope.
            limit: Maximum rows to return (default 100).

        Returns:
            List of outcome dicts (newest first) with deserialized
            memory_ids and context.
        """
        with self._lock:
            conn = self._connect()
            try:
                query = "SELECT * FROM action_outcomes WHERE 1=1"
                params: List[Any] = []

                if project is not None:
                    query += " AND project = ?"
                    params.append(project)

                query += " ORDER BY created_at DESC"

                # memory_ids is a JSON array column, so the memory_id
                # filter must happen in Python. Bug fix: apply the SQL
                # LIMIT only when no memory_id filter is requested —
                # otherwise matches past the first `limit` rows were
                # silently dropped (which also skewed get_success_rate).
                if memory_id is None:
                    query += " LIMIT ?"
                    params.append(limit)

                rows = conn.execute(query, params).fetchall()
                results = [self._row_to_dict(r) for r in rows]

                if memory_id is not None:
                    results = [
                        r for r in results if memory_id in r["memory_ids"]
                    ][:limit]

                return results
            finally:
                conn.close()

    def get_success_rate(self, memory_id: int) -> float:
        """Calculate success rate for a specific memory.

        Counts outcomes where memory_id appears in memory_ids.
        Returns success count / total count (rounded to 3 places),
        or 0.0 if no outcomes.
        """
        outcomes = self.get_outcomes(memory_id=memory_id)
        if not outcomes:
            return 0.0
        successes = sum(1 for o in outcomes if o["outcome"] == "success")
        return round(successes / len(outcomes), 3)

    # ------------------------------------------------------------------
    # Internal helpers
    # ------------------------------------------------------------------

    def _connect(self) -> sqlite3.Connection:
        """Open a connection with row factory enabled.

        Raises:
            ValueError: If the tracker was constructed without a db_path
                (previously this surfaced as an opaque TypeError from
                sqlite3.connect(None)).
        """
        if not self._db_path:
            raise ValueError("OutcomeTracker was created without a db_path")
        conn = sqlite3.connect(self._db_path)
        conn.row_factory = sqlite3.Row
        return conn

    def _ensure_table(self) -> None:
        """Create the action_outcomes table if it doesn't exist."""
        conn = self._connect()
        try:
            conn.execute(self._CREATE_TABLE)
            conn.commit()
        finally:
            conn.close()

    @staticmethod
    def _row_to_dict(row: sqlite3.Row) -> Dict[str, Any]:
        """Convert a sqlite3.Row into a plain dict with parsed JSON fields."""
        d = dict(row)
        d["memory_ids"] = json.loads(d.get("memory_ids", "[]"))
        ctx = d.get("context", "{}")
        d["context"] = json.loads(ctx) if isinstance(ctx, str) else ctx
        return d
@@ -0,0 +1,4 @@
1
+ # SPDX-License-Identifier: MIT
2
+ # Copyright (c) 2026 SuperLocalMemory (superlocalmemory.com)
3
+ """Tests for behavioral learning engine.
4
+ """
@@ -0,0 +1,108 @@
1
+ # SPDX-License-Identifier: MIT
2
+ # Copyright (c) 2026 SuperLocalMemory (superlocalmemory.com)
3
+ """Tests for behavioral engine EventBus integration.
4
+ """
5
+ import sqlite3
6
+ import tempfile
7
+ import os
8
+ import sys
9
+ from datetime import datetime
10
+ from pathlib import Path
11
+ from unittest.mock import MagicMock, patch
12
+
13
+ sys.path.insert(0, str(Path(__file__).resolve().parent.parent.parent))
14
+
15
+
16
class TestBehavioralIntegration:
    """Behavioral engine wiring against the EventBus event contract."""

    def setup_method(self):
        self.tmp_dir = tempfile.mkdtemp()
        self.db_path = os.path.join(self.tmp_dir, "learning.db")

    def teardown_method(self):
        import shutil
        shutil.rmtree(self.tmp_dir, ignore_errors=True)

    @staticmethod
    def _event(event_type, memory_id, payload):
        # Minimal EventBus-shaped event envelope.
        return {
            "event_type": event_type,
            "memory_id": memory_id,
            "payload": payload,
            "timestamp": datetime.now().isoformat(),
        }

    def test_create_behavioral_listener(self):
        """BehavioralListener can be instantiated."""
        from behavioral.behavioral_listener import BehavioralListener
        assert BehavioralListener(self.db_path) is not None

    def test_listener_handles_recall_event(self):
        """A memory.recalled event is processed without raising."""
        from behavioral.behavioral_listener import BehavioralListener
        subject = BehavioralListener(self.db_path)
        payload = {"query": "test query", "memory_ids": [1, 2]}
        subject.handle_event(self._event("memory.recalled", 1, payload))
        assert subject.events_processed >= 1

    def test_listener_ignores_irrelevant_events(self):
        """Non-recall events do not bump the recall counter."""
        from behavioral.behavioral_listener import BehavioralListener
        subject = BehavioralListener(self.db_path)
        subject.handle_event(self._event("memory.created", 1, {}))
        assert subject.recall_events_processed == 0

    def test_listener_handles_deletion_event(self):
        """Deletion events are recorded for later inference."""
        from behavioral.behavioral_listener import BehavioralListener
        subject = BehavioralListener(self.db_path)
        subject.handle_event(self._event("memory.deleted", 5, {}))
        assert subject.deletion_events_processed >= 1

    def test_listener_tracks_usage_signals(self):
        """Usage signals riding on recall events are accepted."""
        from behavioral.behavioral_listener import BehavioralListener
        subject = BehavioralListener(self.db_path)
        payload = {"query": "test", "memory_ids": [1], "signal": "mcp_used_high"}
        subject.handle_event(self._event("memory.recalled", 1, payload))
        assert subject.events_processed >= 1

    def test_graceful_degradation_no_eventbus(self):
        """Registration never crashes, even without an EventBus."""
        from behavioral.behavioral_listener import BehavioralListener
        subject = BehavioralListener(self.db_path)
        # Whether registration succeeds depends on the test environment;
        # the contract is only that a boolean comes back.
        assert isinstance(subject.register_with_eventbus(), bool)

    def test_pattern_extraction_threshold(self):
        """The outcome-count extraction threshold is configurable."""
        from behavioral.behavioral_listener import BehavioralListener
        subject = BehavioralListener(self.db_path, extraction_threshold=5)
        assert subject.extraction_threshold == 5

    def test_get_status(self):
        """get_status exposes counters and registration state."""
        from behavioral.behavioral_listener import BehavioralListener
        report = BehavioralListener(self.db_path).get_status()
        for key in ("events_processed", "recall_events_processed", "registered"):
            assert key in report
@@ -0,0 +1,150 @@
1
+ # SPDX-License-Identifier: MIT
2
+ # Copyright (c) 2026 SuperLocalMemory (superlocalmemory.com)
3
+ """Tests for behavioral pattern extraction from outcomes.
4
+ """
5
+ import sqlite3
6
+ import tempfile
7
+ import os
8
+ import sys
9
+ import json
10
+ from pathlib import Path
11
+
12
+ sys.path.insert(0, str(Path(__file__).resolve().parent.parent.parent))
13
+
14
+
15
class TestBehavioralPatterns:
    """Pattern extraction from recorded action outcomes."""

    # (outcome, project, action_type, first_memory_id, row_count) —
    # project_a: 8 success / 2 failure -> 80%; project_b: 2 / 8 -> 20%.
    _SEED = (
        ("success", "project_a", "code_written", 1, 8),
        ("failure", "project_a", "code_written", 20, 2),
        ("success", "project_b", "debug_resolved", 30, 2),
        ("failure", "project_b", "debug_resolved", 40, 8),
    )

    def setup_method(self):
        self.tmp_dir = tempfile.mkdtemp()
        self.db_path = os.path.join(self.tmp_dir, "learning.db")
        conn = sqlite3.connect(self.db_path)
        # Schema produced by OutcomeTracker in production.
        conn.execute("""
            CREATE TABLE IF NOT EXISTS action_outcomes (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                memory_ids TEXT NOT NULL,
                outcome TEXT NOT NULL,
                action_type TEXT DEFAULT 'other',
                context TEXT DEFAULT '{}',
                confidence REAL DEFAULT 0.9,
                agent_id TEXT DEFAULT 'user',
                project TEXT,
                profile TEXT DEFAULT 'default',
                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
            )
        """)
        # Destination table for saved patterns.
        conn.execute("""
            CREATE TABLE IF NOT EXISTS behavioral_patterns (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                pattern_type TEXT NOT NULL,
                pattern_key TEXT NOT NULL,
                success_rate REAL DEFAULT 0.0,
                evidence_count INTEGER DEFAULT 0,
                confidence REAL DEFAULT 0.0,
                metadata TEXT DEFAULT '{}',
                project TEXT,
                profile TEXT DEFAULT 'default',
                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
            )
        """)
        rows = []
        for outcome, project, action, first_id, count in self._SEED:
            rows.extend(
                (json.dumps([first_id + offset]), outcome, project, action)
                for offset in range(count)
            )
        conn.executemany(
            "INSERT INTO action_outcomes (memory_ids, outcome, project, action_type) VALUES (?, ?, ?, ?)",
            rows,
        )
        conn.commit()
        conn.close()

    def teardown_method(self):
        import shutil
        shutil.rmtree(self.tmp_dir, ignore_errors=True)

    @staticmethod
    def _project_patterns(patterns, key):
        # Helper: project_success patterns for a given pattern_key.
        return [
            p for p in patterns
            if p["pattern_key"] == key and p["pattern_type"] == "project_success"
        ]

    def test_extract_patterns(self):
        """extract_patterns yields a list covering both seeded projects."""
        from behavioral.behavioral_patterns import BehavioralPatternExtractor
        found = BehavioralPatternExtractor(self.db_path).extract_patterns()
        assert isinstance(found, list)
        assert len(found) >= 2  # At least project_a and project_b patterns

    def test_project_success_rate(self):
        """Patterns reflect the actual per-project success rate."""
        from behavioral.behavioral_patterns import BehavioralPatternExtractor
        found = BehavioralPatternExtractor(self.db_path).extract_patterns()
        matches = self._project_patterns(found, "project_a")
        assert len(matches) == 1
        assert abs(matches[0]["success_rate"] - 0.8) < 0.01

    def test_success_pattern_high_rate(self):
        """>70% success with 5+ evidence rows yields a success pattern."""
        from behavioral.behavioral_patterns import BehavioralPatternExtractor
        found = BehavioralPatternExtractor(self.db_path).extract_patterns()
        winner = self._project_patterns(found, "project_a")[0]
        assert winner["success_rate"] > 0.7
        assert winner["evidence_count"] >= 5

    def test_failure_pattern_low_rate(self):
        """<30% success with 5+ evidence rows yields a failure pattern."""
        from behavioral.behavioral_patterns import BehavioralPatternExtractor
        found = BehavioralPatternExtractor(self.db_path).extract_patterns()
        loser = self._project_patterns(found, "project_b")[0]
        assert loser["success_rate"] < 0.3

    def test_action_type_patterns(self):
        """Patterns are also grouped by action_type."""
        from behavioral.behavioral_patterns import BehavioralPatternExtractor
        found = BehavioralPatternExtractor(self.db_path).extract_patterns()
        by_action = [p for p in found if p["pattern_type"] == "action_type_success"]
        assert len(by_action) >= 1

    def test_get_patterns_with_min_confidence(self):
        """get_patterns filters out patterns below the confidence floor."""
        from behavioral.behavioral_patterns import BehavioralPatternExtractor
        extractor = BehavioralPatternExtractor(self.db_path)
        extractor.extract_patterns()
        # Persist before querying back.
        extractor.save_patterns()
        assert len(extractor.get_patterns(min_confidence=0.5)) <= len(
            extractor.get_patterns(min_confidence=0.0)
        )

    def test_pattern_confidence_scoring(self):
        """Confidence stays in [0, 1] and grows with evidence."""
        from behavioral.behavioral_patterns import BehavioralPatternExtractor
        found = BehavioralPatternExtractor(self.db_path).extract_patterns()
        for pattern in found:
            assert 0.0 <= pattern["confidence"] <= 1.0
            if pattern["evidence_count"] >= 10:
                assert pattern["confidence"] >= 0.5

    def test_save_patterns_to_db(self):
        """save_patterns persists rows into behavioral_patterns."""
        from behavioral.behavioral_patterns import BehavioralPatternExtractor
        extractor = BehavioralPatternExtractor(self.db_path)
        extractor.extract_patterns()
        assert extractor.save_patterns() >= 2
        # Cross-check directly against the table.
        conn = sqlite3.connect(self.db_path)
        (stored,) = conn.execute("SELECT COUNT(*) FROM behavioral_patterns").fetchone()
        conn.close()
        assert stored >= 2