superlocalmemory 2.7.6 → 2.8.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (170)
  1. package/CHANGELOG.md +120 -155
  2. package/README.md +115 -89
  3. package/api_server.py +2 -12
  4. package/docs/PATTERN-LEARNING.md +64 -199
  5. package/docs/example_graph_usage.py +4 -6
  6. package/install.sh +59 -0
  7. package/mcp_server.py +83 -7
  8. package/package.json +1 -8
  9. package/scripts/generate-thumbnails.py +3 -5
  10. package/skills/slm-build-graph/SKILL.md +1 -1
  11. package/skills/slm-list-recent/SKILL.md +1 -1
  12. package/skills/slm-recall/SKILL.md +1 -1
  13. package/skills/slm-remember/SKILL.md +1 -1
  14. package/skills/slm-show-patterns/SKILL.md +1 -1
  15. package/skills/slm-status/SKILL.md +1 -1
  16. package/skills/slm-switch-profile/SKILL.md +1 -1
  17. package/src/agent_registry.py +7 -18
  18. package/src/auth_middleware.py +3 -5
  19. package/src/auto_backup.py +3 -7
  20. package/src/behavioral/__init__.py +49 -0
  21. package/src/behavioral/behavioral_listener.py +203 -0
  22. package/src/behavioral/behavioral_patterns.py +275 -0
  23. package/src/behavioral/cross_project_transfer.py +206 -0
  24. package/src/behavioral/outcome_inference.py +194 -0
  25. package/src/behavioral/outcome_tracker.py +193 -0
  26. package/src/behavioral/tests/__init__.py +4 -0
  27. package/src/behavioral/tests/test_behavioral_integration.py +108 -0
  28. package/src/behavioral/tests/test_behavioral_patterns.py +150 -0
  29. package/src/behavioral/tests/test_cross_project_transfer.py +142 -0
  30. package/src/behavioral/tests/test_mcp_behavioral.py +139 -0
  31. package/src/behavioral/tests/test_mcp_report_outcome.py +117 -0
  32. package/src/behavioral/tests/test_outcome_inference.py +107 -0
  33. package/src/behavioral/tests/test_outcome_tracker.py +96 -0
  34. package/src/cache_manager.py +4 -6
  35. package/src/compliance/__init__.py +48 -0
  36. package/src/compliance/abac_engine.py +149 -0
  37. package/src/compliance/abac_middleware.py +116 -0
  38. package/src/compliance/audit_db.py +215 -0
  39. package/src/compliance/audit_logger.py +148 -0
  40. package/src/compliance/retention_manager.py +289 -0
  41. package/src/compliance/retention_scheduler.py +186 -0
  42. package/src/compliance/tests/__init__.py +4 -0
  43. package/src/compliance/tests/test_abac_enforcement.py +95 -0
  44. package/src/compliance/tests/test_abac_engine.py +124 -0
  45. package/src/compliance/tests/test_abac_mcp_integration.py +118 -0
  46. package/src/compliance/tests/test_audit_db.py +123 -0
  47. package/src/compliance/tests/test_audit_logger.py +98 -0
  48. package/src/compliance/tests/test_mcp_audit.py +128 -0
  49. package/src/compliance/tests/test_mcp_retention_policy.py +125 -0
  50. package/src/compliance/tests/test_retention_manager.py +131 -0
  51. package/src/compliance/tests/test_retention_scheduler.py +99 -0
  52. package/src/db_connection_manager.py +2 -12
  53. package/src/embedding_engine.py +61 -669
  54. package/src/embeddings/__init__.py +47 -0
  55. package/src/embeddings/cache.py +70 -0
  56. package/src/embeddings/cli.py +113 -0
  57. package/src/embeddings/constants.py +47 -0
  58. package/src/embeddings/database.py +91 -0
  59. package/src/embeddings/engine.py +247 -0
  60. package/src/embeddings/model_loader.py +145 -0
  61. package/src/event_bus.py +3 -13
  62. package/src/graph/__init__.py +36 -0
  63. package/src/graph/build_helpers.py +74 -0
  64. package/src/graph/cli.py +87 -0
  65. package/src/graph/cluster_builder.py +188 -0
  66. package/src/graph/cluster_summary.py +148 -0
  67. package/src/graph/constants.py +47 -0
  68. package/src/graph/edge_builder.py +162 -0
  69. package/src/graph/entity_extractor.py +95 -0
  70. package/src/graph/graph_core.py +226 -0
  71. package/src/graph/graph_search.py +231 -0
  72. package/src/graph/hierarchical.py +207 -0
  73. package/src/graph/schema.py +99 -0
  74. package/src/graph_engine.py +45 -1451
  75. package/src/hnsw_index.py +3 -7
  76. package/src/hybrid_search.py +36 -683
  77. package/src/learning/__init__.py +27 -12
  78. package/src/learning/adaptive_ranker.py +50 -12
  79. package/src/learning/cross_project_aggregator.py +2 -12
  80. package/src/learning/engagement_tracker.py +2 -12
  81. package/src/learning/feature_extractor.py +175 -43
  82. package/src/learning/feedback_collector.py +7 -12
  83. package/src/learning/learning_db.py +180 -12
  84. package/src/learning/project_context_manager.py +2 -12
  85. package/src/learning/source_quality_scorer.py +2 -12
  86. package/src/learning/synthetic_bootstrap.py +2 -12
  87. package/src/learning/tests/__init__.py +2 -0
  88. package/src/learning/tests/test_adaptive_ranker.py +2 -6
  89. package/src/learning/tests/test_adaptive_ranker_v28.py +60 -0
  90. package/src/learning/tests/test_aggregator.py +2 -6
  91. package/src/learning/tests/test_auto_retrain_v28.py +35 -0
  92. package/src/learning/tests/test_e2e_ranking_v28.py +82 -0
  93. package/src/learning/tests/test_feature_extractor_v28.py +93 -0
  94. package/src/learning/tests/test_feedback_collector.py +2 -6
  95. package/src/learning/tests/test_learning_db.py +2 -6
  96. package/src/learning/tests/test_learning_db_v28.py +110 -0
  97. package/src/learning/tests/test_learning_init_v28.py +48 -0
  98. package/src/learning/tests/test_outcome_signals.py +48 -0
  99. package/src/learning/tests/test_project_context.py +2 -6
  100. package/src/learning/tests/test_schema_migration.py +319 -0
  101. package/src/learning/tests/test_signal_inference.py +11 -13
  102. package/src/learning/tests/test_source_quality.py +2 -6
  103. package/src/learning/tests/test_synthetic_bootstrap.py +3 -7
  104. package/src/learning/tests/test_workflow_miner.py +2 -6
  105. package/src/learning/workflow_pattern_miner.py +2 -12
  106. package/src/lifecycle/__init__.py +54 -0
  107. package/src/lifecycle/bounded_growth.py +239 -0
  108. package/src/lifecycle/compaction_engine.py +226 -0
  109. package/src/lifecycle/lifecycle_engine.py +302 -0
  110. package/src/lifecycle/lifecycle_evaluator.py +225 -0
  111. package/src/lifecycle/lifecycle_scheduler.py +130 -0
  112. package/src/lifecycle/retention_policy.py +285 -0
  113. package/src/lifecycle/tests/__init__.py +4 -0
  114. package/src/lifecycle/tests/test_bounded_growth.py +193 -0
  115. package/src/lifecycle/tests/test_compaction.py +179 -0
  116. package/src/lifecycle/tests/test_lifecycle_engine.py +137 -0
  117. package/src/lifecycle/tests/test_lifecycle_evaluation.py +177 -0
  118. package/src/lifecycle/tests/test_lifecycle_scheduler.py +127 -0
  119. package/src/lifecycle/tests/test_lifecycle_search.py +109 -0
  120. package/src/lifecycle/tests/test_mcp_compact.py +149 -0
  121. package/src/lifecycle/tests/test_mcp_lifecycle_status.py +114 -0
  122. package/src/lifecycle/tests/test_retention_policy.py +162 -0
  123. package/src/mcp_tools_v28.py +280 -0
  124. package/src/memory-profiles.py +2 -12
  125. package/src/memory-reset.py +2 -12
  126. package/src/memory_compression.py +2 -12
  127. package/src/memory_store_v2.py +76 -20
  128. package/src/migrate_v1_to_v2.py +2 -12
  129. package/src/pattern_learner.py +29 -975
  130. package/src/patterns/__init__.py +24 -0
  131. package/src/patterns/analyzers.py +247 -0
  132. package/src/patterns/learner.py +267 -0
  133. package/src/patterns/scoring.py +167 -0
  134. package/src/patterns/store.py +223 -0
  135. package/src/patterns/terminology.py +138 -0
  136. package/src/provenance_tracker.py +4 -14
  137. package/src/query_optimizer.py +4 -6
  138. package/src/rate_limiter.py +2 -6
  139. package/src/search/__init__.py +20 -0
  140. package/src/search/cli.py +77 -0
  141. package/src/search/constants.py +26 -0
  142. package/src/search/engine.py +239 -0
  143. package/src/search/fusion.py +122 -0
  144. package/src/search/index_loader.py +112 -0
  145. package/src/search/methods.py +162 -0
  146. package/src/search_engine_v2.py +4 -6
  147. package/src/setup_validator.py +7 -13
  148. package/src/subscription_manager.py +2 -12
  149. package/src/tree/__init__.py +59 -0
  150. package/src/tree/builder.py +183 -0
  151. package/src/tree/nodes.py +196 -0
  152. package/src/tree/queries.py +252 -0
  153. package/src/tree/schema.py +76 -0
  154. package/src/tree_manager.py +10 -711
  155. package/src/trust/__init__.py +45 -0
  156. package/src/trust/constants.py +66 -0
  157. package/src/trust/queries.py +157 -0
  158. package/src/trust/schema.py +95 -0
  159. package/src/trust/scorer.py +299 -0
  160. package/src/trust/signals.py +95 -0
  161. package/src/trust_scorer.py +39 -697
  162. package/src/webhook_dispatcher.py +2 -12
  163. package/ui/app.js +1 -1
  164. package/ui/js/agents.js +1 -1
  165. package/ui_server.py +2 -14
  166. package/ATTRIBUTION.md +0 -140
  167. package/docs/ARCHITECTURE-V2.5.md +0 -190
  168. package/docs/GRAPH-ENGINE.md +0 -503
  169. package/docs/architecture-diagram.drawio +0 -405
  170. package/docs/plans/2026-02-13-benchmark-suite.md +0 -1349
@@ -0,0 +1,206 @@
1
+ # SPDX-License-Identifier: MIT
2
+ # Copyright (c) 2026 SuperLocalMemory (superlocalmemory.com)
3
+ """Privacy-safe cross-project behavioral pattern transfer.
4
+
5
+ Transfers behavioral patterns between projects using ONLY metadata
6
+ (pattern type, success rate, confidence). Never transfers memory
7
+ content or content hashes.
8
+
9
+ Eligibility criteria:
10
+ - confidence >= 0.7
11
+ - evidence_count >= 5
12
+ - source project != target project
13
+
14
+ Part of SLM v2.8 Behavioral Learning Engine.
15
+ """
16
+ import sqlite3
17
+ import threading
18
+ from typing import Dict, List, Optional, Any
19
+
20
+
21
# Eligibility thresholds for cross-project transfer
MIN_CONFIDENCE = 0.7
MIN_EVIDENCE = 5


class CrossProjectTransfer:
    """Privacy-safe cross-project behavioral pattern transfer.

    Only pattern metadata (pattern_type, pattern_key, success_rate,
    evidence_count, confidence) ever crosses project boundaries —
    memory content and content hashes are never transferred.
    """

    _CREATE_CROSS_TABLE = """
    CREATE TABLE IF NOT EXISTS cross_project_behaviors (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        source_project TEXT NOT NULL,
        target_project TEXT NOT NULL,
        pattern_id INTEGER NOT NULL,
        transfer_type TEXT DEFAULT 'metadata',
        confidence REAL DEFAULT 0.0,
        profile TEXT DEFAULT 'default',
        created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
        FOREIGN KEY (pattern_id) REFERENCES behavioral_patterns(id)
    )
    """

    def __init__(self, db_path: Optional[str] = None, enabled: bool = True):
        """Args:
            db_path: SQLite database path; the transfer table is created
                eagerly when a path is provided.
            enabled: When False, evaluate_transfers() short-circuits to [].
        """
        self._db_path = db_path
        self._enabled = enabled
        self._lock = threading.Lock()
        if db_path:
            self._ensure_tables()

    # ------------------------------------------------------------------
    # Public API
    # ------------------------------------------------------------------

    def evaluate_transfers(
        self, target_project: str
    ) -> List[Dict[str, Any]]:
        """Find patterns eligible for transfer into *target_project*.

        Eligibility: confidence >= MIN_CONFIDENCE, evidence_count >=
        MIN_EVIDENCE, and the pattern belongs to a different project.

        Returns:
            Metadata-only dicts (pattern_id, pattern_type, pattern_key,
            success_rate, evidence_count, confidence, source_project,
            transfer_type), highest confidence first.
        """
        if not self._enabled:
            return []

        sql = (
            "SELECT id, pattern_type, pattern_key, success_rate, "
            "evidence_count, confidence, project "
            "FROM behavioral_patterns "
            "WHERE confidence >= ? "
            "AND evidence_count >= ? "
            "AND project IS NOT NULL "
            "AND project != ? "
            "ORDER BY confidence DESC"
        )
        with self._lock:
            db = self._connect()
            try:
                candidates = db.execute(
                    sql, (MIN_CONFIDENCE, MIN_EVIDENCE, target_project)
                ).fetchall()
            finally:
                db.close()
        # Rows are fully materialized, so conversion is safe post-close.
        return [self._eligible_to_dict(row) for row in candidates]

    def apply_transfer(
        self, pattern_id: int, target_project: str
    ) -> Dict[str, Any]:
        """Record one cross-project transfer in the database.

        Looks up the source pattern for its project and confidence, then
        inserts a metadata-only row into cross_project_behaviors.

        Returns:
            On success: {"success": True, "transfer_id", "source_project",
            "target_project"}; otherwise {"success": False, "error": ...}.
        """
        with self._lock:
            db = self._connect()
            try:
                source_row = db.execute(
                    "SELECT project, confidence FROM behavioral_patterns WHERE id = ?",
                    (pattern_id,),
                ).fetchone()
                if source_row is None:
                    return {"success": False, "error": "pattern_not_found"}

                cursor = db.execute(
                    "INSERT INTO cross_project_behaviors "
                    "(source_project, target_project, pattern_id, "
                    "transfer_type, confidence) "
                    "VALUES (?, ?, ?, 'metadata', ?)",
                    (
                        source_row["project"],
                        target_project,
                        pattern_id,
                        source_row["confidence"],
                    ),
                )
                db.commit()
                return {
                    "success": True,
                    "transfer_id": cursor.lastrowid,
                    "source_project": source_row["project"],
                    "target_project": target_project,
                }
            finally:
                db.close()

    def get_transfers(
        self,
        target_project: Optional[str] = None,
        source_project: Optional[str] = None,
    ) -> List[Dict[str, Any]]:
        """Query recorded transfers, optionally filtered by endpoint.

        Args:
            target_project: Restrict to transfers into this project.
            source_project: Restrict to transfers out of this project.

        Returns:
            Transfer record dicts, newest first.
        """
        clauses = ["1=1"]
        params: List[Any] = []
        if target_project is not None:
            clauses.append("target_project = ?")
            params.append(target_project)
        if source_project is not None:
            clauses.append("source_project = ?")
            params.append(source_project)

        sql = (
            "SELECT * FROM cross_project_behaviors WHERE "
            + " AND ".join(clauses)
            + " ORDER BY created_at DESC"
        )
        with self._lock:
            db = self._connect()
            try:
                return [dict(row) for row in db.execute(sql, params).fetchall()]
            finally:
                db.close()

    # ------------------------------------------------------------------
    # Internal helpers
    # ------------------------------------------------------------------

    def _connect(self) -> sqlite3.Connection:
        """Open a fresh connection with dict-like row access."""
        db = sqlite3.connect(self._db_path)
        db.row_factory = sqlite3.Row
        return db

    def _ensure_tables(self) -> None:
        """Create cross_project_behaviors if it does not exist yet."""
        db = self._connect()
        try:
            db.execute(self._CREATE_CROSS_TABLE)
            db.commit()
        finally:
            db.close()

    @staticmethod
    def _eligible_to_dict(row: sqlite3.Row) -> Dict[str, Any]:
        """Project a pattern row onto privacy-safe metadata fields only.

        No content and no content hashes are ever included.
        """
        return {
            "pattern_id": row["id"],
            "pattern_type": row["pattern_type"],
            "pattern_key": row["pattern_key"],
            "success_rate": row["success_rate"],
            "evidence_count": row["evidence_count"],
            "confidence": row["confidence"],
            "source_project": row["project"],
            "transfer_type": "metadata",
        }
@@ -0,0 +1,194 @@
1
+ # SPDX-License-Identifier: MIT
2
+ # Copyright (c) 2026 SuperLocalMemory (superlocalmemory.com)
3
+ """Implicit outcome detection from recall behavior patterns.
4
+
5
+ Pure logic module — no database, no I/O. Takes recall events and
6
+ returns inference results. The caller (EventBus integration) passes
7
+ these to OutcomeTracker for persistence.
8
+
9
+ Inference rules (checked in priority order, first match wins per recall):
10
+ 1. Deletion of recalled memory within 60 min -> failure, confidence 0.0
11
+ 2. Usage signal "mcp_used_high" within 5 min -> success, confidence 0.8
12
+ 3. Usage signal cross-tool within 5 min -> success, confidence 0.7
13
+ 4. Rapid-fire: 3+ recalls in 2 min window -> failure, confidence 0.1
14
+ 5. Different-query recall within 2 min -> failure, confidence 0.2
15
+ 6. No re-query for 10+ min elapsed -> success, confidence 0.6
16
+ 7. Otherwise -> not yet inferrable (keep)
17
+ """
18
+ from datetime import datetime, timedelta
19
+ from typing import Dict, List, Optional
20
+
21
+
22
# ── Thresholds (seconds) ─────────────────────────────────────────────
_DELETION_WINDOW = 60 * 60    # 60 min
_USAGE_WINDOW = 5 * 60        # 5 min
_RAPID_FIRE_WINDOW = 2 * 60   # 2 min
_RAPID_FIRE_COUNT = 3
_REQUERY_WINDOW = 2 * 60      # 2 min
_QUIET_WINDOW = 10 * 60       # 10 min


class OutcomeInference:
    """Infer implicit success/failure outcomes from post-recall behavior.

    Purely in-memory rule engine: events are buffered through the
    record_* methods and evaluated by infer_outcomes(). No I/O and no
    persistence — the caller forwards results to OutcomeTracker.
    """

    def __init__(self) -> None:
        # Buffered events awaiting inference.
        self._recalls: List[Dict] = []    # {query, memory_ids, ts}
        self._usages: List[Dict] = []     # {query, signal, ts}
        self._deletions: List[Dict] = []  # {memory_id, ts}

    # ── Recording API ────────────────────────────────────────────────

    def record_recall(
        self,
        query: str,
        memory_ids: List[int],
        timestamp: Optional[datetime] = None,
    ) -> None:
        """Buffer a recall event for later inference."""
        event = {
            "query": query,
            "memory_ids": list(memory_ids),
            "ts": timestamp or datetime.now(),
        }
        self._recalls.append(event)

    def record_usage(
        self,
        query: str,
        signal: str,
        timestamp: Optional[datetime] = None,
    ) -> None:
        """Buffer a post-recall usage signal."""
        event = {
            "query": query,
            "signal": signal,
            "ts": timestamp or datetime.now(),
        }
        self._usages.append(event)

    def record_deletion(
        self,
        memory_id: int,
        timestamp: Optional[datetime] = None,
    ) -> None:
        """Buffer a memory deletion event."""
        event = {
            "memory_id": memory_id,
            "ts": timestamp or datetime.now(),
        }
        self._deletions.append(event)

    # ── Inference engine ─────────────────────────────────────────────

    def infer_outcomes(self, now: Optional[datetime] = None) -> List[Dict]:
        """Apply the rules to every buffered recall and return verdicts.

        Recalls that produce a verdict are dropped from the buffer;
        not-yet-inferrable recalls are kept for a later pass. Usage and
        deletion buffers are emptied after every pass.

        Returns:
            List of dicts with keys: outcome, confidence, memory_ids, reason
        """
        now = now or datetime.now()
        inferred: List[Dict] = []
        still_pending: List[Dict] = []

        for event in self._recalls:
            verdict = self._evaluate(event, now)
            if verdict is None:
                still_pending.append(event)
            else:
                inferred.append(verdict)

        self._recalls = still_pending
        self._usages = []
        self._deletions = []
        return inferred

    # ── Private rule evaluation ──────────────────────────────────────

    def _evaluate(self, recall: Dict, now: datetime) -> Optional[Dict]:
        """Run the rules in priority order; the first match wins.

        Returns None when no rule applies yet (recall stays buffered).
        """
        query = recall["query"]
        recalled_ids = recall["memory_ids"]
        recalled_at = recall["ts"]

        # Rule 1: a recalled memory was deleted within 60 min → failure.
        for deletion in self._deletions:
            if deletion["memory_id"] not in recalled_ids:
                continue
            gap = (deletion["ts"] - recalled_at).total_seconds()
            if 0 <= gap <= _DELETION_WINDOW:
                return self._result(
                    "failure", 0.0, recalled_ids,
                    "memory_deleted_after_recall",
                )

        # Rules 2 & 3: positive usage signals within 5 min → success.
        # Checked strongest-first so "mcp_used_high" wins over cross-tool.
        signal_rules = (
            ("mcp_used_high", 0.8, "mcp_used_high_after_recall"),
            ("implicit_positive_cross_tool", 0.7, "cross_tool_access_after_recall"),
        )
        for wanted_signal, score, reason in signal_rules:
            for usage in self._usages:
                if usage["query"] != query or usage["signal"] != wanted_signal:
                    continue
                gap = (usage["ts"] - recalled_at).total_seconds()
                if 0 <= gap <= _USAGE_WINDOW:
                    return self._result("success", score, recalled_ids, reason)

        # Rule 4: rapid-fire — 3+ recalls inside a ±2 min band → failure.
        lo = recalled_at - timedelta(seconds=_RAPID_FIRE_WINDOW)
        hi = recalled_at + timedelta(seconds=_RAPID_FIRE_WINDOW)
        burst = sum(
            1 for other in self._recalls
            if other is not recall and lo <= other["ts"] <= hi
        )
        if burst + 1 >= _RAPID_FIRE_COUNT:  # +1 counts this recall itself
            return self._result(
                "failure", 0.1, recalled_ids,
                "rapid_fire_queries",
            )

        # Rule 5: a different-query recall within 2 min → failure.
        for other in self._recalls:
            if other is recall or other["query"] == query:
                continue
            if abs((other["ts"] - recalled_at).total_seconds()) <= _REQUERY_WINDOW:
                return self._result(
                    "failure", 0.2, recalled_ids,
                    "immediate_requery_different_terms",
                )

        # Rule 6: quiet for 10+ min → success.
        if (now - recalled_at).total_seconds() >= _QUIET_WINDOW:
            return self._result(
                "success", 0.6, recalled_ids,
                "no_requery_after_recall",
            )

        # Rule 7: not inferrable yet — keep buffered.
        return None

    @staticmethod
    def _result(
        outcome: str,
        confidence: float,
        memory_ids: List[int],
        reason: str,
    ) -> Dict:
        """Assemble a verdict dict in the shape OutcomeTracker expects."""
        return {
            "outcome": outcome,
            "confidence": confidence,
            "memory_ids": memory_ids,
            "reason": reason,
        }
@@ -0,0 +1,193 @@
1
+ # SPDX-License-Identifier: MIT
2
+ # Copyright (c) 2026 SuperLocalMemory (superlocalmemory.com)
3
+ """Explicit + implicit action outcome recording.
4
+
5
+ Records what happens AFTER memories are recalled — success, failure,
6
+ or partial outcomes. Self-contained: creates its own table via
7
+ CREATE TABLE IF NOT EXISTS so no external migration is needed.
8
+
9
+ Part of SLM v2.8 Behavioral Learning Engine.
10
+ """
11
+ import json
12
+ import sqlite3
13
+ import threading
14
+ from typing import Dict, List, Optional, Any
15
+
16
+
17
class OutcomeTracker:
    """Records action outcomes for behavioral learning.

    Each outcome links one or more memory IDs to an outcome label
    ("success" / "failure" / "partial") with optional context metadata.
    Confidence defaults to 0.9 for explicit (user-reported) outcomes.
    Self-contained: the table is created via CREATE TABLE IF NOT EXISTS,
    so no external migration is needed.
    """

    # Valid outcome labels; record_outcome() rejects anything else.
    OUTCOMES = ("success", "failure", "partial")

    # Advisory action categories. NOTE: record_outcome() does not
    # enforce membership — arbitrary strings are stored as-is.
    ACTION_TYPES = (
        "code_written",
        "decision_made",
        "debug_resolved",
        "architecture_chosen",
        "other",
    )

    _CREATE_TABLE = """
    CREATE TABLE IF NOT EXISTS action_outcomes (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        memory_ids TEXT NOT NULL,
        outcome TEXT NOT NULL,
        action_type TEXT DEFAULT 'other',
        context TEXT DEFAULT '{}',
        confidence REAL DEFAULT 0.9,
        agent_id TEXT DEFAULT 'user',
        project TEXT,
        profile TEXT DEFAULT 'default',
        created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
    )
    """

    def __init__(self, db_path: Optional[str] = None):
        """Args:
            db_path: SQLite database path; the outcomes table is created
                eagerly when a path is provided.
        """
        self._db_path = db_path
        self._lock = threading.Lock()
        if db_path:
            self._ensure_table()

    # ------------------------------------------------------------------
    # Public API
    # ------------------------------------------------------------------

    def record_outcome(
        self,
        memory_ids: List[int],
        outcome: str,
        action_type: str = "other",
        context: Optional[Dict[str, Any]] = None,
        confidence: float = 0.9,
        agent_id: str = "user",
        project: Optional[str] = None,
    ) -> Optional[int]:
        """Record an action outcome against one or more memories.

        Args:
            memory_ids: List of memory IDs involved in this outcome.
            outcome: One of OUTCOMES ("success", "failure", "partial").
            action_type: Category of the action taken.
            context: Arbitrary metadata dict (stored as JSON).
            confidence: Confidence in the outcome label (default 0.9).
            agent_id: Identifier for the reporting agent.
            project: Optional project scope.

        Returns:
            The row ID of the inserted outcome, or None if the outcome
            label is not one of OUTCOMES.
        """
        if outcome not in self.OUTCOMES:
            return None

        context_json = json.dumps(context or {})
        memory_ids_json = json.dumps(memory_ids)

        with self._lock:
            conn = self._connect()
            try:
                cur = conn.execute(
                    """INSERT INTO action_outcomes
                       (memory_ids, outcome, action_type, context,
                        confidence, agent_id, project)
                       VALUES (?, ?, ?, ?, ?, ?, ?)""",
                    (
                        memory_ids_json,
                        outcome,
                        action_type,
                        context_json,
                        confidence,
                        agent_id,
                        project,
                    ),
                )
                conn.commit()
                return cur.lastrowid
            finally:
                conn.close()

    def get_outcomes(
        self,
        memory_id: Optional[int] = None,
        project: Optional[str] = None,
        limit: int = 100,
    ) -> List[Dict[str, Any]]:
        """Query recorded outcomes with optional filters.

        Args:
            memory_id: If given, return only outcomes whose memory_ids
                list contains this ID.
            project: If given, filter by project scope.
            limit: Maximum rows to return (default 100). When memory_id
                is given, the limit applies to *matching* rows.

        Returns:
            List of outcome dicts, newest first, with deserialized
            memory_ids and context.
        """
        with self._lock:
            conn = self._connect()
            try:
                query = "SELECT * FROM action_outcomes WHERE 1=1"
                params: List[Any] = []

                if project is not None:
                    query += " AND project = ?"
                    params.append(project)

                query += " ORDER BY created_at DESC"

                # memory_ids is a JSON list, so the memory_id filter can
                # only happen in Python. BUGFIX: the SQL LIMIT used to be
                # applied before that filter, silently dropping matching
                # rows; apply the limit after filtering instead.
                if memory_id is None:
                    query += " LIMIT ?"
                    params.append(limit)

                rows = conn.execute(query, params).fetchall()
                results = [self._row_to_dict(r) for r in rows]

                if memory_id is not None:
                    results = [
                        r for r in results if memory_id in r["memory_ids"]
                    ][:limit]

                return results
            finally:
                conn.close()

    def get_success_rate(self, memory_id: int) -> float:
        """Calculate success rate for a specific memory.

        Counts outcomes whose memory_ids list contains memory_id,
        based on the most recent 100 matching outcomes (the
        get_outcomes default limit).

        Returns:
            success count / total count, rounded to 3 decimals,
            or 0.0 if no outcomes exist.
        """
        outcomes = self.get_outcomes(memory_id=memory_id)
        if not outcomes:
            return 0.0
        successes = sum(1 for o in outcomes if o["outcome"] == "success")
        return round(successes / len(outcomes), 3)

    # ------------------------------------------------------------------
    # Internal helpers
    # ------------------------------------------------------------------

    def _connect(self) -> sqlite3.Connection:
        """Open a connection with dict-like row access enabled."""
        conn = sqlite3.connect(self._db_path)
        conn.row_factory = sqlite3.Row
        return conn

    def _ensure_table(self) -> None:
        """Create the action_outcomes table if it doesn't exist."""
        conn = self._connect()
        try:
            conn.execute(self._CREATE_TABLE)
            conn.commit()
        finally:
            conn.close()

    @staticmethod
    def _row_to_dict(row: sqlite3.Row) -> Dict[str, Any]:
        """Convert a sqlite3.Row into a plain dict with parsed JSON fields."""
        d = dict(row)
        d["memory_ids"] = json.loads(d.get("memory_ids", "[]"))
        ctx = d.get("context", "{}")
        d["context"] = json.loads(ctx) if isinstance(ctx, str) else ctx
        return d
@@ -0,0 +1,4 @@
1
+ # SPDX-License-Identifier: MIT
2
+ # Copyright (c) 2026 SuperLocalMemory (superlocalmemory.com)
3
+ """Tests for behavioral learning engine.
4
+ """