superlocalmemory 2.7.5 → 2.8.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (174) hide show
  1. package/CHANGELOG.md +120 -155
  2. package/README.md +115 -89
  3. package/api_server.py +2 -12
  4. package/docs/PATTERN-LEARNING.md +64 -199
  5. package/docs/example_graph_usage.py +4 -6
  6. package/install.ps1 +226 -0
  7. package/install.sh +59 -0
  8. package/mcp_server.py +83 -7
  9. package/package.json +3 -10
  10. package/scripts/generate-thumbnails.py +3 -5
  11. package/skills/slm-build-graph/SKILL.md +1 -1
  12. package/skills/slm-list-recent/SKILL.md +1 -1
  13. package/skills/slm-recall/SKILL.md +1 -1
  14. package/skills/slm-remember/SKILL.md +1 -1
  15. package/skills/slm-show-patterns/SKILL.md +1 -1
  16. package/skills/slm-status/SKILL.md +1 -1
  17. package/skills/slm-switch-profile/SKILL.md +1 -1
  18. package/src/agent_registry.py +7 -18
  19. package/src/auth_middleware.py +3 -5
  20. package/src/auto_backup.py +3 -7
  21. package/src/behavioral/__init__.py +49 -0
  22. package/src/behavioral/behavioral_listener.py +203 -0
  23. package/src/behavioral/behavioral_patterns.py +275 -0
  24. package/src/behavioral/cross_project_transfer.py +206 -0
  25. package/src/behavioral/outcome_inference.py +194 -0
  26. package/src/behavioral/outcome_tracker.py +193 -0
  27. package/src/behavioral/tests/__init__.py +4 -0
  28. package/src/behavioral/tests/test_behavioral_integration.py +108 -0
  29. package/src/behavioral/tests/test_behavioral_patterns.py +150 -0
  30. package/src/behavioral/tests/test_cross_project_transfer.py +142 -0
  31. package/src/behavioral/tests/test_mcp_behavioral.py +139 -0
  32. package/src/behavioral/tests/test_mcp_report_outcome.py +117 -0
  33. package/src/behavioral/tests/test_outcome_inference.py +107 -0
  34. package/src/behavioral/tests/test_outcome_tracker.py +96 -0
  35. package/src/cache_manager.py +4 -6
  36. package/src/compliance/__init__.py +48 -0
  37. package/src/compliance/abac_engine.py +149 -0
  38. package/src/compliance/abac_middleware.py +116 -0
  39. package/src/compliance/audit_db.py +215 -0
  40. package/src/compliance/audit_logger.py +148 -0
  41. package/src/compliance/retention_manager.py +289 -0
  42. package/src/compliance/retention_scheduler.py +186 -0
  43. package/src/compliance/tests/__init__.py +4 -0
  44. package/src/compliance/tests/test_abac_enforcement.py +95 -0
  45. package/src/compliance/tests/test_abac_engine.py +124 -0
  46. package/src/compliance/tests/test_abac_mcp_integration.py +118 -0
  47. package/src/compliance/tests/test_audit_db.py +123 -0
  48. package/src/compliance/tests/test_audit_logger.py +98 -0
  49. package/src/compliance/tests/test_mcp_audit.py +128 -0
  50. package/src/compliance/tests/test_mcp_retention_policy.py +125 -0
  51. package/src/compliance/tests/test_retention_manager.py +131 -0
  52. package/src/compliance/tests/test_retention_scheduler.py +99 -0
  53. package/src/db_connection_manager.py +2 -12
  54. package/src/embedding_engine.py +61 -669
  55. package/src/embeddings/__init__.py +47 -0
  56. package/src/embeddings/cache.py +70 -0
  57. package/src/embeddings/cli.py +113 -0
  58. package/src/embeddings/constants.py +47 -0
  59. package/src/embeddings/database.py +91 -0
  60. package/src/embeddings/engine.py +247 -0
  61. package/src/embeddings/model_loader.py +145 -0
  62. package/src/event_bus.py +3 -13
  63. package/src/graph/__init__.py +36 -0
  64. package/src/graph/build_helpers.py +74 -0
  65. package/src/graph/cli.py +87 -0
  66. package/src/graph/cluster_builder.py +188 -0
  67. package/src/graph/cluster_summary.py +148 -0
  68. package/src/graph/constants.py +47 -0
  69. package/src/graph/edge_builder.py +162 -0
  70. package/src/graph/entity_extractor.py +95 -0
  71. package/src/graph/graph_core.py +226 -0
  72. package/src/graph/graph_search.py +231 -0
  73. package/src/graph/hierarchical.py +207 -0
  74. package/src/graph/schema.py +99 -0
  75. package/src/graph_engine.py +45 -1451
  76. package/src/hnsw_index.py +3 -7
  77. package/src/hybrid_search.py +36 -683
  78. package/src/learning/__init__.py +27 -12
  79. package/src/learning/adaptive_ranker.py +50 -12
  80. package/src/learning/cross_project_aggregator.py +2 -12
  81. package/src/learning/engagement_tracker.py +2 -12
  82. package/src/learning/feature_extractor.py +175 -43
  83. package/src/learning/feedback_collector.py +7 -12
  84. package/src/learning/learning_db.py +180 -12
  85. package/src/learning/project_context_manager.py +2 -12
  86. package/src/learning/source_quality_scorer.py +2 -12
  87. package/src/learning/synthetic_bootstrap.py +2 -12
  88. package/src/learning/tests/__init__.py +2 -0
  89. package/src/learning/tests/test_adaptive_ranker.py +2 -6
  90. package/src/learning/tests/test_adaptive_ranker_v28.py +60 -0
  91. package/src/learning/tests/test_aggregator.py +2 -6
  92. package/src/learning/tests/test_auto_retrain_v28.py +35 -0
  93. package/src/learning/tests/test_e2e_ranking_v28.py +82 -0
  94. package/src/learning/tests/test_feature_extractor_v28.py +93 -0
  95. package/src/learning/tests/test_feedback_collector.py +2 -6
  96. package/src/learning/tests/test_learning_db.py +2 -6
  97. package/src/learning/tests/test_learning_db_v28.py +110 -0
  98. package/src/learning/tests/test_learning_init_v28.py +48 -0
  99. package/src/learning/tests/test_outcome_signals.py +48 -0
  100. package/src/learning/tests/test_project_context.py +2 -6
  101. package/src/learning/tests/test_schema_migration.py +319 -0
  102. package/src/learning/tests/test_signal_inference.py +11 -13
  103. package/src/learning/tests/test_source_quality.py +2 -6
  104. package/src/learning/tests/test_synthetic_bootstrap.py +3 -7
  105. package/src/learning/tests/test_workflow_miner.py +2 -6
  106. package/src/learning/workflow_pattern_miner.py +2 -12
  107. package/src/lifecycle/__init__.py +54 -0
  108. package/src/lifecycle/bounded_growth.py +239 -0
  109. package/src/lifecycle/compaction_engine.py +226 -0
  110. package/src/lifecycle/lifecycle_engine.py +302 -0
  111. package/src/lifecycle/lifecycle_evaluator.py +225 -0
  112. package/src/lifecycle/lifecycle_scheduler.py +130 -0
  113. package/src/lifecycle/retention_policy.py +285 -0
  114. package/src/lifecycle/tests/__init__.py +4 -0
  115. package/src/lifecycle/tests/test_bounded_growth.py +193 -0
  116. package/src/lifecycle/tests/test_compaction.py +179 -0
  117. package/src/lifecycle/tests/test_lifecycle_engine.py +137 -0
  118. package/src/lifecycle/tests/test_lifecycle_evaluation.py +177 -0
  119. package/src/lifecycle/tests/test_lifecycle_scheduler.py +127 -0
  120. package/src/lifecycle/tests/test_lifecycle_search.py +109 -0
  121. package/src/lifecycle/tests/test_mcp_compact.py +149 -0
  122. package/src/lifecycle/tests/test_mcp_lifecycle_status.py +114 -0
  123. package/src/lifecycle/tests/test_retention_policy.py +162 -0
  124. package/src/mcp_tools_v28.py +280 -0
  125. package/src/memory-profiles.py +2 -12
  126. package/src/memory-reset.py +2 -12
  127. package/src/memory_compression.py +2 -12
  128. package/src/memory_store_v2.py +76 -20
  129. package/src/migrate_v1_to_v2.py +2 -12
  130. package/src/pattern_learner.py +29 -975
  131. package/src/patterns/__init__.py +24 -0
  132. package/src/patterns/analyzers.py +247 -0
  133. package/src/patterns/learner.py +267 -0
  134. package/src/patterns/scoring.py +167 -0
  135. package/src/patterns/store.py +223 -0
  136. package/src/patterns/terminology.py +138 -0
  137. package/src/provenance_tracker.py +4 -14
  138. package/src/query_optimizer.py +4 -6
  139. package/src/rate_limiter.py +2 -6
  140. package/src/search/__init__.py +20 -0
  141. package/src/search/cli.py +77 -0
  142. package/src/search/constants.py +26 -0
  143. package/src/search/engine.py +239 -0
  144. package/src/search/fusion.py +122 -0
  145. package/src/search/index_loader.py +112 -0
  146. package/src/search/methods.py +162 -0
  147. package/src/search_engine_v2.py +4 -6
  148. package/src/setup_validator.py +7 -13
  149. package/src/subscription_manager.py +2 -12
  150. package/src/tree/__init__.py +59 -0
  151. package/src/tree/builder.py +183 -0
  152. package/src/tree/nodes.py +196 -0
  153. package/src/tree/queries.py +252 -0
  154. package/src/tree/schema.py +76 -0
  155. package/src/tree_manager.py +10 -711
  156. package/src/trust/__init__.py +45 -0
  157. package/src/trust/constants.py +66 -0
  158. package/src/trust/queries.py +157 -0
  159. package/src/trust/schema.py +95 -0
  160. package/src/trust/scorer.py +299 -0
  161. package/src/trust/signals.py +95 -0
  162. package/src/trust_scorer.py +39 -697
  163. package/src/webhook_dispatcher.py +2 -12
  164. package/ui/app.js +1 -1
  165. package/ui/index.html +3 -0
  166. package/ui/js/agents.js +1 -1
  167. package/ui/js/core.js +21 -5
  168. package/ui/js/profiles.js +29 -7
  169. package/ui_server.py +2 -14
  170. package/ATTRIBUTION.md +0 -140
  171. package/docs/ARCHITECTURE-V2.5.md +0 -190
  172. package/docs/GRAPH-ENGINE.md +0 -503
  173. package/docs/architecture-diagram.drawio +0 -405
  174. package/docs/plans/2026-02-13-benchmark-suite.md +0 -1349
@@ -0,0 +1,203 @@
1
+ # SPDX-License-Identifier: MIT
2
+ # Copyright (c) 2026 SuperLocalMemory (superlocalmemory.com)
3
+ """EventBus listener that bridges events to the behavioral learning engine.
4
+
5
+ Listens for memory.recalled, memory.deleted, and usage events.
6
+ Feeds recall events to OutcomeInference for implicit outcome detection.
7
+ Triggers pattern extraction after configurable outcome count threshold.
8
+
9
+ Part of SLM v2.8 Behavioral Learning Engine.
10
+ """
11
+ import logging
12
+ import threading
13
+ from datetime import datetime
14
+ from pathlib import Path
15
+ from typing import Optional, Dict, Any, List
16
+
17
+ from .outcome_tracker import OutcomeTracker
18
+ from .outcome_inference import OutcomeInference
19
+
20
logger = logging.getLogger("superlocalmemory.behavioral.listener")

# Default: extract patterns every 100 new outcomes.
DEFAULT_EXTRACTION_THRESHOLD = 100


class BehavioralListener:
    """EventBus listener that feeds events to the behavioral learning engine.

    Processes:
    - memory.recalled -> feeds to OutcomeInference (implicit outcome detection)
    - memory.deleted -> records deletion for inference (Rule 1 signal)
    - Usage signals carried on recall payloads -> records for inference

    Thread-safe: ``handle_event`` can be called from any thread; all mutable
    state is guarded by a single lock. Listener callbacks run on the
    emitter's thread -- they must stay fast.
    """

    # Event types this listener cares about; everything else is ignored.
    _RECALL_EVENT = "memory.recalled"
    _DELETION_EVENT = "memory.deleted"

    def __init__(
        self,
        db_path: Optional[str] = None,
        extraction_threshold: int = DEFAULT_EXTRACTION_THRESHOLD,
    ):
        """Create a listener.

        Args:
            db_path: Path to the learning database. Defaults to
                ``~/.claude-memory/learning.db``.
            extraction_threshold: Number of newly recorded outcomes after
                which behavioral pattern extraction is triggered.
        """
        if db_path is None:
            db_path = str(Path.home() / ".claude-memory" / "learning.db")
        self._db_path = str(db_path)
        self.extraction_threshold = extraction_threshold

        # Core components: persistent outcome storage + in-memory inference.
        self._tracker = OutcomeTracker(self._db_path)
        self._inference = OutcomeInference()

        # Guards all counters and the inference/tracker calls below.
        self._lock = threading.Lock()

        # Diagnostics counters (exposed via get_status()).
        self.events_processed = 0
        self.recall_events_processed = 0
        self.deletion_events_processed = 0
        self._outcome_count_since_extraction = 0
        self._registered = False

    # ------------------------------------------------------------------
    # Event handling (called on emitter's thread — must be fast)
    # ------------------------------------------------------------------

    def handle_event(self, event: Dict[str, Any]) -> None:
        """Process an EventBus event.

        Called on the emitter's thread — must be fast and non-blocking.
        Filters by event_type and dispatches to the appropriate handler.
        Unknown event types are counted but otherwise ignored.

        Args:
            event: Event dict with keys ``event_type``, ``payload``,
                ``memory_id`` and ``timestamp`` (ISO-8601 string).
        """
        event_type = event.get("event_type", "")
        payload = event.get("payload", {})
        memory_id = event.get("memory_id")
        timestamp_str = event.get("timestamp")

        # Fall back to "now" on a missing or malformed timestamp rather
        # than dropping the event.
        try:
            timestamp = (
                datetime.fromisoformat(timestamp_str)
                if timestamp_str
                else datetime.now()
            )
        except (ValueError, TypeError):
            timestamp = datetime.now()

        with self._lock:
            self.events_processed += 1

            if event_type == self._RECALL_EVENT:
                self._handle_recall(payload, memory_id, timestamp)

            elif event_type == self._DELETION_EVENT:
                self._handle_deletion(memory_id, timestamp)
            # All other event types are silently ignored.

    def _handle_recall(
        self,
        payload: Dict[str, Any],
        memory_id: Optional[int],
        timestamp: datetime,
    ) -> None:
        """Process a memory.recalled event. Must be called under self._lock."""
        query = payload.get("query", "")
        # Fall back to the top-level memory_id when the payload carries no
        # explicit list. NOTE: an explicit `is not None` check — a valid
        # memory_id of 0 must not be discarded by truthiness.
        memory_ids = payload.get(
            "memory_ids", [memory_id] if memory_id is not None else []
        )
        signal = payload.get("signal")

        self._inference.record_recall(query, memory_ids, timestamp)
        if signal:
            self._inference.record_usage(
                query, signal=signal, timestamp=timestamp
            )
        self.recall_events_processed += 1

        # Periodically run inference (every 10 recall events) to keep the
        # per-event cost low on the emitter's thread.
        if self.recall_events_processed % 10 == 0:
            self._run_inference_cycle()

    def _handle_deletion(
        self, memory_id: Optional[int], timestamp: datetime
    ) -> None:
        """Process a memory.deleted event. Must be called under self._lock."""
        if memory_id is not None:
            self._inference.record_deletion(memory_id, timestamp)
            self.deletion_events_processed += 1

    # ------------------------------------------------------------------
    # Inference + pattern extraction
    # ------------------------------------------------------------------

    def _run_inference_cycle(self) -> None:
        """Run outcome inference and optionally trigger pattern extraction.

        Persists each inferred outcome via the tracker and, once enough
        outcomes have accumulated, kicks off pattern extraction.
        """
        inferences: List[Dict] = self._inference.infer_outcomes(
            datetime.now()
        )
        for inf in inferences:
            self._tracker.record_outcome(
                memory_ids=inf["memory_ids"],
                outcome=inf["outcome"],
                action_type="inferred",
                confidence=inf["confidence"],
                context={"reason": inf.get("reason", "")},
            )
            self._outcome_count_since_extraction += 1

        if self._outcome_count_since_extraction >= self.extraction_threshold:
            self._trigger_extraction()

    def _trigger_extraction(self) -> None:
        """Trigger behavioral pattern extraction. Best-effort.

        Failures are logged and swallowed so the event pipeline keeps
        running; the counter is reset only on success.
        """
        try:
            from .behavioral_patterns import BehavioralPatternExtractor

            extractor = BehavioralPatternExtractor(self._db_path)
            extractor.extract_patterns()
            extractor.save_patterns()
            self._outcome_count_since_extraction = 0
        except Exception as exc:
            logger.warning("Pattern extraction failed: %s", exc)

    # ------------------------------------------------------------------
    # EventBus registration
    # ------------------------------------------------------------------

    def register_with_eventbus(self) -> bool:
        """Register this listener with the EventBus singleton.

        Returns True if registration succeeds, False otherwise.
        Graceful degradation: failure here does NOT break the engine.
        """
        try:
            from event_bus import EventBus

            bus = EventBus.get_instance(Path(self._db_path))
            bus.add_listener(self.handle_event)
            self._registered = True
            return True
        except Exception as exc:
            logger.info(
                "EventBus registration skipped (not available): %s", exc
            )
            self._registered = False
            return False

    # ------------------------------------------------------------------
    # Status / introspection
    # ------------------------------------------------------------------

    def get_status(self) -> Dict[str, Any]:
        """Return listener status for diagnostics.

        Returns:
            Dict of counters plus registration state and the configured
            extraction threshold.
        """
        return {
            "events_processed": self.events_processed,
            "recall_events_processed": self.recall_events_processed,
            "deletion_events_processed": self.deletion_events_processed,
            "registered": self._registered,
            "outcome_count_since_extraction": self._outcome_count_since_extraction,
            "extraction_threshold": self.extraction_threshold,
        }
@@ -0,0 +1,275 @@
1
+ # SPDX-License-Identifier: MIT
2
+ # Copyright (c) 2026 SuperLocalMemory (superlocalmemory.com)
3
+ """Pattern extraction from action outcome histories.
4
+
5
+ Scans the action_outcomes table, groups by project and action_type,
6
+ calculates success rates, and stores discovered patterns in the
7
+ behavioral_patterns table. Self-contained: creates its own table via
8
+ CREATE TABLE IF NOT EXISTS so no external migration is needed.
9
+
10
+ Part of SLM v2.8 Behavioral Learning Engine.
11
+ """
12
+ import json
13
+ import sqlite3
14
+ import threading
15
+ from datetime import datetime, timezone
16
+ from typing import Dict, List, Optional, Any
17
+
18
+
19
class BehavioralPatternExtractor:
    """Extracts success/failure patterns from outcome data.

    Analyzes action_outcomes rows to discover:
    - project_success: success rate per project
    - action_type_success: success rate per action_type

    Confidence formula:
        min(evidence_count / 10, 1.0) * abs(success_rate - 0.5) * 2
    This yields high confidence only when there is enough evidence AND the
    success rate is far from the 50/50 coin-flip baseline.

    Self-contained: creates its own behavioral_patterns table via
    CREATE TABLE IF NOT EXISTS, so no external migration is needed.
    Thread-safe: all DB access happens under an internal lock.
    """

    PATTERN_TYPES = ("project_success", "action_type_success")

    _CREATE_TABLE = """
    CREATE TABLE IF NOT EXISTS behavioral_patterns (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        pattern_type TEXT NOT NULL,
        pattern_key TEXT NOT NULL,
        success_rate REAL DEFAULT 0.0,
        evidence_count INTEGER DEFAULT 0,
        confidence REAL DEFAULT 0.0,
        metadata TEXT DEFAULT '{}',
        project TEXT,
        profile TEXT DEFAULT 'default',
        created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
        updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
    )
    """

    # Minimum outcomes required before we emit a pattern at all.
    MIN_EVIDENCE = 3

    def __init__(self, db_path: Optional[str] = None):
        """Create an extractor bound to ``db_path``.

        Args:
            db_path: SQLite database path. When None, no table is created
                and extraction cannot run; ``save_patterns`` on an empty
                extractor still returns 0 without touching the DB.
        """
        self._db_path = db_path
        self._lock = threading.Lock()
        # Most recently extracted patterns, persisted by save_patterns().
        self._patterns: List[Dict[str, Any]] = []
        if db_path:
            self._ensure_table()

    # ------------------------------------------------------------------
    # Public API
    # ------------------------------------------------------------------

    def extract_patterns(self) -> List[Dict[str, Any]]:
        """Scan action_outcomes and extract success/failure patterns.

        Groups outcomes by project and by action_type, calculates
        success rates, and returns a list of pattern dicts. Also stores
        the result internally so a subsequent ``save_patterns()`` call
        can persist them.

        Returns:
            List of pattern dicts with keys: pattern_type, pattern_key,
            success_rate, evidence_count, confidence, metadata, project.
        """
        patterns: List[Dict[str, Any]] = []
        with self._lock:
            conn = self._connect()
            try:
                # Project-scoped patterns carry the project in "project";
                # action-type patterns are global (project is None).
                patterns.extend(
                    self._extract_group_patterns(
                        conn, "project", "project_success", project_scoped=True
                    )
                )
                patterns.extend(
                    self._extract_group_patterns(
                        conn,
                        "action_type",
                        "action_type_success",
                        project_scoped=False,
                    )
                )
            finally:
                conn.close()
        self._patterns = patterns
        return patterns

    def save_patterns(self) -> int:
        """Persist the most recently extracted patterns to the DB.

        Inserts (or replaces) rows in the behavioral_patterns table.
        The upsert is delete-then-insert keyed on
        (pattern_type, pattern_key, project).

        Returns:
            Number of patterns saved (0 when nothing was extracted).
        """
        if not self._patterns:
            return 0

        now = datetime.now(timezone.utc).isoformat()
        with self._lock:
            conn = self._connect()
            try:
                for p in self._patterns:
                    # Upsert: delete any existing row for the same
                    # (pattern_type, pattern_key, project) then insert.
                    # COALESCE folds NULL projects into '' so that two
                    # NULL-project rows compare equal.
                    conn.execute(
                        """DELETE FROM behavioral_patterns
                           WHERE pattern_type = ? AND pattern_key = ?
                           AND COALESCE(project, '') = COALESCE(?, '')""",
                        (p["pattern_type"], p["pattern_key"], p.get("project")),
                    )
                    conn.execute(
                        """INSERT INTO behavioral_patterns
                           (pattern_type, pattern_key, success_rate,
                            evidence_count, confidence, metadata,
                            project, created_at, updated_at)
                           VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)""",
                        (
                            p["pattern_type"],
                            p["pattern_key"],
                            p["success_rate"],
                            p["evidence_count"],
                            p["confidence"],
                            json.dumps(p.get("metadata", {})),
                            p.get("project"),
                            now,
                            now,
                        ),
                    )
                conn.commit()
                return len(self._patterns)
            finally:
                conn.close()

    def get_patterns(
        self,
        min_confidence: float = 0.0,
        project: Optional[str] = None,
    ) -> List[Dict[str, Any]]:
        """Read stored patterns from the DB with optional filters.

        Args:
            min_confidence: Only return patterns with confidence >= this.
            project: If given, filter by project scope.

        Returns:
            List of pattern dicts read from the database, ordered by
            descending confidence. The metadata column is parsed to a dict.
        """
        with self._lock:
            conn = self._connect()
            try:
                query = (
                    "SELECT * FROM behavioral_patterns "
                    "WHERE confidence >= ?"
                )
                params: List[Any] = [min_confidence]
                if project is not None:
                    query += " AND project = ?"
                    params.append(project)
                query += " ORDER BY confidence DESC"
                rows = conn.execute(query, params).fetchall()
                return [self._row_to_dict(r) for r in rows]
            finally:
                conn.close()

    # ------------------------------------------------------------------
    # Internal extraction helpers
    # ------------------------------------------------------------------

    def _extract_group_patterns(
        self,
        conn: sqlite3.Connection,
        column: str,
        pattern_type: str,
        project_scoped: bool,
    ) -> List[Dict[str, Any]]:
        """Group outcomes by ``column`` and compute success-rate patterns.

        Shared implementation for both pattern types (previously two
        near-identical methods).

        Args:
            conn: Open connection. Caller owns its lifetime.
            column: Grouping column ("project" or "action_type"). Internal
                constant only — interpolated into SQL, never user input.
            pattern_type: Value stored in the pattern_type field.
            project_scoped: When True, the group key doubles as the
                pattern's project scope; otherwise project is None.

        Returns:
            List of pattern dicts for groups with >= MIN_EVIDENCE rows.
        """
        rows = conn.execute(
            f"""SELECT {column},
                       COUNT(*) AS total,
                       SUM(CASE WHEN outcome = 'success' THEN 1 ELSE 0 END) AS wins
                FROM action_outcomes
                WHERE {column} IS NOT NULL
                GROUP BY {column}
                HAVING total >= ?""",
            (self.MIN_EVIDENCE,),
        ).fetchall()

        patterns = []
        for row in rows:
            key, total, wins = row[0], row[1], row[2]
            rate = round(wins / total, 4) if total else 0.0
            patterns.append(
                {
                    "pattern_type": pattern_type,
                    "pattern_key": key,
                    "success_rate": rate,
                    "evidence_count": total,
                    "confidence": self._compute_confidence(total, rate),
                    "metadata": {"wins": wins, "losses": total - wins},
                    "project": key if project_scoped else None,
                }
            )
        return patterns

    # ------------------------------------------------------------------
    # Internal helpers
    # ------------------------------------------------------------------

    @staticmethod
    def _compute_confidence(evidence_count: int, success_rate: float) -> float:
        """Confidence = min(evidence/10, 1.0) * abs(rate - 0.5) * 2.

        High confidence requires both sufficient evidence AND a success
        rate that deviates significantly from the 50% baseline.
        """
        evidence_factor = min(evidence_count / 10.0, 1.0)
        deviation_factor = abs(success_rate - 0.5) * 2.0
        return round(evidence_factor * deviation_factor, 4)

    def _connect(self) -> sqlite3.Connection:
        """Open a connection with row factory enabled."""
        conn = sqlite3.connect(self._db_path)
        conn.row_factory = sqlite3.Row
        return conn

    def _ensure_table(self) -> None:
        """Create the behavioral_patterns table if it doesn't exist."""
        conn = self._connect()
        try:
            conn.execute(self._CREATE_TABLE)
            conn.commit()
        finally:
            conn.close()

    @staticmethod
    def _row_to_dict(row: sqlite3.Row) -> Dict[str, Any]:
        """Convert a sqlite3.Row into a plain dict with parsed JSON."""
        d = dict(row)
        meta = d.get("metadata", "{}")
        d["metadata"] = json.loads(meta) if isinstance(meta, str) else meta
        return d
@@ -0,0 +1,206 @@
1
+ # SPDX-License-Identifier: MIT
2
+ # Copyright (c) 2026 SuperLocalMemory (superlocalmemory.com)
3
+ """Privacy-safe cross-project behavioral pattern transfer.
4
+
5
+ Transfers behavioral patterns between projects using ONLY metadata
6
+ (pattern type, success rate, confidence). Never transfers memory
7
+ content or content hashes.
8
+
9
+ Eligibility criteria:
10
+ - confidence >= 0.7
11
+ - evidence_count >= 5
12
+ - source project != target project
13
+
14
+ Part of SLM v2.8 Behavioral Learning Engine.
15
+ """
16
+ import sqlite3
17
+ import threading
18
+ from typing import Dict, List, Optional, Any
19
+
20
+
21
# Thresholds for transfer eligibility: a pattern must be well-evidenced
# and high-confidence before it may cross project boundaries.
MIN_CONFIDENCE = 0.7
MIN_EVIDENCE = 5


class CrossProjectTransfer:
    """Privacy-safe cross-project behavioral pattern transfer.

    Only metadata (pattern_type, pattern_key, success_rate,
    evidence_count, confidence) is transferred — never content
    or content hashes.

    Eligibility criteria:
    - confidence >= MIN_CONFIDENCE
    - evidence_count >= MIN_EVIDENCE
    - source project != target project

    Thread-safe: all DB access happens under an internal lock.
    """

    _CREATE_CROSS_TABLE = """
    CREATE TABLE IF NOT EXISTS cross_project_behaviors (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        source_project TEXT NOT NULL,
        target_project TEXT NOT NULL,
        pattern_id INTEGER NOT NULL,
        transfer_type TEXT DEFAULT 'metadata',
        confidence REAL DEFAULT 0.0,
        profile TEXT DEFAULT 'default',
        created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
        FOREIGN KEY (pattern_id) REFERENCES behavioral_patterns(id)
    )
    """

    def __init__(self, db_path: Optional[str] = None, enabled: bool = True):
        """Create a transfer manager bound to ``db_path``.

        Args:
            db_path: SQLite database path. When None, no table is created.
            enabled: Privacy toggle. When False, no transfers are
                evaluated or recorded.
        """
        self._db_path = db_path
        self._enabled = enabled
        self._lock = threading.Lock()
        if db_path:
            self._ensure_tables()

    # ------------------------------------------------------------------
    # Public API
    # ------------------------------------------------------------------

    def evaluate_transfers(
        self, target_project: str
    ) -> List[Dict[str, Any]]:
        """Find patterns eligible for transfer to the target project.

        Eligibility: confidence >= MIN_CONFIDENCE AND
        evidence_count >= MIN_EVIDENCE AND source project != target project.

        Returns:
            List of dicts with metadata-only fields: pattern_id,
            pattern_type, pattern_key, success_rate, evidence_count,
            confidence, source_project, transfer_type. Empty when the
            feature is disabled.
        """
        if not self._enabled:
            return []

        with self._lock:
            conn = self._connect()
            try:
                rows = conn.execute(
                    """SELECT id, pattern_type, pattern_key, success_rate,
                              evidence_count, confidence, project
                       FROM behavioral_patterns
                       WHERE confidence >= ?
                         AND evidence_count >= ?
                         AND project IS NOT NULL
                         AND project != ?
                       ORDER BY confidence DESC""",
                    (MIN_CONFIDENCE, MIN_EVIDENCE, target_project),
                ).fetchall()

                return [self._eligible_to_dict(row) for row in rows]
            finally:
                conn.close()

    def apply_transfer(
        self, pattern_id: int, target_project: str
    ) -> Dict[str, Any]:
        """Record a cross-project transfer in the database.

        Looks up the source pattern to get its project and confidence,
        then inserts a record into cross_project_behaviors.

        FIX: honors the ``enabled`` privacy toggle — previously only
        ``evaluate_transfers`` checked it, so a disabled instance could
        still record transfers.

        Returns:
            Dict with success status and transfer id, or an ``error``
            field ("transfers_disabled" / "pattern_not_found") on failure.
        """
        if not self._enabled:
            return {"success": False, "error": "transfers_disabled"}

        with self._lock:
            conn = self._connect()
            try:
                # Look up the source pattern
                pattern = conn.execute(
                    "SELECT project, confidence FROM behavioral_patterns WHERE id = ?",
                    (pattern_id,),
                ).fetchone()

                if pattern is None:
                    return {"success": False, "error": "pattern_not_found"}

                source_project = pattern["project"]
                confidence = pattern["confidence"]

                cur = conn.execute(
                    """INSERT INTO cross_project_behaviors
                       (source_project, target_project, pattern_id,
                        transfer_type, confidence)
                       VALUES (?, ?, ?, 'metadata', ?)""",
                    (source_project, target_project, pattern_id, confidence),
                )
                conn.commit()
                return {
                    "success": True,
                    "transfer_id": cur.lastrowid,
                    "source_project": source_project,
                    "target_project": target_project,
                }
            finally:
                conn.close()

    def get_transfers(
        self,
        target_project: Optional[str] = None,
        source_project: Optional[str] = None,
    ) -> List[Dict[str, Any]]:
        """Query recorded cross-project transfers.

        Args:
            target_project: Filter by target project.
            source_project: Filter by source project.

        Returns:
            List of transfer record dicts, newest first.
        """
        with self._lock:
            conn = self._connect()
            try:
                query = "SELECT * FROM cross_project_behaviors WHERE 1=1"
                params: List[Any] = []

                if target_project is not None:
                    query += " AND target_project = ?"
                    params.append(target_project)

                if source_project is not None:
                    query += " AND source_project = ?"
                    params.append(source_project)

                query += " ORDER BY created_at DESC"

                rows = conn.execute(query, params).fetchall()
                return [dict(row) for row in rows]
            finally:
                conn.close()

    # ------------------------------------------------------------------
    # Internal helpers
    # ------------------------------------------------------------------

    def _connect(self) -> sqlite3.Connection:
        """Open a connection with row factory enabled."""
        conn = sqlite3.connect(self._db_path)
        conn.row_factory = sqlite3.Row
        return conn

    def _ensure_tables(self) -> None:
        """Create the cross_project_behaviors table if missing."""
        conn = self._connect()
        try:
            conn.execute(self._CREATE_CROSS_TABLE)
            conn.commit()
        finally:
            conn.close()

    @staticmethod
    def _eligible_to_dict(row: sqlite3.Row) -> Dict[str, Any]:
        """Convert a pattern row to a privacy-safe transfer dict.

        Only metadata fields are included. No content, no hashes.
        """
        return {
            "pattern_id": row["id"],
            "pattern_type": row["pattern_type"],
            "pattern_key": row["pattern_key"],
            "success_rate": row["success_rate"],
            "evidence_count": row["evidence_count"],
            "confidence": row["confidence"],
            "source_project": row["project"],
            "transfer_type": "metadata",
        }