superlocalmemory 2.7.6 → 2.8.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +120 -155
- package/README.md +115 -89
- package/api_server.py +2 -12
- package/docs/PATTERN-LEARNING.md +64 -199
- package/docs/example_graph_usage.py +4 -6
- package/install.sh +59 -0
- package/mcp_server.py +83 -7
- package/package.json +1 -8
- package/scripts/generate-thumbnails.py +3 -5
- package/skills/slm-build-graph/SKILL.md +1 -1
- package/skills/slm-list-recent/SKILL.md +1 -1
- package/skills/slm-recall/SKILL.md +1 -1
- package/skills/slm-remember/SKILL.md +1 -1
- package/skills/slm-show-patterns/SKILL.md +1 -1
- package/skills/slm-status/SKILL.md +1 -1
- package/skills/slm-switch-profile/SKILL.md +1 -1
- package/src/agent_registry.py +7 -18
- package/src/auth_middleware.py +3 -5
- package/src/auto_backup.py +3 -7
- package/src/behavioral/__init__.py +49 -0
- package/src/behavioral/behavioral_listener.py +203 -0
- package/src/behavioral/behavioral_patterns.py +275 -0
- package/src/behavioral/cross_project_transfer.py +206 -0
- package/src/behavioral/outcome_inference.py +194 -0
- package/src/behavioral/outcome_tracker.py +193 -0
- package/src/behavioral/tests/__init__.py +4 -0
- package/src/behavioral/tests/test_behavioral_integration.py +108 -0
- package/src/behavioral/tests/test_behavioral_patterns.py +150 -0
- package/src/behavioral/tests/test_cross_project_transfer.py +142 -0
- package/src/behavioral/tests/test_mcp_behavioral.py +139 -0
- package/src/behavioral/tests/test_mcp_report_outcome.py +117 -0
- package/src/behavioral/tests/test_outcome_inference.py +107 -0
- package/src/behavioral/tests/test_outcome_tracker.py +96 -0
- package/src/cache_manager.py +4 -6
- package/src/compliance/__init__.py +48 -0
- package/src/compliance/abac_engine.py +149 -0
- package/src/compliance/abac_middleware.py +116 -0
- package/src/compliance/audit_db.py +215 -0
- package/src/compliance/audit_logger.py +148 -0
- package/src/compliance/retention_manager.py +289 -0
- package/src/compliance/retention_scheduler.py +186 -0
- package/src/compliance/tests/__init__.py +4 -0
- package/src/compliance/tests/test_abac_enforcement.py +95 -0
- package/src/compliance/tests/test_abac_engine.py +124 -0
- package/src/compliance/tests/test_abac_mcp_integration.py +118 -0
- package/src/compliance/tests/test_audit_db.py +123 -0
- package/src/compliance/tests/test_audit_logger.py +98 -0
- package/src/compliance/tests/test_mcp_audit.py +128 -0
- package/src/compliance/tests/test_mcp_retention_policy.py +125 -0
- package/src/compliance/tests/test_retention_manager.py +131 -0
- package/src/compliance/tests/test_retention_scheduler.py +99 -0
- package/src/db_connection_manager.py +2 -12
- package/src/embedding_engine.py +61 -669
- package/src/embeddings/__init__.py +47 -0
- package/src/embeddings/cache.py +70 -0
- package/src/embeddings/cli.py +113 -0
- package/src/embeddings/constants.py +47 -0
- package/src/embeddings/database.py +91 -0
- package/src/embeddings/engine.py +247 -0
- package/src/embeddings/model_loader.py +145 -0
- package/src/event_bus.py +3 -13
- package/src/graph/__init__.py +36 -0
- package/src/graph/build_helpers.py +74 -0
- package/src/graph/cli.py +87 -0
- package/src/graph/cluster_builder.py +188 -0
- package/src/graph/cluster_summary.py +148 -0
- package/src/graph/constants.py +47 -0
- package/src/graph/edge_builder.py +162 -0
- package/src/graph/entity_extractor.py +95 -0
- package/src/graph/graph_core.py +226 -0
- package/src/graph/graph_search.py +231 -0
- package/src/graph/hierarchical.py +207 -0
- package/src/graph/schema.py +99 -0
- package/src/graph_engine.py +45 -1451
- package/src/hnsw_index.py +3 -7
- package/src/hybrid_search.py +36 -683
- package/src/learning/__init__.py +27 -12
- package/src/learning/adaptive_ranker.py +50 -12
- package/src/learning/cross_project_aggregator.py +2 -12
- package/src/learning/engagement_tracker.py +2 -12
- package/src/learning/feature_extractor.py +175 -43
- package/src/learning/feedback_collector.py +7 -12
- package/src/learning/learning_db.py +180 -12
- package/src/learning/project_context_manager.py +2 -12
- package/src/learning/source_quality_scorer.py +2 -12
- package/src/learning/synthetic_bootstrap.py +2 -12
- package/src/learning/tests/__init__.py +2 -0
- package/src/learning/tests/test_adaptive_ranker.py +2 -6
- package/src/learning/tests/test_adaptive_ranker_v28.py +60 -0
- package/src/learning/tests/test_aggregator.py +2 -6
- package/src/learning/tests/test_auto_retrain_v28.py +35 -0
- package/src/learning/tests/test_e2e_ranking_v28.py +82 -0
- package/src/learning/tests/test_feature_extractor_v28.py +93 -0
- package/src/learning/tests/test_feedback_collector.py +2 -6
- package/src/learning/tests/test_learning_db.py +2 -6
- package/src/learning/tests/test_learning_db_v28.py +110 -0
- package/src/learning/tests/test_learning_init_v28.py +48 -0
- package/src/learning/tests/test_outcome_signals.py +48 -0
- package/src/learning/tests/test_project_context.py +2 -6
- package/src/learning/tests/test_schema_migration.py +319 -0
- package/src/learning/tests/test_signal_inference.py +11 -13
- package/src/learning/tests/test_source_quality.py +2 -6
- package/src/learning/tests/test_synthetic_bootstrap.py +3 -7
- package/src/learning/tests/test_workflow_miner.py +2 -6
- package/src/learning/workflow_pattern_miner.py +2 -12
- package/src/lifecycle/__init__.py +54 -0
- package/src/lifecycle/bounded_growth.py +239 -0
- package/src/lifecycle/compaction_engine.py +226 -0
- package/src/lifecycle/lifecycle_engine.py +302 -0
- package/src/lifecycle/lifecycle_evaluator.py +225 -0
- package/src/lifecycle/lifecycle_scheduler.py +130 -0
- package/src/lifecycle/retention_policy.py +285 -0
- package/src/lifecycle/tests/__init__.py +4 -0
- package/src/lifecycle/tests/test_bounded_growth.py +193 -0
- package/src/lifecycle/tests/test_compaction.py +179 -0
- package/src/lifecycle/tests/test_lifecycle_engine.py +137 -0
- package/src/lifecycle/tests/test_lifecycle_evaluation.py +177 -0
- package/src/lifecycle/tests/test_lifecycle_scheduler.py +127 -0
- package/src/lifecycle/tests/test_lifecycle_search.py +109 -0
- package/src/lifecycle/tests/test_mcp_compact.py +149 -0
- package/src/lifecycle/tests/test_mcp_lifecycle_status.py +114 -0
- package/src/lifecycle/tests/test_retention_policy.py +162 -0
- package/src/mcp_tools_v28.py +280 -0
- package/src/memory-profiles.py +2 -12
- package/src/memory-reset.py +2 -12
- package/src/memory_compression.py +2 -12
- package/src/memory_store_v2.py +76 -20
- package/src/migrate_v1_to_v2.py +2 -12
- package/src/pattern_learner.py +29 -975
- package/src/patterns/__init__.py +24 -0
- package/src/patterns/analyzers.py +247 -0
- package/src/patterns/learner.py +267 -0
- package/src/patterns/scoring.py +167 -0
- package/src/patterns/store.py +223 -0
- package/src/patterns/terminology.py +138 -0
- package/src/provenance_tracker.py +4 -14
- package/src/query_optimizer.py +4 -6
- package/src/rate_limiter.py +2 -6
- package/src/search/__init__.py +20 -0
- package/src/search/cli.py +77 -0
- package/src/search/constants.py +26 -0
- package/src/search/engine.py +239 -0
- package/src/search/fusion.py +122 -0
- package/src/search/index_loader.py +112 -0
- package/src/search/methods.py +162 -0
- package/src/search_engine_v2.py +4 -6
- package/src/setup_validator.py +7 -13
- package/src/subscription_manager.py +2 -12
- package/src/tree/__init__.py +59 -0
- package/src/tree/builder.py +183 -0
- package/src/tree/nodes.py +196 -0
- package/src/tree/queries.py +252 -0
- package/src/tree/schema.py +76 -0
- package/src/tree_manager.py +10 -711
- package/src/trust/__init__.py +45 -0
- package/src/trust/constants.py +66 -0
- package/src/trust/queries.py +157 -0
- package/src/trust/schema.py +95 -0
- package/src/trust/scorer.py +299 -0
- package/src/trust/signals.py +95 -0
- package/src/trust_scorer.py +39 -697
- package/src/webhook_dispatcher.py +2 -12
- package/ui/app.js +1 -1
- package/ui/js/agents.js +1 -1
- package/ui_server.py +2 -14
- package/ATTRIBUTION.md +0 -140
- package/docs/ARCHITECTURE-V2.5.md +0 -190
- package/docs/GRAPH-ENGINE.md +0 -503
- package/docs/architecture-diagram.drawio +0 -405
- package/docs/plans/2026-02-13-benchmark-suite.md +0 -1349
|
@@ -320,7 +320,7 @@ slm recall "query" --format csv
|
|
|
320
320
|
|
|
321
321
|
**Created by:** [Varun Pratap Bhardwaj](https://github.com/varun369) (Solution Architect)
|
|
322
322
|
**Project:** SuperLocalMemory V2
|
|
323
|
-
**License:** MIT
|
|
323
|
+
**License:** MIT (see [LICENSE](../../LICENSE))
|
|
324
324
|
**Repository:** https://github.com/varun369/SuperLocalMemoryV2
|
|
325
325
|
|
|
326
326
|
*Open source doesn't mean removing credit. Attribution must be preserved per MIT License terms.*
|
|
@@ -188,7 +188,7 @@ slm remember "Commit: $commit_msg (${commit_hash:0:7})" \
|
|
|
188
188
|
|
|
189
189
|
**Created by:** [Varun Pratap Bhardwaj](https://github.com/varun369) (Solution Architect)
|
|
190
190
|
**Project:** SuperLocalMemory V2
|
|
191
|
-
**License:** MIT
|
|
191
|
+
**License:** MIT (see [LICENSE](../../LICENSE))
|
|
192
192
|
**Repository:** https://github.com/varun369/SuperLocalMemoryV2
|
|
193
193
|
|
|
194
194
|
*Open source doesn't mean removing credit. Attribution must be preserved per MIT License terms.*
|
|
@@ -218,7 +218,7 @@ slm engagement
|
|
|
218
218
|
|
|
219
219
|
**Created by:** [Varun Pratap Bhardwaj](https://github.com/varun369) (Solution Architect)
|
|
220
220
|
**Project:** SuperLocalMemory V2
|
|
221
|
-
**License:** MIT
|
|
221
|
+
**License:** MIT (see [LICENSE](../../LICENSE))
|
|
222
222
|
**Repository:** https://github.com/varun369/SuperLocalMemoryV2
|
|
223
223
|
|
|
224
224
|
*Open source doesn't mean removing credit. Attribution must be preserved per MIT License terms.*
|
|
@@ -357,7 +357,7 @@ slm build-graph
|
|
|
357
357
|
|
|
358
358
|
**Created by:** [Varun Pratap Bhardwaj](https://github.com/varun369) (Solution Architect)
|
|
359
359
|
**Project:** SuperLocalMemory V2
|
|
360
|
-
**License:** MIT
|
|
360
|
+
**License:** MIT (see [LICENSE](../../LICENSE))
|
|
361
361
|
**Repository:** https://github.com/varun369/SuperLocalMemoryV2
|
|
362
362
|
|
|
363
363
|
*Open source doesn't mean removing credit. Attribution must be preserved per MIT License terms.*
|
|
@@ -436,7 +436,7 @@ slm list-profiles
|
|
|
436
436
|
|
|
437
437
|
**Created by:** [Varun Pratap Bhardwaj](https://github.com/varun369) (Solution Architect)
|
|
438
438
|
**Project:** SuperLocalMemory V2
|
|
439
|
-
**License:** MIT
|
|
439
|
+
**License:** MIT (see [LICENSE](../../LICENSE))
|
|
440
440
|
**Repository:** https://github.com/varun369/SuperLocalMemoryV2
|
|
441
441
|
|
|
442
442
|
*Open source doesn't mean removing credit. Attribution must be preserved per MIT License terms.*
|
package/src/agent_registry.py
CHANGED
|
@@ -1,22 +1,12 @@
|
|
|
1
1
|
#!/usr/bin/env python3
|
|
2
|
-
|
|
3
|
-
|
|
4
|
-
Copyright (c) 2026 Varun Pratap Bhardwaj
|
|
5
|
-
Licensed under MIT License
|
|
6
|
-
|
|
7
|
-
Repository: https://github.com/varun369/SuperLocalMemoryV2
|
|
8
|
-
Author: Varun Pratap Bhardwaj (Solution Architect)
|
|
9
|
-
|
|
10
|
-
NOTICE: This software is protected by MIT License.
|
|
11
|
-
Attribution must be preserved in all copies or derivatives.
|
|
12
|
-
"""
|
|
13
|
-
|
|
2
|
+
# SPDX-License-Identifier: MIT
|
|
3
|
+
# Copyright (c) 2026 SuperLocalMemory (superlocalmemory.com)
|
|
14
4
|
"""
|
|
15
5
|
AgentRegistry — Tracks which AI agents connect to SuperLocalMemory,
|
|
16
6
|
what they write, when, and via which protocol.
|
|
17
7
|
|
|
18
|
-
Every MCP client (Claude, Cursor, Windsurf), CLI call, REST API request
|
|
19
|
-
|
|
8
|
+
Every MCP client (Claude, Cursor, Windsurf), CLI call, and REST API request
|
|
9
|
+
gets registered here. This powers:
|
|
20
10
|
- Dashboard "Connected Agents" panel
|
|
21
11
|
- Trust scoring input (v2.5 silent collection)
|
|
22
12
|
- Provenance tracking (who created which memory)
|
|
@@ -31,7 +21,6 @@ Protocols:
|
|
|
31
21
|
cli — Command-line interface (slm command, bin/ scripts)
|
|
32
22
|
rest — REST API (api_server.py)
|
|
33
23
|
python — Direct Python import
|
|
34
|
-
a2a — Agent-to-Agent Protocol (v2.7+)
|
|
35
24
|
"""
|
|
36
25
|
|
|
37
26
|
import json
|
|
@@ -155,7 +144,7 @@ class AgentRegistry:
|
|
|
155
144
|
Args:
|
|
156
145
|
agent_id: Unique identifier (e.g., "mcp:claude-desktop")
|
|
157
146
|
agent_name: Human-readable name (e.g., "Claude Desktop")
|
|
158
|
-
protocol: Connection protocol (mcp, cli, rest, python
|
|
147
|
+
protocol: Connection protocol (mcp, cli, rest, python)
|
|
159
148
|
metadata: Additional agent info (version, capabilities, etc.)
|
|
160
149
|
|
|
161
150
|
Returns:
|
|
@@ -164,7 +153,7 @@ class AgentRegistry:
|
|
|
164
153
|
if not agent_id or not isinstance(agent_id, str):
|
|
165
154
|
raise ValueError("agent_id must be a non-empty string")
|
|
166
155
|
|
|
167
|
-
valid_protocols = ("mcp", "cli", "rest", "python"
|
|
156
|
+
valid_protocols = ("mcp", "cli", "rest", "python")
|
|
168
157
|
if protocol not in valid_protocols:
|
|
169
158
|
raise ValueError(f"Invalid protocol: {protocol}. Must be one of {valid_protocols}")
|
|
170
159
|
|
|
@@ -283,7 +272,7 @@ class AgentRegistry:
|
|
|
283
272
|
List registered agents with optional filtering.
|
|
284
273
|
|
|
285
274
|
Args:
|
|
286
|
-
protocol: Filter by protocol (mcp, cli, rest, python
|
|
275
|
+
protocol: Filter by protocol (mcp, cli, rest, python)
|
|
287
276
|
limit: Max agents to return
|
|
288
277
|
active_since_hours: Only agents seen within N hours
|
|
289
278
|
|
package/src/auth_middleware.py
CHANGED
|
@@ -1,14 +1,12 @@
|
|
|
1
1
|
#!/usr/bin/env python3
|
|
2
|
-
|
|
3
|
-
|
|
4
|
-
|
|
5
|
-
Licensed under MIT License
|
|
2
|
+
# SPDX-License-Identifier: MIT
|
|
3
|
+
# Copyright (c) 2026 SuperLocalMemory (superlocalmemory.com)
|
|
4
|
+
"""SuperLocalMemory V2 - Optional API Key Authentication
|
|
6
5
|
|
|
7
6
|
Opt-in API key authentication for dashboard and API endpoints.
|
|
8
7
|
When ~/.claude-memory/api_key file exists, write endpoints require
|
|
9
8
|
X-SLM-API-Key header. Read endpoints remain open for backward compatibility.
|
|
10
9
|
"""
|
|
11
|
-
|
|
12
10
|
import os
|
|
13
11
|
import hashlib
|
|
14
12
|
import logging
|
package/src/auto_backup.py
CHANGED
|
@@ -1,10 +1,7 @@
|
|
|
1
1
|
#!/usr/bin/env python3
|
|
2
|
-
|
|
3
|
-
|
|
4
|
-
|
|
5
|
-
Licensed under MIT License
|
|
6
|
-
|
|
7
|
-
Repository: https://github.com/varun369/SuperLocalMemoryV2
|
|
2
|
+
# SPDX-License-Identifier: MIT
|
|
3
|
+
# Copyright (c) 2026 SuperLocalMemory (superlocalmemory.com)
|
|
4
|
+
"""SuperLocalMemory V2 - Auto Backup System
|
|
8
5
|
|
|
9
6
|
Automated backup system for memory.db:
|
|
10
7
|
- Configurable interval: 24h (daily) or 7 days (weekly, default)
|
|
@@ -13,7 +10,6 @@ Automated backup system for memory.db:
|
|
|
13
10
|
- Auto-triggers on memory operations when backup is due
|
|
14
11
|
- Manual backup via CLI
|
|
15
12
|
"""
|
|
16
|
-
|
|
17
13
|
import sqlite3
|
|
18
14
|
import shutil
|
|
19
15
|
import json
|
|
@@ -0,0 +1,49 @@
|
|
|
1
|
+
# SPDX-License-Identifier: MIT
|
|
2
|
+
# Copyright (c) 2026 SuperLocalMemory (superlocalmemory.com)
|
|
3
|
+
"""SLM v2.8 Behavioral Learning Engine — Action Outcome Learning.
|
|
4
|
+
|
|
5
|
+
Tracks what happens AFTER memories are recalled (success/failure/partial).
|
|
6
|
+
Extracts behavioral patterns. Transfers across projects (privacy-safe).
|
|
7
|
+
All local, zero-LLM.
|
|
8
|
+
|
|
9
|
+
Graceful degradation: if this module fails to import,
|
|
10
|
+
adaptive ranking continues with v2.7 features only.
|
|
11
|
+
"""
|
|
12
|
+
import threading
|
|
13
|
+
from pathlib import Path
|
|
14
|
+
from typing import Optional, Dict, Any
|
|
15
|
+
|
|
16
|
+
BEHAVIORAL_AVAILABLE = False
|
|
17
|
+
_init_error = None
|
|
18
|
+
|
|
19
|
+
try:
|
|
20
|
+
from .outcome_tracker import OutcomeTracker
|
|
21
|
+
from .behavioral_patterns import BehavioralPatternExtractor
|
|
22
|
+
BEHAVIORAL_AVAILABLE = True
|
|
23
|
+
except ImportError as e:
|
|
24
|
+
_init_error = str(e)
|
|
25
|
+
|
|
26
|
+
_outcome_tracker: Optional["OutcomeTracker"] = None
|
|
27
|
+
_tracker_lock = threading.Lock()
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
def get_outcome_tracker(db_path: Optional[Path] = None) -> Optional["OutcomeTracker"]:
|
|
31
|
+
"""Get or create the outcome tracker singleton."""
|
|
32
|
+
global _outcome_tracker
|
|
33
|
+
if not BEHAVIORAL_AVAILABLE:
|
|
34
|
+
return None
|
|
35
|
+
with _tracker_lock:
|
|
36
|
+
if _outcome_tracker is None:
|
|
37
|
+
try:
|
|
38
|
+
_outcome_tracker = OutcomeTracker(db_path)
|
|
39
|
+
except Exception:
|
|
40
|
+
return None
|
|
41
|
+
return _outcome_tracker
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
def get_status() -> Dict[str, Any]:
|
|
45
|
+
return {
|
|
46
|
+
"behavioral_available": BEHAVIORAL_AVAILABLE,
|
|
47
|
+
"init_error": _init_error,
|
|
48
|
+
"tracker_active": _outcome_tracker is not None,
|
|
49
|
+
}
|
|
@@ -0,0 +1,203 @@
|
|
|
1
|
+
# SPDX-License-Identifier: MIT
|
|
2
|
+
# Copyright (c) 2026 SuperLocalMemory (superlocalmemory.com)
|
|
3
|
+
"""EventBus listener that bridges events to the behavioral learning engine.
|
|
4
|
+
|
|
5
|
+
Listens for memory.recalled, memory.deleted, and usage events.
|
|
6
|
+
Feeds recall events to OutcomeInference for implicit outcome detection.
|
|
7
|
+
Triggers pattern extraction after configurable outcome count threshold.
|
|
8
|
+
|
|
9
|
+
Part of SLM v2.8 Behavioral Learning Engine.
|
|
10
|
+
"""
|
|
11
|
+
import logging
|
|
12
|
+
import threading
|
|
13
|
+
from datetime import datetime
|
|
14
|
+
from pathlib import Path
|
|
15
|
+
from typing import Optional, Dict, Any, List
|
|
16
|
+
|
|
17
|
+
from .outcome_tracker import OutcomeTracker
|
|
18
|
+
from .outcome_inference import OutcomeInference
|
|
19
|
+
|
|
20
|
+
logger = logging.getLogger("superlocalmemory.behavioral.listener")
|
|
21
|
+
|
|
22
|
+
# Default: extract patterns every 100 new outcomes
|
|
23
|
+
DEFAULT_EXTRACTION_THRESHOLD = 100
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
class BehavioralListener:
|
|
27
|
+
"""EventBus listener that feeds events to the behavioral learning engine.
|
|
28
|
+
|
|
29
|
+
Processes:
|
|
30
|
+
- memory.recalled -> feeds to OutcomeInference (implicit outcome detection)
|
|
31
|
+
- memory.deleted -> records deletion for inference (Rule 1 signal)
|
|
32
|
+
- Usage signals -> records for inference (Rule 2/3 signals)
|
|
33
|
+
|
|
34
|
+
Thread-safe: handle_event can be called from any thread.
|
|
35
|
+
Listener callbacks run on the emitter's thread -- must be fast.
|
|
36
|
+
"""
|
|
37
|
+
|
|
38
|
+
# Event types this listener cares about
|
|
39
|
+
_RECALL_EVENT = "memory.recalled"
|
|
40
|
+
_DELETION_EVENT = "memory.deleted"
|
|
41
|
+
|
|
42
|
+
def __init__(
|
|
43
|
+
self,
|
|
44
|
+
db_path: Optional[str] = None,
|
|
45
|
+
extraction_threshold: int = DEFAULT_EXTRACTION_THRESHOLD,
|
|
46
|
+
):
|
|
47
|
+
if db_path is None:
|
|
48
|
+
db_path = str(Path.home() / ".claude-memory" / "learning.db")
|
|
49
|
+
self._db_path = str(db_path)
|
|
50
|
+
self.extraction_threshold = extraction_threshold
|
|
51
|
+
|
|
52
|
+
# Core components
|
|
53
|
+
self._tracker = OutcomeTracker(self._db_path)
|
|
54
|
+
self._inference = OutcomeInference()
|
|
55
|
+
|
|
56
|
+
# Thread safety
|
|
57
|
+
self._lock = threading.Lock()
|
|
58
|
+
|
|
59
|
+
# Counters
|
|
60
|
+
self.events_processed = 0
|
|
61
|
+
self.recall_events_processed = 0
|
|
62
|
+
self.deletion_events_processed = 0
|
|
63
|
+
self._outcome_count_since_extraction = 0
|
|
64
|
+
self._registered = False
|
|
65
|
+
|
|
66
|
+
# ------------------------------------------------------------------
|
|
67
|
+
# Event handling (called on emitter's thread — must be fast)
|
|
68
|
+
# ------------------------------------------------------------------
|
|
69
|
+
|
|
70
|
+
def handle_event(self, event: Dict[str, Any]) -> None:
|
|
71
|
+
"""Process an EventBus event.
|
|
72
|
+
|
|
73
|
+
Called on the emitter's thread — must be fast and non-blocking.
|
|
74
|
+
Filters by event_type and dispatches to the appropriate handler.
|
|
75
|
+
"""
|
|
76
|
+
event_type = event.get("event_type", "")
|
|
77
|
+
payload = event.get("payload", {})
|
|
78
|
+
memory_id = event.get("memory_id")
|
|
79
|
+
timestamp_str = event.get("timestamp")
|
|
80
|
+
|
|
81
|
+
try:
|
|
82
|
+
timestamp = (
|
|
83
|
+
datetime.fromisoformat(timestamp_str)
|
|
84
|
+
if timestamp_str
|
|
85
|
+
else datetime.now()
|
|
86
|
+
)
|
|
87
|
+
except (ValueError, TypeError):
|
|
88
|
+
timestamp = datetime.now()
|
|
89
|
+
|
|
90
|
+
with self._lock:
|
|
91
|
+
self.events_processed += 1
|
|
92
|
+
|
|
93
|
+
if event_type == self._RECALL_EVENT:
|
|
94
|
+
self._handle_recall(payload, memory_id, timestamp)
|
|
95
|
+
|
|
96
|
+
elif event_type == self._DELETION_EVENT:
|
|
97
|
+
self._handle_deletion(memory_id, timestamp)
|
|
98
|
+
# All other event types are silently ignored
|
|
99
|
+
|
|
100
|
+
def _handle_recall(
|
|
101
|
+
self,
|
|
102
|
+
payload: Dict[str, Any],
|
|
103
|
+
memory_id: Optional[int],
|
|
104
|
+
timestamp: datetime,
|
|
105
|
+
) -> None:
|
|
106
|
+
"""Process a memory.recalled event. Must be called under self._lock."""
|
|
107
|
+
query = payload.get("query", "")
|
|
108
|
+
memory_ids = payload.get(
|
|
109
|
+
"memory_ids", [memory_id] if memory_id else []
|
|
110
|
+
)
|
|
111
|
+
signal = payload.get("signal")
|
|
112
|
+
|
|
113
|
+
self._inference.record_recall(query, memory_ids, timestamp)
|
|
114
|
+
if signal:
|
|
115
|
+
self._inference.record_usage(
|
|
116
|
+
query, signal=signal, timestamp=timestamp
|
|
117
|
+
)
|
|
118
|
+
self.recall_events_processed += 1
|
|
119
|
+
|
|
120
|
+
# Periodically run inference (every 10 recall events)
|
|
121
|
+
if self.recall_events_processed % 10 == 0:
|
|
122
|
+
self._run_inference_cycle()
|
|
123
|
+
|
|
124
|
+
def _handle_deletion(
|
|
125
|
+
self, memory_id: Optional[int], timestamp: datetime
|
|
126
|
+
) -> None:
|
|
127
|
+
"""Process a memory.deleted event. Must be called under self._lock."""
|
|
128
|
+
if memory_id is not None:
|
|
129
|
+
self._inference.record_deletion(memory_id, timestamp)
|
|
130
|
+
self.deletion_events_processed += 1
|
|
131
|
+
|
|
132
|
+
# ------------------------------------------------------------------
|
|
133
|
+
# Inference + pattern extraction
|
|
134
|
+
# ------------------------------------------------------------------
|
|
135
|
+
|
|
136
|
+
def _run_inference_cycle(self) -> None:
|
|
137
|
+
"""Run outcome inference and optionally trigger pattern extraction."""
|
|
138
|
+
inferences: List[Dict] = self._inference.infer_outcomes(
|
|
139
|
+
datetime.now()
|
|
140
|
+
)
|
|
141
|
+
for inf in inferences:
|
|
142
|
+
self._tracker.record_outcome(
|
|
143
|
+
memory_ids=inf["memory_ids"],
|
|
144
|
+
outcome=inf["outcome"],
|
|
145
|
+
action_type="inferred",
|
|
146
|
+
confidence=inf["confidence"],
|
|
147
|
+
context={"reason": inf.get("reason", "")},
|
|
148
|
+
)
|
|
149
|
+
self._outcome_count_since_extraction += 1
|
|
150
|
+
|
|
151
|
+
if self._outcome_count_since_extraction >= self.extraction_threshold:
|
|
152
|
+
self._trigger_extraction()
|
|
153
|
+
|
|
154
|
+
def _trigger_extraction(self) -> None:
|
|
155
|
+
"""Trigger behavioral pattern extraction. Best-effort."""
|
|
156
|
+
try:
|
|
157
|
+
from .behavioral_patterns import BehavioralPatternExtractor
|
|
158
|
+
|
|
159
|
+
extractor = BehavioralPatternExtractor(self._db_path)
|
|
160
|
+
extractor.extract_patterns()
|
|
161
|
+
extractor.save_patterns()
|
|
162
|
+
self._outcome_count_since_extraction = 0
|
|
163
|
+
except Exception as exc:
|
|
164
|
+
logger.warning("Pattern extraction failed: %s", exc)
|
|
165
|
+
|
|
166
|
+
# ------------------------------------------------------------------
|
|
167
|
+
# EventBus registration
|
|
168
|
+
# ------------------------------------------------------------------
|
|
169
|
+
|
|
170
|
+
def register_with_eventbus(self) -> bool:
|
|
171
|
+
"""Register this listener with the EventBus singleton.
|
|
172
|
+
|
|
173
|
+
Returns True if registration succeeds, False otherwise.
|
|
174
|
+
Graceful degradation: failure here does NOT break the engine.
|
|
175
|
+
"""
|
|
176
|
+
try:
|
|
177
|
+
from event_bus import EventBus
|
|
178
|
+
|
|
179
|
+
bus = EventBus.get_instance(Path(self._db_path))
|
|
180
|
+
bus.add_listener(self.handle_event)
|
|
181
|
+
self._registered = True
|
|
182
|
+
return True
|
|
183
|
+
except Exception as exc:
|
|
184
|
+
logger.info(
|
|
185
|
+
"EventBus registration skipped (not available): %s", exc
|
|
186
|
+
)
|
|
187
|
+
self._registered = False
|
|
188
|
+
return False
|
|
189
|
+
|
|
190
|
+
# ------------------------------------------------------------------
|
|
191
|
+
# Status / introspection
|
|
192
|
+
# ------------------------------------------------------------------
|
|
193
|
+
|
|
194
|
+
def get_status(self) -> Dict[str, Any]:
|
|
195
|
+
"""Return listener status for diagnostics."""
|
|
196
|
+
return {
|
|
197
|
+
"events_processed": self.events_processed,
|
|
198
|
+
"recall_events_processed": self.recall_events_processed,
|
|
199
|
+
"deletion_events_processed": self.deletion_events_processed,
|
|
200
|
+
"registered": self._registered,
|
|
201
|
+
"outcome_count_since_extraction": self._outcome_count_since_extraction,
|
|
202
|
+
"extraction_threshold": self.extraction_threshold,
|
|
203
|
+
}
|
|
@@ -0,0 +1,275 @@
|
|
|
1
|
+
# SPDX-License-Identifier: MIT
|
|
2
|
+
# Copyright (c) 2026 SuperLocalMemory (superlocalmemory.com)
|
|
3
|
+
"""Pattern extraction from action outcome histories.
|
|
4
|
+
|
|
5
|
+
Scans the action_outcomes table, groups by project and action_type,
|
|
6
|
+
calculates success rates, and stores discovered patterns in the
|
|
7
|
+
behavioral_patterns table. Self-contained: creates its own table via
|
|
8
|
+
CREATE TABLE IF NOT EXISTS so no external migration is needed.
|
|
9
|
+
|
|
10
|
+
Part of SLM v2.8 Behavioral Learning Engine.
|
|
11
|
+
"""
|
|
12
|
+
import json
|
|
13
|
+
import sqlite3
|
|
14
|
+
import threading
|
|
15
|
+
from datetime import datetime, timezone
|
|
16
|
+
from typing import Dict, List, Optional, Any
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
class BehavioralPatternExtractor:
|
|
20
|
+
"""Extracts success/failure patterns from outcome data.
|
|
21
|
+
|
|
22
|
+
Analyzes action_outcomes rows to discover:
|
|
23
|
+
- project_success: success rate per project
|
|
24
|
+
- action_type_success: success rate per action_type
|
|
25
|
+
|
|
26
|
+
Confidence formula:
|
|
27
|
+
min(evidence_count / 10, 1.0) * abs(success_rate - 0.5) * 2
|
|
28
|
+
This yields high confidence only when there is enough evidence AND the
|
|
29
|
+
success rate is far from the 50/50 coin-flip baseline.
|
|
30
|
+
"""
|
|
31
|
+
|
|
32
|
+
PATTERN_TYPES = ("project_success", "action_type_success")
|
|
33
|
+
|
|
34
|
+
_CREATE_TABLE = """
|
|
35
|
+
CREATE TABLE IF NOT EXISTS behavioral_patterns (
|
|
36
|
+
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
|
37
|
+
pattern_type TEXT NOT NULL,
|
|
38
|
+
pattern_key TEXT NOT NULL,
|
|
39
|
+
success_rate REAL DEFAULT 0.0,
|
|
40
|
+
evidence_count INTEGER DEFAULT 0,
|
|
41
|
+
confidence REAL DEFAULT 0.0,
|
|
42
|
+
metadata TEXT DEFAULT '{}',
|
|
43
|
+
project TEXT,
|
|
44
|
+
profile TEXT DEFAULT 'default',
|
|
45
|
+
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
|
|
46
|
+
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
|
|
47
|
+
)
|
|
48
|
+
"""
|
|
49
|
+
|
|
50
|
+
# Minimum outcomes required before we emit a pattern at all.
|
|
51
|
+
MIN_EVIDENCE = 3
|
|
52
|
+
|
|
53
|
+
def __init__(self, db_path: Optional[str] = None):
|
|
54
|
+
self._db_path = db_path
|
|
55
|
+
self._lock = threading.Lock()
|
|
56
|
+
self._patterns: List[Dict[str, Any]] = []
|
|
57
|
+
if db_path:
|
|
58
|
+
self._ensure_table()
|
|
59
|
+
|
|
60
|
+
# ------------------------------------------------------------------
|
|
61
|
+
# Public API
|
|
62
|
+
# ------------------------------------------------------------------
|
|
63
|
+
|
|
64
|
+
def extract_patterns(self) -> List[Dict[str, Any]]:
|
|
65
|
+
"""Scan action_outcomes and extract success/failure patterns.
|
|
66
|
+
|
|
67
|
+
Groups outcomes by project and by action_type, calculates
|
|
68
|
+
success rates, and returns a list of pattern dicts. Also stores
|
|
69
|
+
the result internally so a subsequent ``save_patterns()`` call
|
|
70
|
+
can persist them.
|
|
71
|
+
|
|
72
|
+
Returns:
|
|
73
|
+
List of pattern dicts with keys: pattern_type, pattern_key,
|
|
74
|
+
success_rate, evidence_count, confidence, metadata, project.
|
|
75
|
+
"""
|
|
76
|
+
patterns: List[Dict[str, Any]] = []
|
|
77
|
+
with self._lock:
|
|
78
|
+
conn = self._connect()
|
|
79
|
+
try:
|
|
80
|
+
patterns.extend(self._extract_project_patterns(conn))
|
|
81
|
+
patterns.extend(self._extract_action_type_patterns(conn))
|
|
82
|
+
finally:
|
|
83
|
+
conn.close()
|
|
84
|
+
self._patterns = patterns
|
|
85
|
+
return patterns
|
|
86
|
+
|
|
87
|
+
def save_patterns(self) -> int:
|
|
88
|
+
"""Persist the most recently extracted patterns to the DB.
|
|
89
|
+
|
|
90
|
+
Inserts (or replaces) rows in the behavioral_patterns table.
|
|
91
|
+
|
|
92
|
+
Returns:
|
|
93
|
+
Number of patterns saved.
|
|
94
|
+
"""
|
|
95
|
+
if not self._patterns:
|
|
96
|
+
return 0
|
|
97
|
+
|
|
98
|
+
now = datetime.now(timezone.utc).isoformat()
|
|
99
|
+
with self._lock:
|
|
100
|
+
conn = self._connect()
|
|
101
|
+
try:
|
|
102
|
+
for p in self._patterns:
|
|
103
|
+
# Upsert: delete any existing row for the same
|
|
104
|
+
# (pattern_type, pattern_key, project) then insert.
|
|
105
|
+
conn.execute(
|
|
106
|
+
"""DELETE FROM behavioral_patterns
|
|
107
|
+
WHERE pattern_type = ? AND pattern_key = ?
|
|
108
|
+
AND COALESCE(project, '') = COALESCE(?, '')""",
|
|
109
|
+
(p["pattern_type"], p["pattern_key"], p.get("project")),
|
|
110
|
+
)
|
|
111
|
+
conn.execute(
|
|
112
|
+
"""INSERT INTO behavioral_patterns
|
|
113
|
+
(pattern_type, pattern_key, success_rate,
|
|
114
|
+
evidence_count, confidence, metadata,
|
|
115
|
+
project, created_at, updated_at)
|
|
116
|
+
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)""",
|
|
117
|
+
(
|
|
118
|
+
p["pattern_type"],
|
|
119
|
+
p["pattern_key"],
|
|
120
|
+
p["success_rate"],
|
|
121
|
+
p["evidence_count"],
|
|
122
|
+
p["confidence"],
|
|
123
|
+
json.dumps(p.get("metadata", {})),
|
|
124
|
+
p.get("project"),
|
|
125
|
+
now,
|
|
126
|
+
now,
|
|
127
|
+
),
|
|
128
|
+
)
|
|
129
|
+
conn.commit()
|
|
130
|
+
return len(self._patterns)
|
|
131
|
+
finally:
|
|
132
|
+
conn.close()
|
|
133
|
+
|
|
134
|
+
def get_patterns(
|
|
135
|
+
self,
|
|
136
|
+
min_confidence: float = 0.0,
|
|
137
|
+
project: Optional[str] = None,
|
|
138
|
+
) -> List[Dict[str, Any]]:
|
|
139
|
+
"""Read stored patterns from the DB with optional filters.
|
|
140
|
+
|
|
141
|
+
Args:
|
|
142
|
+
min_confidence: Only return patterns with confidence >= this.
|
|
143
|
+
project: If given, filter by project scope.
|
|
144
|
+
|
|
145
|
+
Returns:
|
|
146
|
+
List of pattern dicts read from the database.
|
|
147
|
+
"""
|
|
148
|
+
with self._lock:
|
|
149
|
+
conn = self._connect()
|
|
150
|
+
try:
|
|
151
|
+
query = (
|
|
152
|
+
"SELECT * FROM behavioral_patterns "
|
|
153
|
+
"WHERE confidence >= ?"
|
|
154
|
+
)
|
|
155
|
+
params: List[Any] = [min_confidence]
|
|
156
|
+
if project is not None:
|
|
157
|
+
query += " AND project = ?"
|
|
158
|
+
params.append(project)
|
|
159
|
+
query += " ORDER BY confidence DESC"
|
|
160
|
+
rows = conn.execute(query, params).fetchall()
|
|
161
|
+
return [self._row_to_dict(r) for r in rows]
|
|
162
|
+
finally:
|
|
163
|
+
conn.close()
|
|
164
|
+
|
|
165
|
+
# ------------------------------------------------------------------
|
|
166
|
+
# Internal extraction helpers
|
|
167
|
+
# ------------------------------------------------------------------
|
|
168
|
+
|
|
169
|
+
def _extract_project_patterns(
    self, conn: sqlite3.Connection
) -> List[Dict[str, Any]]:
    """Aggregate action outcomes per project into success-rate patterns.

    Only projects with at least ``MIN_EVIDENCE`` recorded outcomes are
    turned into patterns.
    """
    cursor = conn.execute(
        """SELECT project,
               COUNT(*) AS total,
               SUM(CASE WHEN outcome = 'success' THEN 1 ELSE 0 END) AS wins
           FROM action_outcomes
           WHERE project IS NOT NULL
           GROUP BY project
           HAVING total >= ?""",
        (self.MIN_EVIDENCE,),
    )

    results: List[Dict[str, Any]] = []
    # sqlite3.Row supports tuple-style unpacking, so destructure directly.
    for project, total, wins in cursor.fetchall():
        success_rate = round(wins / total, 4) if total else 0.0
        results.append(
            {
                "pattern_type": "project_success",
                "pattern_key": project,
                "success_rate": success_rate,
                "evidence_count": total,
                "confidence": self._compute_confidence(total, success_rate),
                "metadata": {"wins": wins, "losses": total - wins},
                "project": project,
            }
        )
    return results
|
|
203
|
+
|
|
204
|
+
def _extract_action_type_patterns(
    self, conn: sqlite3.Connection
) -> List[Dict[str, Any]]:
    """Aggregate action outcomes per action_type into success-rate patterns.

    Action types seen fewer than ``MIN_EVIDENCE`` times are ignored.
    Unlike project patterns, these are globally scoped (project=None).
    """
    cursor = conn.execute(
        """SELECT action_type,
               COUNT(*) AS total,
               SUM(CASE WHEN outcome = 'success' THEN 1 ELSE 0 END) AS wins
           FROM action_outcomes
           WHERE action_type IS NOT NULL
           GROUP BY action_type
           HAVING total >= ?""",
        (self.MIN_EVIDENCE,),
    )

    results: List[Dict[str, Any]] = []
    # sqlite3.Row supports tuple-style unpacking, so destructure directly.
    for action_type, total, wins in cursor.fetchall():
        success_rate = round(wins / total, 4) if total else 0.0
        results.append(
            {
                "pattern_type": "action_type_success",
                "pattern_key": action_type,
                "success_rate": success_rate,
                "evidence_count": total,
                "confidence": self._compute_confidence(total, success_rate),
                "metadata": {"wins": wins, "losses": total - wins},
                "project": None,
            }
        )
    return results
|
|
238
|
+
|
|
239
|
+
# ------------------------------------------------------------------
|
|
240
|
+
# Internal helpers
|
|
241
|
+
# ------------------------------------------------------------------
|
|
242
|
+
|
|
243
|
+
@staticmethod
def _compute_confidence(evidence_count: int, success_rate: float) -> float:
    """Score how trustworthy a pattern is, in [0.0, 1.0].

    The score is the product of two factors:
      * evidence weight: ramps linearly from 0 to 1 over the first
        10 observations, then saturates;
      * deviation signal: distance of the success rate from the 50%
        coin-flip baseline, scaled so rates of 0.0 or 1.0 score 1.0.

    High confidence therefore needs both plenty of evidence AND a
    success rate far from 50%.
    """
    weight = min(evidence_count / 10.0, 1.0)
    signal = 2.0 * abs(success_rate - 0.5)
    return round(weight * signal, 4)
|
|
253
|
+
|
|
254
|
+
def _connect(self) -> sqlite3.Connection:
    """Return a fresh SQLite connection whose rows behave like mappings."""
    connection = sqlite3.connect(self._db_path)
    # sqlite3.Row enables name-based column access for _row_to_dict.
    connection.row_factory = sqlite3.Row
    return connection
|
|
259
|
+
|
|
260
|
+
def _ensure_table(self) -> None:
    """Create the behavioral_patterns table on first use.

    Safe to call repeatedly; the DDL in ``_CREATE_TABLE`` is expected
    to be a no-op when the table already exists.
    """
    connection = self._connect()
    try:
        connection.execute(self._CREATE_TABLE)
        connection.commit()
    finally:
        # Always release the connection, even if the DDL fails.
        connection.close()
|
|
268
|
+
|
|
269
|
+
@staticmethod
def _row_to_dict(row: sqlite3.Row) -> Dict[str, Any]:
    """Materialize a DB row as a plain dict, decoding the JSON metadata.

    The ``metadata`` column is stored as a JSON string; decode it so
    callers receive a real mapping. A missing column defaults to {}.
    """
    record = dict(row)
    raw_meta = record.get("metadata", "{}")
    if isinstance(raw_meta, str):
        record["metadata"] = json.loads(raw_meta)
    else:
        # Already decoded (or non-string); pass through unchanged.
        record["metadata"] = raw_meta
    return record
|