superlocalmemory 2.7.6 → 2.8.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +120 -155
- package/README.md +115 -89
- package/api_server.py +25 -12
- package/bin/slm +20 -0
- package/docs/PATTERN-LEARNING.md +64 -199
- package/docs/example_graph_usage.py +4 -6
- package/install.sh +74 -0
- package/mcp_server.py +120 -9
- package/package.json +1 -8
- package/scripts/generate-thumbnails.py +3 -5
- package/skills/slm-build-graph/SKILL.md +1 -1
- package/skills/slm-list-recent/SKILL.md +2 -2
- package/skills/slm-recall/SKILL.md +1 -1
- package/skills/slm-remember/SKILL.md +2 -2
- package/skills/slm-show-patterns/SKILL.md +1 -1
- package/skills/slm-status/SKILL.md +2 -2
- package/skills/slm-switch-profile/SKILL.md +4 -4
- package/src/agent_registry.py +7 -18
- package/src/auth_middleware.py +3 -5
- package/src/auto_backup.py +3 -7
- package/src/behavioral/__init__.py +49 -0
- package/src/behavioral/behavioral_listener.py +203 -0
- package/src/behavioral/behavioral_patterns.py +275 -0
- package/src/behavioral/cross_project_transfer.py +206 -0
- package/src/behavioral/outcome_inference.py +194 -0
- package/src/behavioral/outcome_tracker.py +193 -0
- package/src/behavioral/tests/__init__.py +4 -0
- package/src/behavioral/tests/test_behavioral_integration.py +108 -0
- package/src/behavioral/tests/test_behavioral_patterns.py +150 -0
- package/src/behavioral/tests/test_cross_project_transfer.py +142 -0
- package/src/behavioral/tests/test_mcp_behavioral.py +139 -0
- package/src/behavioral/tests/test_mcp_report_outcome.py +117 -0
- package/src/behavioral/tests/test_outcome_inference.py +107 -0
- package/src/behavioral/tests/test_outcome_tracker.py +96 -0
- package/src/cache_manager.py +4 -6
- package/src/compliance/__init__.py +48 -0
- package/src/compliance/abac_engine.py +149 -0
- package/src/compliance/abac_middleware.py +116 -0
- package/src/compliance/audit_db.py +215 -0
- package/src/compliance/audit_logger.py +148 -0
- package/src/compliance/retention_manager.py +289 -0
- package/src/compliance/retention_scheduler.py +186 -0
- package/src/compliance/tests/__init__.py +4 -0
- package/src/compliance/tests/test_abac_enforcement.py +95 -0
- package/src/compliance/tests/test_abac_engine.py +124 -0
- package/src/compliance/tests/test_abac_mcp_integration.py +118 -0
- package/src/compliance/tests/test_audit_db.py +123 -0
- package/src/compliance/tests/test_audit_logger.py +98 -0
- package/src/compliance/tests/test_mcp_audit.py +128 -0
- package/src/compliance/tests/test_mcp_retention_policy.py +125 -0
- package/src/compliance/tests/test_retention_manager.py +131 -0
- package/src/compliance/tests/test_retention_scheduler.py +99 -0
- package/src/db_connection_manager.py +2 -12
- package/src/embedding_engine.py +61 -669
- package/src/embeddings/__init__.py +47 -0
- package/src/embeddings/cache.py +70 -0
- package/src/embeddings/cli.py +113 -0
- package/src/embeddings/constants.py +47 -0
- package/src/embeddings/database.py +91 -0
- package/src/embeddings/engine.py +247 -0
- package/src/embeddings/model_loader.py +145 -0
- package/src/event_bus.py +3 -13
- package/src/graph/__init__.py +36 -0
- package/src/graph/build_helpers.py +74 -0
- package/src/graph/cli.py +87 -0
- package/src/graph/cluster_builder.py +188 -0
- package/src/graph/cluster_summary.py +148 -0
- package/src/graph/constants.py +47 -0
- package/src/graph/edge_builder.py +162 -0
- package/src/graph/entity_extractor.py +95 -0
- package/src/graph/graph_core.py +226 -0
- package/src/graph/graph_search.py +231 -0
- package/src/graph/hierarchical.py +207 -0
- package/src/graph/schema.py +99 -0
- package/src/graph_engine.py +45 -1451
- package/src/hnsw_index.py +13 -11
- package/src/hybrid_search.py +36 -683
- package/src/learning/__init__.py +27 -12
- package/src/learning/adaptive_ranker.py +50 -12
- package/src/learning/cross_project_aggregator.py +2 -12
- package/src/learning/engagement_tracker.py +2 -12
- package/src/learning/feature_extractor.py +175 -43
- package/src/learning/feedback_collector.py +7 -12
- package/src/learning/learning_db.py +180 -12
- package/src/learning/project_context_manager.py +2 -12
- package/src/learning/source_quality_scorer.py +2 -12
- package/src/learning/synthetic_bootstrap.py +2 -12
- package/src/learning/tests/__init__.py +2 -0
- package/src/learning/tests/test_adaptive_ranker.py +2 -6
- package/src/learning/tests/test_adaptive_ranker_v28.py +60 -0
- package/src/learning/tests/test_aggregator.py +2 -6
- package/src/learning/tests/test_auto_retrain_v28.py +35 -0
- package/src/learning/tests/test_e2e_ranking_v28.py +82 -0
- package/src/learning/tests/test_feature_extractor_v28.py +93 -0
- package/src/learning/tests/test_feedback_collector.py +2 -6
- package/src/learning/tests/test_learning_db.py +2 -6
- package/src/learning/tests/test_learning_db_v28.py +110 -0
- package/src/learning/tests/test_learning_init_v28.py +48 -0
- package/src/learning/tests/test_outcome_signals.py +48 -0
- package/src/learning/tests/test_project_context.py +2 -6
- package/src/learning/tests/test_schema_migration.py +319 -0
- package/src/learning/tests/test_signal_inference.py +11 -13
- package/src/learning/tests/test_source_quality.py +2 -6
- package/src/learning/tests/test_synthetic_bootstrap.py +3 -7
- package/src/learning/tests/test_workflow_miner.py +2 -6
- package/src/learning/workflow_pattern_miner.py +2 -12
- package/src/lifecycle/__init__.py +54 -0
- package/src/lifecycle/bounded_growth.py +239 -0
- package/src/lifecycle/compaction_engine.py +226 -0
- package/src/lifecycle/lifecycle_engine.py +355 -0
- package/src/lifecycle/lifecycle_evaluator.py +257 -0
- package/src/lifecycle/lifecycle_scheduler.py +130 -0
- package/src/lifecycle/retention_policy.py +285 -0
- package/src/lifecycle/tests/__init__.py +4 -0
- package/src/lifecycle/tests/test_bounded_growth.py +193 -0
- package/src/lifecycle/tests/test_compaction.py +179 -0
- package/src/lifecycle/tests/test_lifecycle_engine.py +137 -0
- package/src/lifecycle/tests/test_lifecycle_evaluation.py +177 -0
- package/src/lifecycle/tests/test_lifecycle_scheduler.py +127 -0
- package/src/lifecycle/tests/test_lifecycle_search.py +109 -0
- package/src/lifecycle/tests/test_mcp_compact.py +149 -0
- package/src/lifecycle/tests/test_mcp_lifecycle_status.py +114 -0
- package/src/lifecycle/tests/test_retention_policy.py +162 -0
- package/src/mcp_tools_v28.py +281 -0
- package/src/memory-profiles.py +3 -12
- package/src/memory-reset.py +2 -12
- package/src/memory_compression.py +2 -12
- package/src/memory_store_v2.py +76 -20
- package/src/migrate_v1_to_v2.py +2 -12
- package/src/pattern_learner.py +29 -975
- package/src/patterns/__init__.py +24 -0
- package/src/patterns/analyzers.py +247 -0
- package/src/patterns/learner.py +267 -0
- package/src/patterns/scoring.py +167 -0
- package/src/patterns/store.py +223 -0
- package/src/patterns/terminology.py +138 -0
- package/src/provenance_tracker.py +4 -14
- package/src/query_optimizer.py +4 -6
- package/src/rate_limiter.py +2 -6
- package/src/search/__init__.py +20 -0
- package/src/search/cli.py +77 -0
- package/src/search/constants.py +26 -0
- package/src/search/engine.py +239 -0
- package/src/search/fusion.py +122 -0
- package/src/search/index_loader.py +112 -0
- package/src/search/methods.py +162 -0
- package/src/search_engine_v2.py +4 -6
- package/src/setup_validator.py +7 -13
- package/src/subscription_manager.py +2 -12
- package/src/tree/__init__.py +59 -0
- package/src/tree/builder.py +183 -0
- package/src/tree/nodes.py +196 -0
- package/src/tree/queries.py +252 -0
- package/src/tree/schema.py +76 -0
- package/src/tree_manager.py +10 -711
- package/src/trust/__init__.py +45 -0
- package/src/trust/constants.py +66 -0
- package/src/trust/queries.py +157 -0
- package/src/trust/schema.py +95 -0
- package/src/trust/scorer.py +299 -0
- package/src/trust/signals.py +95 -0
- package/src/trust_scorer.py +39 -697
- package/src/webhook_dispatcher.py +2 -12
- package/ui/app.js +1 -1
- package/ui/index.html +152 -4
- package/ui/js/agents.js +1 -1
- package/ui/js/behavioral.js +276 -0
- package/ui/js/compliance.js +252 -0
- package/ui/js/init.js +10 -0
- package/ui/js/lifecycle.js +298 -0
- package/ui/js/profiles.js +4 -0
- package/ui_server.py +21 -14
- package/ATTRIBUTION.md +0 -140
- package/docs/ARCHITECTURE-V2.5.md +0 -190
- package/docs/GRAPH-ENGINE.md +0 -503
- package/docs/architecture-diagram.drawio +0 -405
- package/docs/plans/2026-02-13-benchmark-suite.md +0 -1349
|
@@ -0,0 +1,142 @@
|
|
|
1
|
+
# SPDX-License-Identifier: MIT
|
|
2
|
+
# Copyright (c) 2026 SuperLocalMemory (superlocalmemory.com)
|
|
3
|
+
"""Tests for privacy-safe cross-project behavioral transfer.
|
|
4
|
+
"""
|
|
5
|
+
import sqlite3
|
|
6
|
+
import tempfile
|
|
7
|
+
import os
|
|
8
|
+
import sys
|
|
9
|
+
import json
|
|
10
|
+
from pathlib import Path
|
|
11
|
+
|
|
12
|
+
sys.path.insert(0, str(Path(__file__).resolve().parent.parent.parent))
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class TestCrossProjectTransfer:
    """Exercise privacy-safe transfer of behavioral patterns between projects."""

    def setup_method(self):
        # One fresh temp database per test, seeded with patterns of
        # deliberately varying confidence so eligibility rules can be checked.
        self.tmp_dir = tempfile.mkdtemp()
        self.db_path = os.path.join(self.tmp_dir, "learning.db")
        conn = sqlite3.connect(self.db_path)
        conn.execute("""
            CREATE TABLE IF NOT EXISTS behavioral_patterns (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                pattern_type TEXT NOT NULL,
                pattern_key TEXT NOT NULL,
                success_rate REAL DEFAULT 0.0,
                evidence_count INTEGER DEFAULT 0,
                confidence REAL DEFAULT 0.0,
                metadata TEXT DEFAULT '{}',
                project TEXT,
                profile TEXT DEFAULT 'default',
                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
            )
        """)
        conn.execute("""
            CREATE TABLE IF NOT EXISTS cross_project_behaviors (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                source_project TEXT NOT NULL,
                target_project TEXT NOT NULL,
                pattern_id INTEGER NOT NULL,
                transfer_type TEXT DEFAULT 'metadata',
                confidence REAL DEFAULT 0.0,
                profile TEXT DEFAULT 'default',
                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                FOREIGN KEY (pattern_id) REFERENCES behavioral_patterns(id)
            )
        """)
        # id=1: high-confidence pattern in project_a (should transfer)
        # id=2: low-confidence pattern (should NOT transfer)
        # id=3: high-confidence pattern owned by project_b
        seed_rows = [
            ("action_type_success", "code_written", 0.85, 12, 0.9, "project_a"),
            ("action_type_success", "debug_resolved", 0.4, 3, 0.2, "project_a"),
            ("project_success", "project_b", 0.9, 15, 0.95, "project_b"),
        ]
        conn.executemany(
            "INSERT INTO behavioral_patterns (pattern_type, pattern_key, success_rate, evidence_count, confidence, project) VALUES (?, ?, ?, ?, ?, ?)",
            seed_rows,
        )
        conn.commit()
        conn.close()

    def teardown_method(self):
        import shutil
        shutil.rmtree(self.tmp_dir, ignore_errors=True)

    def test_evaluate_transfers(self):
        """evaluate_transfers returns eligible patterns."""
        from behavioral.cross_project_transfer import CrossProjectTransfer
        xfer = CrossProjectTransfer(self.db_path)
        candidates = xfer.evaluate_transfers(target_project="project_c")
        assert isinstance(candidates, list)
        # At least the high-confidence seed pattern must be present.
        assert len(candidates) >= 1

    def test_only_high_confidence_transfers(self):
        """Only patterns with confidence >= 0.7 and evidence >= 5 transfer."""
        from behavioral.cross_project_transfer import CrossProjectTransfer
        xfer = CrossProjectTransfer(self.db_path)
        for candidate in xfer.evaluate_transfers(target_project="project_c"):
            assert candidate["confidence"] >= 0.7
            assert candidate["evidence_count"] >= 5

    def test_low_confidence_excluded(self):
        """Low confidence patterns (id=2, confidence=0.2) should NOT transfer."""
        from behavioral.cross_project_transfer import CrossProjectTransfer
        xfer = CrossProjectTransfer(self.db_path)
        candidates = xfer.evaluate_transfers(target_project="project_c")
        eligible_ids = {candidate["pattern_id"] for candidate in candidates}
        # Seed row id=2 carries confidence 0.2 — below threshold.
        assert 2 not in eligible_ids

    def test_only_metadata_transfers(self):
        """Transfers must be metadata-only — never content."""
        from behavioral.cross_project_transfer import CrossProjectTransfer
        xfer = CrossProjectTransfer(self.db_path)
        for candidate in xfer.evaluate_transfers(target_project="project_c"):
            assert candidate["transfer_type"] == "metadata"
            # Privacy guarantee: neither raw content nor content hashes leak.
            assert "content" not in candidate
            assert "content_hash" not in candidate

    def test_apply_transfer(self):
        """apply_transfer records the transfer in cross_project_behaviors."""
        from behavioral.cross_project_transfer import CrossProjectTransfer
        xfer = CrossProjectTransfer(self.db_path)
        outcome = xfer.apply_transfer(pattern_id=1, target_project="project_c")
        assert outcome["success"] is True
        # Confirm the row actually landed in the database.
        conn = sqlite3.connect(self.db_path)
        row = conn.execute("SELECT * FROM cross_project_behaviors WHERE target_project='project_c'").fetchone()
        conn.close()
        assert row is not None

    def test_transfer_logged(self):
        """Transfers are logged with source and target projects."""
        from behavioral.cross_project_transfer import CrossProjectTransfer
        xfer = CrossProjectTransfer(self.db_path)
        xfer.apply_transfer(pattern_id=1, target_project="project_c")
        logged = xfer.get_transfers(target_project="project_c")
        assert len(logged) == 1
        assert logged[0]["source_project"] == "project_a"

    def test_no_self_transfer(self):
        """Patterns should not transfer to their own project."""
        from behavioral.cross_project_transfer import CrossProjectTransfer
        xfer = CrossProjectTransfer(self.db_path)
        candidates = xfer.evaluate_transfers(target_project="project_a")
        # Pattern 1 originates in project_a, so project_a must never be a source here.
        origins = {candidate.get("source_project") for candidate in candidates}
        assert "project_a" not in origins

    def test_disable_via_config(self):
        """Transfers can be disabled via config."""
        from behavioral.cross_project_transfer import CrossProjectTransfer
        xfer = CrossProjectTransfer(self.db_path, enabled=False)
        assert xfer.evaluate_transfers(target_project="project_c") == []
|
|
@@ -0,0 +1,139 @@
|
|
|
1
|
+
# SPDX-License-Identifier: MIT
|
|
2
|
+
# Copyright (c) 2026 SuperLocalMemory (superlocalmemory.com)
|
|
3
|
+
"""Tests for get_behavioral_patterns MCP tool handler.
|
|
4
|
+
|
|
5
|
+
Validates the MCP wrapper around BehavioralPatternExtractor — tests
|
|
6
|
+
pattern retrieval, confidence filtering, and project filtering.
|
|
7
|
+
"""
|
|
8
|
+
import asyncio
|
|
9
|
+
import os
|
|
10
|
+
import shutil
|
|
11
|
+
import sqlite3
|
|
12
|
+
import sys
|
|
13
|
+
import tempfile
|
|
14
|
+
from pathlib import Path
|
|
15
|
+
|
|
16
|
+
import pytest
|
|
17
|
+
|
|
18
|
+
sys.path.insert(0, str(Path(__file__).resolve().parent.parent.parent))
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
def _create_learning_db_with_patterns(db_path: str) -> None:
|
|
22
|
+
"""Create learning.db with pre-seeded behavioral patterns."""
|
|
23
|
+
conn = sqlite3.connect(db_path)
|
|
24
|
+
# The BehavioralPatternExtractor creates this table itself, but we need
|
|
25
|
+
# pre-seeded data for read-only tests. Create it manually.
|
|
26
|
+
conn.execute(
|
|
27
|
+
"""CREATE TABLE IF NOT EXISTS behavioral_patterns (
|
|
28
|
+
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
|
29
|
+
pattern_type TEXT NOT NULL,
|
|
30
|
+
pattern_key TEXT NOT NULL,
|
|
31
|
+
success_rate REAL DEFAULT 0.0,
|
|
32
|
+
evidence_count INTEGER DEFAULT 0,
|
|
33
|
+
confidence REAL DEFAULT 0.0,
|
|
34
|
+
metadata TEXT DEFAULT '{}',
|
|
35
|
+
project TEXT,
|
|
36
|
+
profile TEXT DEFAULT 'default',
|
|
37
|
+
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
|
|
38
|
+
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
|
|
39
|
+
)"""
|
|
40
|
+
)
|
|
41
|
+
# Pattern 1: high confidence, project-scoped
|
|
42
|
+
conn.execute(
|
|
43
|
+
"INSERT INTO behavioral_patterns "
|
|
44
|
+
"(pattern_type, pattern_key, success_rate, evidence_count, confidence, project) "
|
|
45
|
+
"VALUES (?, ?, ?, ?, ?, ?)",
|
|
46
|
+
("project_success", "slm-v28", 0.85, 12, 0.8, "slm-v28"),
|
|
47
|
+
)
|
|
48
|
+
# Pattern 2: low confidence, no project
|
|
49
|
+
conn.execute(
|
|
50
|
+
"INSERT INTO behavioral_patterns "
|
|
51
|
+
"(pattern_type, pattern_key, success_rate, evidence_count, confidence, project) "
|
|
52
|
+
"VALUES (?, ?, ?, ?, ?, ?)",
|
|
53
|
+
("action_type_success", "code_written", 0.55, 4, 0.15, None),
|
|
54
|
+
)
|
|
55
|
+
conn.commit()
|
|
56
|
+
conn.close()
|
|
57
|
+
|
|
58
|
+
|
|
59
|
+
class TestMCPBehavioralPatterns:
    """Tests for the get_behavioral_patterns tool handler."""

    def setup_method(self):
        self.tmp_dir = tempfile.mkdtemp()
        self.db_path = os.path.join(self.tmp_dir, "learning.db")
        _create_learning_db_with_patterns(self.db_path)

    def teardown_method(self):
        shutil.rmtree(self.tmp_dir, ignore_errors=True)

    def _tools(self, db_path=None):
        """Import the MCP tools module pointed at this test's learning DB.

        Centralizes the import + DEFAULT_LEARNING_DB override that every
        test needs; *db_path* defaults to the seeded per-test database.
        """
        import mcp_tools_v28 as tools
        tools.DEFAULT_LEARNING_DB = db_path if db_path is not None else self.db_path
        return tools

    def _run(self, coro):
        """Run *coro* to completion on a fresh event loop.

        Uses asyncio.run() instead of the deprecated
        asyncio.get_event_loop().run_until_complete() pattern, which emits
        a DeprecationWarning (and eventually raises) on Python 3.10+ when
        no loop is running. asyncio.run also closes the loop after each call.
        """
        return asyncio.run(coro)

    def test_get_all_patterns(self):
        """Without filters, should return all patterns."""
        tools = self._tools()
        result = self._run(tools.get_behavioral_patterns())
        assert result["success"] is True
        assert result["count"] == 2

    def test_filter_by_high_confidence(self):
        """Filtering with min_confidence=0.9 should return 0 (none that high)."""
        tools = self._tools()
        result = self._run(tools.get_behavioral_patterns(min_confidence=0.9))
        assert result["success"] is True
        assert result["count"] == 0

    def test_filter_by_medium_confidence(self):
        """Filtering with min_confidence=0.5 should return only the high-confidence one."""
        tools = self._tools()
        result = self._run(tools.get_behavioral_patterns(min_confidence=0.5))
        assert result["success"] is True
        assert result["count"] == 1
        assert result["patterns"][0]["pattern_key"] == "slm-v28"

    def test_filter_by_project(self):
        """Filtering by project should scope results."""
        tools = self._tools()
        result = self._run(tools.get_behavioral_patterns(project="slm-v28"))
        assert result["success"] is True
        assert result["count"] == 1

    def test_filter_by_nonexistent_project(self):
        """Filtering by a project with no patterns should return 0."""
        tools = self._tools()
        result = self._run(tools.get_behavioral_patterns(project="nonexistent"))
        assert result["success"] is True
        assert result["count"] == 0

    def test_patterns_have_required_keys(self):
        """Each returned pattern should have standard keys."""
        tools = self._tools()
        result = self._run(tools.get_behavioral_patterns())
        for pattern in result["patterns"]:
            assert "pattern_type" in pattern
            assert "pattern_key" in pattern
            assert "success_rate" in pattern
            assert "confidence" in pattern

    def test_empty_db_returns_zero(self):
        """An empty learning DB should return count=0."""
        empty_path = os.path.join(self.tmp_dir, "empty_learning.db")
        tools = self._tools(empty_path)
        result = self._run(tools.get_behavioral_patterns())
        assert result["success"] is True
        assert result["count"] == 0
|
|
@@ -0,0 +1,117 @@
|
|
|
1
|
+
# SPDX-License-Identifier: MIT
|
|
2
|
+
# Copyright (c) 2026 SuperLocalMemory (superlocalmemory.com)
|
|
3
|
+
"""Tests for report_outcome MCP tool handler.
|
|
4
|
+
|
|
5
|
+
Validates the MCP wrapper around OutcomeTracker — tests success/failure/partial
|
|
6
|
+
outcomes, context handling, and invalid outcome rejection.
|
|
7
|
+
"""
|
|
8
|
+
import asyncio
|
|
9
|
+
import os
|
|
10
|
+
import shutil
|
|
11
|
+
import sys
|
|
12
|
+
import tempfile
|
|
13
|
+
from pathlib import Path
|
|
14
|
+
|
|
15
|
+
import pytest
|
|
16
|
+
|
|
17
|
+
# Ensure src/ is importable
|
|
18
|
+
sys.path.insert(0, str(Path(__file__).resolve().parent.parent.parent))
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
class TestMCPReportOutcome:
    """Tests for the report_outcome tool handler."""

    def setup_method(self):
        self.tmp_dir = tempfile.mkdtemp()
        self.db_path = os.path.join(self.tmp_dir, "learning.db")

    def teardown_method(self):
        shutil.rmtree(self.tmp_dir, ignore_errors=True)

    def _tools(self):
        """Import the MCP tools module pointed at this test's learning DB."""
        import mcp_tools_v28 as tools
        tools.DEFAULT_LEARNING_DB = self.db_path
        return tools

    def _run(self, coro):
        """Run *coro* to completion on a fresh event loop.

        Uses asyncio.run() instead of the deprecated
        asyncio.get_event_loop().run_until_complete() pattern, which emits
        a DeprecationWarning (and eventually raises) on Python 3.10+ when
        no loop is running. asyncio.run also closes the loop after each call.
        """
        return asyncio.run(coro)

    def test_report_success_outcome(self):
        """Reporting a 'success' outcome should return success=True and an outcome_id."""
        tools = self._tools()
        result = self._run(tools.report_outcome([1, 2], "success"))
        assert result["success"] is True
        assert isinstance(result["outcome_id"], int)
        assert result["outcome_id"] > 0
        assert result["outcome"] == "success"
        assert result["memory_ids"] == [1, 2]

    def test_report_failure_outcome(self):
        """Reporting a 'failure' outcome should succeed."""
        tools = self._tools()
        result = self._run(tools.report_outcome([5], "failure"))
        assert result["success"] is True
        assert result["outcome"] == "failure"

    def test_report_partial_outcome(self):
        """Reporting a 'partial' outcome should succeed."""
        tools = self._tools()
        result = self._run(tools.report_outcome([3], "partial"))
        assert result["success"] is True
        assert result["outcome"] == "partial"

    def test_report_invalid_outcome(self):
        """An invalid outcome label should return success=False."""
        tools = self._tools()
        result = self._run(tools.report_outcome([1], "invalid"))
        assert result["success"] is False
        assert "Invalid outcome" in result["error"]

    def test_report_with_context_json(self):
        """Context passed as JSON string should be accepted."""
        tools = self._tools()
        result = self._run(
            tools.report_outcome(
                [1], "partial", context='{"note": "worked partially"}'
            )
        )
        assert result["success"] is True

    def test_report_with_action_type(self):
        """Custom action_type should be accepted."""
        tools = self._tools()
        result = self._run(
            tools.report_outcome(
                [1, 2, 3], "success", action_type="code_written"
            )
        )
        assert result["success"] is True

    def test_report_with_agent_and_project(self):
        """agent_id and project parameters should be forwarded."""
        tools = self._tools()
        result = self._run(
            tools.report_outcome(
                [1], "success", agent_id="agent_a", project="slm-v28"
            )
        )
        assert result["success"] is True

    def test_multiple_outcomes_unique_ids(self):
        """Consecutive outcomes should get distinct IDs."""
        tools = self._tools()
        r1 = self._run(tools.report_outcome([1], "success"))
        r2 = self._run(tools.report_outcome([2], "failure"))
        assert r1["outcome_id"] != r2["outcome_id"]
|
|
@@ -0,0 +1,107 @@
|
|
|
1
|
+
# SPDX-License-Identifier: MIT
|
|
2
|
+
# Copyright (c) 2026 SuperLocalMemory (superlocalmemory.com)
|
|
3
|
+
"""Tests for implicit outcome inference from recall behavior patterns.
|
|
4
|
+
"""
|
|
5
|
+
import sys
|
|
6
|
+
from pathlib import Path
|
|
7
|
+
from datetime import datetime, timedelta
|
|
8
|
+
|
|
9
|
+
sys.path.insert(0, str(Path(__file__).resolve().parent.parent.parent))
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class TestOutcomeInference:
    """Inference-rule checks for implicit outcome detection."""

    def test_no_requery_implies_success(self):
        """No re-query for 10+ min after recall -> success (0.6)."""
        from behavioral.outcome_inference import OutcomeInference
        inference = OutcomeInference()
        ref = datetime.now()
        # One recall, then silence for 12 minutes.
        inference.record_recall("query_abc", [1, 2], ref - timedelta(minutes=12))
        inferred = inference.infer_outcomes(ref)
        assert len(inferred) >= 1
        first = inferred[0]
        assert first["outcome"] == "success"
        assert abs(first["confidence"] - 0.6) < 0.01

    def test_memory_used_high_confirms_success(self):
        """memory_used(high) within 5 min -> confirmed success (0.8)."""
        from behavioral.outcome_inference import OutcomeInference
        inference = OutcomeInference()
        ref = datetime.now()
        inference.record_recall("query_abc", [1], ref - timedelta(minutes=3))
        inference.record_usage("query_abc", signal="mcp_used_high", timestamp=ref - timedelta(minutes=1))
        successes = [r for r in inference.infer_outcomes(ref) if r["outcome"] == "success"]
        assert len(successes) >= 1
        assert successes[0]["confidence"] >= 0.8

    def test_immediate_requery_implies_failure(self):
        """Immediate re-query with different terms -> failure (0.2)."""
        from behavioral.outcome_inference import OutcomeInference
        inference = OutcomeInference()
        ref = datetime.now()
        # A second, different query only 30 seconds later signals a miss.
        inference.record_recall("query_abc", [1], ref - timedelta(minutes=1))
        inference.record_recall("different_query", [3], ref - timedelta(seconds=30))
        failures = [r for r in inference.infer_outcomes(ref) if r["outcome"] == "failure"]
        assert len(failures) >= 1
        assert failures[0]["confidence"] <= 0.3

    def test_memory_deleted_implies_failure(self):
        """Memory deleted within 1 hour -> failure (0.0)."""
        from behavioral.outcome_inference import OutcomeInference
        inference = OutcomeInference()
        ref = datetime.now()
        inference.record_recall("query_abc", [1], ref - timedelta(minutes=30))
        inference.record_deletion(memory_id=1, timestamp=ref - timedelta(minutes=5))
        failures = [
            r for r in inference.infer_outcomes(ref)
            if r["outcome"] == "failure" and 1 in r["memory_ids"]
        ]
        assert len(failures) >= 1
        assert failures[0]["confidence"] <= 0.05

    def test_rapid_fire_queries_implies_failure(self):
        """3+ queries in 2 min -> failure (0.1)."""
        from behavioral.outcome_inference import OutcomeInference
        inference = OutcomeInference()
        ref = datetime.now()
        # Three queries in 90 seconds — the rapid-fire pattern.
        inference.record_recall("q1", [1], ref - timedelta(seconds=90))
        inference.record_recall("q2", [2], ref - timedelta(seconds=60))
        inference.record_recall("q3", [3], ref - timedelta(seconds=30))
        failures = [r for r in inference.infer_outcomes(ref) if r["outcome"] == "failure"]
        assert len(failures) >= 1

    def test_cross_tool_access_implies_success(self):
        """Cross-tool access after recall -> success (0.7)."""
        from behavioral.outcome_inference import OutcomeInference
        inference = OutcomeInference()
        ref = datetime.now()
        inference.record_recall("query_abc", [1], ref - timedelta(minutes=3))
        inference.record_usage("query_abc", signal="implicit_positive_cross_tool", timestamp=ref - timedelta(minutes=1))
        successes = [r for r in inference.infer_outcomes(ref) if r["outcome"] == "success"]
        assert len(successes) >= 1
        assert successes[0]["confidence"] >= 0.7

    def test_empty_buffer_returns_empty(self):
        """No recorded events -> no inferences."""
        from behavioral.outcome_inference import OutcomeInference
        assert OutcomeInference().infer_outcomes(datetime.now()) == []

    def test_infer_clears_processed_events(self):
        """After inference, processed events are cleared from buffer."""
        from behavioral.outcome_inference import OutcomeInference
        inference = OutcomeInference()
        ref = datetime.now()
        inference.record_recall("q1", [1], ref - timedelta(minutes=12))
        assert len(inference.infer_outcomes(ref)) >= 1
        # A second pass over the same buffer must yield nothing new.
        assert len(inference.infer_outcomes(ref)) == 0
|
|
@@ -0,0 +1,96 @@
|
|
|
1
|
+
# SPDX-License-Identifier: MIT
|
|
2
|
+
# Copyright (c) 2026 SuperLocalMemory (superlocalmemory.com)
|
|
3
|
+
"""Tests for explicit action outcome recording.
|
|
4
|
+
"""
|
|
5
|
+
import sqlite3
|
|
6
|
+
import tempfile
|
|
7
|
+
import os
|
|
8
|
+
import sys
|
|
9
|
+
import json
|
|
10
|
+
from pathlib import Path
|
|
11
|
+
|
|
12
|
+
sys.path.insert(0, str(Path(__file__).resolve().parent.parent.parent))
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class TestOutcomeTracker:
    """Exercise explicit outcome recording and querying in OutcomeTracker."""

    def setup_method(self):
        # Fresh temporary directory and database file per test for isolation.
        self.tmp_dir = tempfile.mkdtemp()
        self.db_path = os.path.join(self.tmp_dir, "learning.db")

    def teardown_method(self):
        import shutil

        shutil.rmtree(self.tmp_dir, ignore_errors=True)

    def _tracker(self):
        # Helper: construct a tracker bound to this test's database.
        from behavioral.outcome_tracker import OutcomeTracker

        return OutcomeTracker(self.db_path)

    def test_record_success(self):
        """Recording a success returns a positive integer id."""
        outcome_id = self._tracker().record_outcome(
            [1, 2], "success", action_type="code_written"
        )
        assert isinstance(outcome_id, int)
        assert outcome_id > 0

    def test_record_failure(self):
        """Failures with extra context are accepted."""
        outcome_id = self._tracker().record_outcome(
            [3], "failure", context={"error": "timeout"}
        )
        assert outcome_id > 0

    def test_record_partial(self):
        """'partial' is a valid outcome value."""
        outcome_id = self._tracker().record_outcome(
            [1], "partial", action_type="debug_resolved"
        )
        assert outcome_id > 0

    def test_confidence_for_explicit(self):
        """Explicitly reported outcomes carry confidence of at least 0.8."""
        tracker = self._tracker()
        tracker.record_outcome([1], "success")
        assert tracker.get_outcomes()[0]["confidence"] >= 0.8

    def test_multiple_memory_ids(self):
        """Every memory id attached to an outcome round-trips through storage."""
        tracker = self._tracker()
        tracker.record_outcome([1, 2, 3], "success")
        assert len(tracker.get_outcomes()[0]["memory_ids"]) == 3

    def test_get_outcomes_by_memory(self):
        """Filtering by memory_id returns only matching outcomes."""
        tracker = self._tracker()
        tracker.record_outcome([1, 2], "success")
        tracker.record_outcome([3], "failure")
        assert len(tracker.get_outcomes(memory_id=1)) == 1

    def test_get_outcomes_by_project(self):
        """Filtering by project returns only that project's outcomes."""
        tracker = self._tracker()
        tracker.record_outcome([1], "success", project="proj_a")
        tracker.record_outcome([2], "failure", project="proj_b")
        assert len(tracker.get_outcomes(project="proj_a")) == 1

    def test_get_success_rate(self):
        """Success rate is successes over total outcomes for a memory."""
        tracker = self._tracker()
        for outcome in ("success", "success", "failure"):
            tracker.record_outcome([1], outcome)
        assert abs(tracker.get_success_rate(1) - 0.667) < 0.01  # 2/3

    def test_success_rate_no_outcomes(self):
        """A memory with no recorded outcomes has a 0.0 success rate."""
        assert self._tracker().get_success_rate(999) == 0.0

    def test_valid_outcomes_only(self):
        """Outcomes other than success/failure/partial are rejected with None."""
        result = self._tracker().record_outcome([1], "invalid_outcome")
        assert result is None  # Rejected
|
package/src/cache_manager.py
CHANGED
|
@@ -1,16 +1,14 @@
|
|
|
1
1
|
#!/usr/bin/env python3
|
|
2
|
-
|
|
3
|
-
|
|
2
|
+
# SPDX-License-Identifier: MIT
|
|
3
|
+
# Copyright (c) 2026 SuperLocalMemory (superlocalmemory.com)
|
|
4
|
+
"""SuperLocalMemory V2 - Cache Manager
|
|
4
5
|
|
|
5
|
-
Copyright (c) 2026 Varun Pratap Bhardwaj
|
|
6
6
|
Solution Architect & Original Creator
|
|
7
7
|
|
|
8
|
-
|
|
9
|
-
Repository: https://github.com/varun369/SuperLocalMemoryV2
|
|
8
|
+
(see LICENSE file)
|
|
10
9
|
|
|
11
10
|
ATTRIBUTION REQUIRED: This notice must be preserved in all copies.
|
|
12
11
|
"""
|
|
13
|
-
|
|
14
12
|
"""
|
|
15
13
|
Cache Manager - LRU Cache for Search Results
|
|
16
14
|
|
|
@@ -0,0 +1,48 @@
|
|
|
1
|
+
# SPDX-License-Identifier: MIT
|
|
2
|
+
# Copyright (c) 2026 SuperLocalMemory (superlocalmemory.com)
|
|
3
|
+
"""SLM v2.8 Compliance Engine — ABAC + Audit Trail + Retention.
|
|
4
|
+
|
|
5
|
+
Enterprise-grade access control, tamper-evident audit trail,
|
|
6
|
+
and retention policy management for GDPR/EU AI Act/HIPAA.
|
|
7
|
+
|
|
8
|
+
Graceful degradation: if this module fails to import,
|
|
9
|
+
all agents have full access (v2.7 behavior).
|
|
10
|
+
"""
|
|
11
|
+
import threading
|
|
12
|
+
from pathlib import Path
|
|
13
|
+
from typing import Optional, Dict, Any
|
|
14
|
+
|
|
15
|
+
# Availability flag: stays False when the guarded imports below fail, which
# gives callers the v2.7 "all agents have full access" fallback described in
# the module docstring.
COMPLIANCE_AVAILABLE = False
# Text of the ImportError (if any) so get_status() can report why compliance
# is unavailable.
_init_error = None

try:
    from .abac_engine import ABACEngine
    # NOTE(review): AuditDB is imported here but not referenced below in this
    # view — presumably a re-export / availability probe; confirm.
    from .audit_db import AuditDB
    COMPLIANCE_AVAILABLE = True
except ImportError as e:
    # Graceful degradation: record the failure instead of raising at import time.
    _init_error = str(e)

# Lazily-created ABAC engine singleton; creation is serialized by _abac_lock
# in get_abac_engine().
_abac_engine: Optional["ABACEngine"] = None
_abac_lock = threading.Lock()
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
def get_abac_engine(config_path: Optional[Path] = None) -> Optional["ABACEngine"]:
    """Return the process-wide ABAC engine, creating it on first use.

    Returns None when the compliance stack failed to import, or when the
    engine cannot be constructed — callers then fall back to open access.
    """
    global _abac_engine
    if not COMPLIANCE_AVAILABLE:
        # Compliance modules never imported; nothing to build.
        return None
    with _abac_lock:
        if _abac_engine is not None:
            return _abac_engine
        try:
            _abac_engine = ABACEngine(config_path)
        except Exception:
            # Construction failed: leave the singleton unset so a later call can retry.
            return None
        return _abac_engine
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
def get_status() -> Dict[str, Any]:
    """Report compliance-subsystem health for diagnostics."""
    status: Dict[str, Any] = {}
    status["compliance_available"] = COMPLIANCE_AVAILABLE
    status["init_error"] = _init_error
    # True only once get_abac_engine() has successfully built the singleton.
    status["abac_active"] = _abac_engine is not None
    return status
|