superlocalmemory 2.8.6 → 3.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +9 -1
- package/NOTICE +63 -0
- package/README.md +165 -480
- package/bin/slm +17 -449
- package/bin/slm-npm +62 -48
- package/conftest.py +5 -0
- package/docs/api-reference.md +284 -0
- package/docs/architecture.md +149 -0
- package/docs/auto-memory.md +150 -0
- package/docs/cli-reference.md +276 -0
- package/docs/compliance.md +191 -0
- package/docs/configuration.md +182 -0
- package/docs/getting-started.md +102 -0
- package/docs/ide-setup.md +261 -0
- package/docs/mcp-tools.md +220 -0
- package/docs/migration-from-v2.md +170 -0
- package/docs/profiles.md +173 -0
- package/docs/troubleshooting.md +310 -0
- package/{configs → ide/configs}/antigravity-mcp.json +3 -3
- package/ide/configs/chatgpt-desktop-mcp.json +16 -0
- package/{configs → ide/configs}/claude-desktop-mcp.json +3 -3
- package/{configs → ide/configs}/codex-mcp.toml +4 -4
- package/{configs → ide/configs}/continue-mcp.yaml +4 -3
- package/{configs → ide/configs}/continue-skills.yaml +6 -6
- package/ide/configs/cursor-mcp.json +15 -0
- package/{configs → ide/configs}/gemini-cli-mcp.json +2 -2
- package/{configs → ide/configs}/jetbrains-mcp.json +2 -2
- package/{configs → ide/configs}/opencode-mcp.json +2 -2
- package/{configs → ide/configs}/perplexity-mcp.json +2 -2
- package/{configs → ide/configs}/vscode-copilot-mcp.json +2 -2
- package/{configs → ide/configs}/windsurf-mcp.json +3 -3
- package/{configs → ide/configs}/zed-mcp.json +2 -2
- package/{hooks → ide/hooks}/context-hook.js +9 -20
- package/ide/hooks/memory-list-skill.js +70 -0
- package/ide/hooks/memory-profile-skill.js +101 -0
- package/ide/hooks/memory-recall-skill.js +62 -0
- package/ide/hooks/memory-remember-skill.js +68 -0
- package/ide/hooks/memory-reset-skill.js +160 -0
- package/{hooks → ide/hooks}/post-recall-hook.js +2 -2
- package/ide/integrations/langchain/README.md +106 -0
- package/ide/integrations/langchain/langchain_superlocalmemory/__init__.py +9 -0
- package/ide/integrations/langchain/langchain_superlocalmemory/chat_message_history.py +201 -0
- package/ide/integrations/langchain/pyproject.toml +38 -0
- package/{src/learning → ide/integrations/langchain}/tests/__init__.py +1 -0
- package/ide/integrations/langchain/tests/test_chat_message_history.py +215 -0
- package/ide/integrations/langchain/tests/test_security.py +117 -0
- package/ide/integrations/llamaindex/README.md +81 -0
- package/ide/integrations/llamaindex/llama_index/storage/chat_store/superlocalmemory/__init__.py +9 -0
- package/ide/integrations/llamaindex/llama_index/storage/chat_store/superlocalmemory/base.py +316 -0
- package/ide/integrations/llamaindex/pyproject.toml +43 -0
- package/{src/lifecycle → ide/integrations/llamaindex}/tests/__init__.py +1 -2
- package/ide/integrations/llamaindex/tests/test_chat_store.py +294 -0
- package/ide/integrations/llamaindex/tests/test_security.py +241 -0
- package/{skills → ide/skills}/slm-build-graph/SKILL.md +6 -6
- package/{skills → ide/skills}/slm-list-recent/SKILL.md +5 -5
- package/{skills → ide/skills}/slm-recall/SKILL.md +5 -5
- package/{skills → ide/skills}/slm-remember/SKILL.md +6 -6
- package/{skills → ide/skills}/slm-show-patterns/SKILL.md +7 -7
- package/{skills → ide/skills}/slm-status/SKILL.md +9 -9
- package/{skills → ide/skills}/slm-switch-profile/SKILL.md +9 -9
- package/package.json +13 -22
- package/pyproject.toml +85 -0
- package/scripts/build-dmg.sh +417 -0
- package/scripts/install-skills.ps1 +334 -0
- package/scripts/postinstall.js +2 -2
- package/scripts/start-dashboard.ps1 +52 -0
- package/scripts/start-dashboard.sh +41 -0
- package/scripts/sync-wiki.ps1 +127 -0
- package/scripts/sync-wiki.sh +82 -0
- package/scripts/test-dmg.sh +161 -0
- package/scripts/test-npm-package.ps1 +252 -0
- package/scripts/test-npm-package.sh +207 -0
- package/scripts/verify-install.ps1 +294 -0
- package/scripts/verify-install.sh +266 -0
- package/src/superlocalmemory/__init__.py +0 -0
- package/src/superlocalmemory/attribution/__init__.py +9 -0
- package/src/superlocalmemory/attribution/mathematical_dna.py +235 -0
- package/src/superlocalmemory/attribution/signer.py +153 -0
- package/src/superlocalmemory/attribution/watermark.py +189 -0
- package/src/superlocalmemory/cli/__init__.py +5 -0
- package/src/superlocalmemory/cli/commands.py +245 -0
- package/src/superlocalmemory/cli/main.py +89 -0
- package/src/superlocalmemory/cli/migrate_cmd.py +55 -0
- package/src/superlocalmemory/cli/post_install.py +99 -0
- package/src/superlocalmemory/cli/setup_wizard.py +129 -0
- package/src/superlocalmemory/compliance/__init__.py +0 -0
- package/src/superlocalmemory/compliance/abac.py +204 -0
- package/src/superlocalmemory/compliance/audit.py +314 -0
- package/src/superlocalmemory/compliance/eu_ai_act.py +131 -0
- package/src/superlocalmemory/compliance/gdpr.py +294 -0
- package/src/superlocalmemory/compliance/lifecycle.py +158 -0
- package/src/superlocalmemory/compliance/retention.py +232 -0
- package/src/superlocalmemory/compliance/scheduler.py +148 -0
- package/src/superlocalmemory/core/__init__.py +0 -0
- package/src/superlocalmemory/core/config.py +391 -0
- package/src/superlocalmemory/core/embeddings.py +293 -0
- package/src/superlocalmemory/core/engine.py +701 -0
- package/src/superlocalmemory/core/hooks.py +65 -0
- package/src/superlocalmemory/core/maintenance.py +172 -0
- package/src/superlocalmemory/core/modes.py +140 -0
- package/src/superlocalmemory/core/profiles.py +234 -0
- package/src/superlocalmemory/core/registry.py +117 -0
- package/src/superlocalmemory/dynamics/__init__.py +0 -0
- package/src/superlocalmemory/dynamics/fisher_langevin_coupling.py +223 -0
- package/src/superlocalmemory/encoding/__init__.py +0 -0
- package/src/superlocalmemory/encoding/consolidator.py +485 -0
- package/src/superlocalmemory/encoding/emotional.py +125 -0
- package/src/superlocalmemory/encoding/entity_resolver.py +525 -0
- package/src/superlocalmemory/encoding/entropy_gate.py +104 -0
- package/src/superlocalmemory/encoding/fact_extractor.py +775 -0
- package/src/superlocalmemory/encoding/foresight.py +91 -0
- package/src/superlocalmemory/encoding/graph_builder.py +302 -0
- package/src/superlocalmemory/encoding/observation_builder.py +160 -0
- package/src/superlocalmemory/encoding/scene_builder.py +183 -0
- package/src/superlocalmemory/encoding/signal_inference.py +90 -0
- package/src/superlocalmemory/encoding/temporal_parser.py +426 -0
- package/src/superlocalmemory/encoding/type_router.py +235 -0
- package/src/superlocalmemory/hooks/__init__.py +3 -0
- package/src/superlocalmemory/hooks/auto_capture.py +111 -0
- package/src/superlocalmemory/hooks/auto_recall.py +93 -0
- package/src/superlocalmemory/hooks/ide_connector.py +204 -0
- package/src/superlocalmemory/hooks/rules_engine.py +99 -0
- package/src/superlocalmemory/infra/__init__.py +3 -0
- package/src/superlocalmemory/infra/auth_middleware.py +82 -0
- package/src/superlocalmemory/infra/backup.py +317 -0
- package/src/superlocalmemory/infra/cache_manager.py +267 -0
- package/src/superlocalmemory/infra/event_bus.py +381 -0
- package/src/superlocalmemory/infra/rate_limiter.py +135 -0
- package/src/{webhook_dispatcher.py → superlocalmemory/infra/webhook_dispatcher.py} +104 -101
- package/src/superlocalmemory/learning/__init__.py +0 -0
- package/src/superlocalmemory/learning/adaptive.py +172 -0
- package/src/superlocalmemory/learning/behavioral.py +490 -0
- package/src/superlocalmemory/learning/behavioral_listener.py +94 -0
- package/src/superlocalmemory/learning/bootstrap.py +298 -0
- package/src/superlocalmemory/learning/cross_project.py +399 -0
- package/src/superlocalmemory/learning/database.py +376 -0
- package/src/superlocalmemory/learning/engagement.py +323 -0
- package/src/superlocalmemory/learning/features.py +138 -0
- package/src/superlocalmemory/learning/feedback.py +316 -0
- package/src/superlocalmemory/learning/outcomes.py +255 -0
- package/src/superlocalmemory/learning/project_context.py +366 -0
- package/src/superlocalmemory/learning/ranker.py +155 -0
- package/src/superlocalmemory/learning/source_quality.py +303 -0
- package/src/superlocalmemory/learning/workflows.py +309 -0
- package/src/superlocalmemory/llm/__init__.py +0 -0
- package/src/superlocalmemory/llm/backbone.py +316 -0
- package/src/superlocalmemory/math/__init__.py +0 -0
- package/src/superlocalmemory/math/fisher.py +356 -0
- package/src/superlocalmemory/math/langevin.py +398 -0
- package/src/superlocalmemory/math/sheaf.py +257 -0
- package/src/superlocalmemory/mcp/__init__.py +0 -0
- package/src/superlocalmemory/mcp/resources.py +245 -0
- package/src/superlocalmemory/mcp/server.py +61 -0
- package/src/superlocalmemory/mcp/tools.py +18 -0
- package/src/superlocalmemory/mcp/tools_core.py +305 -0
- package/src/superlocalmemory/mcp/tools_v28.py +223 -0
- package/src/superlocalmemory/mcp/tools_v3.py +286 -0
- package/src/superlocalmemory/retrieval/__init__.py +0 -0
- package/src/superlocalmemory/retrieval/agentic.py +295 -0
- package/src/superlocalmemory/retrieval/ann_index.py +223 -0
- package/src/superlocalmemory/retrieval/bm25_channel.py +185 -0
- package/src/superlocalmemory/retrieval/bridge_discovery.py +170 -0
- package/src/superlocalmemory/retrieval/engine.py +390 -0
- package/src/superlocalmemory/retrieval/entity_channel.py +179 -0
- package/src/superlocalmemory/retrieval/fusion.py +78 -0
- package/src/superlocalmemory/retrieval/profile_channel.py +105 -0
- package/src/superlocalmemory/retrieval/reranker.py +154 -0
- package/src/superlocalmemory/retrieval/semantic_channel.py +232 -0
- package/src/superlocalmemory/retrieval/strategy.py +96 -0
- package/src/superlocalmemory/retrieval/temporal_channel.py +175 -0
- package/src/superlocalmemory/server/__init__.py +1 -0
- package/src/superlocalmemory/server/api.py +248 -0
- package/src/superlocalmemory/server/routes/__init__.py +4 -0
- package/src/superlocalmemory/server/routes/agents.py +107 -0
- package/src/superlocalmemory/server/routes/backup.py +91 -0
- package/src/superlocalmemory/server/routes/behavioral.py +127 -0
- package/src/superlocalmemory/server/routes/compliance.py +160 -0
- package/src/superlocalmemory/server/routes/data_io.py +188 -0
- package/src/superlocalmemory/server/routes/events.py +183 -0
- package/src/superlocalmemory/server/routes/helpers.py +85 -0
- package/src/superlocalmemory/server/routes/learning.py +273 -0
- package/src/superlocalmemory/server/routes/lifecycle.py +116 -0
- package/src/superlocalmemory/server/routes/memories.py +399 -0
- package/src/superlocalmemory/server/routes/profiles.py +219 -0
- package/src/superlocalmemory/server/routes/stats.py +346 -0
- package/src/superlocalmemory/server/routes/v3_api.py +365 -0
- package/src/superlocalmemory/server/routes/ws.py +82 -0
- package/src/superlocalmemory/server/security_middleware.py +57 -0
- package/src/superlocalmemory/server/ui.py +245 -0
- package/src/superlocalmemory/storage/__init__.py +0 -0
- package/src/superlocalmemory/storage/access_control.py +182 -0
- package/src/superlocalmemory/storage/database.py +594 -0
- package/src/superlocalmemory/storage/migrations.py +303 -0
- package/src/superlocalmemory/storage/models.py +406 -0
- package/src/superlocalmemory/storage/schema.py +726 -0
- package/src/superlocalmemory/storage/v2_migrator.py +317 -0
- package/src/superlocalmemory/trust/__init__.py +0 -0
- package/src/superlocalmemory/trust/gate.py +130 -0
- package/src/superlocalmemory/trust/provenance.py +124 -0
- package/src/superlocalmemory/trust/scorer.py +347 -0
- package/src/superlocalmemory/trust/signals.py +153 -0
- package/ui/index.html +278 -5
- package/ui/js/auto-settings.js +70 -0
- package/ui/js/dashboard.js +90 -0
- package/ui/js/fact-detail.js +92 -0
- package/ui/js/feedback.js +2 -2
- package/ui/js/ide-status.js +102 -0
- package/ui/js/math-health.js +98 -0
- package/ui/js/recall-lab.js +127 -0
- package/ui/js/settings.js +2 -2
- package/ui/js/trust-dashboard.js +73 -0
- package/api_server.py +0 -724
- package/bin/aider-smart +0 -72
- package/bin/superlocalmemoryv2-learning +0 -4
- package/bin/superlocalmemoryv2-list +0 -3
- package/bin/superlocalmemoryv2-patterns +0 -4
- package/bin/superlocalmemoryv2-profile +0 -3
- package/bin/superlocalmemoryv2-recall +0 -3
- package/bin/superlocalmemoryv2-remember +0 -3
- package/bin/superlocalmemoryv2-reset +0 -3
- package/bin/superlocalmemoryv2-status +0 -3
- package/configs/chatgpt-desktop-mcp.json +0 -16
- package/configs/cursor-mcp.json +0 -15
- package/hooks/memory-list-skill.js +0 -139
- package/hooks/memory-profile-skill.js +0 -273
- package/hooks/memory-recall-skill.js +0 -114
- package/hooks/memory-remember-skill.js +0 -127
- package/hooks/memory-reset-skill.js +0 -274
- package/mcp_server.py +0 -1808
- package/requirements-core.txt +0 -22
- package/requirements-learning.txt +0 -12
- package/requirements.txt +0 -12
- package/src/agent_registry.py +0 -411
- package/src/auth_middleware.py +0 -61
- package/src/auto_backup.py +0 -459
- package/src/behavioral/__init__.py +0 -49
- package/src/behavioral/behavioral_listener.py +0 -203
- package/src/behavioral/behavioral_patterns.py +0 -275
- package/src/behavioral/cross_project_transfer.py +0 -206
- package/src/behavioral/outcome_inference.py +0 -194
- package/src/behavioral/outcome_tracker.py +0 -193
- package/src/behavioral/tests/__init__.py +0 -4
- package/src/behavioral/tests/test_behavioral_integration.py +0 -108
- package/src/behavioral/tests/test_behavioral_patterns.py +0 -150
- package/src/behavioral/tests/test_cross_project_transfer.py +0 -142
- package/src/behavioral/tests/test_mcp_behavioral.py +0 -139
- package/src/behavioral/tests/test_mcp_report_outcome.py +0 -117
- package/src/behavioral/tests/test_outcome_inference.py +0 -107
- package/src/behavioral/tests/test_outcome_tracker.py +0 -96
- package/src/cache_manager.py +0 -518
- package/src/compliance/__init__.py +0 -48
- package/src/compliance/abac_engine.py +0 -149
- package/src/compliance/abac_middleware.py +0 -116
- package/src/compliance/audit_db.py +0 -215
- package/src/compliance/audit_logger.py +0 -148
- package/src/compliance/retention_manager.py +0 -289
- package/src/compliance/retention_scheduler.py +0 -186
- package/src/compliance/tests/__init__.py +0 -4
- package/src/compliance/tests/test_abac_enforcement.py +0 -95
- package/src/compliance/tests/test_abac_engine.py +0 -124
- package/src/compliance/tests/test_abac_mcp_integration.py +0 -118
- package/src/compliance/tests/test_audit_db.py +0 -123
- package/src/compliance/tests/test_audit_logger.py +0 -98
- package/src/compliance/tests/test_mcp_audit.py +0 -128
- package/src/compliance/tests/test_mcp_retention_policy.py +0 -125
- package/src/compliance/tests/test_retention_manager.py +0 -131
- package/src/compliance/tests/test_retention_scheduler.py +0 -99
- package/src/compression/__init__.py +0 -25
- package/src/compression/cli.py +0 -150
- package/src/compression/cold_storage.py +0 -217
- package/src/compression/config.py +0 -72
- package/src/compression/orchestrator.py +0 -133
- package/src/compression/tier2_compressor.py +0 -228
- package/src/compression/tier3_compressor.py +0 -153
- package/src/compression/tier_classifier.py +0 -148
- package/src/db_connection_manager.py +0 -536
- package/src/embedding_engine.py +0 -63
- package/src/embeddings/__init__.py +0 -47
- package/src/embeddings/cache.py +0 -70
- package/src/embeddings/cli.py +0 -113
- package/src/embeddings/constants.py +0 -47
- package/src/embeddings/database.py +0 -91
- package/src/embeddings/engine.py +0 -247
- package/src/embeddings/model_loader.py +0 -145
- package/src/event_bus.py +0 -562
- package/src/graph/__init__.py +0 -36
- package/src/graph/build_helpers.py +0 -74
- package/src/graph/cli.py +0 -87
- package/src/graph/cluster_builder.py +0 -188
- package/src/graph/cluster_summary.py +0 -148
- package/src/graph/constants.py +0 -47
- package/src/graph/edge_builder.py +0 -162
- package/src/graph/entity_extractor.py +0 -95
- package/src/graph/graph_core.py +0 -226
- package/src/graph/graph_search.py +0 -231
- package/src/graph/hierarchical.py +0 -207
- package/src/graph/schema.py +0 -99
- package/src/graph_engine.py +0 -52
- package/src/hnsw_index.py +0 -628
- package/src/hybrid_search.py +0 -46
- package/src/learning/__init__.py +0 -217
- package/src/learning/adaptive_ranker.py +0 -682
- package/src/learning/bootstrap/__init__.py +0 -69
- package/src/learning/bootstrap/constants.py +0 -93
- package/src/learning/bootstrap/db_queries.py +0 -316
- package/src/learning/bootstrap/sampling.py +0 -82
- package/src/learning/bootstrap/text_utils.py +0 -71
- package/src/learning/cross_project_aggregator.py +0 -857
- package/src/learning/db/__init__.py +0 -40
- package/src/learning/db/constants.py +0 -44
- package/src/learning/db/schema.py +0 -279
- package/src/learning/engagement_tracker.py +0 -628
- package/src/learning/feature_extractor.py +0 -708
- package/src/learning/feedback_collector.py +0 -806
- package/src/learning/learning_db.py +0 -915
- package/src/learning/project_context_manager.py +0 -572
- package/src/learning/ranking/__init__.py +0 -33
- package/src/learning/ranking/constants.py +0 -84
- package/src/learning/ranking/helpers.py +0 -278
- package/src/learning/source_quality_scorer.py +0 -676
- package/src/learning/synthetic_bootstrap.py +0 -755
- package/src/learning/tests/test_adaptive_ranker.py +0 -325
- package/src/learning/tests/test_adaptive_ranker_v28.py +0 -60
- package/src/learning/tests/test_aggregator.py +0 -306
- package/src/learning/tests/test_auto_retrain_v28.py +0 -35
- package/src/learning/tests/test_e2e_ranking_v28.py +0 -82
- package/src/learning/tests/test_feature_extractor_v28.py +0 -93
- package/src/learning/tests/test_feedback_collector.py +0 -294
- package/src/learning/tests/test_learning_db.py +0 -602
- package/src/learning/tests/test_learning_db_v28.py +0 -110
- package/src/learning/tests/test_learning_init_v28.py +0 -48
- package/src/learning/tests/test_outcome_signals.py +0 -48
- package/src/learning/tests/test_project_context.py +0 -292
- package/src/learning/tests/test_schema_migration.py +0 -319
- package/src/learning/tests/test_signal_inference.py +0 -397
- package/src/learning/tests/test_source_quality.py +0 -351
- package/src/learning/tests/test_synthetic_bootstrap.py +0 -429
- package/src/learning/tests/test_workflow_miner.py +0 -318
- package/src/learning/workflow_pattern_miner.py +0 -655
- package/src/lifecycle/__init__.py +0 -54
- package/src/lifecycle/bounded_growth.py +0 -239
- package/src/lifecycle/compaction_engine.py +0 -226
- package/src/lifecycle/lifecycle_engine.py +0 -355
- package/src/lifecycle/lifecycle_evaluator.py +0 -257
- package/src/lifecycle/lifecycle_scheduler.py +0 -130
- package/src/lifecycle/retention_policy.py +0 -285
- package/src/lifecycle/tests/test_bounded_growth.py +0 -193
- package/src/lifecycle/tests/test_compaction.py +0 -179
- package/src/lifecycle/tests/test_lifecycle_engine.py +0 -137
- package/src/lifecycle/tests/test_lifecycle_evaluation.py +0 -177
- package/src/lifecycle/tests/test_lifecycle_scheduler.py +0 -127
- package/src/lifecycle/tests/test_lifecycle_search.py +0 -109
- package/src/lifecycle/tests/test_mcp_compact.py +0 -149
- package/src/lifecycle/tests/test_mcp_lifecycle_status.py +0 -114
- package/src/lifecycle/tests/test_retention_policy.py +0 -162
- package/src/mcp_tools_v28.py +0 -281
- package/src/memory/__init__.py +0 -36
- package/src/memory/cli.py +0 -205
- package/src/memory/constants.py +0 -39
- package/src/memory/helpers.py +0 -28
- package/src/memory/schema.py +0 -166
- package/src/memory-profiles.py +0 -595
- package/src/memory-reset.py +0 -491
- package/src/memory_compression.py +0 -989
- package/src/memory_store_v2.py +0 -1155
- package/src/migrate_v1_to_v2.py +0 -629
- package/src/pattern_learner.py +0 -34
- package/src/patterns/__init__.py +0 -24
- package/src/patterns/analyzers.py +0 -251
- package/src/patterns/learner.py +0 -271
- package/src/patterns/scoring.py +0 -171
- package/src/patterns/store.py +0 -225
- package/src/patterns/terminology.py +0 -140
- package/src/provenance_tracker.py +0 -312
- package/src/qualixar_attribution.py +0 -139
- package/src/qualixar_watermark.py +0 -78
- package/src/query_optimizer.py +0 -511
- package/src/rate_limiter.py +0 -83
- package/src/search/__init__.py +0 -20
- package/src/search/cli.py +0 -77
- package/src/search/constants.py +0 -26
- package/src/search/engine.py +0 -241
- package/src/search/fusion.py +0 -122
- package/src/search/index_loader.py +0 -114
- package/src/search/methods.py +0 -162
- package/src/search_engine_v2.py +0 -401
- package/src/setup_validator.py +0 -482
- package/src/subscription_manager.py +0 -391
- package/src/tree/__init__.py +0 -59
- package/src/tree/builder.py +0 -185
- package/src/tree/nodes.py +0 -202
- package/src/tree/queries.py +0 -257
- package/src/tree/schema.py +0 -80
- package/src/tree_manager.py +0 -19
- package/src/trust/__init__.py +0 -45
- package/src/trust/constants.py +0 -66
- package/src/trust/queries.py +0 -157
- package/src/trust/schema.py +0 -95
- package/src/trust/scorer.py +0 -299
- package/src/trust/signals.py +0 -95
- package/src/trust_scorer.py +0 -44
- package/ui/app.js +0 -1588
- package/ui/js/graph-cytoscape-monolithic-backup.js +0 -1168
- package/ui/js/graph-cytoscape.js +0 -1168
- package/ui/js/graph-d3-backup.js +0 -32
- package/ui/js/graph.js +0 -32
- package/ui_server.py +0 -286
- /package/docs/{ACCESSIBILITY.md → v2-archive/ACCESSIBILITY.md} +0 -0
- /package/docs/{ARCHITECTURE.md → v2-archive/ARCHITECTURE.md} +0 -0
- /package/docs/{CLI-COMMANDS-REFERENCE.md → v2-archive/CLI-COMMANDS-REFERENCE.md} +0 -0
- /package/docs/{COMPRESSION-README.md → v2-archive/COMPRESSION-README.md} +0 -0
- /package/docs/{FRAMEWORK-INTEGRATIONS.md → v2-archive/FRAMEWORK-INTEGRATIONS.md} +0 -0
- /package/docs/{MCP-MANUAL-SETUP.md → v2-archive/MCP-MANUAL-SETUP.md} +0 -0
- /package/docs/{MCP-TROUBLESHOOTING.md → v2-archive/MCP-TROUBLESHOOTING.md} +0 -0
- /package/docs/{PATTERN-LEARNING.md → v2-archive/PATTERN-LEARNING.md} +0 -0
- /package/docs/{PROFILES-GUIDE.md → v2-archive/PROFILES-GUIDE.md} +0 -0
- /package/docs/{RESET-GUIDE.md → v2-archive/RESET-GUIDE.md} +0 -0
- /package/docs/{SEARCH-ENGINE-V2.2.0.md → v2-archive/SEARCH-ENGINE-V2.2.0.md} +0 -0
- /package/docs/{SEARCH-INTEGRATION-GUIDE.md → v2-archive/SEARCH-INTEGRATION-GUIDE.md} +0 -0
- /package/docs/{UI-SERVER.md → v2-archive/UI-SERVER.md} +0 -0
- /package/docs/{UNIVERSAL-INTEGRATION.md → v2-archive/UNIVERSAL-INTEGRATION.md} +0 -0
- /package/docs/{V2.2.0-OPTIONAL-SEARCH.md → v2-archive/V2.2.0-OPTIONAL-SEARCH.md} +0 -0
- /package/docs/{WINDOWS-INSTALL-README.txt → v2-archive/WINDOWS-INSTALL-README.txt} +0 -0
- /package/docs/{WINDOWS-POST-INSTALL.txt → v2-archive/WINDOWS-POST-INSTALL.txt} +0 -0
- /package/docs/{example_graph_usage.py → v2-archive/example_graph_usage.py} +0 -0
- /package/{completions → ide/completions}/slm.bash +0 -0
- /package/{completions → ide/completions}/slm.zsh +0 -0
- /package/{configs → ide/configs}/cody-commands.json +0 -0
- /package/{install-skills.sh → scripts/install-skills.sh} +0 -0
- /package/{install.ps1 → scripts/install.ps1} +0 -0
- /package/{install.sh → scripts/install.sh} +0 -0
package/mcp_server.py
DELETED
|
@@ -1,1808 +0,0 @@
|
|
|
1
|
-
#!/usr/bin/env python3
|
|
2
|
-
# SPDX-License-Identifier: MIT
|
|
3
|
-
# Copyright (c) 2026 SuperLocalMemory (superlocalmemory.com)
|
|
4
|
-
"""SuperLocalMemory V2 - MCP Server
|
|
5
|
-
Universal memory access for all MCP-compatible tools (Cursor, Windsurf, Claude Desktop, Continue.dev)
|
|
6
|
-
|
|
7
|
-
IMPORTANT: This is an ADDITION to existing skills, not a replacement.
|
|
8
|
-
Skills in Claude Code continue to work unchanged.
|
|
9
|
-
|
|
10
|
-
Architecture:
|
|
11
|
-
MCP Server (this file)
|
|
12
|
-
↓
|
|
13
|
-
Calls existing memory_store_v2.py
|
|
14
|
-
↓
|
|
15
|
-
Same SQLite database as skills
|
|
16
|
-
|
|
17
|
-
Usage:
|
|
18
|
-
# Run as stdio MCP server (for local IDEs)
|
|
19
|
-
python3 mcp_server.py
|
|
20
|
-
|
|
21
|
-
# Run as HTTP MCP server (for remote access)
|
|
22
|
-
python3 mcp_server.py --transport http --port 8001
|
|
23
|
-
"""
|
|
24
|
-
from mcp.server.fastmcp import FastMCP, Context
|
|
25
|
-
from mcp.types import ToolAnnotations
|
|
26
|
-
import sys
|
|
27
|
-
import os
|
|
28
|
-
import json
|
|
29
|
-
import re
|
|
30
|
-
import time
|
|
31
|
-
import threading
|
|
32
|
-
from pathlib import Path
|
|
33
|
-
from typing import Optional, Dict, List, Any
|
|
34
|
-
|
|
35
|
-
# Add src directory to path (use existing code!)
|
|
36
|
-
import logging
|
|
37
|
-
_raw_memory_dir = os.environ.get("SL_MEMORY_PATH")
|
|
38
|
-
MEMORY_DIR = Path(_raw_memory_dir).expanduser() if _raw_memory_dir else (Path.home() / ".claude-memory")
|
|
39
|
-
MEMORY_DIR = MEMORY_DIR.resolve()
|
|
40
|
-
|
|
41
|
-
_required_modules = ("memory_store_v2.py", "graph_engine.py", "pattern_learner.py")
|
|
42
|
-
if not MEMORY_DIR.is_dir() or not all((MEMORY_DIR / m).exists() for m in _required_modules):
|
|
43
|
-
raise RuntimeError(f"Invalid SuperLocalMemory install directory: {MEMORY_DIR}")
|
|
44
|
-
|
|
45
|
-
sys.path.insert(0, str(MEMORY_DIR))
|
|
46
|
-
|
|
47
|
-
# Import existing core modules (zero duplicate logic)
|
|
48
|
-
try:
|
|
49
|
-
from memory_store_v2 import MemoryStoreV2
|
|
50
|
-
from graph_engine import GraphEngine
|
|
51
|
-
from pattern_learner import PatternLearner
|
|
52
|
-
except ImportError as e:
|
|
53
|
-
print(f"Error: Could not import SuperLocalMemory modules: {e}", file=sys.stderr)
|
|
54
|
-
print(f"Ensure SuperLocalMemory V2 is installed at {MEMORY_DIR}", file=sys.stderr)
|
|
55
|
-
sys.exit(1)
|
|
56
|
-
|
|
57
|
-
# Agent Registry + Provenance (v2.5+)
|
|
58
|
-
try:
|
|
59
|
-
from agent_registry import AgentRegistry
|
|
60
|
-
from provenance_tracker import ProvenanceTracker
|
|
61
|
-
PROVENANCE_AVAILABLE = True
|
|
62
|
-
except ImportError:
|
|
63
|
-
PROVENANCE_AVAILABLE = False
|
|
64
|
-
|
|
65
|
-
# Trust Scorer (v2.6 — enforcement)
|
|
66
|
-
try:
|
|
67
|
-
from trust_scorer import TrustScorer
|
|
68
|
-
TRUST_AVAILABLE = True
|
|
69
|
-
except ImportError:
|
|
70
|
-
TRUST_AVAILABLE = False
|
|
71
|
-
|
|
72
|
-
# Qualixar Attribution (v2.8.3 — 3-layer provenance)
|
|
73
|
-
try:
|
|
74
|
-
from qualixar_attribution import QualixarSigner
|
|
75
|
-
from qualixar_watermark import encode_watermark
|
|
76
|
-
_signer = QualixarSigner("superlocalmemory", "2.8.3")
|
|
77
|
-
ATTRIBUTION_AVAILABLE = True
|
|
78
|
-
except ImportError:
|
|
79
|
-
_signer = None
|
|
80
|
-
ATTRIBUTION_AVAILABLE = False
|
|
81
|
-
|
|
82
|
-
|
|
83
|
-
def _sign_response(response: dict) -> dict:
|
|
84
|
-
"""Apply Layer 2 cryptographic signing to MCP tool responses."""
|
|
85
|
-
if _signer and isinstance(response, dict):
|
|
86
|
-
try:
|
|
87
|
-
return _signer.sign(response)
|
|
88
|
-
except Exception:
|
|
89
|
-
pass
|
|
90
|
-
return response
|
|
91
|
-
|
|
92
|
-
# Learning System (v2.7+)
|
|
93
|
-
try:
|
|
94
|
-
sys.path.insert(0, str(Path(__file__).parent / "src"))
|
|
95
|
-
from learning import get_learning_db, get_adaptive_ranker, get_feedback_collector, get_engagement_tracker, get_status as get_learning_status
|
|
96
|
-
from learning import FULL_LEARNING_AVAILABLE, ML_RANKING_AVAILABLE
|
|
97
|
-
LEARNING_AVAILABLE = True
|
|
98
|
-
except ImportError:
|
|
99
|
-
LEARNING_AVAILABLE = False
|
|
100
|
-
|
|
101
|
-
# ============================================================================
|
|
102
|
-
# Synthetic Bootstrap Auto-Trigger (v2.7 — P1-12)
|
|
103
|
-
# Runs ONCE on first recall if: memory count > 50, no model, LightGBM available.
|
|
104
|
-
# Spawns in background thread — never blocks recall. All errors swallowed.
|
|
105
|
-
# ============================================================================
|
|
106
|
-
|
|
107
|
-
_bootstrap_checked = False
|
|
108
|
-
|
|
109
|
-
|
|
110
|
-
def _maybe_bootstrap():
|
|
111
|
-
"""Check if synthetic bootstrap is needed and run it in a background thread.
|
|
112
|
-
|
|
113
|
-
Called once from the first recall invocation. Sets _bootstrap_checked = True
|
|
114
|
-
immediately to prevent re-entry. The actual bootstrap runs in a daemon thread
|
|
115
|
-
so it never blocks the recall response.
|
|
116
|
-
|
|
117
|
-
Conditions for bootstrap:
|
|
118
|
-
1. LEARNING_AVAILABLE and ML_RANKING_AVAILABLE flags are True
|
|
119
|
-
2. SyntheticBootstrapper.should_bootstrap() returns True (checks:
|
|
120
|
-
- LightGBM + NumPy installed
|
|
121
|
-
- No existing model file at ~/.claude-memory/models/ranker.txt
|
|
122
|
-
- Memory count > 50)
|
|
123
|
-
|
|
124
|
-
CRITICAL: This function wraps everything in try/except. Bootstrap failure
|
|
125
|
-
must NEVER break recall. It is purely an optimization — first-time ML
|
|
126
|
-
model creation so users don't have to wait 200+ recalls for personalization.
|
|
127
|
-
"""
|
|
128
|
-
global _bootstrap_checked
|
|
129
|
-
_bootstrap_checked = True # Set immediately to prevent re-entry
|
|
130
|
-
|
|
131
|
-
try:
|
|
132
|
-
if not LEARNING_AVAILABLE:
|
|
133
|
-
return
|
|
134
|
-
if not ML_RANKING_AVAILABLE:
|
|
135
|
-
return
|
|
136
|
-
|
|
137
|
-
from learning.synthetic_bootstrap import SyntheticBootstrapper
|
|
138
|
-
bootstrapper = SyntheticBootstrapper(memory_db_path=DB_PATH)
|
|
139
|
-
|
|
140
|
-
if not bootstrapper.should_bootstrap():
|
|
141
|
-
return
|
|
142
|
-
|
|
143
|
-
# Run bootstrap in background thread — never block recall
|
|
144
|
-
import threading
|
|
145
|
-
|
|
146
|
-
def _run_bootstrap():
|
|
147
|
-
try:
|
|
148
|
-
result = bootstrapper.bootstrap_model()
|
|
149
|
-
if result:
|
|
150
|
-
import logging
|
|
151
|
-
logging.getLogger("superlocalmemory.mcp").info(
|
|
152
|
-
"Synthetic bootstrap complete: %d samples",
|
|
153
|
-
result.get('training_samples', 0)
|
|
154
|
-
)
|
|
155
|
-
except Exception:
|
|
156
|
-
pass # Bootstrap failure is never critical
|
|
157
|
-
|
|
158
|
-
thread = threading.Thread(target=_run_bootstrap, daemon=True)
|
|
159
|
-
thread.start()
|
|
160
|
-
|
|
161
|
-
except Exception:
|
|
162
|
-
pass # Any failure in bootstrap setup is swallowed silently
|
|
163
|
-
|
|
164
|
-
|
|
165
|
-
def _sanitize_error(error: Exception) -> str:
|
|
166
|
-
"""Strip internal paths and structure from error messages."""
|
|
167
|
-
msg = str(error)
|
|
168
|
-
# Strip file paths containing claude-memory
|
|
169
|
-
msg = re.sub(r'/[\w./-]*claude-memory[\w./-]*', '[internal-path]', msg)
|
|
170
|
-
# Strip file paths containing SuperLocalMemory
|
|
171
|
-
msg = re.sub(r'/[\w./-]*SuperLocalMemory[\w./-]*', '[internal-path]', msg)
|
|
172
|
-
# Strip SQLite table names from error messages
|
|
173
|
-
msg = re.sub(r'table\s+\w+', 'table [redacted]', msg)
|
|
174
|
-
return msg
|
|
175
|
-
|
|
176
|
-
|
|
177
|
-
# Parse command line arguments early (needed for port in constructor)
|
|
178
|
-
import argparse as _argparse
|
|
179
|
-
_parser = _argparse.ArgumentParser(add_help=False)
|
|
180
|
-
_parser.add_argument("--transport", default="stdio")
|
|
181
|
-
_parser.add_argument("--port", type=int, default=8417)
|
|
182
|
-
_pre_args, _ = _parser.parse_known_args()
|
|
183
|
-
|
|
184
|
-
# Initialize MCP server
|
|
185
|
-
mcp = FastMCP(
|
|
186
|
-
name="SuperLocalMemory V2",
|
|
187
|
-
host="127.0.0.1",
|
|
188
|
-
port=_pre_args.port,
|
|
189
|
-
)
|
|
190
|
-
|
|
191
|
-
# Database path
|
|
192
|
-
DB_PATH = MEMORY_DIR / "memory.db"
|
|
193
|
-
|
|
194
|
-
# ============================================================================
# Shared singleton instances (v2.5 — fixes per-call instantiation overhead)
# Every MCP tool handler reuses one MemoryStoreV2 (one ConnectionManager,
# one TF-IDF vectorizer, one write queue) instead of constructing a fresh
# instance per request.
# ============================================================================

_store = None
_graph_engine = None
_pattern_learner = None


def get_store() -> MemoryStoreV2:
    """Return the process-wide MemoryStoreV2, creating it on first use."""
    global _store
    if _store is None:
        _store = MemoryStoreV2(DB_PATH)
    return _store


def get_graph_engine() -> GraphEngine:
    """Return the process-wide GraphEngine, creating it on first use."""
    global _graph_engine
    if _graph_engine is None:
        _graph_engine = GraphEngine(DB_PATH)
    return _graph_engine


def get_pattern_learner() -> PatternLearner:
    """Return the process-wide PatternLearner, creating it on first use."""
    global _pattern_learner
    if _pattern_learner is None:
        _pattern_learner = PatternLearner(DB_PATH)
    return _pattern_learner
|
|
228
|
-
|
|
229
|
-
|
|
230
|
-
_agent_registry = None
_provenance_tracker = None


def get_agent_registry() -> Optional[Any]:
    """Return the shared AgentRegistry (v2.5+).

    Returns None when the provenance subsystem is not installed.
    """
    global _agent_registry
    if not PROVENANCE_AVAILABLE:
        return None
    if _agent_registry is None:
        _agent_registry = AgentRegistry.get_instance(DB_PATH)
    return _agent_registry


def get_provenance_tracker() -> Optional[Any]:
    """Return the shared ProvenanceTracker (v2.5+).

    Returns None when the provenance subsystem is not installed.
    """
    global _provenance_tracker
    if not PROVENANCE_AVAILABLE:
        return None
    if _provenance_tracker is None:
        _provenance_tracker = ProvenanceTracker.get_instance(DB_PATH)
    return _provenance_tracker
|
|
252
|
-
|
|
253
|
-
|
|
254
|
-
_trust_scorer = None


def get_trust_scorer() -> Optional[Any]:
    """Return the shared TrustScorer (v2.6+).

    Returns None when the trust subsystem is not installed.
    """
    global _trust_scorer
    if not TRUST_AVAILABLE:
        return None
    if _trust_scorer is None:
        _trust_scorer = TrustScorer.get_instance(DB_PATH)
    return _trust_scorer
|
|
265
|
-
|
|
266
|
-
|
|
267
|
-
def get_learning_components():
    """Bundle the learning-system singletons into a dict.

    Returns None when the learning subsystem is not installed; otherwise
    a dict with keys 'db', 'ranker', 'feedback' and 'engagement'.
    """
    if not LEARNING_AVAILABLE:
        return None
    return {
        'db': get_learning_db(),
        'ranker': get_adaptive_ranker(),
        'feedback': get_feedback_collector(),
        'engagement': get_engagement_tracker(),
    }
|
|
277
|
-
|
|
278
|
-
|
|
279
|
-
def _get_client_name(ctx: Optional[Context] = None) -> str:
    """Best-effort extraction of the MCP client's name.

    Prefers clientInfo.name from the MCP initialize handshake
    (ctx.session.client_params), which distinguishes Perplexity, Codex,
    Claude Desktop, etc. as separate agents; falls back to the
    per-request ctx.client_id, and finally to the generic "mcp-client".
    """
    if ctx:
        try:
            # Primary source: the initialize handshake's clientInfo.name.
            params = getattr(getattr(ctx, 'session', None), 'client_params', None)
            info = getattr(params, 'clientInfo', None)
            detected = getattr(info, 'name', None)
            if detected:
                return str(detected)
        except Exception:
            pass
        try:
            # Fallback: per-request client id (may be null).
            if ctx.client_id:
                return str(ctx.client_id)
        except Exception:
            pass
    return "mcp-client"
|
|
308
|
-
|
|
309
|
-
|
|
310
|
-
def _register_mcp_agent(agent_name: str = "mcp-client", ctx: Optional[Context] = None):
    """Record the calling MCP client in the agent registry (best-effort).

    v2.7.4: when a Context is supplied, the real client name from the
    initialize handshake replaces the generic default so each IDE/agent
    shows up distinctly. Registration failures are swallowed — tracking
    must never block the tool call that triggered it.
    """
    if ctx:
        detected = _get_client_name(ctx)
        if detected != "mcp-client":
            agent_name = detected

    registry = get_agent_registry()
    if registry is None:
        return
    try:
        registry.register_agent(
            agent_id=f"mcp:{agent_name}",
            agent_name=agent_name,
            protocol="mcp",
        )
    except Exception:
        pass
|
|
331
|
-
|
|
332
|
-
|
|
333
|
-
# ============================================================================
|
|
334
|
-
# RECALL BUFFER & SIGNAL INFERENCE ENGINE (v2.7.4 — Silent Learning)
|
|
335
|
-
# ============================================================================
|
|
336
|
-
# Tracks recall operations and infers implicit feedback signals from user
|
|
337
|
-
# behavior patterns. Zero user effort — all signals auto-collected.
|
|
338
|
-
#
|
|
339
|
-
# Signal Types:
|
|
340
|
-
# implicit_positive_timegap — long pause (>5min) after recall = satisfied
|
|
341
|
-
# implicit_negative_requick — quick re-query (<30s) = dissatisfied
|
|
342
|
-
# implicit_positive_reaccess — same memory in consecutive recalls
|
|
343
|
-
# implicit_positive_cross_tool — same memory recalled by different agents
|
|
344
|
-
# implicit_positive_post_update — memory updated after being recalled
|
|
345
|
-
# implicit_negative_post_delete — memory deleted after being recalled
|
|
346
|
-
#
|
|
347
|
-
# Research: Hu et al. 2008 (implicit feedback), BPR Rendle 2009 (pairwise)
|
|
348
|
-
# ============================================================================
|
|
349
|
-
|
|
350
|
-
class _RecallBuffer:
|
|
351
|
-
"""Thread-safe buffer tracking recent recall operations for signal inference.
|
|
352
|
-
|
|
353
|
-
Stores the last recall per agent_id so we can compare consecutive recalls
|
|
354
|
-
and infer whether the user found results useful.
|
|
355
|
-
|
|
356
|
-
Rate limiting: max 5 implicit signals per agent per minute to prevent gaming.
|
|
357
|
-
"""
|
|
358
|
-
|
|
359
|
-
def __init__(self):
|
|
360
|
-
self._lock = threading.Lock()
|
|
361
|
-
# {agent_id: {query, result_ids, timestamp, result_id_set}}
|
|
362
|
-
self._last_recall: Dict[str, Dict[str, Any]] = {}
|
|
363
|
-
# Global last recall (for cross-agent comparison)
|
|
364
|
-
self._global_last: Optional[Dict[str, Any]] = None
|
|
365
|
-
# Rate limiter: {agent_id: [timestamp, timestamp, ...]}
|
|
366
|
-
self._signal_timestamps: Dict[str, List[float]] = {}
|
|
367
|
-
# Set of memory_ids from the most recent recall (for post-action tracking)
|
|
368
|
-
self._recent_result_ids: set = set()
|
|
369
|
-
# Recall counter for passive decay auto-trigger
|
|
370
|
-
self._recall_count: int = 0
|
|
371
|
-
# Adaptive threshold: starts at 300s (5min), adjusts based on user patterns
|
|
372
|
-
self._positive_threshold: float = 300.0
|
|
373
|
-
self._inter_recall_times: List[float] = []
|
|
374
|
-
|
|
375
|
-
def record_recall(
|
|
376
|
-
self,
|
|
377
|
-
query: str,
|
|
378
|
-
result_ids: List[int],
|
|
379
|
-
agent_id: str = "mcp-client",
|
|
380
|
-
) -> List[Dict[str, Any]]:
|
|
381
|
-
"""Record a recall and infer signals from previous recall comparison.
|
|
382
|
-
|
|
383
|
-
Returns a list of inferred signal dicts: [{memory_id, signal_type, query}]
|
|
384
|
-
"""
|
|
385
|
-
now = time.time()
|
|
386
|
-
signals: List[Dict[str, Any]] = []
|
|
387
|
-
|
|
388
|
-
with self._lock:
|
|
389
|
-
self._recall_count += 1
|
|
390
|
-
result_id_set = set(result_ids)
|
|
391
|
-
self._recent_result_ids = result_id_set
|
|
392
|
-
|
|
393
|
-
current = {
|
|
394
|
-
"query": query,
|
|
395
|
-
"result_ids": result_ids,
|
|
396
|
-
"result_id_set": result_id_set,
|
|
397
|
-
"timestamp": now,
|
|
398
|
-
"agent_id": agent_id,
|
|
399
|
-
}
|
|
400
|
-
|
|
401
|
-
# --- Compare with previous recall from SAME agent ---
|
|
402
|
-
prev = self._last_recall.get(agent_id)
|
|
403
|
-
if prev:
|
|
404
|
-
time_gap = now - prev["timestamp"]
|
|
405
|
-
|
|
406
|
-
# Track inter-recall times for adaptive threshold
|
|
407
|
-
self._inter_recall_times.append(time_gap)
|
|
408
|
-
if len(self._inter_recall_times) > 100:
|
|
409
|
-
self._inter_recall_times = self._inter_recall_times[-100:]
|
|
410
|
-
|
|
411
|
-
# Update adaptive threshold (median of recent times, min 60s, max 1800s)
|
|
412
|
-
if len(self._inter_recall_times) >= 10:
|
|
413
|
-
sorted_times = sorted(self._inter_recall_times)
|
|
414
|
-
median = sorted_times[len(sorted_times) // 2]
|
|
415
|
-
self._positive_threshold = max(60.0, min(median * 0.8, 1800.0))
|
|
416
|
-
|
|
417
|
-
# Signal: Quick re-query with different query = negative
|
|
418
|
-
if time_gap < 30.0 and query != prev["query"]:
|
|
419
|
-
for mid in prev["result_ids"][:5]: # Top 5 only
|
|
420
|
-
signals.append({
|
|
421
|
-
"memory_id": mid,
|
|
422
|
-
"signal_type": "implicit_negative_requick",
|
|
423
|
-
"query": prev["query"],
|
|
424
|
-
"rank_position": prev["result_ids"].index(mid) + 1,
|
|
425
|
-
})
|
|
426
|
-
|
|
427
|
-
# Signal: Long pause = positive for previous results
|
|
428
|
-
elif time_gap > self._positive_threshold:
|
|
429
|
-
for mid in prev["result_ids"][:3]: # Top 3 only
|
|
430
|
-
signals.append({
|
|
431
|
-
"memory_id": mid,
|
|
432
|
-
"signal_type": "implicit_positive_timegap",
|
|
433
|
-
"query": prev["query"],
|
|
434
|
-
"rank_position": prev["result_ids"].index(mid) + 1,
|
|
435
|
-
})
|
|
436
|
-
|
|
437
|
-
# Signal: Same memory re-accessed = positive
|
|
438
|
-
overlap = result_id_set & prev["result_id_set"]
|
|
439
|
-
for mid in overlap:
|
|
440
|
-
signals.append({
|
|
441
|
-
"memory_id": mid,
|
|
442
|
-
"signal_type": "implicit_positive_reaccess",
|
|
443
|
-
"query": query,
|
|
444
|
-
})
|
|
445
|
-
|
|
446
|
-
# --- Compare with previous recall from DIFFERENT agent (cross-tool) ---
|
|
447
|
-
global_prev = self._global_last
|
|
448
|
-
if global_prev and global_prev["agent_id"] != agent_id:
|
|
449
|
-
cross_overlap = result_id_set & global_prev["result_id_set"]
|
|
450
|
-
for mid in cross_overlap:
|
|
451
|
-
signals.append({
|
|
452
|
-
"memory_id": mid,
|
|
453
|
-
"signal_type": "implicit_positive_cross_tool",
|
|
454
|
-
"query": query,
|
|
455
|
-
})
|
|
456
|
-
|
|
457
|
-
# Update buffers
|
|
458
|
-
self._last_recall[agent_id] = current
|
|
459
|
-
self._global_last = current
|
|
460
|
-
|
|
461
|
-
return signals
|
|
462
|
-
|
|
463
|
-
def check_post_action(self, memory_id: int, action: str) -> Optional[Dict[str, Any]]:
|
|
464
|
-
"""Check if a memory action (update/delete) follows a recent recall.
|
|
465
|
-
|
|
466
|
-
Returns signal dict if the memory was in recent results, else None.
|
|
467
|
-
"""
|
|
468
|
-
with self._lock:
|
|
469
|
-
if memory_id not in self._recent_result_ids:
|
|
470
|
-
return None
|
|
471
|
-
|
|
472
|
-
if action == "update":
|
|
473
|
-
return {
|
|
474
|
-
"memory_id": memory_id,
|
|
475
|
-
"signal_type": "implicit_positive_post_update",
|
|
476
|
-
"query": self._global_last["query"] if self._global_last else "",
|
|
477
|
-
}
|
|
478
|
-
elif action == "delete":
|
|
479
|
-
return {
|
|
480
|
-
"memory_id": memory_id,
|
|
481
|
-
"signal_type": "implicit_negative_post_delete",
|
|
482
|
-
"query": self._global_last["query"] if self._global_last else "",
|
|
483
|
-
}
|
|
484
|
-
return None
|
|
485
|
-
|
|
486
|
-
def check_rate_limit(self, agent_id: str, max_per_minute: int = 5) -> bool:
|
|
487
|
-
"""Return True if agent is within rate limit, False if exceeded."""
|
|
488
|
-
now = time.time()
|
|
489
|
-
with self._lock:
|
|
490
|
-
if agent_id not in self._signal_timestamps:
|
|
491
|
-
self._signal_timestamps[agent_id] = []
|
|
492
|
-
|
|
493
|
-
# Clean old timestamps (older than 60s)
|
|
494
|
-
self._signal_timestamps[agent_id] = [
|
|
495
|
-
ts for ts in self._signal_timestamps[agent_id]
|
|
496
|
-
if now - ts < 60.0
|
|
497
|
-
]
|
|
498
|
-
|
|
499
|
-
if len(self._signal_timestamps[agent_id]) >= max_per_minute:
|
|
500
|
-
return False
|
|
501
|
-
|
|
502
|
-
self._signal_timestamps[agent_id].append(now)
|
|
503
|
-
return True
|
|
504
|
-
|
|
505
|
-
def get_recall_count(self) -> int:
|
|
506
|
-
"""Get total recall count (for passive decay trigger)."""
|
|
507
|
-
with self._lock:
|
|
508
|
-
return self._recall_count
|
|
509
|
-
|
|
510
|
-
def get_stats(self) -> Dict[str, Any]:
|
|
511
|
-
"""Get buffer statistics for diagnostics."""
|
|
512
|
-
with self._lock:
|
|
513
|
-
return {
|
|
514
|
-
"recall_count": self._recall_count,
|
|
515
|
-
"tracked_agents": len(self._last_recall),
|
|
516
|
-
"positive_threshold_s": round(self._positive_threshold, 1),
|
|
517
|
-
"recent_results_count": len(self._recent_result_ids),
|
|
518
|
-
}
|
|
519
|
-
|
|
520
|
-
|
|
521
|
-
# Process-wide recall buffer shared by all tool handlers.
_recall_buffer = _RecallBuffer()
|
|
523
|
-
|
|
524
|
-
|
|
525
|
-
def _emit_implicit_signals(signals: List[Dict[str, Any]], agent_id: str = "mcp-client") -> int:
    """Forward inferred implicit signals to the feedback collector.

    Best-effort by design: every failure is swallowed because signal
    collection must NEVER break the operation that produced it. Emission
    is capped at five signals per agent per minute via the recall
    buffer's rate limiter.

    Returns the number of signals actually persisted.
    """
    if not signals or not LEARNING_AVAILABLE:
        return 0

    emitted = 0
    try:
        collector = get_feedback_collector()
        if not collector:
            return 0

        for signal in signals:
            if not _recall_buffer.check_rate_limit(agent_id):
                break  # This agent hit its per-minute cap.
            try:
                collector.record_implicit_signal(
                    memory_id=signal["memory_id"],
                    query=signal.get("query", ""),
                    signal_type=signal["signal_type"],
                    source_tool=agent_id,
                    rank_position=signal.get("rank_position"),
                )
                emitted += 1
            except Exception:
                pass  # A single bad signal is not worth failing over.
    except Exception:
        pass  # Never propagate into the caller.

    return emitted
|
|
560
|
-
|
|
561
|
-
|
|
562
|
-
def _maybe_passive_decay() -> None:
    """Kick off passive feedback decay on every 10th recall.

    Runs compute_passive_decay in a daemon thread so recall latency is
    unaffected; all failures are swallowed because decay is purely
    opportunistic maintenance.
    """
    try:
        if not LEARNING_AVAILABLE:
            return
        if _recall_buffer.get_recall_count() % 10 != 0:
            return

        collector = get_feedback_collector()
        if not collector:
            return

        def _decay_worker():
            try:
                emitted = collector.compute_passive_decay(threshold=5)
                if emitted > 0:
                    import logging
                    logging.getLogger("superlocalmemory.mcp").info(
                        "Passive decay: %d signals emitted", emitted
                    )
            except Exception:
                pass

        threading.Thread(target=_decay_worker, daemon=True).start()
    except Exception:
        pass
|
|
589
|
-
|
|
590
|
-
|
|
591
|
-
# ============================================================================
|
|
592
|
-
# Eager initialization — ensure schema migration runs at startup (v2.8)
|
|
593
|
-
# ============================================================================
|
|
594
|
-
|
|
595
|
-
def _eager_init():
|
|
596
|
-
"""Initialize all engines at startup. Ensures schema migration runs."""
|
|
597
|
-
try:
|
|
598
|
-
get_store() # Triggers MemoryStoreV2._init_db() which creates v2.8 columns
|
|
599
|
-
except Exception:
|
|
600
|
-
pass # Don't block server startup
|
|
601
|
-
try:
|
|
602
|
-
from lifecycle.lifecycle_engine import LifecycleEngine
|
|
603
|
-
LifecycleEngine() # Triggers _ensure_columns()
|
|
604
|
-
except Exception:
|
|
605
|
-
pass
|
|
606
|
-
try:
|
|
607
|
-
from behavioral.outcome_tracker import OutcomeTracker
|
|
608
|
-
OutcomeTracker(str(MEMORY_DIR / "learning.db"))
|
|
609
|
-
except Exception:
|
|
610
|
-
logging.getLogger("superlocalmemory.mcp").exception("OutcomeTracker eager initialization failed")
|
|
611
|
-
try:
|
|
612
|
-
from compliance.audit_db import AuditDB
|
|
613
|
-
AuditDB(str(MEMORY_DIR / "audit.db"))
|
|
614
|
-
except Exception:
|
|
615
|
-
logging.getLogger("superlocalmemory.mcp").exception("AuditDB eager initialization failed")
|
|
616
|
-
|
|
617
|
-
# Run once at module load
|
|
618
|
-
_eager_init()
|
|
619
|
-
|
|
620
|
-
|
|
621
|
-
# ============================================================================
|
|
622
|
-
# MCP TOOLS (Functions callable by AI)
|
|
623
|
-
# ============================================================================
|
|
624
|
-
|
|
625
|
-
@mcp.tool(annotations=ToolAnnotations(
    readOnlyHint=False,
    destructiveHint=False,
    openWorldHint=False,
))
async def remember(
    content: str,
    tags: str = "",
    project: str = "",
    importance: int = 5,
    ctx: Context = None,
) -> dict:
    """
    Save content to SuperLocalMemory with intelligent indexing.

    This calls the SAME backend as /superlocalmemoryv2-remember skill.
    All memories are stored in the same local SQLite database.

    Args:
        content: The content to remember (required)
        tags: Comma-separated tags (optional, e.g. "python,api,backend")
        project: Project name to scope the memory
        importance: Importance score 1-10 (default 5)

    Returns:
        {
            "success": bool,
            "memory_id": int,
            "message": str,
            "content_preview": str
        }

    Examples:
        remember("Use FastAPI for REST APIs", tags="python,backend", project="myapp")
        remember("JWT auth with refresh tokens", tags="security,auth", importance=8)
    """
    try:
        # Register MCP agent (v2.5 — agent tracking, v2.7.4 — client detection)
        _register_mcp_agent(ctx=ctx)

        # FIX: resolve the caller once so trust, provenance and write tracking
        # all use the SAME agent id. Previously this handler mixed the
        # hard-coded "mcp:mcp-client" with "mcp:client" while recall() used
        # the detected client name — making per-agent stats inconsistent.
        agent_id = f"mcp:{_get_client_name(ctx)}"

        # Trust enforcement (v2.6) — block untrusted agents from writing.
        try:
            trust = get_trust_scorer()
            if trust and not trust.check_trust(agent_id, "write"):
                return {
                    "success": False,
                    "error": "Agent trust score too low for write operations",
                    "message": "Trust enforcement blocked this operation"
                }
        except Exception:
            pass  # Trust check failure should not block operations

        # Delegate to the shared MemoryStoreV2 (no duplicate logic).
        store = get_store()
        memory_id = store.add_memory(
            content=content,
            tags=tags.split(",") if tags else None,
            project_name=project or None,
            importance=importance
        )

        # Record provenance (v2.5 — who created this memory). Best-effort.
        prov = get_provenance_tracker()
        if prov:
            try:
                prov.record_provenance(memory_id, created_by=agent_id, source_protocol="mcp")
            except Exception:
                pass

        # Track the write in the agent registry. Best-effort.
        registry = get_agent_registry()
        if registry:
            try:
                registry.record_write(agent_id)
            except Exception:
                pass

        preview = content[:100] + "..." if len(content) > 100 else content

        return _sign_response({
            "success": True,
            "memory_id": memory_id,
            "message": f"Memory saved with ID {memory_id}",
            "content_preview": preview
        })

    except Exception as e:
        return {
            "success": False,
            "error": _sanitize_error(e),
            "message": "Failed to save memory"
        }
|
|
720
|
-
|
|
721
|
-
|
|
722
|
-
@mcp.tool(annotations=ToolAnnotations(
    readOnlyHint=True,
    destructiveHint=False,
    openWorldHint=False,
))
async def recall(
    query: str,
    limit: int = 10,
    min_score: float = 0.3,
    ctx: Context = None,
) -> dict:
    """
    Search memories using semantic similarity and knowledge graph.
    Results are personalized based on your usage patterns — the more you
    use SuperLocalMemory, the better results get. All learning is local.

    After using results, call memory_used(memory_id) for memories you
    referenced to help improve future recall quality.

    Args:
        query: Search query (required)
        limit: Maximum results to return (default 10)
        min_score: Minimum relevance score 0.0-1.0 (default 0.3)

    Returns:
        {
            "query": str,
            "results": [
                {
                    "id": int,
                    "content": str,
                    "score": float,
                    "tags": list,
                    "project": str,
                    "created_at": str
                }
            ],
            "count": int
        }

    Examples:
        recall("authentication patterns")
        recall("FastAPI", limit=5, min_score=0.5)
    """
    try:
        # Register MCP agent (v2.7.4 — client detection for agent tab)
        _register_mcp_agent(ctx=ctx)

        # Track recall in agent registry (best-effort).
        registry = get_agent_registry()
        if registry:
            try:
                agent_name = _get_client_name(ctx)
                registry.record_recall(f"mcp:{agent_name}")
            except Exception:
                pass

        store = get_store()

        # Hybrid search (opt-in via env var, v2.6); falls back to the
        # standard search on any failure, including a missing module.
        if os.environ.get('SLM_HYBRID_SEARCH', 'false').lower() == 'true':
            try:
                from hybrid_search import HybridSearchEngine
                engine = HybridSearchEngine(store=store)
                results = engine.search(query, limit=limit)
            except Exception:
                # FIX: was `except (ImportError, Exception)` — ImportError is
                # already an Exception subclass, so the tuple was redundant.
                results = store.search(query, limit=limit)
        else:
            results = store.search(query, limit=limit)

        # v2.7: Auto-trigger synthetic bootstrap on first recall (P1-12)
        if not _bootstrap_checked:
            _maybe_bootstrap()

        if LEARNING_AVAILABLE:
            # v2.7: learning-based re-ranking (graceful fallback).
            try:
                ranker = get_adaptive_ranker()
                if ranker:
                    results = ranker.rerank(results, query)
            except Exception:
                pass  # Re-ranking failure must never break recall

            # Track this recall for passive feedback decay.
            try:
                feedback = get_feedback_collector()
                if feedback:
                    feedback.record_recall_results(query, [r.get('id') for r in results if r.get('id')])
                tracker = get_engagement_tracker()
                if tracker:
                    tracker.record_activity('recall_performed', source='mcp')
            except Exception:
                pass  # Tracking failure must never break recall

        # v2.7.4: implicit signal inference from recall patterns.
        try:
            result_ids = [r.get('id') for r in results if r.get('id')]
            signals = _recall_buffer.record_recall(query, result_ids)
            if signals:
                _emit_implicit_signals(signals)
            # Auto-trigger passive decay every 10 recalls.
            _maybe_passive_decay()
        except Exception:
            pass  # Signal inference must NEVER break recall

        # Drop results below the caller's relevance floor.
        filtered_results = [r for r in results if r.get('score', 0) >= min_score]

        return _sign_response({
            "success": True,
            "query": query,
            "results": filtered_results,
            "count": len(filtered_results),
            "total_searched": len(results)
        })

    except Exception as e:
        return {
            "success": False,
            "error": _sanitize_error(e),
            "message": "Failed to search memories",
            "results": [],
            "count": 0
        }
|
|
852
|
-
|
|
853
|
-
|
|
854
|
-
@mcp.tool(annotations=ToolAnnotations(
    readOnlyHint=True,
    destructiveHint=False,
    openWorldHint=False,
))
async def list_recent(limit: int = 10) -> dict:
    """
    List most recent memories.

    Args:
        limit: Number of memories to return (default 10)

    Returns:
        {
            "memories": list,
            "count": int
        }
    """
    # NOTE(review): unlike remember/recall/get_status, this response is not
    # wrapped in _sign_response — confirm whether that is intentional.
    try:
        recent = get_store().list_all(limit=limit)
        return {
            "success": True,
            "memories": recent,
            "count": len(recent)
        }
    except Exception as e:
        return {
            "success": False,
            "error": _sanitize_error(e),
            "message": "Failed to list memories",
            "memories": [],
            "count": 0
        }
|
|
893
|
-
|
|
894
|
-
|
|
895
|
-
@mcp.tool(annotations=ToolAnnotations(
    readOnlyHint=True,
    destructiveHint=False,
    openWorldHint=False,
))
async def get_status() -> dict:
    """
    Get SuperLocalMemory system status and statistics.

    Returns:
        {
            "total_memories": int,
            "graph_clusters": int,
            "patterns_learned": int,
            "database_size_mb": float
        }
    """
    try:
        # Delegate to the shared store's stats implementation.
        system_stats = get_store().get_stats()
        return _sign_response({"success": True, **system_stats})
    except Exception as e:
        return {
            "success": False,
            "error": _sanitize_error(e),
            "message": "Failed to get status"
        }
|
|
930
|
-
|
|
931
|
-
|
|
932
|
-
@mcp.tool(annotations=ToolAnnotations(
    readOnlyHint=False,
    destructiveHint=False,
    openWorldHint=False,
))
async def build_graph() -> dict:
    """
    Build or rebuild the knowledge graph from existing memories.

    This runs TF-IDF entity extraction and Leiden clustering to
    automatically discover relationships between memories.

    Returns:
        {
            "success": bool,
            "clusters_created": int,
            "memories_processed": int,
            "message": str
        }
    """
    try:
        # Delegate to the shared GraphEngine singleton.
        graph_stats = get_graph_engine().build_graph()
        return {
            "success": True,
            "message": "Knowledge graph built successfully",
            **graph_stats
        }
    except Exception as e:
        return {
            "success": False,
            "error": _sanitize_error(e),
            "message": "Failed to build graph"
        }
|
|
971
|
-
|
|
972
|
-
|
|
973
|
-
@mcp.tool(annotations=ToolAnnotations(
    readOnlyHint=False,
    destructiveHint=False,
    openWorldHint=False,
))
async def switch_profile(name: str) -> dict:
    """
    Switch to a different memory profile.

    Profiles allow you to maintain separate memory contexts
    (e.g., work, personal, client projects). All profiles share
    one database — switching is instant and safe (no data copying).

    Args:
        name: Profile name to switch to

    Returns:
        {
            "success": bool,
            "profile": str,
            "message": str
        }
    """
    try:
        import json
        from datetime import datetime

        # FIX: removed the unused `from importlib import import_module`, and
        # guard the sys.path insertion so repeated calls no longer prepend
        # duplicate entries. (Kept rather than deleted: optional modules such
        # as auto_backup may live in MEMORY_DIR.)
        if str(MEMORY_DIR) not in sys.path:
            sys.path.insert(0, str(MEMORY_DIR))

        # Profiles live in a small JSON config beside the database.
        config_file = MEMORY_DIR / "profiles.json"

        if config_file.exists():
            with open(config_file, 'r') as f:
                config = json.load(f)
        else:
            config = {'profiles': {'default': {'name': 'default', 'description': 'Default memory profile'}}, 'active_profile': 'default'}

        if name not in config.get('profiles', {}):
            available = ', '.join(config.get('profiles', {}).keys())
            return {
                "success": False,
                "message": f"Profile '{name}' not found. Available: {available}"
            }

        old_profile = config.get('active_profile', 'default')
        config['active_profile'] = name
        config['profiles'][name]['last_used'] = datetime.now().isoformat()

        # NOTE(review): plain overwrite — a crash mid-write could corrupt
        # profiles.json; consider an atomic temp-file + rename.
        with open(config_file, 'w') as f:
            json.dump(config, f, indent=2)

        return {
            "success": True,
            "profile": name,
            "previous_profile": old_profile,
            "message": f"Switched to profile '{name}'. Memory operations now use this profile."
        }

    except Exception as e:
        return {
            "success": False,
            "error": _sanitize_error(e),
            "message": "Failed to switch profile"
        }
|
|
1039
|
-
|
|
1040
|
-
|
|
1041
|
-
@mcp.tool(annotations=ToolAnnotations(
    readOnlyHint=True,
    destructiveHint=False,
    openWorldHint=False,
))
async def backup_status() -> dict:
    """
    Get auto-backup system status for SuperLocalMemory.

    Returns backup configuration, last backup time, next scheduled backup,
    total backup count, and storage used. Useful for monitoring data safety.

    Returns:
        {
            "enabled": bool,
            "interval_display": str,
            "last_backup": str or null,
            "next_backup": str or null,
            "backup_count": int,
            "total_size_mb": float
        }
    """
    try:
        from auto_backup import AutoBackup
        current = AutoBackup().get_status()
        return {"success": True, **current}
    except ImportError:
        # Older installs (pre v2.4.0) ship without the backup module.
        return {
            "success": False,
            "message": "Auto-backup module not installed. Update SuperLocalMemory to v2.4.0+.",
            "enabled": False,
            "backup_count": 0
        }
    except Exception as e:
        return {
            "success": False,
            "error": _sanitize_error(e),
            "message": "Failed to get backup status"
        }
|
|
1084
|
-
|
|
1085
|
-
|
|
1086
|
-
# ============================================================================
|
|
1087
|
-
# LEARNING TOOLS (v2.7 — feedback, transparency, user control)
|
|
1088
|
-
# ============================================================================
|
|
1089
|
-
|
|
1090
|
-
@mcp.tool(annotations=ToolAnnotations(
    readOnlyHint=False,
    destructiveHint=False,
    openWorldHint=True,
))
async def memory_used(
    memory_id: int,
    query: str = "",
    usefulness: str = "high"
) -> dict:
    """
    Call this tool whenever you use information from a recalled memory in
    your response. This is the most important feedback signal — it teaches
    SuperLocalMemory which memories are truly useful and dramatically
    improves future recall quality. All data stays 100% local.

    Best practice: After using recall() results, call memory_used() for
    each memory ID you referenced. This takes <1ms and helps the system
    learn your preferences.

    Args:
        memory_id: ID of the useful memory (from recall results)
        query: The recall query that found it (optional but recommended)
        usefulness: How useful - "high", "medium", or "low" (default "high")

    Returns:
        {"success": bool, "message": str}
    """
    try:
        # Fix: validate usefulness up front. Previously any arbitrary string
        # was recorded into the learning DB, polluting the feedback signal.
        if usefulness not in ("high", "medium", "low"):
            return {
                "success": False,
                "message": f"Invalid usefulness '{usefulness}'. Use 'high', 'medium', or 'low'."
            }

        if not LEARNING_AVAILABLE:
            return {"success": False, "message": "Learning features not available. Install: pip3 install lightgbm scipy"}

        feedback = get_feedback_collector()
        if feedback is None:
            return {"success": False, "message": "Feedback collector not initialized"}

        feedback.record_memory_used(
            memory_id=memory_id,
            query=query,
            usefulness=usefulness,
            source_tool="mcp-client",
        )

        return {
            "success": True,
            "message": f"Feedback recorded for memory #{memory_id} (usefulness: {usefulness})"
        }
    except Exception as e:
        return {"success": False, "error": _sanitize_error(e)}
|
|
1139
|
-
|
|
1140
|
-
|
|
1141
|
-
@mcp.tool(annotations=ToolAnnotations(
    readOnlyHint=True,
    destructiveHint=False,
    openWorldHint=False,
))
async def get_learned_patterns(
    min_confidence: float = 0.6,
    category: str = "all"
) -> dict:
    """
    See what SuperLocalMemory has learned about your preferences,
    projects, and workflow patterns.

    Args:
        min_confidence: Minimum confidence threshold 0.0-1.0 (default 0.6)
        category: Filter by "tech", "workflow", "project", or "all" (default "all")

    Returns:
        {
            "success": bool,
            "patterns": {
                "tech_preferences": [...],
                "workflow_patterns": [...],
            },
            "ranking_phase": str,
            "feedback_count": int
        }
    """
    try:
        if not LEARNING_AVAILABLE:
            return {"success": False, "message": "Learning features not available. Install: pip3 install lightgbm scipy", "patterns": {}}

        ldb = get_learning_db()
        if ldb is None:
            return {"success": False, "message": "Learning database not initialized", "patterns": {}}

        response = {"success": True, "patterns": {}}

        # Layer 1: cross-profile tech preferences.
        if category in ("all", "tech"):
            response["patterns"]["tech_preferences"] = [
                {
                    "id": row["id"],
                    "type": row["pattern_type"],
                    "key": row["key"],
                    "value": row["value"],
                    "confidence": round(row["confidence"], 2),
                    "evidence": row["evidence_count"],
                    "profiles_seen": row["profiles_seen"],
                }
                for row in ldb.get_transferable_patterns(min_confidence=min_confidence)
            ]

        # Layer 3: workflow patterns (note: different column names than Layer 1).
        if category in ("all", "workflow"):
            response["patterns"]["workflow_patterns"] = [
                {
                    "id": row["id"],
                    "type": row["pattern_type"],
                    "key": row["pattern_key"],
                    "value": row["pattern_value"],
                    "confidence": round(row["confidence"], 2),
                }
                for row in ldb.get_workflow_patterns(min_confidence=min_confidence)
            ]

        # Attach adaptive-ranker phase and accumulated feedback volume, if the
        # ranker is initialized.
        ranker = get_adaptive_ranker()
        if ranker:
            response["ranking_phase"] = ranker.get_phase()
            response["feedback_count"] = ldb.get_feedback_count()

        # Overall learning statistics.
        response["stats"] = ldb.get_stats()

        return response
    except Exception as e:
        return {"success": False, "error": _sanitize_error(e), "patterns": {}}
|
|
1221
|
-
|
|
1222
|
-
|
|
1223
|
-
@mcp.tool(annotations=ToolAnnotations(
    readOnlyHint=False,
    destructiveHint=False,
    openWorldHint=False,
))
async def correct_pattern(
    pattern_id: int,
    correct_value: str,
    reason: str = ""
) -> dict:
    """
    Correct a learned pattern that is wrong. Use get_learned_patterns first
    to see pattern IDs.

    Args:
        pattern_id: ID of the pattern to correct
        correct_value: The correct value (e.g., "Vue" instead of "React")
        reason: Why the correction (optional)

    Returns:
        {"success": bool, "message": str}
    """
    try:
        if not LEARNING_AVAILABLE:
            return {"success": False, "message": "Learning features not available"}

        ldb = get_learning_db()
        if ldb is None:
            return {"success": False, "message": "Learning database not initialized"}

        # Look up the existing pattern row; the connection is closed in the
        # finally block regardless of outcome.
        db = ldb._get_connection()
        try:
            row = db.cursor()
            row.execute('SELECT * FROM transferable_patterns WHERE id = ?', (pattern_id,))
            existing = row.fetchone()
            if not existing:
                return {"success": False, "message": f"Pattern #{pattern_id} not found"}

            previous_value = existing['value']

            # Overwrite the pattern with the user-supplied value. A manual
            # correction is treated as ground truth → confidence 1.0.
            ldb.upsert_transferable_pattern(
                pattern_type=existing['pattern_type'],
                key=existing['key'],
                value=correct_value,
                confidence=1.0,  # User correction = maximum confidence
                evidence_count=existing['evidence_count'] + 1,
                profiles_seen=existing['profiles_seen'],
                contradictions=[f"Corrected from '{previous_value}' to '{correct_value}': {reason}"],
            )

            # Also log negative feedback against the old value so the ranker
            # learns from the mistake (memory_id=0 → no specific memory).
            collector = get_feedback_collector()
            if collector:
                collector.record_memory_used(
                    memory_id=0,  # No specific memory
                    query=f"correction:{existing['key']}",
                    usefulness="low",
                    source_tool="mcp-correction",
                )

            return {
                "success": True,
                "message": f"Pattern '{existing['key']}' corrected: '{previous_value}' → '{correct_value}'"
            }
        finally:
            db.close()
    except Exception as e:
        return {"success": False, "error": _sanitize_error(e)}
|
|
1293
|
-
|
|
1294
|
-
|
|
1295
|
-
# ============================================================================
|
|
1296
|
-
# CHATGPT CONNECTOR TOOLS (search + fetch — required by OpenAI MCP spec)
|
|
1297
|
-
# These two tools are required for ChatGPT Connectors and Deep Research.
|
|
1298
|
-
# They wrap existing SuperLocalMemory search/retrieval logic.
|
|
1299
|
-
# Ref: https://platform.openai.com/docs/mcp
|
|
1300
|
-
# ============================================================================
|
|
1301
|
-
|
|
1302
|
-
@mcp.tool(annotations=ToolAnnotations(
    readOnlyHint=True,
    destructiveHint=False,
    openWorldHint=False,
))
async def search(query: str) -> dict:
    """
    Search for documents in SuperLocalMemory.

    Required by ChatGPT Connectors and Deep Research.
    Returns a list of search results with id, title, text snippet, and url.

    Args:
        query: Search query string. Natural language queries work best.

    Returns:
        {"results": [{"id": str, "title": str, "text": str, "url": str}]}
    """
    try:
        store = get_store()
        raw_results = store.search(query, limit=20)

        # v2.7: Learning-based re-ranking (optional, graceful fallback)
        if LEARNING_AVAILABLE:
            try:
                ranker = get_adaptive_ranker()
                if ranker:
                    raw_results = ranker.rerank(raw_results, query)
            except Exception:
                pass  # Re-ranking failure must never break search

        results = []
        for r in raw_results:
            # Drop weak matches so connector clients only see relevant hits.
            if r.get('score', 0) < 0.2:
                continue
            content = r.get('content', '') or r.get('summary', '') or ''
            snippet = content[:200] + "..." if len(content) > 200 else content
            mem_id = str(r.get('id', ''))
            # Fix: `r.get('category', 'Memory')` returns None when the key is
            # present with a None value, crashing `None + ': '`. Use the same
            # `or 'Memory'` guard fetch() already uses.
            title = (r.get('category') or 'Memory') + ': ' + (content[:60].replace('\n', ' ') if content else 'Untitled')
            results.append({
                "id": mem_id,
                "title": title,
                "text": snippet,
                "url": f"memory://local/{mem_id}"
            })

        return {"results": results}

    except Exception as e:
        return {"results": [], "error": _sanitize_error(e)}
|
|
1352
|
-
|
|
1353
|
-
|
|
1354
|
-
@mcp.tool(annotations=ToolAnnotations(
    readOnlyHint=True,
    destructiveHint=False,
    openWorldHint=False,
))
async def fetch(id: str) -> dict:
    """
    Retrieve full content of a memory by ID.

    Required by ChatGPT Connectors and Deep Research.
    Use after search() to get complete document content for analysis and citation.

    Args:
        id: Memory ID from search results.

    Returns:
        {"id": str, "title": str, "text": str, "url": str, "metadata": dict|null}
    """
    try:
        mem = get_store().get_by_id(int(id))
        if not mem:
            raise ValueError(f"Memory with ID {id} not found")

        content = mem.get('content', '') or mem.get('summary', '') or ''
        title = (mem.get('category', 'Memory') or 'Memory') + ': ' + (content[:60].replace('\n', ' ') if content else 'Untitled')

        # Only include metadata fields that are present and truthy; mapping is
        # (source key in the store row) -> (output key in the response).
        field_map = (
            ('tags', 'tags'),
            ('project_name', 'project'),
            ('importance', 'importance'),
            ('cluster_id', 'cluster_id'),
            ('created_at', 'created_at'),
        )
        metadata = {out_key: mem[src_key] for src_key, out_key in field_map if mem.get(src_key)}

        return {
            "id": str(id),
            "title": title,
            "text": content,
            "url": f"memory://local/{id}",
            "metadata": metadata if metadata else None
        }

    except Exception as e:
        # ChatGPT connectors expect fetch() to raise on failure rather than
        # return an error payload.
        raise ValueError(f"Failed to fetch memory {id}: {_sanitize_error(e)}")
|
|
1404
|
-
|
|
1405
|
-
|
|
1406
|
-
# ============================================================================
|
|
1407
|
-
# v2.8 MCP TOOLS — Lifecycle, Behavioral Learning, Compliance
|
|
1408
|
-
# ============================================================================
|
|
1409
|
-
|
|
1410
|
-
# v2.8 tools live in a separate module. The import is optional: on installs
# without mcp_tools_v28 the except branch leaves V28_AVAILABLE = False and
# none of these tools are registered (graceful degradation).
try:
    from mcp_tools_v28 import (
        report_outcome as _report_outcome,
        get_lifecycle_status as _get_lifecycle_status,
        set_retention_policy as _set_retention_policy,
        compact_memories as _compact_memories,
        get_behavioral_patterns as _get_behavioral_patterns,
        audit_trail as _audit_trail,
    )

    V28_AVAILABLE = True

    # Each wrapper below only forwards its arguments to the imported
    # implementation; the wrappers exist so the tools get registered on this
    # server's `mcp` instance with the proper annotations.

    @mcp.tool(annotations=ToolAnnotations(readOnlyHint=False, destructiveHint=False))
    async def report_outcome(
        memory_ids: list,
        outcome: str,
        action_type: str = "other",
        context: str = None,
        agent_id: str = "user",
        project: str = None,
    ) -> dict:
        """Record action outcome for behavioral learning. Outcomes: success/failure/partial."""
        return await _report_outcome(memory_ids, outcome, action_type, context, agent_id, project)

    @mcp.tool(annotations=ToolAnnotations(readOnlyHint=True, destructiveHint=False))
    async def get_lifecycle_status(memory_id: int = None) -> dict:
        """Get memory lifecycle status — state distribution or single memory state."""
        return await _get_lifecycle_status(memory_id)

    @mcp.tool(annotations=ToolAnnotations(readOnlyHint=False, destructiveHint=False))
    async def set_retention_policy(
        name: str,
        framework: str,
        retention_days: int,
        action: str = "retain",
        applies_to_tags: list = None,
        applies_to_project: str = None,
    ) -> dict:
        """Create a retention policy (GDPR, HIPAA, EU AI Act)."""
        return await _set_retention_policy(
            name, framework, retention_days, action, applies_to_tags, applies_to_project
        )

    @mcp.tool(annotations=ToolAnnotations(readOnlyHint=False, destructiveHint=False))
    async def compact_memories(dry_run: bool = True, profile: str = None) -> dict:
        """Evaluate and compact stale memories through lifecycle transitions. dry_run=True by default.

        Args:
            dry_run: If True (default), show what would happen without changes.
            profile: Profile name to filter.
        """
        return await _compact_memories(dry_run, profile)

    @mcp.tool(annotations=ToolAnnotations(readOnlyHint=True, destructiveHint=False))
    async def get_behavioral_patterns(
        min_confidence: float = 0.0, project: str = None
    ) -> dict:
        """Get learned behavioral patterns from outcome analysis."""
        return await _get_behavioral_patterns(min_confidence, project)

    @mcp.tool(annotations=ToolAnnotations(readOnlyHint=True, destructiveHint=False))
    async def audit_trail(
        event_type: str = None,
        actor: str = None,
        limit: int = 50,
        verify_chain: bool = False,
    ) -> dict:
        """Query compliance audit trail with optional hash chain verification."""
        return await _audit_trail(event_type, actor, limit, verify_chain)

except ImportError:
    V28_AVAILABLE = False  # v2.8 tools unavailable — graceful degradation
|
|
1482
|
-
|
|
1483
|
-
|
|
1484
|
-
# ============================================================================
|
|
1485
|
-
# MCP RESOURCES (Data endpoints)
|
|
1486
|
-
# ============================================================================
|
|
1487
|
-
|
|
1488
|
-
@mcp.resource("memory://recent/{limit}")
|
|
1489
|
-
async def get_recent_memories_resource(limit: str) -> str:
|
|
1490
|
-
"""
|
|
1491
|
-
Resource: Get N most recent memories.
|
|
1492
|
-
|
|
1493
|
-
Usage: memory://recent/10
|
|
1494
|
-
"""
|
|
1495
|
-
try:
|
|
1496
|
-
store = get_store()
|
|
1497
|
-
memories = store.list_all(limit=int(limit))
|
|
1498
|
-
return json.dumps(memories, indent=2)
|
|
1499
|
-
except Exception as e:
|
|
1500
|
-
return json.dumps({"error": _sanitize_error(e)}, indent=2)
|
|
1501
|
-
|
|
1502
|
-
|
|
1503
|
-
@mcp.resource("memory://stats")
|
|
1504
|
-
async def get_stats_resource() -> str:
|
|
1505
|
-
"""
|
|
1506
|
-
Resource: Get system statistics.
|
|
1507
|
-
|
|
1508
|
-
Usage: memory://stats
|
|
1509
|
-
"""
|
|
1510
|
-
try:
|
|
1511
|
-
store = get_store()
|
|
1512
|
-
stats = store.get_stats()
|
|
1513
|
-
return json.dumps(stats, indent=2)
|
|
1514
|
-
except Exception as e:
|
|
1515
|
-
return json.dumps({"error": _sanitize_error(e)}, indent=2)
|
|
1516
|
-
|
|
1517
|
-
|
|
1518
|
-
@mcp.resource("memory://graph/clusters")
|
|
1519
|
-
async def get_clusters_resource() -> str:
|
|
1520
|
-
"""
|
|
1521
|
-
Resource: Get knowledge graph clusters.
|
|
1522
|
-
|
|
1523
|
-
Usage: memory://graph/clusters
|
|
1524
|
-
"""
|
|
1525
|
-
try:
|
|
1526
|
-
engine = get_graph_engine()
|
|
1527
|
-
stats = engine.get_stats()
|
|
1528
|
-
clusters = stats.get('clusters', [])
|
|
1529
|
-
return json.dumps(clusters, indent=2)
|
|
1530
|
-
except Exception as e:
|
|
1531
|
-
return json.dumps({"error": _sanitize_error(e)}, indent=2)
|
|
1532
|
-
|
|
1533
|
-
|
|
1534
|
-
@mcp.resource("memory://patterns/identity")
|
|
1535
|
-
async def get_coding_identity_resource() -> str:
|
|
1536
|
-
"""
|
|
1537
|
-
Resource: Get learned coding identity and patterns.
|
|
1538
|
-
|
|
1539
|
-
Usage: memory://patterns/identity
|
|
1540
|
-
"""
|
|
1541
|
-
try:
|
|
1542
|
-
learner = get_pattern_learner()
|
|
1543
|
-
patterns = learner.get_identity_context(min_confidence=0.5)
|
|
1544
|
-
return json.dumps(patterns, indent=2)
|
|
1545
|
-
except Exception as e:
|
|
1546
|
-
return json.dumps({"error": _sanitize_error(e)}, indent=2)
|
|
1547
|
-
|
|
1548
|
-
|
|
1549
|
-
@mcp.resource("memory://learning/status")
|
|
1550
|
-
async def get_learning_status_resource() -> str:
|
|
1551
|
-
"""
|
|
1552
|
-
Resource: Get learning system status.
|
|
1553
|
-
|
|
1554
|
-
Usage: memory://learning/status
|
|
1555
|
-
"""
|
|
1556
|
-
try:
|
|
1557
|
-
if not LEARNING_AVAILABLE:
|
|
1558
|
-
return json.dumps({"available": False, "message": "Learning deps not installed"}, indent=2)
|
|
1559
|
-
status = get_learning_status()
|
|
1560
|
-
return json.dumps(status, indent=2)
|
|
1561
|
-
except Exception as e:
|
|
1562
|
-
return json.dumps({"error": _sanitize_error(e)}, indent=2)
|
|
1563
|
-
|
|
1564
|
-
|
|
1565
|
-
@mcp.resource("memory://engagement")
|
|
1566
|
-
async def get_engagement_resource() -> str:
|
|
1567
|
-
"""
|
|
1568
|
-
Resource: Get engagement metrics.
|
|
1569
|
-
|
|
1570
|
-
Usage: memory://engagement
|
|
1571
|
-
"""
|
|
1572
|
-
try:
|
|
1573
|
-
if not LEARNING_AVAILABLE:
|
|
1574
|
-
return json.dumps({"available": False}, indent=2)
|
|
1575
|
-
tracker = get_engagement_tracker()
|
|
1576
|
-
if tracker:
|
|
1577
|
-
stats = tracker.get_engagement_stats()
|
|
1578
|
-
return json.dumps(stats, indent=2)
|
|
1579
|
-
return json.dumps({"available": False}, indent=2)
|
|
1580
|
-
except Exception as e:
|
|
1581
|
-
return json.dumps({"error": _sanitize_error(e)}, indent=2)
|
|
1582
|
-
|
|
1583
|
-
|
|
1584
|
-
# ============================================================================
|
|
1585
|
-
# MCP PROMPTS (Template injection)
|
|
1586
|
-
# ============================================================================
|
|
1587
|
-
|
|
1588
|
-
@mcp.prompt()
async def coding_identity_prompt() -> str:
    """
    Generate prompt with user's learned coding identity.

    Inject this at the start of conversations for personalized assistance
    based on learned preferences and patterns.
    """
    try:
        patterns = get_pattern_learner().get_identity_context(min_confidence=0.6)

        if not patterns:
            return "# Coding Identity\n\nNo patterns learned yet. Use remember() to save coding decisions and preferences."

        prompt = "# Your Coding Identity (Learned from History)\n\n"
        prompt += "SuperLocalMemory has learned these patterns from your past decisions:\n\n"

        # Emit one line per known pattern category, in a fixed display order.
        sections = (
            ('frameworks', 'Preferred Frameworks'),
            ('style', 'Coding Style'),
            ('testing', 'Testing Approach'),
            ('api_style', 'API Style'),
        )
        for key, label in sections:
            if key in patterns:
                prompt += f"**{label}:** {', '.join(patterns[key])}\n"

        prompt += "\n*Use this context to provide personalized suggestions aligned with established preferences.*"

        return prompt

    except Exception as e:
        return f"# Coding Identity\n\nError loading patterns: {_sanitize_error(e)}"
|
|
1624
|
-
|
|
1625
|
-
|
|
1626
|
-
@mcp.prompt()
async def project_context_prompt(project_name: str) -> str:
    """
    Generate prompt with project-specific context.

    Args:
        project_name: Name of the project to get context for

    Returns:
        Formatted prompt with relevant project memories
    """
    try:
        # Search for project-related memories using the project: qualifier.
        memories = get_store().search(f"project:{project_name}", limit=20)

        if not memories:
            return f"# Project Context: {project_name}\n\nNo memories found for this project. Use remember() with project='{project_name}' to save project-specific context."

        prompt = f"# Project Context: {project_name}\n\n"
        prompt += f"Found {len(memories)} relevant memories:\n\n"

        # Render at most the top 10 hits, each with an optional tag line.
        for idx, mem in enumerate(memories[:10], 1):
            prompt += f"{idx}. {mem['content'][:150]}\n"
            if mem.get('tags'):
                prompt += f" Tags: {', '.join(mem['tags'])}\n"
            prompt += "\n"

        if len(memories) > 10:
            prompt += f"\n*Showing top 10 of {len(memories)} total memories.*"

        return prompt

    except Exception as e:
        return f"# Project Context: {project_name}\n\nError loading context: {_sanitize_error(e)}"
|
|
1662
|
-
|
|
1663
|
-
|
|
1664
|
-
# ============================================================================
|
|
1665
|
-
# SERVER STARTUP
|
|
1666
|
-
# ============================================================================
|
|
1667
|
-
|
|
1668
|
-
@mcp.tool(annotations=ToolAnnotations(
    readOnlyHint=True,
    destructiveHint=False,
    openWorldHint=False,
))
async def get_attribution() -> dict:
    """
    Get creator attribution and provenance verification for SuperLocalMemory.

    Returns creator information, license details, and verification status
    for the 3-layer Qualixar attribution system.

    Returns:
        {
            "creator": str,
            "license": str,
            "platform": str,
            "layers": {
                "visible": bool,
                "cryptographic": bool,
                "steganographic": bool
            }
        }
    """
    try:
        attribution = get_store().get_attribution()

        payload = {
            "success": True,
            **attribution,
            "website": "https://superlocalmemory.com",
            "author_website": "https://varunpratap.com",
            "attribution_layers": {
                # Layer 1 (visible text) is always present; layers 2/3 depend
                # on the optional attribution module being importable.
                "layer1_visible": True,
                "layer2_cryptographic": ATTRIBUTION_AVAILABLE,
                "layer3_steganographic": ATTRIBUTION_AVAILABLE,
            },
        }
        return _sign_response(payload)

    except Exception as e:
        return {
            "success": False,
            "error": _sanitize_error(e),
            "message": "Failed to get attribution"
        }
|
|
1714
|
-
|
|
1715
|
-
|
|
1716
|
-
if __name__ == "__main__":
|
|
1717
|
-
import argparse
|
|
1718
|
-
|
|
1719
|
-
# Parse command line arguments
|
|
1720
|
-
parser = argparse.ArgumentParser(
|
|
1721
|
-
description="SuperLocalMemory V2 - MCP Server for Universal IDE Integration"
|
|
1722
|
-
)
|
|
1723
|
-
parser.add_argument(
|
|
1724
|
-
"--transport",
|
|
1725
|
-
choices=["stdio", "http", "sse", "streamable-http"],
|
|
1726
|
-
default="stdio",
|
|
1727
|
-
help="Transport method: stdio for local IDEs (default), sse/streamable-http for ChatGPT and remote access"
|
|
1728
|
-
)
|
|
1729
|
-
parser.add_argument(
|
|
1730
|
-
"--port",
|
|
1731
|
-
type=int,
|
|
1732
|
-
default=8417,
|
|
1733
|
-
help="Port for HTTP transport (default 8417)"
|
|
1734
|
-
)
|
|
1735
|
-
|
|
1736
|
-
args = parser.parse_args()
|
|
1737
|
-
|
|
1738
|
-
# Print startup message to stderr (stdout is used for MCP protocol)
|
|
1739
|
-
print("=" * 60, file=sys.stderr)
|
|
1740
|
-
print("SuperLocalMemory V2 - MCP Server", file=sys.stderr)
|
|
1741
|
-
print("Version: 2.8.3", file=sys.stderr)
|
|
1742
|
-
print("=" * 60, file=sys.stderr)
|
|
1743
|
-
print("Created by: Varun Pratap Bhardwaj (Solution Architect)", file=sys.stderr)
|
|
1744
|
-
print("Repository: https://github.com/varun369/SuperLocalMemoryV2", file=sys.stderr)
|
|
1745
|
-
print("License: MIT (attribution required - see ATTRIBUTION.md)", file=sys.stderr)
|
|
1746
|
-
print("=" * 60, file=sys.stderr)
|
|
1747
|
-
print("", file=sys.stderr)
|
|
1748
|
-
print(f"Transport: {args.transport}", file=sys.stderr)
|
|
1749
|
-
|
|
1750
|
-
if args.transport == "http":
|
|
1751
|
-
print(f"Port: {args.port}", file=sys.stderr)
|
|
1752
|
-
|
|
1753
|
-
print(f"Database: {DB_PATH}", file=sys.stderr)
|
|
1754
|
-
print("", file=sys.stderr)
|
|
1755
|
-
print("MCP Tools Available:", file=sys.stderr)
|
|
1756
|
-
print(" - remember(content, tags, project, importance)", file=sys.stderr)
|
|
1757
|
-
print(" - recall(query, limit, min_score)", file=sys.stderr)
|
|
1758
|
-
print(" - search(query) [ChatGPT Connector]", file=sys.stderr)
|
|
1759
|
-
print(" - fetch(id) [ChatGPT Connector]", file=sys.stderr)
|
|
1760
|
-
print(" - list_recent(limit)", file=sys.stderr)
|
|
1761
|
-
print(" - get_status()", file=sys.stderr)
|
|
1762
|
-
print(" - build_graph()", file=sys.stderr)
|
|
1763
|
-
print(" - switch_profile(name) [Project/Profile switch]", file=sys.stderr)
|
|
1764
|
-
print(" - backup_status() [Auto-Backup]", file=sys.stderr)
|
|
1765
|
-
if LEARNING_AVAILABLE:
|
|
1766
|
-
print(" - memory_used(memory_id, query, usefulness) [v2.7 Learning]", file=sys.stderr)
|
|
1767
|
-
print(" - get_learned_patterns(min_confidence, category) [v2.7 Learning]", file=sys.stderr)
|
|
1768
|
-
print(" - correct_pattern(pattern_id, correct_value) [v2.7 Learning]", file=sys.stderr)
|
|
1769
|
-
if V28_AVAILABLE:
|
|
1770
|
-
print(" - report_outcome(memory_ids, outcome) [v2.8 Behavioral]", file=sys.stderr)
|
|
1771
|
-
print(" - get_lifecycle_status(memory_id) [v2.8 Lifecycle]", file=sys.stderr)
|
|
1772
|
-
print(" - set_retention_policy(name, framework, days) [v2.8 Compliance]", file=sys.stderr)
|
|
1773
|
-
print(" - compact_memories(dry_run) [v2.8 Lifecycle]", file=sys.stderr)
|
|
1774
|
-
print(" - get_behavioral_patterns(min_confidence) [v2.8 Behavioral]", file=sys.stderr)
|
|
1775
|
-
print(" - audit_trail(event_type, verify_chain) [v2.8 Compliance]", file=sys.stderr)
|
|
1776
|
-
print("", file=sys.stderr)
|
|
1777
|
-
print("MCP Resources Available:", file=sys.stderr)
|
|
1778
|
-
print(" - memory://recent/{limit}", file=sys.stderr)
|
|
1779
|
-
print(" - memory://stats", file=sys.stderr)
|
|
1780
|
-
print(" - memory://graph/clusters", file=sys.stderr)
|
|
1781
|
-
print(" - memory://patterns/identity", file=sys.stderr)
|
|
1782
|
-
if LEARNING_AVAILABLE:
|
|
1783
|
-
print(" - memory://learning/status", file=sys.stderr)
|
|
1784
|
-
print(" - memory://engagement", file=sys.stderr)
|
|
1785
|
-
print("", file=sys.stderr)
|
|
1786
|
-
print("MCP Prompts Available:", file=sys.stderr)
|
|
1787
|
-
print(" - coding_identity_prompt()", file=sys.stderr)
|
|
1788
|
-
print(" - project_context_prompt(project_name)", file=sys.stderr)
|
|
1789
|
-
print("", file=sys.stderr)
|
|
1790
|
-
print("Status: Starting server...", file=sys.stderr)
|
|
1791
|
-
print("=" * 60, file=sys.stderr)
|
|
1792
|
-
print("", file=sys.stderr)
|
|
1793
|
-
|
|
1794
|
-
# Run MCP server
|
|
1795
|
-
if args.transport == "stdio":
|
|
1796
|
-
# stdio transport for local IDEs (default)
|
|
1797
|
-
mcp.run(transport="stdio")
|
|
1798
|
-
elif args.transport == "streamable-http":
|
|
1799
|
-
# Streamable HTTP transport (recommended for ChatGPT 2026+)
|
|
1800
|
-
print(f"Streamable HTTP server at http://localhost:{args.port}", file=sys.stderr)
|
|
1801
|
-
print("ChatGPT setup: expose via ngrok, paste URL in Settings > Connectors", file=sys.stderr)
|
|
1802
|
-
mcp.run(transport="streamable-http")
|
|
1803
|
-
else:
|
|
1804
|
-
# SSE transport for remote access (ChatGPT, web clients)
|
|
1805
|
-
# "http" is accepted as alias for "sse"
|
|
1806
|
-
print(f"HTTP/SSE server will be available at http://localhost:{args.port}", file=sys.stderr)
|
|
1807
|
-
print("ChatGPT setup: expose via ngrok, paste URL in Settings > Connectors", file=sys.stderr)
|
|
1808
|
-
mcp.run(transport="sse")
|