superlocalmemory 2.8.6 → 3.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +9 -1
- package/NOTICE +63 -0
- package/README.md +165 -480
- package/bin/slm +17 -449
- package/bin/slm-npm +62 -48
- package/conftest.py +5 -0
- package/docs/api-reference.md +284 -0
- package/docs/architecture.md +149 -0
- package/docs/auto-memory.md +150 -0
- package/docs/cli-reference.md +276 -0
- package/docs/compliance.md +191 -0
- package/docs/configuration.md +182 -0
- package/docs/getting-started.md +102 -0
- package/docs/ide-setup.md +261 -0
- package/docs/mcp-tools.md +220 -0
- package/docs/migration-from-v2.md +170 -0
- package/docs/profiles.md +173 -0
- package/docs/troubleshooting.md +310 -0
- package/{configs → ide/configs}/antigravity-mcp.json +3 -3
- package/ide/configs/chatgpt-desktop-mcp.json +16 -0
- package/{configs → ide/configs}/claude-desktop-mcp.json +3 -3
- package/{configs → ide/configs}/codex-mcp.toml +4 -4
- package/{configs → ide/configs}/continue-mcp.yaml +4 -3
- package/{configs → ide/configs}/continue-skills.yaml +6 -6
- package/ide/configs/cursor-mcp.json +15 -0
- package/{configs → ide/configs}/gemini-cli-mcp.json +2 -2
- package/{configs → ide/configs}/jetbrains-mcp.json +2 -2
- package/{configs → ide/configs}/opencode-mcp.json +2 -2
- package/{configs → ide/configs}/perplexity-mcp.json +2 -2
- package/{configs → ide/configs}/vscode-copilot-mcp.json +2 -2
- package/{configs → ide/configs}/windsurf-mcp.json +3 -3
- package/{configs → ide/configs}/zed-mcp.json +2 -2
- package/{hooks → ide/hooks}/context-hook.js +9 -20
- package/ide/hooks/memory-list-skill.js +70 -0
- package/ide/hooks/memory-profile-skill.js +101 -0
- package/ide/hooks/memory-recall-skill.js +62 -0
- package/ide/hooks/memory-remember-skill.js +68 -0
- package/ide/hooks/memory-reset-skill.js +160 -0
- package/{hooks → ide/hooks}/post-recall-hook.js +2 -2
- package/ide/integrations/langchain/README.md +106 -0
- package/ide/integrations/langchain/langchain_superlocalmemory/__init__.py +9 -0
- package/ide/integrations/langchain/langchain_superlocalmemory/chat_message_history.py +201 -0
- package/ide/integrations/langchain/pyproject.toml +38 -0
- package/{src/learning → ide/integrations/langchain}/tests/__init__.py +1 -0
- package/ide/integrations/langchain/tests/test_chat_message_history.py +215 -0
- package/ide/integrations/langchain/tests/test_security.py +117 -0
- package/ide/integrations/llamaindex/README.md +81 -0
- package/ide/integrations/llamaindex/llama_index/storage/chat_store/superlocalmemory/__init__.py +9 -0
- package/ide/integrations/llamaindex/llama_index/storage/chat_store/superlocalmemory/base.py +316 -0
- package/ide/integrations/llamaindex/pyproject.toml +43 -0
- package/{src/lifecycle → ide/integrations/llamaindex}/tests/__init__.py +1 -2
- package/ide/integrations/llamaindex/tests/test_chat_store.py +294 -0
- package/ide/integrations/llamaindex/tests/test_security.py +241 -0
- package/{skills → ide/skills}/slm-build-graph/SKILL.md +6 -6
- package/{skills → ide/skills}/slm-list-recent/SKILL.md +5 -5
- package/{skills → ide/skills}/slm-recall/SKILL.md +5 -5
- package/{skills → ide/skills}/slm-remember/SKILL.md +6 -6
- package/{skills → ide/skills}/slm-show-patterns/SKILL.md +7 -7
- package/{skills → ide/skills}/slm-status/SKILL.md +9 -9
- package/{skills → ide/skills}/slm-switch-profile/SKILL.md +9 -9
- package/package.json +13 -22
- package/pyproject.toml +85 -0
- package/scripts/build-dmg.sh +417 -0
- package/scripts/install-skills.ps1 +334 -0
- package/scripts/postinstall.js +2 -2
- package/scripts/start-dashboard.ps1 +52 -0
- package/scripts/start-dashboard.sh +41 -0
- package/scripts/sync-wiki.ps1 +127 -0
- package/scripts/sync-wiki.sh +82 -0
- package/scripts/test-dmg.sh +161 -0
- package/scripts/test-npm-package.ps1 +252 -0
- package/scripts/test-npm-package.sh +207 -0
- package/scripts/verify-install.ps1 +294 -0
- package/scripts/verify-install.sh +266 -0
- package/src/superlocalmemory/__init__.py +0 -0
- package/src/superlocalmemory/attribution/__init__.py +9 -0
- package/src/superlocalmemory/attribution/mathematical_dna.py +235 -0
- package/src/superlocalmemory/attribution/signer.py +153 -0
- package/src/superlocalmemory/attribution/watermark.py +189 -0
- package/src/superlocalmemory/cli/__init__.py +5 -0
- package/src/superlocalmemory/cli/commands.py +245 -0
- package/src/superlocalmemory/cli/main.py +89 -0
- package/src/superlocalmemory/cli/migrate_cmd.py +55 -0
- package/src/superlocalmemory/cli/post_install.py +99 -0
- package/src/superlocalmemory/cli/setup_wizard.py +129 -0
- package/src/superlocalmemory/compliance/__init__.py +0 -0
- package/src/superlocalmemory/compliance/abac.py +204 -0
- package/src/superlocalmemory/compliance/audit.py +314 -0
- package/src/superlocalmemory/compliance/eu_ai_act.py +131 -0
- package/src/superlocalmemory/compliance/gdpr.py +294 -0
- package/src/superlocalmemory/compliance/lifecycle.py +158 -0
- package/src/superlocalmemory/compliance/retention.py +232 -0
- package/src/superlocalmemory/compliance/scheduler.py +148 -0
- package/src/superlocalmemory/core/__init__.py +0 -0
- package/src/superlocalmemory/core/config.py +391 -0
- package/src/superlocalmemory/core/embeddings.py +293 -0
- package/src/superlocalmemory/core/engine.py +701 -0
- package/src/superlocalmemory/core/hooks.py +65 -0
- package/src/superlocalmemory/core/maintenance.py +172 -0
- package/src/superlocalmemory/core/modes.py +140 -0
- package/src/superlocalmemory/core/profiles.py +234 -0
- package/src/superlocalmemory/core/registry.py +117 -0
- package/src/superlocalmemory/dynamics/__init__.py +0 -0
- package/src/superlocalmemory/dynamics/fisher_langevin_coupling.py +223 -0
- package/src/superlocalmemory/encoding/__init__.py +0 -0
- package/src/superlocalmemory/encoding/consolidator.py +485 -0
- package/src/superlocalmemory/encoding/emotional.py +125 -0
- package/src/superlocalmemory/encoding/entity_resolver.py +525 -0
- package/src/superlocalmemory/encoding/entropy_gate.py +104 -0
- package/src/superlocalmemory/encoding/fact_extractor.py +775 -0
- package/src/superlocalmemory/encoding/foresight.py +91 -0
- package/src/superlocalmemory/encoding/graph_builder.py +302 -0
- package/src/superlocalmemory/encoding/observation_builder.py +160 -0
- package/src/superlocalmemory/encoding/scene_builder.py +183 -0
- package/src/superlocalmemory/encoding/signal_inference.py +90 -0
- package/src/superlocalmemory/encoding/temporal_parser.py +426 -0
- package/src/superlocalmemory/encoding/type_router.py +235 -0
- package/src/superlocalmemory/hooks/__init__.py +3 -0
- package/src/superlocalmemory/hooks/auto_capture.py +111 -0
- package/src/superlocalmemory/hooks/auto_recall.py +93 -0
- package/src/superlocalmemory/hooks/ide_connector.py +204 -0
- package/src/superlocalmemory/hooks/rules_engine.py +99 -0
- package/src/superlocalmemory/infra/__init__.py +3 -0
- package/src/superlocalmemory/infra/auth_middleware.py +82 -0
- package/src/superlocalmemory/infra/backup.py +317 -0
- package/src/superlocalmemory/infra/cache_manager.py +267 -0
- package/src/superlocalmemory/infra/event_bus.py +381 -0
- package/src/superlocalmemory/infra/rate_limiter.py +135 -0
- package/src/{webhook_dispatcher.py → superlocalmemory/infra/webhook_dispatcher.py} +104 -101
- package/src/superlocalmemory/learning/__init__.py +0 -0
- package/src/superlocalmemory/learning/adaptive.py +172 -0
- package/src/superlocalmemory/learning/behavioral.py +490 -0
- package/src/superlocalmemory/learning/behavioral_listener.py +94 -0
- package/src/superlocalmemory/learning/bootstrap.py +298 -0
- package/src/superlocalmemory/learning/cross_project.py +399 -0
- package/src/superlocalmemory/learning/database.py +376 -0
- package/src/superlocalmemory/learning/engagement.py +323 -0
- package/src/superlocalmemory/learning/features.py +138 -0
- package/src/superlocalmemory/learning/feedback.py +316 -0
- package/src/superlocalmemory/learning/outcomes.py +255 -0
- package/src/superlocalmemory/learning/project_context.py +366 -0
- package/src/superlocalmemory/learning/ranker.py +155 -0
- package/src/superlocalmemory/learning/source_quality.py +303 -0
- package/src/superlocalmemory/learning/workflows.py +309 -0
- package/src/superlocalmemory/llm/__init__.py +0 -0
- package/src/superlocalmemory/llm/backbone.py +316 -0
- package/src/superlocalmemory/math/__init__.py +0 -0
- package/src/superlocalmemory/math/fisher.py +356 -0
- package/src/superlocalmemory/math/langevin.py +398 -0
- package/src/superlocalmemory/math/sheaf.py +257 -0
- package/src/superlocalmemory/mcp/__init__.py +0 -0
- package/src/superlocalmemory/mcp/resources.py +245 -0
- package/src/superlocalmemory/mcp/server.py +61 -0
- package/src/superlocalmemory/mcp/tools.py +18 -0
- package/src/superlocalmemory/mcp/tools_core.py +305 -0
- package/src/superlocalmemory/mcp/tools_v28.py +223 -0
- package/src/superlocalmemory/mcp/tools_v3.py +286 -0
- package/src/superlocalmemory/retrieval/__init__.py +0 -0
- package/src/superlocalmemory/retrieval/agentic.py +295 -0
- package/src/superlocalmemory/retrieval/ann_index.py +223 -0
- package/src/superlocalmemory/retrieval/bm25_channel.py +185 -0
- package/src/superlocalmemory/retrieval/bridge_discovery.py +170 -0
- package/src/superlocalmemory/retrieval/engine.py +390 -0
- package/src/superlocalmemory/retrieval/entity_channel.py +179 -0
- package/src/superlocalmemory/retrieval/fusion.py +78 -0
- package/src/superlocalmemory/retrieval/profile_channel.py +105 -0
- package/src/superlocalmemory/retrieval/reranker.py +154 -0
- package/src/superlocalmemory/retrieval/semantic_channel.py +232 -0
- package/src/superlocalmemory/retrieval/strategy.py +96 -0
- package/src/superlocalmemory/retrieval/temporal_channel.py +175 -0
- package/src/superlocalmemory/server/__init__.py +1 -0
- package/src/superlocalmemory/server/api.py +248 -0
- package/src/superlocalmemory/server/routes/__init__.py +4 -0
- package/src/superlocalmemory/server/routes/agents.py +107 -0
- package/src/superlocalmemory/server/routes/backup.py +91 -0
- package/src/superlocalmemory/server/routes/behavioral.py +127 -0
- package/src/superlocalmemory/server/routes/compliance.py +160 -0
- package/src/superlocalmemory/server/routes/data_io.py +188 -0
- package/src/superlocalmemory/server/routes/events.py +183 -0
- package/src/superlocalmemory/server/routes/helpers.py +85 -0
- package/src/superlocalmemory/server/routes/learning.py +273 -0
- package/src/superlocalmemory/server/routes/lifecycle.py +116 -0
- package/src/superlocalmemory/server/routes/memories.py +399 -0
- package/src/superlocalmemory/server/routes/profiles.py +219 -0
- package/src/superlocalmemory/server/routes/stats.py +346 -0
- package/src/superlocalmemory/server/routes/v3_api.py +365 -0
- package/src/superlocalmemory/server/routes/ws.py +82 -0
- package/src/superlocalmemory/server/security_middleware.py +57 -0
- package/src/superlocalmemory/server/ui.py +245 -0
- package/src/superlocalmemory/storage/__init__.py +0 -0
- package/src/superlocalmemory/storage/access_control.py +182 -0
- package/src/superlocalmemory/storage/database.py +594 -0
- package/src/superlocalmemory/storage/migrations.py +303 -0
- package/src/superlocalmemory/storage/models.py +406 -0
- package/src/superlocalmemory/storage/schema.py +726 -0
- package/src/superlocalmemory/storage/v2_migrator.py +317 -0
- package/src/superlocalmemory/trust/__init__.py +0 -0
- package/src/superlocalmemory/trust/gate.py +130 -0
- package/src/superlocalmemory/trust/provenance.py +124 -0
- package/src/superlocalmemory/trust/scorer.py +347 -0
- package/src/superlocalmemory/trust/signals.py +153 -0
- package/ui/index.html +278 -5
- package/ui/js/auto-settings.js +70 -0
- package/ui/js/dashboard.js +90 -0
- package/ui/js/fact-detail.js +92 -0
- package/ui/js/feedback.js +2 -2
- package/ui/js/ide-status.js +102 -0
- package/ui/js/math-health.js +98 -0
- package/ui/js/recall-lab.js +127 -0
- package/ui/js/settings.js +2 -2
- package/ui/js/trust-dashboard.js +73 -0
- package/api_server.py +0 -724
- package/bin/aider-smart +0 -72
- package/bin/superlocalmemoryv2-learning +0 -4
- package/bin/superlocalmemoryv2-list +0 -3
- package/bin/superlocalmemoryv2-patterns +0 -4
- package/bin/superlocalmemoryv2-profile +0 -3
- package/bin/superlocalmemoryv2-recall +0 -3
- package/bin/superlocalmemoryv2-remember +0 -3
- package/bin/superlocalmemoryv2-reset +0 -3
- package/bin/superlocalmemoryv2-status +0 -3
- package/configs/chatgpt-desktop-mcp.json +0 -16
- package/configs/cursor-mcp.json +0 -15
- package/hooks/memory-list-skill.js +0 -139
- package/hooks/memory-profile-skill.js +0 -273
- package/hooks/memory-recall-skill.js +0 -114
- package/hooks/memory-remember-skill.js +0 -127
- package/hooks/memory-reset-skill.js +0 -274
- package/mcp_server.py +0 -1808
- package/requirements-core.txt +0 -22
- package/requirements-learning.txt +0 -12
- package/requirements.txt +0 -12
- package/src/agent_registry.py +0 -411
- package/src/auth_middleware.py +0 -61
- package/src/auto_backup.py +0 -459
- package/src/behavioral/__init__.py +0 -49
- package/src/behavioral/behavioral_listener.py +0 -203
- package/src/behavioral/behavioral_patterns.py +0 -275
- package/src/behavioral/cross_project_transfer.py +0 -206
- package/src/behavioral/outcome_inference.py +0 -194
- package/src/behavioral/outcome_tracker.py +0 -193
- package/src/behavioral/tests/__init__.py +0 -4
- package/src/behavioral/tests/test_behavioral_integration.py +0 -108
- package/src/behavioral/tests/test_behavioral_patterns.py +0 -150
- package/src/behavioral/tests/test_cross_project_transfer.py +0 -142
- package/src/behavioral/tests/test_mcp_behavioral.py +0 -139
- package/src/behavioral/tests/test_mcp_report_outcome.py +0 -117
- package/src/behavioral/tests/test_outcome_inference.py +0 -107
- package/src/behavioral/tests/test_outcome_tracker.py +0 -96
- package/src/cache_manager.py +0 -518
- package/src/compliance/__init__.py +0 -48
- package/src/compliance/abac_engine.py +0 -149
- package/src/compliance/abac_middleware.py +0 -116
- package/src/compliance/audit_db.py +0 -215
- package/src/compliance/audit_logger.py +0 -148
- package/src/compliance/retention_manager.py +0 -289
- package/src/compliance/retention_scheduler.py +0 -186
- package/src/compliance/tests/__init__.py +0 -4
- package/src/compliance/tests/test_abac_enforcement.py +0 -95
- package/src/compliance/tests/test_abac_engine.py +0 -124
- package/src/compliance/tests/test_abac_mcp_integration.py +0 -118
- package/src/compliance/tests/test_audit_db.py +0 -123
- package/src/compliance/tests/test_audit_logger.py +0 -98
- package/src/compliance/tests/test_mcp_audit.py +0 -128
- package/src/compliance/tests/test_mcp_retention_policy.py +0 -125
- package/src/compliance/tests/test_retention_manager.py +0 -131
- package/src/compliance/tests/test_retention_scheduler.py +0 -99
- package/src/compression/__init__.py +0 -25
- package/src/compression/cli.py +0 -150
- package/src/compression/cold_storage.py +0 -217
- package/src/compression/config.py +0 -72
- package/src/compression/orchestrator.py +0 -133
- package/src/compression/tier2_compressor.py +0 -228
- package/src/compression/tier3_compressor.py +0 -153
- package/src/compression/tier_classifier.py +0 -148
- package/src/db_connection_manager.py +0 -536
- package/src/embedding_engine.py +0 -63
- package/src/embeddings/__init__.py +0 -47
- package/src/embeddings/cache.py +0 -70
- package/src/embeddings/cli.py +0 -113
- package/src/embeddings/constants.py +0 -47
- package/src/embeddings/database.py +0 -91
- package/src/embeddings/engine.py +0 -247
- package/src/embeddings/model_loader.py +0 -145
- package/src/event_bus.py +0 -562
- package/src/graph/__init__.py +0 -36
- package/src/graph/build_helpers.py +0 -74
- package/src/graph/cli.py +0 -87
- package/src/graph/cluster_builder.py +0 -188
- package/src/graph/cluster_summary.py +0 -148
- package/src/graph/constants.py +0 -47
- package/src/graph/edge_builder.py +0 -162
- package/src/graph/entity_extractor.py +0 -95
- package/src/graph/graph_core.py +0 -226
- package/src/graph/graph_search.py +0 -231
- package/src/graph/hierarchical.py +0 -207
- package/src/graph/schema.py +0 -99
- package/src/graph_engine.py +0 -52
- package/src/hnsw_index.py +0 -628
- package/src/hybrid_search.py +0 -46
- package/src/learning/__init__.py +0 -217
- package/src/learning/adaptive_ranker.py +0 -682
- package/src/learning/bootstrap/__init__.py +0 -69
- package/src/learning/bootstrap/constants.py +0 -93
- package/src/learning/bootstrap/db_queries.py +0 -316
- package/src/learning/bootstrap/sampling.py +0 -82
- package/src/learning/bootstrap/text_utils.py +0 -71
- package/src/learning/cross_project_aggregator.py +0 -857
- package/src/learning/db/__init__.py +0 -40
- package/src/learning/db/constants.py +0 -44
- package/src/learning/db/schema.py +0 -279
- package/src/learning/engagement_tracker.py +0 -628
- package/src/learning/feature_extractor.py +0 -708
- package/src/learning/feedback_collector.py +0 -806
- package/src/learning/learning_db.py +0 -915
- package/src/learning/project_context_manager.py +0 -572
- package/src/learning/ranking/__init__.py +0 -33
- package/src/learning/ranking/constants.py +0 -84
- package/src/learning/ranking/helpers.py +0 -278
- package/src/learning/source_quality_scorer.py +0 -676
- package/src/learning/synthetic_bootstrap.py +0 -755
- package/src/learning/tests/test_adaptive_ranker.py +0 -325
- package/src/learning/tests/test_adaptive_ranker_v28.py +0 -60
- package/src/learning/tests/test_aggregator.py +0 -306
- package/src/learning/tests/test_auto_retrain_v28.py +0 -35
- package/src/learning/tests/test_e2e_ranking_v28.py +0 -82
- package/src/learning/tests/test_feature_extractor_v28.py +0 -93
- package/src/learning/tests/test_feedback_collector.py +0 -294
- package/src/learning/tests/test_learning_db.py +0 -602
- package/src/learning/tests/test_learning_db_v28.py +0 -110
- package/src/learning/tests/test_learning_init_v28.py +0 -48
- package/src/learning/tests/test_outcome_signals.py +0 -48
- package/src/learning/tests/test_project_context.py +0 -292
- package/src/learning/tests/test_schema_migration.py +0 -319
- package/src/learning/tests/test_signal_inference.py +0 -397
- package/src/learning/tests/test_source_quality.py +0 -351
- package/src/learning/tests/test_synthetic_bootstrap.py +0 -429
- package/src/learning/tests/test_workflow_miner.py +0 -318
- package/src/learning/workflow_pattern_miner.py +0 -655
- package/src/lifecycle/__init__.py +0 -54
- package/src/lifecycle/bounded_growth.py +0 -239
- package/src/lifecycle/compaction_engine.py +0 -226
- package/src/lifecycle/lifecycle_engine.py +0 -355
- package/src/lifecycle/lifecycle_evaluator.py +0 -257
- package/src/lifecycle/lifecycle_scheduler.py +0 -130
- package/src/lifecycle/retention_policy.py +0 -285
- package/src/lifecycle/tests/test_bounded_growth.py +0 -193
- package/src/lifecycle/tests/test_compaction.py +0 -179
- package/src/lifecycle/tests/test_lifecycle_engine.py +0 -137
- package/src/lifecycle/tests/test_lifecycle_evaluation.py +0 -177
- package/src/lifecycle/tests/test_lifecycle_scheduler.py +0 -127
- package/src/lifecycle/tests/test_lifecycle_search.py +0 -109
- package/src/lifecycle/tests/test_mcp_compact.py +0 -149
- package/src/lifecycle/tests/test_mcp_lifecycle_status.py +0 -114
- package/src/lifecycle/tests/test_retention_policy.py +0 -162
- package/src/mcp_tools_v28.py +0 -281
- package/src/memory/__init__.py +0 -36
- package/src/memory/cli.py +0 -205
- package/src/memory/constants.py +0 -39
- package/src/memory/helpers.py +0 -28
- package/src/memory/schema.py +0 -166
- package/src/memory-profiles.py +0 -595
- package/src/memory-reset.py +0 -491
- package/src/memory_compression.py +0 -989
- package/src/memory_store_v2.py +0 -1155
- package/src/migrate_v1_to_v2.py +0 -629
- package/src/pattern_learner.py +0 -34
- package/src/patterns/__init__.py +0 -24
- package/src/patterns/analyzers.py +0 -251
- package/src/patterns/learner.py +0 -271
- package/src/patterns/scoring.py +0 -171
- package/src/patterns/store.py +0 -225
- package/src/patterns/terminology.py +0 -140
- package/src/provenance_tracker.py +0 -312
- package/src/qualixar_attribution.py +0 -139
- package/src/qualixar_watermark.py +0 -78
- package/src/query_optimizer.py +0 -511
- package/src/rate_limiter.py +0 -83
- package/src/search/__init__.py +0 -20
- package/src/search/cli.py +0 -77
- package/src/search/constants.py +0 -26
- package/src/search/engine.py +0 -241
- package/src/search/fusion.py +0 -122
- package/src/search/index_loader.py +0 -114
- package/src/search/methods.py +0 -162
- package/src/search_engine_v2.py +0 -401
- package/src/setup_validator.py +0 -482
- package/src/subscription_manager.py +0 -391
- package/src/tree/__init__.py +0 -59
- package/src/tree/builder.py +0 -185
- package/src/tree/nodes.py +0 -202
- package/src/tree/queries.py +0 -257
- package/src/tree/schema.py +0 -80
- package/src/tree_manager.py +0 -19
- package/src/trust/__init__.py +0 -45
- package/src/trust/constants.py +0 -66
- package/src/trust/queries.py +0 -157
- package/src/trust/schema.py +0 -95
- package/src/trust/scorer.py +0 -299
- package/src/trust/signals.py +0 -95
- package/src/trust_scorer.py +0 -44
- package/ui/app.js +0 -1588
- package/ui/js/graph-cytoscape-monolithic-backup.js +0 -1168
- package/ui/js/graph-cytoscape.js +0 -1168
- package/ui/js/graph-d3-backup.js +0 -32
- package/ui/js/graph.js +0 -32
- package/ui_server.py +0 -286
- /package/docs/{ACCESSIBILITY.md → v2-archive/ACCESSIBILITY.md} +0 -0
- /package/docs/{ARCHITECTURE.md → v2-archive/ARCHITECTURE.md} +0 -0
- /package/docs/{CLI-COMMANDS-REFERENCE.md → v2-archive/CLI-COMMANDS-REFERENCE.md} +0 -0
- /package/docs/{COMPRESSION-README.md → v2-archive/COMPRESSION-README.md} +0 -0
- /package/docs/{FRAMEWORK-INTEGRATIONS.md → v2-archive/FRAMEWORK-INTEGRATIONS.md} +0 -0
- /package/docs/{MCP-MANUAL-SETUP.md → v2-archive/MCP-MANUAL-SETUP.md} +0 -0
- /package/docs/{MCP-TROUBLESHOOTING.md → v2-archive/MCP-TROUBLESHOOTING.md} +0 -0
- /package/docs/{PATTERN-LEARNING.md → v2-archive/PATTERN-LEARNING.md} +0 -0
- /package/docs/{PROFILES-GUIDE.md → v2-archive/PROFILES-GUIDE.md} +0 -0
- /package/docs/{RESET-GUIDE.md → v2-archive/RESET-GUIDE.md} +0 -0
- /package/docs/{SEARCH-ENGINE-V2.2.0.md → v2-archive/SEARCH-ENGINE-V2.2.0.md} +0 -0
- /package/docs/{SEARCH-INTEGRATION-GUIDE.md → v2-archive/SEARCH-INTEGRATION-GUIDE.md} +0 -0
- /package/docs/{UI-SERVER.md → v2-archive/UI-SERVER.md} +0 -0
- /package/docs/{UNIVERSAL-INTEGRATION.md → v2-archive/UNIVERSAL-INTEGRATION.md} +0 -0
- /package/docs/{V2.2.0-OPTIONAL-SEARCH.md → v2-archive/V2.2.0-OPTIONAL-SEARCH.md} +0 -0
- /package/docs/{WINDOWS-INSTALL-README.txt → v2-archive/WINDOWS-INSTALL-README.txt} +0 -0
- /package/docs/{WINDOWS-POST-INSTALL.txt → v2-archive/WINDOWS-POST-INSTALL.txt} +0 -0
- /package/docs/{example_graph_usage.py → v2-archive/example_graph_usage.py} +0 -0
- /package/{completions → ide/completions}/slm.bash +0 -0
- /package/{completions → ide/completions}/slm.zsh +0 -0
- /package/{configs → ide/configs}/cody-commands.json +0 -0
- /package/{install-skills.sh → scripts/install-skills.sh} +0 -0
- /package/{install.ps1 → scripts/install.ps1} +0 -0
- /package/{install.sh → scripts/install.sh} +0 -0
|
@@ -1,915 +0,0 @@
|
|
|
1
|
-
#!/usr/bin/env python3
|
|
2
|
-
# SPDX-License-Identifier: MIT
|
|
3
|
-
# Copyright (c) 2026 SuperLocalMemory (superlocalmemory.com)
|
|
4
|
-
"""
|
|
5
|
-
LearningDB — Manages the separate learning.db for behavioral data.
|
|
6
|
-
|
|
7
|
-
CRITICAL DESIGN DECISIONS:
|
|
8
|
-
1. learning.db is SEPARATE from memory.db (GDPR erasable, security isolation)
|
|
9
|
-
2. All tables use CREATE TABLE IF NOT EXISTS (safe for re-runs)
|
|
10
|
-
3. WAL mode for concurrent read/write from multiple agents
|
|
11
|
-
4. Singleton pattern matches existing DbConnectionManager approach
|
|
12
|
-
5. Thread-safe via threading.Lock on write operations
|
|
13
|
-
|
|
14
|
-
Tables (6):
|
|
15
|
-
transferable_patterns — Layer 1: Cross-project tech preferences
|
|
16
|
-
workflow_patterns — Layer 3: Sequence + temporal patterns
|
|
17
|
-
ranking_feedback — Feedback from all channels (MCP, CLI, dashboard)
|
|
18
|
-
ranking_models — Model metadata and training history
|
|
19
|
-
source_quality — Per-source learning (which tools produce better memories)
|
|
20
|
-
engagement_metrics — Local-only engagement stats (never transmitted)
|
|
21
|
-
"""
|
|
22
|
-
|
|
23
|
-
import json
|
|
24
|
-
import logging
|
|
25
|
-
import sqlite3
|
|
26
|
-
import threading
|
|
27
|
-
from datetime import datetime, date
|
|
28
|
-
from pathlib import Path
|
|
29
|
-
from typing import Optional, Dict, List, Any
|
|
30
|
-
|
|
31
|
-
from .db.constants import MEMORY_DIR, LEARNING_DB_PATH, DEFAULT_PROFILE
|
|
32
|
-
from .db.schema import initialize_schema
|
|
33
|
-
|
|
34
|
-
logger = logging.getLogger("superlocalmemory.learning.db")
|
|
35
|
-
|
|
36
|
-
|
|
37
|
-
class LearningDB:
|
|
38
|
-
"""
|
|
39
|
-
Manages the learning.db database for behavioral data.
|
|
40
|
-
|
|
41
|
-
Singleton per database path. Thread-safe writes.
|
|
42
|
-
Separate from memory.db for GDPR compliance and security isolation.
|
|
43
|
-
|
|
44
|
-
Usage:
|
|
45
|
-
db = LearningDB()
|
|
46
|
-
db.store_feedback(query_hash="abc123", memory_id=42, signal_type="mcp_used")
|
|
47
|
-
stats = db.get_stats()
|
|
48
|
-
"""
|
|
49
|
-
|
|
50
|
-
_instances: Dict[str, "LearningDB"] = {}
|
|
51
|
-
_instances_lock = threading.Lock()
|
|
52
|
-
|
|
53
|
-
@classmethod
|
|
54
|
-
def get_instance(cls, db_path: Optional[Path] = None) -> "LearningDB":
|
|
55
|
-
"""Get or create the singleton LearningDB."""
|
|
56
|
-
if db_path is None:
|
|
57
|
-
db_path = LEARNING_DB_PATH
|
|
58
|
-
key = str(db_path)
|
|
59
|
-
with cls._instances_lock:
|
|
60
|
-
if key not in cls._instances:
|
|
61
|
-
cls._instances[key] = cls(db_path)
|
|
62
|
-
return cls._instances[key]
|
|
63
|
-
|
|
64
|
-
@classmethod
|
|
65
|
-
def reset_instance(cls, db_path: Optional[Path] = None):
|
|
66
|
-
"""Remove singleton. Used for testing."""
|
|
67
|
-
with cls._instances_lock:
|
|
68
|
-
if db_path is None:
|
|
69
|
-
cls._instances.clear()
|
|
70
|
-
else:
|
|
71
|
-
key = str(db_path)
|
|
72
|
-
if key in cls._instances:
|
|
73
|
-
del cls._instances[key]
|
|
74
|
-
|
|
75
|
-
def __init__(self, db_path: Optional[Path] = None):
|
|
76
|
-
self.db_path = Path(db_path) if db_path else LEARNING_DB_PATH
|
|
77
|
-
self._write_lock = threading.Lock()
|
|
78
|
-
self._ensure_directory()
|
|
79
|
-
self._init_schema()
|
|
80
|
-
|
|
81
|
-
def _get_active_profile(self) -> str:
|
|
82
|
-
"""Get the active profile name from profiles.json. Returns 'default' if unavailable."""
|
|
83
|
-
try:
|
|
84
|
-
import json
|
|
85
|
-
profiles_path = self.db_path.parent / "profiles.json"
|
|
86
|
-
if profiles_path.exists():
|
|
87
|
-
with open(profiles_path, 'r') as f:
|
|
88
|
-
config = json.load(f)
|
|
89
|
-
return config.get('active_profile', 'default')
|
|
90
|
-
except Exception:
|
|
91
|
-
pass
|
|
92
|
-
return "default"
|
|
93
|
-
logger.info("LearningDB initialized: %s", self.db_path)
|
|
94
|
-
|
|
95
|
-
def _ensure_directory(self):
|
|
96
|
-
"""Ensure the parent directory exists."""
|
|
97
|
-
self.db_path.parent.mkdir(parents=True, exist_ok=True)
|
|
98
|
-
|
|
99
|
-
def _get_connection(self) -> sqlite3.Connection:
|
|
100
|
-
"""Get a new database connection with standard pragmas."""
|
|
101
|
-
conn = sqlite3.connect(str(self.db_path), timeout=10)
|
|
102
|
-
conn.row_factory = sqlite3.Row
|
|
103
|
-
conn.execute("PRAGMA journal_mode=WAL")
|
|
104
|
-
conn.execute("PRAGMA busy_timeout=5000")
|
|
105
|
-
conn.execute("PRAGMA foreign_keys=ON")
|
|
106
|
-
return conn
|
|
107
|
-
|
|
108
|
-
def _init_schema(self):
|
|
109
|
-
"""Create all learning tables if they don't exist."""
|
|
110
|
-
conn = self._get_connection()
|
|
111
|
-
try:
|
|
112
|
-
initialize_schema(conn)
|
|
113
|
-
finally:
|
|
114
|
-
conn.close()
|
|
115
|
-
|
|
116
|
-
# ======================================================================
|
|
117
|
-
# Feedback Operations
|
|
118
|
-
# ======================================================================
|
|
119
|
-
|
|
120
|
-
def store_feedback(
|
|
121
|
-
self,
|
|
122
|
-
query_hash: str,
|
|
123
|
-
memory_id: int,
|
|
124
|
-
signal_type: str,
|
|
125
|
-
signal_value: float = 1.0,
|
|
126
|
-
channel: str = "mcp",
|
|
127
|
-
query_keywords: Optional[str] = None,
|
|
128
|
-
rank_position: Optional[int] = None,
|
|
129
|
-
source_tool: Optional[str] = None,
|
|
130
|
-
dwell_time: Optional[float] = None,
|
|
131
|
-
profile: Optional[str] = None,
|
|
132
|
-
) -> int:
|
|
133
|
-
"""
|
|
134
|
-
Store a ranking feedback signal.
|
|
135
|
-
|
|
136
|
-
Args:
|
|
137
|
-
query_hash: SHA256[:16] of the query (privacy-preserving)
|
|
138
|
-
memory_id: ID of the memory in memory.db
|
|
139
|
-
signal_type: One of 'mcp_used', 'cli_useful', 'dashboard_click', 'passive_decay'
|
|
140
|
-
signal_value: 1.0=strong positive, 0.5=weak, 0.0=negative
|
|
141
|
-
channel: 'mcp', 'cli', or 'dashboard'
|
|
142
|
-
query_keywords: Top keywords for grouping (optional)
|
|
143
|
-
rank_position: Where it appeared in results (1-50)
|
|
144
|
-
source_tool: Tool that originated the query (e.g., 'claude-desktop')
|
|
145
|
-
dwell_time: Seconds spent viewing (dashboard only)
|
|
146
|
-
profile: Active profile name (v2.7.4 — per-profile learning)
|
|
147
|
-
|
|
148
|
-
Returns:
|
|
149
|
-
Row ID of the inserted feedback record.
|
|
150
|
-
"""
|
|
151
|
-
# v2.7.4: Detect active profile if not provided
|
|
152
|
-
if not profile:
|
|
153
|
-
profile = self._get_active_profile()
|
|
154
|
-
|
|
155
|
-
with self._write_lock:
|
|
156
|
-
conn = self._get_connection()
|
|
157
|
-
try:
|
|
158
|
-
cursor = conn.cursor()
|
|
159
|
-
cursor.execute('''
|
|
160
|
-
INSERT INTO ranking_feedback
|
|
161
|
-
(query_hash, memory_id, signal_type, signal_value,
|
|
162
|
-
channel, query_keywords, rank_position, source_tool,
|
|
163
|
-
dwell_time, profile)
|
|
164
|
-
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
|
|
165
|
-
''', (
|
|
166
|
-
query_hash, memory_id, signal_type, signal_value,
|
|
167
|
-
channel, query_keywords, rank_position, source_tool,
|
|
168
|
-
dwell_time, profile,
|
|
169
|
-
))
|
|
170
|
-
conn.commit()
|
|
171
|
-
row_id = cursor.lastrowid
|
|
172
|
-
logger.debug(
|
|
173
|
-
"Feedback stored: memory=%d, type=%s, value=%.1f",
|
|
174
|
-
memory_id, signal_type, signal_value
|
|
175
|
-
)
|
|
176
|
-
return row_id
|
|
177
|
-
except Exception as e:
|
|
178
|
-
conn.rollback()
|
|
179
|
-
logger.error("Failed to store feedback: %s", e)
|
|
180
|
-
raise
|
|
181
|
-
finally:
|
|
182
|
-
conn.close()
|
|
183
|
-
|
|
184
|
-
def get_feedback_count(self, profile_scoped: bool = False) -> int:
|
|
185
|
-
"""Get total number of feedback signals.
|
|
186
|
-
|
|
187
|
-
Args:
|
|
188
|
-
profile_scoped: If True, count only signals for the active profile.
|
|
189
|
-
"""
|
|
190
|
-
conn = self._get_connection()
|
|
191
|
-
try:
|
|
192
|
-
cursor = conn.cursor()
|
|
193
|
-
if profile_scoped:
|
|
194
|
-
profile = self._get_active_profile()
|
|
195
|
-
cursor.execute(
|
|
196
|
-
'SELECT COUNT(*) FROM ranking_feedback WHERE profile = ?',
|
|
197
|
-
(profile,)
|
|
198
|
-
)
|
|
199
|
-
else:
|
|
200
|
-
cursor.execute('SELECT COUNT(*) FROM ranking_feedback')
|
|
201
|
-
return cursor.fetchone()[0]
|
|
202
|
-
finally:
|
|
203
|
-
conn.close()
|
|
204
|
-
|
|
205
|
-
def get_signal_stats_for_memories(self, memory_ids: Optional[List[int]] = None) -> Dict[str, Dict[str, float]]:
    """Aggregate feedback signals per memory (v2.7.4).

    For every memory with at least one signal, report how many signals
    it has and their mean value. Consumed by FeatureExtractor for
    features [10] and [11].

    Args:
        memory_ids: Restrict aggregation to these IDs; None means all
            memories that have signals.

    Returns:
        Mapping of str(memory_id) -> {'count': ..., 'avg_value': ...};
        an empty dict on any database error (best-effort).
    """
    base = (
        'SELECT memory_id, COUNT(*) as cnt, AVG(signal_value) as avg_val '
        'FROM ranking_feedback'
    )
    conn = self._get_connection()
    try:
        cur = conn.cursor()
        if memory_ids:
            marks = ','.join('?' for _ in memory_ids)
            cur.execute(
                base + f' WHERE memory_id IN ({marks}) GROUP BY memory_id',
                tuple(memory_ids),
            )
        else:
            cur.execute(base + ' GROUP BY memory_id')
        return {
            str(r['memory_id']): {
                'count': r['cnt'],
                'avg_value': round(float(r['avg_val']), 3),
            }
            for r in cur.fetchall()
        }
    except Exception as e:
        logger.error("Failed to get signal stats: %s", e)
        return {}
    finally:
        conn.close()
|
|
247
|
-
|
|
248
|
-
def get_unique_query_count(self, profile_scoped: bool = False) -> int:
    """Return how many distinct queries have feedback recorded."""
    conn = self._get_connection()
    try:
        cur = conn.cursor()
        if not profile_scoped:
            cur.execute(
                'SELECT COUNT(DISTINCT query_hash) FROM ranking_feedback'
            )
        else:
            cur.execute(
                'SELECT COUNT(DISTINCT query_hash) FROM ranking_feedback WHERE profile = ?',
                (self._get_active_profile(),)
            )
        return cur.fetchone()[0]
    finally:
        conn.close()
|
|
266
|
-
|
|
267
|
-
def get_feedback_for_training(
    self,
    limit: int = 10000,
) -> List[Dict[str, Any]]:
    """Fetch feedback records for model training, newest first.

    Each dict carries query_hash, query_keywords, memory_id,
    rank_position, signal_type, signal_value, channel, source_tool and
    created_at.
    """
    sql = '''
        SELECT query_hash, query_keywords, memory_id, rank_position,
               signal_type, signal_value, channel, source_tool,
               created_at
        FROM ranking_feedback
        ORDER BY created_at DESC
        LIMIT ?
    '''
    conn = self._get_connection()
    try:
        rows = conn.cursor().execute(sql, (limit,)).fetchall()
        return [dict(r) for r in rows]
    finally:
        conn.close()
|
|
291
|
-
|
|
292
|
-
# ======================================================================
|
|
293
|
-
# Transferable Pattern Operations
|
|
294
|
-
# ======================================================================
|
|
295
|
-
|
|
296
|
-
def upsert_transferable_pattern(
    self,
    pattern_type: str,
    key: str,
    value: str,
    confidence: float,
    evidence_count: int,
    profiles_seen: int = 1,
    decay_factor: float = 1.0,
    contradictions: Optional[List[str]] = None,
) -> int:
    """Insert a transferable pattern, or update the existing row when a
    (pattern_type, key) pair is already present.

    Returns:
        Row ID of the inserted or updated pattern.
    """
    now = datetime.now().isoformat()
    contradictions_json = json.dumps(contradictions or [])

    with self._write_lock:
        conn = self._get_connection()
        try:
            cur = conn.cursor()
            # Is there already a row for this (type, key)?
            cur.execute(
                'SELECT id, first_seen FROM transferable_patterns '
                'WHERE pattern_type = ? AND key = ?',
                (pattern_type, key)
            )
            found = cur.fetchone()

            if found is None:
                cur.execute('''
                    INSERT INTO transferable_patterns
                    (pattern_type, key, value, confidence, evidence_count,
                     profiles_seen, first_seen, last_seen, decay_factor,
                     contradictions, created_at, updated_at)
                    VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
                ''', (
                    pattern_type, key, value, confidence, evidence_count,
                    profiles_seen, now, now, decay_factor,
                    contradictions_json, now, now
                ))
                row_id = cur.lastrowid
            else:
                cur.execute('''
                    UPDATE transferable_patterns
                    SET value = ?, confidence = ?, evidence_count = ?,
                        profiles_seen = ?, last_seen = ?, decay_factor = ?,
                        contradictions = ?, updated_at = ?
                    WHERE id = ?
                ''', (
                    value, confidence, evidence_count,
                    profiles_seen, now, decay_factor,
                    contradictions_json, now, found['id']
                ))
                row_id = found['id']

            conn.commit()
            return row_id
        except Exception as e:
            conn.rollback()
            logger.error("Failed to upsert pattern: %s", e)
            raise
        finally:
            conn.close()
|
|
359
|
-
|
|
360
|
-
def get_transferable_patterns(
    self,
    min_confidence: float = 0.0,
    pattern_type: Optional[str] = None,
    profile_scoped: bool = False,
) -> List[Dict[str, Any]]:
    """Fetch transferable patterns filtered by confidence, type, and profile,
    ordered most-confident first."""
    conn = self._get_connection()
    try:
        cur = conn.cursor()
        suffix = ""
        tail: List[Any] = []
        if profile_scoped:
            suffix = " AND profile = ?"
            tail.append(self._get_active_profile())
        if not pattern_type:
            cur.execute(
                'SELECT * FROM transferable_patterns '
                'WHERE confidence >= ?' + suffix +
                ' ORDER BY confidence DESC',
                tuple([min_confidence] + tail)
            )
        else:
            cur.execute(
                'SELECT * FROM transferable_patterns '
                'WHERE confidence >= ? AND pattern_type = ?' + suffix +
                ' ORDER BY confidence DESC',
                tuple([min_confidence, pattern_type] + tail)
            )
        return [dict(r) for r in cur.fetchall()]
    finally:
        conn.close()
|
|
393
|
-
|
|
394
|
-
# ======================================================================
|
|
395
|
-
# Workflow Pattern Operations
|
|
396
|
-
# ======================================================================
|
|
397
|
-
|
|
398
|
-
def store_workflow_pattern(
    self,
    pattern_type: str,
    pattern_key: str,
    pattern_value: str,
    confidence: float = 0.0,
    evidence_count: int = 0,
    metadata: Optional[Dict] = None,
) -> int:
    """Persist one workflow pattern (sequence, temporal, or style).

    Returns:
        Row ID of the inserted pattern.
    """
    serialized = json.dumps(metadata or {})

    with self._write_lock:
        conn = self._get_connection()
        try:
            cur = conn.cursor()
            cur.execute('''
                INSERT INTO workflow_patterns
                (pattern_type, pattern_key, pattern_value,
                 confidence, evidence_count, metadata)
                VALUES (?, ?, ?, ?, ?, ?)
            ''', (pattern_type, pattern_key, pattern_value,
                  confidence, evidence_count, serialized))
            conn.commit()
            return cur.lastrowid
        except Exception as e:
            conn.rollback()
            logger.error("Failed to store workflow pattern: %s", e)
            raise
        finally:
            conn.close()
|
|
431
|
-
|
|
432
|
-
def get_workflow_patterns(
    self,
    pattern_type: Optional[str] = None,
    min_confidence: float = 0.0,
    profile_scoped: bool = False,
) -> List[Dict[str, Any]]:
    """Fetch workflow patterns filtered by type, confidence, and profile,
    ordered most-confident first."""
    conn = self._get_connection()
    try:
        cur = conn.cursor()
        suffix = ""
        tail: List[Any] = []
        if profile_scoped:
            suffix = " AND profile = ?"
            tail.append(self._get_active_profile())
        if not pattern_type:
            cur.execute(
                'SELECT * FROM workflow_patterns '
                'WHERE confidence >= ?' + suffix +
                ' ORDER BY confidence DESC',
                tuple([min_confidence] + tail)
            )
        else:
            cur.execute(
                'SELECT * FROM workflow_patterns '
                'WHERE pattern_type = ? AND confidence >= ?' + suffix +
                ' ORDER BY confidence DESC',
                tuple([pattern_type, min_confidence] + tail)
            )
        return [dict(r) for r in cur.fetchall()]
    finally:
        conn.close()
|
|
465
|
-
|
|
466
|
-
def clear_workflow_patterns(self, pattern_type: Optional[str] = None) -> None:
    """Remove workflow patterns — all of them, or only one type —
    typically ahead of a re-mining pass."""
    with self._write_lock:
        conn = self._get_connection()
        try:
            cur = conn.cursor()
            if not pattern_type:
                cur.execute('DELETE FROM workflow_patterns')
            else:
                cur.execute(
                    'DELETE FROM workflow_patterns WHERE pattern_type = ?',
                    (pattern_type,)
                )
            conn.commit()
        except Exception as e:
            conn.rollback()
            logger.error("Failed to clear workflow patterns: %s", e)
            raise
        finally:
            conn.close()
|
|
486
|
-
|
|
487
|
-
# ======================================================================
|
|
488
|
-
# Source Quality Operations
|
|
489
|
-
# ======================================================================
|
|
490
|
-
|
|
491
|
-
def update_source_quality(
    self,
    source_id: str,
    positive_signals: int,
    total_memories: int,
) -> None:
    """Recompute and persist the quality score for a memory source.

    The score uses Beta-Binomial smoothing with alpha = beta = 1:
    (1 + positives) / (2 + total).
    """
    score = (1.0 + positive_signals) / (2.0 + total_memories)

    with self._write_lock:
        conn = self._get_connection()
        try:
            conn.cursor().execute('''
                INSERT INTO source_quality
                (source_id, positive_signals, total_memories,
                 quality_score, last_updated)
                VALUES (?, ?, ?, ?, CURRENT_TIMESTAMP)
                ON CONFLICT(source_id) DO UPDATE SET
                    positive_signals = ?,
                    total_memories = ?,
                    quality_score = ?,
                    last_updated = CURRENT_TIMESTAMP
            ''', (
                source_id, positive_signals, total_memories, score,
                positive_signals, total_memories, score,
            ))
            conn.commit()
        except Exception as e:
            conn.rollback()
            logger.error("Failed to update source quality: %s", e)
            raise
        finally:
            conn.close()
|
|
526
|
-
|
|
527
|
-
def get_source_scores(self, profile_scoped: bool = False) -> Dict[str, float]:
    """Return {source_id: quality_score} for every tracked source."""
    conn = self._get_connection()
    try:
        cur = conn.cursor()
        if not profile_scoped:
            cur.execute('SELECT source_id, quality_score FROM source_quality')
        else:
            cur.execute(
                'SELECT source_id, quality_score FROM source_quality WHERE profile = ?',
                (self._get_active_profile(),)
            )
        return {r['source_id']: r['quality_score'] for r in cur.fetchall()}
    finally:
        conn.close()
|
|
543
|
-
|
|
544
|
-
# ======================================================================
|
|
545
|
-
# Model Metadata Operations
|
|
546
|
-
# ======================================================================
|
|
547
|
-
|
|
548
|
-
def record_model_training(
    self,
    model_version: str,
    training_samples: int,
    synthetic_samples: int = 0,
    real_samples: int = 0,
    ndcg_at_10: Optional[float] = None,
    model_path: Optional[str] = None,
) -> int:
    """Persist metadata describing one ranking-model training run.

    Returns:
        Row ID of the inserted metadata record.
    """
    with self._write_lock:
        conn = self._get_connection()
        try:
            cur = conn.cursor()
            cur.execute('''
                INSERT INTO ranking_models
                (model_version, training_samples, synthetic_samples,
                 real_samples, ndcg_at_10, model_path)
                VALUES (?, ?, ?, ?, ?, ?)
            ''', (model_version, training_samples, synthetic_samples,
                  real_samples, ndcg_at_10, model_path))
            conn.commit()
            return cur.lastrowid
        except Exception as e:
            conn.rollback()
            logger.error("Failed to record model training: %s", e)
            raise
        finally:
            conn.close()
|
|
579
|
-
|
|
580
|
-
def get_latest_model(self) -> Optional[Dict[str, Any]]:
    """Return metadata of the most recently trained model, or None when
    no model has been recorded."""
    conn = self._get_connection()
    try:
        cur = conn.cursor()
        cur.execute('''
            SELECT * FROM ranking_models
            ORDER BY created_at DESC
            LIMIT 1
        ''')
        newest = cur.fetchone()
        if newest is None:
            return None
        return dict(newest)
    finally:
        conn.close()
|
|
594
|
-
|
|
595
|
-
# ======================================================================
|
|
596
|
-
# Engagement Metrics Operations
|
|
597
|
-
# ======================================================================
|
|
598
|
-
|
|
599
|
-
def increment_engagement(
    self,
    metric_type: str,
    count: int = 1,
    source: Optional[str] = None,
) -> None:
    """
    Bump one of today's engagement counters.

    Args:
        metric_type: One of 'memories_created', 'recalls_performed',
            'feedback_signals', 'patterns_updated'.
        count: How much to add (default 1).
        source: Optional source-tool identifier to record in today's
            active_sources list.
    """
    # Whitelist check guards the f-string column interpolation below —
    # only these exact column names can ever reach the SQL.
    allowed = {
        'memories_created', 'recalls_performed',
        'feedback_signals', 'patterns_updated',
    }
    if metric_type not in allowed:
        logger.warning("Invalid metric type: %s", metric_type)
        return

    today = date.today().isoformat()
    with self._write_lock:
        conn = self._get_connection()
        try:
            cur = conn.cursor()

            # Create today's row if it does not exist yet.
            cur.execute('''
                INSERT OR IGNORE INTO engagement_metrics (metric_date)
                VALUES (?)
            ''', (today,))

            # Safe: metric_type was validated against the whitelist above.
            cur.execute(f'''
                UPDATE engagement_metrics
                SET {metric_type} = {metric_type} + ?
                WHERE metric_date = ?
            ''', (count, today))

            # Record the source tool once per day.
            if source:
                cur.execute('''
                    SELECT active_sources FROM engagement_metrics
                    WHERE metric_date = ?
                ''', (today,))
                row = cur.fetchone()
                if row:
                    seen = json.loads(row['active_sources'] or '[]')
                    if source not in seen:
                        seen.append(source)
                        cur.execute('''
                            UPDATE engagement_metrics
                            SET active_sources = ?
                            WHERE metric_date = ?
                        ''', (json.dumps(seen), today))

            conn.commit()
        except Exception as e:
            conn.rollback()
            # Best-effort metric: log but do not propagate.
            logger.error("Failed to update engagement: %s", e)
        finally:
            conn.close()
|
|
664
|
-
|
|
665
|
-
def get_engagement_history(
    self,
    days: int = 30,
) -> List[Dict[str, Any]]:
    """Return per-day engagement rows for the most recent *days* days,
    newest first."""
    conn = self._get_connection()
    try:
        rows = conn.cursor().execute('''
            SELECT * FROM engagement_metrics
            ORDER BY metric_date DESC
            LIMIT ?
        ''', (days,)).fetchall()
        return [dict(r) for r in rows]
    finally:
        conn.close()
|
|
681
|
-
|
|
682
|
-
# ======================================================================
|
|
683
|
-
# Statistics & Diagnostics
|
|
684
|
-
# ======================================================================
|
|
685
|
-
|
|
686
|
-
def get_stats(self) -> Dict[str, Any]:
    """Collect a snapshot of learning-database statistics: feedback,
    pattern, source, model counts plus database file size."""
    conn = self._get_connection()
    try:
        cur = conn.cursor()

        def one(sql: str) -> int:
            # Run a single-value aggregate query and return the scalar.
            cur.execute(sql)
            return cur.fetchone()[0]

        stats: Dict[str, Any] = {
            'feedback_count': one('SELECT COUNT(*) FROM ranking_feedback'),
            'unique_queries': one(
                'SELECT COUNT(DISTINCT query_hash) FROM ranking_feedback'
            ),
            'transferable_patterns': one(
                'SELECT COUNT(*) FROM transferable_patterns'
            ),
            'high_confidence_patterns': one(
                'SELECT COUNT(*) FROM transferable_patterns WHERE confidence >= 0.6'
            ),
            'workflow_patterns': one('SELECT COUNT(*) FROM workflow_patterns'),
            'tracked_sources': one('SELECT COUNT(*) FROM source_quality'),
            'models_trained': one('SELECT COUNT(*) FROM ranking_models'),
        }

        latest = self.get_latest_model()
        stats['latest_model_version'] = latest['model_version'] if latest else None
        stats['latest_model_ndcg'] = latest['ndcg_at_10'] if latest else None

        if self.db_path.exists():
            stats['db_size_bytes'] = self.db_path.stat().st_size
            stats['db_size_kb'] = round(stats['db_size_bytes'] / 1024, 1)
        else:
            stats['db_size_bytes'] = 0
            stats['db_size_kb'] = 0

        return stats
    finally:
        conn.close()
|
|
745
|
-
|
|
746
|
-
# ======================================================================
|
|
747
|
-
# v2.8.0: Action Outcomes CRUD
|
|
748
|
-
# ======================================================================
|
|
749
|
-
|
|
750
|
-
def store_outcome(self, memory_ids: Any, outcome: str, action_type: str = "other", context: Optional[Dict] = None, confidence: float = 1.0, agent_id: str = "user", project: Optional[str] = None, profile: str = "default") -> int:
    """Store an action outcome for behavioral learning.

    Args:
        memory_ids: A single memory ID or a list of IDs involved in the action.
        outcome: Outcome label (e.g. 'success', 'failure').
        action_type: Category of the action (default 'other').
        context: Optional extra context, serialized as JSON.
        confidence: Confidence in the recorded outcome.
        agent_id: Identifier of the acting agent.
        project: Optional project identifier.
        profile: Profile the outcome belongs to.

    Returns:
        Row ID of the inserted record.
    """
    memory_ids_str = json.dumps(memory_ids if isinstance(memory_ids, list) else [memory_ids])
    context_str = json.dumps(context or {})
    # Fix: acquire the write lock BEFORE opening the connection, matching
    # every other write path in this class, and roll back + log on failure
    # instead of leaving a half-open transaction.
    with self._write_lock:
        conn = self._get_connection()
        try:
            cursor = conn.execute(
                "INSERT INTO action_outcomes (memory_ids, outcome, action_type, context, confidence, agent_id, project, profile) VALUES (?, ?, ?, ?, ?, ?, ?, ?)",
                (memory_ids_str, outcome, action_type, context_str, confidence, agent_id, project, profile),
            )
            conn.commit()
            return cursor.lastrowid
        except Exception as e:
            conn.rollback()
            logger.error("Failed to store outcome: %s", e)
            raise
        finally:
            conn.close()
|
|
765
|
-
|
|
766
|
-
def get_outcomes(self, memory_id: Optional[int] = None, project: Optional[str] = None, profile: str = "default", limit: int = 100) -> List[Dict[str, Any]]:
    """Get action outcomes, optionally filtered.

    Args:
        memory_id: If given, return only outcomes whose memory_ids list
            contains this ID.
        project: Optional project filter.
        profile: Profile to read from (default 'default').
        limit: Maximum number of results.

    Returns:
        Outcome dicts (memory_ids and context JSON-decoded), newest first.
    """
    conn = self._get_connection()
    try:
        query = "SELECT * FROM action_outcomes WHERE profile = ?"
        params: List[Any] = [profile]
        if project:
            query += " AND project = ?"
            params.append(project)
        query += " ORDER BY created_at DESC"
        # Fix: memory_id is matched against a JSON column in Python, so it
        # cannot be filtered in SQL. Previously LIMIT was applied in SQL
        # *before* that filter, which silently dropped matches outside the
        # newest `limit` rows. Apply the limit after filtering instead
        # (and use `is not None` so memory_id=0 is honored).
        if memory_id is None:
            query += " LIMIT ?"
            params.append(limit)
        rows = conn.execute(query, params).fetchall()
        results = []
        for row in rows:
            d = dict(row)
            d["memory_ids"] = json.loads(d["memory_ids"])
            d["context"] = json.loads(d["context"])
            if memory_id is not None and memory_id not in d["memory_ids"]:
                continue
            results.append(d)
            if len(results) >= limit:
                break
        return results
    finally:
        conn.close()
|
|
789
|
-
|
|
790
|
-
# ======================================================================
|
|
791
|
-
# v2.8.0: Behavioral Patterns CRUD
|
|
792
|
-
# ======================================================================
|
|
793
|
-
|
|
794
|
-
def store_behavioral_pattern(self, pattern_type: str, pattern_key: str, success_rate: float = 0.0, evidence_count: int = 0, confidence: float = 0.0, metadata: Optional[Dict] = None, project: Optional[str] = None, profile: str = "default") -> int:
    """Insert a behavioral pattern row.

    Note: this always INSERTs a new row (the old docstring claimed
    "store or update", which the code never did).

    Returns:
        Row ID of the inserted pattern.
    """
    metadata_str = json.dumps(metadata or {})
    # Fix: take the write lock before opening the connection (matches the
    # other write paths) and roll back + log on failure.
    with self._write_lock:
        conn = self._get_connection()
        try:
            cursor = conn.execute(
                "INSERT INTO behavioral_patterns (pattern_type, pattern_key, success_rate, evidence_count, confidence, metadata, project, profile) VALUES (?, ?, ?, ?, ?, ?, ?, ?)",
                (pattern_type, pattern_key, success_rate, evidence_count, confidence, metadata_str, project, profile),
            )
            conn.commit()
            return cursor.lastrowid
        except Exception as e:
            conn.rollback()
            logger.error("Failed to store behavioral pattern: %s", e)
            raise
        finally:
            conn.close()
|
|
808
|
-
|
|
809
|
-
def get_behavioral_patterns(self, pattern_type: Optional[str] = None, project: Optional[str] = None, min_confidence: float = 0.0, profile: str = "default") -> List[Dict[str, Any]]:
    """Fetch behavioral patterns matching the filters, most confident first.

    Each result dict has its metadata column JSON-decoded.
    """
    conn = self._get_connection()
    try:
        sql = "SELECT * FROM behavioral_patterns WHERE profile = ? AND confidence >= ?"
        args: List[Any] = [profile, min_confidence]
        if pattern_type:
            sql += " AND pattern_type = ?"
            args.append(pattern_type)
        if project:
            sql += " AND project = ?"
            args.append(project)
        sql += " ORDER BY confidence DESC"
        out = []
        for record in conn.execute(sql, args).fetchall():
            item = dict(record)
            item["metadata"] = json.loads(item["metadata"])
            out.append(item)
        return out
    finally:
        conn.close()
|
|
831
|
-
|
|
832
|
-
# ======================================================================
|
|
833
|
-
# v2.8.0: Cross-Project CRUD
|
|
834
|
-
# ======================================================================
|
|
835
|
-
|
|
836
|
-
def store_cross_project(self, source_project: str, target_project: str, pattern_id: int, transfer_type: str = "metadata", confidence: float = 0.0, profile: str = "default") -> int:
    """Record a cross-project behavioral transfer.

    Returns:
        Row ID of the inserted transfer record.
    """
    # Fix: take the write lock before opening the connection (matches the
    # other write paths) and roll back + log on failure.
    with self._write_lock:
        conn = self._get_connection()
        try:
            cursor = conn.execute(
                "INSERT INTO cross_project_behaviors (source_project, target_project, pattern_id, transfer_type, confidence, profile) VALUES (?, ?, ?, ?, ?, ?)",
                (source_project, target_project, pattern_id, transfer_type, confidence, profile),
            )
            conn.commit()
            return cursor.lastrowid
        except Exception as e:
            conn.rollback()
            logger.error("Failed to store cross-project transfer: %s", e)
            raise
        finally:
            conn.close()
|
|
849
|
-
|
|
850
|
-
def get_cross_project_transfers(self, source_project: Optional[str] = None, target_project: Optional[str] = None, profile: str = "default") -> List[Dict[str, Any]]:
    """Fetch cross-project transfer records, newest first, optionally
    filtered by source and/or target project."""
    conn = self._get_connection()
    try:
        sql = "SELECT * FROM cross_project_behaviors WHERE profile = ?"
        args: List[Any] = [profile]
        if source_project:
            sql += " AND source_project = ?"
            args.append(source_project)
        if target_project:
            sql += " AND target_project = ?"
            args.append(target_project)
        sql += " ORDER BY created_at DESC"
        records = conn.execute(sql, args).fetchall()
        return [dict(r) for r in records]
    finally:
        conn.close()
|
|
866
|
-
|
|
867
|
-
# ======================================================================
|
|
868
|
-
# Reset / Cleanup
|
|
869
|
-
# ======================================================================
|
|
870
|
-
|
|
871
|
-
def reset(self) -> None:
    """
    Delete all learning data. Memories in memory.db are preserved.

    This is the GDPR Article 17 "Right to Erasure" handler for
    behavioral data.
    """
    with self._write_lock:
        conn = self._get_connection()
        try:
            cursor = conn.cursor()
            cursor.execute('DELETE FROM ranking_feedback')
            cursor.execute('DELETE FROM transferable_patterns')
            cursor.execute('DELETE FROM workflow_patterns')
            cursor.execute('DELETE FROM ranking_models')
            cursor.execute('DELETE FROM source_quality')
            cursor.execute('DELETE FROM engagement_metrics')
            # Fix: the v2.8.0 behavioral tables were missing from the
            # erasure path, so reset() silently left behavioral data
            # behind despite the GDPR contract above.
            cursor.execute('DELETE FROM action_outcomes')
            cursor.execute('DELETE FROM behavioral_patterns')
            cursor.execute('DELETE FROM cross_project_behaviors')
            conn.commit()
            logger.info(
                "Learning data reset. Memories in memory.db preserved."
            )
        except Exception as e:
            conn.rollback()
            logger.error("Failed to reset learning data: %s", e)
            raise
        finally:
            conn.close()
|
|
898
|
-
|
|
899
|
-
def delete_database(self) -> None:
    """
    Completely delete learning.db file.
    More aggressive than reset() — removes the file entirely.
    """
    with self._write_lock:
        # Drop the cached singleton for this path before touching the file.
        LearningDB.reset_instance(self.db_path)
        if self.db_path.exists():
            self.db_path.unlink()
            logger.info("Learning database deleted: %s", self.db_path)
        # Remove SQLite WAL/SHM sidecar files as well.
        for sidecar in (self.db_path.with_suffix('.db-wal'),
                        self.db_path.with_suffix('.db-shm')):
            if sidecar.exists():
                sidecar.unlink()