attune-ai 2.0.0 (attune_ai-2.0.0-py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- attune/__init__.py +358 -0
- attune/adaptive/__init__.py +13 -0
- attune/adaptive/task_complexity.py +127 -0
- attune/agent_monitoring.py +414 -0
- attune/cache/__init__.py +117 -0
- attune/cache/base.py +166 -0
- attune/cache/dependency_manager.py +256 -0
- attune/cache/hash_only.py +251 -0
- attune/cache/hybrid.py +457 -0
- attune/cache/storage.py +285 -0
- attune/cache_monitor.py +356 -0
- attune/cache_stats.py +298 -0
- attune/cli/__init__.py +152 -0
- attune/cli/__main__.py +12 -0
- attune/cli/commands/__init__.py +1 -0
- attune/cli/commands/batch.py +264 -0
- attune/cli/commands/cache.py +248 -0
- attune/cli/commands/help.py +331 -0
- attune/cli/commands/info.py +140 -0
- attune/cli/commands/inspect.py +436 -0
- attune/cli/commands/inspection.py +57 -0
- attune/cli/commands/memory.py +48 -0
- attune/cli/commands/metrics.py +92 -0
- attune/cli/commands/orchestrate.py +184 -0
- attune/cli/commands/patterns.py +207 -0
- attune/cli/commands/profiling.py +202 -0
- attune/cli/commands/provider.py +98 -0
- attune/cli/commands/routing.py +285 -0
- attune/cli/commands/setup.py +96 -0
- attune/cli/commands/status.py +235 -0
- attune/cli/commands/sync.py +166 -0
- attune/cli/commands/tier.py +121 -0
- attune/cli/commands/utilities.py +114 -0
- attune/cli/commands/workflow.py +579 -0
- attune/cli/core.py +32 -0
- attune/cli/parsers/__init__.py +68 -0
- attune/cli/parsers/batch.py +118 -0
- attune/cli/parsers/cache.py +65 -0
- attune/cli/parsers/help.py +41 -0
- attune/cli/parsers/info.py +26 -0
- attune/cli/parsers/inspect.py +66 -0
- attune/cli/parsers/metrics.py +42 -0
- attune/cli/parsers/orchestrate.py +61 -0
- attune/cli/parsers/patterns.py +54 -0
- attune/cli/parsers/provider.py +40 -0
- attune/cli/parsers/routing.py +110 -0
- attune/cli/parsers/setup.py +42 -0
- attune/cli/parsers/status.py +47 -0
- attune/cli/parsers/sync.py +31 -0
- attune/cli/parsers/tier.py +33 -0
- attune/cli/parsers/workflow.py +77 -0
- attune/cli/utils/__init__.py +1 -0
- attune/cli/utils/data.py +242 -0
- attune/cli/utils/helpers.py +68 -0
- attune/cli_legacy.py +3957 -0
- attune/cli_minimal.py +1159 -0
- attune/cli_router.py +437 -0
- attune/cli_unified.py +814 -0
- attune/config/__init__.py +66 -0
- attune/config/xml_config.py +286 -0
- attune/config.py +545 -0
- attune/coordination.py +870 -0
- attune/core.py +1511 -0
- attune/core_modules/__init__.py +15 -0
- attune/cost_tracker.py +626 -0
- attune/dashboard/__init__.py +41 -0
- attune/dashboard/app.py +512 -0
- attune/dashboard/simple_server.py +435 -0
- attune/dashboard/standalone_server.py +547 -0
- attune/discovery.py +306 -0
- attune/emergence.py +306 -0
- attune/exceptions.py +123 -0
- attune/feedback_loops.py +373 -0
- attune/hot_reload/README.md +473 -0
- attune/hot_reload/__init__.py +62 -0
- attune/hot_reload/config.py +83 -0
- attune/hot_reload/integration.py +229 -0
- attune/hot_reload/reloader.py +298 -0
- attune/hot_reload/watcher.py +183 -0
- attune/hot_reload/websocket.py +177 -0
- attune/levels.py +577 -0
- attune/leverage_points.py +441 -0
- attune/logging_config.py +261 -0
- attune/mcp/__init__.py +10 -0
- attune/mcp/server.py +506 -0
- attune/memory/__init__.py +237 -0
- attune/memory/claude_memory.py +469 -0
- attune/memory/config.py +224 -0
- attune/memory/control_panel.py +1290 -0
- attune/memory/control_panel_support.py +145 -0
- attune/memory/cross_session.py +845 -0
- attune/memory/edges.py +179 -0
- attune/memory/encryption.py +159 -0
- attune/memory/file_session.py +770 -0
- attune/memory/graph.py +570 -0
- attune/memory/long_term.py +913 -0
- attune/memory/long_term_types.py +99 -0
- attune/memory/mixins/__init__.py +25 -0
- attune/memory/mixins/backend_init_mixin.py +249 -0
- attune/memory/mixins/capabilities_mixin.py +208 -0
- attune/memory/mixins/handoff_mixin.py +208 -0
- attune/memory/mixins/lifecycle_mixin.py +49 -0
- attune/memory/mixins/long_term_mixin.py +352 -0
- attune/memory/mixins/promotion_mixin.py +109 -0
- attune/memory/mixins/short_term_mixin.py +182 -0
- attune/memory/nodes.py +179 -0
- attune/memory/redis_bootstrap.py +540 -0
- attune/memory/security/__init__.py +31 -0
- attune/memory/security/audit_logger.py +932 -0
- attune/memory/security/pii_scrubber.py +640 -0
- attune/memory/security/secrets_detector.py +678 -0
- attune/memory/short_term.py +2192 -0
- attune/memory/simple_storage.py +302 -0
- attune/memory/storage/__init__.py +15 -0
- attune/memory/storage_backend.py +167 -0
- attune/memory/summary_index.py +583 -0
- attune/memory/types.py +446 -0
- attune/memory/unified.py +182 -0
- attune/meta_workflows/__init__.py +74 -0
- attune/meta_workflows/agent_creator.py +248 -0
- attune/meta_workflows/builtin_templates.py +567 -0
- attune/meta_workflows/cli_commands/__init__.py +56 -0
- attune/meta_workflows/cli_commands/agent_commands.py +321 -0
- attune/meta_workflows/cli_commands/analytics_commands.py +442 -0
- attune/meta_workflows/cli_commands/config_commands.py +232 -0
- attune/meta_workflows/cli_commands/memory_commands.py +182 -0
- attune/meta_workflows/cli_commands/template_commands.py +354 -0
- attune/meta_workflows/cli_commands/workflow_commands.py +382 -0
- attune/meta_workflows/cli_meta_workflows.py +59 -0
- attune/meta_workflows/form_engine.py +292 -0
- attune/meta_workflows/intent_detector.py +409 -0
- attune/meta_workflows/models.py +569 -0
- attune/meta_workflows/pattern_learner.py +738 -0
- attune/meta_workflows/plan_generator.py +384 -0
- attune/meta_workflows/session_context.py +397 -0
- attune/meta_workflows/template_registry.py +229 -0
- attune/meta_workflows/workflow.py +984 -0
- attune/metrics/__init__.py +12 -0
- attune/metrics/collector.py +31 -0
- attune/metrics/prompt_metrics.py +194 -0
- attune/models/__init__.py +172 -0
- attune/models/__main__.py +13 -0
- attune/models/adaptive_routing.py +437 -0
- attune/models/auth_cli.py +444 -0
- attune/models/auth_strategy.py +450 -0
- attune/models/cli.py +655 -0
- attune/models/empathy_executor.py +354 -0
- attune/models/executor.py +257 -0
- attune/models/fallback.py +762 -0
- attune/models/provider_config.py +282 -0
- attune/models/registry.py +472 -0
- attune/models/tasks.py +359 -0
- attune/models/telemetry/__init__.py +71 -0
- attune/models/telemetry/analytics.py +594 -0
- attune/models/telemetry/backend.py +196 -0
- attune/models/telemetry/data_models.py +431 -0
- attune/models/telemetry/storage.py +489 -0
- attune/models/token_estimator.py +420 -0
- attune/models/validation.py +280 -0
- attune/monitoring/__init__.py +52 -0
- attune/monitoring/alerts.py +946 -0
- attune/monitoring/alerts_cli.py +448 -0
- attune/monitoring/multi_backend.py +271 -0
- attune/monitoring/otel_backend.py +362 -0
- attune/optimization/__init__.py +19 -0
- attune/optimization/context_optimizer.py +272 -0
- attune/orchestration/__init__.py +67 -0
- attune/orchestration/agent_templates.py +707 -0
- attune/orchestration/config_store.py +499 -0
- attune/orchestration/execution_strategies.py +2111 -0
- attune/orchestration/meta_orchestrator.py +1168 -0
- attune/orchestration/pattern_learner.py +696 -0
- attune/orchestration/real_tools.py +931 -0
- attune/pattern_cache.py +187 -0
- attune/pattern_library.py +542 -0
- attune/patterns/debugging/all_patterns.json +81 -0
- attune/patterns/debugging/workflow_20260107_1770825e.json +77 -0
- attune/patterns/refactoring_memory.json +89 -0
- attune/persistence.py +564 -0
- attune/platform_utils.py +265 -0
- attune/plugins/__init__.py +28 -0
- attune/plugins/base.py +361 -0
- attune/plugins/registry.py +268 -0
- attune/project_index/__init__.py +32 -0
- attune/project_index/cli.py +335 -0
- attune/project_index/index.py +667 -0
- attune/project_index/models.py +504 -0
- attune/project_index/reports.py +474 -0
- attune/project_index/scanner.py +777 -0
- attune/project_index/scanner_parallel.py +291 -0
- attune/prompts/__init__.py +61 -0
- attune/prompts/config.py +77 -0
- attune/prompts/context.py +177 -0
- attune/prompts/parser.py +285 -0
- attune/prompts/registry.py +313 -0
- attune/prompts/templates.py +208 -0
- attune/redis_config.py +302 -0
- attune/redis_memory.py +799 -0
- attune/resilience/__init__.py +56 -0
- attune/resilience/circuit_breaker.py +256 -0
- attune/resilience/fallback.py +179 -0
- attune/resilience/health.py +300 -0
- attune/resilience/retry.py +209 -0
- attune/resilience/timeout.py +135 -0
- attune/routing/__init__.py +43 -0
- attune/routing/chain_executor.py +433 -0
- attune/routing/classifier.py +217 -0
- attune/routing/smart_router.py +234 -0
- attune/routing/workflow_registry.py +343 -0
- attune/scaffolding/README.md +589 -0
- attune/scaffolding/__init__.py +35 -0
- attune/scaffolding/__main__.py +14 -0
- attune/scaffolding/cli.py +240 -0
- attune/scaffolding/templates/base_wizard.py.jinja2 +121 -0
- attune/scaffolding/templates/coach_wizard.py.jinja2 +321 -0
- attune/scaffolding/templates/domain_wizard.py.jinja2 +408 -0
- attune/scaffolding/templates/linear_flow_wizard.py.jinja2 +203 -0
- attune/socratic/__init__.py +256 -0
- attune/socratic/ab_testing.py +958 -0
- attune/socratic/blueprint.py +533 -0
- attune/socratic/cli.py +703 -0
- attune/socratic/collaboration.py +1114 -0
- attune/socratic/domain_templates.py +924 -0
- attune/socratic/embeddings.py +738 -0
- attune/socratic/engine.py +794 -0
- attune/socratic/explainer.py +682 -0
- attune/socratic/feedback.py +772 -0
- attune/socratic/forms.py +629 -0
- attune/socratic/generator.py +732 -0
- attune/socratic/llm_analyzer.py +637 -0
- attune/socratic/mcp_server.py +702 -0
- attune/socratic/session.py +312 -0
- attune/socratic/storage.py +667 -0
- attune/socratic/success.py +730 -0
- attune/socratic/visual_editor.py +860 -0
- attune/socratic/web_ui.py +958 -0
- attune/telemetry/__init__.py +39 -0
- attune/telemetry/agent_coordination.py +475 -0
- attune/telemetry/agent_tracking.py +367 -0
- attune/telemetry/approval_gates.py +545 -0
- attune/telemetry/cli.py +1231 -0
- attune/telemetry/commands/__init__.py +14 -0
- attune/telemetry/commands/dashboard_commands.py +696 -0
- attune/telemetry/event_streaming.py +409 -0
- attune/telemetry/feedback_loop.py +567 -0
- attune/telemetry/usage_tracker.py +591 -0
- attune/templates.py +754 -0
- attune/test_generator/__init__.py +38 -0
- attune/test_generator/__main__.py +14 -0
- attune/test_generator/cli.py +234 -0
- attune/test_generator/generator.py +355 -0
- attune/test_generator/risk_analyzer.py +216 -0
- attune/test_generator/templates/unit_test.py.jinja2 +272 -0
- attune/tier_recommender.py +384 -0
- attune/tools.py +183 -0
- attune/trust/__init__.py +28 -0
- attune/trust/circuit_breaker.py +579 -0
- attune/trust_building.py +527 -0
- attune/validation/__init__.py +19 -0
- attune/validation/xml_validator.py +281 -0
- attune/vscode_bridge.py +173 -0
- attune/workflow_commands.py +780 -0
- attune/workflow_patterns/__init__.py +33 -0
- attune/workflow_patterns/behavior.py +249 -0
- attune/workflow_patterns/core.py +76 -0
- attune/workflow_patterns/output.py +99 -0
- attune/workflow_patterns/registry.py +255 -0
- attune/workflow_patterns/structural.py +288 -0
- attune/workflows/__init__.py +539 -0
- attune/workflows/autonomous_test_gen.py +1268 -0
- attune/workflows/base.py +2667 -0
- attune/workflows/batch_processing.py +342 -0
- attune/workflows/bug_predict.py +1084 -0
- attune/workflows/builder.py +273 -0
- attune/workflows/caching.py +253 -0
- attune/workflows/code_review.py +1048 -0
- attune/workflows/code_review_adapters.py +312 -0
- attune/workflows/code_review_pipeline.py +722 -0
- attune/workflows/config.py +645 -0
- attune/workflows/dependency_check.py +644 -0
- attune/workflows/document_gen/__init__.py +25 -0
- attune/workflows/document_gen/config.py +30 -0
- attune/workflows/document_gen/report_formatter.py +162 -0
- attune/workflows/document_gen/workflow.py +1426 -0
- attune/workflows/document_manager.py +216 -0
- attune/workflows/document_manager_README.md +134 -0
- attune/workflows/documentation_orchestrator.py +1205 -0
- attune/workflows/history.py +510 -0
- attune/workflows/keyboard_shortcuts/__init__.py +39 -0
- attune/workflows/keyboard_shortcuts/generators.py +391 -0
- attune/workflows/keyboard_shortcuts/parsers.py +416 -0
- attune/workflows/keyboard_shortcuts/prompts.py +295 -0
- attune/workflows/keyboard_shortcuts/schema.py +193 -0
- attune/workflows/keyboard_shortcuts/workflow.py +509 -0
- attune/workflows/llm_base.py +363 -0
- attune/workflows/manage_docs.py +87 -0
- attune/workflows/manage_docs_README.md +134 -0
- attune/workflows/manage_documentation.py +821 -0
- attune/workflows/new_sample_workflow1.py +149 -0
- attune/workflows/new_sample_workflow1_README.md +150 -0
- attune/workflows/orchestrated_health_check.py +849 -0
- attune/workflows/orchestrated_release_prep.py +600 -0
- attune/workflows/output.py +413 -0
- attune/workflows/perf_audit.py +863 -0
- attune/workflows/pr_review.py +762 -0
- attune/workflows/progress.py +785 -0
- attune/workflows/progress_server.py +322 -0
- attune/workflows/progressive/README 2.md +454 -0
- attune/workflows/progressive/README.md +454 -0
- attune/workflows/progressive/__init__.py +82 -0
- attune/workflows/progressive/cli.py +219 -0
- attune/workflows/progressive/core.py +488 -0
- attune/workflows/progressive/orchestrator.py +723 -0
- attune/workflows/progressive/reports.py +520 -0
- attune/workflows/progressive/telemetry.py +274 -0
- attune/workflows/progressive/test_gen.py +495 -0
- attune/workflows/progressive/workflow.py +589 -0
- attune/workflows/refactor_plan.py +694 -0
- attune/workflows/release_prep.py +895 -0
- attune/workflows/release_prep_crew.py +969 -0
- attune/workflows/research_synthesis.py +404 -0
- attune/workflows/routing.py +168 -0
- attune/workflows/secure_release.py +593 -0
- attune/workflows/security_adapters.py +297 -0
- attune/workflows/security_audit.py +1329 -0
- attune/workflows/security_audit_phase3.py +355 -0
- attune/workflows/seo_optimization.py +633 -0
- attune/workflows/step_config.py +234 -0
- attune/workflows/telemetry_mixin.py +269 -0
- attune/workflows/test5.py +125 -0
- attune/workflows/test5_README.md +158 -0
- attune/workflows/test_coverage_boost_crew.py +849 -0
- attune/workflows/test_gen/__init__.py +52 -0
- attune/workflows/test_gen/ast_analyzer.py +249 -0
- attune/workflows/test_gen/config.py +88 -0
- attune/workflows/test_gen/data_models.py +38 -0
- attune/workflows/test_gen/report_formatter.py +289 -0
- attune/workflows/test_gen/test_templates.py +381 -0
- attune/workflows/test_gen/workflow.py +655 -0
- attune/workflows/test_gen.py +54 -0
- attune/workflows/test_gen_behavioral.py +477 -0
- attune/workflows/test_gen_parallel.py +341 -0
- attune/workflows/test_lifecycle.py +526 -0
- attune/workflows/test_maintenance.py +627 -0
- attune/workflows/test_maintenance_cli.py +590 -0
- attune/workflows/test_maintenance_crew.py +840 -0
- attune/workflows/test_runner.py +622 -0
- attune/workflows/tier_tracking.py +531 -0
- attune/workflows/xml_enhanced_crew.py +285 -0
- attune_ai-2.0.0.dist-info/METADATA +1026 -0
- attune_ai-2.0.0.dist-info/RECORD +457 -0
- attune_ai-2.0.0.dist-info/WHEEL +5 -0
- attune_ai-2.0.0.dist-info/entry_points.txt +26 -0
- attune_ai-2.0.0.dist-info/licenses/LICENSE +201 -0
- attune_ai-2.0.0.dist-info/licenses/LICENSE_CHANGE_ANNOUNCEMENT.md +101 -0
- attune_ai-2.0.0.dist-info/top_level.txt +5 -0
- attune_healthcare/__init__.py +13 -0
- attune_healthcare/monitors/__init__.py +9 -0
- attune_healthcare/monitors/clinical_protocol_monitor.py +315 -0
- attune_healthcare/monitors/monitoring/__init__.py +44 -0
- attune_healthcare/monitors/monitoring/protocol_checker.py +300 -0
- attune_healthcare/monitors/monitoring/protocol_loader.py +214 -0
- attune_healthcare/monitors/monitoring/sensor_parsers.py +306 -0
- attune_healthcare/monitors/monitoring/trajectory_analyzer.py +389 -0
- attune_llm/README.md +553 -0
- attune_llm/__init__.py +28 -0
- attune_llm/agent_factory/__init__.py +53 -0
- attune_llm/agent_factory/adapters/__init__.py +85 -0
- attune_llm/agent_factory/adapters/autogen_adapter.py +312 -0
- attune_llm/agent_factory/adapters/crewai_adapter.py +483 -0
- attune_llm/agent_factory/adapters/haystack_adapter.py +298 -0
- attune_llm/agent_factory/adapters/langchain_adapter.py +362 -0
- attune_llm/agent_factory/adapters/langgraph_adapter.py +333 -0
- attune_llm/agent_factory/adapters/native.py +228 -0
- attune_llm/agent_factory/adapters/wizard_adapter.py +423 -0
- attune_llm/agent_factory/base.py +305 -0
- attune_llm/agent_factory/crews/__init__.py +67 -0
- attune_llm/agent_factory/crews/code_review.py +1113 -0
- attune_llm/agent_factory/crews/health_check.py +1262 -0
- attune_llm/agent_factory/crews/refactoring.py +1128 -0
- attune_llm/agent_factory/crews/security_audit.py +1018 -0
- attune_llm/agent_factory/decorators.py +287 -0
- attune_llm/agent_factory/factory.py +558 -0
- attune_llm/agent_factory/framework.py +193 -0
- attune_llm/agent_factory/memory_integration.py +328 -0
- attune_llm/agent_factory/resilient.py +320 -0
- attune_llm/agents_md/__init__.py +22 -0
- attune_llm/agents_md/loader.py +218 -0
- attune_llm/agents_md/parser.py +271 -0
- attune_llm/agents_md/registry.py +307 -0
- attune_llm/claude_memory.py +466 -0
- attune_llm/cli/__init__.py +8 -0
- attune_llm/cli/sync_claude.py +487 -0
- attune_llm/code_health.py +1313 -0
- attune_llm/commands/__init__.py +51 -0
- attune_llm/commands/context.py +375 -0
- attune_llm/commands/loader.py +301 -0
- attune_llm/commands/models.py +231 -0
- attune_llm/commands/parser.py +371 -0
- attune_llm/commands/registry.py +429 -0
- attune_llm/config/__init__.py +29 -0
- attune_llm/config/unified.py +291 -0
- attune_llm/context/__init__.py +22 -0
- attune_llm/context/compaction.py +455 -0
- attune_llm/context/manager.py +434 -0
- attune_llm/contextual_patterns.py +361 -0
- attune_llm/core.py +907 -0
- attune_llm/git_pattern_extractor.py +435 -0
- attune_llm/hooks/__init__.py +24 -0
- attune_llm/hooks/config.py +306 -0
- attune_llm/hooks/executor.py +289 -0
- attune_llm/hooks/registry.py +302 -0
- attune_llm/hooks/scripts/__init__.py +39 -0
- attune_llm/hooks/scripts/evaluate_session.py +201 -0
- attune_llm/hooks/scripts/first_time_init.py +285 -0
- attune_llm/hooks/scripts/pre_compact.py +207 -0
- attune_llm/hooks/scripts/session_end.py +183 -0
- attune_llm/hooks/scripts/session_start.py +163 -0
- attune_llm/hooks/scripts/suggest_compact.py +225 -0
- attune_llm/learning/__init__.py +30 -0
- attune_llm/learning/evaluator.py +438 -0
- attune_llm/learning/extractor.py +514 -0
- attune_llm/learning/storage.py +560 -0
- attune_llm/levels.py +227 -0
- attune_llm/pattern_confidence.py +414 -0
- attune_llm/pattern_resolver.py +272 -0
- attune_llm/pattern_summary.py +350 -0
- attune_llm/providers.py +967 -0
- attune_llm/routing/__init__.py +32 -0
- attune_llm/routing/model_router.py +362 -0
- attune_llm/security/IMPLEMENTATION_SUMMARY.md +413 -0
- attune_llm/security/PHASE2_COMPLETE.md +384 -0
- attune_llm/security/PHASE2_SECRETS_DETECTOR_COMPLETE.md +271 -0
- attune_llm/security/QUICK_REFERENCE.md +316 -0
- attune_llm/security/README.md +262 -0
- attune_llm/security/__init__.py +62 -0
- attune_llm/security/audit_logger.py +929 -0
- attune_llm/security/audit_logger_example.py +152 -0
- attune_llm/security/pii_scrubber.py +640 -0
- attune_llm/security/secrets_detector.py +678 -0
- attune_llm/security/secrets_detector_example.py +304 -0
- attune_llm/security/secure_memdocs.py +1192 -0
- attune_llm/security/secure_memdocs_example.py +278 -0
- attune_llm/session_status.py +745 -0
- attune_llm/state.py +246 -0
- attune_llm/utils/__init__.py +5 -0
- attune_llm/utils/tokens.py +349 -0
- attune_software/SOFTWARE_PLUGIN_README.md +57 -0
- attune_software/__init__.py +13 -0
- attune_software/cli/__init__.py +120 -0
- attune_software/cli/inspect.py +362 -0
- attune_software/cli.py +574 -0
- attune_software/plugin.py +188 -0
- workflow_scaffolding/__init__.py +11 -0
- workflow_scaffolding/__main__.py +12 -0
- workflow_scaffolding/cli.py +206 -0
- workflow_scaffolding/generator.py +265 -0
attune_llm/security/secure_memdocs.py
@@ -0,0 +1,1192 @@
"""Secure MemDocs Integration for Enterprise Privacy

Combines PII scrubbing, secrets detection, and audit logging with MemDocs pattern storage.
Implements three-tier classification (PUBLIC/INTERNAL/SENSITIVE) with encryption support.

This module provides the complete security pipeline for storing and retrieving
patterns with full compliance for GDPR, HIPAA, and SOC2 requirements.

Key Features:
- Automatic PII scrubbing before storage
- Secrets detection with blocking
- Three-tier classification system
- AES-256-GCM encryption for SENSITIVE patterns
- Comprehensive audit logging
- Access control enforcement
- Retention policy management

Architecture:
    User Input → [PII Scrubbing + Secrets Detection (PARALLEL)] → Classification
    → Encryption (if SENSITIVE) → MemDocs Storage → Audit Logging

Reference:
- SECURE_MEMORY_ARCHITECTURE.md: MemDocs Integration Patterns
- ENTERPRISE_PRIVACY_INTEGRATION.md: Phase 2 Implementation

Copyright 2025 Smart AI Memory, LLC
Licensed under Fair Source 0.9
"""
import base64
import concurrent.futures
import hashlib
import json
import os
from dataclasses import dataclass, field
from datetime import datetime, timedelta
from enum import Enum
from pathlib import Path
from typing import Any

import structlog

from .audit_logger import AuditEvent, AuditLogger
from .pii_scrubber import PIIScrubber
from .secrets_detector import SecretsDetector

logger = structlog.get_logger(__name__)

# Check for cryptography library
try:
    from cryptography.hazmat.primitives.ciphers.aead import AESGCM

    HAS_ENCRYPTION = True
except ImportError:
    HAS_ENCRYPTION = False
    logger.warning("cryptography library not available - encryption disabled")

class Classification(Enum):
    """Three-tier classification system for MemDocs patterns"""

    PUBLIC = "PUBLIC"  # Shareable across organization, anonymized
    INTERNAL = "INTERNAL"  # Team/project only, no PII or secrets
    SENSITIVE = "SENSITIVE"  # Encrypted at rest, access-controlled (HIPAA, finance)


@dataclass
class ClassificationRules:
    """Security rules for each classification level"""

    classification: Classification
    encryption_required: bool
    retention_days: int
    access_level: str  # "all_users", "project_team", "explicit_permission"
    audit_all_access: bool = False


# Default classification rules based on enterprise security policy
DEFAULT_CLASSIFICATION_RULES: dict[Classification, ClassificationRules] = {
    Classification.PUBLIC: ClassificationRules(
        classification=Classification.PUBLIC,
        encryption_required=False,
        retention_days=365,
        access_level="all_users",
        audit_all_access=False,
    ),
    Classification.INTERNAL: ClassificationRules(
        classification=Classification.INTERNAL,
        encryption_required=False,
        retention_days=180,
        access_level="project_team",
        audit_all_access=False,
    ),
    Classification.SENSITIVE: ClassificationRules(
        classification=Classification.SENSITIVE,
        encryption_required=True,
        retention_days=90,
        access_level="explicit_permission",
        audit_all_access=True,
    ),
}


@dataclass
class PatternMetadata:
    """Metadata for stored MemDocs patterns"""

    pattern_id: str
    created_by: str
    created_at: str
    classification: str
    retention_days: int
    encrypted: bool
    pattern_type: str
    sanitization_applied: bool
    pii_removed: int
    secrets_detected: int
    access_control: dict[str, Any] = field(default_factory=dict)
    custom_metadata: dict[str, Any] = field(default_factory=dict)


@dataclass
class SecurePattern:
    """Represents a securely stored pattern"""

    pattern_id: str
    content: str
    metadata: PatternMetadata


class SecurityError(Exception):
    """Raised when security policy is violated"""


class PermissionError(Exception):
    """Raised when access is denied"""

class EncryptionManager:
    """Manages encryption/decryption for SENSITIVE patterns.

    Uses AES-256-GCM (Galois/Counter Mode) for authenticated encryption.
    Keys are derived from a master key using HKDF.
    """

    def __init__(self, master_key: bytes | None = None):
        """Initialize encryption manager.

        Args:
            master_key: 32-byte master key (or None to generate/load)

        """
        if not HAS_ENCRYPTION:
            logger.warning("Encryption not available - install cryptography library")
            self.enabled = False
            return

        self.enabled = True
        self.master_key = master_key or self._load_or_generate_key()

    def _load_or_generate_key(self) -> bytes:
        """Load master key from environment or generate new one.

        Production: Set EMPATHY_MASTER_KEY environment variable
        Development: Generates ephemeral key (warning logged)
        """
        # Check environment variable first
        if env_key := os.getenv("EMPATHY_MASTER_KEY"):
            try:
                return base64.b64decode(env_key)
            except Exception as e:
                logger.error("invalid_master_key_in_env", error=str(e))
                raise ValueError("Invalid EMPATHY_MASTER_KEY format") from e

        # Check key file
        key_file = Path.home() / ".empathy" / "master.key"
        if key_file.exists():
            try:
                return key_file.read_bytes()
            except Exception as e:
                logger.error("failed_to_load_key_file", error=str(e))

        # Generate ephemeral key (NOT for production)
        logger.warning(
            "no_master_key_found",
            message="Generating ephemeral encryption key - set EMPATHY_MASTER_KEY for production",
        )
        return AESGCM.generate_key(bit_length=256)

    def encrypt(self, plaintext: str) -> str:
        """Encrypt plaintext using AES-256-GCM.

        Args:
            plaintext: Content to encrypt

        Returns:
            Base64-encoded ciphertext with format: nonce||ciphertext||tag

        Raises:
            SecurityError: If encryption fails

        """
        if not self.enabled:
            raise SecurityError("Encryption not available - install cryptography library")

        try:
            # Generate random 96-bit nonce (12 bytes)
            nonce = os.urandom(12)

            # Create AESGCM cipher
            aesgcm = AESGCM(self.master_key)

            # Encrypt and authenticate
            ciphertext = aesgcm.encrypt(nonce, plaintext.encode("utf-8"), None)

            # Combine nonce + ciphertext for storage
            encrypted_data = nonce + ciphertext

            # Return base64-encoded
            return base64.b64encode(encrypted_data).decode("utf-8")

        except Exception as e:
            logger.error("encryption_failed", error=str(e))
            raise SecurityError(f"Encryption failed: {e}") from e

    def decrypt(self, ciphertext_b64: str) -> str:
        """Decrypt ciphertext using AES-256-GCM.

        Args:
            ciphertext_b64: Base64-encoded encrypted data

        Returns:
            Decrypted plaintext

        Raises:
            SecurityError: If decryption fails (invalid key, corrupted data, etc.)

        """
        if not self.enabled:
            raise SecurityError("Encryption not available - install cryptography library")

        try:
            # Decode from base64
            encrypted_data = base64.b64decode(ciphertext_b64)

            # Extract nonce (first 12 bytes) and ciphertext (rest)
            nonce = encrypted_data[:12]
            ciphertext = encrypted_data[12:]

            # Create AESGCM cipher
            aesgcm = AESGCM(self.master_key)

            # Decrypt and verify
            plaintext_bytes = aesgcm.decrypt(nonce, ciphertext, None)

            return plaintext_bytes.decode("utf-8")

        except Exception as e:
            logger.error("decryption_failed", error=str(e))
            raise SecurityError(f"Decryption failed: {e}") from e

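
# --- Editor's sketch (not part of the published wheel): a minimal round-trip
# through EncryptionManager above. It assumes the optional `cryptography`
# dependency is installed; the _demo_* names are hypothetical.
def _demo_encryption_round_trip() -> None:
    demo_key = AESGCM.generate_key(bit_length=256)  # 32-byte AES-256 key
    manager = EncryptionManager(master_key=demo_key)
    token = manager.encrypt("internal design note")  # base64(nonce || ciphertext+tag)
    assert manager.decrypt(token) == "internal design note"
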
class MemDocsStorage:
    """Mock/Simple MemDocs storage backend.

    In production, this would integrate with the actual MemDocs library.
    For now, provides a simple file-based storage for testing.
    """

    def __init__(self, storage_dir: str = "./memdocs_storage"):
        """Initialize storage backend.

        Args:
            storage_dir: Directory for pattern storage

        """
        self.storage_dir = Path(storage_dir)
        self.storage_dir.mkdir(parents=True, exist_ok=True)
        logger.info("memdocs_storage_initialized", storage_dir=str(self.storage_dir))

    def store(self, pattern_id: str, content: str, metadata: dict[str, Any]) -> bool:
        """Store a pattern.

        Args:
            pattern_id: Unique pattern identifier
            content: Pattern content (may be encrypted)
            metadata: Pattern metadata

        Returns:
            True if successful

        Raises:
            IOError: If storage fails

        """
        try:
            pattern_file = self.storage_dir / f"{pattern_id}.json"

            # Ensure parent directory exists
            pattern_file.parent.mkdir(parents=True, exist_ok=True)

            pattern_data = {"pattern_id": pattern_id, "content": content, "metadata": metadata}

            with open(pattern_file, "w", encoding="utf-8") as f:
                json.dump(pattern_data, f, indent=2)

            logger.debug("pattern_stored", pattern_id=pattern_id)
            return True

        except Exception as e:
            logger.error("pattern_storage_failed", pattern_id=pattern_id, error=str(e))
            raise

    def retrieve(self, pattern_id: str) -> dict[str, Any] | None:
        """Retrieve a pattern.

        Args:
            pattern_id: Unique pattern identifier

        Returns:
            Pattern data dictionary or None if not found

        """
        try:
            pattern_file = self.storage_dir / f"{pattern_id}.json"

            if not pattern_file.exists():
                logger.warning("pattern_not_found", pattern_id=pattern_id)
                return None

            with open(pattern_file, encoding="utf-8") as f:
                pattern_data: dict[str, Any] = json.load(f)

            logger.debug("pattern_retrieved", pattern_id=pattern_id)
            return pattern_data

        except Exception as e:
            logger.error("pattern_retrieval_failed", pattern_id=pattern_id, error=str(e))
            return None

    def delete(self, pattern_id: str) -> bool:
        """Delete a pattern.

        Args:
            pattern_id: Unique pattern identifier

        Returns:
            True if deleted, False if not found

        """
        try:
            pattern_file = self.storage_dir / f"{pattern_id}.json"

            if not pattern_file.exists():
                return False

            pattern_file.unlink()
            logger.info("pattern_deleted", pattern_id=pattern_id)
            return True

        except Exception as e:
            logger.error("pattern_deletion_failed", pattern_id=pattern_id, error=str(e))
            return False

    def list_patterns(
        self,
        classification: str | None = None,
        created_by: str | None = None,
    ) -> list[str]:
        """List pattern IDs matching criteria.

        Args:
            classification: Filter by classification
            created_by: Filter by creator

        Returns:
            List of pattern IDs

        """
        pattern_ids = []

        for pattern_file in self.storage_dir.glob("*.json"):
            try:
                with open(pattern_file, encoding="utf-8") as f:
                    data = json.load(f)
                    metadata = data.get("metadata", {})

                    # Apply filters
                    if classification and metadata.get("classification") != classification:
                        continue
                    if created_by and metadata.get("created_by") != created_by:
                        continue

                    pattern_ids.append(data.get("pattern_id"))

            except Exception:
                continue

        return pattern_ids

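
# --- Editor's sketch (not part of the published wheel): MemDocsStorage.store()
# above writes one JSON document per pattern under storage_dir; reading a file
# back without the class is plain json.load. The _demo_* name is hypothetical.
def _demo_read_raw_pattern(storage_dir: str, pattern_id: str) -> dict[str, Any]:
    with open(Path(storage_dir) / f"{pattern_id}.json", encoding="utf-8") as f:
        data: dict[str, Any] = json.load(f)
    # Keys written by MemDocsStorage.store(): "pattern_id", "content", "metadata"
    return data
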
class SecureMemDocsIntegration:
    """Secure integration between Claude Memory and MemDocs.

    Enforces enterprise security policies from CLAUDE.md with:
    - Automatic PII scrubbing
    - Secrets detection and blocking
    - Three-tier classification
    - Encryption for SENSITIVE data
    - Comprehensive audit logging
    - Access control enforcement

    Example:
        >>> from attune_llm.claude_memory import ClaudeMemoryConfig
        >>> config = ClaudeMemoryConfig(enabled=True, load_enterprise=True)
        >>> integration = SecureMemDocsIntegration(config)
        >>>
        >>> # Store pattern with full security pipeline
        >>> result = integration.store_pattern(
        ...     content="Patient diagnosis: diabetes type 2",
        ...     pattern_type="clinical_protocol",
        ...     user_id="doctor@hospital.com"
        ... )
        >>> # Automatically: PII scrubbed, classified as SENSITIVE, encrypted
        >>>
        >>> # Retrieve with access control
        >>> pattern = integration.retrieve_pattern(
        ...     pattern_id=result["pattern_id"],
        ...     user_id="doctor@hospital.com"
        ... )

    """

    def __init__(
        self,
        claude_memory_config=None,
        storage_dir: str = "./memdocs_storage",
        audit_log_dir: str = "/var/log/empathy",
        classification_rules: dict[Classification, ClassificationRules] | None = None,
        enable_encryption: bool = True,
        master_key: bytes | None = None,
    ):
        """Initialize Secure MemDocs Integration.

        Args:
            claude_memory_config: Configuration for Claude memory integration
            storage_dir: Directory for MemDocs storage
            audit_log_dir: Directory for audit logs
            classification_rules: Custom classification rules (uses defaults if None)
            enable_encryption: Enable encryption for SENSITIVE patterns
            master_key: Encryption master key (auto-generated if None)

        """
        self.claude_memory_config = claude_memory_config
        self.classification_rules = classification_rules or DEFAULT_CLASSIFICATION_RULES

        # Initialize security components
        self.pii_scrubber = PIIScrubber()
        self.secrets_detector = SecretsDetector()
        self.audit_logger = AuditLogger(
            log_dir=audit_log_dir,
            enable_console_logging=True,  # Development mode
        )

        # Initialize encryption
        self.encryption_enabled = enable_encryption and HAS_ENCRYPTION
        self.encryption_manager: EncryptionManager | None = None
        if self.encryption_enabled:
            self.encryption_manager = EncryptionManager(master_key)
        elif enable_encryption:
            logger.warning("encryption_disabled", reason="cryptography library not available")

        # Initialize storage backend
        self.storage = MemDocsStorage(storage_dir)

        # Load security policies from enterprise CLAUDE.md
        self.security_policies = self._load_security_policies()

        logger.info(
            "secure_memdocs_initialized",
            encryption_enabled=self.encryption_enabled,
            storage_dir=storage_dir,
            audit_dir=audit_log_dir,
        )

    def _load_security_policies(self) -> dict[str, Any]:
        """Load security policies from enterprise Claude memory.

        In production, this would parse the enterprise CLAUDE.md file
        to extract PII patterns, secret patterns, and classification rules.

        For now, returns default policies that match the architecture spec.
        """
        policies = {
            "pii_scrubbing_enabled": True,
            "secrets_detection_enabled": True,
            "classification_required": True,
            "audit_logging_enabled": True,
            "retention_enforcement_enabled": True,
        }

        logger.debug("security_policies_loaded", policies=policies)
        return policies

    def store_pattern(
        self,
        content: str,
        pattern_type: str,
        user_id: str,
        auto_classify: bool = True,
        explicit_classification: Classification | None = None,
        session_id: str = "",
        custom_metadata: dict[str, Any] | None = None,
    ) -> dict[str, Any]:
        """Store a pattern with full security pipeline.

        Pipeline:
        1. PII scrubbing
        2. Secrets detection (blocks if found)
        3. Classification (auto or explicit)
        4. Encryption (if SENSITIVE)
        5. MemDocs storage
        6. Audit logging

        Args:
            content: Pattern content to store
            pattern_type: Type of pattern (code, architecture, clinical, etc.)
            user_id: User storing the pattern
            auto_classify: Enable automatic classification
            explicit_classification: Override auto-classification
            session_id: Session identifier for audit
            custom_metadata: Additional metadata

        Returns:
            Dictionary with:
            - pattern_id: Unique identifier
            - classification: Applied classification
            - sanitization_report: PII and secrets detection results

        Raises:
            SecurityError: If secrets detected or security policy violated
            ValueError: If invalid classification specified

        Example:
            >>> result = integration.store_pattern(
            ...     content="Patient vital signs protocol",
            ...     pattern_type="clinical_protocol",
            ...     user_id="nurse@hospital.com"
            ... )
            >>> print(f"Stored as {result['classification']}")

        """
        logger.info(
            "store_pattern_started",
            user_id=user_id,
            pattern_type=pattern_type,
            auto_classify=auto_classify,
        )

        try:
            # Validate content
            if not content or not content.strip():
                raise ValueError("Content cannot be empty")

            # Step 1 & 2: PII Scrubbing + Secrets Detection (PARALLEL for performance)
            # Run both operations in parallel since they're independent
            # Secrets detection runs on original content to catch secrets before PII scrubbing
            with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
                # Submit both tasks in parallel
                pii_future = executor.submit(self.pii_scrubber.scrub, content)
                secrets_future = executor.submit(self.secrets_detector.detect, content)

                # Wait for both to complete
                sanitized_content, pii_detections = pii_future.result()
                secrets_found = secrets_future.result()

            pii_count = len(pii_detections)

            if pii_count > 0:
                logger.info(
                    "pii_scrubbed",
                    user_id=user_id,
                    pii_count=pii_count,
                    types=[d.pii_type for d in pii_detections],
                )

            if secrets_found:
                # CRITICAL: Block storage if secrets detected
                secret_types = [s.secret_type.value for s in secrets_found]
                logger.error(
                    "secrets_detected_blocking_storage",
                    user_id=user_id,
                    secret_count=len(secrets_found),
                    types=secret_types,
                )

                # Log to audit trail
                self.audit_logger.log_security_violation(
                    user_id=user_id,
                    violation_type="secrets_in_storage_attempt",
                    severity="CRITICAL",
                    details={
                        "secret_count": len(secrets_found),
                        "secret_types": secret_types,
                        "pattern_type": pattern_type,
                    },
                    session_id=session_id,
                    blocked=True,
                )

                raise SecurityError(
                    f"Secrets detected in pattern. Cannot store. Found: {secret_types}",
                )

            # Step 3: Classification
            if explicit_classification:
                classification = explicit_classification
                logger.info("explicit_classification", classification=classification.value)
            elif auto_classify:
                classification = self._classify_pattern(sanitized_content, pattern_type)
                logger.info("auto_classification", classification=classification.value)
            else:
                # Default to INTERNAL if not specified
                classification = Classification.INTERNAL
                logger.info("default_classification", classification=classification.value)

            # Step 4: Apply classification-specific controls
            rules = self.classification_rules[classification]

            # Encrypt if required
            final_content = sanitized_content
            encrypted = False

            if rules.encryption_required and self.encryption_enabled and self.encryption_manager:
                final_content = self.encryption_manager.encrypt(sanitized_content)
                encrypted = True
                logger.info("pattern_encrypted", classification=classification.value)
            elif rules.encryption_required and not self.encryption_enabled:
                logger.warning(
                    "encryption_required_but_unavailable",
                    classification=classification.value,
                    action="storing_unencrypted",
                )

            # Generate pattern ID
            pattern_id = self._generate_pattern_id(user_id, pattern_type)

            # Step 5: Store in MemDocs with metadata
            metadata = PatternMetadata(
                pattern_id=pattern_id,
                created_by=user_id,
                created_at=datetime.utcnow().isoformat() + "Z",
                classification=classification.value,
                retention_days=rules.retention_days,
                encrypted=encrypted,
                pattern_type=pattern_type,
                sanitization_applied=True,
                pii_removed=pii_count,
                secrets_detected=0,
                access_control={
                    "access_level": rules.access_level,
                    "audit_required": rules.audit_all_access,
                },
                custom_metadata=custom_metadata or {},
            )

            self.storage.store(
                pattern_id=pattern_id,
                content=final_content,
                metadata=metadata.__dict__,
            )

            # Step 6: Audit logging
            self.audit_logger.log_pattern_store(
                user_id=user_id,
                pattern_id=pattern_id,
                pattern_type=pattern_type,
                classification=classification.value,
                pii_scrubbed=pii_count,
                secrets_detected=0,
                retention_days=rules.retention_days,
                encrypted=encrypted,
                session_id=session_id,
                status="success",
            )

            logger.info(
                "pattern_stored_successfully",
                pattern_id=pattern_id,
                classification=classification.value,
                encrypted=encrypted,
            )

            return {
                "pattern_id": pattern_id,
                "classification": classification.value,
                "sanitization_report": {
                    "pii_removed": [{"type": d.pii_type, "count": 1} for d in pii_detections],
                    "pii_count": pii_count,
                    "secrets_detected": 0,
                },
                "metadata": {
                    "encrypted": encrypted,
                    "retention_days": rules.retention_days,
                    "created_at": metadata.created_at,
                },
            }

        except SecurityError:
            # Re-raise security errors
            raise
        except Exception as e:
            # Log unexpected errors
            logger.error("pattern_storage_failed", user_id=user_id, error=str(e))

            self.audit_logger.log_pattern_store(
                user_id=user_id,
                pattern_id="",
                pattern_type=pattern_type,
                classification="UNKNOWN",
                pii_scrubbed=0,
                secrets_detected=0,
                retention_days=0,
                encrypted=False,
                session_id=session_id,
                status="failed",
                error=str(e),
            )

            raise

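    # --- Editor's note (illustration, not part of the published file) ---
    # A typical call to store_pattern() above, mirroring its docstring example
    # with hypothetical values:
    #     result = integration.store_pattern(
    #         content="Deployment checklist for the internal billing service",
    #         pattern_type="company_process",
    #         user_id="dev@example.com",
    #     )
    #     result["classification"]  # "INTERNAL" via _classify_pattern()
    #     result["pattern_id"]      # e.g. "pat_<timestamp>_<hash>"
    # The returned dict also carries "sanitization_report" and "metadata",
    # as assembled at the end of store_pattern().
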
    def retrieve_pattern(
        self,
        pattern_id: str,
        user_id: str,
        check_permissions: bool = True,
        session_id: str = "",
    ) -> dict[str, Any]:
        """Retrieve a pattern with access control and decryption.

        Pipeline:
        1. Retrieve from MemDocs
        2. Check access permissions
        3. Decrypt (if SENSITIVE)
        4. Check retention policy
        5. Audit logging

        Args:
            pattern_id: Unique pattern identifier
            user_id: User retrieving the pattern
            check_permissions: Enforce access control
            session_id: Session identifier for audit

        Returns:
            Dictionary with:
            - content: Pattern content (decrypted if needed)
            - metadata: Pattern metadata

        Raises:
            PermissionError: If access denied
            ValueError: If pattern not found or retention expired
            SecurityError: If decryption fails

        Example:
            >>> pattern = integration.retrieve_pattern(
            ...     pattern_id="pat_abc123",
            ...     user_id="user@company.com"
            ... )
            >>> print(pattern["content"])

        """
        logger.info(
            "retrieve_pattern_started",
            pattern_id=pattern_id,
            user_id=user_id,
            check_permissions=check_permissions,
        )

        try:
            # Step 1: Retrieve from MemDocs
            pattern_data = self.storage.retrieve(pattern_id)

            if not pattern_data:
                logger.warning("pattern_not_found", pattern_id=pattern_id)
                raise ValueError(f"Pattern {pattern_id} not found")

            content = pattern_data["content"]
            metadata = pattern_data["metadata"]
            classification = Classification[metadata["classification"]]

            # Step 2: Check access permissions
            access_granted = True
            if check_permissions:
                access_granted = self._check_access(
                    user_id=user_id,
                    classification=classification,
                    metadata=metadata,
                )

            if not access_granted:
                logger.warning(
                    "access_denied",
                    pattern_id=pattern_id,
                    user_id=user_id,
                    classification=classification.value,
                )

                # Log access denial
                self.audit_logger.log_pattern_retrieve(
                    user_id=user_id,
                    pattern_id=pattern_id,
                    classification=classification.value,
                    access_granted=False,
                    session_id=session_id,
                    status="blocked",
                    error="Access denied",
                )

                raise PermissionError(
                    f"User {user_id} does not have access to {classification.value} pattern",
                )

            # Step 3: Decrypt if needed
            if metadata.get("encrypted", False):
                if not self.encryption_enabled or not self.encryption_manager:
                    logger.error("decryption_required_but_unavailable", pattern_id=pattern_id)
                    raise SecurityError("Encryption not available for decryption")

                content = self.encryption_manager.decrypt(content)
                logger.debug("pattern_decrypted", pattern_id=pattern_id)

            # Step 4: Check retention policy
            created_at = datetime.fromisoformat(metadata["created_at"].rstrip("Z"))
            retention_days = metadata["retention_days"]
            expiration_date = created_at + timedelta(days=retention_days)

            if datetime.utcnow() > expiration_date:
                logger.warning(
                    "pattern_retention_expired",
                    pattern_id=pattern_id,
                    created_at=metadata["created_at"],
                    retention_days=retention_days,
                )
                raise ValueError(
                    f"Pattern {pattern_id} has expired retention period "
                    f"(created: {metadata['created_at']}, retention: {retention_days} days)",
                )

            # Step 5: Audit logging
            self.audit_logger.log_pattern_retrieve(
                user_id=user_id,
                pattern_id=pattern_id,
                classification=classification.value,
                access_granted=True,
                permission_level=metadata["access_control"]["access_level"],
                session_id=session_id,
                status="success",
            )

            logger.info(
                "pattern_retrieved_successfully",
                pattern_id=pattern_id,
                classification=classification.value,
            )

            return {"content": content, "metadata": metadata}

        except (PermissionError, ValueError, SecurityError):
            # Re-raise expected errors
            raise
        except Exception as e:
            # Log unexpected errors
            logger.error("pattern_retrieval_failed", pattern_id=pattern_id, error=str(e))

            self.audit_logger.log_pattern_retrieve(
                user_id=user_id,
                pattern_id=pattern_id,
                classification="UNKNOWN",
                access_granted=False,
                session_id=session_id,
                status="failed",
                error=str(e),
            )

            raise

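    # --- Editor's note (illustration, not part of the published file) ---
    # Callers of retrieve_pattern() above usually distinguish its documented
    # failure modes (hypothetical sketch):
    #     try:
    #         pattern = integration.retrieve_pattern("pat_abc123", user_id="user@company.com")
    #     except PermissionError:  # access denied for this classification
    #         ...
    #     except SecurityError:    # encrypted pattern, but decryption unavailable/failed
    #         ...
    #     except ValueError:       # pattern not found or retention period expired
    #         ...
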
    def _classify_pattern(self, content: str, pattern_type: str) -> Classification:
        """Auto-classify pattern based on content and type.

        Classification heuristics:
        - SENSITIVE: Healthcare, financial, regulated data keywords
        - INTERNAL: Proprietary, confidential, internal keywords
        - PUBLIC: Everything else (general patterns)

        Args:
            content: Pattern content (already PII-scrubbed)
            pattern_type: Type of pattern

        Returns:
            Classification level

        """
        content_lower = content.lower()

        # SENSITIVE: Healthcare keywords (HIPAA)
        healthcare_keywords = [
            "patient",
            "medical",
            "diagnosis",
            "treatment",
            "healthcare",
            "clinical",
            "hipaa",
            "phi",
            "medical record",
            "prescription",
        ]

        # SENSITIVE: Financial keywords
        financial_keywords = [
            "financial",
            "payment",
            "credit card",
            "banking",
            "transaction",
            "pci dss",
            "payment card",
        ]

        # INTERNAL: Proprietary keywords
        proprietary_keywords = [
            "proprietary",
            "confidential",
            "internal",
            "trade secret",
            "company confidential",
            "restricted",
        ]

        # Check for SENSITIVE indicators
        if any(keyword in content_lower for keyword in healthcare_keywords):
            return Classification.SENSITIVE

        if any(keyword in content_lower for keyword in financial_keywords):
            return Classification.SENSITIVE

        # Pattern type based classification
        if pattern_type in [
            "clinical_protocol",
            "medical_guideline",
            "patient_workflow",
            "financial_procedure",
        ]:
            return Classification.SENSITIVE

        # Check for INTERNAL indicators
        if any(keyword in content_lower for keyword in proprietary_keywords):
            return Classification.INTERNAL

        if pattern_type in ["architecture", "business_logic", "company_process"]:
            return Classification.INTERNAL

        # Default to PUBLIC for general patterns
        return Classification.PUBLIC

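    # --- Editor's note (illustration, not part of the published file) ---
    # Expected outcomes of the keyword/type heuristics above, for hypothetical inputs:
    #     _classify_pattern("patient intake checklist", "workflow")    -> SENSITIVE
    #     _classify_pattern("proprietary caching strategy", "code")    -> INTERNAL
    #     _classify_pattern("generic retry helper", "code")            -> PUBLIC
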
def _check_access(
|
|
967
|
+
self,
|
|
968
|
+
user_id: str,
|
|
969
|
+
classification: Classification,
|
|
970
|
+
metadata: dict[str, Any],
|
|
971
|
+
) -> bool:
|
|
972
|
+
"""Check if user has access to pattern based on classification.
|
|
973
|
+
|
|
974
|
+
Access rules:
|
|
975
|
+
- PUBLIC: All users
|
|
976
|
+
- INTERNAL: Users on project team (simplified: always granted for demo)
|
|
977
|
+
- SENSITIVE: Explicit permission required (simplified: creator only)
|
|
978
|
+
|
|
979
|
+
Args:
|
|
980
|
+
user_id: User requesting access
|
|
981
|
+
classification: Pattern classification
|
|
982
|
+
metadata: Pattern metadata
|
|
983
|
+
|
|
984
|
+
Returns:
|
|
985
|
+
True if access granted, False otherwise
|
|
986
|
+
|
|
987
|
+
"""
|
|
988
|
+
# PUBLIC: Everyone has access
|
|
989
|
+
if classification == Classification.PUBLIC:
|
|
990
|
+
return True
|
|
991
|
+
|
|
992
|
+
# INTERNAL: Check project team membership
|
|
993
|
+
# Simplified: Grant access (production would check team membership)
|
|
994
|
+
if classification == Classification.INTERNAL:
|
|
995
|
+
logger.debug("internal_access_check", user_id=user_id, granted=True)
|
|
996
|
+
return True
|
|
997
|
+
|
|
998
|
+
# SENSITIVE: Require explicit permission
|
|
999
|
+
# Simplified: Only pattern creator has access
|
|
1000
|
+
if classification == Classification.SENSITIVE:
|
|
1001
|
+
created_by = str(metadata.get("created_by", ""))
|
|
1002
|
+
granted = user_id == created_by
|
|
1003
|
+
|
|
1004
|
+
logger.debug(
|
|
1005
|
+
"sensitive_access_check",
|
|
1006
|
+
user_id=user_id,
|
|
1007
|
+
created_by=created_by,
|
|
1008
|
+
granted=granted,
|
|
1009
|
+
)
|
|
1010
|
+
|
|
1011
|
+
return bool(granted)
|
|
1012
|
+
|
|
1013
|
+
# Default deny
|
|
1014
|
+
return False
|
|
1015
|
+
|
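The access matrix reduces to three rules plus a default deny: PUBLIC is open, INTERNAL is granted unconditionally in this simplified demo, and SENSITIVE is creator-only. A standalone restatement of that decision table (local `Classification` enum for illustration; the shipped method also emits structured debug logs):

```python
from enum import Enum
from typing import Any


class Classification(Enum):
    PUBLIC = "PUBLIC"
    INTERNAL = "INTERNAL"
    SENSITIVE = "SENSITIVE"


def check_access(user_id: str, classification: Classification, metadata: dict[str, Any]) -> bool:
    """Same decision table as _check_access(), minus logging."""
    if classification is Classification.PUBLIC:
        return True
    if classification is Classification.INTERNAL:
        return True  # production would consult project-team membership
    if classification is Classification.SENSITIVE:
        return user_id == str(metadata.get("created_by", ""))
    return False  # default deny


assert check_access("anyone", Classification.PUBLIC, {})
assert check_access("alice", Classification.SENSITIVE, {"created_by": "alice"})
assert not check_access("bob", Classification.SENSITIVE, {"created_by": "alice"})
```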
+    def _generate_pattern_id(self, user_id: str, pattern_type: str) -> str:
+        """Generate unique pattern ID.
+
+        Format: pat_{timestamp}_{hash}
+
+        Args:
+            user_id: User creating the pattern
+            pattern_type: Type of pattern
+
+        Returns:
+            Unique pattern identifier
+
+        """
+        timestamp = datetime.utcnow().strftime("%Y%m%d%H%M%S")
+
+        # Create hash from user_id, pattern_type, and random component
+        hash_input = f"{user_id}:{pattern_type}:{timestamp}:{os.urandom(8).hex()}"
+        hash_digest = hashlib.sha256(hash_input.encode()).hexdigest()[:12]
+
+        return f"pat_{timestamp}_{hash_digest}"
+
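Pattern IDs follow `pat_{UTC timestamp}_{12-char SHA-256 prefix}`, where the hash input mixes the user, pattern type, timestamp, and 8 random bytes so that two calls in the same second cannot collide on the timestamp alone. A self-contained sketch of the same construction (using a timezone-aware `datetime.now(timezone.utc)` in place of `utcnow()` purely for the example):

```python
import hashlib
import os
from datetime import datetime, timezone


def generate_pattern_id(user_id: str, pattern_type: str) -> str:
    """pat_{UTC timestamp}_{first 12 hex chars of a salted SHA-256 digest}."""
    timestamp = datetime.now(timezone.utc).strftime("%Y%m%d%H%M%S")
    salt = os.urandom(8).hex()  # random component prevents same-second collisions
    digest = hashlib.sha256(f"{user_id}:{pattern_type}:{timestamp}:{salt}".encode()).hexdigest()
    return f"pat_{timestamp}_{digest[:12]}"


print(generate_pattern_id("alice", "architecture"))
# e.g. pat_20250101120000_3f9c1a7be042  (illustrative output)
```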
+    def list_patterns(
+        self,
+        user_id: str,
+        classification: Classification | None = None,
+        pattern_type: str | None = None,
+    ) -> list[dict[str, Any]]:
+        """List patterns accessible to user.
+
+        Args:
+            user_id: User listing patterns
+            classification: Filter by classification
+            pattern_type: Filter by pattern type
+
+        Returns:
+            List of pattern summaries
+
+        """
+        all_pattern_ids = self.storage.list_patterns()
+        accessible_patterns = []
+
+        for pattern_id in all_pattern_ids:
+            try:
+                pattern_data = self.storage.retrieve(pattern_id)
+                if not pattern_data:
+                    continue
+
+                metadata = pattern_data["metadata"]
+                pat_classification = Classification[metadata["classification"]]
+
+                # Apply filters
+                if classification and pat_classification != classification:
+                    continue
+
+                if pattern_type and metadata.get("pattern_type") != pattern_type:
+                    continue
+
+                # Check access
+                if self._check_access(user_id, pat_classification, metadata):
+                    accessible_patterns.append(
+                        {
+                            "pattern_id": pattern_id,
+                            "pattern_type": metadata.get("pattern_type"),
+                            "classification": metadata["classification"],
+                            "created_by": metadata.get("created_by"),
+                            "created_at": metadata.get("created_at"),
+                            "encrypted": metadata.get("encrypted", False),
+                        },
+                    )
+
+            except Exception as e:
+                logger.warning(
+                    "failed_to_load_pattern_metadata",
+                    pattern_id=pattern_id,
+                    error=str(e),
+                )
+                continue
+
+        return accessible_patterns
+
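`list_patterns` is a filter-then-authorize loop: the optional classification and type filters are applied first, then `_check_access` decides visibility, and only metadata summaries (never pattern content) are returned. A compact sketch of that flow over plain dicts, with the creator-only SENSITIVE rule standing in for the full access check (an assumption for illustration, not the shipped storage layer):

```python
from typing import Any


def filter_accessible(
    patterns: dict[str, dict[str, Any]],
    user_id: str,
    classification: str | None = None,
    pattern_type: str | None = None,
) -> list[dict[str, Any]]:
    """Filter, then access-check, then summarize -- same order as list_patterns()."""
    summaries: list[dict[str, Any]] = []
    for pattern_id, meta in patterns.items():
        if classification and meta["classification"] != classification:
            continue
        if pattern_type and meta.get("pattern_type") != pattern_type:
            continue
        # Simplified stand-in for _check_access(): creator-only for SENSITIVE.
        if meta["classification"] == "SENSITIVE" and meta.get("created_by") != user_id:
            continue
        summaries.append({"pattern_id": pattern_id, **meta})
    return summaries


store = {
    "pat_1": {"classification": "PUBLIC", "pattern_type": "retry", "created_by": "bob"},
    "pat_2": {"classification": "SENSITIVE", "pattern_type": "clinical_protocol", "created_by": "alice"},
}
print([p["pattern_id"] for p in filter_accessible(store, user_id="bob")])  # ['pat_1']
```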
+    def delete_pattern(self, pattern_id: str, user_id: str, session_id: str = "") -> bool:
+        """Delete a pattern (with access control).
+
+        Args:
+            pattern_id: Pattern to delete
+            user_id: User requesting deletion
+            session_id: Session identifier
+
+        Returns:
+            True if deleted successfully
+
+        Raises:
+            PermissionError: If user doesn't have permission to delete
+
+        """
+        # Retrieve pattern to check permissions
+        pattern_data = self.storage.retrieve(pattern_id)
+
+        if not pattern_data:
+            logger.warning("pattern_not_found_for_deletion", pattern_id=pattern_id)
+            return False
+
+        metadata = pattern_data["metadata"]
+
+        # Only creator can delete (simplified access control)
+        if metadata.get("created_by") != user_id:
+            logger.warning(
+                "delete_permission_denied",
+                pattern_id=pattern_id,
+                user_id=user_id,
+                created_by=metadata.get("created_by"),
+            )
+            raise PermissionError(f"User {user_id} cannot delete pattern {pattern_id}")
+
+        # Delete pattern
+        deleted = self.storage.delete(pattern_id)
+
+        if deleted:
+            # Log deletion
+            self.audit_logger._write_event(
+                AuditEvent(
+                    event_type="delete_pattern",
+                    user_id=user_id,
+                    session_id=session_id,
+                    status="success",
+                    data={
+                        "pattern_id": pattern_id,
+                        "classification": metadata["classification"],
+                    },
+                ),
+            )
+
+            logger.info("pattern_deleted", pattern_id=pattern_id, user_id=user_id)
+
+        return deleted
+
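Deletion is creator-only and raises `PermissionError` rather than returning `False` on a denied request, so callers must distinguish "not found" (returns `False`) from "not allowed" (raises). A sketch of the caller-side handling against a tiny in-memory stand-in (the enclosing class name and storage backend are not shown in this hunk, so the stand-in is hypothetical):

```python
from typing import Any


class _FakeManager:
    """Tiny in-memory stand-in mirroring delete_pattern()'s semantics."""

    def __init__(self) -> None:
        self._meta: dict[str, dict[str, Any]] = {"pat_1": {"created_by": "alice"}}

    def delete_pattern(self, pattern_id: str, user_id: str, session_id: str = "") -> bool:
        meta = self._meta.get(pattern_id)
        if meta is None:
            return False  # not found
        if meta.get("created_by") != user_id:
            raise PermissionError(f"User {user_id} cannot delete pattern {pattern_id}")
        del self._meta[pattern_id]
        return True


mgr = _FakeManager()
try:
    mgr.delete_pattern("pat_1", user_id="bob")
except PermissionError as exc:
    print("denied:", exc)

print(mgr.delete_pattern("pat_1", user_id="alice"))  # True
print(mgr.delete_pattern("pat_1", user_id="alice"))  # False (already gone)
```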
+    def get_statistics(self) -> dict[str, Any]:
+        """Get statistics about stored patterns.
+
+        Returns:
+            Dictionary with pattern statistics
+
+        """
+        all_patterns = self.storage.list_patterns()
+
+        stats: dict[str, Any] = {
+            "total_patterns": len(all_patterns),
+            "by_classification": {
+                "PUBLIC": 0,
+                "INTERNAL": 0,
+                "SENSITIVE": 0,
+            },
+            "encrypted_count": 0,
+            "with_pii_scrubbed": 0,
+        }
+
+        for pattern_id in all_patterns:
+            try:
+                pattern_data = self.storage.retrieve(pattern_id)
+                if not pattern_data:
+                    continue
+
+                metadata = pattern_data["metadata"]
+                classification = metadata.get("classification", "INTERNAL")
+
+                stats["by_classification"][classification] += 1
+
+                if metadata.get("encrypted", False):
+                    stats["encrypted_count"] += 1
+
+                if metadata.get("pii_removed", 0) > 0:
+                    stats["with_pii_scrubbed"] += 1
+
+            except Exception:
+                continue
+
+        return stats
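`get_statistics` walks every stored pattern and aggregates four counters: the total, per-classification counts, how many patterns are encrypted, and how many had PII scrubbed. The same aggregation over a plain list of metadata dicts, as a sketch of the returned shape:

```python
from collections import Counter
from typing import Any


def summarize(metadatas: list[dict[str, Any]]) -> dict[str, Any]:
    """Aggregate the same counters get_statistics() reports."""
    by_class = Counter(m.get("classification", "INTERNAL") for m in metadatas)
    return {
        "total_patterns": len(metadatas),
        "by_classification": {k: by_class.get(k, 0) for k in ("PUBLIC", "INTERNAL", "SENSITIVE")},
        "encrypted_count": sum(1 for m in metadatas if m.get("encrypted", False)),
        "with_pii_scrubbed": sum(1 for m in metadatas if m.get("pii_removed", 0) > 0),
    }


print(summarize([
    {"classification": "PUBLIC"},
    {"classification": "SENSITIVE", "encrypted": True, "pii_removed": 3},
]))
# -> {'total_patterns': 2,
#     'by_classification': {'PUBLIC': 1, 'INTERNAL': 0, 'SENSITIVE': 1},
#     'encrypted_count': 1, 'with_pii_scrubbed': 1}
```

In the shipped method an unexpected classification key would raise inside the loop and be swallowed by `except Exception: continue`, so such patterns simply drop out of the counts rather than erroring.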