attune-ai 2.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- attune/__init__.py +358 -0
- attune/adaptive/__init__.py +13 -0
- attune/adaptive/task_complexity.py +127 -0
- attune/agent_monitoring.py +414 -0
- attune/cache/__init__.py +117 -0
- attune/cache/base.py +166 -0
- attune/cache/dependency_manager.py +256 -0
- attune/cache/hash_only.py +251 -0
- attune/cache/hybrid.py +457 -0
- attune/cache/storage.py +285 -0
- attune/cache_monitor.py +356 -0
- attune/cache_stats.py +298 -0
- attune/cli/__init__.py +152 -0
- attune/cli/__main__.py +12 -0
- attune/cli/commands/__init__.py +1 -0
- attune/cli/commands/batch.py +264 -0
- attune/cli/commands/cache.py +248 -0
- attune/cli/commands/help.py +331 -0
- attune/cli/commands/info.py +140 -0
- attune/cli/commands/inspect.py +436 -0
- attune/cli/commands/inspection.py +57 -0
- attune/cli/commands/memory.py +48 -0
- attune/cli/commands/metrics.py +92 -0
- attune/cli/commands/orchestrate.py +184 -0
- attune/cli/commands/patterns.py +207 -0
- attune/cli/commands/profiling.py +202 -0
- attune/cli/commands/provider.py +98 -0
- attune/cli/commands/routing.py +285 -0
- attune/cli/commands/setup.py +96 -0
- attune/cli/commands/status.py +235 -0
- attune/cli/commands/sync.py +166 -0
- attune/cli/commands/tier.py +121 -0
- attune/cli/commands/utilities.py +114 -0
- attune/cli/commands/workflow.py +579 -0
- attune/cli/core.py +32 -0
- attune/cli/parsers/__init__.py +68 -0
- attune/cli/parsers/batch.py +118 -0
- attune/cli/parsers/cache.py +65 -0
- attune/cli/parsers/help.py +41 -0
- attune/cli/parsers/info.py +26 -0
- attune/cli/parsers/inspect.py +66 -0
- attune/cli/parsers/metrics.py +42 -0
- attune/cli/parsers/orchestrate.py +61 -0
- attune/cli/parsers/patterns.py +54 -0
- attune/cli/parsers/provider.py +40 -0
- attune/cli/parsers/routing.py +110 -0
- attune/cli/parsers/setup.py +42 -0
- attune/cli/parsers/status.py +47 -0
- attune/cli/parsers/sync.py +31 -0
- attune/cli/parsers/tier.py +33 -0
- attune/cli/parsers/workflow.py +77 -0
- attune/cli/utils/__init__.py +1 -0
- attune/cli/utils/data.py +242 -0
- attune/cli/utils/helpers.py +68 -0
- attune/cli_legacy.py +3957 -0
- attune/cli_minimal.py +1159 -0
- attune/cli_router.py +437 -0
- attune/cli_unified.py +814 -0
- attune/config/__init__.py +66 -0
- attune/config/xml_config.py +286 -0
- attune/config.py +545 -0
- attune/coordination.py +870 -0
- attune/core.py +1511 -0
- attune/core_modules/__init__.py +15 -0
- attune/cost_tracker.py +626 -0
- attune/dashboard/__init__.py +41 -0
- attune/dashboard/app.py +512 -0
- attune/dashboard/simple_server.py +435 -0
- attune/dashboard/standalone_server.py +547 -0
- attune/discovery.py +306 -0
- attune/emergence.py +306 -0
- attune/exceptions.py +123 -0
- attune/feedback_loops.py +373 -0
- attune/hot_reload/README.md +473 -0
- attune/hot_reload/__init__.py +62 -0
- attune/hot_reload/config.py +83 -0
- attune/hot_reload/integration.py +229 -0
- attune/hot_reload/reloader.py +298 -0
- attune/hot_reload/watcher.py +183 -0
- attune/hot_reload/websocket.py +177 -0
- attune/levels.py +577 -0
- attune/leverage_points.py +441 -0
- attune/logging_config.py +261 -0
- attune/mcp/__init__.py +10 -0
- attune/mcp/server.py +506 -0
- attune/memory/__init__.py +237 -0
- attune/memory/claude_memory.py +469 -0
- attune/memory/config.py +224 -0
- attune/memory/control_panel.py +1290 -0
- attune/memory/control_panel_support.py +145 -0
- attune/memory/cross_session.py +845 -0
- attune/memory/edges.py +179 -0
- attune/memory/encryption.py +159 -0
- attune/memory/file_session.py +770 -0
- attune/memory/graph.py +570 -0
- attune/memory/long_term.py +913 -0
- attune/memory/long_term_types.py +99 -0
- attune/memory/mixins/__init__.py +25 -0
- attune/memory/mixins/backend_init_mixin.py +249 -0
- attune/memory/mixins/capabilities_mixin.py +208 -0
- attune/memory/mixins/handoff_mixin.py +208 -0
- attune/memory/mixins/lifecycle_mixin.py +49 -0
- attune/memory/mixins/long_term_mixin.py +352 -0
- attune/memory/mixins/promotion_mixin.py +109 -0
- attune/memory/mixins/short_term_mixin.py +182 -0
- attune/memory/nodes.py +179 -0
- attune/memory/redis_bootstrap.py +540 -0
- attune/memory/security/__init__.py +31 -0
- attune/memory/security/audit_logger.py +932 -0
- attune/memory/security/pii_scrubber.py +640 -0
- attune/memory/security/secrets_detector.py +678 -0
- attune/memory/short_term.py +2192 -0
- attune/memory/simple_storage.py +302 -0
- attune/memory/storage/__init__.py +15 -0
- attune/memory/storage_backend.py +167 -0
- attune/memory/summary_index.py +583 -0
- attune/memory/types.py +446 -0
- attune/memory/unified.py +182 -0
- attune/meta_workflows/__init__.py +74 -0
- attune/meta_workflows/agent_creator.py +248 -0
- attune/meta_workflows/builtin_templates.py +567 -0
- attune/meta_workflows/cli_commands/__init__.py +56 -0
- attune/meta_workflows/cli_commands/agent_commands.py +321 -0
- attune/meta_workflows/cli_commands/analytics_commands.py +442 -0
- attune/meta_workflows/cli_commands/config_commands.py +232 -0
- attune/meta_workflows/cli_commands/memory_commands.py +182 -0
- attune/meta_workflows/cli_commands/template_commands.py +354 -0
- attune/meta_workflows/cli_commands/workflow_commands.py +382 -0
- attune/meta_workflows/cli_meta_workflows.py +59 -0
- attune/meta_workflows/form_engine.py +292 -0
- attune/meta_workflows/intent_detector.py +409 -0
- attune/meta_workflows/models.py +569 -0
- attune/meta_workflows/pattern_learner.py +738 -0
- attune/meta_workflows/plan_generator.py +384 -0
- attune/meta_workflows/session_context.py +397 -0
- attune/meta_workflows/template_registry.py +229 -0
- attune/meta_workflows/workflow.py +984 -0
- attune/metrics/__init__.py +12 -0
- attune/metrics/collector.py +31 -0
- attune/metrics/prompt_metrics.py +194 -0
- attune/models/__init__.py +172 -0
- attune/models/__main__.py +13 -0
- attune/models/adaptive_routing.py +437 -0
- attune/models/auth_cli.py +444 -0
- attune/models/auth_strategy.py +450 -0
- attune/models/cli.py +655 -0
- attune/models/empathy_executor.py +354 -0
- attune/models/executor.py +257 -0
- attune/models/fallback.py +762 -0
- attune/models/provider_config.py +282 -0
- attune/models/registry.py +472 -0
- attune/models/tasks.py +359 -0
- attune/models/telemetry/__init__.py +71 -0
- attune/models/telemetry/analytics.py +594 -0
- attune/models/telemetry/backend.py +196 -0
- attune/models/telemetry/data_models.py +431 -0
- attune/models/telemetry/storage.py +489 -0
- attune/models/token_estimator.py +420 -0
- attune/models/validation.py +280 -0
- attune/monitoring/__init__.py +52 -0
- attune/monitoring/alerts.py +946 -0
- attune/monitoring/alerts_cli.py +448 -0
- attune/monitoring/multi_backend.py +271 -0
- attune/monitoring/otel_backend.py +362 -0
- attune/optimization/__init__.py +19 -0
- attune/optimization/context_optimizer.py +272 -0
- attune/orchestration/__init__.py +67 -0
- attune/orchestration/agent_templates.py +707 -0
- attune/orchestration/config_store.py +499 -0
- attune/orchestration/execution_strategies.py +2111 -0
- attune/orchestration/meta_orchestrator.py +1168 -0
- attune/orchestration/pattern_learner.py +696 -0
- attune/orchestration/real_tools.py +931 -0
- attune/pattern_cache.py +187 -0
- attune/pattern_library.py +542 -0
- attune/patterns/debugging/all_patterns.json +81 -0
- attune/patterns/debugging/workflow_20260107_1770825e.json +77 -0
- attune/patterns/refactoring_memory.json +89 -0
- attune/persistence.py +564 -0
- attune/platform_utils.py +265 -0
- attune/plugins/__init__.py +28 -0
- attune/plugins/base.py +361 -0
- attune/plugins/registry.py +268 -0
- attune/project_index/__init__.py +32 -0
- attune/project_index/cli.py +335 -0
- attune/project_index/index.py +667 -0
- attune/project_index/models.py +504 -0
- attune/project_index/reports.py +474 -0
- attune/project_index/scanner.py +777 -0
- attune/project_index/scanner_parallel.py +291 -0
- attune/prompts/__init__.py +61 -0
- attune/prompts/config.py +77 -0
- attune/prompts/context.py +177 -0
- attune/prompts/parser.py +285 -0
- attune/prompts/registry.py +313 -0
- attune/prompts/templates.py +208 -0
- attune/redis_config.py +302 -0
- attune/redis_memory.py +799 -0
- attune/resilience/__init__.py +56 -0
- attune/resilience/circuit_breaker.py +256 -0
- attune/resilience/fallback.py +179 -0
- attune/resilience/health.py +300 -0
- attune/resilience/retry.py +209 -0
- attune/resilience/timeout.py +135 -0
- attune/routing/__init__.py +43 -0
- attune/routing/chain_executor.py +433 -0
- attune/routing/classifier.py +217 -0
- attune/routing/smart_router.py +234 -0
- attune/routing/workflow_registry.py +343 -0
- attune/scaffolding/README.md +589 -0
- attune/scaffolding/__init__.py +35 -0
- attune/scaffolding/__main__.py +14 -0
- attune/scaffolding/cli.py +240 -0
- attune/scaffolding/templates/base_wizard.py.jinja2 +121 -0
- attune/scaffolding/templates/coach_wizard.py.jinja2 +321 -0
- attune/scaffolding/templates/domain_wizard.py.jinja2 +408 -0
- attune/scaffolding/templates/linear_flow_wizard.py.jinja2 +203 -0
- attune/socratic/__init__.py +256 -0
- attune/socratic/ab_testing.py +958 -0
- attune/socratic/blueprint.py +533 -0
- attune/socratic/cli.py +703 -0
- attune/socratic/collaboration.py +1114 -0
- attune/socratic/domain_templates.py +924 -0
- attune/socratic/embeddings.py +738 -0
- attune/socratic/engine.py +794 -0
- attune/socratic/explainer.py +682 -0
- attune/socratic/feedback.py +772 -0
- attune/socratic/forms.py +629 -0
- attune/socratic/generator.py +732 -0
- attune/socratic/llm_analyzer.py +637 -0
- attune/socratic/mcp_server.py +702 -0
- attune/socratic/session.py +312 -0
- attune/socratic/storage.py +667 -0
- attune/socratic/success.py +730 -0
- attune/socratic/visual_editor.py +860 -0
- attune/socratic/web_ui.py +958 -0
- attune/telemetry/__init__.py +39 -0
- attune/telemetry/agent_coordination.py +475 -0
- attune/telemetry/agent_tracking.py +367 -0
- attune/telemetry/approval_gates.py +545 -0
- attune/telemetry/cli.py +1231 -0
- attune/telemetry/commands/__init__.py +14 -0
- attune/telemetry/commands/dashboard_commands.py +696 -0
- attune/telemetry/event_streaming.py +409 -0
- attune/telemetry/feedback_loop.py +567 -0
- attune/telemetry/usage_tracker.py +591 -0
- attune/templates.py +754 -0
- attune/test_generator/__init__.py +38 -0
- attune/test_generator/__main__.py +14 -0
- attune/test_generator/cli.py +234 -0
- attune/test_generator/generator.py +355 -0
- attune/test_generator/risk_analyzer.py +216 -0
- attune/test_generator/templates/unit_test.py.jinja2 +272 -0
- attune/tier_recommender.py +384 -0
- attune/tools.py +183 -0
- attune/trust/__init__.py +28 -0
- attune/trust/circuit_breaker.py +579 -0
- attune/trust_building.py +527 -0
- attune/validation/__init__.py +19 -0
- attune/validation/xml_validator.py +281 -0
- attune/vscode_bridge.py +173 -0
- attune/workflow_commands.py +780 -0
- attune/workflow_patterns/__init__.py +33 -0
- attune/workflow_patterns/behavior.py +249 -0
- attune/workflow_patterns/core.py +76 -0
- attune/workflow_patterns/output.py +99 -0
- attune/workflow_patterns/registry.py +255 -0
- attune/workflow_patterns/structural.py +288 -0
- attune/workflows/__init__.py +539 -0
- attune/workflows/autonomous_test_gen.py +1268 -0
- attune/workflows/base.py +2667 -0
- attune/workflows/batch_processing.py +342 -0
- attune/workflows/bug_predict.py +1084 -0
- attune/workflows/builder.py +273 -0
- attune/workflows/caching.py +253 -0
- attune/workflows/code_review.py +1048 -0
- attune/workflows/code_review_adapters.py +312 -0
- attune/workflows/code_review_pipeline.py +722 -0
- attune/workflows/config.py +645 -0
- attune/workflows/dependency_check.py +644 -0
- attune/workflows/document_gen/__init__.py +25 -0
- attune/workflows/document_gen/config.py +30 -0
- attune/workflows/document_gen/report_formatter.py +162 -0
- attune/workflows/document_gen/workflow.py +1426 -0
- attune/workflows/document_manager.py +216 -0
- attune/workflows/document_manager_README.md +134 -0
- attune/workflows/documentation_orchestrator.py +1205 -0
- attune/workflows/history.py +510 -0
- attune/workflows/keyboard_shortcuts/__init__.py +39 -0
- attune/workflows/keyboard_shortcuts/generators.py +391 -0
- attune/workflows/keyboard_shortcuts/parsers.py +416 -0
- attune/workflows/keyboard_shortcuts/prompts.py +295 -0
- attune/workflows/keyboard_shortcuts/schema.py +193 -0
- attune/workflows/keyboard_shortcuts/workflow.py +509 -0
- attune/workflows/llm_base.py +363 -0
- attune/workflows/manage_docs.py +87 -0
- attune/workflows/manage_docs_README.md +134 -0
- attune/workflows/manage_documentation.py +821 -0
- attune/workflows/new_sample_workflow1.py +149 -0
- attune/workflows/new_sample_workflow1_README.md +150 -0
- attune/workflows/orchestrated_health_check.py +849 -0
- attune/workflows/orchestrated_release_prep.py +600 -0
- attune/workflows/output.py +413 -0
- attune/workflows/perf_audit.py +863 -0
- attune/workflows/pr_review.py +762 -0
- attune/workflows/progress.py +785 -0
- attune/workflows/progress_server.py +322 -0
- attune/workflows/progressive/README 2.md +454 -0
- attune/workflows/progressive/README.md +454 -0
- attune/workflows/progressive/__init__.py +82 -0
- attune/workflows/progressive/cli.py +219 -0
- attune/workflows/progressive/core.py +488 -0
- attune/workflows/progressive/orchestrator.py +723 -0
- attune/workflows/progressive/reports.py +520 -0
- attune/workflows/progressive/telemetry.py +274 -0
- attune/workflows/progressive/test_gen.py +495 -0
- attune/workflows/progressive/workflow.py +589 -0
- attune/workflows/refactor_plan.py +694 -0
- attune/workflows/release_prep.py +895 -0
- attune/workflows/release_prep_crew.py +969 -0
- attune/workflows/research_synthesis.py +404 -0
- attune/workflows/routing.py +168 -0
- attune/workflows/secure_release.py +593 -0
- attune/workflows/security_adapters.py +297 -0
- attune/workflows/security_audit.py +1329 -0
- attune/workflows/security_audit_phase3.py +355 -0
- attune/workflows/seo_optimization.py +633 -0
- attune/workflows/step_config.py +234 -0
- attune/workflows/telemetry_mixin.py +269 -0
- attune/workflows/test5.py +125 -0
- attune/workflows/test5_README.md +158 -0
- attune/workflows/test_coverage_boost_crew.py +849 -0
- attune/workflows/test_gen/__init__.py +52 -0
- attune/workflows/test_gen/ast_analyzer.py +249 -0
- attune/workflows/test_gen/config.py +88 -0
- attune/workflows/test_gen/data_models.py +38 -0
- attune/workflows/test_gen/report_formatter.py +289 -0
- attune/workflows/test_gen/test_templates.py +381 -0
- attune/workflows/test_gen/workflow.py +655 -0
- attune/workflows/test_gen.py +54 -0
- attune/workflows/test_gen_behavioral.py +477 -0
- attune/workflows/test_gen_parallel.py +341 -0
- attune/workflows/test_lifecycle.py +526 -0
- attune/workflows/test_maintenance.py +627 -0
- attune/workflows/test_maintenance_cli.py +590 -0
- attune/workflows/test_maintenance_crew.py +840 -0
- attune/workflows/test_runner.py +622 -0
- attune/workflows/tier_tracking.py +531 -0
- attune/workflows/xml_enhanced_crew.py +285 -0
- attune_ai-2.0.0.dist-info/METADATA +1026 -0
- attune_ai-2.0.0.dist-info/RECORD +457 -0
- attune_ai-2.0.0.dist-info/WHEEL +5 -0
- attune_ai-2.0.0.dist-info/entry_points.txt +26 -0
- attune_ai-2.0.0.dist-info/licenses/LICENSE +201 -0
- attune_ai-2.0.0.dist-info/licenses/LICENSE_CHANGE_ANNOUNCEMENT.md +101 -0
- attune_ai-2.0.0.dist-info/top_level.txt +5 -0
- attune_healthcare/__init__.py +13 -0
- attune_healthcare/monitors/__init__.py +9 -0
- attune_healthcare/monitors/clinical_protocol_monitor.py +315 -0
- attune_healthcare/monitors/monitoring/__init__.py +44 -0
- attune_healthcare/monitors/monitoring/protocol_checker.py +300 -0
- attune_healthcare/monitors/monitoring/protocol_loader.py +214 -0
- attune_healthcare/monitors/monitoring/sensor_parsers.py +306 -0
- attune_healthcare/monitors/monitoring/trajectory_analyzer.py +389 -0
- attune_llm/README.md +553 -0
- attune_llm/__init__.py +28 -0
- attune_llm/agent_factory/__init__.py +53 -0
- attune_llm/agent_factory/adapters/__init__.py +85 -0
- attune_llm/agent_factory/adapters/autogen_adapter.py +312 -0
- attune_llm/agent_factory/adapters/crewai_adapter.py +483 -0
- attune_llm/agent_factory/adapters/haystack_adapter.py +298 -0
- attune_llm/agent_factory/adapters/langchain_adapter.py +362 -0
- attune_llm/agent_factory/adapters/langgraph_adapter.py +333 -0
- attune_llm/agent_factory/adapters/native.py +228 -0
- attune_llm/agent_factory/adapters/wizard_adapter.py +423 -0
- attune_llm/agent_factory/base.py +305 -0
- attune_llm/agent_factory/crews/__init__.py +67 -0
- attune_llm/agent_factory/crews/code_review.py +1113 -0
- attune_llm/agent_factory/crews/health_check.py +1262 -0
- attune_llm/agent_factory/crews/refactoring.py +1128 -0
- attune_llm/agent_factory/crews/security_audit.py +1018 -0
- attune_llm/agent_factory/decorators.py +287 -0
- attune_llm/agent_factory/factory.py +558 -0
- attune_llm/agent_factory/framework.py +193 -0
- attune_llm/agent_factory/memory_integration.py +328 -0
- attune_llm/agent_factory/resilient.py +320 -0
- attune_llm/agents_md/__init__.py +22 -0
- attune_llm/agents_md/loader.py +218 -0
- attune_llm/agents_md/parser.py +271 -0
- attune_llm/agents_md/registry.py +307 -0
- attune_llm/claude_memory.py +466 -0
- attune_llm/cli/__init__.py +8 -0
- attune_llm/cli/sync_claude.py +487 -0
- attune_llm/code_health.py +1313 -0
- attune_llm/commands/__init__.py +51 -0
- attune_llm/commands/context.py +375 -0
- attune_llm/commands/loader.py +301 -0
- attune_llm/commands/models.py +231 -0
- attune_llm/commands/parser.py +371 -0
- attune_llm/commands/registry.py +429 -0
- attune_llm/config/__init__.py +29 -0
- attune_llm/config/unified.py +291 -0
- attune_llm/context/__init__.py +22 -0
- attune_llm/context/compaction.py +455 -0
- attune_llm/context/manager.py +434 -0
- attune_llm/contextual_patterns.py +361 -0
- attune_llm/core.py +907 -0
- attune_llm/git_pattern_extractor.py +435 -0
- attune_llm/hooks/__init__.py +24 -0
- attune_llm/hooks/config.py +306 -0
- attune_llm/hooks/executor.py +289 -0
- attune_llm/hooks/registry.py +302 -0
- attune_llm/hooks/scripts/__init__.py +39 -0
- attune_llm/hooks/scripts/evaluate_session.py +201 -0
- attune_llm/hooks/scripts/first_time_init.py +285 -0
- attune_llm/hooks/scripts/pre_compact.py +207 -0
- attune_llm/hooks/scripts/session_end.py +183 -0
- attune_llm/hooks/scripts/session_start.py +163 -0
- attune_llm/hooks/scripts/suggest_compact.py +225 -0
- attune_llm/learning/__init__.py +30 -0
- attune_llm/learning/evaluator.py +438 -0
- attune_llm/learning/extractor.py +514 -0
- attune_llm/learning/storage.py +560 -0
- attune_llm/levels.py +227 -0
- attune_llm/pattern_confidence.py +414 -0
- attune_llm/pattern_resolver.py +272 -0
- attune_llm/pattern_summary.py +350 -0
- attune_llm/providers.py +967 -0
- attune_llm/routing/__init__.py +32 -0
- attune_llm/routing/model_router.py +362 -0
- attune_llm/security/IMPLEMENTATION_SUMMARY.md +413 -0
- attune_llm/security/PHASE2_COMPLETE.md +384 -0
- attune_llm/security/PHASE2_SECRETS_DETECTOR_COMPLETE.md +271 -0
- attune_llm/security/QUICK_REFERENCE.md +316 -0
- attune_llm/security/README.md +262 -0
- attune_llm/security/__init__.py +62 -0
- attune_llm/security/audit_logger.py +929 -0
- attune_llm/security/audit_logger_example.py +152 -0
- attune_llm/security/pii_scrubber.py +640 -0
- attune_llm/security/secrets_detector.py +678 -0
- attune_llm/security/secrets_detector_example.py +304 -0
- attune_llm/security/secure_memdocs.py +1192 -0
- attune_llm/security/secure_memdocs_example.py +278 -0
- attune_llm/session_status.py +745 -0
- attune_llm/state.py +246 -0
- attune_llm/utils/__init__.py +5 -0
- attune_llm/utils/tokens.py +349 -0
- attune_software/SOFTWARE_PLUGIN_README.md +57 -0
- attune_software/__init__.py +13 -0
- attune_software/cli/__init__.py +120 -0
- attune_software/cli/inspect.py +362 -0
- attune_software/cli.py +574 -0
- attune_software/plugin.py +188 -0
- workflow_scaffolding/__init__.py +11 -0
- workflow_scaffolding/__main__.py +12 -0
- workflow_scaffolding/cli.py +206 -0
- workflow_scaffolding/generator.py +265 -0
attune_llm/core.py
ADDED
|
@@ -0,0 +1,907 @@
|
|
|
1
|
+
"""Empathy LLM - Core Wrapper
|
|
2
|
+
|
|
3
|
+
Main class that wraps any LLM provider with Empathy Framework levels.
|
|
4
|
+
|
|
5
|
+
Copyright 2025 Smart AI Memory, LLC
|
|
6
|
+
Licensed under Fair Source 0.9
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
import asyncio
|
|
10
|
+
import logging
|
|
11
|
+
import time
|
|
12
|
+
from typing import Any
|
|
13
|
+
|
|
14
|
+
# Import from consolidated memory module
|
|
15
|
+
from attune.memory import (
|
|
16
|
+
AuditLogger,
|
|
17
|
+
ClaudeMemoryConfig,
|
|
18
|
+
ClaudeMemoryLoader,
|
|
19
|
+
PIIScrubber,
|
|
20
|
+
SecretsDetector,
|
|
21
|
+
SecurityError,
|
|
22
|
+
)
|
|
23
|
+
|
|
24
|
+
from .levels import EmpathyLevel
|
|
25
|
+
from .providers import (
|
|
26
|
+
AnthropicProvider,
|
|
27
|
+
BaseLLMProvider,
|
|
28
|
+
GeminiProvider,
|
|
29
|
+
LocalProvider,
|
|
30
|
+
OpenAIProvider,
|
|
31
|
+
)
|
|
32
|
+
from .routing import ModelRouter
|
|
33
|
+
from .state import CollaborationState, PatternType, UserPattern
|
|
34
|
+
|
|
35
|
+
logger = logging.getLogger(__name__)
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
class EmpathyLLM:
    """Wraps any LLM provider with Empathy Framework levels.

    Automatically progresses from Level 1 (reactive) to Level 4 (anticipatory)
    based on user collaboration state, never exceeding the configured
    ``target_level``.

    Supported providers: "anthropic", "openai", "google"/"gemini", and
    "local" (HTTP endpoint, e.g. an Ollama server).

    Security Features (Phase 3):
    - PII Scrubbing: Automatically detect and redact PII from user inputs
    - Secrets Detection: Block requests containing API keys, passwords, etc.
    - Audit Logging: Comprehensive compliance logging (SOC2, HIPAA, GDPR)
    - Backward Compatible: Security disabled by default

    Example:
        >>> llm = EmpathyLLM(provider="anthropic", target_level=4)
        >>> response = await llm.interact(
        ...     user_id="developer_123",
        ...     user_input="Help me optimize my code",
        ...     context={"code_snippet": "..."}
        ... )
        >>> print(response["content"])

    Example with Security:
        >>> llm = EmpathyLLM(
        ...     provider="anthropic",
        ...     target_level=4,
        ...     enable_security=True,
        ...     security_config={
        ...         "audit_log_dir": "/var/log/empathy",
        ...         "block_on_secrets": True,
        ...         "enable_pii_scrubbing": True
        ...     }
        ... )
        >>> response = await llm.interact(
        ...     user_id="user@company.com",
        ...     user_input="My email is john@example.com"
        ... )
        >>> # PII automatically scrubbed, request logged

    Example with Model Routing (Cost Optimization):
        >>> llm = EmpathyLLM(
        ...     provider="anthropic",
        ...     enable_model_routing=True  # Enable smart model selection
        ... )
        >>> # Simple task -> uses Haiku (cheap)
        >>> response = await llm.interact(
        ...     user_id="dev",
        ...     user_input="Summarize this function",
        ...     task_type="summarize"
        ... )
        >>> # Complex task -> uses Opus (premium)
        >>> response = await llm.interact(
        ...     user_id="dev",
        ...     user_input="Design the architecture",
        ...     task_type="architectural_decision"
        ... )

    """
|
|
95
|
+
|
|
96
|
+
def __init__(
|
|
97
|
+
self,
|
|
98
|
+
provider: str = "anthropic",
|
|
99
|
+
target_level: int = 3,
|
|
100
|
+
api_key: str | None = None,
|
|
101
|
+
model: str | None = None,
|
|
102
|
+
pattern_library: dict | None = None,
|
|
103
|
+
claude_memory_config: ClaudeMemoryConfig | None = None,
|
|
104
|
+
project_root: str | None = None,
|
|
105
|
+
enable_security: bool = False,
|
|
106
|
+
security_config: dict | None = None,
|
|
107
|
+
enable_model_routing: bool = False,
|
|
108
|
+
**kwargs,
|
|
109
|
+
):
|
|
110
|
+
"""Initialize EmpathyLLM.
|
|
111
|
+
|
|
112
|
+
Args:
|
|
113
|
+
provider: "anthropic", "openai", or "local"
|
|
114
|
+
target_level: Target empathy level (1-5)
|
|
115
|
+
api_key: API key for provider (if needed)
|
|
116
|
+
model: Specific model to use (overrides routing if set)
|
|
117
|
+
pattern_library: Shared pattern library (Level 5)
|
|
118
|
+
claude_memory_config: Configuration for Claude memory integration (v1.8.0+)
|
|
119
|
+
project_root: Project root directory for loading .claude/CLAUDE.md
|
|
120
|
+
enable_security: Enable Phase 2 security controls (default: False)
|
|
121
|
+
security_config: Security configuration dictionary with options:
|
|
122
|
+
- audit_log_dir: Directory for audit logs (default: "./logs")
|
|
123
|
+
- block_on_secrets: Block requests with detected secrets (default: True)
|
|
124
|
+
- enable_pii_scrubbing: Enable PII detection/scrubbing (default: True)
|
|
125
|
+
- enable_name_detection: Enable name PII detection (default: False)
|
|
126
|
+
- enable_audit_logging: Enable audit logging (default: True)
|
|
127
|
+
- enable_console_logging: Log to console for debugging (default: False)
|
|
128
|
+
enable_model_routing: Enable smart model routing for cost optimization.
|
|
129
|
+
When enabled, uses ModelRouter to select appropriate model tier:
|
|
130
|
+
- CHEAP (Haiku): summarize, classify, triage tasks
|
|
131
|
+
- CAPABLE (Sonnet): code generation, bug fixes, security review
|
|
132
|
+
- PREMIUM (Opus): coordination, synthesis, architectural decisions
|
|
133
|
+
**kwargs: Provider-specific options
|
|
134
|
+
|
|
135
|
+
"""
|
|
136
|
+
self.target_level = target_level
|
|
137
|
+
self.pattern_library = pattern_library or {}
|
|
138
|
+
self.project_root = project_root
|
|
139
|
+
self._provider_name = provider
|
|
140
|
+
self._explicit_model = model # Track if user explicitly set a model
|
|
141
|
+
|
|
142
|
+
# Initialize provider
|
|
143
|
+
self.provider = self._create_provider(provider, api_key, model, **kwargs)
|
|
144
|
+
|
|
145
|
+
# Track collaboration states for different users
|
|
146
|
+
self.states: dict[str, CollaborationState] = {}
|
|
147
|
+
|
|
148
|
+
# Initialize model routing for cost optimization
|
|
149
|
+
self.enable_model_routing = enable_model_routing
|
|
150
|
+
self.model_router: ModelRouter | None = None
|
|
151
|
+
if enable_model_routing:
|
|
152
|
+
self.model_router = ModelRouter(default_provider=provider)
|
|
153
|
+
logger.info(f"Model routing enabled for provider: {provider}")
|
|
154
|
+
|
|
155
|
+
# Initialize Claude memory integration (v1.8.0+)
|
|
156
|
+
self.claude_memory_config = claude_memory_config
|
|
157
|
+
self.claude_memory_loader = None
|
|
158
|
+
self._cached_memory = None
|
|
159
|
+
|
|
160
|
+
if claude_memory_config and claude_memory_config.enabled:
|
|
161
|
+
self.claude_memory_loader = ClaudeMemoryLoader(claude_memory_config)
|
|
162
|
+
# Load memory once at initialization
|
|
163
|
+
self._cached_memory = self.claude_memory_loader.load_all_memory(project_root)
|
|
164
|
+
logger.info(
|
|
165
|
+
f"EmpathyLLM initialized with Claude memory: "
|
|
166
|
+
f"{len(self._cached_memory)} chars loaded",
|
|
167
|
+
)
|
|
168
|
+
|
|
169
|
+
# Initialize Phase 3 security controls (v1.8.0+)
|
|
170
|
+
self.enable_security = enable_security
|
|
171
|
+
self.security_config = security_config or {}
|
|
172
|
+
self.pii_scrubber = None
|
|
173
|
+
self.secrets_detector = None
|
|
174
|
+
self.audit_logger = None
|
|
175
|
+
|
|
176
|
+
if enable_security:
|
|
177
|
+
self._initialize_security()
|
|
178
|
+
|
|
179
|
+
logger.info(
|
|
180
|
+
f"EmpathyLLM initialized: provider={provider}, target_level={target_level}, "
|
|
181
|
+
f"security={'enabled' if enable_security else 'disabled'}, "
|
|
182
|
+
f"model_routing={'enabled' if enable_model_routing else 'disabled'}",
|
|
183
|
+
)
|
|
184
|
+
|
|
185
|
+
def _initialize_security(self):
|
|
186
|
+
"""Initialize Phase 3 security modules based on configuration"""
|
|
187
|
+
# Extract security config options
|
|
188
|
+
enable_pii_scrubbing = self.security_config.get("enable_pii_scrubbing", True)
|
|
189
|
+
enable_name_detection = self.security_config.get("enable_name_detection", False)
|
|
190
|
+
enable_audit_logging = self.security_config.get("enable_audit_logging", True)
|
|
191
|
+
audit_log_dir = self.security_config.get("audit_log_dir", "./logs")
|
|
192
|
+
enable_console_logging = self.security_config.get("enable_console_logging", False)
|
|
193
|
+
|
|
194
|
+
# Initialize PII Scrubber
|
|
195
|
+
if enable_pii_scrubbing:
|
|
196
|
+
self.pii_scrubber = PIIScrubber(enable_name_detection=enable_name_detection)
|
|
197
|
+
logger.info("PII Scrubber initialized")
|
|
198
|
+
|
|
199
|
+
# Initialize Secrets Detector
|
|
200
|
+
self.secrets_detector = SecretsDetector(
|
|
201
|
+
enable_entropy_analysis=True,
|
|
202
|
+
entropy_threshold=4.5,
|
|
203
|
+
min_entropy_length=20,
|
|
204
|
+
)
|
|
205
|
+
logger.info("Secrets Detector initialized")
|
|
206
|
+
|
|
207
|
+
# Initialize Audit Logger
|
|
208
|
+
if enable_audit_logging:
|
|
209
|
+
self.audit_logger = AuditLogger(
|
|
210
|
+
log_dir=audit_log_dir,
|
|
211
|
+
enable_console_logging=enable_console_logging,
|
|
212
|
+
)
|
|
213
|
+
logger.info(f"Audit Logger initialized: {audit_log_dir}")
|
|
214
|
+
|
|
215
|
+
def _create_provider(
|
|
216
|
+
self,
|
|
217
|
+
provider: str,
|
|
218
|
+
api_key: str | None,
|
|
219
|
+
model: str | None,
|
|
220
|
+
**kwargs,
|
|
221
|
+
) -> BaseLLMProvider:
|
|
222
|
+
"""Create appropriate provider instance
|
|
223
|
+
|
|
224
|
+
Falls back to environment variables if api_key not provided:
|
|
225
|
+
- ANTHROPIC_API_KEY for Anthropic
|
|
226
|
+
- OPENAI_API_KEY for OpenAI
|
|
227
|
+
- GOOGLE_API_KEY or GEMINI_API_KEY for Google/Gemini
|
|
228
|
+
"""
|
|
229
|
+
import os
|
|
230
|
+
|
|
231
|
+
# Check environment variables if api_key not provided
|
|
232
|
+
if api_key is None:
|
|
233
|
+
if provider == "anthropic":
|
|
234
|
+
api_key = os.getenv("ANTHROPIC_API_KEY")
|
|
235
|
+
elif provider == "openai":
|
|
236
|
+
api_key = os.getenv("OPENAI_API_KEY")
|
|
237
|
+
elif provider in ("google", "gemini"):
|
|
238
|
+
api_key = os.getenv("GOOGLE_API_KEY") or os.getenv("GEMINI_API_KEY")
|
|
239
|
+
|
|
240
|
+
if provider == "anthropic":
|
|
241
|
+
return AnthropicProvider(
|
|
242
|
+
api_key=api_key,
|
|
243
|
+
model=model or "claude-sonnet-4-5-20250929",
|
|
244
|
+
**kwargs,
|
|
245
|
+
)
|
|
246
|
+
if provider == "openai":
|
|
247
|
+
return OpenAIProvider(api_key=api_key, model=model or "gpt-4-turbo-preview", **kwargs)
|
|
248
|
+
if provider in ("google", "gemini"):
|
|
249
|
+
return GeminiProvider(api_key=api_key, model=model or "gemini-1.5-pro", **kwargs)
|
|
250
|
+
if provider == "local":
|
|
251
|
+
return LocalProvider(
|
|
252
|
+
endpoint=kwargs.get("endpoint", "http://localhost:11434"),
|
|
253
|
+
model=model or "llama2",
|
|
254
|
+
**kwargs,
|
|
255
|
+
)
|
|
256
|
+
raise ValueError(f"Unknown provider: {provider}")
|
|
257
|
+
|
|
258
|
+
def _get_or_create_state(self, user_id: str) -> CollaborationState:
|
|
259
|
+
"""Get or create collaboration state for user"""
|
|
260
|
+
if user_id not in self.states:
|
|
261
|
+
self.states[user_id] = CollaborationState(user_id=user_id)
|
|
262
|
+
return self.states[user_id]
|
|
263
|
+
|
|
264
|
+
def _determine_level(self, state: CollaborationState) -> int:
    """Pick the empathy level to use for the next interaction.

    Starts at Level 1 and climbs one level at a time while the state
    qualifies, never exceeding ``self.target_level``.
    """
    level = 1
    candidate = 2
    # Stop climbing at the first level the state does not qualify for.
    while candidate <= self.target_level and state.should_progress_to_level(candidate):
        level = candidate
        candidate += 1
    return level
|
|
280
|
+
|
|
281
|
+
def _build_system_prompt(self, level: int) -> str:
    """Assemble the system prompt for *level*.

    When Claude memory has been loaded, its contents (from CLAUDE.md files)
    are prepended so they influence every interaction.

    Args:
        level: Empathy level (1-5)

    Returns:
        Complete system prompt

    """
    base = EmpathyLevel.get_system_prompt(level)

    # No cached memory -> plain level prompt.
    if not self._cached_memory:
        return base

    return f"""{self._cached_memory}

---
# Empathy Framework Instructions
{base}

Follow the CLAUDE.md instructions above, then apply the Empathy Framework below.
"""
|
|
307
|
+
|
|
308
|
+
def reload_memory(self):
    """Re-read Claude memory (CLAUDE.md) files at runtime.

    Lets updated CLAUDE.md content take effect without a restart.
    """
    if not self.claude_memory_loader:
        logger.warning("Claude memory not enabled, cannot reload")
        return

    # Drop the loader's cache first so changed files are actually re-read.
    self.claude_memory_loader.clear_cache()
    self._cached_memory = self.claude_memory_loader.load_all_memory(self.project_root)
    logger.info(f"Claude memory reloaded: {len(self._cached_memory)} chars")
|
|
321
|
+
|
|
322
|
+
async def interact(
    self,
    user_id: str,
    user_input: str,
    context: dict[str, Any] | None = None,
    force_level: int | None = None,
    task_type: str | None = None,
) -> dict[str, Any]:
    """Main interaction method.

    Automatically selects appropriate empathy level and responds.

    Phase 3 Security Pipeline (if enabled):
    1. PII Scrubbing: Detect and redact PII from user input
    2. Secrets Detection: Block requests containing secrets
    3. LLM Interaction: Process sanitized input
    4. Audit Logging: Log request details for compliance

    Model Routing (if enable_model_routing=True):
    Routes to appropriate model based on task_type:
    - CHEAP (Haiku): summarize, classify, triage, match_pattern
    - CAPABLE (Sonnet): generate_code, fix_bug, review_security, write_tests
    - PREMIUM (Opus): coordinate, synthesize_results, architectural_decision

    Args:
        user_id: Unique user identifier
        user_input: User's input/question
        context: Optional context dictionary
        force_level: Force specific level (for testing/demos)
        task_type: Type of task for model routing (e.g., "summarize", "fix_bug").
            If not provided with routing enabled, defaults to "capable" tier.

    Returns:
        Dictionary with:
        - content: LLM response
        - level_used: Which empathy level was used
        - proactive: Whether action was proactive
        - metadata: Additional information (includes routed_model if routing enabled)
        - security: Security details (if enabled)

    Raises:
        SecurityError: If secrets detected and block_on_secrets=True

    """
    # start_time anchors the duration_ms figure reported to the audit log.
    start_time = time.time()
    state = self._get_or_create_state(user_id)
    context = context or {}

    # Model routing: determine which model to use for this request.
    # Skipped when the caller pinned a model explicitly (self._explicit_model).
    routed_model: str | None = None
    routing_metadata: dict[str, Any] = {}

    if self.enable_model_routing and self.model_router and not self._explicit_model:
        # Route based on task_type (default to "generate_code" if not specified)
        effective_task = task_type or "generate_code"
        routed_model = self.model_router.route(effective_task, self._provider_name)
        tier = self.model_router.get_tier(effective_task)

        routing_metadata = {
            "model_routing_enabled": True,
            "task_type": effective_task,
            "routed_model": routed_model,
            "routed_tier": tier.value,
        }
        logger.info(
            f"Model routing: task={effective_task} -> model={routed_model} (tier={tier.value})",
        )

    # Initialize security tracking (defaults apply when security is disabled:
    # the raw user_input flows through unmodified).
    pii_detections: list[dict] = []
    secrets_detections: list[dict] = []
    sanitized_input = user_input
    security_metadata: dict[str, Any] = {}

    # Phase 3: Security Pipeline (Step 1 - PII Scrubbing)
    if self.enable_security and self.pii_scrubber:
        sanitized_input, pii_detections = self.pii_scrubber.scrub(user_input)
        security_metadata["pii_detected"] = len(pii_detections)
        security_metadata["pii_scrubbed"] = len(pii_detections) > 0
        if pii_detections:
            logger.info(
                f"PII detected for user {user_id}: {len(pii_detections)} items scrubbed",
            )

    # Phase 3: Security Pipeline (Step 2 - Secrets Detection)
    # Runs on the already-scrubbed text so PII redaction cannot mask a secret hit.
    if self.enable_security and self.secrets_detector:
        secrets_detections = self.secrets_detector.detect(sanitized_input)
        security_metadata["secrets_detected"] = len(secrets_detections)

        if secrets_detections:
            # Blocking is on by default; deployments may opt out via config.
            block_on_secrets = self.security_config.get("block_on_secrets", True)
            logger.warning(
                f"Secrets detected for user {user_id}: {len(secrets_detections)} secrets, "
                f"blocking={block_on_secrets}",
            )

            # Log security violation (even when not blocking, for audit trails)
            if self.audit_logger:
                self.audit_logger.log_security_violation(
                    user_id=user_id,
                    violation_type="secrets_detected",
                    severity="HIGH",
                    details={
                        "secret_count": len(secrets_detections),
                        "secret_types": [s.secret_type.value for s in secrets_detections],
                        "event_type": "llm_request",
                    },
                    blocked=block_on_secrets,
                )

            if block_on_secrets:
                raise SecurityError(
                    f"Request blocked: {len(secrets_detections)} secret(s) detected in input. "
                    f"Please remove sensitive credentials before submitting.",
                )

    # Determine level to use (force_level=0 is honored, hence the None check)
    level = force_level if force_level is not None else self._determine_level(state)

    logger.info(f"User {user_id}: Level {level} interaction")

    # Record user input (sanitized version if security enabled)
    state.add_interaction("user", sanitized_input, level)

    # Phase 3: Security Pipeline (Step 3 - LLM Interaction with sanitized input)
    # Route to appropriate level handler using sanitized input
    # Pass routed_model for cost-optimized model selection
    if level == 1:
        result = await self._level_1_reactive(sanitized_input, state, context, routed_model)
    elif level == 2:
        result = await self._level_2_guided(sanitized_input, state, context, routed_model)
    elif level == 3:
        result = await self._level_3_proactive(sanitized_input, state, context, routed_model)
    elif level == 4:
        result = await self._level_4_anticipatory(sanitized_input, state, context, routed_model)
    elif level == 5:
        result = await self._level_5_systems(sanitized_input, state, context, routed_model)
    else:
        raise ValueError(f"Invalid level: {level}")

    # Record assistant response
    state.add_interaction("assistant", result["content"], level, result.get("metadata"))

    # Add level info to result
    result["level_used"] = level
    result["level_description"] = EmpathyLevel.get_description(level)

    # Add security metadata to result
    if self.enable_security:
        result["security"] = security_metadata

    # Add model routing metadata to result
    # NOTE(review): assumes every level handler returns a "metadata" dict —
    # true for the handlers visible in this file; confirm for any new handler.
    if routing_metadata:
        result["metadata"].update(routing_metadata)

    # Phase 3: Security Pipeline (Step 4 - Audit Logging)
    if self.enable_security and self.audit_logger:
        duration_ms = int((time.time() - start_time) * 1000)

        # Calculate approximate sizes (UTF-8 byte lengths of raw in / out text)
        request_size_bytes = len(user_input.encode("utf-8"))
        response_size_bytes = len(result["content"].encode("utf-8"))

        # Extract memory sources if Claude Memory is enabled
        memory_sources = []
        if self._cached_memory:
            memory_sources = ["claude_memory"]

        self.audit_logger.log_llm_request(
            user_id=user_id,
            empathy_level=level,
            provider=self.provider.__class__.__name__.replace("Provider", "").lower(),
            model=result.get("metadata", {}).get("model", "unknown"),
            memory_sources=memory_sources,
            pii_count=len(pii_detections),
            secrets_count=len(secrets_detections),
            request_size_bytes=request_size_bytes,
            response_size_bytes=response_size_bytes,
            duration_ms=duration_ms,
            sanitization_applied=len(pii_detections) > 0,
            classification_verified=True,
            status="success",
        )

    return result
|
|
507
|
+
|
|
508
|
+
async def _level_1_reactive(
    self,
    user_input: str,
    state: CollaborationState,
    context: dict[str, Any],
    model_override: str | None = None,
) -> dict[str, Any]:
    """Level 1: Reactive - Simple Q&A

    Stateless single-turn exchange: no memory, no patterns.
    """
    call_args: dict[str, Any] = dict(
        messages=[{"role": "user", "content": user_input}],
        system_prompt=self._build_system_prompt(1),
        temperature=EmpathyLevel.get_temperature_recommendation(1),
        max_tokens=EmpathyLevel.get_max_tokens_recommendation(1),
    )
    # Only pass an explicit model when routing selected one.
    if model_override:
        call_args["model"] = model_override

    reply = await self.provider.generate(**call_args)

    return {
        "content": reply.content,
        "proactive": False,
        "metadata": {"tokens_used": reply.tokens_used, "model": reply.model},
    }
|
|
535
|
+
|
|
536
|
+
async def _level_2_guided(
    self,
    user_input: str,
    state: CollaborationState,
    context: dict[str, Any],
    model_override: str | None = None,
) -> dict[str, Any]:
    """Level 2: Guided - Ask clarifying questions

    Carries up to 5 turns of conversation history for context.
    """
    convo = state.get_conversation_history(max_turns=5)
    convo.append({"role": "user", "content": user_input})

    call_args: dict[str, Any] = dict(
        messages=convo,
        system_prompt=self._build_system_prompt(2),
        temperature=EmpathyLevel.get_temperature_recommendation(2),
        max_tokens=EmpathyLevel.get_max_tokens_recommendation(2),
    )
    if model_override:
        call_args["model"] = model_override

    reply = await self.provider.generate(**call_args)

    return {
        "content": reply.content,
        "proactive": False,
        "metadata": {
            "tokens_used": reply.tokens_used,
            "model": reply.model,
            # History turns exclude the just-appended user message.
            "history_turns": len(convo) - 1,
        },
    }
|
|
571
|
+
|
|
572
|
+
async def _level_3_proactive(
    self,
    user_input: str,
    state: CollaborationState,
    context: dict[str, Any],
    model_override: str | None = None,
) -> dict[str, Any]:
    """Level 3: Proactive - Act on detected patterns

    If a stored pattern matches the input, the LLM is prompted to act on it
    proactively; otherwise a normal contextual reply is produced while
    pattern detection runs in the background.

    Args:
        user_input: The (already sanitized) user message.
        state: Per-user collaboration state holding history and patterns.
        context: Optional request context (currently unused here).
        model_override: Routed model name, if model routing chose one.

    Returns:
        Dict with ``content``, ``proactive`` flag, and ``metadata``
        (tokens, model, and the matched pattern, if any).
    """
    # Check for matching pattern
    matching_pattern = state.find_matching_pattern(user_input)

    if matching_pattern:
        # Proactive action based on pattern
        prompt = f"""
User said: "{user_input}"

Pattern detected: When you {matching_pattern.trigger}, you typically {matching_pattern.action}.

Confidence: {matching_pattern.confidence:.0%}. Proactively {matching_pattern.action}.

[Provide the expected result/action]

Was this helpful? If not, I can adjust my pattern detection.
"""

        messages = [{"role": "user", "content": prompt}]
        proactive = True
        pattern_info = {
            "pattern_type": matching_pattern.pattern_type.value,
            "trigger": matching_pattern.trigger,
            "confidence": matching_pattern.confidence,
        }

    else:
        # Standard response + pattern detection
        messages = state.get_conversation_history(max_turns=10)
        messages.append({"role": "user", "content": user_input})
        proactive = False
        pattern_info = None

        # Run pattern detection in background (non-blocking).
        # BUG FIX: the event loop keeps only a weak reference to tasks, so a
        # fire-and-forget create_task() result can be garbage-collected before
        # (or while) it runs. Hold a strong reference until the task finishes.
        task = asyncio.create_task(self._detect_patterns_async(state, user_input))
        if not hasattr(self, "_background_tasks"):
            self._background_tasks: set = set()
        self._background_tasks.add(task)
        task.add_done_callback(self._background_tasks.discard)

    generate_kwargs: dict[str, Any] = {
        "messages": messages,
        "system_prompt": self._build_system_prompt(3),
        "temperature": EmpathyLevel.get_temperature_recommendation(3),
        "max_tokens": EmpathyLevel.get_max_tokens_recommendation(3),
    }
    if model_override:
        generate_kwargs["model"] = model_override

    response = await self.provider.generate(**generate_kwargs)

    return {
        "content": response.content,
        "proactive": proactive,
        "metadata": {
            "tokens_used": response.tokens_used,
            "model": response.model,
            "pattern": pattern_info,
        },
    }
|
|
638
|
+
|
|
639
|
+
async def _level_4_anticipatory(
    self,
    user_input: str,
    state: CollaborationState,
    context: dict[str, Any],
    model_override: str | None = None,
) -> dict[str, Any]:
    """Level 4: Anticipatory - Predict future needs

    Feeds collaboration statistics to the model and asks it to project the
    trajectory and flag upcoming bottlenecks.
    """
    # Embed a snapshot of the collaboration so far into the prompt.
    trajectory_prompt = f"""
User request: "{user_input}"

COLLABORATION CONTEXT:
- Total interactions: {len(state.interactions)}
- Trust level: {state.trust_level:.2f}
- Detected patterns: {len(state.detected_patterns)}
- Success rate: {state.success_rate:.0%}

TASK:
1. Respond to immediate request
2. Analyze trajectory (where is this headed?)
3. Predict future bottlenecks (if any)
4. Alert with prevention steps (if needed)

Use anticipatory format:
- Current state analysis
- Trajectory prediction
- Alert (if bottleneck predicted)
- Prevention steps (actionable)
- Reasoning (based on experience)
"""

    convo = state.get_conversation_history(max_turns=15)
    convo.append({"role": "user", "content": trajectory_prompt})

    call_args: dict[str, Any] = dict(
        messages=convo,
        system_prompt=self._build_system_prompt(4),
        temperature=EmpathyLevel.get_temperature_recommendation(4),
        max_tokens=EmpathyLevel.get_max_tokens_recommendation(4),
    )
    if model_override:
        call_args["model"] = model_override

    reply = await self.provider.generate(**call_args)

    return {
        "content": reply.content,
        "proactive": True,  # Level 4 is inherently proactive
        "metadata": {
            "tokens_used": reply.tokens_used,
            "model": reply.model,
            "trajectory_analyzed": True,
            "trust_level": state.trust_level,
        },
    }
|
|
698
|
+
|
|
699
|
+
async def _level_5_systems(
    self,
    user_input: str,
    state: CollaborationState,
    context: dict[str, Any],
    model_override: str | None = None,
) -> dict[str, Any]:
    """Level 5: Systems - Cross-domain pattern learning

    Injects the shared pattern library so principles can transfer
    across domains.
    """
    # Only mention the library when it is non-empty.
    pattern_context = (
        f"\n\nSHARED PATTERN LIBRARY:\n{self.pattern_library}"
        if self.pattern_library
        else ""
    )

    prompt = f"""
User request: "{user_input}"

{pattern_context}

TASK:
1. Respond to request
2. Check if relevant cross-domain patterns apply
3. Contribute new patterns if discovered
4. Show how principle generalizes across domains
"""

    convo = state.get_conversation_history(max_turns=20)
    convo.append({"role": "user", "content": prompt})

    call_args: dict[str, Any] = dict(
        messages=convo,
        system_prompt=self._build_system_prompt(5),
        temperature=EmpathyLevel.get_temperature_recommendation(5),
        max_tokens=EmpathyLevel.get_max_tokens_recommendation(5),
    )
    if model_override:
        call_args["model"] = model_override

    reply = await self.provider.generate(**call_args)

    return {
        "content": reply.content,
        "proactive": True,
        "metadata": {
            "tokens_used": reply.tokens_used,
            "model": reply.model,
            "pattern_library_size": len(self.pattern_library),
            "systems_level": True,
        },
    }
|
|
751
|
+
|
|
752
|
+
async def _detect_patterns_async(
    self,
    state: CollaborationState,
    current_input: str,
) -> None:
    """Detect user behavior patterns in background.

    Analyzes conversation history to identify:
    - Sequential patterns: User always does X then Y
    - Preference patterns: User prefers certain formats/styles
    - Temporal patterns: User does X at specific times
    - Conditional patterns: When Z happens, user does X

    This runs asynchronously to avoid blocking the main response.
    Detected patterns enable Level 3 proactive interactions.
    """
    try:
        from datetime import datetime

        interactions = state.interactions
        if len(interactions) < 3:
            # Need at least 3 interactions to detect patterns
            return

        # Analyze recent interactions for sequential patterns
        recent = interactions[-10:]  # Last 10 interactions
        user_messages = [i for i in recent if i.role == "user"]

        if len(user_messages) < 2:
            return

        # Pattern 1: Sequential patterns (X followed by Y)
        # Keyword matching is case-insensitive substring search on adjacent
        # user messages; no stemming or tokenization is applied.
        for i in range(len(user_messages) - 1):
            current = user_messages[i].content.lower()
            next_msg = user_messages[i + 1].content.lower()

            # Detect common sequential patterns
            sequential_triggers = [
                ("review", "fix"),  # Review then fix
                ("debug", "test"),  # Debug then test
                ("implement", "test"),  # Implement then test
                ("refactor", "review"),  # Refactor then review
            ]

            for trigger, action in sequential_triggers:
                if trigger in current and action in next_msg:
                    pattern = UserPattern(
                        pattern_type=PatternType.SEQUENTIAL,
                        trigger=trigger,
                        action=f"Typically follows with {action}",
                        # NOTE(review): `i` is the message's position in the
                        # window, not an occurrence count, despite the
                        # comment below — confirm intent.
                        confidence=0.6 + (0.1 * min(i, 3)),  # Increase with occurrences
                        occurrences=1,
                        last_seen=datetime.now(),
                        context={"detected_from": "sequential_analysis"},
                    )
                    # Presumably state.add_pattern merges/dedupes repeated
                    # patterns — TODO confirm; otherwise duplicates accrue.
                    state.add_pattern(pattern)

        # Pattern 2: Preference patterns
        preference_indicators = {
            "concise": "brief, concise responses",
            "detailed": "comprehensive, detailed responses",
            "example": "responses with examples",
            "step by step": "step-by-step explanations",
            "code": "code-focused responses",
        }

        for indicator, preference in preference_indicators.items():
            # A preference needs at least 2 mentions in the recent window.
            occurrences = sum(1 for m in user_messages if indicator in m.content.lower())
            if occurrences >= 2:
                pattern = UserPattern(
                    pattern_type=PatternType.PREFERENCE,
                    trigger=indicator,
                    action=f"User prefers {preference}",
                    # Confidence grows with mentions, capped at 0.9.
                    confidence=min(0.9, 0.5 + (0.1 * occurrences)),
                    occurrences=occurrences,
                    last_seen=datetime.now(),
                    context={"preference_type": indicator},
                )
                state.add_pattern(pattern)

        # Pattern 3: Conditional patterns (error -> debug)
        conditional_triggers = [
            ("error", "debug", "When errors occur, user asks for debugging"),
            ("failed", "fix", "When tests fail, user asks for fixes"),
            ("slow", "optimize", "When performance issues arise, user asks for optimization"),
        ]

        for condition, response_keyword, description in conditional_triggers:
            # [:-1] guarantees user_messages[i + 1] below stays in bounds.
            for i, msg in enumerate(user_messages[:-1]):
                if condition in msg.content.lower():
                    next_msg = user_messages[i + 1].content.lower()
                    if response_keyword in next_msg:
                        pattern = UserPattern(
                            pattern_type=PatternType.CONDITIONAL,
                            trigger=condition,
                            action=description,
                            confidence=0.7,
                            occurrences=1,
                            last_seen=datetime.now(),
                            context={"condition": condition, "response": response_keyword},
                        )
                        state.add_pattern(pattern)

        logger.debug(
            f"Pattern detection complete. Detected {len(state.detected_patterns)} patterns.",
        )

    except Exception as e:
        # Pattern detection should never break the main flow
        logger.warning(f"Pattern detection error (non-critical): {e}")
|
|
862
|
+
|
|
863
|
+
def update_trust(self, user_id: str, outcome: str, magnitude: float = 1.0):
    """Adjust a user's trust level after an interaction outcome.

    Args:
        user_id: User identifier
        outcome: "success" or "failure"
        magnitude: How much to adjust (0.0 to 1.0)

    """
    user_state = self._get_or_create_state(user_id)
    user_state.update_trust(outcome, magnitude)

    logger.info(f"Trust updated for {user_id}: {outcome} -> {user_state.trust_level:.2f}")
|
|
876
|
+
|
|
877
|
+
def add_pattern(self, user_id: str, pattern: UserPattern):
    """Record a pattern for a user without waiting for automatic detection.

    Args:
        user_id: User identifier
        pattern: UserPattern instance

    """
    self._get_or_create_state(user_id).add_pattern(pattern)

    logger.info(f"Pattern added for {user_id}: {pattern.pattern_type.value}")
|
|
889
|
+
|
|
890
|
+
def get_statistics(self, user_id: str) -> dict[str, Any]:
    """Return collaboration statistics for *user_id*.

    A fresh state is created if the user has none yet.

    Args:
        user_id: User identifier

    Returns:
        Dictionary with stats

    """
    return self._get_or_create_state(user_id).get_statistics()
|
|
902
|
+
|
|
903
|
+
def reset_state(self, user_id: str):
    """Discard all collaboration state for *user_id* (no-op if none exists)."""
    if self.states.pop(user_id, None) is not None:
        logger.info(f"State reset for {user_id}")
|