attune-ai 2.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- attune/__init__.py +358 -0
- attune/adaptive/__init__.py +13 -0
- attune/adaptive/task_complexity.py +127 -0
- attune/agent_monitoring.py +414 -0
- attune/cache/__init__.py +117 -0
- attune/cache/base.py +166 -0
- attune/cache/dependency_manager.py +256 -0
- attune/cache/hash_only.py +251 -0
- attune/cache/hybrid.py +457 -0
- attune/cache/storage.py +285 -0
- attune/cache_monitor.py +356 -0
- attune/cache_stats.py +298 -0
- attune/cli/__init__.py +152 -0
- attune/cli/__main__.py +12 -0
- attune/cli/commands/__init__.py +1 -0
- attune/cli/commands/batch.py +264 -0
- attune/cli/commands/cache.py +248 -0
- attune/cli/commands/help.py +331 -0
- attune/cli/commands/info.py +140 -0
- attune/cli/commands/inspect.py +436 -0
- attune/cli/commands/inspection.py +57 -0
- attune/cli/commands/memory.py +48 -0
- attune/cli/commands/metrics.py +92 -0
- attune/cli/commands/orchestrate.py +184 -0
- attune/cli/commands/patterns.py +207 -0
- attune/cli/commands/profiling.py +202 -0
- attune/cli/commands/provider.py +98 -0
- attune/cli/commands/routing.py +285 -0
- attune/cli/commands/setup.py +96 -0
- attune/cli/commands/status.py +235 -0
- attune/cli/commands/sync.py +166 -0
- attune/cli/commands/tier.py +121 -0
- attune/cli/commands/utilities.py +114 -0
- attune/cli/commands/workflow.py +579 -0
- attune/cli/core.py +32 -0
- attune/cli/parsers/__init__.py +68 -0
- attune/cli/parsers/batch.py +118 -0
- attune/cli/parsers/cache.py +65 -0
- attune/cli/parsers/help.py +41 -0
- attune/cli/parsers/info.py +26 -0
- attune/cli/parsers/inspect.py +66 -0
- attune/cli/parsers/metrics.py +42 -0
- attune/cli/parsers/orchestrate.py +61 -0
- attune/cli/parsers/patterns.py +54 -0
- attune/cli/parsers/provider.py +40 -0
- attune/cli/parsers/routing.py +110 -0
- attune/cli/parsers/setup.py +42 -0
- attune/cli/parsers/status.py +47 -0
- attune/cli/parsers/sync.py +31 -0
- attune/cli/parsers/tier.py +33 -0
- attune/cli/parsers/workflow.py +77 -0
- attune/cli/utils/__init__.py +1 -0
- attune/cli/utils/data.py +242 -0
- attune/cli/utils/helpers.py +68 -0
- attune/cli_legacy.py +3957 -0
- attune/cli_minimal.py +1159 -0
- attune/cli_router.py +437 -0
- attune/cli_unified.py +814 -0
- attune/config/__init__.py +66 -0
- attune/config/xml_config.py +286 -0
- attune/config.py +545 -0
- attune/coordination.py +870 -0
- attune/core.py +1511 -0
- attune/core_modules/__init__.py +15 -0
- attune/cost_tracker.py +626 -0
- attune/dashboard/__init__.py +41 -0
- attune/dashboard/app.py +512 -0
- attune/dashboard/simple_server.py +435 -0
- attune/dashboard/standalone_server.py +547 -0
- attune/discovery.py +306 -0
- attune/emergence.py +306 -0
- attune/exceptions.py +123 -0
- attune/feedback_loops.py +373 -0
- attune/hot_reload/README.md +473 -0
- attune/hot_reload/__init__.py +62 -0
- attune/hot_reload/config.py +83 -0
- attune/hot_reload/integration.py +229 -0
- attune/hot_reload/reloader.py +298 -0
- attune/hot_reload/watcher.py +183 -0
- attune/hot_reload/websocket.py +177 -0
- attune/levels.py +577 -0
- attune/leverage_points.py +441 -0
- attune/logging_config.py +261 -0
- attune/mcp/__init__.py +10 -0
- attune/mcp/server.py +506 -0
- attune/memory/__init__.py +237 -0
- attune/memory/claude_memory.py +469 -0
- attune/memory/config.py +224 -0
- attune/memory/control_panel.py +1290 -0
- attune/memory/control_panel_support.py +145 -0
- attune/memory/cross_session.py +845 -0
- attune/memory/edges.py +179 -0
- attune/memory/encryption.py +159 -0
- attune/memory/file_session.py +770 -0
- attune/memory/graph.py +570 -0
- attune/memory/long_term.py +913 -0
- attune/memory/long_term_types.py +99 -0
- attune/memory/mixins/__init__.py +25 -0
- attune/memory/mixins/backend_init_mixin.py +249 -0
- attune/memory/mixins/capabilities_mixin.py +208 -0
- attune/memory/mixins/handoff_mixin.py +208 -0
- attune/memory/mixins/lifecycle_mixin.py +49 -0
- attune/memory/mixins/long_term_mixin.py +352 -0
- attune/memory/mixins/promotion_mixin.py +109 -0
- attune/memory/mixins/short_term_mixin.py +182 -0
- attune/memory/nodes.py +179 -0
- attune/memory/redis_bootstrap.py +540 -0
- attune/memory/security/__init__.py +31 -0
- attune/memory/security/audit_logger.py +932 -0
- attune/memory/security/pii_scrubber.py +640 -0
- attune/memory/security/secrets_detector.py +678 -0
- attune/memory/short_term.py +2192 -0
- attune/memory/simple_storage.py +302 -0
- attune/memory/storage/__init__.py +15 -0
- attune/memory/storage_backend.py +167 -0
- attune/memory/summary_index.py +583 -0
- attune/memory/types.py +446 -0
- attune/memory/unified.py +182 -0
- attune/meta_workflows/__init__.py +74 -0
- attune/meta_workflows/agent_creator.py +248 -0
- attune/meta_workflows/builtin_templates.py +567 -0
- attune/meta_workflows/cli_commands/__init__.py +56 -0
- attune/meta_workflows/cli_commands/agent_commands.py +321 -0
- attune/meta_workflows/cli_commands/analytics_commands.py +442 -0
- attune/meta_workflows/cli_commands/config_commands.py +232 -0
- attune/meta_workflows/cli_commands/memory_commands.py +182 -0
- attune/meta_workflows/cli_commands/template_commands.py +354 -0
- attune/meta_workflows/cli_commands/workflow_commands.py +382 -0
- attune/meta_workflows/cli_meta_workflows.py +59 -0
- attune/meta_workflows/form_engine.py +292 -0
- attune/meta_workflows/intent_detector.py +409 -0
- attune/meta_workflows/models.py +569 -0
- attune/meta_workflows/pattern_learner.py +738 -0
- attune/meta_workflows/plan_generator.py +384 -0
- attune/meta_workflows/session_context.py +397 -0
- attune/meta_workflows/template_registry.py +229 -0
- attune/meta_workflows/workflow.py +984 -0
- attune/metrics/__init__.py +12 -0
- attune/metrics/collector.py +31 -0
- attune/metrics/prompt_metrics.py +194 -0
- attune/models/__init__.py +172 -0
- attune/models/__main__.py +13 -0
- attune/models/adaptive_routing.py +437 -0
- attune/models/auth_cli.py +444 -0
- attune/models/auth_strategy.py +450 -0
- attune/models/cli.py +655 -0
- attune/models/empathy_executor.py +354 -0
- attune/models/executor.py +257 -0
- attune/models/fallback.py +762 -0
- attune/models/provider_config.py +282 -0
- attune/models/registry.py +472 -0
- attune/models/tasks.py +359 -0
- attune/models/telemetry/__init__.py +71 -0
- attune/models/telemetry/analytics.py +594 -0
- attune/models/telemetry/backend.py +196 -0
- attune/models/telemetry/data_models.py +431 -0
- attune/models/telemetry/storage.py +489 -0
- attune/models/token_estimator.py +420 -0
- attune/models/validation.py +280 -0
- attune/monitoring/__init__.py +52 -0
- attune/monitoring/alerts.py +946 -0
- attune/monitoring/alerts_cli.py +448 -0
- attune/monitoring/multi_backend.py +271 -0
- attune/monitoring/otel_backend.py +362 -0
- attune/optimization/__init__.py +19 -0
- attune/optimization/context_optimizer.py +272 -0
- attune/orchestration/__init__.py +67 -0
- attune/orchestration/agent_templates.py +707 -0
- attune/orchestration/config_store.py +499 -0
- attune/orchestration/execution_strategies.py +2111 -0
- attune/orchestration/meta_orchestrator.py +1168 -0
- attune/orchestration/pattern_learner.py +696 -0
- attune/orchestration/real_tools.py +931 -0
- attune/pattern_cache.py +187 -0
- attune/pattern_library.py +542 -0
- attune/patterns/debugging/all_patterns.json +81 -0
- attune/patterns/debugging/workflow_20260107_1770825e.json +77 -0
- attune/patterns/refactoring_memory.json +89 -0
- attune/persistence.py +564 -0
- attune/platform_utils.py +265 -0
- attune/plugins/__init__.py +28 -0
- attune/plugins/base.py +361 -0
- attune/plugins/registry.py +268 -0
- attune/project_index/__init__.py +32 -0
- attune/project_index/cli.py +335 -0
- attune/project_index/index.py +667 -0
- attune/project_index/models.py +504 -0
- attune/project_index/reports.py +474 -0
- attune/project_index/scanner.py +777 -0
- attune/project_index/scanner_parallel.py +291 -0
- attune/prompts/__init__.py +61 -0
- attune/prompts/config.py +77 -0
- attune/prompts/context.py +177 -0
- attune/prompts/parser.py +285 -0
- attune/prompts/registry.py +313 -0
- attune/prompts/templates.py +208 -0
- attune/redis_config.py +302 -0
- attune/redis_memory.py +799 -0
- attune/resilience/__init__.py +56 -0
- attune/resilience/circuit_breaker.py +256 -0
- attune/resilience/fallback.py +179 -0
- attune/resilience/health.py +300 -0
- attune/resilience/retry.py +209 -0
- attune/resilience/timeout.py +135 -0
- attune/routing/__init__.py +43 -0
- attune/routing/chain_executor.py +433 -0
- attune/routing/classifier.py +217 -0
- attune/routing/smart_router.py +234 -0
- attune/routing/workflow_registry.py +343 -0
- attune/scaffolding/README.md +589 -0
- attune/scaffolding/__init__.py +35 -0
- attune/scaffolding/__main__.py +14 -0
- attune/scaffolding/cli.py +240 -0
- attune/scaffolding/templates/base_wizard.py.jinja2 +121 -0
- attune/scaffolding/templates/coach_wizard.py.jinja2 +321 -0
- attune/scaffolding/templates/domain_wizard.py.jinja2 +408 -0
- attune/scaffolding/templates/linear_flow_wizard.py.jinja2 +203 -0
- attune/socratic/__init__.py +256 -0
- attune/socratic/ab_testing.py +958 -0
- attune/socratic/blueprint.py +533 -0
- attune/socratic/cli.py +703 -0
- attune/socratic/collaboration.py +1114 -0
- attune/socratic/domain_templates.py +924 -0
- attune/socratic/embeddings.py +738 -0
- attune/socratic/engine.py +794 -0
- attune/socratic/explainer.py +682 -0
- attune/socratic/feedback.py +772 -0
- attune/socratic/forms.py +629 -0
- attune/socratic/generator.py +732 -0
- attune/socratic/llm_analyzer.py +637 -0
- attune/socratic/mcp_server.py +702 -0
- attune/socratic/session.py +312 -0
- attune/socratic/storage.py +667 -0
- attune/socratic/success.py +730 -0
- attune/socratic/visual_editor.py +860 -0
- attune/socratic/web_ui.py +958 -0
- attune/telemetry/__init__.py +39 -0
- attune/telemetry/agent_coordination.py +475 -0
- attune/telemetry/agent_tracking.py +367 -0
- attune/telemetry/approval_gates.py +545 -0
- attune/telemetry/cli.py +1231 -0
- attune/telemetry/commands/__init__.py +14 -0
- attune/telemetry/commands/dashboard_commands.py +696 -0
- attune/telemetry/event_streaming.py +409 -0
- attune/telemetry/feedback_loop.py +567 -0
- attune/telemetry/usage_tracker.py +591 -0
- attune/templates.py +754 -0
- attune/test_generator/__init__.py +38 -0
- attune/test_generator/__main__.py +14 -0
- attune/test_generator/cli.py +234 -0
- attune/test_generator/generator.py +355 -0
- attune/test_generator/risk_analyzer.py +216 -0
- attune/test_generator/templates/unit_test.py.jinja2 +272 -0
- attune/tier_recommender.py +384 -0
- attune/tools.py +183 -0
- attune/trust/__init__.py +28 -0
- attune/trust/circuit_breaker.py +579 -0
- attune/trust_building.py +527 -0
- attune/validation/__init__.py +19 -0
- attune/validation/xml_validator.py +281 -0
- attune/vscode_bridge.py +173 -0
- attune/workflow_commands.py +780 -0
- attune/workflow_patterns/__init__.py +33 -0
- attune/workflow_patterns/behavior.py +249 -0
- attune/workflow_patterns/core.py +76 -0
- attune/workflow_patterns/output.py +99 -0
- attune/workflow_patterns/registry.py +255 -0
- attune/workflow_patterns/structural.py +288 -0
- attune/workflows/__init__.py +539 -0
- attune/workflows/autonomous_test_gen.py +1268 -0
- attune/workflows/base.py +2667 -0
- attune/workflows/batch_processing.py +342 -0
- attune/workflows/bug_predict.py +1084 -0
- attune/workflows/builder.py +273 -0
- attune/workflows/caching.py +253 -0
- attune/workflows/code_review.py +1048 -0
- attune/workflows/code_review_adapters.py +312 -0
- attune/workflows/code_review_pipeline.py +722 -0
- attune/workflows/config.py +645 -0
- attune/workflows/dependency_check.py +644 -0
- attune/workflows/document_gen/__init__.py +25 -0
- attune/workflows/document_gen/config.py +30 -0
- attune/workflows/document_gen/report_formatter.py +162 -0
- attune/workflows/document_gen/workflow.py +1426 -0
- attune/workflows/document_manager.py +216 -0
- attune/workflows/document_manager_README.md +134 -0
- attune/workflows/documentation_orchestrator.py +1205 -0
- attune/workflows/history.py +510 -0
- attune/workflows/keyboard_shortcuts/__init__.py +39 -0
- attune/workflows/keyboard_shortcuts/generators.py +391 -0
- attune/workflows/keyboard_shortcuts/parsers.py +416 -0
- attune/workflows/keyboard_shortcuts/prompts.py +295 -0
- attune/workflows/keyboard_shortcuts/schema.py +193 -0
- attune/workflows/keyboard_shortcuts/workflow.py +509 -0
- attune/workflows/llm_base.py +363 -0
- attune/workflows/manage_docs.py +87 -0
- attune/workflows/manage_docs_README.md +134 -0
- attune/workflows/manage_documentation.py +821 -0
- attune/workflows/new_sample_workflow1.py +149 -0
- attune/workflows/new_sample_workflow1_README.md +150 -0
- attune/workflows/orchestrated_health_check.py +849 -0
- attune/workflows/orchestrated_release_prep.py +600 -0
- attune/workflows/output.py +413 -0
- attune/workflows/perf_audit.py +863 -0
- attune/workflows/pr_review.py +762 -0
- attune/workflows/progress.py +785 -0
- attune/workflows/progress_server.py +322 -0
- attune/workflows/progressive/README 2.md +454 -0
- attune/workflows/progressive/README.md +454 -0
- attune/workflows/progressive/__init__.py +82 -0
- attune/workflows/progressive/cli.py +219 -0
- attune/workflows/progressive/core.py +488 -0
- attune/workflows/progressive/orchestrator.py +723 -0
- attune/workflows/progressive/reports.py +520 -0
- attune/workflows/progressive/telemetry.py +274 -0
- attune/workflows/progressive/test_gen.py +495 -0
- attune/workflows/progressive/workflow.py +589 -0
- attune/workflows/refactor_plan.py +694 -0
- attune/workflows/release_prep.py +895 -0
- attune/workflows/release_prep_crew.py +969 -0
- attune/workflows/research_synthesis.py +404 -0
- attune/workflows/routing.py +168 -0
- attune/workflows/secure_release.py +593 -0
- attune/workflows/security_adapters.py +297 -0
- attune/workflows/security_audit.py +1329 -0
- attune/workflows/security_audit_phase3.py +355 -0
- attune/workflows/seo_optimization.py +633 -0
- attune/workflows/step_config.py +234 -0
- attune/workflows/telemetry_mixin.py +269 -0
- attune/workflows/test5.py +125 -0
- attune/workflows/test5_README.md +158 -0
- attune/workflows/test_coverage_boost_crew.py +849 -0
- attune/workflows/test_gen/__init__.py +52 -0
- attune/workflows/test_gen/ast_analyzer.py +249 -0
- attune/workflows/test_gen/config.py +88 -0
- attune/workflows/test_gen/data_models.py +38 -0
- attune/workflows/test_gen/report_formatter.py +289 -0
- attune/workflows/test_gen/test_templates.py +381 -0
- attune/workflows/test_gen/workflow.py +655 -0
- attune/workflows/test_gen.py +54 -0
- attune/workflows/test_gen_behavioral.py +477 -0
- attune/workflows/test_gen_parallel.py +341 -0
- attune/workflows/test_lifecycle.py +526 -0
- attune/workflows/test_maintenance.py +627 -0
- attune/workflows/test_maintenance_cli.py +590 -0
- attune/workflows/test_maintenance_crew.py +840 -0
- attune/workflows/test_runner.py +622 -0
- attune/workflows/tier_tracking.py +531 -0
- attune/workflows/xml_enhanced_crew.py +285 -0
- attune_ai-2.0.0.dist-info/METADATA +1026 -0
- attune_ai-2.0.0.dist-info/RECORD +457 -0
- attune_ai-2.0.0.dist-info/WHEEL +5 -0
- attune_ai-2.0.0.dist-info/entry_points.txt +26 -0
- attune_ai-2.0.0.dist-info/licenses/LICENSE +201 -0
- attune_ai-2.0.0.dist-info/licenses/LICENSE_CHANGE_ANNOUNCEMENT.md +101 -0
- attune_ai-2.0.0.dist-info/top_level.txt +5 -0
- attune_healthcare/__init__.py +13 -0
- attune_healthcare/monitors/__init__.py +9 -0
- attune_healthcare/monitors/clinical_protocol_monitor.py +315 -0
- attune_healthcare/monitors/monitoring/__init__.py +44 -0
- attune_healthcare/monitors/monitoring/protocol_checker.py +300 -0
- attune_healthcare/monitors/monitoring/protocol_loader.py +214 -0
- attune_healthcare/monitors/monitoring/sensor_parsers.py +306 -0
- attune_healthcare/monitors/monitoring/trajectory_analyzer.py +389 -0
- attune_llm/README.md +553 -0
- attune_llm/__init__.py +28 -0
- attune_llm/agent_factory/__init__.py +53 -0
- attune_llm/agent_factory/adapters/__init__.py +85 -0
- attune_llm/agent_factory/adapters/autogen_adapter.py +312 -0
- attune_llm/agent_factory/adapters/crewai_adapter.py +483 -0
- attune_llm/agent_factory/adapters/haystack_adapter.py +298 -0
- attune_llm/agent_factory/adapters/langchain_adapter.py +362 -0
- attune_llm/agent_factory/adapters/langgraph_adapter.py +333 -0
- attune_llm/agent_factory/adapters/native.py +228 -0
- attune_llm/agent_factory/adapters/wizard_adapter.py +423 -0
- attune_llm/agent_factory/base.py +305 -0
- attune_llm/agent_factory/crews/__init__.py +67 -0
- attune_llm/agent_factory/crews/code_review.py +1113 -0
- attune_llm/agent_factory/crews/health_check.py +1262 -0
- attune_llm/agent_factory/crews/refactoring.py +1128 -0
- attune_llm/agent_factory/crews/security_audit.py +1018 -0
- attune_llm/agent_factory/decorators.py +287 -0
- attune_llm/agent_factory/factory.py +558 -0
- attune_llm/agent_factory/framework.py +193 -0
- attune_llm/agent_factory/memory_integration.py +328 -0
- attune_llm/agent_factory/resilient.py +320 -0
- attune_llm/agents_md/__init__.py +22 -0
- attune_llm/agents_md/loader.py +218 -0
- attune_llm/agents_md/parser.py +271 -0
- attune_llm/agents_md/registry.py +307 -0
- attune_llm/claude_memory.py +466 -0
- attune_llm/cli/__init__.py +8 -0
- attune_llm/cli/sync_claude.py +487 -0
- attune_llm/code_health.py +1313 -0
- attune_llm/commands/__init__.py +51 -0
- attune_llm/commands/context.py +375 -0
- attune_llm/commands/loader.py +301 -0
- attune_llm/commands/models.py +231 -0
- attune_llm/commands/parser.py +371 -0
- attune_llm/commands/registry.py +429 -0
- attune_llm/config/__init__.py +29 -0
- attune_llm/config/unified.py +291 -0
- attune_llm/context/__init__.py +22 -0
- attune_llm/context/compaction.py +455 -0
- attune_llm/context/manager.py +434 -0
- attune_llm/contextual_patterns.py +361 -0
- attune_llm/core.py +907 -0
- attune_llm/git_pattern_extractor.py +435 -0
- attune_llm/hooks/__init__.py +24 -0
- attune_llm/hooks/config.py +306 -0
- attune_llm/hooks/executor.py +289 -0
- attune_llm/hooks/registry.py +302 -0
- attune_llm/hooks/scripts/__init__.py +39 -0
- attune_llm/hooks/scripts/evaluate_session.py +201 -0
- attune_llm/hooks/scripts/first_time_init.py +285 -0
- attune_llm/hooks/scripts/pre_compact.py +207 -0
- attune_llm/hooks/scripts/session_end.py +183 -0
- attune_llm/hooks/scripts/session_start.py +163 -0
- attune_llm/hooks/scripts/suggest_compact.py +225 -0
- attune_llm/learning/__init__.py +30 -0
- attune_llm/learning/evaluator.py +438 -0
- attune_llm/learning/extractor.py +514 -0
- attune_llm/learning/storage.py +560 -0
- attune_llm/levels.py +227 -0
- attune_llm/pattern_confidence.py +414 -0
- attune_llm/pattern_resolver.py +272 -0
- attune_llm/pattern_summary.py +350 -0
- attune_llm/providers.py +967 -0
- attune_llm/routing/__init__.py +32 -0
- attune_llm/routing/model_router.py +362 -0
- attune_llm/security/IMPLEMENTATION_SUMMARY.md +413 -0
- attune_llm/security/PHASE2_COMPLETE.md +384 -0
- attune_llm/security/PHASE2_SECRETS_DETECTOR_COMPLETE.md +271 -0
- attune_llm/security/QUICK_REFERENCE.md +316 -0
- attune_llm/security/README.md +262 -0
- attune_llm/security/__init__.py +62 -0
- attune_llm/security/audit_logger.py +929 -0
- attune_llm/security/audit_logger_example.py +152 -0
- attune_llm/security/pii_scrubber.py +640 -0
- attune_llm/security/secrets_detector.py +678 -0
- attune_llm/security/secrets_detector_example.py +304 -0
- attune_llm/security/secure_memdocs.py +1192 -0
- attune_llm/security/secure_memdocs_example.py +278 -0
- attune_llm/session_status.py +745 -0
- attune_llm/state.py +246 -0
- attune_llm/utils/__init__.py +5 -0
- attune_llm/utils/tokens.py +349 -0
- attune_software/SOFTWARE_PLUGIN_README.md +57 -0
- attune_software/__init__.py +13 -0
- attune_software/cli/__init__.py +120 -0
- attune_software/cli/inspect.py +362 -0
- attune_software/cli.py +574 -0
- attune_software/plugin.py +188 -0
- workflow_scaffolding/__init__.py +11 -0
- workflow_scaffolding/__main__.py +12 -0
- workflow_scaffolding/cli.py +206 -0
- workflow_scaffolding/generator.py +265 -0
attune/core.py
ADDED
|
@@ -0,0 +1,1511 @@
|
|
|
1
|
+
"""EmpathyOS - Core Implementation
|
|
2
|
+
|
|
3
|
+
The main entry point for the Empathy Framework, providing access to all
|
|
4
|
+
5 empathy levels and system thinking integrations.
|
|
5
|
+
|
|
6
|
+
Copyright 2025 Smart AI Memory, LLC
|
|
7
|
+
Licensed under Fair Source 0.9
|
|
8
|
+
"""
|
|
9
|
+
|
|
10
|
+
from __future__ import annotations
|
|
11
|
+
|
|
12
|
+
import logging
|
|
13
|
+
from dataclasses import dataclass, field
|
|
14
|
+
from datetime import datetime
|
|
15
|
+
from typing import TYPE_CHECKING, Any
|
|
16
|
+
|
|
17
|
+
from .emergence import EmergenceDetector
|
|
18
|
+
from .exceptions import ValidationError
|
|
19
|
+
from .feedback_loops import FeedbackLoopDetector
|
|
20
|
+
from .leverage_points import LeveragePoint, LeveragePointAnalyzer
|
|
21
|
+
from .memory import Classification, UnifiedMemory
|
|
22
|
+
from .redis_memory import AccessTier, AgentCredentials, RedisShortTermMemory, StagedPattern
|
|
23
|
+
|
|
24
|
+
if TYPE_CHECKING:
|
|
25
|
+
from .pattern_library import PatternLibrary
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
@dataclass
class CollaborationState:
    """Stock-and-flow model of AI-human collaboration.

    Stocks accumulate over the session (trust, shared context, outcome
    counts); flow rates govern how far each interaction moves the trust
    stock. Erosion is deliberately faster than building, mirroring how
    trust behaves in real working relationships.
    """

    # --- Stocks: quantities that accumulate over the session ---
    trust_level: float = 0.5  # in [0.0, 1.0]; starts neutral
    shared_context: dict = field(default_factory=dict)
    successful_interventions: int = 0
    failed_interventions: int = 0

    # --- Flow rates: per-interaction deltas applied to the stocks ---
    trust_building_rate: float = 0.05  # added on each success
    trust_erosion_rate: float = 0.10  # subtracted on each failure (faster than building)
    context_accumulation_rate: float = 0.1

    # --- Metadata ---
    session_start: datetime = field(default_factory=datetime.now)
    total_interactions: int = 0
    trust_trajectory: list[float] = field(default_factory=list)  # trust after each interaction

    def update_trust(self, outcome: str):
        """Apply one interaction's outcome to the trust stock.

        ``"success"`` raises trust by the building rate; ``"failure"``
        lowers it by the erosion rate; any other outcome leaves trust
        unchanged. The result is clamped to [0, 1], and every call —
        regardless of outcome — counts as an interaction and is appended
        to the trajectory history.
        """
        delta = 0.0
        if outcome == "success":
            delta = self.trust_building_rate
            self.successful_interventions += 1
        elif outcome == "failure":
            delta = -self.trust_erosion_rate
            self.failed_interventions += 1

        # Clamp into [0, 1], then record this interaction.
        self.trust_level = min(1.0, max(0.0, self.trust_level + delta))
        self.total_interactions += 1
        self.trust_trajectory.append(self.trust_level)
71
|
+
|
|
72
|
+
class EmpathyOS:
|
|
73
|
+
"""Empathy Operating System for AI-Human Collaboration.
|
|
74
|
+
|
|
75
|
+
Integrates:
|
|
76
|
+
- 5-level Empathy Maturity Model
|
|
77
|
+
- Systems Thinking (feedback loops, emergence, leverage points)
|
|
78
|
+
- Tactical Empathy (Voss)
|
|
79
|
+
- Emotional Intelligence (Goleman)
|
|
80
|
+
- Clear Thinking (Naval)
|
|
81
|
+
|
|
82
|
+
Goal: Enable AI to operate at Levels 3-4 (Proactive/Anticipatory)
|
|
83
|
+
|
|
84
|
+
Example:
|
|
85
|
+
Basic usage with empathy levels::
|
|
86
|
+
|
|
87
|
+
from attune import EmpathyOS
|
|
88
|
+
|
|
89
|
+
# Create instance targeting Level 4 (Anticipatory)
|
|
90
|
+
empathy = EmpathyOS(user_id="developer_123", target_level=4)
|
|
91
|
+
|
|
92
|
+
# Level 1 - Reactive response
|
|
93
|
+
response = empathy.level_1_reactive(
|
|
94
|
+
user_input="How do I optimize database queries?",
|
|
95
|
+
context={"domain": "software"}
|
|
96
|
+
)
|
|
97
|
+
|
|
98
|
+
# Level 2 - Guided with follow-up questions
|
|
99
|
+
response = empathy.level_2_guided(
|
|
100
|
+
user_input="I need help with my code",
|
|
101
|
+
context={"task": "debugging"},
|
|
102
|
+
history=[]
|
|
103
|
+
)
|
|
104
|
+
|
|
105
|
+
Memory operations::
|
|
106
|
+
|
|
107
|
+
# Stash working data (short-term)
|
|
108
|
+
empathy.stash("current_task", {"status": "debugging"})
|
|
109
|
+
|
|
110
|
+
# Retrieve later
|
|
111
|
+
task = empathy.retrieve("current_task")
|
|
112
|
+
|
|
113
|
+
# Persist patterns (long-term)
|
|
114
|
+
result = empathy.persist_pattern(
|
|
115
|
+
content="Query optimization technique",
|
|
116
|
+
pattern_type="technique"
|
|
117
|
+
)
|
|
118
|
+
|
|
119
|
+
# Recall patterns
|
|
120
|
+
pattern = empathy.recall_pattern(result["pattern_id"])
|
|
121
|
+
|
|
122
|
+
"""
|
|
123
|
+
|
|
124
|
+
def __init__(
|
|
125
|
+
self,
|
|
126
|
+
user_id: str,
|
|
127
|
+
target_level: int = 3,
|
|
128
|
+
confidence_threshold: float = 0.75,
|
|
129
|
+
logger: logging.Logger | None = None,
|
|
130
|
+
shared_library: PatternLibrary | None = None,
|
|
131
|
+
short_term_memory: RedisShortTermMemory | None = None,
|
|
132
|
+
access_tier: AccessTier = AccessTier.CONTRIBUTOR,
|
|
133
|
+
):
|
|
134
|
+
"""Initialize EmpathyOS
|
|
135
|
+
|
|
136
|
+
Args:
|
|
137
|
+
user_id: Unique identifier for user/team
|
|
138
|
+
target_level: Target empathy level (1-5), default 3 (Proactive)
|
|
139
|
+
confidence_threshold: Minimum confidence for anticipatory actions (0.0-1.0)
|
|
140
|
+
logger: Optional logger instance for structured logging
|
|
141
|
+
shared_library: Optional shared PatternLibrary for multi-agent collaboration.
|
|
142
|
+
When provided, enables agents to share discovered patterns,
|
|
143
|
+
supporting Level 5 (Systems Empathy) distributed memory networks.
|
|
144
|
+
short_term_memory: Optional RedisShortTermMemory for fast, TTL-based working
|
|
145
|
+
memory. Enables real-time multi-agent coordination, pattern
|
|
146
|
+
staging, and conflict resolution.
|
|
147
|
+
access_tier: Access tier for this agent (Observer, Contributor, Validator, Steward).
|
|
148
|
+
Determines what operations the agent can perform on shared memory.
|
|
149
|
+
|
|
150
|
+
"""
|
|
151
|
+
self.user_id = user_id
|
|
152
|
+
self.target_level = target_level
|
|
153
|
+
self.confidence_threshold = confidence_threshold
|
|
154
|
+
self.logger = logger or logging.getLogger(__name__)
|
|
155
|
+
self.shared_library = shared_library
|
|
156
|
+
|
|
157
|
+
# Short-term memory for multi-agent coordination
|
|
158
|
+
self.short_term_memory = short_term_memory
|
|
159
|
+
self.credentials = AgentCredentials(agent_id=user_id, tier=access_tier)
|
|
160
|
+
|
|
161
|
+
# Collaboration state tracking
|
|
162
|
+
self.collaboration_state = CollaborationState()
|
|
163
|
+
|
|
164
|
+
# System thinking components
|
|
165
|
+
self.feedback_detector = FeedbackLoopDetector()
|
|
166
|
+
self.emergence_detector = EmergenceDetector()
|
|
167
|
+
self.leverage_analyzer = LeveragePointAnalyzer()
|
|
168
|
+
|
|
169
|
+
# Pattern storage for Level 3+
|
|
170
|
+
self.user_patterns: list[dict] = []
|
|
171
|
+
self.system_trajectory: list[dict] = []
|
|
172
|
+
|
|
173
|
+
# Current empathy level
|
|
174
|
+
self.current_empathy_level = 1
|
|
175
|
+
|
|
176
|
+
# Session ID for tracking (generated on first use)
|
|
177
|
+
self._session_id: str | None = None
|
|
178
|
+
|
|
179
|
+
# Unified memory (lazily initialized)
|
|
180
|
+
self._unified_memory: UnifiedMemory | None = None
|
|
181
|
+
|
|
182
|
+
@property
|
|
183
|
+
def memory(self) -> UnifiedMemory:
|
|
184
|
+
"""Unified memory interface for both short-term and long-term storage.
|
|
185
|
+
|
|
186
|
+
Lazily initializes on first access with environment auto-detection.
|
|
187
|
+
|
|
188
|
+
Usage:
|
|
189
|
+
empathy = EmpathyOS(user_id="agent_1")
|
|
190
|
+
|
|
191
|
+
# Store working data (short-term)
|
|
192
|
+
empathy.memory.stash("analysis", {"results": [...]})
|
|
193
|
+
|
|
194
|
+
# Persist pattern (long-term)
|
|
195
|
+
result = empathy.memory.persist_pattern(
|
|
196
|
+
content="Algorithm for X",
|
|
197
|
+
pattern_type="algorithm",
|
|
198
|
+
)
|
|
199
|
+
|
|
200
|
+
# Retrieve pattern
|
|
201
|
+
pattern = empathy.memory.recall_pattern(result["pattern_id"])
|
|
202
|
+
"""
|
|
203
|
+
if self._unified_memory is None:
|
|
204
|
+
self._unified_memory = UnifiedMemory(
|
|
205
|
+
user_id=self.user_id,
|
|
206
|
+
access_tier=self.credentials.tier,
|
|
207
|
+
)
|
|
208
|
+
return self._unified_memory
|
|
209
|
+
|
|
210
|
+
# =========================================================================
|
|
211
|
+
# UNIFIED MEMORY CONVENIENCE METHODS
|
|
212
|
+
# =========================================================================
|
|
213
|
+
|
|
214
|
+
def persist_pattern(
|
|
215
|
+
self,
|
|
216
|
+
content: str,
|
|
217
|
+
pattern_type: str,
|
|
218
|
+
classification: Classification | str | None = None,
|
|
219
|
+
auto_classify: bool = True,
|
|
220
|
+
) -> dict | None:
|
|
221
|
+
"""Store a pattern in long-term memory with security controls.
|
|
222
|
+
|
|
223
|
+
This is a convenience method that delegates to memory.persist_pattern().
|
|
224
|
+
|
|
225
|
+
Args:
|
|
226
|
+
content: Pattern content
|
|
227
|
+
pattern_type: Type (algorithm, protocol, config, etc.)
|
|
228
|
+
classification: Security classification (or auto-detect)
|
|
229
|
+
auto_classify: Auto-detect classification from content
|
|
230
|
+
|
|
231
|
+
Returns:
|
|
232
|
+
Storage result with pattern_id and classification
|
|
233
|
+
|
|
234
|
+
Example:
|
|
235
|
+
>>> empathy = EmpathyOS(user_id="dev@company.com")
|
|
236
|
+
>>> result = empathy.persist_pattern(
|
|
237
|
+
... content="Our proprietary algorithm for...",
|
|
238
|
+
... pattern_type="algorithm",
|
|
239
|
+
... )
|
|
240
|
+
>>> print(result["classification"]) # "INTERNAL"
|
|
241
|
+
|
|
242
|
+
"""
|
|
243
|
+
return self.memory.persist_pattern(
|
|
244
|
+
content=content,
|
|
245
|
+
pattern_type=pattern_type,
|
|
246
|
+
classification=classification,
|
|
247
|
+
auto_classify=auto_classify,
|
|
248
|
+
)
|
|
249
|
+
|
|
250
|
+
def recall_pattern(self, pattern_id: str) -> dict | None:
|
|
251
|
+
"""Retrieve a pattern from long-term memory.
|
|
252
|
+
|
|
253
|
+
This is a convenience method that delegates to memory.recall_pattern().
|
|
254
|
+
|
|
255
|
+
Args:
|
|
256
|
+
pattern_id: ID of pattern to retrieve
|
|
257
|
+
|
|
258
|
+
Returns:
|
|
259
|
+
Pattern data with content and metadata
|
|
260
|
+
|
|
261
|
+
Example:
|
|
262
|
+
>>> pattern = empathy.recall_pattern("pat_123")
|
|
263
|
+
>>> print(pattern["content"])
|
|
264
|
+
|
|
265
|
+
"""
|
|
266
|
+
return self.memory.recall_pattern(pattern_id)
|
|
267
|
+
|
|
268
|
+
def stash(self, key: str, value: Any, ttl_seconds: int = 3600) -> bool:
|
|
269
|
+
"""Store data in short-term memory with TTL.
|
|
270
|
+
|
|
271
|
+
This is a convenience method that delegates to memory.stash().
|
|
272
|
+
|
|
273
|
+
Args:
|
|
274
|
+
key: Storage key
|
|
275
|
+
value: Data to store
|
|
276
|
+
ttl_seconds: Time-to-live (default 1 hour)
|
|
277
|
+
|
|
278
|
+
Returns:
|
|
279
|
+
True if stored successfully
|
|
280
|
+
|
|
281
|
+
"""
|
|
282
|
+
return self.memory.stash(key, value, ttl_seconds)
|
|
283
|
+
|
|
284
|
+
def retrieve(self, key: str) -> Any:
|
|
285
|
+
"""Retrieve data from short-term memory.
|
|
286
|
+
|
|
287
|
+
This is a convenience method that delegates to memory.retrieve().
|
|
288
|
+
|
|
289
|
+
Args:
|
|
290
|
+
key: Storage key
|
|
291
|
+
|
|
292
|
+
Returns:
|
|
293
|
+
Stored data or None
|
|
294
|
+
|
|
295
|
+
"""
|
|
296
|
+
return self.memory.retrieve(key)
|
|
297
|
+
|
|
298
|
+
async def __aenter__(self):
    """Enter the async context manager.

    Enables usage: ``async with EmpathyOS(...) as empathy:``.

    Returns:
        self: This instance, ready for use inside the ``async with`` block.
    """
    # No async resources are initialized today; subclasses may hook in here.
    return self
|
|
309
|
+
|
|
310
|
+
async def __aexit__(self, exc_type, exc_val, exc_tb):
    """Exit the async context manager, running cleanup.

    Delegates teardown to ``_cleanup()`` and never suppresses the
    in-flight exception.

    Args:
        exc_type: Exception class, if one was raised inside the block.
        exc_val: Exception instance, if raised.
        exc_tb: Exception traceback, if raised.

    Returns:
        False, so any exception propagates (standard context-manager behavior).
    """
    await self._cleanup()
    return False  # never swallow the caller's exception
|
|
329
|
+
|
|
330
|
+
async def _cleanup(self):
    """Release resources when the async context exits.

    **Extension Point**: Override to add custom teardown logic
    (e.g., save state to a database, close connections, send metrics).
    """
    # Intentionally a no-op today. Planned work:
    # - persist patterns to disk
    # - emit final metrics
    # - shut down async connections
|
|
339
|
+
|
|
340
|
+
# =========================================================================
|
|
341
|
+
# SHARED PATTERN LIBRARY (Multi-Agent Collaboration)
|
|
342
|
+
# =========================================================================
|
|
343
|
+
|
|
344
|
+
def contribute_pattern(self, pattern) -> None:
    """Publish a discovered pattern to the shared multi-agent library.

    Enables Level 5 Systems Empathy: patterns discovered by this agent
    become visible to every agent attached to the same library.

    Args:
        pattern: Pattern object to contribute.

    Raises:
        RuntimeError: If no shared library is configured.

    Example:
        >>> from attune import Pattern, PatternLibrary
        >>> library = PatternLibrary()
        >>> agent = EmpathyOS(user_id="code_reviewer", shared_library=library)
        >>> agent.contribute_pattern(Pattern(id="pat_001", agent_id="code_reviewer",
        ...                                  pattern_type="best_practice",
        ...                                  name="Test pattern",
        ...                                  description="A discovered pattern"))
    """
    library = self.shared_library
    if library is None:
        raise RuntimeError(
            "No shared library configured. Pass shared_library to __init__ "
            "to enable multi-agent pattern sharing.",
        )
    library.contribute_pattern(self.user_id, pattern)
|
|
376
|
+
|
|
377
|
+
def query_patterns(self, context: dict, **kwargs):
    """Look up shared-library patterns relevant to the current context.

    Lets this agent benefit from patterns discovered by other agents
    in the distributed memory network.

    Args:
        context: Dictionary describing the current situation.
        **kwargs: Forwarded to ``PatternLibrary.query_patterns()``
            (e.g. pattern_type, min_confidence, limit).

    Returns:
        List of PatternMatch objects sorted by relevance.

    Raises:
        RuntimeError: If no shared library is configured.

    Example:
        >>> matches = agent.query_patterns(
        ...     context={"language": "python", "task": "code_review"},
        ...     min_confidence=0.7,
        ... )
        >>> for match in matches:
        ...     print(f"{match.pattern.name}: {match.relevance_score:.0%}")
    """
    library = self.shared_library
    if library is None:
        raise RuntimeError(
            "No shared library configured. Pass shared_library to __init__ "
            "to enable multi-agent pattern sharing.",
        )
    return library.query_patterns(self.user_id, context, **kwargs)
|
|
409
|
+
|
|
410
|
+
def has_shared_library(self) -> bool:
    """Return True when a shared pattern library is attached to this agent."""
    return self.shared_library is not None
|
|
413
|
+
|
|
414
|
+
# =========================================================================
|
|
415
|
+
# LEVEL 1: REACTIVE EMPATHY
|
|
416
|
+
# =========================================================================
|
|
417
|
+
|
|
418
|
+
async def level_1_reactive(self, user_request: str) -> dict:
    """Level 1: Reactive Empathy — answer exactly what was asked.

    No anticipation and no proactive action: validate the request,
    process it, record the interaction, and report the outcome.

    Args:
        user_request: The user's explicit request text.

    Returns:
        Dict with the processed result and reasoning.

    Raises:
        ValidationError: If user_request is empty or not a string.
    """
    # Reject non-strings and blank strings up front.
    if not isinstance(user_request, str):
        raise ValidationError(
            f"user_request must be a string, got {type(user_request).__name__}",
        )
    if not user_request.strip():
        raise ValidationError("user_request cannot be empty")

    self.logger.info(
        "Level 1 reactive request started",
        extra={
            "user_id": self.user_id,
            "empathy_level": 1,
            "request_length": len(user_request),
        },
    )

    self.current_empathy_level = 1

    # Domain-specific handling lives in _process_request (override point).
    outcome = await self._process_request(user_request)

    self.logger.info(
        "Level 1 reactive request completed",
        extra={"user_id": self.user_id, "success": outcome.get("status") == "success"},
    )

    # Record the interaction in the running collaboration state.
    self.collaboration_state.total_interactions += 1

    return {
        "level": 1,
        "type": "reactive",
        "result": outcome,
        "reasoning": "Responding to explicit request",
        "empathy_level": "Reactive: Help after being asked",
    }
|
|
471
|
+
|
|
472
|
+
# =========================================================================
|
|
473
|
+
# LEVEL 2: GUIDED EMPATHY
|
|
474
|
+
# =========================================================================
|
|
475
|
+
|
|
476
|
+
async def level_2_guided(self, user_request: str) -> dict:
    """Level 2: Guided Empathy

    Use calibrated questions (Voss) to clarify intent before acting.
    Collaborative exploration to uncover hidden needs.

    Args:
        user_request: User's request (potentially ambiguous)

    Returns:
        Dict with clarification questions or refined result

    Raises:
        ValidationError: If user_request is empty or not a string

    """
    # Input validation
    if not isinstance(user_request, str):
        raise ValidationError(
            f"user_request must be a string, got {type(user_request).__name__}",
        )
    if not user_request.strip():
        raise ValidationError("user_request cannot be empty")

    self.current_empathy_level = 2

    self.logger.info(
        "Level 2 guided request started",
        extra={
            "user_id": self.user_id,
            "empathy_level": 2,
            "request_length": len(user_request),
        },
    )

    # Use Voss's calibrated questions
    clarification = await self._ask_calibrated_questions(user_request)

    # Ambiguous request: return questions instead of acting, so the caller
    # can gather answers and call again.
    if clarification["needs_clarification"]:
        return {
            "level": 2,
            "type": "guided",
            "action": "clarify_first",
            "questions": clarification["questions"],
            "reasoning": "Asking clarifying questions to understand true intent",
            "empathy_level": "Guided: Collaborative exploration",
        }

    # Refine request based on clarification
    refined_request = self._refine_request(user_request, clarification)

    # Process refined request
    result = await self._process_request(refined_request)

    # Update collaboration state (count the interaction and merge what was
    # learned into the shared context for later levels).
    self.collaboration_state.total_interactions += 1
    self.collaboration_state.shared_context.update(clarification)

    self.logger.info(
        "Level 2 guided request completed",
        extra={
            "user_id": self.user_id,
            "empathy_level": 2,
            "action": "proceed",
            "clarification_applied": True,
        },
    )

    return {
        "level": 2,
        "type": "guided",
        "action": "proceed",
        "result": result,
        "clarification": clarification,
        "reasoning": "Collaborated to refine understanding before execution",
        "empathy_level": "Guided: Clarified through questions",
    }
|
|
553
|
+
|
|
554
|
+
# =========================================================================
|
|
555
|
+
# LEVEL 3: PROACTIVE EMPATHY
|
|
556
|
+
# =========================================================================
|
|
557
|
+
|
|
558
|
+
async def level_3_proactive(self, context: dict) -> dict:
    """Level 3: Proactive Empathy

    Detect patterns, act on leading indicators.
    Take initiative without being asked.

    Args:
        context: Current context (user activity, system state, etc.)

    Returns:
        Dict with proactive actions taken

    Raises:
        ValidationError: If context is not a dict or is empty

    """
    # Input validation
    if not isinstance(context, dict):
        raise ValidationError(f"context must be a dict, got {type(context).__name__}")
    if not context:
        raise ValidationError("context cannot be empty")

    self.current_empathy_level = 3

    self.logger.info(
        "Level 3 proactive analysis started",
        extra={
            "user_id": self.user_id,
            "empathy_level": 3,
            "context_keys": list(context.keys()),
        },
    )

    # Detect current patterns
    active_patterns = self._detect_active_patterns(context)

    # Select proactive actions based on patterns
    proactive_actions = []

    for pattern in active_patterns:
        if pattern["confidence"] > 0.8:  # High confidence required
            action = self._design_proactive_action(pattern)

            # Safety check (confidence gate in _is_safe_to_execute)
            if self._is_safe_to_execute(action):
                proactive_actions.append(action)

    # Execute proactive actions
    results = await self._execute_proactive_actions(proactive_actions)

    # Update collaboration state: each success/failure adjusts trust
    for result in results:
        outcome = "success" if result["success"] else "failure"
        self.collaboration_state.update_trust(outcome)

    self.logger.info(
        "Level 3 proactive actions completed",
        extra={
            "user_id": self.user_id,
            "empathy_level": 3,
            "patterns_detected": len(active_patterns),
            "actions_taken": len(proactive_actions),
            "success_rate": (
                sum(1 for r in results if r["success"]) / len(results) if results else 0
            ),
        },
    )

    return {
        "level": 3,
        "type": "proactive",
        "patterns_detected": len(active_patterns),
        "actions_taken": len(proactive_actions),
        "results": results,
        "reasoning": "Acting on detected patterns without being asked",
        "empathy_level": "Proactive: Act before being asked",
    }
|
|
635
|
+
|
|
636
|
+
# =========================================================================
|
|
637
|
+
# LEVEL 4: ANTICIPATORY EMPATHY
|
|
638
|
+
# =========================================================================
|
|
639
|
+
|
|
640
|
+
async def level_4_anticipatory(self, system_trajectory: dict) -> dict:
    """Level 4: Anticipatory Empathy (THE INNOVATION)

    Predict future bottlenecks, design relief in advance.

    This is STRATEGIC CARE:
    - Timing + Prediction + Initiative
    - Solve tomorrow's pain today
    - Act without being told (but without overstepping)

    Args:
        system_trajectory: System state + growth trends + constraints

    Returns:
        Dict with predicted bottlenecks and interventions

    Raises:
        ValidationError: If system_trajectory is not a dict or is empty

    """
    # Input validation
    if not isinstance(system_trajectory, dict):
        raise ValidationError(
            f"system_trajectory must be a dict, got {type(system_trajectory).__name__}",
        )
    if not system_trajectory:
        raise ValidationError("system_trajectory cannot be empty")

    self.current_empathy_level = 4

    self.logger.info(
        "Level 4 anticipatory prediction started",
        extra={
            "user_id": self.user_id,
            "empathy_level": 4,
            "trajectory_keys": list(system_trajectory.keys()),
        },
    )

    # Analyze system trajectory
    predicted_bottlenecks = self._predict_future_bottlenecks(system_trajectory)

    # Design structural relief for each bottleneck
    interventions = []

    for bottleneck in predicted_bottlenecks:
        # Only intervene if:
        # 1. High confidence (>75%)
        # 2. Appropriate time horizon (30-120 days)
        # 3. Reversible action
        if self._should_anticipate(bottleneck):
            intervention = self._design_anticipatory_intervention(bottleneck)
            interventions.append(intervention)

    # Execute anticipatory interventions
    results = await self._execute_anticipatory_interventions(interventions)

    # Update collaboration state: each success/failure adjusts trust
    for result in results:
        outcome = "success" if result["success"] else "failure"
        self.collaboration_state.update_trust(outcome)

    self.logger.info(
        "Level 4 anticipatory interventions completed",
        extra={
            "user_id": self.user_id,
            "empathy_level": 4,
            "bottlenecks_predicted": len(predicted_bottlenecks),
            "interventions_executed": len(interventions),
            "success_rate": (
                sum(1 for r in results if r["success"]) / len(results) if results else 0
            ),
        },
    )

    # NOTE: "bottlenecks_predicted" here is the full list, whereas the log
    # extra above reports only its length — intentional asymmetry for callers.
    return {
        "level": 4,
        "type": "anticipatory",
        "bottlenecks_predicted": predicted_bottlenecks,
        "interventions_designed": len(interventions),
        "results": results,
        "reasoning": "Predicting future bottlenecks and designing relief in advance",
        "empathy_level": "Anticipatory: Predict and prevent problems",
        "formula": "Timing + Prediction + Initiative = Anticipatory Empathy",
    }
|
|
725
|
+
|
|
726
|
+
# =========================================================================
|
|
727
|
+
# LEVEL 5: SYSTEMS EMPATHY
|
|
728
|
+
# =========================================================================
|
|
729
|
+
|
|
730
|
+
async def level_5_systems(self, domain_context: dict) -> dict:
    """Level 5: Systems Empathy

    Build structures that help at scale.
    Design leverage points, frameworks, self-sustaining systems.

    This is ARCHITECTURAL CARE:
    - One framework → infinite applications
    - Solve entire problem class, not individual instances
    - Design for emergence of desired properties

    Args:
        domain_context: Domain information, recurring problems, patterns

    Returns:
        Dict with designed frameworks and leverage points

    Raises:
        ValidationError: If domain_context is not a dict or is empty

    """
    # Input validation
    if not isinstance(domain_context, dict):
        raise ValidationError(
            f"domain_context must be a dict, got {type(domain_context).__name__}",
        )
    if not domain_context:
        raise ValidationError("domain_context cannot be empty")

    self.current_empathy_level = 5

    self.logger.info(
        "Level 5 systems framework design started",
        extra={
            "user_id": self.user_id,
            "empathy_level": 5,
            "domain_keys": list(domain_context.keys()),
        },
    )

    # Identify problem class (not individual problem)
    problem_classes = self._identify_problem_classes(domain_context)

    # Find leverage points (Meadows's framework)
    leverage_points = []
    for problem_class in problem_classes:
        points = self.leverage_analyzer.find_leverage_points(problem_class)
        leverage_points.extend(points)

    # Design structural interventions at highest leverage points
    frameworks = []
    for lp in leverage_points:
        if lp.level.value >= 8:  # High leverage points only (Rules and above)
            framework = self._design_framework(lp)
            frameworks.append(framework)

    # Implement frameworks
    results = await self._implement_frameworks(frameworks)

    self.logger.info(
        "Level 5 systems frameworks implemented",
        extra={
            "user_id": self.user_id,
            "empathy_level": 5,
            "problem_classes": len(problem_classes),
            "leverage_points_found": len(leverage_points),
            "frameworks_deployed": len(frameworks),
        },
    )

    return {
        "level": 5,
        "type": "systems",
        "problem_classes": len(problem_classes),
        "leverage_points": leverage_points,
        "frameworks_designed": len(frameworks),
        "results": results,
        "reasoning": "Building structural solutions that scale to entire problem class",
        "empathy_level": "Systems: Build structures that help at scale",
    }
|
|
810
|
+
|
|
811
|
+
# =========================================================================
|
|
812
|
+
# HELPER METHODS (implement based on your domain)
|
|
813
|
+
# =========================================================================
|
|
814
|
+
|
|
815
|
+
async def _process_request(self, request: str) -> dict:
    """Handle a user request (placeholder domain logic).

    **Extension Point**: Override this method in subclasses to implement
    your specific domain logic for processing user requests.

    Args:
        request: The user's request string.

    Returns:
        Dict echoing the request with a success status.
    """
    # Default stub: report the request as successfully processed.
    response = {"processed": request, "status": "success"}
    return response
|
|
830
|
+
|
|
831
|
+
async def _ask_calibrated_questions(self, request: str) -> dict:
    """Voss's tactical empathy: decide whether calibrated questions are needed.

    **Extension Point**: Override to implement sophisticated clarification
    logic using NLP, LLMs, or domain-specific heuristics.

    Fix over the previous heuristic: vague terms are matched as whole words,
    so inputs like "awesome" or "something" no longer false-positive on the
    embedded substring "some".

    Args:
        request: The user's request string.

    Returns:
        Dict with a ``needs_clarification`` flag and, when set, a
        ``questions`` list of calibrated questions.
    """
    import re

    # Whole-word, case-insensitive match for vague quantifiers/timing.
    needs_clarification = (
        re.search(r"\b(?:some|a few|many|soon)\b", request.lower()) is not None
    )

    if needs_clarification:
        return {
            "needs_clarification": True,
            "questions": [
                "What are you hoping to accomplish?",
                "How does this fit into your workflow?",
                "What would make this most helpful right now?",
            ],
        }
    return {"needs_clarification": False}
|
|
859
|
+
|
|
860
|
+
def _refine_request(self, original: str, clarification: dict) -> str:
    """Fold clarification Q&A back into the request text.

    **Extension Point**: Override to implement domain-specific request
    refinement based on clarification questions and user responses.

    Args:
        original: Original request string.
        clarification: Dict containing clarification questions and responses.

    Returns:
        Refined request string with added context, or the original when
        no clarification was needed or no responses were provided.
    """
    if clarification.get("needs_clarification", False) and "responses" in clarification:
        qa_lines = [
            f"- {question}: {response}"
            for question, response in clarification["responses"].items()
        ]
        return original + "\n\nClarifications:\n" + "\n".join(qa_lines)

    # Nothing to fold in: hand the request back unchanged.
    return original
|
|
889
|
+
|
|
890
|
+
def _detect_active_patterns(self, context: dict) -> list[dict]:
    """Scan the context for known behavioral patterns.

    Currently recognizes a single example signal: a truthy
    ``repeated_action`` key maps to a sequential-usage pattern.
    """
    detected: list[dict] = []

    # Example detection: repeated action implies a sequential habit.
    if context.get("repeated_action"):
        detected.append(
            {
                "type": "sequential",
                "pattern": "user_always_does_X_before_Y",
                "confidence": 0.85,
            },
        )

    return detected
|
|
905
|
+
|
|
906
|
+
def _design_proactive_action(self, pattern: dict) -> dict:
    """Translate a detected pattern into a concrete proactive action."""
    description = pattern["pattern"]
    return {
        "action": "prefetch_data",
        "reasoning": f"Pattern detected: {description}",
        "confidence": pattern["confidence"],
    }
|
|
913
|
+
|
|
914
|
+
def _is_safe_to_execute(self, action: dict[str, Any]) -> bool:
    """Gate proactive actions behind a minimum confidence of 0.8.

    A missing "confidence" key counts as zero confidence (unsafe).
    """
    return action.get("confidence", 0) > 0.8
|
|
918
|
+
|
|
919
|
+
async def _execute_proactive_actions(self, actions: list[dict]) -> list[dict]:
    """Execute proactive actions

    **Extension Point**: Override to implement actual execution of proactive
    actions in your domain (e.g., file operations, API calls, UI updates).

    This default implementation simulates execution with basic validation.
    Override this method to add real action execution logic.

    Args:
        actions: List of action dicts to execute

    Returns:
        List of result dicts with action and success status

    """
    results = []
    for action in actions:
        # Validate action has required fields; malformed actions are
        # reported as failures rather than raising.
        if not action.get("action"):
            results.append(
                {"action": action, "success": False, "error": "Missing 'action' field"},
            )
            continue

        # Log the action (in production, this would execute real logic)
        self.logger.debug(
            f"Executing proactive action: {action.get('action')}",
            extra={
                "user_id": self.user_id,
                "action_type": action.get("action"),
                "confidence": action.get("confidence", 0),
            },
        )

        # Simulate successful execution
        # NOTE(review): datetime.now() is a naive local timestamp — confirm
        # UTC is not required by consumers of "executed_at".
        results.append(
            {"action": action, "success": True, "executed_at": datetime.now().isoformat()},
        )

    return results
|
|
960
|
+
|
|
961
|
+
def _predict_future_bottlenecks(self, trajectory: dict) -> list[dict]:
    """Predict where the system will hit friction/overload.

    Uses trajectory analysis, domain knowledge, historical patterns.

    Fix: previously a truthy ``feature_count_increasing`` with a missing
    ``current_feature_count`` raised KeyError; missing counts now default
    to 0 (which simply yields no bottleneck prediction).

    Args:
        trajectory: System state and growth trends.

    Returns:
        List of predicted bottleneck dicts (possibly empty).
    """
    bottlenecks = []

    # Example: Scaling bottleneck — project feature count 3 months out.
    if trajectory.get("feature_count_increasing"):
        current = trajectory.get("current_feature_count", 0)
        growth_rate = trajectory.get("growth_rate", 0)
        projected_3mo = current + (growth_rate * 3)

        if projected_3mo > trajectory.get("threshold", 25):
            bottlenecks.append(
                {
                    "type": "scaling_bottleneck",
                    "area": "testing",
                    "description": "Testing burden will become unsustainable",
                    "timeframe": "2-3 months",
                    "confidence": 0.75,
                    "current_state": f"{current} features",
                    "predicted_state": f"{projected_3mo} features",
                    "impact": trajectory.get("impact", "low"),
                },
            )

    return bottlenecks
|
|
989
|
+
|
|
990
|
+
def _should_anticipate(self, bottleneck: dict) -> bool:
    """Safety checks for Level 4 anticipatory actions.

    Validates three gates:
    1. Confidence is above the configured threshold.
    2. Time horizon is appropriate (30-120 days).
    3. Impact justifies the intervention effort.
    """
    # Gate 1: prediction must be confident enough.
    if bottleneck["confidence"] < self.confidence_threshold:
        return False

    # Gate 2: horizon must be 30-120 days out.
    # Too soon (<30 days) = reactive, not anticipatory;
    # too far (>120 days) = too uncertain. Unparseable -> skip this gate.
    horizon_days = self._parse_timeframe_to_days(bottleneck.get("timeframe", ""))
    if horizon_days is not None and not (30 <= horizon_days <= 120):
        return False

    # Gate 3: only high/critical impact justifies acting early.
    return bottleneck.get("impact", "low") in ("high", "critical")
|
|
1016
|
+
|
|
1017
|
+
def _parse_timeframe_to_days(self, timeframe: str) -> int | None:
|
|
1018
|
+
"""Parse timeframe string to days
|
|
1019
|
+
|
|
1020
|
+
Examples:
|
|
1021
|
+
"2-3 months" -> 75 (midpoint)
|
|
1022
|
+
"60 days" -> 60
|
|
1023
|
+
"3 weeks" -> 21
|
|
1024
|
+
|
|
1025
|
+
Returns:
|
|
1026
|
+
Number of days, or None if unparseable
|
|
1027
|
+
|
|
1028
|
+
"""
|
|
1029
|
+
import re
|
|
1030
|
+
|
|
1031
|
+
if not timeframe:
|
|
1032
|
+
return None
|
|
1033
|
+
|
|
1034
|
+
timeframe_lower = timeframe.lower()
|
|
1035
|
+
|
|
1036
|
+
# Pattern: "X days"
|
|
1037
|
+
match = re.search(r"(\d+)\s*days?", timeframe_lower)
|
|
1038
|
+
if match:
|
|
1039
|
+
return int(match.group(1))
|
|
1040
|
+
|
|
1041
|
+
# Pattern: "X weeks"
|
|
1042
|
+
match = re.search(r"(\d+)\s*weeks?", timeframe_lower)
|
|
1043
|
+
if match:
|
|
1044
|
+
return int(match.group(1)) * 7
|
|
1045
|
+
|
|
1046
|
+
# Pattern: "X months" or "X-Y months"
|
|
1047
|
+
match = re.search(r"(\d+)(?:-(\d+))?\s*months?", timeframe_lower)
|
|
1048
|
+
if match:
|
|
1049
|
+
start = int(match.group(1))
|
|
1050
|
+
end = int(match.group(2)) if match.group(2) else start
|
|
1051
|
+
midpoint = (start + end) / 2
|
|
1052
|
+
return int(midpoint * 30) # Approximate 30 days/month
|
|
1053
|
+
|
|
1054
|
+
# Couldn't parse - return None (will skip time validation)
|
|
1055
|
+
return None
|
|
1056
|
+
|
|
1057
|
+
def _design_anticipatory_intervention(self, bottleneck: dict) -> dict:
    """Sketch structural relief for a predicted bottleneck."""
    target_area = bottleneck["area"]
    return {
        "type": "framework_design",
        "target": target_area,
        "deliverables": ["design_doc", "implementation_plan"],
        "timeline": "Implement before threshold",
    }
|
|
1065
|
+
|
|
1066
|
+
async def _execute_anticipatory_interventions(self, interventions: list[dict]) -> list[dict]:
    """Execute anticipatory interventions

    **Extension Point**: Override to implement actual execution of
    anticipatory interventions (e.g., scaling resources, provisioning
    infrastructure, preparing documentation).

    This default implementation simulates intervention execution with
    validation and logging. Override for real infrastructure changes.

    Args:
        interventions: List of intervention dicts to execute

    Returns:
        List of result dicts with intervention and success status

    """
    results = []
    for intervention in interventions:
        # Validate intervention has required fields; malformed entries are
        # reported as failures rather than raising.
        if not intervention.get("type"):
            results.append(
                {
                    "intervention": intervention,
                    "success": False,
                    "error": "Missing 'type' field",
                },
            )
            continue

        # Log the intervention (in production, this would trigger real infrastructure changes)
        self.logger.info(
            f"Executing anticipatory intervention: {intervention.get('type')}",
            extra={
                "user_id": self.user_id,
                "intervention_type": intervention.get("type"),
                "target": intervention.get("target"),
                "timeline": intervention.get("timeline"),
            },
        )

        # Simulate successful intervention
        # NOTE(review): datetime.now() is a naive local timestamp — confirm
        # UTC is not required by consumers of "executed_at".
        results.append(
            {
                "intervention": intervention,
                "success": True,
                "executed_at": datetime.now().isoformat(),
                "status": "intervention_deployed",
            },
        )

    return results
|
|
1118
|
+
|
|
1119
|
+
def _identify_problem_classes(self, domain_context: dict) -> list[dict]:
|
|
1120
|
+
"""Identify recurring problem classes (not individual instances)
|
|
1121
|
+
|
|
1122
|
+
Use "Rule of Three":
|
|
1123
|
+
- Occurred at least 3 times
|
|
1124
|
+
- Will occur at least 3 more times
|
|
1125
|
+
- Affects at least 3 users/workflows
|
|
1126
|
+
"""
|
|
1127
|
+
problem_classes = []
|
|
1128
|
+
|
|
1129
|
+
# Example detection logic
|
|
1130
|
+
if domain_context.get("recurring_documentation_burden"):
|
|
1131
|
+
problem_classes.append(
|
|
1132
|
+
{
|
|
1133
|
+
"class": "documentation_burden",
|
|
1134
|
+
"instances": domain_context["instances"],
|
|
1135
|
+
"frequency": "every_new_feature",
|
|
1136
|
+
},
|
|
1137
|
+
)
|
|
1138
|
+
|
|
1139
|
+
return problem_classes
|
|
1140
|
+
|
|
1141
|
+
def _design_framework(self, leverage_point: LeveragePoint) -> dict:
    """Design a framework targeted at the given leverage point."""
    # Name the framework after the domain it addresses.
    framework_name = f"{leverage_point.problem_domain}_framework"
    return {
        "name": framework_name,
        "type": "architectural_pattern",
        "leverage_point": leverage_point.description,
        "leverage_level": leverage_point.level.value,
        "impact": "Scales to all current + future instances",
    }
async def _implement_frameworks(self, frameworks: list[dict]) -> list[dict]:
|
|
1152
|
+
"""Implement designed frameworks
|
|
1153
|
+
|
|
1154
|
+
**Extension Point**: Override to implement actual framework deployment
|
|
1155
|
+
(e.g., generating code templates, creating CI/CD pipelines, deploying
|
|
1156
|
+
infrastructure, setting up monitoring).
|
|
1157
|
+
|
|
1158
|
+
This default implementation simulates framework deployment with validation
|
|
1159
|
+
and logging. Override for real framework deployment logic.
|
|
1160
|
+
|
|
1161
|
+
Args:
|
|
1162
|
+
frameworks: List of framework dicts to implement
|
|
1163
|
+
|
|
1164
|
+
Returns:
|
|
1165
|
+
List of result dicts with framework and deployed status
|
|
1166
|
+
|
|
1167
|
+
"""
|
|
1168
|
+
results = []
|
|
1169
|
+
for framework in frameworks:
|
|
1170
|
+
# Validate framework has required fields
|
|
1171
|
+
if not framework.get("name"):
|
|
1172
|
+
results.append(
|
|
1173
|
+
{"framework": framework, "deployed": False, "error": "Missing 'name' field"},
|
|
1174
|
+
)
|
|
1175
|
+
continue
|
|
1176
|
+
|
|
1177
|
+
# Log the framework deployment (in production, this would deploy real infrastructure)
|
|
1178
|
+
self.logger.info(
|
|
1179
|
+
f"Deploying systems framework: {framework.get('name')}",
|
|
1180
|
+
extra={
|
|
1181
|
+
"user_id": self.user_id,
|
|
1182
|
+
"framework_name": framework.get("name"),
|
|
1183
|
+
"framework_type": framework.get("type"),
|
|
1184
|
+
"leverage_level": framework.get("leverage_level"),
|
|
1185
|
+
},
|
|
1186
|
+
)
|
|
1187
|
+
|
|
1188
|
+
# Simulate successful deployment
|
|
1189
|
+
results.append(
|
|
1190
|
+
{
|
|
1191
|
+
"framework": framework,
|
|
1192
|
+
"deployed": True,
|
|
1193
|
+
"deployed_at": datetime.now().isoformat(),
|
|
1194
|
+
"status": "framework_active",
|
|
1195
|
+
"impact_scope": "system_wide",
|
|
1196
|
+
},
|
|
1197
|
+
)
|
|
1198
|
+
|
|
1199
|
+
return results
|
|
1200
|
+
|
|
1201
|
+
# =========================================================================
|
|
1202
|
+
# FEEDBACK LOOP MANAGEMENT
|
|
1203
|
+
# =========================================================================
|
|
1204
|
+
|
|
1205
|
+
def monitor_feedback_loops(self, session_history: list) -> dict:
    """Detect feedback loops in the collaboration and react to them."""
    loops = self.feedback_detector.detect_active_loop(session_history)
    dominant = loops.get("dominant_loop")

    # Vicious cycle detected — intervene urgently to break it.
    if dominant == "R2_trust_erosion":
        return self._break_trust_erosion_loop()

    # Virtuous cycle detected — keep the momentum going.
    if dominant == "R1_trust_building":
        return self._maintain_trust_building_loop()

    return loops
|
|
1220
|
+
def _break_trust_erosion_loop(self) -> dict:
|
|
1221
|
+
"""Intervention to break vicious cycle of trust erosion"""
|
|
1222
|
+
return {
|
|
1223
|
+
"action": "transparency_intervention",
|
|
1224
|
+
"steps": [
|
|
1225
|
+
"Acknowledge misalignment explicitly",
|
|
1226
|
+
"Ask calibrated questions (Level 2)",
|
|
1227
|
+
"Reduce initiative temporarily (drop to Level 1-2)",
|
|
1228
|
+
"Rebuild trust through consistent small wins",
|
|
1229
|
+
],
|
|
1230
|
+
}
|
|
1231
|
+
|
|
1232
|
+
def _maintain_trust_building_loop(self) -> dict:
|
|
1233
|
+
"""Maintain virtuous cycle of trust building"""
|
|
1234
|
+
return {
|
|
1235
|
+
"action": "maintain_momentum",
|
|
1236
|
+
"steps": [
|
|
1237
|
+
"Continue current approach",
|
|
1238
|
+
"Gradually increase initiative (Level 3 → 4)",
|
|
1239
|
+
"Document successful patterns",
|
|
1240
|
+
],
|
|
1241
|
+
}
|
|
1242
|
+
|
|
1243
|
+
# =========================================================================
|
|
1244
|
+
# STATE MANAGEMENT
|
|
1245
|
+
# =========================================================================
|
|
1246
|
+
|
|
1247
|
+
def get_collaboration_state(self) -> dict:
    """Return a snapshot of the current collaboration state."""
    state = self.collaboration_state
    # Guard the division: no interactions yet means a rate of 0.
    if state.total_interactions > 0:
        success_rate = state.successful_interventions / state.total_interactions
    else:
        success_rate = 0
    return {
        "trust_level": state.trust_level,
        "total_interactions": state.total_interactions,
        "success_rate": success_rate,
        "current_empathy_level": self.current_empathy_level,
        "target_empathy_level": self.target_level,
    }
|
|
1262
|
+
def reset_collaboration_state(self):
    """Start a fresh session by discarding accumulated collaboration state."""
    # Replace the object outright so no stale counters survive the reset.
    self.collaboration_state = CollaborationState()
|
|
1266
|
+
# =========================================================================
|
|
1267
|
+
# SHORT-TERM MEMORY (Redis-backed Multi-Agent Coordination)
|
|
1268
|
+
# =========================================================================
|
|
1269
|
+
|
|
1270
|
+
def has_short_term_memory(self) -> bool:
    """Return True when a short-term memory backend is attached."""
    return self.short_term_memory is not None
|
|
1274
|
+
@property
def session_id(self) -> str:
    """Unique session ID for this agent instance (generated lazily)."""
    # Fast path: reuse the cached ID once it exists.
    if self._session_id is not None:
        return self._session_id
    import uuid

    # Short hex suffix keeps the ID readable while avoiding collisions.
    self._session_id = f"{self.user_id}_{uuid.uuid4().hex[:8]}"
    return self._session_id
|
|
1283
|
+
def stage_pattern(self, pattern: StagedPattern) -> bool:
    """Stage a discovered pattern for validation.

    Staged patterns wait in a holding area until a Validator promotes
    them into the active pattern library — the trust-but-verify approach
    to multi-agent knowledge building.

    Args:
        pattern: StagedPattern with discovery details

    Returns:
        True if staged successfully

    Raises:
        RuntimeError: If no short-term memory configured
        PermissionError: If agent lacks Contributor+ access

    Example:
        >>> from attune import StagedPattern
        >>> pattern = StagedPattern(
        ...     pattern_id="pat_auth_001",
        ...     agent_id=empathy.user_id,
        ...     pattern_type="security",
        ...     name="JWT Token Refresh Pattern",
        ...     description="Refresh tokens before expiry to prevent auth failures",
        ...     confidence=0.85,
        ... )
        >>> empathy.stage_pattern(pattern)

    """
    memory = self.short_term_memory
    if memory is None:
        raise RuntimeError(
            "No short-term memory configured. Pass short_term_memory to __init__ "
            "to enable pattern staging.",
        )
    return memory.stage_pattern(pattern, self.credentials)
|
|
1320
|
+
def get_staged_patterns(self) -> list[StagedPattern]:
    """Return every pattern currently awaiting validation.

    Patterns staged by any agent are returned; Validators use this to
    review and promote/reject them.

    Returns:
        List of StagedPattern objects

    Raises:
        RuntimeError: If no short-term memory configured

    """
    memory = self.short_term_memory
    if memory is None:
        raise RuntimeError(
            "No short-term memory configured. Pass short_term_memory to __init__ "
            "to enable pattern staging.",
        )
    return memory.list_staged_patterns(self.credentials)
|
|
1340
|
+
def send_signal(
|
|
1341
|
+
self,
|
|
1342
|
+
signal_type: str,
|
|
1343
|
+
data: dict,
|
|
1344
|
+
target_agent: str | None = None,
|
|
1345
|
+
) -> bool:
|
|
1346
|
+
"""Send a coordination signal to other agents.
|
|
1347
|
+
|
|
1348
|
+
Use signals for real-time coordination:
|
|
1349
|
+
- Notify completion of tasks
|
|
1350
|
+
- Request assistance
|
|
1351
|
+
- Broadcast status updates
|
|
1352
|
+
|
|
1353
|
+
Args:
|
|
1354
|
+
signal_type: Type of signal (e.g., "task_complete", "need_review")
|
|
1355
|
+
data: Signal payload
|
|
1356
|
+
target_agent: Specific agent to target, or None for broadcast
|
|
1357
|
+
|
|
1358
|
+
Returns:
|
|
1359
|
+
True if sent successfully
|
|
1360
|
+
|
|
1361
|
+
Raises:
|
|
1362
|
+
RuntimeError: If no short-term memory configured
|
|
1363
|
+
|
|
1364
|
+
Example:
|
|
1365
|
+
>>> # Notify specific agent
|
|
1366
|
+
>>> empathy.send_signal(
|
|
1367
|
+
... "analysis_complete",
|
|
1368
|
+
... {"files": 10, "issues_found": 3},
|
|
1369
|
+
... target_agent="lead_reviewer"
|
|
1370
|
+
... )
|
|
1371
|
+
>>> # Broadcast to all
|
|
1372
|
+
>>> empathy.send_signal("status_update", {"phase": "testing"})
|
|
1373
|
+
|
|
1374
|
+
"""
|
|
1375
|
+
if self.short_term_memory is None:
|
|
1376
|
+
raise RuntimeError(
|
|
1377
|
+
"No short-term memory configured. Pass short_term_memory to __init__ "
|
|
1378
|
+
"to enable coordination signals.",
|
|
1379
|
+
)
|
|
1380
|
+
return self.short_term_memory.send_signal(
|
|
1381
|
+
signal_type=signal_type,
|
|
1382
|
+
data=data,
|
|
1383
|
+
credentials=self.credentials,
|
|
1384
|
+
target_agent=target_agent,
|
|
1385
|
+
)
|
|
1386
|
+
|
|
1387
|
+
def receive_signals(self, signal_type: str | None = None) -> list[dict]:
|
|
1388
|
+
"""Receive coordination signals from other agents.
|
|
1389
|
+
|
|
1390
|
+
Returns signals targeted at this agent or broadcast signals.
|
|
1391
|
+
Signals expire after 5 minutes (TTL).
|
|
1392
|
+
|
|
1393
|
+
Args:
|
|
1394
|
+
signal_type: Filter by signal type, or None for all
|
|
1395
|
+
|
|
1396
|
+
Returns:
|
|
1397
|
+
List of signal dicts with sender, type, data, timestamp
|
|
1398
|
+
|
|
1399
|
+
Raises:
|
|
1400
|
+
RuntimeError: If no short-term memory configured
|
|
1401
|
+
|
|
1402
|
+
Example:
|
|
1403
|
+
>>> signals = empathy.receive_signals("analysis_complete")
|
|
1404
|
+
>>> for sig in signals:
|
|
1405
|
+
... print(f"From {sig['sender']}: {sig['data']}")
|
|
1406
|
+
|
|
1407
|
+
"""
|
|
1408
|
+
if self.short_term_memory is None:
|
|
1409
|
+
raise RuntimeError(
|
|
1410
|
+
"No short-term memory configured. Pass short_term_memory to __init__ "
|
|
1411
|
+
"to enable coordination signals.",
|
|
1412
|
+
)
|
|
1413
|
+
return self.short_term_memory.receive_signals(self.credentials, signal_type=signal_type)
|
|
1414
|
+
|
|
1415
|
+
def persist_collaboration_state(self) -> bool:
    """Persist current collaboration state to short-term memory.

    Call periodically so state can be recovered if the agent restarts.
    Persisted state expires after 30 minutes by default.

    Returns:
        True if persisted successfully

    Raises:
        RuntimeError: If no short-term memory configured

    """
    memory = self.short_term_memory
    if memory is None:
        raise RuntimeError(
            "No short-term memory configured. Pass short_term_memory to __init__ "
            "to enable state persistence.",
        )

    state = self.collaboration_state
    snapshot = {
        "trust_level": state.trust_level,
        "successful_interventions": state.successful_interventions,
        "failed_interventions": state.failed_interventions,
        "total_interactions": state.total_interactions,
        "current_empathy_level": self.current_empathy_level,
        "session_start": state.session_start.isoformat(),
        # Keep only the last 100 points so the payload stays bounded.
        "trust_trajectory": state.trust_trajectory[-100:],
    }
    return memory.stash(
        f"collaboration_state_{self.session_id}",
        snapshot,
        self.credentials,
    )
|
|
1449
|
+
def restore_collaboration_state(self, session_id: str | None = None) -> bool:
|
|
1450
|
+
"""Restore collaboration state from short-term memory.
|
|
1451
|
+
|
|
1452
|
+
Use to recover state after agent restart or to continue a previous
|
|
1453
|
+
session.
|
|
1454
|
+
|
|
1455
|
+
Args:
|
|
1456
|
+
session_id: Session to restore, or None for current session
|
|
1457
|
+
|
|
1458
|
+
Returns:
|
|
1459
|
+
True if state was found and restored
|
|
1460
|
+
|
|
1461
|
+
Raises:
|
|
1462
|
+
RuntimeError: If no short-term memory configured
|
|
1463
|
+
|
|
1464
|
+
"""
|
|
1465
|
+
if self.short_term_memory is None:
|
|
1466
|
+
raise RuntimeError(
|
|
1467
|
+
"No short-term memory configured. Pass short_term_memory to __init__ "
|
|
1468
|
+
"to enable state persistence.",
|
|
1469
|
+
)
|
|
1470
|
+
|
|
1471
|
+
sid = session_id or self.session_id
|
|
1472
|
+
state_data = self.short_term_memory.retrieve(
|
|
1473
|
+
f"collaboration_state_{sid}",
|
|
1474
|
+
self.credentials,
|
|
1475
|
+
)
|
|
1476
|
+
|
|
1477
|
+
if state_data is None:
|
|
1478
|
+
return False
|
|
1479
|
+
|
|
1480
|
+
# Restore state
|
|
1481
|
+
self.collaboration_state.trust_level = state_data.get("trust_level", 0.5)
|
|
1482
|
+
self.collaboration_state.successful_interventions = state_data.get(
|
|
1483
|
+
"successful_interventions",
|
|
1484
|
+
0,
|
|
1485
|
+
)
|
|
1486
|
+
self.collaboration_state.failed_interventions = state_data.get("failed_interventions", 0)
|
|
1487
|
+
self.collaboration_state.total_interactions = state_data.get("total_interactions", 0)
|
|
1488
|
+
self.current_empathy_level = state_data.get("current_empathy_level", 1)
|
|
1489
|
+
self.collaboration_state.trust_trajectory = state_data.get("trust_trajectory", [])
|
|
1490
|
+
|
|
1491
|
+
self.logger.info(
|
|
1492
|
+
f"Restored collaboration state from session {sid}",
|
|
1493
|
+
extra={
|
|
1494
|
+
"user_id": self.user_id,
|
|
1495
|
+
"restored_trust_level": self.collaboration_state.trust_level,
|
|
1496
|
+
"restored_interactions": self.collaboration_state.total_interactions,
|
|
1497
|
+
},
|
|
1498
|
+
)
|
|
1499
|
+
|
|
1500
|
+
return True
|
|
1501
|
+
|
|
1502
|
+
def get_memory_stats(self) -> dict | None:
|
|
1503
|
+
"""Get statistics about the short-term memory system.
|
|
1504
|
+
|
|
1505
|
+
Returns:
|
|
1506
|
+
Dict with memory usage, key counts, mode, or None if not configured
|
|
1507
|
+
|
|
1508
|
+
"""
|
|
1509
|
+
if self.short_term_memory is None:
|
|
1510
|
+
return None
|
|
1511
|
+
return self.short_term_memory.get_stats()
|