attune-ai 2.0.0 (attune_ai-2.0.0-py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- attune/__init__.py +358 -0
- attune/adaptive/__init__.py +13 -0
- attune/adaptive/task_complexity.py +127 -0
- attune/agent_monitoring.py +414 -0
- attune/cache/__init__.py +117 -0
- attune/cache/base.py +166 -0
- attune/cache/dependency_manager.py +256 -0
- attune/cache/hash_only.py +251 -0
- attune/cache/hybrid.py +457 -0
- attune/cache/storage.py +285 -0
- attune/cache_monitor.py +356 -0
- attune/cache_stats.py +298 -0
- attune/cli/__init__.py +152 -0
- attune/cli/__main__.py +12 -0
- attune/cli/commands/__init__.py +1 -0
- attune/cli/commands/batch.py +264 -0
- attune/cli/commands/cache.py +248 -0
- attune/cli/commands/help.py +331 -0
- attune/cli/commands/info.py +140 -0
- attune/cli/commands/inspect.py +436 -0
- attune/cli/commands/inspection.py +57 -0
- attune/cli/commands/memory.py +48 -0
- attune/cli/commands/metrics.py +92 -0
- attune/cli/commands/orchestrate.py +184 -0
- attune/cli/commands/patterns.py +207 -0
- attune/cli/commands/profiling.py +202 -0
- attune/cli/commands/provider.py +98 -0
- attune/cli/commands/routing.py +285 -0
- attune/cli/commands/setup.py +96 -0
- attune/cli/commands/status.py +235 -0
- attune/cli/commands/sync.py +166 -0
- attune/cli/commands/tier.py +121 -0
- attune/cli/commands/utilities.py +114 -0
- attune/cli/commands/workflow.py +579 -0
- attune/cli/core.py +32 -0
- attune/cli/parsers/__init__.py +68 -0
- attune/cli/parsers/batch.py +118 -0
- attune/cli/parsers/cache.py +65 -0
- attune/cli/parsers/help.py +41 -0
- attune/cli/parsers/info.py +26 -0
- attune/cli/parsers/inspect.py +66 -0
- attune/cli/parsers/metrics.py +42 -0
- attune/cli/parsers/orchestrate.py +61 -0
- attune/cli/parsers/patterns.py +54 -0
- attune/cli/parsers/provider.py +40 -0
- attune/cli/parsers/routing.py +110 -0
- attune/cli/parsers/setup.py +42 -0
- attune/cli/parsers/status.py +47 -0
- attune/cli/parsers/sync.py +31 -0
- attune/cli/parsers/tier.py +33 -0
- attune/cli/parsers/workflow.py +77 -0
- attune/cli/utils/__init__.py +1 -0
- attune/cli/utils/data.py +242 -0
- attune/cli/utils/helpers.py +68 -0
- attune/cli_legacy.py +3957 -0
- attune/cli_minimal.py +1159 -0
- attune/cli_router.py +437 -0
- attune/cli_unified.py +814 -0
- attune/config/__init__.py +66 -0
- attune/config/xml_config.py +286 -0
- attune/config.py +545 -0
- attune/coordination.py +870 -0
- attune/core.py +1511 -0
- attune/core_modules/__init__.py +15 -0
- attune/cost_tracker.py +626 -0
- attune/dashboard/__init__.py +41 -0
- attune/dashboard/app.py +512 -0
- attune/dashboard/simple_server.py +435 -0
- attune/dashboard/standalone_server.py +547 -0
- attune/discovery.py +306 -0
- attune/emergence.py +306 -0
- attune/exceptions.py +123 -0
- attune/feedback_loops.py +373 -0
- attune/hot_reload/README.md +473 -0
- attune/hot_reload/__init__.py +62 -0
- attune/hot_reload/config.py +83 -0
- attune/hot_reload/integration.py +229 -0
- attune/hot_reload/reloader.py +298 -0
- attune/hot_reload/watcher.py +183 -0
- attune/hot_reload/websocket.py +177 -0
- attune/levels.py +577 -0
- attune/leverage_points.py +441 -0
- attune/logging_config.py +261 -0
- attune/mcp/__init__.py +10 -0
- attune/mcp/server.py +506 -0
- attune/memory/__init__.py +237 -0
- attune/memory/claude_memory.py +469 -0
- attune/memory/config.py +224 -0
- attune/memory/control_panel.py +1290 -0
- attune/memory/control_panel_support.py +145 -0
- attune/memory/cross_session.py +845 -0
- attune/memory/edges.py +179 -0
- attune/memory/encryption.py +159 -0
- attune/memory/file_session.py +770 -0
- attune/memory/graph.py +570 -0
- attune/memory/long_term.py +913 -0
- attune/memory/long_term_types.py +99 -0
- attune/memory/mixins/__init__.py +25 -0
- attune/memory/mixins/backend_init_mixin.py +249 -0
- attune/memory/mixins/capabilities_mixin.py +208 -0
- attune/memory/mixins/handoff_mixin.py +208 -0
- attune/memory/mixins/lifecycle_mixin.py +49 -0
- attune/memory/mixins/long_term_mixin.py +352 -0
- attune/memory/mixins/promotion_mixin.py +109 -0
- attune/memory/mixins/short_term_mixin.py +182 -0
- attune/memory/nodes.py +179 -0
- attune/memory/redis_bootstrap.py +540 -0
- attune/memory/security/__init__.py +31 -0
- attune/memory/security/audit_logger.py +932 -0
- attune/memory/security/pii_scrubber.py +640 -0
- attune/memory/security/secrets_detector.py +678 -0
- attune/memory/short_term.py +2192 -0
- attune/memory/simple_storage.py +302 -0
- attune/memory/storage/__init__.py +15 -0
- attune/memory/storage_backend.py +167 -0
- attune/memory/summary_index.py +583 -0
- attune/memory/types.py +446 -0
- attune/memory/unified.py +182 -0
- attune/meta_workflows/__init__.py +74 -0
- attune/meta_workflows/agent_creator.py +248 -0
- attune/meta_workflows/builtin_templates.py +567 -0
- attune/meta_workflows/cli_commands/__init__.py +56 -0
- attune/meta_workflows/cli_commands/agent_commands.py +321 -0
- attune/meta_workflows/cli_commands/analytics_commands.py +442 -0
- attune/meta_workflows/cli_commands/config_commands.py +232 -0
- attune/meta_workflows/cli_commands/memory_commands.py +182 -0
- attune/meta_workflows/cli_commands/template_commands.py +354 -0
- attune/meta_workflows/cli_commands/workflow_commands.py +382 -0
- attune/meta_workflows/cli_meta_workflows.py +59 -0
- attune/meta_workflows/form_engine.py +292 -0
- attune/meta_workflows/intent_detector.py +409 -0
- attune/meta_workflows/models.py +569 -0
- attune/meta_workflows/pattern_learner.py +738 -0
- attune/meta_workflows/plan_generator.py +384 -0
- attune/meta_workflows/session_context.py +397 -0
- attune/meta_workflows/template_registry.py +229 -0
- attune/meta_workflows/workflow.py +984 -0
- attune/metrics/__init__.py +12 -0
- attune/metrics/collector.py +31 -0
- attune/metrics/prompt_metrics.py +194 -0
- attune/models/__init__.py +172 -0
- attune/models/__main__.py +13 -0
- attune/models/adaptive_routing.py +437 -0
- attune/models/auth_cli.py +444 -0
- attune/models/auth_strategy.py +450 -0
- attune/models/cli.py +655 -0
- attune/models/empathy_executor.py +354 -0
- attune/models/executor.py +257 -0
- attune/models/fallback.py +762 -0
- attune/models/provider_config.py +282 -0
- attune/models/registry.py +472 -0
- attune/models/tasks.py +359 -0
- attune/models/telemetry/__init__.py +71 -0
- attune/models/telemetry/analytics.py +594 -0
- attune/models/telemetry/backend.py +196 -0
- attune/models/telemetry/data_models.py +431 -0
- attune/models/telemetry/storage.py +489 -0
- attune/models/token_estimator.py +420 -0
- attune/models/validation.py +280 -0
- attune/monitoring/__init__.py +52 -0
- attune/monitoring/alerts.py +946 -0
- attune/monitoring/alerts_cli.py +448 -0
- attune/monitoring/multi_backend.py +271 -0
- attune/monitoring/otel_backend.py +362 -0
- attune/optimization/__init__.py +19 -0
- attune/optimization/context_optimizer.py +272 -0
- attune/orchestration/__init__.py +67 -0
- attune/orchestration/agent_templates.py +707 -0
- attune/orchestration/config_store.py +499 -0
- attune/orchestration/execution_strategies.py +2111 -0
- attune/orchestration/meta_orchestrator.py +1168 -0
- attune/orchestration/pattern_learner.py +696 -0
- attune/orchestration/real_tools.py +931 -0
- attune/pattern_cache.py +187 -0
- attune/pattern_library.py +542 -0
- attune/patterns/debugging/all_patterns.json +81 -0
- attune/patterns/debugging/workflow_20260107_1770825e.json +77 -0
- attune/patterns/refactoring_memory.json +89 -0
- attune/persistence.py +564 -0
- attune/platform_utils.py +265 -0
- attune/plugins/__init__.py +28 -0
- attune/plugins/base.py +361 -0
- attune/plugins/registry.py +268 -0
- attune/project_index/__init__.py +32 -0
- attune/project_index/cli.py +335 -0
- attune/project_index/index.py +667 -0
- attune/project_index/models.py +504 -0
- attune/project_index/reports.py +474 -0
- attune/project_index/scanner.py +777 -0
- attune/project_index/scanner_parallel.py +291 -0
- attune/prompts/__init__.py +61 -0
- attune/prompts/config.py +77 -0
- attune/prompts/context.py +177 -0
- attune/prompts/parser.py +285 -0
- attune/prompts/registry.py +313 -0
- attune/prompts/templates.py +208 -0
- attune/redis_config.py +302 -0
- attune/redis_memory.py +799 -0
- attune/resilience/__init__.py +56 -0
- attune/resilience/circuit_breaker.py +256 -0
- attune/resilience/fallback.py +179 -0
- attune/resilience/health.py +300 -0
- attune/resilience/retry.py +209 -0
- attune/resilience/timeout.py +135 -0
- attune/routing/__init__.py +43 -0
- attune/routing/chain_executor.py +433 -0
- attune/routing/classifier.py +217 -0
- attune/routing/smart_router.py +234 -0
- attune/routing/workflow_registry.py +343 -0
- attune/scaffolding/README.md +589 -0
- attune/scaffolding/__init__.py +35 -0
- attune/scaffolding/__main__.py +14 -0
- attune/scaffolding/cli.py +240 -0
- attune/scaffolding/templates/base_wizard.py.jinja2 +121 -0
- attune/scaffolding/templates/coach_wizard.py.jinja2 +321 -0
- attune/scaffolding/templates/domain_wizard.py.jinja2 +408 -0
- attune/scaffolding/templates/linear_flow_wizard.py.jinja2 +203 -0
- attune/socratic/__init__.py +256 -0
- attune/socratic/ab_testing.py +958 -0
- attune/socratic/blueprint.py +533 -0
- attune/socratic/cli.py +703 -0
- attune/socratic/collaboration.py +1114 -0
- attune/socratic/domain_templates.py +924 -0
- attune/socratic/embeddings.py +738 -0
- attune/socratic/engine.py +794 -0
- attune/socratic/explainer.py +682 -0
- attune/socratic/feedback.py +772 -0
- attune/socratic/forms.py +629 -0
- attune/socratic/generator.py +732 -0
- attune/socratic/llm_analyzer.py +637 -0
- attune/socratic/mcp_server.py +702 -0
- attune/socratic/session.py +312 -0
- attune/socratic/storage.py +667 -0
- attune/socratic/success.py +730 -0
- attune/socratic/visual_editor.py +860 -0
- attune/socratic/web_ui.py +958 -0
- attune/telemetry/__init__.py +39 -0
- attune/telemetry/agent_coordination.py +475 -0
- attune/telemetry/agent_tracking.py +367 -0
- attune/telemetry/approval_gates.py +545 -0
- attune/telemetry/cli.py +1231 -0
- attune/telemetry/commands/__init__.py +14 -0
- attune/telemetry/commands/dashboard_commands.py +696 -0
- attune/telemetry/event_streaming.py +409 -0
- attune/telemetry/feedback_loop.py +567 -0
- attune/telemetry/usage_tracker.py +591 -0
- attune/templates.py +754 -0
- attune/test_generator/__init__.py +38 -0
- attune/test_generator/__main__.py +14 -0
- attune/test_generator/cli.py +234 -0
- attune/test_generator/generator.py +355 -0
- attune/test_generator/risk_analyzer.py +216 -0
- attune/test_generator/templates/unit_test.py.jinja2 +272 -0
- attune/tier_recommender.py +384 -0
- attune/tools.py +183 -0
- attune/trust/__init__.py +28 -0
- attune/trust/circuit_breaker.py +579 -0
- attune/trust_building.py +527 -0
- attune/validation/__init__.py +19 -0
- attune/validation/xml_validator.py +281 -0
- attune/vscode_bridge.py +173 -0
- attune/workflow_commands.py +780 -0
- attune/workflow_patterns/__init__.py +33 -0
- attune/workflow_patterns/behavior.py +249 -0
- attune/workflow_patterns/core.py +76 -0
- attune/workflow_patterns/output.py +99 -0
- attune/workflow_patterns/registry.py +255 -0
- attune/workflow_patterns/structural.py +288 -0
- attune/workflows/__init__.py +539 -0
- attune/workflows/autonomous_test_gen.py +1268 -0
- attune/workflows/base.py +2667 -0
- attune/workflows/batch_processing.py +342 -0
- attune/workflows/bug_predict.py +1084 -0
- attune/workflows/builder.py +273 -0
- attune/workflows/caching.py +253 -0
- attune/workflows/code_review.py +1048 -0
- attune/workflows/code_review_adapters.py +312 -0
- attune/workflows/code_review_pipeline.py +722 -0
- attune/workflows/config.py +645 -0
- attune/workflows/dependency_check.py +644 -0
- attune/workflows/document_gen/__init__.py +25 -0
- attune/workflows/document_gen/config.py +30 -0
- attune/workflows/document_gen/report_formatter.py +162 -0
- attune/workflows/document_gen/workflow.py +1426 -0
- attune/workflows/document_manager.py +216 -0
- attune/workflows/document_manager_README.md +134 -0
- attune/workflows/documentation_orchestrator.py +1205 -0
- attune/workflows/history.py +510 -0
- attune/workflows/keyboard_shortcuts/__init__.py +39 -0
- attune/workflows/keyboard_shortcuts/generators.py +391 -0
- attune/workflows/keyboard_shortcuts/parsers.py +416 -0
- attune/workflows/keyboard_shortcuts/prompts.py +295 -0
- attune/workflows/keyboard_shortcuts/schema.py +193 -0
- attune/workflows/keyboard_shortcuts/workflow.py +509 -0
- attune/workflows/llm_base.py +363 -0
- attune/workflows/manage_docs.py +87 -0
- attune/workflows/manage_docs_README.md +134 -0
- attune/workflows/manage_documentation.py +821 -0
- attune/workflows/new_sample_workflow1.py +149 -0
- attune/workflows/new_sample_workflow1_README.md +150 -0
- attune/workflows/orchestrated_health_check.py +849 -0
- attune/workflows/orchestrated_release_prep.py +600 -0
- attune/workflows/output.py +413 -0
- attune/workflows/perf_audit.py +863 -0
- attune/workflows/pr_review.py +762 -0
- attune/workflows/progress.py +785 -0
- attune/workflows/progress_server.py +322 -0
- attune/workflows/progressive/README 2.md +454 -0
- attune/workflows/progressive/README.md +454 -0
- attune/workflows/progressive/__init__.py +82 -0
- attune/workflows/progressive/cli.py +219 -0
- attune/workflows/progressive/core.py +488 -0
- attune/workflows/progressive/orchestrator.py +723 -0
- attune/workflows/progressive/reports.py +520 -0
- attune/workflows/progressive/telemetry.py +274 -0
- attune/workflows/progressive/test_gen.py +495 -0
- attune/workflows/progressive/workflow.py +589 -0
- attune/workflows/refactor_plan.py +694 -0
- attune/workflows/release_prep.py +895 -0
- attune/workflows/release_prep_crew.py +969 -0
- attune/workflows/research_synthesis.py +404 -0
- attune/workflows/routing.py +168 -0
- attune/workflows/secure_release.py +593 -0
- attune/workflows/security_adapters.py +297 -0
- attune/workflows/security_audit.py +1329 -0
- attune/workflows/security_audit_phase3.py +355 -0
- attune/workflows/seo_optimization.py +633 -0
- attune/workflows/step_config.py +234 -0
- attune/workflows/telemetry_mixin.py +269 -0
- attune/workflows/test5.py +125 -0
- attune/workflows/test5_README.md +158 -0
- attune/workflows/test_coverage_boost_crew.py +849 -0
- attune/workflows/test_gen/__init__.py +52 -0
- attune/workflows/test_gen/ast_analyzer.py +249 -0
- attune/workflows/test_gen/config.py +88 -0
- attune/workflows/test_gen/data_models.py +38 -0
- attune/workflows/test_gen/report_formatter.py +289 -0
- attune/workflows/test_gen/test_templates.py +381 -0
- attune/workflows/test_gen/workflow.py +655 -0
- attune/workflows/test_gen.py +54 -0
- attune/workflows/test_gen_behavioral.py +477 -0
- attune/workflows/test_gen_parallel.py +341 -0
- attune/workflows/test_lifecycle.py +526 -0
- attune/workflows/test_maintenance.py +627 -0
- attune/workflows/test_maintenance_cli.py +590 -0
- attune/workflows/test_maintenance_crew.py +840 -0
- attune/workflows/test_runner.py +622 -0
- attune/workflows/tier_tracking.py +531 -0
- attune/workflows/xml_enhanced_crew.py +285 -0
- attune_ai-2.0.0.dist-info/METADATA +1026 -0
- attune_ai-2.0.0.dist-info/RECORD +457 -0
- attune_ai-2.0.0.dist-info/WHEEL +5 -0
- attune_ai-2.0.0.dist-info/entry_points.txt +26 -0
- attune_ai-2.0.0.dist-info/licenses/LICENSE +201 -0
- attune_ai-2.0.0.dist-info/licenses/LICENSE_CHANGE_ANNOUNCEMENT.md +101 -0
- attune_ai-2.0.0.dist-info/top_level.txt +5 -0
- attune_healthcare/__init__.py +13 -0
- attune_healthcare/monitors/__init__.py +9 -0
- attune_healthcare/monitors/clinical_protocol_monitor.py +315 -0
- attune_healthcare/monitors/monitoring/__init__.py +44 -0
- attune_healthcare/monitors/monitoring/protocol_checker.py +300 -0
- attune_healthcare/monitors/monitoring/protocol_loader.py +214 -0
- attune_healthcare/monitors/monitoring/sensor_parsers.py +306 -0
- attune_healthcare/monitors/monitoring/trajectory_analyzer.py +389 -0
- attune_llm/README.md +553 -0
- attune_llm/__init__.py +28 -0
- attune_llm/agent_factory/__init__.py +53 -0
- attune_llm/agent_factory/adapters/__init__.py +85 -0
- attune_llm/agent_factory/adapters/autogen_adapter.py +312 -0
- attune_llm/agent_factory/adapters/crewai_adapter.py +483 -0
- attune_llm/agent_factory/adapters/haystack_adapter.py +298 -0
- attune_llm/agent_factory/adapters/langchain_adapter.py +362 -0
- attune_llm/agent_factory/adapters/langgraph_adapter.py +333 -0
- attune_llm/agent_factory/adapters/native.py +228 -0
- attune_llm/agent_factory/adapters/wizard_adapter.py +423 -0
- attune_llm/agent_factory/base.py +305 -0
- attune_llm/agent_factory/crews/__init__.py +67 -0
- attune_llm/agent_factory/crews/code_review.py +1113 -0
- attune_llm/agent_factory/crews/health_check.py +1262 -0
- attune_llm/agent_factory/crews/refactoring.py +1128 -0
- attune_llm/agent_factory/crews/security_audit.py +1018 -0
- attune_llm/agent_factory/decorators.py +287 -0
- attune_llm/agent_factory/factory.py +558 -0
- attune_llm/agent_factory/framework.py +193 -0
- attune_llm/agent_factory/memory_integration.py +328 -0
- attune_llm/agent_factory/resilient.py +320 -0
- attune_llm/agents_md/__init__.py +22 -0
- attune_llm/agents_md/loader.py +218 -0
- attune_llm/agents_md/parser.py +271 -0
- attune_llm/agents_md/registry.py +307 -0
- attune_llm/claude_memory.py +466 -0
- attune_llm/cli/__init__.py +8 -0
- attune_llm/cli/sync_claude.py +487 -0
- attune_llm/code_health.py +1313 -0
- attune_llm/commands/__init__.py +51 -0
- attune_llm/commands/context.py +375 -0
- attune_llm/commands/loader.py +301 -0
- attune_llm/commands/models.py +231 -0
- attune_llm/commands/parser.py +371 -0
- attune_llm/commands/registry.py +429 -0
- attune_llm/config/__init__.py +29 -0
- attune_llm/config/unified.py +291 -0
- attune_llm/context/__init__.py +22 -0
- attune_llm/context/compaction.py +455 -0
- attune_llm/context/manager.py +434 -0
- attune_llm/contextual_patterns.py +361 -0
- attune_llm/core.py +907 -0
- attune_llm/git_pattern_extractor.py +435 -0
- attune_llm/hooks/__init__.py +24 -0
- attune_llm/hooks/config.py +306 -0
- attune_llm/hooks/executor.py +289 -0
- attune_llm/hooks/registry.py +302 -0
- attune_llm/hooks/scripts/__init__.py +39 -0
- attune_llm/hooks/scripts/evaluate_session.py +201 -0
- attune_llm/hooks/scripts/first_time_init.py +285 -0
- attune_llm/hooks/scripts/pre_compact.py +207 -0
- attune_llm/hooks/scripts/session_end.py +183 -0
- attune_llm/hooks/scripts/session_start.py +163 -0
- attune_llm/hooks/scripts/suggest_compact.py +225 -0
- attune_llm/learning/__init__.py +30 -0
- attune_llm/learning/evaluator.py +438 -0
- attune_llm/learning/extractor.py +514 -0
- attune_llm/learning/storage.py +560 -0
- attune_llm/levels.py +227 -0
- attune_llm/pattern_confidence.py +414 -0
- attune_llm/pattern_resolver.py +272 -0
- attune_llm/pattern_summary.py +350 -0
- attune_llm/providers.py +967 -0
- attune_llm/routing/__init__.py +32 -0
- attune_llm/routing/model_router.py +362 -0
- attune_llm/security/IMPLEMENTATION_SUMMARY.md +413 -0
- attune_llm/security/PHASE2_COMPLETE.md +384 -0
- attune_llm/security/PHASE2_SECRETS_DETECTOR_COMPLETE.md +271 -0
- attune_llm/security/QUICK_REFERENCE.md +316 -0
- attune_llm/security/README.md +262 -0
- attune_llm/security/__init__.py +62 -0
- attune_llm/security/audit_logger.py +929 -0
- attune_llm/security/audit_logger_example.py +152 -0
- attune_llm/security/pii_scrubber.py +640 -0
- attune_llm/security/secrets_detector.py +678 -0
- attune_llm/security/secrets_detector_example.py +304 -0
- attune_llm/security/secure_memdocs.py +1192 -0
- attune_llm/security/secure_memdocs_example.py +278 -0
- attune_llm/session_status.py +745 -0
- attune_llm/state.py +246 -0
- attune_llm/utils/__init__.py +5 -0
- attune_llm/utils/tokens.py +349 -0
- attune_software/SOFTWARE_PLUGIN_README.md +57 -0
- attune_software/__init__.py +13 -0
- attune_software/cli/__init__.py +120 -0
- attune_software/cli/inspect.py +362 -0
- attune_software/cli.py +574 -0
- attune_software/plugin.py +188 -0
- workflow_scaffolding/__init__.py +11 -0
- workflow_scaffolding/__main__.py +12 -0
- workflow_scaffolding/cli.py +206 -0
- workflow_scaffolding/generator.py +265 -0
attune/meta_workflows/workflow.py
@@ -0,0 +1,984 @@
"""Meta-workflow orchestration engine.

Coordinates the complete meta-workflow execution:
1. Template selection
2. Form collection (Socratic questioning)
3. Agent team generation
4. Agent execution (with tier escalation)
5. Result aggregation and storage (files + optional memory)

Created: 2026-01-17
Updated: 2026-01-18 (v4.3.0 - Real LLM execution with Anthropic client)
Purpose: Core orchestration for meta-workflows
"""

# Load environment variables from .env file
# Try multiple locations: project root, home directory, empathy config
try:
    from pathlib import Path

    from dotenv import load_dotenv

    # Try common .env locations
    _env_paths = [
        Path.cwd() / ".env",  # Current working directory
        Path(__file__).parent.parent.parent.parent / ".env",  # Project root
        Path.home() / ".env",  # Home directory
        Path.home() / ".empathy" / ".env",  # Empathy config directory
    ]

    for _env_path in _env_paths:
        if _env_path.exists():
            load_dotenv(_env_path)
            break
except ImportError:
    pass  # dotenv not installed, use environment variables directly

import json
import logging
import time
from datetime import datetime
from pathlib import Path
from typing import TYPE_CHECKING, Any

from attune_llm.routing.model_router import ModelRouter, ModelTier
from attune.config import _validate_file_path
from attune.meta_workflows.agent_creator import DynamicAgentCreator
from attune.meta_workflows.form_engine import SocraticFormEngine
from attune.meta_workflows.models import (
    AgentExecutionResult,
    AgentSpec,
    FormResponse,
    MetaWorkflowResult,
    MetaWorkflowTemplate,
    TierStrategy,
)
from attune.meta_workflows.template_registry import TemplateRegistry
from attune.orchestration.agent_templates import get_template
from attune.telemetry.usage_tracker import UsageTracker

if TYPE_CHECKING:
    from attune.meta_workflows.pattern_learner import PatternLearner

logger = logging.getLogger(__name__)


class MetaWorkflow:
    """Orchestrates complete meta-workflow execution.

    Coordinates form collection, agent generation, and execution
    to implement dynamic, template-based workflows.

    Hybrid Storage:
    - Files: Persistent, human-readable execution results
    - Memory: Rich semantic queries (optional via pattern_learner)

    Attributes:
        template: Meta-workflow template to execute
        storage_dir: Directory for storing execution results
        form_engine: Engine for collecting form responses
        agent_creator: Creator for generating agent teams
        pattern_learner: Optional pattern learner for memory integration
    """

    def __init__(
        self,
        template: MetaWorkflowTemplate | None = None,
        template_id: str | None = None,
        storage_dir: str | None = None,
        pattern_learner: "PatternLearner | None" = None,
    ):
        """Initialize meta-workflow with optional memory integration.

        Args:
            template: Template to execute (optional if template_id provided)
            template_id: ID of template to load (optional if template provided)
            storage_dir: Directory for execution results
                (default: .attune/meta_workflows/executions/)
            pattern_learner: Optional pattern learner with memory integration
                If provided, execution results will be stored in
                both files and memory for rich semantic querying

        Raises:
            ValueError: If neither template nor template_id provided
        """
        if template is None and template_id is None:
            raise ValueError("Must provide either template or template_id")

        # Load template if needed
        if template is None:
            registry = TemplateRegistry()
            template = registry.load_template(template_id)
            if template is None:
                raise ValueError(f"Template not found: {template_id}")

        self.template = template
        self.form_engine = SocraticFormEngine()
        self.agent_creator = DynamicAgentCreator()
        self.pattern_learner = pattern_learner

        # Set up storage
        if storage_dir is None:
            storage_dir = str(Path.home() / ".empathy" / "meta_workflows" / "executions")
        self.storage_dir = Path(storage_dir)
        self.storage_dir.mkdir(parents=True, exist_ok=True)

        logger.info(
            f"Initialized MetaWorkflow for template: {self.template.template_id}",
            extra={"memory_enabled": pattern_learner is not None},
        )

    def execute(
        self,
        form_response: FormResponse | None = None,
        mock_execution: bool = True,
        use_defaults: bool = False,
    ) -> MetaWorkflowResult:
        """Execute complete meta-workflow.

        Args:
            form_response: Pre-collected form responses (optional)
                If None, will collect via form_engine
            mock_execution: Use mock agent execution (default: True for MVP)
                Set to False for real LLM execution
            use_defaults: Use default values instead of asking questions
                (non-interactive mode)

        Returns:
            MetaWorkflowResult with complete execution details

        Raises:
            ValueError: If execution fails
        """
        run_id = f"{self.template.template_id}-{datetime.now().strftime('%Y%m%d-%H%M%S')}"
        start_time = time.time()

        logger.info(f"Starting meta-workflow execution: {run_id}")

        try:
            # Stage 1: Form collection (if not provided)
            if form_response is None:
                if use_defaults:
                    logger.info("Stage 1: Using default form values (non-interactive)")
                else:
                    logger.info("Stage 1: Collecting form responses")
                form_response = self.form_engine.ask_questions(
                    self.template.form_schema, self.template.template_id
                )
            else:
                logger.info("Stage 1: Using provided form responses")

            # Stage 2: Agent generation
            logger.info("Stage 2: Generating agent team")
            agents = self.agent_creator.create_agents(self.template, form_response)

            logger.info(f"Created {len(agents)} agents")

            # Stage 3: Agent execution
            logger.info("Stage 3: Executing agents")

            if mock_execution:
                agent_results = self._execute_agents_mock(agents)
            else:
                agent_results = self._execute_agents_real(agents)

            # Stage 4: Aggregate results
            logger.info("Stage 4: Aggregating results")

            total_cost = sum(result.cost for result in agent_results)
            total_duration = time.time() - start_time
            success = all(result.success for result in agent_results)

            result = MetaWorkflowResult(
                run_id=run_id,
                template_id=self.template.template_id,
                timestamp=datetime.now().isoformat(),
                form_responses=form_response,
                agents_created=agents,
                agent_results=agent_results,
                total_cost=total_cost,
                total_duration=total_duration,
                success=success,
            )

            # Stage 5: Save results (files + optional memory)
            logger.info("Stage 5: Saving results")
            self._save_execution(result)

            # Store in memory if pattern learner available
            if self.pattern_learner:
                logger.info("Stage 5b: Storing in memory")
                pattern_id = self.pattern_learner.store_execution_in_memory(result)
                if pattern_id:
                    logger.info(f"Execution stored in memory: {pattern_id}")

            logger.info(
                f"Meta-workflow execution complete: {run_id} "
                f"(cost: ${total_cost:.2f}, duration: {total_duration:.1f}s)"
            )

            return result

        except Exception as e:
            logger.error(f"Meta-workflow execution failed: {e}")

            # Create error result
            error_result = MetaWorkflowResult(
                run_id=run_id,
                template_id=self.template.template_id,
                timestamp=datetime.now().isoformat(),
                form_responses=form_response or FormResponse(template_id=self.template.template_id),
                total_cost=0.0,
                total_duration=time.time() - start_time,
                success=False,
                error=str(e),
            )

            # Try to save error result
            try:
                self._save_execution(error_result)
            except Exception as save_error:
                logger.error(f"Failed to save error result: {save_error}")

            raise ValueError(f"Meta-workflow execution failed: {e}") from e

    def _execute_agents_mock(self, agents: list[AgentSpec]) -> list[AgentExecutionResult]:
        """Execute agents with mock execution (for MVP).

        Args:
            agents: List of agent specs to execute

        Returns:
            List of agent execution results
        """
        results = []

        for agent in agents:
            logger.debug(f"Mock executing agent: {agent.role}")

            # Simulate execution time based on tier
            if agent.tier_strategy == TierStrategy.CHEAP_ONLY:
                duration = 1.5
                cost = 0.05
                tier_used = "cheap"
            elif agent.tier_strategy == TierStrategy.PROGRESSIVE:
                duration = 3.0
                cost = 0.15  # Average (may escalate)
                tier_used = "capable"
            elif agent.tier_strategy == TierStrategy.CAPABLE_FIRST:
                duration = 4.0
                cost = 0.25
                tier_used = "capable"
            else:  # PREMIUM_ONLY
                duration = 6.0
                cost = 0.40
                tier_used = "premium"

            # Mock result
            result = AgentExecutionResult(
                agent_id=agent.agent_id,
                role=agent.role,
                success=True,
                cost=cost,
                duration=duration,
                tier_used=tier_used,
                output={
                    "message": f"Mock execution of {agent.role}",
                    "tier_strategy": agent.tier_strategy.value,
                    "tools_used": agent.tools,
                    "config": agent.config,
                    "success_criteria": agent.success_criteria,
                },
            )

            results.append(result)

            # Simulate some execution time
            time.sleep(0.1)

        return results

    def _execute_agents_real(self, agents: list[AgentSpec]) -> list[AgentExecutionResult]:
        """Execute agents with real LLM calls and progressive tier escalation.

        Implements progressive tier escalation strategy:
        - CHEAP_ONLY: Always uses cheap tier
        - PROGRESSIVE: cheap → capable → premium (escalates on failure)
        - CAPABLE_FIRST: capable → premium (skips cheap tier)

        Each LLM call is tracked via UsageTracker for cost analysis.

        Args:
            agents: List of agent specs to execute

        Returns:
            List of agent execution results with actual LLM costs

        Raises:
            RuntimeError: If agent execution encounters fatal error
        """
        results = []
        router = ModelRouter()
        tracker = UsageTracker.get_instance()

        for agent in agents:
            logger.info(f"Executing agent: {agent.role} ({agent.tier_strategy.value})")

            try:
                result = self._execute_single_agent_with_escalation(agent, router, tracker)
                results.append(result)

                logger.info(
                    f"Agent {agent.role} completed: "
                    f"tier={result.tier_used}, cost=${result.cost:.4f}, "
                    f"success={result.success}"
                )

            except Exception as e:
                logger.error(f"Agent {agent.role} failed with error: {e}")

                # Create error result
                error_result = AgentExecutionResult(
                    agent_id=agent.agent_id,
                    role=agent.role,
                    success=False,
                    cost=0.0,
                    duration=0.0,
                    tier_used="error",
                    output={"error": str(e)},
                    error=str(e),
                )
                results.append(error_result)

        return results

    def _execute_single_agent_with_escalation(
        self,
        agent: AgentSpec,
        router: ModelRouter,
        tracker: UsageTracker,
    ) -> AgentExecutionResult:
        """Execute single agent with progressive tier escalation.

        Args:
            agent: Agent specification
            router: Model router for tier selection
            tracker: Usage tracker for telemetry

        Returns:
            AgentExecutionResult with actual LLM execution data
        """
        start_time = time.time()

        # Determine tier sequence based on strategy
        if agent.tier_strategy == TierStrategy.CHEAP_ONLY:
            tiers = [ModelTier.CHEAP]
        elif agent.tier_strategy == TierStrategy.PROGRESSIVE:
            tiers = [ModelTier.CHEAP, ModelTier.CAPABLE, ModelTier.PREMIUM]
        elif agent.tier_strategy == TierStrategy.CAPABLE_FIRST:
            tiers = [ModelTier.CAPABLE, ModelTier.PREMIUM]
        else:
            # Fallback to capable
            logger.warning(f"Unknown tier strategy: {agent.tier_strategy}, using CAPABLE")
            tiers = [ModelTier.CAPABLE]

        # Try each tier in sequence
        result = None
        total_cost = 0.0

        for tier in tiers:
            logger.debug(f"Attempting tier: {tier.value}")

            # Execute at this tier
            tier_result = self._execute_at_tier(agent, tier, router, tracker)
            total_cost += tier_result.cost

            # Check if successful
            if self._evaluate_success_criteria(tier_result, agent):
                # Success - return result
                tier_result.cost = total_cost  # Update with cumulative cost
                tier_result.duration = time.time() - start_time
                return tier_result

            # Failed - try next tier
            logger.debug(f"Tier {tier.value} did not meet success criteria, attempting escalation")
            result = tier_result

        # All tiers exhausted - return final result (failed)
        if result:
            result.cost = total_cost
            result.duration = time.time() - start_time
            logger.warning(f"Agent {agent.role} failed at all tiers (cost: ${total_cost:.4f})")
            return result

        # Should never reach here
        raise RuntimeError(f"No tiers attempted for agent {agent.role}")

    def _execute_at_tier(
        self,
        agent: AgentSpec,
        tier: ModelTier,
        router: ModelRouter,
        tracker: UsageTracker,
    ) -> AgentExecutionResult:
        """Execute agent at specific tier.

        Args:
            agent: Agent specification
            tier: Model tier to use
            router: Model router
            tracker: Usage tracker

        Returns:
            AgentExecutionResult from this tier
        """
        start_time = time.time()

        # Get model config for tier (access MODELS dict directly)
        provider = router._default_provider
        model_config = router.MODELS[provider][tier.value]

        # Build prompt from agent spec
        prompt = self._build_agent_prompt(agent)

        # Execute LLM call
        # v4.3.0: Real LLM execution with Anthropic client
        # Falls back to simulation if API key not available

        try:
            # Execute real LLM call (with simulation fallback)
            response = self._execute_llm_call(prompt, model_config, tier)

            # Track telemetry
            duration_ms = int((time.time() - start_time) * 1000)
            tracker.track_llm_call(
                workflow="meta-workflow",
                stage=agent.role,
                tier=tier.value,
                model=model_config.model_id,
                provider=router._default_provider,
                cost=response["cost"],
                tokens=response["tokens"],
                cache_hit=False,
                cache_type=None,
                duration_ms=duration_ms,
                user_id=None,
            )

            # Create result
            result = AgentExecutionResult(
                agent_id=agent.agent_id,
                role=agent.role,
                success=response["success"],
                cost=response["cost"],
                duration=time.time() - start_time,
                tier_used=tier.value,
                output=response["output"],
            )

            return result

        except Exception as e:
            logger.error(f"LLM execution failed at tier {tier.value}: {e}")

            # Return error result
            return AgentExecutionResult(
                agent_id=agent.agent_id,
                role=agent.role,
                success=False,
                cost=0.0,
                duration=time.time() - start_time,
                tier_used=tier.value,
                output={"error": str(e)},
                error=str(e),
            )

    def _get_generic_instructions(self, role: str) -> str:
        """Generate generic instructions based on agent role.

        Args:
            role: Agent role name

        Returns:
            Generic instructions appropriate for the role
        """
        # Map common role keywords to instructions
        role_lower = role.lower()

        if "analyst" in role_lower or "analyze" in role_lower:
            return (
                "You are an expert analyst. Your job is to thoroughly analyze "
                "the provided information, identify key patterns, issues, and "
                "opportunities. Provide detailed findings with specific evidence "
                "and actionable recommendations."
            )
        elif "reviewer" in role_lower or "review" in role_lower:
            return (
                "You are a careful reviewer. Your job is to review the provided "
                "content for quality, accuracy, completeness, and adherence to "
                "best practices. Identify any issues, gaps, or areas for improvement "
                "and provide specific feedback."
            )
        elif "generator" in role_lower or "create" in role_lower or "writer" in role_lower:
            return (
                "You are a skilled content generator. Your job is to create "
                "high-quality content based on the provided requirements and context. "
                "Ensure your output is well-structured, accurate, and follows "
                "established conventions."
            )
        elif "validator" in role_lower or "verify" in role_lower:
            return (
                "You are a thorough validator. Your job is to verify the provided "
                "content meets all requirements and standards. Check for correctness, "
                "completeness, and consistency. Report any issues found."
            )
        elif "synthesizer" in role_lower or "combine" in role_lower:
            return (
                "You are an expert synthesizer. Your job is to combine multiple "
                "inputs into a cohesive, well-organized output. Identify common "
                "themes, resolve conflicts, and produce a unified result that "
                "captures the key insights from all sources."
            )
        elif "test" in role_lower:
            return (
                "You are a testing specialist. Your job is to analyze code and "
                "create comprehensive test cases that cover edge cases, error "
                "conditions, and normal operation. Ensure tests are well-documented "
                "and maintainable."
            )
        elif "doc" in role_lower:
            return (
                "You are a documentation specialist. Your job is to analyze content "
                "and create or improve documentation that is clear, accurate, and "
                "helpful. Follow documentation best practices and maintain consistency."
            )
        else:
            return (
                f"You are a {role} agent. Complete your assigned task thoroughly "
                "and provide clear, well-structured output. Follow best practices "
                "and provide actionable results."
            )

    def _build_agent_prompt(self, agent: AgentSpec) -> str:
        """Build prompt for agent from specification.

        Args:
            agent: Agent specification

        Returns:
            Formatted prompt string
        """
        # Load base template
        base_template = get_template(agent.base_template)
        if base_template is not None:
            instructions = base_template.default_instructions
        else:
            # Fallback if template not found - use role-based generic prompt
            logger.warning(f"Template {agent.base_template} not found, using generic prompt")
            instructions = self._get_generic_instructions(agent.role)

        # Build prompt
        prompt_parts = [
            f"Role: {agent.role}",
            f"\nInstructions:\n{instructions}",
        ]

        # Add config if present
        if agent.config:
            prompt_parts.append(f"\nConfiguration:\n{json.dumps(agent.config, indent=2)}")

        # Add success criteria if present
        if agent.success_criteria:
            prompt_parts.append(
                f"\nSuccess Criteria:\n{json.dumps(agent.success_criteria, indent=2)}"
            )

        # Add tools if present
        if agent.tools:
            prompt_parts.append(f"\nAvailable Tools: {', '.join(agent.tools)}")

        return "\n".join(prompt_parts)

    def _execute_llm_call(self, prompt: str, model_config: Any, tier: ModelTier) -> dict[str, Any]:
        """Execute real LLM call via Anthropic or other providers.

        Uses the Anthropic client for Claude models, with fallback to
        other providers via the model configuration.

        Args:
            prompt: Prompt to send to LLM
            model_config: Model configuration from router
            tier: Model tier being used

        Returns:
            Dict with cost, tokens, success, and output

        Raises:
            RuntimeError: If LLM call fails after retries
        """
        import os

        # Try to use Anthropic client
        try:
            from anthropic import Anthropic

            api_key = os.environ.get("ANTHROPIC_API_KEY")
            if not api_key:
                raise ValueError(
                    "ANTHROPIC_API_KEY not found. Set it in environment or .env file "
                    "(checked: ./env, ~/.env, ~/.attune/.env)"
                )

            client = Anthropic(api_key=api_key)

            # Execute the LLM call
            response = client.messages.create(
                model=model_config.model_id,
                max_tokens=2048,
                messages=[{"role": "user", "content": prompt}],
            )

            # Extract response data
            output_text = response.content[0].text if response.content else ""
            prompt_tokens = response.usage.input_tokens
            completion_tokens = response.usage.output_tokens

            # Calculate cost
            cost = (prompt_tokens / 1000) * model_config.cost_per_1k_input + (
                completion_tokens / 1000
            ) * model_config.cost_per_1k_output

            return {
                "cost": cost,
                "tokens": {
                    "input": prompt_tokens,
                    "output": completion_tokens,
                    "total": prompt_tokens + completion_tokens,
                },
                "success": True,
                "output": {
                    "message": output_text,
                    "model": model_config.model_id,
                    "tier": tier.value,
                    "success": True,
                },
            }

        except ImportError:
            logger.warning("Anthropic client not available, using simulation")
            return self._simulate_llm_call(prompt, model_config, tier)

        except Exception as e:
            logger.error(f"LLM call failed: {e}")
            # Return failure result
            return {
                "cost": 0.0,
                "tokens": {"input": 0, "output": 0, "total": 0},
                "success": False,
                "output": {
                    "error": str(e),
                    "model": model_config.model_id,
                    "tier": tier.value,
                    "success": False,
                },
            }

    def _simulate_llm_call(self, prompt: str, model_config: Any, tier: ModelTier) -> dict[str, Any]:
        """Simulate LLM call with realistic cost/token estimates.

        Used as fallback when real LLM execution is not available
        (e.g., no API key, testing mode, etc.)

        Args:
            prompt: Prompt to send to LLM
            model_config: Model configuration
            tier: Model tier

        Returns:
            Dict with cost, tokens, success, and output
        """
        import random

        # Estimate tokens (rough: ~4 chars per token)
        prompt_tokens = len(prompt) // 4
        completion_tokens = 500  # Assume moderate response

        # Calculate cost
        cost = (prompt_tokens / 1000) * model_config.cost_per_1k_input + (
            completion_tokens / 1000
        ) * model_config.cost_per_1k_output

        # Simulate success rate based on tier
        # cheap: 80%, capable: 95%, premium: 99%
        if tier == ModelTier.CHEAP:
            success = random.random() < 0.80
        elif tier == ModelTier.CAPABLE:
            success = random.random() < 0.95
        else:  # PREMIUM
            success = random.random() < 0.99

        return {
            "cost": cost,
            "tokens": {
                "input": prompt_tokens,
                "output": completion_tokens,
                "total": prompt_tokens + completion_tokens,
            },
            "success": success,
            "output": {
                "message": f"Simulated response at {tier.value} tier",
                "model": model_config.model_id,
                "tier": tier.value,
                "success": success,
            },
        }

    def _evaluate_success_criteria(self, result: AgentExecutionResult, agent: AgentSpec) -> bool:
        """Evaluate if agent result meets success criteria.

        Args:
            result: Agent execution result
            agent: Agent specification with success criteria

        Returns:
            True if success criteria met, False otherwise
        """
        # Basic success check
        if not result.success:
            return False

        # If no criteria specified, basic success is enough
        if not agent.success_criteria:
            return True

        # success_criteria is a list of descriptive strings (e.g., ["code reviewed", "tests pass"])
        # These are informational criteria - if result.success is True, we consider the criteria met
        # The criteria serve as documentation of what success means for this agent
        logger.debug(f"Agent succeeded with criteria: {agent.success_criteria}")
        return True

    def _save_execution(self, result: MetaWorkflowResult) -> Path:
        """Save execution results to disk.

        Args:
            result: Execution result to save

        Returns:
            Path to saved results directory

        Raises:
            OSError: If save operation fails
        """
        # Create run directory
        run_dir = self.storage_dir / result.run_id
        run_dir.mkdir(parents=True, exist_ok=True)

        # Save config (template info + form responses)
        config_file = run_dir / "config.json"
        config_data = {
            "template_id": result.template_id,
            "template_name": self.template.name,
            "template_version": self.template.version,
            "run_id": result.run_id,
            "timestamp": result.timestamp,
        }
        validated_config = _validate_file_path(str(config_file))
        validated_config.write_text(json.dumps(config_data, indent=2), encoding="utf-8")

        # Save form responses
        responses_file = run_dir / "form_responses.json"
        validated_responses = _validate_file_path(str(responses_file))
        validated_responses.write_text(
            json.dumps(
                {
                    "template_id": result.form_responses.template_id,
                    "responses": result.form_responses.responses,
                    "timestamp": result.form_responses.timestamp,
                    "response_id": result.form_responses.response_id,
                },
                indent=2,
            ),
            encoding="utf-8",
        )

        # Save agents created
        agents_file = run_dir / "agents.json"
        agents_data = [
            {
                "agent_id": agent.agent_id,
                "role": agent.role,
                "base_template": agent.base_template,
                "tier_strategy": agent.tier_strategy.value,
                "tools": agent.tools,
                "config": agent.config,
                "success_criteria": agent.success_criteria,
            }
            for agent in result.agents_created
        ]
        validated_agents = _validate_file_path(str(agents_file))
        validated_agents.write_text(json.dumps(agents_data, indent=2), encoding="utf-8")

        # Save complete result
        result_file = run_dir / "result.json"
        validated_result = _validate_file_path(str(result_file))
        validated_result.write_text(result.to_json(), encoding="utf-8")

        # Create human-readable report
        report_file = run_dir / "report.txt"
        report = self._generate_report(result)
        validated_report = _validate_file_path(str(report_file))
        validated_report.write_text(report, encoding="utf-8")

        logger.info(f"Saved execution results to: {run_dir}")
        return run_dir

    def _generate_report(self, result: MetaWorkflowResult) -> str:
        """Generate human-readable report.

        Args:
            result: Execution result

        Returns:
            Markdown-formatted report
        """
        lines = []

        lines.append("# Meta-Workflow Execution Report")
        lines.append("")
        lines.append(f"**Run ID**: {result.run_id}")
        lines.append(f"**Template**: {self.template.name}")
        lines.append(f"**Timestamp**: {result.timestamp}")
        lines.append(f"**Success**: {'✅ Yes' if result.success else '❌ No'}")
        if result.error:
            lines.append(f"**Error**: {result.error}")
        lines.append("")

        lines.append("## Summary")
        lines.append("")
        lines.append(f"- **Agents Created**: {len(result.agents_created)}")
        lines.append(f"- **Agents Executed**: {len(result.agent_results)}")
        lines.append(f"- **Total Cost**: ${result.total_cost:.2f}")
        lines.append(f"- **Total Duration**: {result.total_duration:.1f}s")
        lines.append("")

        lines.append("## Form Responses")
        lines.append("")
        for key, value in result.form_responses.responses.items():
            lines.append(f"- **{key}**: {value}")
        lines.append("")

        lines.append("## Agents Created")
        lines.append("")
        for i, agent in enumerate(result.agents_created, 1):
            lines.append(f"### {i}. {agent.role}")
            lines.append("")
            lines.append(f"- **Agent ID**: {agent.agent_id}")
            lines.append(f"- **Base Template**: {agent.base_template}")
            lines.append(f"- **Tier Strategy**: {agent.tier_strategy.value}")
            lines.append(f"- **Tools**: {', '.join(agent.tools) if agent.tools else 'None'}")
            if agent.config:
                lines.append(f"- **Config**: {json.dumps(agent.config)}")
            if agent.success_criteria:
                lines.append("- **Success Criteria**:")
                for criterion in agent.success_criteria:
                    lines.append(f"  - {criterion}")
            lines.append("")

        lines.append("## Execution Results")
        lines.append("")
        for i, agent_result in enumerate(result.agent_results, 1):
            lines.append(f"### {i}. {agent_result.role}")
            lines.append("")
            lines.append(f"- **Status**: {'✅ Success' if agent_result.success else '❌ Failed'}")
            lines.append(f"- **Tier Used**: {agent_result.tier_used}")
            lines.append(f"- **Cost**: ${agent_result.cost:.2f}")
            lines.append(f"- **Duration**: {agent_result.duration:.1f}s")
            if agent_result.error:
                lines.append(f"- **Error**: {agent_result.error}")
            lines.append("")

        lines.append("## Cost Breakdown")
        lines.append("")

        # Group by tier
        tier_costs = {}
        for agent_result in result.agent_results:
            tier = agent_result.tier_used
            if tier not in tier_costs:
                tier_costs[tier] = 0.0
            tier_costs[tier] += agent_result.cost

        for tier, cost in sorted(tier_costs.items()):
            lines.append(f"- **{tier}**: ${cost:.2f}")

        lines.append("")
        lines.append("---")
        lines.append("")
        lines.append("*Generated by Empathy Framework Meta-Workflow System*")

        return "\n".join(lines)


# =============================================================================
# Helper functions
# =============================================================================


def load_execution_result(run_id: str, storage_dir: str | None = None) -> MetaWorkflowResult:
    """Load a saved execution result.

    Args:
        run_id: ID of execution to load
        storage_dir: Directory where executions are stored

    Returns:
        Loaded MetaWorkflowResult

    Raises:
        FileNotFoundError: If result not found
        ValueError: If result file is invalid
    """
    if storage_dir is None:
        storage_dir = str(Path.home() / ".empathy" / "meta_workflows" / "executions")

    result_file = Path(storage_dir) / run_id / "result.json"

    if not result_file.exists():
        raise FileNotFoundError(f"Result not found: {run_id}")

    try:
        json_str = result_file.read_text(encoding="utf-8")
        data = json.loads(json_str)
        return MetaWorkflowResult.from_dict(data)

    except (json.JSONDecodeError, KeyError) as e:
        raise ValueError(f"Invalid result file: {e}") from e


def list_execution_results(storage_dir: str | None = None) -> list[str]:
    """List all saved execution results.

    Args:
        storage_dir: Directory where executions are stored

    Returns:
        List of run IDs (sorted by timestamp, newest first)
    """
    if storage_dir is None:
        storage_dir = str(Path.home() / ".empathy" / "meta_workflows" / "executions")

    storage_path = Path(storage_dir)

    if not storage_path.exists():
        return []

    # Find all directories with result.json
    run_ids = []
    for dir_path in storage_path.iterdir():
        if dir_path.is_dir() and (dir_path / "result.json").exists():
            run_ids.append(dir_path.name)

    # Sort by timestamp (newest first)
    run_ids.sort(reverse=True)

    return run_ids