kubiya-control-plane-api 0.9.15 (py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- control_plane_api/LICENSE +676 -0
- control_plane_api/README.md +350 -0
- control_plane_api/__init__.py +4 -0
- control_plane_api/__version__.py +8 -0
- control_plane_api/alembic/README +1 -0
- control_plane_api/alembic/env.py +121 -0
- control_plane_api/alembic/script.py.mako +28 -0
- control_plane_api/alembic/versions/2613c65c3dbe_initial_database_setup.py +32 -0
- control_plane_api/alembic/versions/2df520d4927d_merge_heads.py +28 -0
- control_plane_api/alembic/versions/43abf98d6a01_add_paused_status_to_executions.py +73 -0
- control_plane_api/alembic/versions/6289854264cb_merge_multiple_heads.py +28 -0
- control_plane_api/alembic/versions/6a4d4dc3d8dc_generate_execution_transitions.py +50 -0
- control_plane_api/alembic/versions/87d11cf0a783_add_disconnected_status_to_worker_.py +44 -0
- control_plane_api/alembic/versions/add_ephemeral_queue_support.py +85 -0
- control_plane_api/alembic/versions/add_model_type_to_llm_models.py +31 -0
- control_plane_api/alembic/versions/add_plan_executions_table.py +114 -0
- control_plane_api/alembic/versions/add_trace_span_tables.py +154 -0
- control_plane_api/alembic/versions/add_user_info_to_traces.py +36 -0
- control_plane_api/alembic/versions/adjusting_foreign_keys.py +32 -0
- control_plane_api/alembic/versions/b4983d976db2_initial_tables.py +1128 -0
- control_plane_api/alembic/versions/d181a3b40e71_rename_custom_metadata_to_metadata_in_.py +50 -0
- control_plane_api/alembic/versions/df9117888e82_add_missing_columns.py +82 -0
- control_plane_api/alembic/versions/f25de6ad895a_missing_migrations.py +34 -0
- control_plane_api/alembic/versions/f71305fb69b9_fix_ephemeral_queue_deletion_foreign_key.py +54 -0
- control_plane_api/alembic/versions/mark_local_exec_queues_as_ephemeral.py +68 -0
- control_plane_api/alembic.ini +148 -0
- control_plane_api/api/index.py +12 -0
- control_plane_api/app/__init__.py +11 -0
- control_plane_api/app/activities/__init__.py +20 -0
- control_plane_api/app/activities/agent_activities.py +384 -0
- control_plane_api/app/activities/plan_generation_activities.py +499 -0
- control_plane_api/app/activities/team_activities.py +424 -0
- control_plane_api/app/activities/temporal_cloud_activities.py +588 -0
- control_plane_api/app/config/__init__.py +35 -0
- control_plane_api/app/config/api_config.py +469 -0
- control_plane_api/app/config/config_loader.py +224 -0
- control_plane_api/app/config/model_pricing.py +323 -0
- control_plane_api/app/config/storage_config.py +159 -0
- control_plane_api/app/config.py +115 -0
- control_plane_api/app/controllers/__init__.py +0 -0
- control_plane_api/app/controllers/execution_environment_controller.py +1315 -0
- control_plane_api/app/database.py +135 -0
- control_plane_api/app/exceptions.py +408 -0
- control_plane_api/app/lib/__init__.py +11 -0
- control_plane_api/app/lib/environment.py +65 -0
- control_plane_api/app/lib/event_bus/__init__.py +17 -0
- control_plane_api/app/lib/event_bus/base.py +136 -0
- control_plane_api/app/lib/event_bus/manager.py +335 -0
- control_plane_api/app/lib/event_bus/providers/__init__.py +6 -0
- control_plane_api/app/lib/event_bus/providers/http_provider.py +166 -0
- control_plane_api/app/lib/event_bus/providers/nats_provider.py +324 -0
- control_plane_api/app/lib/event_bus/providers/redis_provider.py +233 -0
- control_plane_api/app/lib/event_bus/providers/websocket_provider.py +497 -0
- control_plane_api/app/lib/job_executor.py +330 -0
- control_plane_api/app/lib/kubiya_client.py +293 -0
- control_plane_api/app/lib/litellm_pricing.py +166 -0
- control_plane_api/app/lib/mcp_validation.py +163 -0
- control_plane_api/app/lib/nats/__init__.py +13 -0
- control_plane_api/app/lib/nats/credentials_manager.py +288 -0
- control_plane_api/app/lib/nats/listener.py +374 -0
- control_plane_api/app/lib/planning_prompt_builder.py +153 -0
- control_plane_api/app/lib/planning_tools/__init__.py +41 -0
- control_plane_api/app/lib/planning_tools/agents.py +409 -0
- control_plane_api/app/lib/planning_tools/agno_toolkit.py +836 -0
- control_plane_api/app/lib/planning_tools/base.py +119 -0
- control_plane_api/app/lib/planning_tools/cognitive_memory_tools.py +403 -0
- control_plane_api/app/lib/planning_tools/context_graph_tools.py +545 -0
- control_plane_api/app/lib/planning_tools/environments.py +218 -0
- control_plane_api/app/lib/planning_tools/knowledge.py +204 -0
- control_plane_api/app/lib/planning_tools/models.py +93 -0
- control_plane_api/app/lib/planning_tools/planning_service.py +646 -0
- control_plane_api/app/lib/planning_tools/resources.py +242 -0
- control_plane_api/app/lib/planning_tools/teams.py +334 -0
- control_plane_api/app/lib/policy_enforcer_client.py +1016 -0
- control_plane_api/app/lib/redis_client.py +803 -0
- control_plane_api/app/lib/sqlalchemy_utils.py +486 -0
- control_plane_api/app/lib/state_transition_tools/__init__.py +7 -0
- control_plane_api/app/lib/state_transition_tools/execution_context.py +388 -0
- control_plane_api/app/lib/storage/__init__.py +20 -0
- control_plane_api/app/lib/storage/base_provider.py +274 -0
- control_plane_api/app/lib/storage/provider_factory.py +157 -0
- control_plane_api/app/lib/storage/vercel_blob_provider.py +468 -0
- control_plane_api/app/lib/supabase.py +71 -0
- control_plane_api/app/lib/supabase_utils.py +138 -0
- control_plane_api/app/lib/task_planning/__init__.py +138 -0
- control_plane_api/app/lib/task_planning/agent_factory.py +308 -0
- control_plane_api/app/lib/task_planning/agents.py +389 -0
- control_plane_api/app/lib/task_planning/cache.py +218 -0
- control_plane_api/app/lib/task_planning/entity_resolver.py +273 -0
- control_plane_api/app/lib/task_planning/helpers.py +293 -0
- control_plane_api/app/lib/task_planning/hooks.py +474 -0
- control_plane_api/app/lib/task_planning/models.py +503 -0
- control_plane_api/app/lib/task_planning/plan_validator.py +166 -0
- control_plane_api/app/lib/task_planning/planning_workflow.py +2911 -0
- control_plane_api/app/lib/task_planning/runner.py +656 -0
- control_plane_api/app/lib/task_planning/streaming_hook.py +213 -0
- control_plane_api/app/lib/task_planning/workflow.py +424 -0
- control_plane_api/app/lib/templating/__init__.py +88 -0
- control_plane_api/app/lib/templating/compiler.py +278 -0
- control_plane_api/app/lib/templating/engine.py +178 -0
- control_plane_api/app/lib/templating/parsers/__init__.py +29 -0
- control_plane_api/app/lib/templating/parsers/base.py +96 -0
- control_plane_api/app/lib/templating/parsers/env.py +85 -0
- control_plane_api/app/lib/templating/parsers/graph.py +112 -0
- control_plane_api/app/lib/templating/parsers/secret.py +87 -0
- control_plane_api/app/lib/templating/parsers/simple.py +81 -0
- control_plane_api/app/lib/templating/resolver.py +366 -0
- control_plane_api/app/lib/templating/types.py +214 -0
- control_plane_api/app/lib/templating/validator.py +201 -0
- control_plane_api/app/lib/temporal_client.py +232 -0
- control_plane_api/app/lib/temporal_credentials_cache.py +178 -0
- control_plane_api/app/lib/temporal_credentials_service.py +203 -0
- control_plane_api/app/lib/validation/__init__.py +24 -0
- control_plane_api/app/lib/validation/runtime_validation.py +388 -0
- control_plane_api/app/main.py +531 -0
- control_plane_api/app/middleware/__init__.py +10 -0
- control_plane_api/app/middleware/auth.py +645 -0
- control_plane_api/app/middleware/exception_handler.py +267 -0
- control_plane_api/app/middleware/prometheus_middleware.py +173 -0
- control_plane_api/app/middleware/rate_limiting.py +384 -0
- control_plane_api/app/middleware/request_id.py +202 -0
- control_plane_api/app/models/__init__.py +40 -0
- control_plane_api/app/models/agent.py +90 -0
- control_plane_api/app/models/analytics.py +206 -0
- control_plane_api/app/models/associations.py +107 -0
- control_plane_api/app/models/auth_user.py +73 -0
- control_plane_api/app/models/context.py +161 -0
- control_plane_api/app/models/custom_integration.py +99 -0
- control_plane_api/app/models/environment.py +64 -0
- control_plane_api/app/models/execution.py +125 -0
- control_plane_api/app/models/execution_transition.py +50 -0
- control_plane_api/app/models/job.py +159 -0
- control_plane_api/app/models/llm_model.py +78 -0
- control_plane_api/app/models/orchestration.py +66 -0
- control_plane_api/app/models/plan_execution.py +102 -0
- control_plane_api/app/models/presence.py +49 -0
- control_plane_api/app/models/project.py +61 -0
- control_plane_api/app/models/project_management.py +85 -0
- control_plane_api/app/models/session.py +29 -0
- control_plane_api/app/models/skill.py +155 -0
- control_plane_api/app/models/system_tables.py +43 -0
- control_plane_api/app/models/task_planning.py +372 -0
- control_plane_api/app/models/team.py +86 -0
- control_plane_api/app/models/trace.py +257 -0
- control_plane_api/app/models/user_profile.py +54 -0
- control_plane_api/app/models/worker.py +221 -0
- control_plane_api/app/models/workflow.py +161 -0
- control_plane_api/app/models/workspace.py +50 -0
- control_plane_api/app/observability/__init__.py +177 -0
- control_plane_api/app/observability/context_logging.py +475 -0
- control_plane_api/app/observability/decorators.py +337 -0
- control_plane_api/app/observability/local_span_processor.py +702 -0
- control_plane_api/app/observability/metrics.py +303 -0
- control_plane_api/app/observability/middleware.py +246 -0
- control_plane_api/app/observability/optional.py +115 -0
- control_plane_api/app/observability/tracing.py +382 -0
- control_plane_api/app/policies/README.md +149 -0
- control_plane_api/app/policies/approved_users.rego +62 -0
- control_plane_api/app/policies/business_hours.rego +51 -0
- control_plane_api/app/policies/rate_limiting.rego +100 -0
- control_plane_api/app/policies/tool_enforcement/README.md +336 -0
- control_plane_api/app/policies/tool_enforcement/bash_command_validation.rego +71 -0
- control_plane_api/app/policies/tool_enforcement/business_hours_enforcement.rego +82 -0
- control_plane_api/app/policies/tool_enforcement/mcp_tool_allowlist.rego +58 -0
- control_plane_api/app/policies/tool_enforcement/production_safeguards.rego +80 -0
- control_plane_api/app/policies/tool_enforcement/role_based_tool_access.rego +44 -0
- control_plane_api/app/policies/tool_restrictions.rego +86 -0
- control_plane_api/app/routers/__init__.py +4 -0
- control_plane_api/app/routers/agents.py +382 -0
- control_plane_api/app/routers/agents_v2.py +1598 -0
- control_plane_api/app/routers/analytics.py +1310 -0
- control_plane_api/app/routers/auth.py +59 -0
- control_plane_api/app/routers/client_config.py +57 -0
- control_plane_api/app/routers/context_graph.py +561 -0
- control_plane_api/app/routers/context_manager.py +577 -0
- control_plane_api/app/routers/custom_integrations.py +490 -0
- control_plane_api/app/routers/enforcer.py +132 -0
- control_plane_api/app/routers/environment_context.py +252 -0
- control_plane_api/app/routers/environments.py +761 -0
- control_plane_api/app/routers/execution_environment.py +847 -0
- control_plane_api/app/routers/executions/__init__.py +28 -0
- control_plane_api/app/routers/executions/router.py +286 -0
- control_plane_api/app/routers/executions/services/__init__.py +22 -0
- control_plane_api/app/routers/executions/services/demo_worker_health.py +156 -0
- control_plane_api/app/routers/executions/services/status_service.py +420 -0
- control_plane_api/app/routers/executions/services/test_worker_health.py +480 -0
- control_plane_api/app/routers/executions/services/worker_health.py +514 -0
- control_plane_api/app/routers/executions/streaming/__init__.py +22 -0
- control_plane_api/app/routers/executions/streaming/deduplication.py +352 -0
- control_plane_api/app/routers/executions/streaming/event_buffer.py +353 -0
- control_plane_api/app/routers/executions/streaming/event_formatter.py +964 -0
- control_plane_api/app/routers/executions/streaming/history_loader.py +588 -0
- control_plane_api/app/routers/executions/streaming/live_source.py +693 -0
- control_plane_api/app/routers/executions/streaming/streamer.py +849 -0
- control_plane_api/app/routers/executions.py +4888 -0
- control_plane_api/app/routers/health.py +165 -0
- control_plane_api/app/routers/health_v2.py +394 -0
- control_plane_api/app/routers/integration_templates.py +496 -0
- control_plane_api/app/routers/integrations.py +287 -0
- control_plane_api/app/routers/jobs.py +1809 -0
- control_plane_api/app/routers/metrics.py +517 -0
- control_plane_api/app/routers/models.py +82 -0
- control_plane_api/app/routers/models_v2.py +628 -0
- control_plane_api/app/routers/plan_executions.py +1481 -0
- control_plane_api/app/routers/plan_generation_async.py +304 -0
- control_plane_api/app/routers/policies.py +669 -0
- control_plane_api/app/routers/presence.py +234 -0
- control_plane_api/app/routers/projects.py +987 -0
- control_plane_api/app/routers/runners.py +379 -0
- control_plane_api/app/routers/runtimes.py +172 -0
- control_plane_api/app/routers/secrets.py +171 -0
- control_plane_api/app/routers/skills.py +1010 -0
- control_plane_api/app/routers/skills_definitions.py +140 -0
- control_plane_api/app/routers/storage.py +456 -0
- control_plane_api/app/routers/task_planning.py +611 -0
- control_plane_api/app/routers/task_queues.py +650 -0
- control_plane_api/app/routers/team_context.py +274 -0
- control_plane_api/app/routers/teams.py +1747 -0
- control_plane_api/app/routers/templates.py +248 -0
- control_plane_api/app/routers/traces.py +571 -0
- control_plane_api/app/routers/websocket_client.py +479 -0
- control_plane_api/app/routers/websocket_executions_status.py +437 -0
- control_plane_api/app/routers/websocket_gateway.py +323 -0
- control_plane_api/app/routers/websocket_traces.py +576 -0
- control_plane_api/app/routers/worker_queues.py +2555 -0
- control_plane_api/app/routers/worker_websocket.py +419 -0
- control_plane_api/app/routers/workers.py +1004 -0
- control_plane_api/app/routers/workflows.py +204 -0
- control_plane_api/app/runtimes/__init__.py +6 -0
- control_plane_api/app/runtimes/validation.py +344 -0
- control_plane_api/app/schemas/__init__.py +1 -0
- control_plane_api/app/schemas/job_schemas.py +302 -0
- control_plane_api/app/schemas/mcp_schemas.py +311 -0
- control_plane_api/app/schemas/template_schemas.py +133 -0
- control_plane_api/app/schemas/trace_schemas.py +168 -0
- control_plane_api/app/schemas/worker_queue_observability_schemas.py +165 -0
- control_plane_api/app/services/__init__.py +1 -0
- control_plane_api/app/services/agno_planning_strategy.py +233 -0
- control_plane_api/app/services/agno_service.py +838 -0
- control_plane_api/app/services/claude_code_planning_service.py +203 -0
- control_plane_api/app/services/context_graph_client.py +224 -0
- control_plane_api/app/services/custom_integration_service.py +415 -0
- control_plane_api/app/services/integration_resolution_service.py +345 -0
- control_plane_api/app/services/litellm_service.py +394 -0
- control_plane_api/app/services/plan_generator.py +79 -0
- control_plane_api/app/services/planning_strategy.py +66 -0
- control_plane_api/app/services/planning_strategy_factory.py +118 -0
- control_plane_api/app/services/policy_service.py +615 -0
- control_plane_api/app/services/state_transition_service.py +755 -0
- control_plane_api/app/services/storage_service.py +593 -0
- control_plane_api/app/services/temporal_cloud_provisioning.py +150 -0
- control_plane_api/app/services/toolsets/context_graph_skill.py +432 -0
- control_plane_api/app/services/trace_retention.py +354 -0
- control_plane_api/app/services/worker_queue_metrics_service.py +190 -0
- control_plane_api/app/services/workflow_cancellation_manager.py +135 -0
- control_plane_api/app/services/workflow_operations_service.py +611 -0
- control_plane_api/app/skills/__init__.py +100 -0
- control_plane_api/app/skills/base.py +239 -0
- control_plane_api/app/skills/builtin/__init__.py +37 -0
- control_plane_api/app/skills/builtin/agent_communication/__init__.py +8 -0
- control_plane_api/app/skills/builtin/agent_communication/skill.py +246 -0
- control_plane_api/app/skills/builtin/code_ingestion/__init__.py +4 -0
- control_plane_api/app/skills/builtin/code_ingestion/skill.py +267 -0
- control_plane_api/app/skills/builtin/cognitive_memory/__init__.py +4 -0
- control_plane_api/app/skills/builtin/cognitive_memory/skill.py +174 -0
- control_plane_api/app/skills/builtin/contextual_awareness/__init__.py +4 -0
- control_plane_api/app/skills/builtin/contextual_awareness/skill.py +387 -0
- control_plane_api/app/skills/builtin/data_visualization/__init__.py +4 -0
- control_plane_api/app/skills/builtin/data_visualization/skill.py +154 -0
- control_plane_api/app/skills/builtin/docker/__init__.py +4 -0
- control_plane_api/app/skills/builtin/docker/skill.py +104 -0
- control_plane_api/app/skills/builtin/file_generation/__init__.py +4 -0
- control_plane_api/app/skills/builtin/file_generation/skill.py +94 -0
- control_plane_api/app/skills/builtin/file_system/__init__.py +4 -0
- control_plane_api/app/skills/builtin/file_system/skill.py +110 -0
- control_plane_api/app/skills/builtin/knowledge_api/__init__.py +5 -0
- control_plane_api/app/skills/builtin/knowledge_api/skill.py +124 -0
- control_plane_api/app/skills/builtin/python/__init__.py +4 -0
- control_plane_api/app/skills/builtin/python/skill.py +92 -0
- control_plane_api/app/skills/builtin/remote_filesystem/__init__.py +5 -0
- control_plane_api/app/skills/builtin/remote_filesystem/skill.py +170 -0
- control_plane_api/app/skills/builtin/shell/__init__.py +4 -0
- control_plane_api/app/skills/builtin/shell/skill.py +161 -0
- control_plane_api/app/skills/builtin/slack/__init__.py +3 -0
- control_plane_api/app/skills/builtin/slack/skill.py +302 -0
- control_plane_api/app/skills/builtin/workflow_executor/__init__.py +4 -0
- control_plane_api/app/skills/builtin/workflow_executor/skill.py +469 -0
- control_plane_api/app/skills/business_intelligence.py +189 -0
- control_plane_api/app/skills/config.py +63 -0
- control_plane_api/app/skills/loaders/__init__.py +14 -0
- control_plane_api/app/skills/loaders/base.py +73 -0
- control_plane_api/app/skills/loaders/filesystem_loader.py +199 -0
- control_plane_api/app/skills/registry.py +125 -0
- control_plane_api/app/utils/helpers.py +12 -0
- control_plane_api/app/utils/workflow_executor.py +354 -0
- control_plane_api/app/workflows/__init__.py +11 -0
- control_plane_api/app/workflows/agent_execution.py +520 -0
- control_plane_api/app/workflows/agent_execution_with_skills.py +223 -0
- control_plane_api/app/workflows/namespace_provisioning.py +326 -0
- control_plane_api/app/workflows/plan_generation.py +254 -0
- control_plane_api/app/workflows/team_execution.py +442 -0
- control_plane_api/scripts/seed_models.py +240 -0
- control_plane_api/scripts/validate_existing_tool_names.py +492 -0
- control_plane_api/shared/__init__.py +8 -0
- control_plane_api/shared/version.py +17 -0
- control_plane_api/test_deduplication.py +274 -0
- control_plane_api/test_executor_deduplication_e2e.py +309 -0
- control_plane_api/test_job_execution_e2e.py +283 -0
- control_plane_api/test_real_integration.py +193 -0
- control_plane_api/version.py +38 -0
- control_plane_api/worker/__init__.py +0 -0
- control_plane_api/worker/activities/__init__.py +0 -0
- control_plane_api/worker/activities/agent_activities.py +1585 -0
- control_plane_api/worker/activities/approval_activities.py +234 -0
- control_plane_api/worker/activities/job_activities.py +199 -0
- control_plane_api/worker/activities/runtime_activities.py +1167 -0
- control_plane_api/worker/activities/skill_activities.py +282 -0
- control_plane_api/worker/activities/team_activities.py +479 -0
- control_plane_api/worker/agent_runtime_server.py +370 -0
- control_plane_api/worker/binary_manager.py +333 -0
- control_plane_api/worker/config/__init__.py +31 -0
- control_plane_api/worker/config/worker_config.py +273 -0
- control_plane_api/worker/control_plane_client.py +1491 -0
- control_plane_api/worker/examples/analytics_integration_example.py +362 -0
- control_plane_api/worker/health_monitor.py +159 -0
- control_plane_api/worker/metrics.py +237 -0
- control_plane_api/worker/models/__init__.py +1 -0
- control_plane_api/worker/models/error_events.py +105 -0
- control_plane_api/worker/models/inputs.py +89 -0
- control_plane_api/worker/runtimes/__init__.py +35 -0
- control_plane_api/worker/runtimes/agent_runtime/runtime.py +485 -0
- control_plane_api/worker/runtimes/agno/__init__.py +34 -0
- control_plane_api/worker/runtimes/agno/config.py +248 -0
- control_plane_api/worker/runtimes/agno/hooks.py +385 -0
- control_plane_api/worker/runtimes/agno/mcp_builder.py +195 -0
- control_plane_api/worker/runtimes/agno/runtime.py +1063 -0
- control_plane_api/worker/runtimes/agno/utils.py +163 -0
- control_plane_api/worker/runtimes/base.py +979 -0
- control_plane_api/worker/runtimes/claude_code/__init__.py +38 -0
- control_plane_api/worker/runtimes/claude_code/cleanup.py +184 -0
- control_plane_api/worker/runtimes/claude_code/client_pool.py +529 -0
- control_plane_api/worker/runtimes/claude_code/config.py +829 -0
- control_plane_api/worker/runtimes/claude_code/hooks.py +482 -0
- control_plane_api/worker/runtimes/claude_code/litellm_proxy.py +1702 -0
- control_plane_api/worker/runtimes/claude_code/mcp_builder.py +467 -0
- control_plane_api/worker/runtimes/claude_code/mcp_discovery.py +558 -0
- control_plane_api/worker/runtimes/claude_code/runtime.py +1546 -0
- control_plane_api/worker/runtimes/claude_code/tool_mapper.py +403 -0
- control_plane_api/worker/runtimes/claude_code/utils.py +149 -0
- control_plane_api/worker/runtimes/factory.py +173 -0
- control_plane_api/worker/runtimes/model_utils.py +107 -0
- control_plane_api/worker/runtimes/validation.py +93 -0
- control_plane_api/worker/services/__init__.py +1 -0
- control_plane_api/worker/services/agent_communication_tools.py +908 -0
- control_plane_api/worker/services/agent_executor.py +485 -0
- control_plane_api/worker/services/agent_executor_v2.py +793 -0
- control_plane_api/worker/services/analytics_collector.py +457 -0
- control_plane_api/worker/services/analytics_service.py +464 -0
- control_plane_api/worker/services/approval_tools.py +310 -0
- control_plane_api/worker/services/approval_tools_agno.py +207 -0
- control_plane_api/worker/services/cancellation_manager.py +177 -0
- control_plane_api/worker/services/code_ingestion_tools.py +465 -0
- control_plane_api/worker/services/contextual_awareness_tools.py +405 -0
- control_plane_api/worker/services/data_visualization.py +834 -0
- control_plane_api/worker/services/event_publisher.py +531 -0
- control_plane_api/worker/services/jira_tools.py +257 -0
- control_plane_api/worker/services/remote_filesystem_tools.py +498 -0
- control_plane_api/worker/services/runtime_analytics.py +328 -0
- control_plane_api/worker/services/session_service.py +365 -0
- control_plane_api/worker/services/skill_context_enhancement.py +181 -0
- control_plane_api/worker/services/skill_factory.py +471 -0
- control_plane_api/worker/services/system_prompt_enhancement.py +410 -0
- control_plane_api/worker/services/team_executor.py +715 -0
- control_plane_api/worker/services/team_executor_v2.py +1866 -0
- control_plane_api/worker/services/tool_enforcement.py +254 -0
- control_plane_api/worker/services/workflow_executor/__init__.py +52 -0
- control_plane_api/worker/services/workflow_executor/event_processor.py +287 -0
- control_plane_api/worker/services/workflow_executor/event_publisher.py +210 -0
- control_plane_api/worker/services/workflow_executor/executors/__init__.py +15 -0
- control_plane_api/worker/services/workflow_executor/executors/base.py +270 -0
- control_plane_api/worker/services/workflow_executor/executors/json_executor.py +50 -0
- control_plane_api/worker/services/workflow_executor/executors/python_executor.py +50 -0
- control_plane_api/worker/services/workflow_executor/models.py +142 -0
- control_plane_api/worker/services/workflow_executor_tools.py +1748 -0
- control_plane_api/worker/skills/__init__.py +12 -0
- control_plane_api/worker/skills/builtin/context_graph_search/README.md +213 -0
- control_plane_api/worker/skills/builtin/context_graph_search/__init__.py +5 -0
- control_plane_api/worker/skills/builtin/context_graph_search/agno_impl.py +808 -0
- control_plane_api/worker/skills/builtin/context_graph_search/skill.yaml +67 -0
- control_plane_api/worker/skills/builtin/contextual_awareness/__init__.py +4 -0
- control_plane_api/worker/skills/builtin/contextual_awareness/agno_impl.py +62 -0
- control_plane_api/worker/skills/builtin/data_visualization/agno_impl.py +18 -0
- control_plane_api/worker/skills/builtin/data_visualization/skill.yaml +84 -0
- control_plane_api/worker/skills/builtin/docker/agno_impl.py +65 -0
- control_plane_api/worker/skills/builtin/docker/skill.yaml +60 -0
- control_plane_api/worker/skills/builtin/file_generation/agno_impl.py +47 -0
- control_plane_api/worker/skills/builtin/file_generation/skill.yaml +64 -0
- control_plane_api/worker/skills/builtin/file_system/agno_impl.py +32 -0
- control_plane_api/worker/skills/builtin/file_system/skill.yaml +54 -0
- control_plane_api/worker/skills/builtin/knowledge_api/__init__.py +4 -0
- control_plane_api/worker/skills/builtin/knowledge_api/agno_impl.py +50 -0
- control_plane_api/worker/skills/builtin/knowledge_api/skill.yaml +66 -0
- control_plane_api/worker/skills/builtin/python/agno_impl.py +25 -0
- control_plane_api/worker/skills/builtin/python/skill.yaml +60 -0
- control_plane_api/worker/skills/builtin/schema_fix_mixin.py +260 -0
- control_plane_api/worker/skills/builtin/shell/agno_impl.py +31 -0
- control_plane_api/worker/skills/builtin/shell/skill.yaml +60 -0
- control_plane_api/worker/skills/builtin/slack/__init__.py +3 -0
- control_plane_api/worker/skills/builtin/slack/agno_impl.py +1282 -0
- control_plane_api/worker/skills/builtin/slack/skill.yaml +276 -0
- control_plane_api/worker/skills/builtin/workflow_executor/agno_impl.py +62 -0
- control_plane_api/worker/skills/builtin/workflow_executor/skill.yaml +79 -0
- control_plane_api/worker/skills/loaders/__init__.py +5 -0
- control_plane_api/worker/skills/loaders/base.py +23 -0
- control_plane_api/worker/skills/loaders/filesystem_loader.py +357 -0
- control_plane_api/worker/skills/registry.py +208 -0
- control_plane_api/worker/tests/__init__.py +1 -0
- control_plane_api/worker/tests/conftest.py +12 -0
- control_plane_api/worker/tests/e2e/__init__.py +0 -0
- control_plane_api/worker/tests/e2e/test_context_graph_real_api.py +338 -0
- control_plane_api/worker/tests/e2e/test_context_graph_templates_e2e.py +523 -0
- control_plane_api/worker/tests/e2e/test_enforcement_e2e.py +344 -0
- control_plane_api/worker/tests/e2e/test_execution_flow.py +571 -0
- control_plane_api/worker/tests/e2e/test_single_execution_mode.py +656 -0
- control_plane_api/worker/tests/integration/__init__.py +0 -0
- control_plane_api/worker/tests/integration/test_builtin_skills_fixes.py +245 -0
- control_plane_api/worker/tests/integration/test_context_graph_search_integration.py +365 -0
- control_plane_api/worker/tests/integration/test_control_plane_integration.py +308 -0
- control_plane_api/worker/tests/integration/test_hook_enforcement_integration.py +579 -0
- control_plane_api/worker/tests/integration/test_scheduled_job_workflow.py +237 -0
- control_plane_api/worker/tests/integration/test_system_prompt_enhancement_integration.py +343 -0
- control_plane_api/worker/tests/unit/__init__.py +0 -0
- control_plane_api/worker/tests/unit/test_builtin_skill_autoload.py +396 -0
- control_plane_api/worker/tests/unit/test_context_graph_search.py +450 -0
- control_plane_api/worker/tests/unit/test_context_graph_templates.py +403 -0
- control_plane_api/worker/tests/unit/test_control_plane_client.py +401 -0
- control_plane_api/worker/tests/unit/test_control_plane_client_jobs.py +345 -0
- control_plane_api/worker/tests/unit/test_job_activities.py +353 -0
- control_plane_api/worker/tests/unit/test_skill_context_enhancement.py +321 -0
- control_plane_api/worker/tests/unit/test_system_prompt_enhancement.py +415 -0
- control_plane_api/worker/tests/unit/test_tool_enforcement.py +324 -0
- control_plane_api/worker/utils/__init__.py +1 -0
- control_plane_api/worker/utils/chunk_batcher.py +330 -0
- control_plane_api/worker/utils/environment.py +65 -0
- control_plane_api/worker/utils/error_publisher.py +260 -0
- control_plane_api/worker/utils/event_batcher.py +256 -0
- control_plane_api/worker/utils/logging_config.py +335 -0
- control_plane_api/worker/utils/logging_helper.py +326 -0
- control_plane_api/worker/utils/parameter_validator.py +120 -0
- control_plane_api/worker/utils/retry_utils.py +60 -0
- control_plane_api/worker/utils/streaming_utils.py +665 -0
- control_plane_api/worker/utils/tool_validation.py +332 -0
- control_plane_api/worker/utils/workspace_manager.py +163 -0
- control_plane_api/worker/websocket_client.py +393 -0
- control_plane_api/worker/worker.py +1297 -0
- control_plane_api/worker/workflows/__init__.py +0 -0
- control_plane_api/worker/workflows/agent_execution.py +909 -0
- control_plane_api/worker/workflows/scheduled_job_wrapper.py +332 -0
- control_plane_api/worker/workflows/team_execution.py +611 -0
- kubiya_control_plane_api-0.9.15.dist-info/METADATA +354 -0
- kubiya_control_plane_api-0.9.15.dist-info/RECORD +479 -0
- kubiya_control_plane_api-0.9.15.dist-info/WHEEL +5 -0
- kubiya_control_plane_api-0.9.15.dist-info/entry_points.txt +5 -0
- kubiya_control_plane_api-0.9.15.dist-info/licenses/LICENSE +676 -0
- kubiya_control_plane_api-0.9.15.dist-info/top_level.txt +3 -0
- scripts/__init__.py +1 -0
- scripts/migrations.py +39 -0
- scripts/seed_worker_queues.py +128 -0
- scripts/setup_agent_runtime.py +142 -0
- worker_internal/__init__.py +1 -0
- worker_internal/planner/__init__.py +1 -0
- worker_internal/planner/activities.py +1499 -0
- worker_internal/planner/agent_tools.py +197 -0
- worker_internal/planner/event_models.py +148 -0
- worker_internal/planner/event_publisher.py +67 -0
- worker_internal/planner/models.py +199 -0
- worker_internal/planner/retry_logic.py +134 -0
- worker_internal/planner/worker.py +300 -0
- worker_internal/planner/workflows.py +970 -0
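
The largest file added in this release, control_plane_api/app/lib/task_planning/planning_workflow.py (+2911 lines), is excerpted below. Its Pydantic output models use cross-field validators to reject entity IDs that were not returned by the discovery tools. The following minimal usage sketch (not part of the package; the agent data and UUID are made up for illustration, and it assumes the package is installed and pydantic v2 is in use) shows how those validators surface a hallucinated ID; the model definition itself appears in the excerpt that follows.

```python
# Hypothetical usage sketch: demonstrates how the cross-field validators in
# planning_workflow.py reject an entity "ID" that is actually a name.
from pydantic import ValidationError

from control_plane_api.app.lib.task_planning.planning_workflow import ResourceDiscoveryOutput

# Made-up discovery result for illustration only.
discovered = [{"id": "3f2c9a6e-8b41-4f1d-9c2a-7e5d1b0a4c33", "name": "devops-agent"}]

try:
    ResourceDiscoveryOutput(
        discovered_agents=discovered,
        discovered_teams=[],
        recommended_entity_type="agent",
        recommended_entity_id="devops-agent",   # a name, not a UUID -> rejected
        recommended_entity_name="devops-agent",
        reasoning="matches the required kubectl capability",
    )
except ValidationError as exc:
    # The validator's message explains the mistake, e.g.
    # "recommended_entity_id 'devops-agent' is NOT a valid UUID! ..."
    print(exc)
```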
@@ -0,0 +1,2911 @@

"""
Task Planning Workflow - Multi-step deterministic planning using Agno Workflows

This module implements a structured, multi-agent workflow for task planning:
1. Task Analysis - Understand requirements and identify needed capabilities
2. Resource Discovery - Find matching agents/teams using context graph
3. Cost Estimation - Calculate time and cost estimates
4. Plan Generation - Create final structured execution plan

Each step has clear inputs/outputs and can be streamed for real-time progress updates.
"""

from typing import Optional, Dict, Any, List
from sqlalchemy.orm import Session
import structlog
import os
import json
import time
import uuid

from agno.agent import Agent
from agno.workflow import Workflow
from agno.models.litellm import LiteLLM
from pydantic import BaseModel, Field, field_validator, ValidationError

from control_plane_api.app.models.task_planning import (
    TaskPlanResponse,
    TaskPlanRequest,
    AnalysisAndSelectionOutput,
)
from control_plane_api.app.lib.planning_tools.agno_toolkit import PlanningToolkit

logger = structlog.get_logger()


# ============================================================================
# Step Output Models - Define what each step produces
# ============================================================================

class TaskAnalysisOutput(BaseModel):
    """Output from Step 1: Task Analysis"""

    task_summary: str = Field(description="Clear 1-2 sentence summary of what needs to be done")
    required_capabilities: List[str] = Field(
        description="List of required capabilities (e.g., 'aws_s3', 'kubectl', 'python')"
    )
    task_type: str = Field(
        description="Type of task: deployment, analysis, automation, migration, monitoring, etc."
    )
    complexity_estimate: str = Field(
        description="Initial complexity assessment: simple, moderate, complex"
    )
    story_points_estimate: int = Field(
        description="Story points estimate (1-21 Fibonacci scale)",
        ge=1,
        le=21
    )
    needs_multi_agent: bool = Field(
        description="Whether this task requires multiple agents (team) or single agent"
    )
    reasoning: str = Field(
        description="Explanation of analysis and why these capabilities are needed"
    )


class ResourceDiscoveryOutput(BaseModel):
    """Output from Step 2: Resource Discovery

    CRITICAL: recommended_entity_id MUST come from discovered_agents or discovered_teams.
    This validator ensures no hallucinated IDs.
    """

    discovered_agents: List[Dict[str, Any]] = Field(
        description="REQUIRED: List of agents found using tools. Must call list_agents() or search_agents_by_capability().",
        min_length=0
    )
    discovered_teams: List[Dict[str, Any]] = Field(
        description="REQUIRED: List of teams found using tools. Must call list_teams() or search_teams_by_capability().",
        min_length=0
    )
    recommended_entity_type: Optional[str] = Field(
        default=None,
        description="Either 'agent' or 'team' based on task needs (None if no resources available)"
    )
    recommended_entity_id: Optional[str] = Field(
        default=None,
        description="ID of the recommended agent or team - MUST exist in discovered_agents or discovered_teams (None if no resources available)"
    )
    recommended_entity_name: Optional[str] = Field(
        default=None,
        description="Name of the recommended agent or team - MUST match the name from tool results (None if no resources available)"
    )
    reasoning: str = Field(
        description="Why this agent/team was selected as best match from the discovered options"
    )
    discovered_environments: List[Dict[str, Any]] = Field(
        default_factory=list,
        description="List of environments found using list_environments() tool. Required if recommending environment."
    )
    discovered_worker_queues: List[Dict[str, Any]] = Field(
        default_factory=list,
        description="List of worker queues found using list_worker_queues() tool. Required if recommending queue."
    )
    recommended_environment_id: Optional[str] = Field(
        default=None,
        description="UUID of the recommended environment - MUST exist in discovered_environments (not a name!)"
    )
    recommended_environment_name: Optional[str] = Field(
        default=None,
        description="Name of the recommended environment - MUST match the name from discovered_environments"
    )
    recommended_worker_queue_id: Optional[str] = Field(
        default=None,
        description="UUID of the recommended worker queue - MUST exist in discovered_worker_queues (not a name!)"
    )
    recommended_worker_queue_name: Optional[str] = Field(
        default=None,
        description="Name of the recommended worker queue - MUST match the name from discovered_worker_queues"
    )

    @field_validator('discovered_agents', 'discovered_teams')
    @classmethod
    def validate_discovered_not_empty(cls, v, info):
        """At least one of discovered_agents or discovered_teams must have results"""
        # This will be checked after both fields are set
        return v

    @field_validator('recommended_entity_id')
    @classmethod
    def validate_entity_id_exists(cls, v, info):
        """CRITICAL: Validate that recommended ID is a UUID and exists in discovered lists"""
        # Allow None recommendations (edge case: no suitable resources found)
        if v is None:
            return v

        # CRITICAL: Validate UUID format first
        try:
            uuid.UUID(v)
        except (ValueError, AttributeError, TypeError):
            raise ValueError(
                f"recommended_entity_id '{v}' is NOT a valid UUID! "
                f"It appears to be a name instead of an ID. "
                f"You MUST use the 'id' field (UUID) from tool results, NOT the 'name' field! "
                f"Common mistake: using agent['name'] instead of agent['id']. "
                f"UUID format: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx (36 characters with dashes)"
            )

        discovered_agents = info.data.get('discovered_agents', [])
        discovered_teams = info.data.get('discovered_teams', [])
        entity_type = info.data.get('recommended_entity_type', '')

        # Check if at least one discovery was made
        if not discovered_agents and not discovered_teams:
            raise ValueError(
                "Cannot recommend an entity when no agents or teams were discovered. "
                "You MUST call list_agents() or list_teams() or search tools first!"
            )

        # Validate ID exists in the appropriate list
        if entity_type == 'agent':
            agent_ids = [str(a.get('id', '')) for a in discovered_agents if a.get('id')]
            if v not in agent_ids:
                raise ValueError(
                    f"Recommended agent_id '{v}' does not exist in discovered_agents. "
                    f"Available agent IDs: {agent_ids}. "
                    f"You MUST choose from the agents returned by tools, not make up an ID!"
                )
        elif entity_type == 'team':
            team_ids = [str(t.get('id', '')) for t in discovered_teams if t.get('id')]
            if v not in team_ids:
                raise ValueError(
                    f"Recommended team_id '{v}' does not exist in discovered_teams. "
                    f"Available team IDs: {team_ids}. "
                    f"You MUST choose from the teams returned by tools, not make up an ID!"
                )
        else:
            raise ValueError(
                f"recommended_entity_type must be 'agent' or 'team', got '{entity_type}'"
            )

        return v

    @field_validator('recommended_entity_name')
    @classmethod
    def validate_entity_name_matches(cls, v, info):
        """Validate that recommended name matches the entity from discovered lists"""
        # Allow None recommendations (edge case: no suitable resources found)
        if v is None:
            return v

        discovered_agents = info.data.get('discovered_agents', [])
        discovered_teams = info.data.get('discovered_teams', [])
        entity_type = info.data.get('recommended_entity_type', '')
        entity_id = info.data.get('recommended_entity_id', '')

        # Find the entity and verify name matches
        if entity_type == 'agent':
            for agent in discovered_agents:
                if str(agent.get('id')) == entity_id:
                    actual_name = agent.get('name', '')
                    if v != actual_name:
                        raise ValueError(
                            f"Recommended name '{v}' does not match actual agent name '{actual_name}'. "
                            f"You MUST use the exact name from tool results!"
                        )
                    break
        elif entity_type == 'team':
            for team in discovered_teams:
                if str(team.get('id')) == entity_id:
                    actual_name = team.get('name', '')
                    if v != actual_name:
                        raise ValueError(
                            f"Recommended name '{v}' does not match actual team name '{actual_name}'. "
                            f"You MUST use the exact name from tool results!"
                        )
                    break

        return v

    @field_validator('recommended_environment_id')
    @classmethod
    def validate_environment_id_exists(cls, v, info):
        """CRITICAL: Validate that recommended environment ID exists in discovered list"""
        if v is None:
            return v  # Optional field

        discovered_environments = info.data.get('discovered_environments', [])

        if not discovered_environments:
            raise ValueError(
                "Cannot recommend an environment when no environments were discovered. "
                "You MUST call list_environments() tool first!"
            )

        env_ids = [str(e.get('id', '')) for e in discovered_environments if e.get('id')]
        if v not in env_ids:
            raise ValueError(
                f"CRITICAL: recommended_environment_id '{v}' does NOT exist in discovered_environments! "
                f"Available environment IDs (UUIDs): {env_ids}. "
                f"You MUST use an actual UUID from the list_environments() tool result, NOT a name! "
                f"This is a hallucination - copy the 'id' field EXACTLY from the tool response!"
            )

        return v

    @field_validator('recommended_environment_name')
    @classmethod
    def validate_environment_name_matches(cls, v, info):
        """Validate that recommended environment name matches the ID from discovered list"""
        if v is None:
            return v  # Optional field

        discovered_environments = info.data.get('discovered_environments', [])
        environment_id = info.data.get('recommended_environment_id', '')

        # Find the environment and verify name matches
        for env in discovered_environments:
            if str(env.get('id')) == environment_id:
                actual_name = env.get('name', '')
                if v != actual_name:
                    raise ValueError(
                        f"Recommended environment name '{v}' does not match actual name '{actual_name}' for ID {environment_id}. "
                        f"You MUST use the exact name from list_environments() tool results!"
                    )
                break

        return v

    @field_validator('recommended_worker_queue_id')
    @classmethod
    def validate_worker_queue_id_exists(cls, v, info):
        """CRITICAL: Validate that recommended worker queue ID exists in discovered list"""
        if v is None:
            return v  # Optional field

        discovered_worker_queues = info.data.get('discovered_worker_queues', [])

        if not discovered_worker_queues:
            raise ValueError(
                "Cannot recommend a worker queue when no queues were discovered. "
                "You MUST call list_worker_queues() tool first!"
            )

        queue_ids = [str(q.get('id', '')) for q in discovered_worker_queues if q.get('id')]
        if v not in queue_ids:
            raise ValueError(
                f"CRITICAL: recommended_worker_queue_id '{v}' does NOT exist in discovered_worker_queues! "
                f"Available worker queue IDs (UUIDs): {queue_ids}. "
                f"You MUST use an actual UUID from the list_worker_queues() tool result, NOT a name! "
                f"This is a hallucination - copy the 'id' field EXACTLY from the tool response!"
            )

        return v

    @field_validator('recommended_worker_queue_name')
    @classmethod
    def validate_worker_queue_name_matches(cls, v, info):
        """Validate that recommended worker queue name matches the ID from discovered list"""
        if v is None:
            return v  # Optional field

        discovered_worker_queues = info.data.get('discovered_worker_queues', [])
        queue_id = info.data.get('recommended_worker_queue_id', '')

        # Find the queue and verify name matches
        for queue in discovered_worker_queues:
            if str(queue.get('id')) == queue_id:
                actual_name = queue.get('name', '')
                if v != actual_name:
                    raise ValueError(
                        f"Recommended worker queue name '{v}' does not match actual name '{actual_name}' for ID {queue_id}. "
                        f"You MUST use the exact name from list_worker_queues() tool results!"
                    )
                break

        return v


def _validate_resource_discovery(output: ResourceDiscoveryOutput) -> None:
    """
    Explicitly validate ResourceDiscoveryOutput to catch issues that Agno might suppress.

    This is a safety net in case Pydantic validation is bypassed or silently caught
    by the Agno framework. Raises ValueError with detailed diagnostics.

    Args:
        output: ResourceDiscoveryOutput to validate

    Raises:
        ValueError: If validation fails with detailed error message
    """
    # Check discovered lists are populated
    if not output.discovered_agents and not output.discovered_teams:
        raise ValueError(
            "ResourceDiscoveryOutput validation failed: "
            "Both discovered_agents and discovered_teams are empty. "
            "You MUST call list_agents() or list_teams() tools and populate these fields!"
        )

    # If recommendation is None, that's OK (edge case: no suitable resources found)
    if output.recommended_entity_id is None:
        logger.warning("no_entity_recommended",
                       discovered_agents=len(output.discovered_agents),
                       discovered_teams=len(output.discovered_teams))
        return

    # CRITICAL: Validate that entity_id is a valid UUID format
    try:
        uuid.UUID(output.recommended_entity_id)
        logger.info(
            "entity_id_uuid_validation_passed",
            entity_id=output.recommended_entity_id,
            entity_type=output.recommended_entity_type
        )
    except (ValueError, AttributeError) as e:
        # entity_id is not a valid UUID - it might be a name instead!
        logger.error(
            "entity_id_not_uuid",
            entity_id=output.recommended_entity_id,
            entity_type=output.recommended_entity_type,
            error=str(e)
        )

        # Try to fix it by finding the matching entity and using its ID
        if output.recommended_entity_type == 'agent':
            # Look for an agent with this name
            matching_agent = next(
                (a for a in output.discovered_agents
                 if a.get('name') == output.recommended_entity_id),
                None
            )
            if matching_agent:
                correct_id = matching_agent.get('id')
                logger.warning(
                    "entity_id_was_name_fixed",
                    provided_name=output.recommended_entity_id,
                    correct_uuid=correct_id,
                    entity_type='agent'
                )
                # Fix the output by replacing name with UUID
                output.recommended_entity_id = str(correct_id)
                output.recommended_entity_name = matching_agent.get('name')
            else:
                raise ValueError(
                    f"CRITICAL UUID VALIDATION ERROR: recommended_entity_id '{output.recommended_entity_id}' "
                    f"is NOT a valid UUID! It appears to be an agent name, but no matching agent was found. "
                    f"You MUST use the 'id' field (UUID) from tool results, NOT the 'name' field! "
                    f"Available agents: {[a.get('name') for a in output.discovered_agents]}"
                )
        elif output.recommended_entity_type == 'team':
            # Look for a team with this name
            matching_team = next(
                (t for t in output.discovered_teams
                 if t.get('name') == output.recommended_entity_id),
                None
            )
            if matching_team:
                correct_id = matching_team.get('id')
                logger.warning(
                    "entity_id_was_name_fixed",
                    provided_name=output.recommended_entity_id,
                    correct_uuid=correct_id,
                    entity_type='team'
                )
                # Fix the output by replacing name with UUID
                output.recommended_entity_id = str(correct_id)
                output.recommended_entity_name = matching_team.get('name')
            else:
                raise ValueError(
                    f"CRITICAL UUID VALIDATION ERROR: recommended_entity_id '{output.recommended_entity_id}' "
                    f"is NOT a valid UUID! It appears to be a team name, but no matching team was found. "
                    f"You MUST use the 'id' field (UUID) from tool results, NOT the 'name' field! "
                    f"Available teams: {[t.get('name') for t in output.discovered_teams]}"
                )

    # Validate ID exists in appropriate list
    if output.recommended_entity_type == 'agent':
        agent_ids = [str(a.get('id', '')) for a in output.discovered_agents if a.get('id')]
        if output.recommended_entity_id not in agent_ids:
            raise ValueError(
                f"CRITICAL VALIDATION ERROR: recommended_entity_id '{output.recommended_entity_id}' "
                f"does NOT exist in discovered_agents list! "
                f"Available agent IDs: {agent_ids}. "
                f"This is a hallucination - you MUST use an ID from the tool results!"
            )
    elif output.recommended_entity_type == 'team':
        team_ids = [str(t.get('id', '')) for t in output.discovered_teams if t.get('id')]
        if output.recommended_entity_id not in team_ids:
            raise ValueError(
                f"CRITICAL VALIDATION ERROR: recommended_entity_id '{output.recommended_entity_id}' "
                f"does NOT exist in discovered_teams list! "
                f"Available team IDs: {team_ids}. "
                f"This is a hallucination - you MUST use an ID from the tool results!"
            )

    # Validate name matches
    if output.recommended_entity_type == 'agent':
        for agent in output.discovered_agents:
            if str(agent.get('id')) == output.recommended_entity_id:
                actual_name = agent.get('name', '')
                if output.recommended_entity_name != actual_name:
                    raise ValueError(
                        f"CRITICAL VALIDATION ERROR: recommended_entity_name '{output.recommended_entity_name}' "
                        f"does NOT match actual agent name '{actual_name}' for ID {output.recommended_entity_id}!"
                    )
                break
    elif output.recommended_entity_type == 'team':
        for team in output.discovered_teams:
            if str(team.get('id')) == output.recommended_entity_id:
                actual_name = team.get('name', '')
                if output.recommended_entity_name != actual_name:
                    raise ValueError(
                        f"CRITICAL VALIDATION ERROR: recommended_entity_name '{output.recommended_entity_name}' "
                        f"does NOT match actual team name '{actual_name}' for ID {output.recommended_entity_id}!"
                    )
                break

    logger.info(
        "resource_discovery_validation_passed",
        entity_type=output.recommended_entity_type,
        entity_id=output.recommended_entity_id[:12] if output.recommended_entity_id else None,
        entity_name=output.recommended_entity_name,
        validation_checks_passed=["uuid_format", "id_exists", "name_matches"]
    )


class FastSelectionOutput(BaseModel):
    """
    Fast selection output for --local mode - minimal fields for quick execution.
    Reuses the same validators as ResourceDiscoveryOutput to prevent hallucination.

    This schema is used by the 1-step fast workflow for CLI --local mode.
    """

    discovered_agents: List[Dict[str, Any]] = Field(
        default_factory=list,
        description="List of agents found using list_agents() tool (optional if outer context provided)"
    )
    discovered_teams: List[Dict[str, Any]] = Field(
        default_factory=list,
        description="List of teams found using list_teams() tool (optional if outer context provided)"
    )
    discovered_environments: List[Dict[str, Any]] = Field(
        default_factory=list,
        description="List of environments found using list_environments() tool"
    )
    discovered_worker_queues: List[Dict[str, Any]] = Field(
        default_factory=list,
        description="List of worker queues found using list_worker_queues() tool"
    )

    recommended_entity_type: str = Field(..., description="'agent' or 'team'")
    recommended_entity_id: str = Field(..., description="UUID of agent/team from discovered list")
    recommended_entity_name: str = Field(..., description="Name of agent/team from discovered list")

    recommended_environment_id: Optional[str] = Field(None, description="UUID of environment from discovered list (optional - use outer context if available)")
    recommended_environment_name: Optional[str] = Field(None, description="Name of environment from discovered list (optional - use outer context if available)")

    recommended_worker_queue_id: Optional[str] = Field(None, description="UUID of worker queue from discovered list (optional)")
    recommended_worker_queue_name: Optional[str] = Field(None, description="Name of worker queue from discovered list (optional)")

    reasoning: str = Field(..., description="Brief 1-sentence explanation of selection")

    # Reuse the same validators from ResourceDiscoveryOutput to prevent hallucination!

    @field_validator('recommended_entity_id')
    @classmethod
    def validate_entity_id_exists(cls, v, info):
        """CRITICAL: Validate that recommended ID is a UUID and exists in discovered lists"""
        # CRITICAL: Validate UUID format first
        try:
            uuid.UUID(v)
        except (ValueError, AttributeError, TypeError):
            raise ValueError(
                f"recommended_entity_id '{v}' is NOT a valid UUID! "
                f"It appears to be a name instead of an ID. "
                f"You MUST use the 'id' field (UUID) from tool results or outer context, NOT the 'name' field! "
                f"Common mistake: using agent['name'] instead of agent['id']. "
                f"UUID format: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx (36 characters with dashes)"
            )

        discovered_agents = info.data.get('discovered_agents', [])
        discovered_teams = info.data.get('discovered_teams', [])
        entity_type = info.data.get('recommended_entity_type', '')

        # Check if at least one discovery was made
        if not discovered_agents and not discovered_teams:
            raise ValueError(
                "Cannot recommend an entity when no agents or teams were discovered. "
                "You MUST call list_agents() or list_teams() or use outer context!"
            )

        # Validate ID exists in the appropriate list
        if entity_type == 'agent':
            agent_ids = [str(a.get('id', '')) for a in discovered_agents if a.get('id')]
            if v not in agent_ids:
                raise ValueError(
                    f"Recommended agent_id '{v}' does not exist in discovered_agents. "
                    f"Available agent IDs: {agent_ids}. "
                    f"You MUST choose from the agents returned by tools, not make up an ID!"
                )
        elif entity_type == 'team':
            team_ids = [str(t.get('id', '')) for t in discovered_teams if t.get('id')]
            if v not in team_ids:
                raise ValueError(
                    f"Recommended team_id '{v}' does not exist in discovered_teams. "
                    f"Available team IDs: {team_ids}. "
                    f"You MUST choose from the teams returned by tools, not make up an ID!"
                )
        else:
            raise ValueError(
                f"recommended_entity_type must be 'agent' or 'team', got '{entity_type}'"
            )

        return v

    @field_validator('recommended_entity_name')
    @classmethod
    def validate_entity_name_matches(cls, v, info):
        """Validate that recommended name matches the entity from discovered lists"""
        discovered_agents = info.data.get('discovered_agents', [])
        discovered_teams = info.data.get('discovered_teams', [])
        entity_type = info.data.get('recommended_entity_type', '')
        entity_id = info.data.get('recommended_entity_id', '')

        # Find the entity and verify name matches
        if entity_type == 'agent':
            for agent in discovered_agents:
                if str(agent.get('id')) == entity_id:
                    actual_name = agent.get('name', '')
                    if v != actual_name:
                        raise ValueError(
                            f"Recommended name '{v}' does not match actual agent name '{actual_name}'. "
                            f"You MUST use the exact name from tool results!"
                        )
                    break
        elif entity_type == 'team':
            for team in discovered_teams:
                if str(team.get('id')) == entity_id:
                    actual_name = team.get('name', '')
                    if v != actual_name:
                        raise ValueError(
                            f"Recommended name '{v}' does not match actual team name '{actual_name}'. "
                            f"You MUST use the exact name from tool results!"
                        )
                    break

        return v

    @field_validator('recommended_environment_id')
    @classmethod
    def validate_environment_id_exists(cls, v, info):
        """CRITICAL: Validate that recommended environment ID exists in discovered list"""
        if v is None:
            return v  # Optional field - allow None

        discovered_environments = info.data.get('discovered_environments', [])

        if not discovered_environments:
            raise ValueError(
                "Cannot recommend an environment when no environments were discovered. "
                "Either call list_environments() tool or use outer context environments!"
            )

        env_ids = [str(e.get('id', '')) for e in discovered_environments if e.get('id')]
        if v not in env_ids:
            raise ValueError(
                f"CRITICAL: recommended_environment_id '{v}' does NOT exist in discovered_environments! "
                f"Available environment IDs (UUIDs): {env_ids}. "
                f"You MUST use an actual UUID from the discovered list, NOT a name!"
            )

        return v

    @field_validator('recommended_environment_name')
    @classmethod
    def validate_environment_name_matches(cls, v, info):
        """Validate that recommended environment name matches the ID from discovered list"""
        if v is None:
            return v  # Optional field - allow None

        discovered_environments = info.data.get('discovered_environments', [])
|
|
623
|
+
environment_id = info.data.get('recommended_environment_id', '')
|
|
624
|
+
|
|
625
|
+
# Find the environment and verify name matches
|
|
626
|
+
for env in discovered_environments:
|
|
627
|
+
if str(env.get('id')) == environment_id:
|
|
628
|
+
actual_name = env.get('name', '')
|
|
629
|
+
if v != actual_name:
|
|
630
|
+
raise ValueError(
|
|
631
|
+
f"Recommended environment name '{v}' does not match actual name '{actual_name}' for ID {environment_id}. "
|
|
632
|
+
f"You MUST use the exact name from list_environments() tool results!"
|
|
633
|
+
)
|
|
634
|
+
break
|
|
635
|
+
|
|
636
|
+
return v
|
|
637
|
+
|
|
638
|
+
@field_validator('recommended_worker_queue_id')
|
|
639
|
+
@classmethod
|
|
640
|
+
def validate_worker_queue_id_exists(cls, v, info):
|
|
641
|
+
"""CRITICAL: Validate that recommended worker queue ID exists in discovered list"""
|
|
642
|
+
if v is None:
|
|
643
|
+
return v # Optional field
|
|
644
|
+
|
|
645
|
+
discovered_worker_queues = info.data.get('discovered_worker_queues', [])
|
|
646
|
+
|
|
647
|
+
if not discovered_worker_queues:
|
|
648
|
+
raise ValueError(
|
|
649
|
+
"Cannot recommend a worker queue when no queues were discovered. "
|
|
650
|
+
"You MUST call list_worker_queues() tool first!"
|
|
651
|
+
)
|
|
652
|
+
|
|
653
|
+
queue_ids = [str(q.get('id', '')) for q in discovered_worker_queues if q.get('id')]
|
|
654
|
+
if v not in queue_ids:
|
|
655
|
+
raise ValueError(
|
|
656
|
+
f"CRITICAL: recommended_worker_queue_id '{v}' does NOT exist in discovered_worker_queues! "
|
|
657
|
+
f"Available worker queue IDs (UUIDs): {queue_ids}. "
|
|
658
|
+
f"You MUST use an actual UUID from the list_worker_queues() tool result, NOT a name!"
|
|
659
|
+
)
|
|
660
|
+
|
|
661
|
+
return v
|
|
662
|
+
|
|
663
|
+
@field_validator('recommended_worker_queue_name')
|
|
664
|
+
@classmethod
|
|
665
|
+
def validate_worker_queue_name_matches(cls, v, info):
|
|
666
|
+
"""Validate that recommended worker queue name matches the ID from discovered list"""
|
|
667
|
+
if v is None:
|
|
668
|
+
return v # Optional field
|
|
669
|
+
|
|
670
|
+
discovered_worker_queues = info.data.get('discovered_worker_queues', [])
|
|
671
|
+
queue_id = info.data.get('recommended_worker_queue_id', '')
|
|
672
|
+
|
|
673
|
+
# Find the queue and verify name matches
|
|
674
|
+
for queue in discovered_worker_queues:
|
|
675
|
+
if str(queue.get('id')) == queue_id:
|
|
676
|
+
actual_name = queue.get('name', '')
|
|
677
|
+
if v != actual_name:
|
|
678
|
+
raise ValueError(
|
|
679
|
+
f"Recommended worker queue name '{v}' does not match actual name '{actual_name}' for ID {queue_id}. "
|
|
680
|
+
f"You MUST use the exact name from list_worker_queues() tool results!"
|
|
681
|
+
)
|
|
682
|
+
break
|
|
683
|
+
|
|
684
|
+
return v
|
|
685
|
+
|
|
686
|
+
|
|
687
|
+
class CostEstimationOutput(BaseModel):
|
|
688
|
+
"""Output from Step 3: Cost Estimation"""
|
|
689
|
+
|
|
690
|
+
estimated_tokens_input: int = Field(description="Estimated input tokens")
|
|
691
|
+
estimated_tokens_output: int = Field(description="Estimated output tokens")
|
|
692
|
+
estimated_llm_cost: float = Field(description="Estimated LLM API cost in USD")
|
|
693
|
+
estimated_tool_cost: float = Field(description="Estimated tool execution cost in USD")
|
|
694
|
+
estimated_runtime_cost: float = Field(description="Estimated worker runtime cost in USD")
|
|
695
|
+
total_cost: float = Field(description="Total estimated cost in USD")
|
|
696
|
+
estimated_time_hours: float = Field(description="Estimated execution time in hours")
|
|
697
|
+
|
|
698
|
+
# Savings calculation
|
|
699
|
+
manual_cost: float = Field(description="Cost if done manually by humans")
|
|
700
|
+
manual_time_hours: float = Field(description="Time if done manually in hours")
|
|
701
|
+
money_saved: float = Field(description="Money saved by using AI")
|
|
702
|
+
time_saved_hours: float = Field(description="Time saved in hours")
|
|
703
|
+
savings_percentage: float = Field(description="Percentage of time saved")
|
|
704
|
+
|
|
705
|
+
reasoning: str = Field(description="Explanation of cost calculations")
|
|
706
|
+
|
|
707
|
+
|
|
708
|
+
# ============================================================================
|
|
709
|
+
# Workflow Step Agents
|
|
710
|
+
# ============================================================================
|
|
711
|
+
|
|
712
|
+
def create_analysis_and_selection_agent(
|
|
713
|
+
model: LiteLLM,
|
|
714
|
+
planning_toolkit: 'PlanningToolkit',
|
|
715
|
+
outer_context: Optional[Dict[str, Any]] = None
|
|
716
|
+
) -> Agent:
|
|
717
|
+
"""
|
|
718
|
+
NEW Step 1: Task Analysis & Resource Selection (2-Step Workflow)
|
|
719
|
+
|
|
720
|
+
Combines old Step 1 (Task Analyzer) + Step 2 (Resource Discoverer)
|
|
721
|
+
into single efficient agent for the simplified 2-step workflow.
|
|
722
|
+
|
|
723
|
+
Pre-fetched data (top 20 agents/teams/envs) provided in outer_context.
|
|
724
|
+
Tools available if agent needs to search for more specific matches.
|
|
725
|
+
|
|
726
|
+
This function replaces:
|
|
727
|
+
- create_task_analysis_agent() (old Step 1)
|
|
728
|
+
- create_resource_discovery_agent() (old Step 2)
|
|
729
|
+
"""
|
|
730
|
+
from agno.tools.function import Function
|
|
731
|
+
import json
|
|
732
|
+
from control_plane_api.app.lib.planning_tools.agno_toolkit import PlanningToolkit
|
|
733
|
+
|
|
734
|
+
# Provide hybrid approach: pre-fetched data + tool access
|
|
735
|
+
toolkit_tools = []
|
|
736
|
+
|
|
737
|
+
# Add synthetic tools for pre-fetched data (instant access)
|
|
738
|
+
if outer_context:
|
|
739
|
+
if outer_context.get("agents"):
|
|
740
|
+
def get_top_agents() -> str:
|
|
741
|
+
"""Get top 20 pre-fetched agents (instant, no API call).
|
|
742
|
+
Use this first before calling search tools."""
|
|
743
|
+
return json.dumps({
|
|
744
|
+
"success": True,
|
|
745
|
+
"data": {
|
|
746
|
+
"agents": outer_context["agents"],
|
|
747
|
+
"count": len(outer_context["agents"]),
|
|
748
|
+
"note": "Top 20 agents. Use search_agents_by_capability() if you need more."
|
|
749
|
+
}
|
|
750
|
+
}, indent=2)
|
|
751
|
+
toolkit_tools.append(Function.from_callable(get_top_agents))
|
|
752
|
+
|
|
753
|
+
if outer_context.get("teams"):
|
|
754
|
+
def get_top_teams() -> str:
|
|
755
|
+
"""Get top 20 pre-fetched teams (instant, no API call).
|
|
756
|
+
Use this first before calling search tools."""
|
|
757
|
+
return json.dumps({
|
|
758
|
+
"success": True,
|
|
759
|
+
"data": {
|
|
760
|
+
"teams": outer_context["teams"],
|
|
761
|
+
"count": len(outer_context["teams"]),
|
|
762
|
+
"note": "Top 20 teams. Use search_teams_by_capability() if you need more."
|
|
763
|
+
}
|
|
764
|
+
}, indent=2)
|
|
765
|
+
toolkit_tools.append(Function.from_callable(get_top_teams))
|
|
766
|
+
|
|
767
|
+
if outer_context.get("environments"):
|
|
768
|
+
def get_top_environments() -> str:
|
|
769
|
+
"""Get top 20 pre-fetched environments (instant, no API call)."""
|
|
770
|
+
return json.dumps({
|
|
771
|
+
"success": True,
|
|
772
|
+
"data": {
|
|
773
|
+
"environments": outer_context["environments"],
|
|
774
|
+
"count": len(outer_context["environments"]),
|
|
775
|
+
"note": "Top 20 environments."
|
|
776
|
+
}
|
|
777
|
+
}, indent=2)
|
|
778
|
+
toolkit_tools.append(Function.from_callable(get_top_environments))
|
|
779
|
+
|
|
780
|
+
if outer_context.get("worker_queues"):
|
|
781
|
+
def get_top_worker_queues() -> str:
|
|
782
|
+
"""Get top 20 pre-fetched worker queues (instant, no API call)."""
|
|
783
|
+
return json.dumps({
|
|
784
|
+
"success": True,
|
|
785
|
+
"data": {
|
|
786
|
+
"worker_queues": outer_context["worker_queues"],
|
|
787
|
+
"count": len(outer_context["worker_queues"]),
|
|
788
|
+
"note": "Top 20 worker queues."
|
|
789
|
+
}
|
|
790
|
+
}, indent=2)
|
|
791
|
+
toolkit_tools.append(Function.from_callable(get_top_worker_queues))
|
|
792
|
+
|
|
793
|
+
# Also add real search tools for when top 20 isn't enough
|
|
794
|
+
if planning_toolkit and hasattr(planning_toolkit, 'functions'):
|
|
795
|
+
if "search_agents_by_capability" in planning_toolkit.functions:
|
|
796
|
+
toolkit_tools.append(planning_toolkit.functions["search_agents_by_capability"])
|
|
797
|
+
if "search_teams_by_capability" in planning_toolkit.functions:
|
|
798
|
+
toolkit_tools.append(planning_toolkit.functions["search_teams_by_capability"])
|
|
799
|
+
if "get_agent_details" in planning_toolkit.functions:
|
|
800
|
+
toolkit_tools.append(planning_toolkit.functions["get_agent_details"])
|
|
801
|
+
if "get_team_details" in planning_toolkit.functions:
|
|
802
|
+
toolkit_tools.append(planning_toolkit.functions["get_team_details"])
|
|
803
|
+
# PHASE 1 IMPROVEMENT: Add fallback tool to ensure agent never returns None
|
|
804
|
+
if "get_fallback_agent" in planning_toolkit.functions:
|
|
805
|
+
toolkit_tools.append(planning_toolkit.functions["get_fallback_agent"])
|
|
806
|
+
|
|
807
|
+
return Agent(
|
|
808
|
+
name="Task Analyzer & Resource Selector",
|
|
809
|
+
role="Analyze task requirements and select best execution resources",
|
|
810
|
+
model=model,
|
|
811
|
+
output_schema=AnalysisAndSelectionOutput,
|
|
812
|
+
tools=toolkit_tools,
|
|
813
|
+
instructions=[
|
|
814
|
+
"You analyze tasks and select the best agent/team to execute them.",
|
|
815
|
+
"",
|
|
816
|
+
"## Step 1: Analyze Task",
|
|
817
|
+
"Based on the task description:",
|
|
818
|
+
"- Identify required capabilities (e.g., kubernetes, aws, python)",
|
|
819
|
+
"- Estimate complexity (story points 1-21)",
|
|
820
|
+
"- Determine if single agent or team needed",
|
|
821
|
+
"- Calculate basic cost estimate (tokens + runtime)",
|
|
822
|
+
"",
|
|
823
|
+
"## Step 2: Select Resources",
|
|
824
|
+
"1. START with pre-fetched data (get_top_agents/teams) - these are instant",
|
|
825
|
+
"2. If top 20 has good match → select it",
|
|
826
|
+
"3. If top 20 doesn't match → use search_agents_by_capability('skill') tool",
|
|
827
|
+
"4. Select best agent or team based on name, description, and capabilities",
|
|
828
|
+
"",
|
|
829
|
+
"**CRITICAL - ALWAYS SELECT FROM AVAILABLE LIST:**",
|
|
830
|
+
"- You MUST select an agent or team from the ACTUAL results (selected_entity_id cannot be None/null)",
|
|
831
|
+
"- NEVER invent or hallucinate UUIDs - ONLY use IDs from tool call results",
|
|
832
|
+
"- If no perfect match: Pick the FIRST available agent or team from the search results",
|
|
833
|
+
"- If you truly cannot find ANY agents/teams, use get_fallback_agent() tool",
|
|
834
|
+
"- Use the EXACT UUID from the tool response - copy it character-for-character",
|
|
835
|
+
"- Explain in 'selection_reasoning' why you chose this agent/team",
|
|
836
|
+
"",
|
|
837
|
+
"## Selection Examples:",
|
|
838
|
+
"",
|
|
839
|
+
"**Example 1: Perfect Match**",
|
|
840
|
+
"Task: Deploy Kubernetes cluster",
|
|
841
|
+
"Action: search_agents_by_capability('kubernetes')",
|
|
842
|
+
"Result: Found DevOps Agent with kubernetes capability",
|
|
843
|
+
"Selection: DevOps Agent (perfect match)",
|
|
844
|
+
"",
|
|
845
|
+
"**Example 2: No Perfect Match - Pick Best Available**",
|
|
846
|
+
"Task: List files in directory",
|
|
847
|
+
"Action: search_agents_by_capability('shell')",
|
|
848
|
+
"Result: System Engineer, Platform Engineer",
|
|
849
|
+
"Selection: System Engineer (has shell, filesystem, os capabilities)",
|
|
850
|
+
"Reasoning: Best capability match for filesystem operations",
|
|
851
|
+
"",
|
|
852
|
+
"**Example 3: No Search Results - Use Fallback**",
|
|
853
|
+
"Task: Debug application",
|
|
854
|
+
"Action: search_agents_by_capability('debug')",
|
|
855
|
+
"Result: Empty",
|
|
856
|
+
"Action: get_top_agents()",
|
|
857
|
+
"Result: Empty or no relevant agents",
|
|
858
|
+
"Action: get_fallback_agent()",
|
|
859
|
+
"Result: General Agent",
|
|
860
|
+
"Selection: General Agent (fallback)",
|
|
861
|
+
"Reasoning: No specific match found, using general-purpose agent as fallback",
|
|
862
|
+
"",
|
|
863
|
+
"**Example 4: Team vs Single Agent**",
|
|
864
|
+
"Task: Deploy app and monitor it",
|
|
865
|
+
"Available: DevOps Team (deploy, monitor), Deploy Agent (deploy only)",
|
|
866
|
+
"Selection: DevOps Team (covers both requirements)",
|
|
867
|
+
"Reasoning: Team provides complete coverage vs single agent",
|
|
868
|
+
"",
|
|
869
|
+
"## Step 3: Select Environment & Queue",
|
|
870
|
+
"- **CRITICAL**: ALWAYS select an environment (required for execution)",
|
|
871
|
+
"- Check pre-fetched environments (get_top_environments)",
|
|
872
|
+
"- Pick first available environment (NEVER use None)",
|
|
873
|
+
"- **CRITICAL**: Worker Queue Selection Rules:",
|
|
874
|
+
" 1. PREFER queues with active_workers > 0 (can execute immediately)",
|
|
875
|
+
" 2. If NO queues have active workers, still pick one (DON'T use None/ephemeral)",
|
|
876
|
+
" 3. Add to 'reasoning' field: Warn that selected queue has no active workers",
|
|
877
|
+
" 4. Check the 'active_workers' field in worker queue data",
|
|
878
|
+
" 5. Select based on: active_workers DESC, then pick first available",
|
|
879
|
+
"",
|
|
880
|
+
"**SPEED RULES:**",
|
|
881
|
+
"- Use get_top_* tools FIRST (instant, pre-fetched)",
|
|
882
|
+
"- Only call search_* tools if top 20 doesn't match",
|
|
883
|
+
"- Don't overthink - first good match is fine",
|
|
884
|
+
"- Target: Complete in 20-30 seconds",
|
|
885
|
+
"",
|
|
886
|
+
"**CRITICAL - Use Real UUIDs:**",
|
|
887
|
+
"- Copy EXACT UUIDs from tool results (format: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx)",
|
|
888
|
+
"- NEVER invent IDs like 'agent-001' or 'team-xyz'",
|
|
889
|
+
"",
|
|
890
|
+
"**Cost Estimation:**",
|
|
891
|
+
"- Simple tasks (1-3 points): $0.05-0.15",
|
|
892
|
+
"- Medium tasks (5-8 points): $0.15-0.50",
|
|
893
|
+
"- Complex tasks (13-21 points): $0.50-2.00",
|
|
894
|
+
"- Time estimate: story_points × 0.25 hours (adjust based on complexity)",
|
|
895
|
+
"",
|
|
896
|
+
"**OUTPUT FORMAT - CRITICAL:**",
|
|
897
|
+
"- Output ONLY the structured JSON data",
|
|
898
|
+
"- DO NOT add explanatory text before the JSON",
|
|
899
|
+
"- DO NOT add markdown code blocks (no ```json)",
|
|
900
|
+
"- DO NOT add reasoning or thinking before the JSON structure",
|
|
901
|
+
"- Start your response directly with the opening brace: {",
|
|
902
|
+
"- Your entire response must be valid JSON matching the schema",
|
|
903
|
+
],
|
|
904
|
+
markdown=False,
|
|
905
|
+
)
|
|
906
|
+
|
|
907
|
+
|
|
908
|
+
async def create_analysis_and_selection_agent_claude_code(
|
|
909
|
+
planning_toolkit: 'PlanningToolkit',
|
|
910
|
+
model_name: str = "claude-sonnet-4",
|
|
911
|
+
outer_context: Optional[Dict[str, Any]] = None
|
|
912
|
+
) -> Dict[str, Any]:
|
|
913
|
+
"""
|
|
914
|
+
Step 1 using Claude Code SDK: Task Analysis & Resource Selection
|
|
915
|
+
|
|
916
|
+
Uses Claude Code SDK instead of Agno for more intelligent tool usage and better
|
|
917
|
+
instruction following. Claude Code has proven better at complex tool orchestration.
|
|
918
|
+
|
|
919
|
+
Returns the AnalysisAndSelectionOutput as a dictionary.
|
|
920
|
+
"""
|
|
921
|
+
try:
|
|
922
|
+
from claude_agent_sdk import ClaudeSDKClient
|
|
923
|
+
from claude_agent_sdk.types import ClaudeAgentOptions
|
|
924
|
+
except ImportError as e:
|
|
925
|
+
logger.error("claude_sdk_not_available", error=str(e))
|
|
926
|
+
raise ValueError(f"Claude Code SDK not available: {e}")
|
|
927
|
+
|
|
928
|
+
# Build system prompt with clear instructions
|
|
929
|
+
system_prompt = """You are an intelligent agent selection system. Your goal is to analyze tasks and select the BEST agent or team to execute them.
|
|
930
|
+
|
|
931
|
+
## Your Process:
|
|
932
|
+
|
|
933
|
+
1. **Analyze the task**:
|
|
934
|
+
- Identify required capabilities (e.g., kubernetes, aws, python)
|
|
935
|
+
- Estimate complexity (story points 1-21)
|
|
936
|
+
- Determine if single agent or team needed
|
|
937
|
+
|
|
938
|
+
2. **Discover available resources**:
|
|
939
|
+
- Use search_agents_by_capability(skill) to find agents
|
|
940
|
+
- Use search_teams_by_capability(skill) to find teams
|
|
941
|
+
- Use get_fallback_agent() if no matches found
|
|
942
|
+
|
|
943
|
+
3. **Select the best match**:
|
|
944
|
+
- NEVER return None for selected_entity_id
|
|
945
|
+
- ALWAYS use ACTUAL UUIDs from tool results
|
|
946
|
+
- NEVER invent or hallucinate IDs
|
|
947
|
+
- If no perfect match: use get_fallback_agent()
|
|
948
|
+
|
|
949
|
+
4. **Return structured output**:
|
|
950
|
+
You MUST return a JSON object with these fields:
|
|
951
|
+
{
|
|
952
|
+
"task_summary": "Brief summary",
|
|
953
|
+
"required_capabilities": ["skill1", "skill2"],
|
|
954
|
+
"task_type": "deployment|investigation|automation",
|
|
955
|
+
"complexity_estimate": "simple|moderate|complex",
|
|
956
|
+
"story_points_estimate": 1-21,
|
|
957
|
+
"needs_multi_agent": false,
|
|
958
|
+
"selected_entity_type": "agent|team",
|
|
959
|
+
"selected_entity_id": "ACTUAL-UUID-FROM-TOOL-RESULT",
|
|
960
|
+
"selected_entity_name": "Name from tool result",
|
|
961
|
+
"selection_reasoning": "Why you chose this agent/team",
|
|
962
|
+
"selected_environment_id": null,
|
|
963
|
+
"selected_environment_name": null,
|
|
964
|
+
"selected_worker_queue_id": null,
|
|
965
|
+
"selected_worker_queue_name": null,
|
|
966
|
+
"estimated_cost_usd": 0.05,
|
|
967
|
+
"estimated_time_hours": 0.5,
|
|
968
|
+
"discovered_agents": [],
|
|
969
|
+
"discovered_teams": []
|
|
970
|
+
}
|
|
971
|
+
|
|
972
|
+
## CRITICAL RULES:
|
|
973
|
+
|
|
974
|
+
- Use get_fallback_agent() if searches return empty
|
|
975
|
+
- Copy EXACT UUID from tool results (format: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx)
|
|
976
|
+
- NEVER invent IDs like 'fallback-general-agent' or 'agent-001'
|
|
977
|
+
- selected_entity_id MUST be a valid UUID from tool results"""
|
|
978
|
+
|
|
979
|
+
# Build MCP server from planning toolkit
|
|
980
|
+
mcp_servers = {}
|
|
981
|
+
if planning_toolkit:
|
|
982
|
+
# Convert planning toolkit to simple MCP server configuration
|
|
983
|
+
tools_list = []
|
|
984
|
+
for tool_name in ['search_agents_by_capability', 'search_teams_by_capability',
|
|
985
|
+
'get_agent_details', 'get_team_details', 'get_fallback_agent']:
|
|
986
|
+
if tool_name in planning_toolkit.functions:
|
|
987
|
+
tool_func = planning_toolkit.functions[tool_name]
|
|
988
|
+
tools_list.append({
|
|
989
|
+
"name": tool_name,
|
|
990
|
+
"function": tool_func
|
|
991
|
+
})
|
|
992
|
+
|
|
993
|
+
logger.info("claude_code_step1_tools_prepared", tool_count=len(tools_list))
|
|
994
|
+
|
|
995
|
+
# Create Claude options with tools as native Python functions
|
|
996
|
+
# For simplicity, we'll use tools directly instead of MCP servers
|
|
997
|
+
from agno.tools.function import Function
|
|
998
|
+
|
|
999
|
+
# Get tools from planning toolkit
|
|
1000
|
+
toolkit_tools = []
|
|
1001
|
+
if planning_toolkit:
|
|
1002
|
+
for tool_name in ['search_agents_by_capability', 'search_teams_by_capability',
|
|
1003
|
+
'get_agent_details', 'get_team_details', 'get_fallback_agent']:
|
|
1004
|
+
if tool_name in planning_toolkit.functions:
|
|
1005
|
+
toolkit_tools.append(planning_toolkit.functions[tool_name])
|
|
1006
|
+
|
|
1007
|
+
logger.info("claude_code_step1_starting",
|
|
1008
|
+
tool_count=len(toolkit_tools),
|
|
1009
|
+
model=model_name)
|
|
1010
|
+
|
|
1011
|
+
# For now, return None to indicate Claude Code is not yet fully implemented
|
|
1012
|
+
# We'll continue using Agno until Claude Code integration is complete
|
|
1013
|
+
return None
|
|
1014
|
+
|
|
1015
|
+
|
|
1016
|
+
def create_task_analysis_agent(model: LiteLLM) -> Agent:
|
|
1017
|
+
"""
|
|
1018
|
+
Step 1: Task Analysis Agent
|
|
1019
|
+
|
|
1020
|
+
Analyzes the task description and identifies:
|
|
1021
|
+
- Required capabilities and skills
|
|
1022
|
+
- Task type and complexity
|
|
1023
|
+
- Whether multi-agent coordination is needed
|
|
1024
|
+
"""
|
|
1025
|
+
return Agent(
|
|
1026
|
+
name="Task Analyzer",
|
|
1027
|
+
role="Expert at understanding task requirements and complexity",
|
|
1028
|
+
model=model,
|
|
1029
|
+
output_schema=TaskAnalysisOutput,
|
|
1030
|
+
instructions=[
|
|
1031
|
+
"You analyze task descriptions to understand what's needed.",
|
|
1032
|
+
"",
|
|
1033
|
+
"**Your Responsibilities:**",
|
|
1034
|
+
"1. Read the task description carefully",
|
|
1035
|
+
"2. Identify what capabilities/skills are required (AWS, Kubernetes, Python, etc.)",
|
|
1036
|
+
"3. Determine the task type (deployment, analysis, automation, etc.)",
|
|
1037
|
+
"4. Assess complexity on the Fibonacci scale (1, 2, 3, 5, 8, 13, 21)",
|
|
1038
|
+
"5. Decide if this needs a single agent or multiple agents (team)",
|
|
1039
|
+
"",
|
|
1040
|
+
"**Complexity Guidelines:**",
|
|
1041
|
+
"- 1-3 points: Simple tasks (list files, basic queries, single API calls)",
|
|
1042
|
+
"- 5-8 points: Medium tasks (deployments, multi-step operations, data processing)",
|
|
1043
|
+
"- 13-21 points: Complex tasks (multi-system integrations, migrations, deep analysis)",
|
|
1044
|
+
"",
|
|
1045
|
+
"**Multi-Agent Assessment:**",
|
|
1046
|
+
"- Single agent: Task has clear single domain (just AWS, just Kubernetes, etc.)",
|
|
1047
|
+
"- Team needed: Task spans multiple domains (AWS + Kubernetes, monitoring + alerting, etc.)",
|
|
1048
|
+
"",
|
|
1049
|
+
"**Output:**",
|
|
1050
|
+
"Provide a clear analysis with reasoning so the next step knows what to search for.",
|
|
1051
|
+
],
|
|
1052
|
+
markdown=False,
|
|
1053
|
+
)
|
|
1054
|
+
|
|
1055
|
+
|
|
1056
|
+
def create_resource_discovery_agent(
|
|
1057
|
+
model: LiteLLM,
|
|
1058
|
+
db: Session,
|
|
1059
|
+
organization_id: str,
|
|
1060
|
+
api_token: str,
|
|
1061
|
+
outer_context: Optional[Dict[str, Any]] = None
|
|
1062
|
+
) -> Agent:
|
|
1063
|
+
"""
|
|
1064
|
+
Step 2: Resource Discovery Agent
|
|
1065
|
+
|
|
1066
|
+
Uses planning toolkit to find agents/teams with required capabilities.
|
|
1067
|
+
Takes output from Task Analysis step.
|
|
1068
|
+
|
|
1069
|
+
If outer_context is provided (from CLI), uses pre-filtered agents/teams instead of discovery.
|
|
1070
|
+
This supports --local mode and explicit agent selection.
|
|
1071
|
+
|
|
1072
|
+
CRITICAL: Tools use direct DB access (no HTTP self-calls) for performance.
|
|
1073
|
+
Context graph still uses HTTP to external service (correct pattern).
|
|
1074
|
+
"""
|
|
1075
|
+
# Create planning toolkit with DB access (internal services, no HTTP)
|
|
1076
|
+
planning_toolkit = PlanningToolkit(
|
|
1077
|
+
db=db,
|
|
1078
|
+
organization_id=organization_id,
|
|
1079
|
+
api_token=api_token
|
|
1080
|
+
)
|
|
1081
|
+
|
|
1082
|
+
# Extract individual Function objects from toolkit (CRITICAL!)
|
|
1083
|
+
# This is how Agno expects tools - as a list of Function objects
|
|
1084
|
+
toolkit_tools = list(planning_toolkit.functions.values()) if hasattr(planning_toolkit, 'functions') else []
|
|
1085
|
+
|
|
1086
|
+
# Check if outer context is provided with pre-filtered resources
|
|
1087
|
+
has_outer_agents = outer_context and outer_context.get("agents")
|
|
1088
|
+
has_outer_teams = outer_context and outer_context.get("teams")
|
|
1089
|
+
has_outer_context = has_outer_agents or has_outer_teams
|
|
1090
|
+
|
|
1091
|
+
# Optimization: Skip environment/queue tools when in local mode with outer context
|
|
1092
|
+
# Local mode will create ephemeral queue automatically, so discovery is unnecessary
|
|
1093
|
+
if has_outer_context:
|
|
1094
|
+
toolkit_tools = [
|
|
1095
|
+
t for t in toolkit_tools
|
|
1096
|
+
if t.name not in ["list_environments", "list_worker_queues"]
|
|
1097
|
+
]
|
|
1098
|
+
logger.info("conditional_tools_filtered", removed=["list_environments", "list_worker_queues"])
|
|
1099
|
+
|
|
1100
|
+
logger.info(
|
|
1101
|
+
"resource_discovery_agent_created",
|
|
1102
|
+
tool_count=len(toolkit_tools),
|
|
1103
|
+
has_outer_context=has_outer_context,
|
|
1104
|
+
outer_agents_count=len(outer_context.get("agents", [])) if outer_context else 0,
|
|
1105
|
+
outer_teams_count=len(outer_context.get("teams", [])) if outer_context else 0,
|
|
1106
|
+
organization_id=organization_id[:8]
|
|
1107
|
+
)
|
|
1108
|
+
|
|
1109
|
+
# Build instructions based on whether outer context is provided
|
|
1110
|
+
base_instructions = [
|
|
1111
|
+
"You find the best agents or teams for a task and select from available options.",
|
|
1112
|
+
"",
|
|
1113
|
+
"🚨 CRITICAL VALIDATION 🚨",
|
|
1114
|
+
"Your output is STRICTLY VALIDATED:",
|
|
1115
|
+
"1. recommended_entity_id MUST be a UUID (36-char format: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx)",
|
|
1116
|
+
"2. recommended_entity_id MUST come from the 'id' field (NOT the 'name' field!) of discovered entities",
|
|
1117
|
+
"3. recommended_entity_name MUST come from the 'name' field and exactly match the entity's name",
|
|
1118
|
+
"4. discovered_agents/discovered_teams MUST contain actual data from tool calls",
|
|
1119
|
+
"5. If you use 'name' for entity_id, or hallucinate ANY ID/name, your response will be REJECTED",
|
|
1120
|
+
"",
|
|
1121
|
+
"⚠️ COMMON MISTAKE: Using agent.name for entity_id instead of agent.id",
|
|
1122
|
+
"✅ CORRECT: entity_id = agent['id'] (UUID), entity_name = agent['name']",
|
|
1123
|
+
"❌ WRONG: entity_id = agent['name'] (this will fail UUID validation!)",
|
|
1124
|
+
"",
|
|
1125
|
+
]
|
|
1126
|
+
|
|
1127
|
+
# Add mode-specific instructions
|
|
1128
|
+
if has_outer_context:
|
|
1129
|
+
# Selection Mode: User provided pre-filtered agents/teams
|
|
1130
|
+
workflow_instructions = [
|
|
1131
|
+
"🎯 SELECTION MODE: The user has provided a PRE-FILTERED list of agents/teams.",
|
|
1132
|
+
"",
|
|
1133
|
+
"**CRITICAL: DO NOT call list_agents() or list_teams() tools!**",
|
|
1134
|
+
"The user has already done the filtering. You MUST select from this provided list:",
|
|
1135
|
+
"",
|
|
1136
|
+
]
|
|
1137
|
+
|
|
1138
|
+
# Format provided agents
|
|
1139
|
+
if has_outer_agents:
|
|
1140
|
+
workflow_instructions.append("**Available Agents (provided by user):**")
|
|
1141
|
+
for agent in outer_context.get("agents", []):
|
|
1142
|
+
workflow_instructions.append(
|
|
1143
|
+
f" - ID: {agent.get('id')}, Name: {agent.get('name')}, "
|
|
1144
|
+
f"Capabilities: {agent.get('capabilities', [])}, Status: {agent.get('status', 'active')}"
|
|
1145
|
+
)
|
|
1146
|
+
workflow_instructions.append("")
|
|
1147
|
+
|
|
1148
|
+
# Format provided teams
|
|
1149
|
+
if has_outer_teams:
|
|
1150
|
+
workflow_instructions.append("**Available Teams (provided by user):**")
|
|
1151
|
+
for team in outer_context.get("teams", []):
|
|
1152
|
+
workflow_instructions.append(
|
|
1153
|
+
f" - ID: {team.get('id')}, Name: {team.get('name')}, "
|
|
1154
|
+
f"Agent Count: {team.get('agent_count', 0)}, Status: {team.get('status', 'active')}"
|
|
1155
|
+
)
|
|
1156
|
+
workflow_instructions.append("")
|
|
1157
|
+
|
|
1158
|
+
workflow_instructions.extend([
|
|
1159
|
+
"**YOUR WORKFLOW:**",
|
|
1160
|
+
"1. Set discovered_agents = (list of agents from above - use the exact objects as shown)",
|
|
1161
|
+
"2. Set discovered_teams = (list of teams from above - use the exact objects as shown)",
|
|
1162
|
+
"3. SELECT the best match based on task requirements and capabilities",
|
|
1163
|
+
"4. Set recommended_entity_id = selected_entity['id'] ← MUST be UUID from 'id' field",
|
|
1164
|
+
"5. Set recommended_entity_name = selected_entity['name'] ← From 'name' field",
|
|
1165
|
+
"6. DO NOT call list_agents() or list_teams() - everything is already provided!",
|
|
1166
|
+
"7. Still call list_environments() and list_worker_queues() to select environment and queue",
|
|
1167
|
+
"",
|
|
1168
|
+
])
|
|
1169
|
+
else:
|
|
1170
|
+
# Discovery Mode: Call tools to discover agents/teams
|
|
1171
|
+
workflow_instructions = [
|
|
1172
|
+
"**WORKFLOW (YOU MUST FOLLOW THIS EXACTLY):**",
|
|
1173
|
+
"",
|
|
1174
|
+
"Step 1: CALL TOOLS to discover agents/teams",
|
|
1175
|
+
" - For single-domain tasks: call list_agents() or search_agents_by_capability('skill_name')",
|
|
1176
|
+
" - For multi-domain tasks: call list_teams() or search_teams_by_capability('skill_name')",
|
|
1177
|
+
" - Tools return JSON with 'data' field containing agents/teams",
|
|
1178
|
+
" - Each entity has BOTH: 'id' (UUID string) AND 'name' (display name)",
|
|
1179
|
+
"",
|
|
1180
|
+
"Step 2: PARSE tool results",
|
|
1181
|
+
" - Extract the 'agents' or 'teams' array from tool response data",
|
|
1182
|
+
" - Each agent/team object has: {'id': '<UUID>', 'name': '<name>', 'description': '...', ...}",
|
|
1183
|
+
" - Store complete objects in discovered_agents or discovered_teams field",
|
|
1184
|
+
"",
|
|
1185
|
+
"Step 3: SELECT best match",
|
|
1186
|
+
" - Compare agents/teams from tool results based on capabilities",
|
|
1187
|
+
" - Once you pick an entity, extract BOTH fields:",
|
|
1188
|
+
" • recommended_entity_id = selected_entity['id'] ← UUID from 'id' field",
|
|
1189
|
+
" • recommended_entity_name = selected_entity['name'] ← Name from 'name' field",
|
|
1190
|
+
" - ⚠️ DO NOT use selected_entity['name'] for entity_id!",
|
|
1191
|
+
"",
|
|
1192
|
+
"Step 4: VALIDATE before returning",
|
|
1193
|
+
" - Double-check: Is recommended_entity_id a UUID? (36 chars with dashes)",
|
|
1194
|
+
" - Double-check: Is the recommended_entity_id in your discovered list's 'id' fields?",
|
|
1195
|
+
" - Double-check: Does recommended_entity_name exactly match the entity's 'name' field?",
|
|
1196
|
+
" - If not, GO BACK and fix it using actual tool data",
|
|
1197
|
+
"",
|
|
1198
|
+
]
|
|
1199
|
+
|
|
1200
|
+
# Conditional instructions for environment and worker queue selection
|
|
1201
|
+
# Only include when NOT in local mode (outer_context present means local/CLI execution)
|
|
1202
|
+
if has_outer_context:
|
|
1203
|
+
# Local mode: ephemeral queue will be created automatically, skip env/queue discovery
|
|
1204
|
+
environment_queue_instructions = []
|
|
1205
|
+
else:
|
|
1206
|
+
# Production mode: need to discover and select environment/queue
|
|
1207
|
+
environment_queue_instructions = [
|
|
1208
|
+
"**Environment & Worker Queue Selection:**",
|
|
1209
|
+
"Call list_environments() and list_worker_queues() to discover options.",
|
|
1210
|
+
"Select environment (active, suitable for task) and queue (active, has workers).",
|
|
1211
|
+
"Use EXACT UUIDs (36-char format) and names from tool results for recommended IDs/names.",
|
|
1212
|
+
"Store full lists in discovered_environments and discovered_worker_queues.",
|
|
1213
|
+
"",
|
|
1214
|
+
]
|
|
1215
|
+
|
|
1216
|
+
# Validation reminders (always needed)
|
|
1217
|
+
validation_reminders = [
|
|
1218
|
+
"",
|
|
1219
|
+
"🚨 AUTOMATED VALIDATION (2 retries if failed):",
|
|
1220
|
+
"",
|
|
1221
|
+
"Your response is validated automatically:",
|
|
1222
|
+
"1. discovered lists must have data (call tools first)",
|
|
1223
|
+
"2. recommended_entity_id must be a valid UUID format (not a name!)",
|
|
1224
|
+
"3. recommended_entity_id must come from the 'id' field of discovered entities",
|
|
1225
|
+
"4. recommended_entity_name must come from the 'name' field and match exactly",
|
|
1226
|
+
"5. All IDs must be UUIDs from tool results (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx format)",
|
|
1227
|
+
"",
|
|
1228
|
+
"✅ SUCCESS: Use EXACT field mapping from tool results",
|
|
1229
|
+
" Example: Tool returns {\"id\": \"550e8400-e29b-41d4-a716-446655440000\", \"name\": \"DevOps Agent\"}",
|
|
1230
|
+
" → CORRECT: recommended_entity_id = \"550e8400-e29b-41d4-a716-446655440000\" (from 'id')",
|
|
1231
|
+
" → recommended_entity_name = \"DevOps Agent\" (from 'name')",
|
|
1232
|
+
"",
|
|
1233
|
+
"❌ FAILURE EXAMPLES:",
|
|
1234
|
+
" • Using agent['name'] for entity_id → UUID validation will fail!",
|
|
1235
|
+
" • Making up IDs/names not from tool results → existence check will fail!",
|
|
1236
|
+
" • Typos in names → name matching validation will fail!",
|
|
1237
|
+
"",
|
|
1238
|
+
"If validation fails, you get error message and ONE MORE CHANCE to fix using exact tool data.",
|
|
1239
|
+
"After 2 failures, workflow terminates.",
|
|
1240
|
+
"",
|
|
1241
|
+
]
|
|
1242
|
+
|
|
1243
|
+
# Combine all instructions
|
|
1244
|
+
final_instructions = base_instructions + workflow_instructions + environment_queue_instructions + validation_reminders
|
|
1245
|
+
|
|
1246
|
+
return Agent(
|
|
1247
|
+
name="Resource Discoverer",
|
|
1248
|
+
role="Expert at finding the right agents and teams for tasks",
|
|
1249
|
+
model=model,
|
|
1250
|
+
output_schema=ResourceDiscoveryOutput,
|
|
1251
|
+
tools=toolkit_tools, # Pass individual Function objects from toolkit
|
|
1252
|
+
instructions=final_instructions,
|
|
1253
|
+
markdown=False,
|
|
1254
|
+
)
|
|
1255
|
+
|
|
1256
|
+
|
|
1257
|
+
def create_cost_estimation_agent(model: LiteLLM) -> Agent:
|
|
1258
|
+
"""
|
|
1259
|
+
Step 3: Cost Estimation Agent
|
|
1260
|
+
|
|
1261
|
+
Calculates time and cost estimates based on task analysis and selected resources.
|
|
1262
|
+
"""
|
|
1263
|
+
return Agent(
|
|
1264
|
+
name="Cost Estimator",
|
|
1265
|
+
role="Expert at estimating time and cost for AI agent tasks",
|
|
1266
|
+
model=model,
|
|
1267
|
+
output_schema=CostEstimationOutput,
|
|
1268
|
+
instructions=[
|
|
1269
|
+
"You calculate realistic time and cost estimates for AI agent execution.",
|
|
1270
|
+
"",
|
|
1271
|
+
"**Your Input:**",
|
|
1272
|
+
"- Task analysis with complexity and capabilities",
|
|
1273
|
+
"- Selected agent/team details",
|
|
1274
|
+
"",
|
|
1275
|
+
"**Pricing Reference:**",
|
|
1276
|
+
"- Claude Sonnet 4: $0.003/1K input, $0.015/1K output tokens",
|
|
1277
|
+
"- Claude Haiku: $0.00025/1K input, $0.00125/1K output tokens",
|
|
1278
|
+
"- GPT-4o: $0.0025/1K input, $0.01/1K output tokens",
|
|
1279
|
+
"- GPT-4o Mini: $0.00015/1K input, $0.0006/1K output tokens",
|
|
1280
|
+
"- Tool calls: $0.0001 - $0.001 per call",
|
|
1281
|
+
"- Worker runtime: $0.10/hour",
|
|
1282
|
+
"",
|
|
1283
|
+
"**Token Estimation Guidelines:**",
|
|
1284
|
+
"- Simple tasks (1-3 points): 2-5K input, 1-2K output tokens",
|
|
1285
|
+
"- Medium tasks (5-8 points): 5-10K input, 2-5K output tokens",
|
|
1286
|
+
"- Complex tasks (13-21 points): 10-20K input, 5-10K output tokens",
|
|
1287
|
+
"",
|
|
1288
|
+
"**Time Estimation Guidelines:**",
|
|
1289
|
+
"- Simple tasks: 0.1 - 0.5 hours",
|
|
1290
|
+
"- Medium tasks: 0.5 - 2 hours",
|
|
1291
|
+
"- Complex tasks: 2 - 8 hours",
|
|
1292
|
+
"",
|
|
1293
|
+
"**Savings Calculation:**",
|
|
1294
|
+
"- Manual cost = time_hours × hourly_rate (use $100-150/hr for senior engineers)",
|
|
1295
|
+
"- AI cost = LLM + tools + runtime",
|
|
1296
|
+
"- Savings = manual_cost - ai_cost",
|
|
1297
|
+
"",
|
|
1298
|
+
"**Output:**",
|
|
1299
|
+
"Provide detailed cost breakdown with reasoning for your estimates.",
|
|
1300
|
+
],
|
|
1301
|
+
markdown=False,
|
|
1302
|
+
)
|
|
1303
|
+
|
|
1304
|
+
|
|
1305
|
+
def create_plan_generation_agent(model: LiteLLM) -> Agent:
|
|
1306
|
+
"""
|
|
1307
|
+
NEW Step 2: Full Plan Generation with Integrated Cost Estimation (2-Step Workflow)
|
|
1308
|
+
|
|
1309
|
+
Receives Step 1 output (analysis + selected resources) and generates
|
|
1310
|
+
complete TaskPlanResponse with costs, savings, risks, etc.
|
|
1311
|
+
|
|
1312
|
+
This replaces:
|
|
1313
|
+
- create_cost_estimation_agent() (old Step 3)
|
|
1314
|
+
- Old Step 4 plan generation
|
|
1315
|
+
|
|
1316
|
+
Combines cost estimation + plan generation for the simplified 2-step workflow.
|
|
1317
|
+
"""
|
|
1318
|
+
return Agent(
|
|
1319
|
+
name="Plan Generator",
|
|
1320
|
+
role="Generate comprehensive execution plan with cost analysis",
|
|
1321
|
+
model=model,
|
|
1322
|
+
output_schema=TaskPlanResponse,
|
|
1323
|
+
instructions=[
|
|
1324
|
+
"You create complete execution plans based on task analysis and selected resources.",
|
|
1325
|
+
"",
|
|
1326
|
+
"**Your Input (from Step 1):**",
|
|
1327
|
+
"- Task analysis with complexity and requirements",
|
|
1328
|
+
"- Selected agent/team (with REAL UUID)",
|
|
1329
|
+
"- Selected environment and worker queue (with REAL UUIDs)",
|
|
1330
|
+
"- Basic cost estimate",
|
|
1331
|
+
"- Original task description",
|
|
1332
|
+
"",
|
|
1333
|
+
"**Your Responsibilities:**",
|
|
1334
|
+
"1. Create title and summary",
|
|
1335
|
+
"2. Set complexity (use story_points from Step 1)",
|
|
1336
|
+
"3. Build team_breakdown with selected agent/team",
|
|
1337
|
+
"4. Build recommended_execution with EXACT values from Step 1:",
|
|
1338
|
+
" - entity_type: COPY from Step 1's selected_entity_type",
|
|
1339
|
+
" - entity_id: COPY from Step 1's selected_entity_id",
|
|
1340
|
+
" - entity_name: COPY from Step 1's selected_entity_name",
|
|
1341
|
+
" - recommended_environment_id: COPY from Step 1's selected_environment_id",
|
|
1342
|
+
" - recommended_environment_name: COPY from Step 1's selected_environment_name",
|
|
1343
|
+
" - recommended_worker_queue_id: COPY from Step 1's selected_worker_queue_id",
|
|
1344
|
+
" - recommended_worker_queue_name: COPY from Step 1's selected_worker_queue_name",
|
|
1345
|
+
" - reasoning: Use Step 1's selection_reasoning",
|
|
1346
|
+
"5. Calculate detailed cost_estimate (LLM + tools + runtime)",
|
|
1347
|
+
"6. Calculate realized_savings (vs manual execution)",
|
|
1348
|
+
"7. List risks (3-5 critical items)",
|
|
1349
|
+
"8. List prerequisites (3-5 items)",
|
|
1350
|
+
"9. List success_criteria (3-5 items)",
|
|
1351
|
+
"10. Generate execution_prompt (detailed prompt for execution)",
|
|
1352
|
+
"",
|
|
1353
|
+
"**Execution Prompt Generation:**",
|
|
1354
|
+
"The execution_prompt is the actual prompt that will be sent to the executing agent.",
|
|
1355
|
+
"It must be comprehensive, clear, and actionable.",
|
|
1356
|
+
"",
|
|
1357
|
+
"Structure the execution_prompt with these sections:",
|
|
1358
|
+
"",
|
|
1359
|
+
"1. Header: # {title}",
|
|
1360
|
+
"2. Original Request: The task description from input",
|
|
1361
|
+
"3. Task Overview: The summary you generated",
|
|
1362
|
+
"4. Complexity Assessment: story_points + confidence + reasoning",
|
|
1363
|
+
"5. Prerequisites: If any prerequisites exist, list them (numbered)",
|
|
1364
|
+
"6. Your Responsibilities: The responsibilities from team_breakdown for the executing entity",
|
|
1365
|
+
"7. Potential Risks: If any risks exist, list them with mitigation guidance (numbered)",
|
|
1366
|
+
"8. Success Criteria: The success criteria you defined (numbered)",
|
|
1367
|
+
"9. Execution Context: **ALWAYS include**:",
|
|
1368
|
+
" - Environment: {recommended_environment_name} (ID: {recommended_environment_id})",
|
|
1369
|
+
" - Worker Queue: {recommended_worker_queue_name} (if selected)",
|
|
1370
|
+
"10. Instructions: Clear call to action for the agent",
|
|
1371
|
+
"",
|
|
1372
|
+
"Use markdown formatting with headers (##), numbered lists (1., 2.), and **bold** for emphasis.",
|
|
1373
|
+
"Skip sections that don't apply (e.g., if no risks, skip that section).",
|
|
1374
|
+
"Keep it concise but complete - this is what the agent will see and execute from.",
|
|
1375
|
+
"",
|
|
1376
|
+
"**⚡ DETAILED TASK GENERATION - Generate Executable Plan:**",
|
|
1377
|
+
"",
|
|
1378
|
+
"1. team_breakdown:",
|
|
1379
|
+
" - Create SINGLE item using entity from Step 1",
|
|
1380
|
+
" - Use EXACT entity_id UUID from Step 1",
|
|
1381
|
+
" - MUST generate detailed 'tasks' array with AT LEAST 2 tasks:",
|
|
1382
|
+
" * Each task MUST have: id (1,2,3...), title, description, details, test_strategy",
|
|
1383
|
+
" * Each task MUST have agent_id (copy from Step 1 entity_id)",
|
|
1384
|
+
" * Each task MUST have worker_queue_id (use recommended_worker_queue_id from Step 1, or use the agent's first available worker queue)",
|
|
1385
|
+
" * Set dependencies: [1] for task 2 if dependent, [] for independent",
|
|
1386
|
+
" * Set status: 'pending' for all",
|
|
1387
|
+
" * Set priority: 'high', 'medium', or 'low' based on criticality",
|
|
1388
|
+
" * Keep details actionable (2-3 sentences explaining HOW to do it)",
|
|
1389
|
+
" - Keep responsibilities to 3-5 high-level points",
|
|
1390
|
+
"",
|
|
1391
|
+
"2. cost_estimate:",
|
|
1392
|
+
" - Use estimated_cost_usd from Step 1 as base",
|
|
1393
|
+
" - Add simple breakdown (LLM, tools, runtime)",
|
|
1394
|
+
" - Empty arrays for llm_costs/tool_costs (use summary only)",
|
|
1395
|
+
"",
|
|
1396
|
+
"3. realized_savings:",
|
|
1397
|
+
" - Single item for without_kubiya_resources",
|
|
1398
|
+
" - Simple calculation: manual_hours × $125/hr",
|
|
1399
|
+
" - Keep savings_summary concise (1-2 sentences)",
|
|
1400
|
+
"",
|
|
1401
|
+
"4. risks/prerequisites/success_criteria:",
|
|
1402
|
+
" - 3-5 items each (not exhaustive lists)",
|
|
1403
|
+
" - Focus on critical items only",
|
|
1404
|
+
"",
|
|
1405
|
+
"**🚨 CRITICAL - Use Data from Step 1:**",
|
|
1406
|
+
"For recommended_execution:",
|
|
1407
|
+
" - Copy EXACT entity_id UUID from Step 1's 'selected_entity_id'",
|
|
1408
|
+
" - Copy EXACT entity_name from Step 1's 'selected_entity_name'",
|
|
1409
|
+
" - Copy entity_type from Step 1's 'selected_entity_type'",
|
|
1410
|
+
" - Copy EXACT recommended_environment_id from Step 1's 'selected_environment_id' (NEVER null/None!)",
|
|
1411
|
+
" - Copy EXACT recommended_environment_name from Step 1's 'selected_environment_name'",
|
|
1412
|
+
" - Copy recommended_worker_queue_id from Step 1's 'selected_worker_queue_id' (if provided)",
|
|
1413
|
+
" - Copy recommended_worker_queue_name from Step 1's 'selected_worker_queue_name' (if provided)",
|
|
1414
|
+
" - DO NOT invent new IDs like 'devops-team-001'",
|
|
1415
|
+
" - DO NOT leave environment fields as null - ALWAYS copy from Step 1!",
|
|
1416
|
+
"",
|
|
1417
|
+
"For team_breakdown array:",
|
|
1418
|
+
" - Create SINGLE item (not multiple)",
|
|
1419
|
+
" - Use SAME UUID from Step 1 in 'team_id' or 'agent_id'",
|
|
1420
|
+
" - If selected_entity_type='team', populate team_id + team_name, set agent_id/agent_name to None",
|
|
1421
|
+
" - If selected_entity_type='agent', populate agent_id + agent_name, set team_id/team_name to None",
|
|
1422
|
+
" - NEVER create fake IDs like 'agent-001', 'security-team-xyz'",
|
|
1423
|
+
"",
|
|
1424
|
+
"**CRITICAL - Generate Fast:**",
|
|
1425
|
+
"Your goal is to return a complete plan in 20-30 seconds.",
|
|
1426
|
+
"Prioritize speed over exhaustive detail.",
|
|
1427
|
+
"Every optional field you populate adds 2-5 seconds to generation time.",
|
|
1428
|
+
"",
|
|
1429
|
+
"**Output:**",
|
|
1430
|
+
"Complete TaskPlanResponse including execution_prompt in 20-30 seconds.",
|
|
1431
|
+
"",
|
|
1432
|
+
"**OUTPUT FORMAT - CRITICAL:**",
|
|
1433
|
+
"- Output ONLY the structured JSON data",
|
|
1434
|
+
"- DO NOT add explanatory text before the JSON",
|
|
1435
|
+
"- DO NOT add markdown code blocks (no ```json)",
|
|
1436
|
+
"- DO NOT add reasoning or thinking before the JSON structure",
|
|
1437
|
+
"- Start your response directly with the opening brace: {",
|
|
1438
|
+
"- Your entire response must be valid JSON matching the schema",
|
|
1439
|
+
],
|
|
1440
|
+
markdown=False,
|
|
1441
|
+
)
|
|
1442
|
+
|
|
1443
|
+
|
|
1444
|
+
# ============================================================================
|
|
1445
|
+
# Workflow Factory
|
|
1446
|
+
# ============================================================================
|
|
1447
|
+
|
|
1448
|
+
def create_planning_workflow(
|
|
1449
|
+
db: Session,
|
|
1450
|
+
organization_id: str,
|
|
1451
|
+
api_token: str,
|
|
1452
|
+
quick_mode: bool = False,
|
|
1453
|
+
outer_context: Optional[Dict[str, Any]] = None
|
|
1454
|
+
) -> Workflow:
|
|
1455
|
+
"""
|
|
1456
|
+
Create the task planning workflow with 4 steps.
|
|
1457
|
+
|
|
1458
|
+
NOTE: For fast planning (--local mode), use create_fast_planning_workflow() instead.
|
|
1459
|
+
This function always creates the full 4-step workflow regardless of quick_mode.
|
|
1460
|
+
|
|
1461
|
+
Args:
|
|
1462
|
+
db: Database session for internal service access
|
|
1463
|
+
organization_id: Organization ID for resource filtering (REQUIRED)
|
|
1464
|
+
api_token: Org-scoped API token for context graph access (REQUIRED)
|
|
1465
|
+
quick_mode: DEPRECATED - no longer used, kept for backwards compatibility
|
|
1466
|
+
outer_context: Optional pre-filtered context from CLI (agents, teams, etc.)
|
|
1467
|
+
|
|
1468
|
+
Returns:
|
|
1469
|
+
Configured Workflow instance with 4 steps
|
|
1470
|
+
"""
|
|
1471
|
+
if not organization_id:
|
|
1472
|
+
raise ValueError("organization_id is required for planning workflow")
|
|
1473
|
+
|
|
1474
|
+
if not api_token:
|
|
1475
|
+
raise ValueError("api_token is required for planning workflow")
|
|
1476
|
+
|
|
1477
|
+
# Get LiteLLM configuration
|
|
1478
|
+
litellm_api_url = (
|
|
1479
|
+
os.getenv("LITELLM_API_URL") or
|
|
1480
|
+
os.getenv("LITELLM_API_BASE") or
|
|
1481
|
+
"https://llm-proxy.kubiya.ai"
|
|
1482
|
+
).strip()
|
|
1483
|
+
|
|
1484
|
+
litellm_api_key = os.getenv("LITELLM_API_KEY", "").strip()
|
|
1485
|
+
|
|
1486
|
+
if not litellm_api_key:
|
|
1487
|
+
raise ValueError("LITELLM_API_KEY environment variable not set")
|
|
1488
|
+
|
|
1489
|
+
# PHASE 3 IMPROVEMENT: Use Opus for Step 1 (better reasoning), Sonnet for Step 2 (faster)
|
|
1490
|
+
# Step 1 is critical - agent selection affects entire execution
|
|
1491
|
+
# Worth the extra $0.007 per request for 10% better selection quality
|
|
1492
|
+
step1_model_id = os.getenv("STEP1_MODEL", "kubiya/claude-opus-4").strip()
|
|
1493
|
+
step2_model_id = os.getenv("STEP2_MODEL", "kubiya/claude-sonnet-4").strip()
|
|
1494
|
+
|
|
1495
|
+
logger.info("model_configuration",
|
|
1496
|
+
step1_model=step1_model_id,
|
|
1497
|
+
step2_model=step2_model_id,
|
|
1498
|
+
message="Using Opus for Step 1 (intelligent selection), Sonnet for Step 2 (fast generation)")
|
|
1499
|
+
|
|
1500
|
+
# Create model instances for each step
|
|
1501
|
+
step1_model = LiteLLM(
|
|
1502
|
+
id=f"openai/{step1_model_id}",
|
|
1503
|
+
api_base=litellm_api_url,
|
|
1504
|
+
api_key=litellm_api_key,
|
|
1505
|
+
request_params={"timeout": 300}
|
|
1506
|
+
)
|
|
1507
|
+
|
|
1508
|
+
step2_model = LiteLLM(
|
|
1509
|
+
id=f"openai/{step2_model_id}",
|
|
1510
|
+
api_base=litellm_api_url,
|
|
1511
|
+
api_key=litellm_api_key,
|
|
1512
|
+
request_params={"timeout": 300}
|
|
1513
|
+
)
|
|
1514
|
+
|
|
1515
|
+
# Log outer context if provided
|
|
1516
|
+
if outer_context:
|
|
1517
|
+
logger.info(
|
|
1518
|
+
"outer_context_provided",
|
|
1519
|
+
agents_count=len(outer_context.get("agents", [])),
|
|
1520
|
+
teams_count=len(outer_context.get("teams", [])),
|
|
1521
|
+
environments_count=len(outer_context.get("environments", [])),
|
|
1522
|
+
worker_queues_count=len(outer_context.get("worker_queues", [])),
|
|
1523
|
+
organization_id=organization_id[:8]
|
|
1524
|
+
)
|
|
1525
|
+
|
|
1526
|
+
logger.info(
|
|
1527
|
+
"creating_planning_workflow",
|
|
1528
|
+
step1_model=step1_model_id,
|
|
1529
|
+
step2_model=step2_model_id,
|
|
1530
|
+
has_outer_context=bool(outer_context),
|
|
1531
|
+
organization_id=organization_id[:8]
|
|
1532
|
+
)
|
|
1533
|
+
|
|
1534
|
+
# PRE-FETCH OPTIMIZATION: Get top 20 most-used resources
|
|
1535
|
+
# Limits context window bloat while providing enough data for 90% of cases
|
|
1536
|
+
# Tools remain available if agent needs more
|
|
1537
|
+
logger.info("pre_fetching_top_resources", organization_id=organization_id[:8])
|
|
1538
|
+
|
|
1539
|
+
from control_plane_api.app.lib.planning_tools.planning_service import PlanningService
|
|
1540
|
+
import asyncio
|
|
1541
|
+
from concurrent.futures import ThreadPoolExecutor
|
|
1542
|
+
|
|
1543
|
+
planning_service = PlanningService(db, organization_id, api_token)
|
|
1544
|
+
|
|
1545
|
+
async def fetch_top_resources():
|
|
1546
|
+
"""Fetch top 20 agents, teams, environments - keeps context manageable"""
|
|
1547
|
+
loop = asyncio.get_event_loop()
|
|
1548
|
+
with ThreadPoolExecutor(max_workers=4) as executor:
|
|
1549
|
+
# Limit to 20 each to avoid overwhelming context window
|
|
1550
|
+
agents_future = loop.run_in_executor(
|
|
1551
|
+
executor, planning_service.list_agents, 20, None # Top 20 agents
|
|
1552
|
+
)
|
|
1553
|
+
teams_future = loop.run_in_executor(
|
|
1554
|
+
executor, planning_service.list_teams, 20, None # Top 20 teams
|
|
1555
|
+
)
|
|
1556
|
+
envs_future = loop.run_in_executor(
|
|
1557
|
+
executor, planning_service.list_environments, 20 # Top 20 environments
|
|
1558
|
+
)
|
|
1559
|
+
queues_future = loop.run_in_executor(
|
|
1560
|
+
executor, planning_service.list_worker_queues, 20 # Top 20 queues
|
|
1561
|
+
)
|
|
1562
|
+
|
|
1563
|
+
agents, teams, envs, queues = await asyncio.gather(
|
|
1564
|
+
agents_future, teams_future, envs_future, queues_future
|
|
1565
|
+
)
|
|
1566
|
+
return agents, teams, envs, queues
|
|
1567
|
+
|
|
1568
|
+
# Execute pre-fetch (parallel, ~1-2 seconds total)
|
|
1569
|
+
try:
|
|
1570
|
+
agents_data, teams_data, envs_data, queues_data = asyncio.run(fetch_top_resources())
|
|
1571
|
+
|
|
1572
|
+
logger.info(
|
|
1573
|
+
"pre_fetch_completed",
|
|
1574
|
+
agents_count=len(agents_data),
|
|
1575
|
+
teams_count=len(teams_data),
|
|
1576
|
+
envs_count=len(envs_data),
|
|
1577
|
+
queues_count=len(queues_data),
|
|
1578
|
+
message="Pre-fetched top 20 of each type. Tools still available for more."
|
|
1579
|
+
)
|
|
1580
|
+
|
|
1581
|
+
# Store in outer_context with limited data (only update missing/empty fields)
|
|
1582
|
+
if outer_context is None:
|
|
1583
|
+
outer_context = {}
|
|
1584
|
+
|
|
1585
|
+
# Only update fields that are missing or empty (don't overwrite CLI-provided data)
|
|
1586
|
+
if not outer_context.get("agents"):
|
|
1587
|
+
outer_context["agents"] = agents_data[:20]
|
|
1588
|
+
if not outer_context.get("teams"):
|
|
1589
|
+
outer_context["teams"] = teams_data[:20]
|
|
1590
|
+
if not outer_context.get("environments"):
|
|
1591
|
+
outer_context["environments"] = envs_data[:20]
|
|
1592
|
+
if not outer_context.get("worker_queues"):
|
|
1593
|
+
# Sort worker queues by active_workers DESC (queues with active workers first!)
|
|
1594
|
+
sorted_queues = sorted(
|
|
1595
|
+
queues_data[:20],
|
|
1596
|
+
key=lambda q: q.get("active_workers", 0),
|
|
1597
|
+
reverse=True
|
|
1598
|
+
)
|
|
1599
|
+
outer_context["worker_queues"] = sorted_queues
|
|
1600
|
+
logger.info(
|
|
1601
|
+
"worker_queues_sorted",
|
|
1602
|
+
total=len(sorted_queues),
|
|
1603
|
+
with_active_workers=len([q for q in sorted_queues if q.get("active_workers", 0) > 0]),
|
|
1604
|
+
message="Worker queues sorted by active_workers (DESC)"
|
|
1605
|
+
)
|
|
1606
|
+
|
|
1607
|
+
outer_context["pre_fetch_note"] = "Top 20 most common resources. Use tools if you need more specific matches."
|
|
1608
|
+
except Exception as e:
|
|
1609
|
+
logger.warning("pre_fetch_failed", error=str(e), message="Continuing without pre-fetched data")
|
|
1610
|
+
# Continue without pre-fetched data - agent will use tools instead
|
|
1611
|
+
|
|
1612
|
+
# Create workflow steps (SIMPLIFIED 2-STEP WORKFLOW)
|
|
1613
|
+
# Create planning toolkit for Step 1 tools
|
|
1614
|
+
planning_toolkit = PlanningToolkit(db, organization_id, api_token)
|
|
1615
|
+
|
|
1616
|
+
# NEW Step 1: Analysis + Resource Selection (combines old Steps 1+2)
|
|
1617
|
+
# PHASE 3: Uses Opus for better reasoning and tool usage
|
|
1618
|
+
step1_analysis_and_selection = create_analysis_and_selection_agent(
|
|
1619
|
+
model=step1_model, # Opus for intelligent selection
|
|
1620
|
+
planning_toolkit=planning_toolkit,
|
|
1621
|
+
outer_context=outer_context
|
|
1622
|
+
)
|
|
1623
|
+
|
|
1624
|
+
# NEW Step 2: Full Plan Generation with Cost Estimation (combines old Steps 3+4)
|
|
1625
|
+
# Uses Sonnet for faster generation
|
|
1626
|
+
step2_plan_generation = create_plan_generation_agent(step2_model) # Sonnet for speed
|
|
1627
|
+
|
|
1628
|
+
# Create 2-step workflow
|
|
1629
|
+
workflow = Workflow(
|
|
1630
|
+
name="Task Planning Workflow",
|
|
1631
|
+
steps=[
|
|
1632
|
+
step1_analysis_and_selection,
|
|
1633
|
+
step2_plan_generation,
|
|
1634
|
+
],
|
|
1635
|
+
description="Simplified 2-step task planning: (1) Analysis & Resource Selection, (2) Full Plan Generation with Costs",
|
|
1636
|
+
)
|
|
1637
|
+
|
|
1638
|
+
# PHASE 1 IMPROVEMENT: Store planning_toolkit for validation
|
|
1639
|
+
workflow._planning_toolkit = planning_toolkit
|
|
1640
|
+
|
|
1641
|
+
logger.info(
|
|
1642
|
+
"planning_workflow_created",
|
|
1643
|
+
steps=2, # Down from 4!
|
|
1644
|
+
pre_fetched_data=bool(outer_context),
|
|
1645
|
+
message="Simplified 2-step workflow with smart pre-fetching and Phase 1 improvements"
|
|
1646
|
+
)
|
|
1647
|
+
|
|
1648
|
+
return workflow
|
|
1649
|
+
|
|
1650
|
+
|
|
1651
|
+
def create_fast_selection_agent(
    model: LiteLLM,
    db: Session,
    organization_id: str,
    api_token: str,
    outer_context: Optional[Dict[str, Any]] = None
) -> Agent:
    """
    Creates a fast selection agent for --local mode that does everything in one shot:
    1. Select agent/team (from outer context or DB)
    2. Select environment (from outer context or DB)
    3. Select worker queue (from outer context or DB)
    4. Return minimal response

    This is the ONLY agent in the fast workflow - no analysis, no cost estimation.
    """
    import json
    from agno.tools.function import Function

    # Get full toolkit but we'll filter to only essential tools
    planning_toolkit = PlanningToolkit(db, organization_id, api_token)

    # ONLY give agent the tools it needs for fast selection
    essential_tools = []

    # If outer_context provided, create synthetic tools (NO API calls needed!)
    if outer_context:
        # Synthetic tool for agents from outer context
        if outer_context.get("agents"):
            def get_outer_context_agents() -> str:
                """Get pre-filtered agents provided by CLI (outer context).

                INSTANT - no API call needed, data already fetched by CLI.
                Returns structured JSON with agent data.

                Returns:
                    JSON string with agents list
                """
                return json.dumps({
                    "type": "tool_result",
                    "tool": "get_outer_context_agents",
                    "success": True,
                    "data": {
                        "agents": outer_context.get("agents", []),
                        "count": len(outer_context.get("agents", []))
                    },
                    "human_readable": f"Found {len(outer_context.get('agents', []))} agents from CLI context (instant)"
                }, indent=2)

            essential_tools.append(Function.from_callable(get_outer_context_agents))
        else:
            # Fallback: need to fetch from DB
            if "list_agents" in planning_toolkit.functions:
                essential_tools.append(planning_toolkit.functions["list_agents"])

        # Synthetic tool for teams from outer context
        if outer_context.get("teams"):
            def get_outer_context_teams() -> str:
                """Get pre-filtered teams provided by CLI (outer context).

                INSTANT - no API call needed, data already fetched by CLI.
                Returns structured JSON with team data.

                Returns:
                    JSON string with teams list
                """
                return json.dumps({
                    "type": "tool_result",
                    "tool": "get_outer_context_teams",
                    "success": True,
                    "data": {
                        "teams": outer_context.get("teams", []),
                        "count": len(outer_context.get("teams", []))
                    },
                    "human_readable": f"Found {len(outer_context.get('teams', []))} teams from CLI context (instant)"
                }, indent=2)

            essential_tools.append(Function.from_callable(get_outer_context_teams))
        else:
            # Fallback: need to fetch from DB
            if "list_teams" in planning_toolkit.functions:
                essential_tools.append(planning_toolkit.functions["list_teams"])

        # Synthetic tool for environments from outer context
        if outer_context.get("environments"):
            def get_outer_context_environments() -> str:
                """Get pre-filtered environments provided by CLI (outer context).

                INSTANT - no API call needed, data already fetched by CLI.
                Returns structured JSON with environment data.

                Returns:
                    JSON string with environments list
                """
                return json.dumps({
                    "type": "tool_result",
                    "tool": "get_outer_context_environments",
                    "success": True,
                    "data": {
                        "environments": outer_context.get("environments", []),
                        "count": len(outer_context.get("environments", []))
                    },
                    "human_readable": f"Found {len(outer_context.get('environments', []))} environments from CLI context (instant)"
                }, indent=2)

            essential_tools.append(Function.from_callable(get_outer_context_environments))
    else:
        # No outer context - use real API tools
        if "list_teams" in planning_toolkit.functions:
            essential_tools.append(planning_toolkit.functions["list_teams"])
        if "list_agents" in planning_toolkit.functions:
            essential_tools.append(planning_toolkit.functions["list_agents"])

    toolkit_tools = essential_tools

    # Build simple step-by-step instructions
    has_outer_agents = outer_context and outer_context.get("agents")
    has_outer_teams = outer_context and outer_context.get("teams")
    has_outer_envs = outer_context and outer_context.get("environments")

    instructions = [
        "⚡ ULTRA-FAST SELECTOR - INSTANT DATA ⚡",
        "",
        "CLI already fetched all data! Just call the tools and pick FIRST available.",
        "",
    ]

    # Instructions for getting agents/teams
    if has_outer_agents and has_outer_teams:
        instructions.extend([
            "## STEP 1: Get pre-fetched data (INSTANT - no API calls!)",
            " Call get_outer_context_agents() → populate discovered_agents",
            " Call get_outer_context_teams() → populate discovered_teams",
            "",
        ])
    elif has_outer_agents:
        instructions.extend([
            "## STEP 1: Get pre-fetched agents (INSTANT)",
            " Call get_outer_context_agents() → populate discovered_agents",
            " discovered_teams = []",
            "",
        ])
    elif has_outer_teams:
        instructions.extend([
            "## STEP 1: Get pre-fetched teams (INSTANT)",
            " Call get_outer_context_teams() → populate discovered_teams",
            " discovered_agents = []",
            "",
        ])
    else:
        instructions.extend([
            "## STEP 1: Fetch agents AND teams from API",
            " Call list_agents() → populate discovered_agents",
            " Call list_teams() → populate discovered_teams",
            "",
        ])

    instructions.extend([
        "## STEP 2: Pick FIRST available (NO comparison)",
        " If teams found: Pick FIRST team → entity_type='team'",
        " Else if agents found: Pick FIRST agent → entity_type='agent'",
        " Set: entity_id=<UUID>, entity_name=<name>",
        "",
    ])

    if has_outer_envs:
        instructions.extend([
            "## STEP 3: Get pre-fetched environments (INSTANT)",
            " Call get_outer_context_environments()",
            " Parse JSON → populate discovered_environments",
            " Pick FIRST environment → environment_id=<UUID>, environment_name=<name>",
            "",
        ])
    else:
        instructions.extend([
            "## STEP 3: No environments",
            " Set: environment_id=None, environment_name=None, discovered_environments=[]",
            "",
        ])

    instructions.extend([
        "## STEP 4: Set worker queues to None (CLI creates ephemeral queue)",
        " Set: worker_queue_id=None, worker_queue_name=None, discovered_worker_queues=[]",
        "",
        "## STEP 5: Return output",
        " Set reasoning to: 'Fast local execution'",
        "",
        "🚨 SPEED RULES:",
        " - Call list_agents() + list_teams() in PARALLEL (both at once)",
        " - Pick FIRST available (no analysis, no comparison)",
        " - Keep reasoning SHORT: 1-3 words only",
        " - Target: < 10 seconds total",
    ])

    return Agent(
        name="Fast Selector",
        role="Quick agent and environment selection for local testing",
        model=model,
        output_schema=FastSelectionOutput,
        tools=toolkit_tools,
        instructions=instructions,
        markdown=False
    )


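# --- Editor's illustrative sketch (not part of the packaged module) -------------
# Each synthetic get_outer_context_* closure above returns the same JSON envelope
# ("type"/"tool"/"success"/"data"/"human_readable") without touching the API. A
# minimal reproduction of that envelope for a hypothetical two-agent context:
def _example_outer_context_tool_envelope():
    import json

    outer_context = {"agents": [{"id": "agent-1"}, {"id": "agent-2"}]}  # hypothetical CLI payload

    def get_outer_context_agents() -> str:
        return json.dumps({
            "type": "tool_result",
            "tool": "get_outer_context_agents",
            "success": True,
            "data": {
                "agents": outer_context.get("agents", []),
                "count": len(outer_context.get("agents", []))
            },
            "human_readable": f"Found {len(outer_context.get('agents', []))} agents from CLI context (instant)"
        }, indent=2)

    payload = json.loads(get_outer_context_agents())
    assert payload["data"]["count"] == 2
    return payload
# -------------------------------------------------------------------------------

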
def create_fast_planning_workflow(
    db: Session,
    organization_id: str,
    api_token: str,
    outer_context: Optional[Dict[str, Any]] = None
) -> Workflow:
    """
    Create a FAST 1-step planning workflow for --local mode.

    This workflow:
    - Skips task analysis (no complexity assessment)
    - Skips cost estimation (no detailed calculations)
    - Skips detailed plan generation (no task breakdown)
    - Just selects: agent/team + environment + worker queue

    Returns minimal FastSelectionOutput that gets converted to TaskPlanResponse.

    Args:
        db: Database session for internal service access
        organization_id: Organization ID for resource filtering (REQUIRED)
        api_token: Org-scoped API token for context graph access (REQUIRED)
        outer_context: Optional pre-filtered context from CLI (agents, teams, etc.)

    Returns:
        Configured Workflow with single fast selection agent
    """
    if not organization_id:
        raise ValueError("organization_id is required for fast planning workflow")

    if not api_token:
        raise ValueError("api_token is required for fast planning workflow")

    # Get LiteLLM configuration
    litellm_api_url = (
        os.getenv("LITELLM_API_URL") or
        os.getenv("LITELLM_API_BASE") or
        "https://llm-proxy.kubiya.ai"
    ).strip()

    litellm_api_key = os.getenv("LITELLM_API_KEY", "").strip()

    if not litellm_api_key:
        raise ValueError("LITELLM_API_KEY environment variable not set")

    # Use fast model for local execution
    # Override with env var if needed: LITELLM_FAST_MODEL=kubiya/claude-sonnet-4
    model_id = os.getenv("LITELLM_FAST_MODEL", "kubiya/claude-sonnet-4").strip()

    # Create model instance with reasonable timeout for Sonnet
    model = LiteLLM(
        id=f"openai/{model_id}",
        api_base=litellm_api_url,
        api_key=litellm_api_key,
        request_params={"timeout": 120}  # 2 minutes for Sonnet (would be 60s for Haiku)
    )

    # Log outer context if provided
    if outer_context:
        logger.info(
            "fast_workflow_outer_context",
            agents_count=len(outer_context.get("agents", [])),
            teams_count=len(outer_context.get("teams", [])),
            environments_count=len(outer_context.get("environments", [])),
            worker_queues_count=len(outer_context.get("worker_queues", [])),
            organization_id=organization_id[:8]
        )

    logger.info(
        "creating_fast_planning_workflow",
        model=model_id,
        has_outer_context=bool(outer_context),
        organization_id=organization_id[:8]
    )

    # Create single fast selection agent
    fast_agent = create_fast_selection_agent(
        model, db, organization_id, api_token, outer_context
    )

    # Workflow with just 1 step!
    workflow = Workflow(
        name="Fast Planning Workflow",
        steps=[fast_agent],  # Just ONE step
        description="Fast agent/team selection for local execution (1-step, no analysis/cost-estimation)"
    )

    logger.info("fast_planning_workflow_created", steps=1)

    return workflow


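# --- Editor's illustrative sketch (not part of the packaged module) -------------
# How the fast workflow above might be built from a request handler. The session,
# organization id, and token values are placeholders; LITELLM_API_KEY must already
# be set in the environment or the factory raises ValueError.
def _example_build_fast_workflow(db: Session) -> Workflow:
    outer_context = {
        "agents": [{"id": "11111111-2222-3333-4444-555555555555", "name": "demo-agent"}],  # hypothetical
        "teams": [],
        "environments": [],
        "worker_queues": [],
    }
    return create_fast_planning_workflow(
        db=db,
        organization_id="org_demo",   # placeholder
        api_token="kb-token-demo",    # placeholder
        outer_context=outer_context,
    )
# -------------------------------------------------------------------------------

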
def convert_fast_output_to_plan(
    fast_output: FastSelectionOutput,
    request: TaskPlanRequest
) -> TaskPlanResponse:
    """
    Convert minimal FastSelectionOutput to full TaskPlanResponse.
    Fills in required fields with sensible defaults for --local mode.

    Args:
        fast_output: Minimal selection from fast workflow
        request: Original planning request for context

    Returns:
        Complete TaskPlanResponse with defaults for fields not in fast output
    """
    from control_plane_api.app.models.task_planning import (
        ComplexityInfo,
        TeamBreakdownItem,
        RecommendedExecution,
        CostEstimate,
        RealizedSavings,
        HumanResourceCost
    )

    logger.info(
        "converting_fast_output_to_plan",
        entity_type=fast_output.recommended_entity_type,
        entity_id=fast_output.recommended_entity_id[:12],
        entity_name=fast_output.recommended_entity_name
    )

    return TaskPlanResponse(
        title=f"Execute: {request.description[:50]}",
        summary=request.description,

        # Minimal complexity (don't analyze in fast mode)
        complexity=ComplexityInfo(
            story_points=3,  # Default medium
            confidence="medium",
            reasoning="Fast selection for local execution - no detailed complexity analysis"
        ),

        # Single team breakdown (minimal)
        team_breakdown=[TeamBreakdownItem(
            team_id=fast_output.recommended_entity_id if fast_output.recommended_entity_type == "team" else None,
            team_name=fast_output.recommended_entity_name if fast_output.recommended_entity_type == "team" else "",
            agent_id=fast_output.recommended_entity_id if fast_output.recommended_entity_type == "agent" else None,
            agent_name=fast_output.recommended_entity_name if fast_output.recommended_entity_type == "agent" else None,
            responsibilities=[request.description],
            estimated_time_hours=1.0,
            agent_cost=0.10,
            tasks=[]  # No detailed tasks for fast mode
        )],

        # Recommended execution (THE KEY PART!)
        recommended_execution=RecommendedExecution(
            entity_type=fast_output.recommended_entity_type,
            entity_id=fast_output.recommended_entity_id,
            entity_name=fast_output.recommended_entity_name,
            reasoning=fast_output.reasoning,
            recommended_environment_id=fast_output.recommended_environment_id,
            recommended_environment_name=fast_output.recommended_environment_name,
            recommended_worker_queue_id=fast_output.recommended_worker_queue_id,
            recommended_worker_queue_name=fast_output.recommended_worker_queue_name,
            execution_reasoning="Fast selection for local execution"
        ),

        # Minimal cost (don't calculate in fast mode)
        cost_estimate=CostEstimate(
            estimated_cost_usd=0.10,
            breakdown=[]
        ),

        # Minimal savings (don't calculate in fast mode)
        realized_savings=RealizedSavings(
            without_kubiya_cost=10.0,
            without_kubiya_hours=1.0,
            without_kubiya_resources=[
                HumanResourceCost(
                    role="DevOps Engineer",
                    hourly_rate=100.0,
                    estimated_hours=1.0,
                    total_cost=100.0
                )
            ],
            with_kubiya_cost=0.10,
            with_kubiya_hours=0.1,
            money_saved=9.90,
            time_saved_hours=0.9,
            time_saved_percentage=90,
            savings_summary="Fast local execution - estimated 90% time savings"
        ),

        risks=[],
        prerequisites=[],
        success_criteria=["Task execution completes successfully"],
        has_questions=False
    )


# ============================================================================
# Workflow Runner with Streaming
# ============================================================================

# Step descriptions for real-time progress updates (2-STEP WORKFLOW)
STEP_DESCRIPTIONS = {
    1: "🔍 Discovering available agents and teams in your organization and selecting the best match for your task",
    2: "📋 Creating detailed execution plan with cost estimates, risks, and success criteria (will run on your local session)"
}

# Stage names for CLI/UI display (compatible with UI's hardcoded stages)
# UI expects: 'initializing', 'discovering', 'analyzing', 'generating', 'calculating', 'finalizing'
STEP_STAGE_NAMES = {
    1: "analyzing",  # Step 1: Analysis & Resource Selection
    2: "generating"  # Step 2: Plan Generation
}

# Progress milestones for each step (percentage) (2-STEP WORKFLOW)
STEP_PROGRESS_MAP = {
    1: 50,  # Analysis & Resource Selection (combines old Steps 1+2)
    2: 95   # Full Plan Generation with Costs (combines old Steps 3+4)
}


def create_tool_wrapper(tool, publish_event, step_number):
    """
    Wrap a tool to emit events before/after execution.

    Args:
        tool: The Agno tool to wrap
        publish_event: Callback to emit streaming events
        step_number: Current workflow step number (1-4)

    Returns:
        Wrapped tool that emits events
    """
    import uuid
    from datetime import datetime
    import copy

    # Get the original function from the tool
    # Agno tools have 'entrypoint' attribute, not 'function'
    if hasattr(tool, 'entrypoint'):
        original_func = tool.entrypoint
    elif hasattr(tool, 'function'):
        original_func = tool.function
    elif callable(tool):
        original_func = tool
    else:
        logger.warning("tool_wrapper_skip", tool=str(tool), reason="not_callable")
        return tool

    def wrapped_function(*args, **kwargs):
        tool_id = str(uuid.uuid4())
        tool_name = getattr(tool, 'name', getattr(original_func, '__name__', 'unknown_tool'))
        tool_description = getattr(tool, 'description', None)

        # DEBUG: Log that wrapped function is being called
        logger.info("WRAPPED_FUNCTION_CALLED", tool_name=tool_name, step=step_number, args_count=len(args), kwargs_count=len(kwargs))

        # Handle Agno's quirk: sometimes it passes both 'args' and 'kwargs' as keyword arguments
        # e.g., function(args=['kubernetes'], kwargs={'limit': 10})
        if 'args' in kwargs:
            args_from_kwargs = kwargs.pop('args')
            if isinstance(args_from_kwargs, list) and not args:
                args = tuple(args_from_kwargs)
                logger.info("converted_args_kwarg", tool_name=tool_name, args=args)

        if 'kwargs' in kwargs:
            kwargs_from_kwargs = kwargs.pop('kwargs')
            if isinstance(kwargs_from_kwargs, dict):
                # Merge the nested kwargs with the outer kwargs
                kwargs.update(kwargs_from_kwargs)
                logger.info("converted_kwargs_kwarg", tool_name=tool_name, kwargs=kwargs)

        # Emit tool_call event
        try:
            # Combine args and kwargs for the event
            event_args = {
                'args': list(args) if args else [],
                **kwargs
            }
            publish_event({
                "event": "tool_call",
                "data": {
                    "tool_id": tool_id,
                    "tool_name": tool_name,
                    "tool_description": tool_description,
                    "arguments": event_args,
                    "step": step_number,
                    "timestamp": datetime.now().isoformat()
                }
            })
            logger.info("tool_call_event_published", tool_name=tool_name, tool_id=tool_id)
        except Exception as e:
            logger.warning("failed_to_emit_tool_call", error=str(e), exc_info=True)

        start_time = time.time()

        try:
            # Execute actual tool
            result = original_func(*args, **kwargs)
            duration = time.time() - start_time

            # Emit tool_result event with success
            try:
                # Truncate large results to avoid overwhelming the stream
                result_str = str(result)[:1000] if result else ""

                publish_event({
                    "event": "tool_result",
                    "data": {
                        "tool_id": tool_id,
                        "tool_name": tool_name,
                        "status": "success",
                        "result": result_str,
                        "duration": duration,
                        "step": step_number,
                        "timestamp": datetime.now().isoformat()
                    }
                })
                logger.info("tool_result_event_published", tool_name=tool_name, tool_id=tool_id, status="success")
            except Exception as e:
                logger.warning("failed_to_emit_tool_result", error=str(e), exc_info=True)

            return result

        except Exception as e:
            duration = time.time() - start_time

            # Emit tool_result event with error
            try:
                publish_event({
                    "event": "tool_result",
                    "data": {
                        "tool_id": tool_id,
                        "tool_name": tool_name,
                        "status": "failed",
                        "error": str(e),
                        "duration": duration,
                        "step": step_number,
                        "timestamp": datetime.now().isoformat()
                    }
                })
            except Exception as emit_error:
                logger.warning("failed_to_emit_tool_error", error=str(emit_error))

            # Re-raise the original exception
            raise

    # Create a copy of the tool with the wrapped function
    try:
        if hasattr(tool, '__dict__'):
            wrapped_tool = copy.copy(tool)
            # Agno tools use 'entrypoint', not 'function'
            if hasattr(tool, 'entrypoint'):
                wrapped_tool.entrypoint = wrapped_function
            elif hasattr(tool, 'function'):
                wrapped_tool.function = wrapped_function
            else:
                # Fallback: just return wrapped function
                return wrapped_function
            return wrapped_tool
        else:
            # If we can't copy it, just return a callable wrapper
            return wrapped_function
    except Exception as e:
        logger.warning("tool_copy_failed", error=str(e), tool=str(tool))
        return tool


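# --- Editor's illustrative sketch (not part of the packaged module) -------------
# create_tool_wrapper also accepts a bare callable (the `callable(tool)` branch),
# which makes it easy to see the emitted event stream without an Agno tool object.
# A hypothetical collector-based usage:
def _example_wrap_plain_callable():
    events = []

    def echo(text: str) -> str:
        return text.upper()

    wrapped = create_tool_wrapper(echo, publish_event=events.append, step_number=1)
    result = wrapped("hello")

    assert result == "HELLO"
    # One "tool_call" event before execution and one "tool_result" event after it.
    assert [e["event"] for e in events] == ["tool_call", "tool_result"]
    return events
# -------------------------------------------------------------------------------

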
def extract_json_from_mixed_content(content: str, logger) -> dict:
    """
    Extract JSON object from mixed text content using multiple strategies.

    This handles various LLM output patterns:
    1. Pure JSON (ideal case)
    2. Markdown code blocks (```json ... ```)
    3. Text with inline JSON (preamble + JSON)
    4. Text with JSON after common prefixes
    5. Last complete JSON object in text

    Args:
        content: Raw string content from LLM
        logger: Structlog logger for debugging

    Returns:
        Parsed JSON as Python dict

    Raises:
        ValueError: If no valid JSON found after all strategies
    """
    import json
    import re

    cleaned = content.strip()

    # Strategy 1: Try direct parse (best case - pure JSON)
    try:
        return json.loads(cleaned)
    except json.JSONDecodeError:
        logger.debug("strategy_1_failed", strategy="direct_parse")

    # Strategy 2: Extract from markdown code block
    if '```' in cleaned:
        # Match: ```json\n{...}\n``` or ```{...}```
        json_match = re.search(r'```(?:json)?\s*(\{.*?\})\s*```', cleaned, re.DOTALL)
        if json_match:
            try:
                return json.loads(json_match.group(1))
            except json.JSONDecodeError:
                logger.debug("strategy_2_failed", strategy="markdown_block")

    # Strategy 3: Find first { to last } (captures JSON in mixed text)
    # This handles: "Here's my analysis: {JSON_HERE} and that's it"
    first_brace = cleaned.find('{')
    last_brace = cleaned.rfind('}')
    if first_brace != -1 and last_brace != -1 and first_brace < last_brace:
        json_candidate = cleaned[first_brace:last_brace + 1]
        try:
            return json.loads(json_candidate)
        except json.JSONDecodeError:
            logger.debug("strategy_3_failed", strategy="first_to_last_brace")

    # Strategy 4: Look for JSON after common prefixes
    # Handles: "Output:\n{JSON}" or "Result: {JSON}"
    common_prefixes = [
        r'(?:output|result|response|analysis|data):\s*(\{.*\})',
        r'(?:here is|here\'s).*?:\s*(\{.*\})',
    ]
    for pattern in common_prefixes:
        match = re.search(pattern, cleaned, re.IGNORECASE | re.DOTALL)
        if match:
            try:
                return json.loads(match.group(1))
            except json.JSONDecodeError:
                continue

    # Strategy 5: Try each complete JSON object (handles multiple JSONs)
    # Find all {...} patterns and try parsing each
    for match in re.finditer(r'\{[^{}]*(?:\{[^{}]*\}[^{}]*)*\}', cleaned, re.DOTALL):
        try:
            return json.loads(match.group(0))
        except json.JSONDecodeError:
            continue

    # All strategies failed
    raise ValueError(
        f"Could not extract valid JSON from content after 5 extraction strategies. "
        f"Content preview: {cleaned[:300]}"
    )


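# --- Editor's illustrative sketch (not part of the packaged module) -------------
# Typical inputs the extractor above has to cope with: a reply wrapped in a markdown
# fence (caught by Strategy 2) and a reply with a chatty preamble (caught by
# Strategy 3). The `logger` argument only needs a .debug() method here.
def _example_extract_json_from_llm_reply():
    fenced = 'Here is the plan:\n```json\n{"entity_id": "abc", "ok": true}\n```'
    chatty = 'Let me analyze this first... {"entity_id": "abc", "ok": true} Hope that helps!'

    assert extract_json_from_mixed_content(fenced, logger)["ok"] is True
    assert extract_json_from_mixed_content(chatty, logger)["entity_id"] == "abc"
# -------------------------------------------------------------------------------

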
def validate_and_fix_step1_output(
    output: 'AnalysisAndSelectionOutput',
    planning_toolkit: Optional['PlanningToolkit'] = None
) -> 'AnalysisAndSelectionOutput':
    """
    Validate and auto-correct Step 1 output to ensure it's always valid.

    If issues are found (None values, invalid UUIDs), attempt to fix them
    automatically using available tools.

    Args:
        output: Step 1 output to validate
        planning_toolkit: Optional toolkit for fetching fallback agent

    Returns:
        Validated and corrected output

    Raises:
        ValueError: If output cannot be corrected
    """
    import re

    logger.info("validating_step1_output",
                entity_id=output.selected_entity_id,
                entity_name=output.selected_entity_name)

    # Check 1: selected_entity_id is not None or "None" string
    if not output.selected_entity_id or output.selected_entity_id in ["None", "null", "undefined"]:
        logger.warning("step1_selected_none_entity",
                       message="Step 1 returned None/null for entity_id, attempting auto-correction")

        # Try to auto-correct using fallback tool
        if planning_toolkit:
            try:
                import json
                fallback_result = planning_toolkit.get_fallback_agent()
                fallback_data = json.loads(fallback_result)

                if fallback_data.get("success") and fallback_data.get("data", {}).get("agent"):
                    fallback_agent = fallback_data["data"]["agent"]
                    output.selected_entity_id = fallback_agent["id"]
                    output.selected_entity_name = fallback_agent["name"]
                    output.selected_entity_type = "agent"
                    output.selection_reasoning += " (Auto-corrected: Used fallback agent due to None value)"

                    logger.info("step1_auto_corrected",
                                new_entity_id=output.selected_entity_id,
                                message="Successfully auto-corrected using fallback")
                else:
                    raise ValueError("Fallback tool did not return valid agent")

            except Exception as e:
                logger.error("step1_auto_correction_failed", error=str(e))
                raise ValueError(
                    f"Step 1 returned None for selected_entity_id and auto-correction failed: {str(e)}"
                )
        else:
            raise ValueError("Step 1 returned None for selected_entity_id and no planning_toolkit available for auto-correction")

    # Check 2: Entity ID is valid UUID format
    uuid_pattern = r'^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$'
    if not re.match(uuid_pattern, output.selected_entity_id, re.IGNORECASE):
        logger.error("step1_invalid_uuid",
                     entity_id=output.selected_entity_id,
                     message="Invalid UUID format detected - this is likely hallucination")
        raise ValueError(
            f"Invalid entity_id format: '{output.selected_entity_id}'. "
            f"Must be valid UUID (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx). "
            f"This looks like a hallucinated ID - the agent must use ACTUAL IDs from tool results."
        )

    # Check 3: Cost and time are reasonable (auto-correct if unreasonable)
    if output.estimated_cost_usd <= 0:
        logger.warning("step1_invalid_cost",
                       original_cost=output.estimated_cost_usd,
                       message="Cost was <= 0, setting to minimum $0.05")
        output.estimated_cost_usd = 0.05  # Minimum realistic cost

    if output.estimated_time_hours <= 0:
        logger.warning("step1_invalid_time",
                       original_time=output.estimated_time_hours,
                       message="Time was <= 0, setting to minimum 0.25 hours")
        output.estimated_time_hours = 0.25  # Minimum realistic time (15 minutes)

    # Check 4: Story points are in valid range
    if output.story_points_estimate < 1:
        logger.warning("step1_invalid_story_points_low",
                       original_points=output.story_points_estimate)
        output.story_points_estimate = 1

    if output.story_points_estimate > 21:
        logger.warning("step1_invalid_story_points_high",
                       original_points=output.story_points_estimate)
        output.story_points_estimate = 21

    # Check 5: Environment is selected (CRITICAL for local execution)
    if not output.selected_environment_id or output.selected_environment_id in ["None", "null", "undefined"]:
        logger.warning("step1_missing_environment",
                       message="Step 1 did not select an environment, attempting auto-correction")

        # Try to get first available environment
        if planning_toolkit:
            try:
                import json
                # Try to get environments from planning service
                environments = planning_toolkit.planning_service.list_environments(limit=20)

                if environments and len(environments) > 0:
                    first_env = environments[0]
                    output.selected_environment_id = first_env.get("id")
                    output.selected_environment_name = first_env.get("name", "Default Environment")
                    output.reasoning += " (Auto-corrected: Selected first available environment)"

                    logger.info("step1_environment_auto_corrected",
                                environment_id=output.selected_environment_id,
                                environment_name=output.selected_environment_name,
                                message="Successfully auto-corrected environment selection")
                else:
                    logger.warning("step1_no_environments_available",
                                   message="No environments available in organization")
                    # Don't fail - let execution fail later with better error message
            except Exception as e:
                logger.warning("step1_environment_auto_correction_failed", error=str(e))
                # Don't fail here - some execution modes might not need environment
        else:
            logger.warning("step1_no_toolkit_for_environment_correction")

    logger.info("step1_validation_passed",
                entity_id=output.selected_entity_id,
                entity_name=output.selected_entity_name,
                environment_id=output.selected_environment_id,
                cost=output.estimated_cost_usd,
                time=output.estimated_time_hours)

    return output


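# --- Editor's illustrative sketch (not part of the packaged module) -------------
# Check 2 above is what catches hallucinated IDs: anything that is not a canonical
# UUID string is rejected before it can reach execution. The same pattern in
# isolation, with one real-looking UUID and one made-up slug:
def _example_uuid_hallucination_check():
    import re
    uuid_pattern = r'^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$'

    assert re.match(uuid_pattern, "123e4567-e89b-12d3-a456-426614174000", re.IGNORECASE)
    assert not re.match(uuid_pattern, "devops-agent-1", re.IGNORECASE)  # would raise ValueError above
# -------------------------------------------------------------------------------

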
def execute_step_with_tool_tracking(
    step: Agent,
    input_data: str,
    publish_event: callable,
    step_number: int,
    max_retries: int = 1
) -> Any:
    """
    Execute a workflow step with validation and retry logic.

    Wraps the step's tools to emit events before/after execution.
    Validates outputs and retries with error feedback if validation fails.

    Args:
        step: The Agno Agent (workflow step) to execute
        input_data: Input string for the agent
        publish_event: Callback to emit streaming events
        step_number: Current workflow step number (1-4)
        max_retries: Maximum number of attempts (default: 1, no retries)

    Returns:
        The step's output (content from agent response)

    Raises:
        ValueError: If validation fails after all retries
    """
    from pydantic import ValidationError

    original_tools = None

    # Retry loop for validation failures
    for attempt in range(max_retries):
        try:
            # Wrap each tool in the step
            if hasattr(step, 'tools') and step.tools and original_tools is None:
                original_tools = step.tools
                wrapped_tools = []

                logger.info("wrapping_step_tools", step=step_number, tool_count=len(original_tools), attempt=attempt + 1)

                for tool in original_tools:
                    tool_name = getattr(tool, 'name', str(tool)[:50])
                    logger.info("wrapping_tool", step=step_number, tool_name=tool_name)
                    wrapped_tool = create_tool_wrapper(tool, publish_event, step_number)
                    wrapped_tools.append(wrapped_tool)

                # Temporarily replace tools
                step.tools = wrapped_tools
                logger.info("tools_replaced", step=step_number, original_count=len(original_tools), wrapped_count=len(wrapped_tools))

            # Execute the step (LiteLLM timeout at 240s provides protection)
            logger.info("executing_step", step=step_number, step_name=step.name, attempt=attempt + 1, max_retries=max_retries)

            # DEBUG: Log tool info before execution
            if hasattr(step, 'tools') and step.tools:
                tool_names = [getattr(t, 'name', str(t)[:30]) for t in step.tools[:5]]
                logger.info("step_tools_before_run", step=step_number, tool_count=len(step.tools), first_5_tools=tool_names)

            # Execute step directly
            result = step.run(input_data)

            logger.info("step_execution_completed", step=step_number, attempt=attempt + 1)

            # Extract reasoning from agent messages if available (skip in quick mode for speed)
            # Quick mode flag is passed from the workflow context
            skip_reasoning = getattr(step, '_quick_mode', False)
            if not skip_reasoning:
                try:
                    from datetime import datetime
                    if hasattr(result, 'messages') and result.messages:
                        for message in result.messages:
                            # Check for assistant messages with content (reasoning/thinking)
                            if hasattr(message, 'role') and message.role == 'assistant':
                                if hasattr(message, 'content') and message.content:
                                    # Extract text content (reasoning before tool calls)
                                    reasoning_text = message.content if isinstance(message.content, str) else str(message.content)
                                    if reasoning_text and len(reasoning_text) > 20:  # Filter out very short messages
                                        publish_event({
                                            "event": "thinking",
                                            "data": {
                                                "content": reasoning_text,
                                                "step": step_number,
                                                "step_name": step.name,
                                                "timestamp": datetime.now().isoformat()
                                            }
                                        })
                                        logger.info("reasoning_event_published", step=step_number, reasoning_length=len(reasoning_text))
                except Exception as e:
                    logger.warning("failed_to_extract_reasoning", error=str(e), exc_info=False)

            # Extract content from result
            content = result.content if hasattr(result, 'content') else result

            # CRITICAL VALIDATION: Check if Agno's parsing failed (returned None)
            if content is None:
                raise ValueError(
                    f"Step {step_number} ({step.name}) returned None. "
                    f"This indicates validation failed during Agno's parsing. "
                    f"The LLM output did not match the expected schema."
                )

            # UNIVERSAL FIX: If output is a string, try to parse it as JSON
            # This handles cases where Agno fails to parse LLM output
            if isinstance(content, str) and hasattr(step, 'output_schema'):
                logger.warning("step_output_is_string", step=step_number, message="Attempting manual JSON parsing")
                original_content = content  # Save original for logging
                try:
                    # Use multi-strategy JSON extraction
                    content_dict = extract_json_from_mixed_content(content, logger)

                    # Validate with Pydantic model
                    content = step.output_schema.model_validate(content_dict)
                    logger.info("manual_json_parsing_succeeded", step=step_number)

                    # Log if we had to extract from mixed content (indicates LLM didn't follow instructions)
                    if not original_content.strip().startswith('{'):
                        logger.warning(
                            "llm_added_preamble_text",
                            step=step_number,
                            message="LLM added text before JSON despite output_schema constraint"
                        )
                except ValueError as extract_error:
                    # JSON extraction failed
                    raise ValueError(
                        f"Step {step_number} output validation failed - could not extract valid JSON. "
                        f"Expected {step.output_schema.__name__}. "
                        f"Extraction error: {str(extract_error)}. "
                        f"Content preview: {str(content)[:500]}"
                    )
                except Exception as parse_error:
                    # Pydantic validation failed
                    raise ValueError(
                        f"Step {step_number} output validation failed - JSON extracted but schema validation failed. "
                        f"Expected {step.output_schema.__name__}. "
                        f"Validation error: {str(parse_error)}. "
                        f"Content preview: {str(content)[:500]}"
                    )

            # CRITICAL VALIDATION: For Step 2, explicitly validate entity IDs
            if step_number == 2 and hasattr(step, 'output_schema'):
                # Validate type after universal string parsing above
                if not isinstance(content, step.output_schema):
                    raise ValueError(
                        f"Step {step_number} output validation failed. "
                        f"Expected {step.output_schema.__name__}, got {type(content).__name__}. "
                        f"Content: {str(content)[:200]}"
                    )

                # Additional explicit validation for ResourceDiscoveryOutput
                if isinstance(content, ResourceDiscoveryOutput):
                    # Manually re-validate to catch any issues Agno suppressed
                    _validate_resource_discovery(content)

            logger.info("step_validation_passed", step=step_number, attempt=attempt + 1)
            return content  # Success!

        except (ValueError, ValidationError) as e:
            logger.warning(
                "step_validation_failed",
                step=step_number,
                attempt=attempt + 1,
                max_retries=max_retries,
                error=str(e),
                exc_info=True
            )

            # Emit validation error event for monitoring
            try:
                publish_event({
                    "event": "validation_error",
                    "data": {
                        "step": step_number,
                        "attempt": attempt + 1,
                        "error": str(e),
                        "retrying": attempt < max_retries - 1
                    }
                })
            except Exception as emit_error:
                logger.warning("failed_to_emit_validation_error", error=str(emit_error))

            if attempt < max_retries - 1:
                # Build retry input with explicit error feedback to the LLM
                retry_input = f"""
🚨 VALIDATION ERROR - Your previous output was REJECTED 🚨

Error Details:
{str(e)}

CRITICAL: You MUST output ONLY valid JSON, with NO explanatory text before or after.

Common mistakes to avoid:
❌ Adding reasoning before JSON: "Let me analyze this... {{json}}"
❌ Using markdown code blocks: ```json {{...}} ```
❌ Adding text after JSON: "{{json}} and that's my analysis"

✅ CORRECT FORMAT: Start your response directly with {{ and end with }}

Additional Requirements:
1. Using ONLY IDs from actual tool call results (do not invent or guess IDs)
2. NOT hallucinating any entity IDs or names
3. Copying the exact ID strings from tool outputs
4. Double-checking that your recommended_entity_id exists in discovered_agents/discovered_teams

Original Task:
{input_data}

OUTPUT FORMAT REMINDER: Your response must be PURE JSON starting with {{ and ending with }}

Try again with the corrections above. This is attempt {attempt + 2} of {max_retries}.
"""
                input_data = retry_input
                logger.info("retrying_step_with_feedback", step=step_number, attempt=attempt + 2)
                continue  # Retry with error feedback
            else:
                # Final attempt failed - raise with full context
                raise ValueError(
                    f"Step {step_number} ({step.name}) failed validation after {max_retries} attempts. "
                    f"Final error: {str(e)}"
                )

        except Exception as e:
            # Non-validation error (e.g., execution error)
            logger.error("step_execution_failed", step=step_number, error=str(e), exc_info=True)
            raise

        finally:
            # Restore original tools if we wrapped them
            if original_tools is not None and hasattr(step, 'tools'):
                step.tools = original_tools

    # Should never reach here
    raise ValueError(f"Step {step_number} execution logic error - exhausted all retries")


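# --- Editor's illustrative sketch (not part of the packaged module) -------------
# The tracking harness above only needs an object with .name, .tools and .run(),
# so its control flow can be exercised with a tiny stand-in step (no LLM call).
# Everything about _FakeStep is hypothetical and exists only for illustration.
def _example_step_tracking_with_fake_step():
    class _FakeStep:
        name = "Demo Step"
        tools = []

        def run(self, input_data):
            return {"echo": input_data}  # a dict passes straight through as the content

    events = []
    content = execute_step_with_tool_tracking(
        step=_FakeStep(),
        input_data="hello",
        publish_event=events.append,
        step_number=1,
        max_retries=1,
    )
    assert content == {"echo": "hello"}
    return events
# -------------------------------------------------------------------------------

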
def run_planning_workflow_stream(
    workflow: Workflow,
    task_request: TaskPlanRequest,
    publish_event: callable,
    quick_mode: bool = False
) -> TaskPlanResponse:
    """
    Run the planning workflow with real-time step-by-step streaming and tool event tracking.

    This implementation manually executes each workflow step, intercepting tool calls
    to provide detailed real-time progress updates including:
    - Step start/complete events with structured outputs
    - Tool execution events (call + result) with timing
    - Actual progress based on step completion

    Args:
        workflow: The planning workflow instance
        task_request: Task plan request
        publish_event: Callback to emit streaming events

    Returns:
        TaskPlanResponse from the final workflow step
    """
    try:
        # Build workflow input
        workflow_input = f"""
Task: {task_request.description}
Priority: {task_request.priority}
Context: {task_request.conversation_context or 'New task'}

Analyze this task systematically through the workflow steps.
"""

        logger.info("workflow_runner_starting", input_length=len(workflow_input), steps=len(workflow.steps))

        # Emit initial progress with informative message
        publish_event({
            "event": "progress",
            "data": {
                "stage": "initializing",
                "message": "🚀 Initializing AI Task Planner - analyzing your request and preparing to discover available resources...",
                "progress": 10
            }
        })

        # Store outputs from each step
        step_outputs = {}
        current_input = workflow_input

        # Manually execute each step with tool tracking
        for i, step in enumerate(workflow.steps, 1):
            # Track step execution time for monitoring
            step_start_time = time.time()

            # Mark step with quick_mode flag if in quick mode (for skipping verbose reasoning)
            if quick_mode:
                step._quick_mode = True

            logger.info("starting_workflow_step", step=i, step_name=step.name, quick_mode=quick_mode)

            # Emit step_started event
            step_progress = STEP_PROGRESS_MAP.get(i, 10 + (i * 20))
            publish_event({
                "event": "step_started",
                "data": {
                    "step": i,
                    "step_name": step.name,
                    "step_description": STEP_DESCRIPTIONS.get(i, f"Executing {step.name}"),
                    "progress": step_progress
                }
            })

            # Also emit explicit progress event for better UX (with friendly stage names)
            publish_event({
                "event": "progress",
                "data": {
                    "stage": STEP_STAGE_NAMES.get(i, f"step_{i}"),  # Use friendly names like "analyzing", "planning"
                    "message": STEP_DESCRIPTIONS.get(i, f"Executing {step.name}"),
                    "progress": step_progress
                }
            })

            # Execute step with tool tracking AND validation (2-STEP WORKFLOW)
            # Step 1 (Analysis & Resource Selection) gets 2 retries for validation failures
            # Step 2 (Plan Generation) gets 1 attempt since it just generates plan from Step 1 output
            step_result = execute_step_with_tool_tracking(
                step=step,
                input_data=current_input,
                publish_event=publish_event,
                step_number=i,
                max_retries=2 if i == 1 else 1  # Step 1 (Analysis & Selection) gets 2 retries, Step 2 gets 1
            )

            # PHASE 1 IMPROVEMENT: Validate and auto-correct Step 1 output
            if i == 1:
                from control_plane_api.app.models.task_planning import AnalysisAndSelectionOutput
                if isinstance(step_result, AnalysisAndSelectionOutput):
                    try:
                        # Get planning toolkit from workflow context if available
                        planning_toolkit = None
                        if hasattr(workflow, '_planning_toolkit'):
                            planning_toolkit = workflow._planning_toolkit

                        # Validate and auto-correct if needed
                        step_result = validate_and_fix_step1_output(
                            output=step_result,
                            planning_toolkit=planning_toolkit
                        )
                        logger.info("step1_validated_successfully",
                                    entity_id=step_result.selected_entity_id)

                    except Exception as validation_error:
                        logger.error("step1_validation_failed",
                                     error=str(validation_error),
                                     exc_info=True)
                        # Re-raise to trigger retry or fail the workflow
                        raise ValueError(f"Step 1 validation failed: {str(validation_error)}")

            # Store the output
            step_outputs[f"step_{i}"] = step_result

            # Emit step_completed event with structured output
            try:
                # Try to convert output to dict for JSON serialization
                if hasattr(step_result, 'model_dump'):
                    output_dict = step_result.model_dump()
                elif hasattr(step_result, 'dict'):
                    output_dict = step_result.dict()
                elif isinstance(step_result, dict):
                    output_dict = step_result
                else:
                    output_dict = {"output": str(step_result)}

                # Calculate step execution time
                step_duration = time.time() - step_start_time

                # Build informative completion message
                completion_message = None
                if i == 1 and hasattr(step_result, 'selected_entity_name'):
                    # Step 1: Mention what was discovered and selected
                    entity_type = getattr(step_result, 'selected_entity_type', 'entity')
                    entity_name = getattr(step_result, 'selected_entity_name', 'Unknown')
                    completion_message = f"✅ Selected {entity_type}: {entity_name} for task execution"
                elif i == 2:
                    # Step 2: Mention plan is ready for local execution
                    completion_message = "✅ Execution plan ready - will run on your local session compute"

                event_data = {
                    "step": i,
                    "step_name": step.name,
                    "output": output_dict,
                    "progress": STEP_PROGRESS_MAP.get(i, 10 + (i * 20)),
                    "duration_seconds": round(step_duration, 2)
                }

                if completion_message:
                    event_data["message"] = completion_message

                publish_event({
                    "event": "step_completed",
                    "data": event_data
                })

                logger.info(
                    "workflow_step_completed",
                    step=i,
                    step_name=step.name,
                    duration_seconds=round(step_duration, 2)
                )

            except Exception as e:
                logger.warning("failed_to_emit_step_completed", step=i, error=str(e))

            # Build input for next step (combine previous context with new output)
            if i < len(workflow.steps):
                # Pass the output to the next step
                if hasattr(step_result, 'model_dump_json'):
                    step_output_str = step_result.model_dump_json(indent=2)
                elif isinstance(step_result, dict):
                    step_output_str = json.dumps(step_result, indent=2)
                else:
                    step_output_str = str(step_result)

                current_input = f"""
Original Task:
{workflow_input}

Previous Step Output ({step.name}):
{step_output_str}

Continue to the next step of the workflow.
"""

        # Extract final result from LAST step (could be step 1 for fast workflow or step 4 for full workflow)
        num_steps = len(workflow.steps)
        last_step_key = f"step_{num_steps}"
        final_result = step_outputs.get(last_step_key)

        if not final_result:
            logger.error("no_final_result", step_outputs_keys=list(step_outputs.keys()), expected_key=last_step_key)
            raise ValueError(f"Workflow completed but {last_step_key} returned no result")

        # Handle FastSelectionOutput (from 1-step fast workflow)
        if isinstance(final_result, FastSelectionOutput):
            # Convert FastSelectionOutput to TaskPlanResponse
            plan = convert_fast_output_to_plan(final_result, task_request)
            logger.info("converted_fast_output_to_plan", entity_type=final_result.recommended_entity_type)
        # Handle TaskPlanResponse (from 4-step full workflow)
        elif isinstance(final_result, TaskPlanResponse):
            plan = final_result
        elif isinstance(final_result, dict):
            # Try to detect if this is FastSelectionOutput dict
            if 'recommended_entity_type' in final_result and 'reasoning' in final_result and 'title' not in final_result:
                fast_output = FastSelectionOutput(**final_result)
                plan = convert_fast_output_to_plan(fast_output, task_request)
                logger.info("converted_fast_output_dict_to_plan")
            else:
                plan = TaskPlanResponse(**final_result)
        else:
            raise ValueError(f"Last step returned unexpected type: {type(final_result)}")

        # Success
        logger.info("workflow_completed_successfully", title=plan.title, steps_executed=len(step_outputs))
        publish_event({
            "event": "progress",
            "data": {
                "stage": "completed",
                "message": f"✅ Execution plan '{plan.title}' generated successfully! Ready to run on your local session compute.",
                "progress": 100
            }
        })

        return plan

    except Exception as e:
        logger.error("workflow_failed", error=str(e), exc_info=True)
        publish_event({
            "event": "error",
            "data": {"message": f"Workflow failed: {str(e)}"}
        })
        raise