kubiya-control-plane-api 0.1.0__py3-none-any.whl → 0.3.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of kubiya-control-plane-api might be problematic; see the registry listing for more details.
- control_plane_api/README.md +266 -0
- control_plane_api/__init__.py +0 -0
- control_plane_api/__version__.py +1 -0
- control_plane_api/alembic/README +1 -0
- control_plane_api/alembic/env.py +98 -0
- control_plane_api/alembic/script.py.mako +28 -0
- control_plane_api/alembic/versions/1382bec74309_initial_migration_with_all_models.py +251 -0
- control_plane_api/alembic/versions/1f54bc2a37e3_add_analytics_tables.py +162 -0
- control_plane_api/alembic/versions/2e4cb136dc10_rename_toolset_ids_to_skill_ids_in_teams.py +30 -0
- control_plane_api/alembic/versions/31cd69a644ce_add_skill_templates_table.py +28 -0
- control_plane_api/alembic/versions/89e127caa47d_add_jobs_and_job_executions_tables.py +161 -0
- control_plane_api/alembic/versions/add_llm_models_table.py +51 -0
- control_plane_api/alembic/versions/b0e10697f212_add_runtime_column_to_teams_simple.py +42 -0
- control_plane_api/alembic/versions/ce43b24b63bf_add_execution_trigger_source_and_fix_.py +155 -0
- control_plane_api/alembic/versions/d4eaf16e3f8d_rename_toolsets_to_skills.py +84 -0
- control_plane_api/alembic/versions/efa2dc427da1_rename_metadata_to_custom_metadata.py +32 -0
- control_plane_api/alembic/versions/f973b431d1ce_add_workflow_executor_to_skill_types.py +44 -0
- control_plane_api/alembic.ini +148 -0
- control_plane_api/api/index.py +12 -0
- control_plane_api/app/__init__.py +11 -0
- control_plane_api/app/activities/__init__.py +20 -0
- control_plane_api/app/activities/agent_activities.py +379 -0
- control_plane_api/app/activities/team_activities.py +410 -0
- control_plane_api/app/activities/temporal_cloud_activities.py +577 -0
- control_plane_api/app/config/__init__.py +35 -0
- control_plane_api/app/config/api_config.py +354 -0
- control_plane_api/app/config/model_pricing.py +318 -0
- control_plane_api/app/config.py +95 -0
- control_plane_api/app/database.py +135 -0
- control_plane_api/app/exceptions.py +408 -0
- control_plane_api/app/lib/__init__.py +11 -0
- control_plane_api/app/lib/job_executor.py +312 -0
- control_plane_api/app/lib/kubiya_client.py +235 -0
- control_plane_api/app/lib/litellm_pricing.py +166 -0
- control_plane_api/app/lib/planning_tools/__init__.py +22 -0
- control_plane_api/app/lib/planning_tools/agents.py +155 -0
- control_plane_api/app/lib/planning_tools/base.py +189 -0
- control_plane_api/app/lib/planning_tools/environments.py +214 -0
- control_plane_api/app/lib/planning_tools/resources.py +240 -0
- control_plane_api/app/lib/planning_tools/teams.py +198 -0
- control_plane_api/app/lib/policy_enforcer_client.py +939 -0
- control_plane_api/app/lib/redis_client.py +436 -0
- control_plane_api/app/lib/supabase.py +71 -0
- control_plane_api/app/lib/temporal_client.py +138 -0
- control_plane_api/app/lib/validation/__init__.py +20 -0
- control_plane_api/app/lib/validation/runtime_validation.py +287 -0
- control_plane_api/app/main.py +128 -0
- control_plane_api/app/middleware/__init__.py +8 -0
- control_plane_api/app/middleware/auth.py +513 -0
- control_plane_api/app/middleware/exception_handler.py +267 -0
- control_plane_api/app/middleware/rate_limiting.py +384 -0
- control_plane_api/app/middleware/request_id.py +202 -0
- control_plane_api/app/models/__init__.py +27 -0
- control_plane_api/app/models/agent.py +79 -0
- control_plane_api/app/models/analytics.py +206 -0
- control_plane_api/app/models/associations.py +81 -0
- control_plane_api/app/models/environment.py +63 -0
- control_plane_api/app/models/execution.py +93 -0
- control_plane_api/app/models/job.py +179 -0
- control_plane_api/app/models/llm_model.py +75 -0
- control_plane_api/app/models/presence.py +49 -0
- control_plane_api/app/models/project.py +47 -0
- control_plane_api/app/models/session.py +38 -0
- control_plane_api/app/models/team.py +66 -0
- control_plane_api/app/models/workflow.py +55 -0
- control_plane_api/app/policies/README.md +121 -0
- control_plane_api/app/policies/approved_users.rego +62 -0
- control_plane_api/app/policies/business_hours.rego +51 -0
- control_plane_api/app/policies/rate_limiting.rego +100 -0
- control_plane_api/app/policies/tool_restrictions.rego +86 -0
- control_plane_api/app/routers/__init__.py +4 -0
- control_plane_api/app/routers/agents.py +364 -0
- control_plane_api/app/routers/agents_v2.py +1260 -0
- control_plane_api/app/routers/analytics.py +1014 -0
- control_plane_api/app/routers/context_manager.py +562 -0
- control_plane_api/app/routers/environment_context.py +270 -0
- control_plane_api/app/routers/environments.py +715 -0
- control_plane_api/app/routers/execution_environment.py +517 -0
- control_plane_api/app/routers/executions.py +1911 -0
- control_plane_api/app/routers/health.py +92 -0
- control_plane_api/app/routers/health_v2.py +326 -0
- control_plane_api/app/routers/integrations.py +274 -0
- control_plane_api/app/routers/jobs.py +1344 -0
- control_plane_api/app/routers/models.py +82 -0
- control_plane_api/app/routers/models_v2.py +361 -0
- control_plane_api/app/routers/policies.py +639 -0
- control_plane_api/app/routers/presence.py +234 -0
- control_plane_api/app/routers/projects.py +902 -0
- control_plane_api/app/routers/runners.py +379 -0
- control_plane_api/app/routers/runtimes.py +172 -0
- control_plane_api/app/routers/secrets.py +155 -0
- control_plane_api/app/routers/skills.py +1001 -0
- control_plane_api/app/routers/skills_definitions.py +140 -0
- control_plane_api/app/routers/task_planning.py +1256 -0
- control_plane_api/app/routers/task_queues.py +654 -0
- control_plane_api/app/routers/team_context.py +270 -0
- control_plane_api/app/routers/teams.py +1400 -0
- control_plane_api/app/routers/worker_queues.py +1545 -0
- control_plane_api/app/routers/workers.py +935 -0
- control_plane_api/app/routers/workflows.py +204 -0
- control_plane_api/app/runtimes/__init__.py +6 -0
- control_plane_api/app/runtimes/validation.py +344 -0
- control_plane_api/app/schemas/job_schemas.py +295 -0
- control_plane_api/app/services/__init__.py +1 -0
- control_plane_api/app/services/agno_service.py +619 -0
- control_plane_api/app/services/litellm_service.py +190 -0
- control_plane_api/app/services/policy_service.py +525 -0
- control_plane_api/app/services/temporal_cloud_provisioning.py +150 -0
- control_plane_api/app/skills/__init__.py +44 -0
- control_plane_api/app/skills/base.py +229 -0
- control_plane_api/app/skills/business_intelligence.py +189 -0
- control_plane_api/app/skills/data_visualization.py +154 -0
- control_plane_api/app/skills/docker.py +104 -0
- control_plane_api/app/skills/file_generation.py +94 -0
- control_plane_api/app/skills/file_system.py +110 -0
- control_plane_api/app/skills/python.py +92 -0
- control_plane_api/app/skills/registry.py +65 -0
- control_plane_api/app/skills/shell.py +102 -0
- control_plane_api/app/skills/workflow_executor.py +469 -0
- control_plane_api/app/utils/workflow_executor.py +354 -0
- control_plane_api/app/workflows/__init__.py +11 -0
- control_plane_api/app/workflows/agent_execution.py +507 -0
- control_plane_api/app/workflows/agent_execution_with_skills.py +222 -0
- control_plane_api/app/workflows/namespace_provisioning.py +326 -0
- control_plane_api/app/workflows/team_execution.py +399 -0
- control_plane_api/scripts/seed_models.py +239 -0
- control_plane_api/worker/__init__.py +0 -0
- control_plane_api/worker/activities/__init__.py +0 -0
- control_plane_api/worker/activities/agent_activities.py +1241 -0
- control_plane_api/worker/activities/approval_activities.py +234 -0
- control_plane_api/worker/activities/runtime_activities.py +388 -0
- control_plane_api/worker/activities/skill_activities.py +267 -0
- control_plane_api/worker/activities/team_activities.py +1217 -0
- control_plane_api/worker/config/__init__.py +31 -0
- control_plane_api/worker/config/worker_config.py +275 -0
- control_plane_api/worker/control_plane_client.py +529 -0
- control_plane_api/worker/examples/analytics_integration_example.py +362 -0
- control_plane_api/worker/models/__init__.py +1 -0
- control_plane_api/worker/models/inputs.py +89 -0
- control_plane_api/worker/runtimes/__init__.py +31 -0
- control_plane_api/worker/runtimes/base.py +789 -0
- control_plane_api/worker/runtimes/claude_code_runtime.py +1443 -0
- control_plane_api/worker/runtimes/default_runtime.py +617 -0
- control_plane_api/worker/runtimes/factory.py +173 -0
- control_plane_api/worker/runtimes/validation.py +93 -0
- control_plane_api/worker/services/__init__.py +1 -0
- control_plane_api/worker/services/agent_executor.py +422 -0
- control_plane_api/worker/services/agent_executor_v2.py +383 -0
- control_plane_api/worker/services/analytics_collector.py +457 -0
- control_plane_api/worker/services/analytics_service.py +464 -0
- control_plane_api/worker/services/approval_tools.py +310 -0
- control_plane_api/worker/services/approval_tools_agno.py +207 -0
- control_plane_api/worker/services/cancellation_manager.py +177 -0
- control_plane_api/worker/services/data_visualization.py +827 -0
- control_plane_api/worker/services/jira_tools.py +257 -0
- control_plane_api/worker/services/runtime_analytics.py +328 -0
- control_plane_api/worker/services/session_service.py +194 -0
- control_plane_api/worker/services/skill_factory.py +175 -0
- control_plane_api/worker/services/team_executor.py +574 -0
- control_plane_api/worker/services/team_executor_v2.py +465 -0
- control_plane_api/worker/services/workflow_executor_tools.py +1418 -0
- control_plane_api/worker/tests/__init__.py +1 -0
- control_plane_api/worker/tests/e2e/__init__.py +0 -0
- control_plane_api/worker/tests/e2e/test_execution_flow.py +571 -0
- control_plane_api/worker/tests/integration/__init__.py +0 -0
- control_plane_api/worker/tests/integration/test_control_plane_integration.py +308 -0
- control_plane_api/worker/tests/unit/__init__.py +0 -0
- control_plane_api/worker/tests/unit/test_control_plane_client.py +401 -0
- control_plane_api/worker/utils/__init__.py +1 -0
- control_plane_api/worker/utils/chunk_batcher.py +305 -0
- control_plane_api/worker/utils/retry_utils.py +60 -0
- control_plane_api/worker/utils/streaming_utils.py +373 -0
- control_plane_api/worker/worker.py +753 -0
- control_plane_api/worker/workflows/__init__.py +0 -0
- control_plane_api/worker/workflows/agent_execution.py +589 -0
- control_plane_api/worker/workflows/team_execution.py +429 -0
- kubiya_control_plane_api-0.3.4.dist-info/METADATA +229 -0
- kubiya_control_plane_api-0.3.4.dist-info/RECORD +182 -0
- kubiya_control_plane_api-0.3.4.dist-info/entry_points.txt +2 -0
- kubiya_control_plane_api-0.3.4.dist-info/top_level.txt +1 -0
- kubiya_control_plane_api-0.1.0.dist-info/METADATA +0 -66
- kubiya_control_plane_api-0.1.0.dist-info/RECORD +0 -5
- kubiya_control_plane_api-0.1.0.dist-info/top_level.txt +0 -1
- {kubiya_control_plane_api-0.1.0.dist-info/licenses → control_plane_api}/LICENSE +0 -0
- {kubiya_control_plane_api-0.1.0.dist-info → kubiya_control_plane_api-0.3.4.dist-info}/WHEEL +0 -0
control_plane_api/app/workflows/team_execution.py
@@ -0,0 +1,399 @@

```python
"""Team execution workflow for Temporal"""

from dataclasses import dataclass, field
from datetime import timedelta
from typing import Optional, List, Dict, Any
from temporalio import workflow
import asyncio

with workflow.unsafe.imports_passed_through():
    from control_plane_api.app.activities.team_activities import (
        get_team_agents,
        execute_team_coordination,
        update_execution_status,
        ActivityGetTeamAgentsInput,
        ActivityExecuteTeamInput,
        ActivityUpdateExecutionInput,
    )


@dataclass
class TeamExecutionInput:
    """Input for team execution workflow"""
    execution_id: str
    team_id: str
    organization_id: str
    prompt: str
    system_prompt: Optional[str] = None
    model_id: Optional[str] = None
    model_config: dict = None
    team_config: dict = None
    mcp_servers: dict = None
    user_metadata: dict = None

    def __post_init__(self):
        if self.model_config is None:
            self.model_config = {}
        if self.team_config is None:
            self.team_config = {}
        if self.mcp_servers is None:
            self.mcp_servers = {}
        if self.user_metadata is None:
            self.user_metadata = {}


@dataclass
class ChatMessage:
    """Represents a message in the conversation"""
    role: str  # "user", "assistant", "system", "tool"
    content: str
    timestamp: str
    tool_name: Optional[str] = None
    tool_input: Optional[Dict[str, Any]] = None
    tool_output: Optional[Dict[str, Any]] = None
    # User attribution for messages
    user_id: Optional[str] = None
    user_name: Optional[str] = None
    user_email: Optional[str] = None
    user_avatar: Optional[str] = None


@dataclass
class ExecutionState:
    """Current state of the execution for queries"""
    status: str  # "pending", "running", "waiting_for_input", "completed", "failed"
    messages: List[ChatMessage] = field(default_factory=list)
    current_response: str = ""
    error_message: Optional[str] = None
    usage: Dict[str, Any] = field(default_factory=dict)
    metadata: Dict[str, Any] = field(default_factory=dict)
    is_waiting_for_input: bool = False
    should_complete: bool = False


@workflow.defn
class TeamExecutionWorkflow:
    """
    Workflow for executing a team of agents with HITL support.

    This workflow:
    1. Gets team agents
    2. Coordinates execution across agents
    3. Aggregates results
    4. Updates execution status
    5. Supports queries for real-time state access
    6. Supports signals for adding followup messages
    """

    def __init__(self) -> None:
        """Initialize workflow state"""
        self._state = ExecutionState(status="pending")
        self._lock = asyncio.Lock()
        self._new_message_count = 0
        self._processed_message_count = 0

    @workflow.query
    def get_state(self) -> ExecutionState:
        """Query handler: Get current execution state including messages and status"""
        return self._state

    @workflow.signal
    async def add_message(self, message: ChatMessage) -> None:
        """
        Signal handler: Add a message to the conversation.
        This allows clients to send followup messages while the workflow is running.
        """
        async with self._lock:
            self._state.messages.append(message)
            self._new_message_count += 1
            self._state.is_waiting_for_input = False
            workflow.logger.info(
                f"Message added to team conversation",
                extra={
                    "role": message.role,
                    "content_preview": message.content[:100] if message.content else "",
                    "total_messages": len(self._state.messages)
                }
            )

    @workflow.signal
    async def mark_as_done(self) -> None:
        """
        Signal handler: Mark the workflow as complete.
        """
        async with self._lock:
            self._state.should_complete = True
            self._state.is_waiting_for_input = False
            workflow.logger.info("Team workflow marked as done by user")

    @workflow.run
    async def run(self, input: TeamExecutionInput) -> dict:
        """
        Run the team execution workflow with HITL pattern.

        This workflow implements a continuous conversation loop:
        1. Process the initial user message
        2. Execute team coordination and return response
        3. Wait for user input (signals)
        4. Process followup messages in a loop
        5. Only complete when user explicitly marks as done

        Args:
            input: Workflow input with team execution details

        Returns:
            Team execution result dict
        """
        workflow.logger.info(
            f"Starting team execution workflow with HITL pattern",
            extra={
                "execution_id": input.execution_id,
                "team_id": input.team_id,
                "organization_id": input.organization_id,
            }
        )

        # Initialize state with user's initial message
        self._state.messages.append(ChatMessage(
            role="user",
            content=input.prompt,
            timestamp=workflow.now().isoformat(),
        ))
        self._state.status = "running"
        self._new_message_count = 1
        self._processed_message_count = 0

        try:
            # Step 1: Update execution status to running
            await workflow.execute_activity(
                update_execution_status,
                ActivityUpdateExecutionInput(
                    execution_id=input.execution_id,
                    status="running",
                    started_at=workflow.now().isoformat(),
                    execution_metadata={
                        "workflow_started": True,
                        "hitl_enabled": True,
                    },
                ),
                start_to_close_timeout=timedelta(seconds=30),
            )

            # Step 2: Get team agents once at the beginning
            workflow.logger.info(
                f"[WORKFLOW] About to call get_team_agents",
                extra={
                    "team_id": input.team_id,
                    "organization_id": input.organization_id,
                }
            )

            team_agents = await workflow.execute_activity(
                get_team_agents,
                ActivityGetTeamAgentsInput(
                    team_id=input.team_id,
                    organization_id=input.organization_id,
                ),
                start_to_close_timeout=timedelta(seconds=30),
            )

            workflow.logger.info(
                f"[WORKFLOW] get_team_agents returned",
                extra={
                    "result": team_agents,
                    "agents_count": len(team_agents.get("agents", [])) if team_agents else 0,
                }
            )

            if not team_agents.get("agents"):
                workflow.logger.error(
                    f"[WORKFLOW] NO AGENTS RETURNED!",
                    extra={
                        "team_agents": team_agents,
                        "team_id": input.team_id,
                        "organization_id": input.organization_id,
                    }
                )
                raise ValueError("No agents found in team")

            # HITL Conversation Loop - Continue until user marks as done
            conversation_turn = 0
            while not self._state.should_complete:
                conversation_turn += 1
                workflow.logger.info(
                    f"Starting team conversation turn {conversation_turn}",
                    extra={"turn": conversation_turn, "message_count": len(self._state.messages)}
                )

                # Get the latest user message
                latest_message = self._state.messages[-1] if self._state.messages else None
                latest_prompt = latest_message.content if latest_message and latest_message.role == "user" else input.prompt

                # Step 3: Execute team coordination
                team_result = await workflow.execute_activity(
                    execute_team_coordination,
                    ActivityExecuteTeamInput(
                        execution_id=input.execution_id,
                        team_id=input.team_id,
                        organization_id=input.organization_id,
                        prompt=latest_prompt,
                        system_prompt=input.system_prompt,
                        agents=team_agents["agents"],
                        team_config=input.team_config,
                        mcp_servers=input.mcp_servers,  # Pass MCP servers
                        session_id=input.execution_id,  # Use execution_id as session_id for 1:1 mapping
                        user_id=input.user_metadata.get("user_id") if input.user_metadata else None,  # Extract user_id from JWT
                    ),
                    start_to_close_timeout=timedelta(minutes=30),  # Teams can take longer
                )

                # Update state with team response
                if team_result.get("response"):
                    async with self._lock:
                        self._state.messages.append(ChatMessage(
                            role="assistant",
                            content=team_result["response"],
                            timestamp=workflow.now().isoformat(),
                        ))
                        self._state.current_response = team_result["response"]
                        self._processed_message_count += 1

                # Update usage and metadata (accumulate across turns)
                if team_result.get("usage"):
                    current_usage = self._state.usage
                    new_usage = team_result.get("usage", {})
                    self._state.usage = {
                        "input_tokens": current_usage.get("input_tokens", 0) + new_usage.get("input_tokens", 0),
                        "output_tokens": current_usage.get("output_tokens", 0) + new_usage.get("output_tokens", 0),
                        "total_tokens": current_usage.get("total_tokens", 0) + new_usage.get("total_tokens", 0),
                    }

                # Update metadata
                self._state.metadata.update({
                    "agent_count": len(team_agents["agents"]),
                    "coordination_type": team_result.get("coordination_type"),
                    "conversation_turns": conversation_turn,
                })

                # Check if team execution failed
                if not team_result.get("success"):
                    self._state.status = "failed"
                    self._state.error_message = team_result.get("error")
                    break

                # Update execution status to waiting_for_input
                self._state.status = "waiting_for_input"
                self._state.is_waiting_for_input = True

                # Update database to reflect waiting state
                await workflow.execute_activity(
                    update_execution_status,
                    ActivityUpdateExecutionInput(
                        execution_id=input.execution_id,
                        status="waiting_for_input",
                        response=self._state.current_response,
                        usage=self._state.usage,
                        execution_metadata={
                            **self._state.metadata,
                            "conversation_turns": conversation_turn,
                            "waiting_for_user": True,
                        },
                    ),
                    start_to_close_timeout=timedelta(seconds=30),
                )

                workflow.logger.info(
                    f"Waiting for user input after team turn {conversation_turn}",
                    extra={"turn": conversation_turn}
                )

                # Wait for either new message or mark as done
                await workflow.wait_condition(
                    lambda: self._new_message_count > self._processed_message_count or self._state.should_complete,
                    timeout=timedelta(hours=24)
                )

                if self._state.should_complete:
                    workflow.logger.info("User marked team workflow as done")
                    break

                # Continue loop to process new message
                self._state.status = "running"

            # Conversation complete - finalize workflow
            final_status = "failed" if self._state.status == "failed" else "completed"
            self._state.status = final_status

            await workflow.execute_activity(
                update_execution_status,
                ActivityUpdateExecutionInput(
                    execution_id=input.execution_id,
                    status=final_status,
                    completed_at=workflow.now().isoformat(),
                    response=self._state.current_response,
                    error_message=self._state.error_message,
                    usage=self._state.usage,
                    execution_metadata={
                        **self._state.metadata,
                        "workflow_completed": True,
                        "total_conversation_turns": conversation_turn,
                    },
                ),
                start_to_close_timeout=timedelta(seconds=30),
            )

            workflow.logger.info(
                f"Team execution workflow completed with HITL",
                extra={
                    "execution_id": input.execution_id,
                    "status": final_status,
                    "conversation_turns": conversation_turn,
                }
            )

            return {
                "success": final_status == "completed",
                "execution_id": input.execution_id,
                "status": final_status,
                "response": self._state.current_response,
                "usage": self._state.usage,
                "conversation_turns": conversation_turn,
            }

        except Exception as e:
            # Update state with error
            self._state.status = "failed"
            self._state.error_message = str(e)
            self._state.metadata["error_type"] = type(e).__name__

            workflow.logger.error(
                f"Team execution workflow failed",
                extra={
                    "execution_id": input.execution_id,
                    "error": str(e),
                }
            )

            # Update execution as failed
            try:
                await workflow.execute_activity(
                    update_execution_status,
                    ActivityUpdateExecutionInput(
                        execution_id=input.execution_id,
                        status="failed",
                        completed_at=workflow.now().isoformat(),
                        error_message=f"Workflow error: {str(e)}",
                        execution_metadata={
                            "workflow_error": True,
                            "error_type": type(e).__name__,
                        },
                    ),
                    start_to_close_timeout=timedelta(seconds=30),
                )
            except Exception as update_error:
                workflow.logger.error(
                    f"Failed to update status after error",
                    extra={"error": str(update_error)}
                )

            raise
```
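The new `TeamExecutionWorkflow` exposes the `add_message` and `mark_as_done` signals and the `get_state` query, so a caller can drive the human-in-the-loop conversation from the Temporal Python client. Below is a minimal sketch of a client session; the `localhost:7233` address and the `team-execution` task queue name are assumptions for illustration, since the real connection settings and queue name live in the worker configuration:

```python
import asyncio
from datetime import datetime, timezone

from temporalio.client import Client

from control_plane_api.app.workflows.team_execution import (
    ChatMessage,
    TeamExecutionInput,
    TeamExecutionWorkflow,
)


async def main() -> None:
    # Assumed server address and task queue; adjust to your deployment.
    client = await Client.connect("localhost:7233")
    handle = await client.start_workflow(
        TeamExecutionWorkflow.run,
        TeamExecutionInput(
            execution_id="exec-123",
            team_id="team-abc",
            organization_id="org-1",
            prompt="Summarize yesterday's incidents",
        ),
        id="team-exec-123",
        task_queue="team-execution",
    )

    # Read the current conversation state without interrupting the run.
    state = await handle.query(TeamExecutionWorkflow.get_state)
    print(state.status, state.current_response)

    # Send a followup message; the workflow's wait_condition wakes and runs another turn.
    await handle.signal(
        TeamExecutionWorkflow.add_message,
        ChatMessage(
            role="user",
            content="Also include any still-open incidents",
            timestamp=datetime.now(timezone.utc).isoformat(),
        ),
    )

    # When the conversation is finished, complete the workflow and collect its result.
    await handle.signal(TeamExecutionWorkflow.mark_as_done)
    print(await handle.result())


asyncio.run(main())
```

In practice a caller would poll `get_state` for the `waiting_for_input` status between signals rather than sending `add_message` and `mark_as_done` back to back, since `mark_as_done` ends the loop before a pending followup is processed.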
control_plane_api/scripts/seed_models.py
@@ -0,0 +1,239 @@

```python
"""
Seed script for populating LLM models in the database.

Run this script to populate the database with default Kubiya-supported models.

Usage:
    python -m control_plane_api.scripts.seed_models
"""
import sys
from pathlib import Path

# Add parent directory to path
sys.path.insert(0, str(Path(__file__).parent.parent.parent))

from control_plane_api.app.database import SessionLocal
from control_plane_api.app.models.llm_model import LLMModel
from datetime import datetime


# Default models to seed
DEFAULT_MODELS = [
    {
        "value": "kubiya/claude-sonnet-4",
        "label": "Claude Sonnet 4",
        "provider": "Anthropic",
        "logo": "/logos/claude-color.svg",
        "description": "Most intelligent model with best reasoning capabilities",
        "enabled": True,
        "recommended": True,
        "compatible_runtimes": ["default", "claude_code"],
        "capabilities": {
            "vision": False,
            "function_calling": True,
            "max_tokens": 8192,
            "context_window": 200000,
        },
        "display_order": 1,
    },
    {
        "value": "kubiya/claude-opus-4",
        "label": "Claude Opus 4",
        "provider": "Anthropic",
        "logo": "/logos/claude-color.svg",
        "description": "Powerful model for complex tasks requiring deep analysis",
        "enabled": True,
        "recommended": False,
        "compatible_runtimes": ["default", "claude_code"],
        "capabilities": {
            "vision": True,
            "function_calling": True,
            "max_tokens": 4096,
            "context_window": 200000,
        },
        "display_order": 2,
    },
    {
        "value": "kubiya/claude-3-5-sonnet-20241022",
        "label": "Claude 3.5 Sonnet",
        "provider": "Anthropic",
        "logo": "/logos/claude-color.svg",
        "description": "Previous generation Sonnet with excellent performance",
        "enabled": True,
        "recommended": False,
        "compatible_runtimes": ["default", "claude_code"],
        "capabilities": {
            "vision": True,
            "function_calling": True,
            "max_tokens": 8192,
            "context_window": 200000,
        },
        "display_order": 3,
    },
    {
        "value": "kubiya/gpt-4o",
        "label": "GPT-4o",
        "provider": "OpenAI",
        "logo": "/thirdparty/logos/openai.svg",
        "description": "Fast and capable model with vision support",
        "enabled": True,
        "recommended": False,
        "compatible_runtimes": ["default"],
        "capabilities": {
            "vision": True,
            "function_calling": True,
            "max_tokens": 16384,
            "context_window": 128000,
        },
        "display_order": 4,
    },
    {
        "value": "kubiya/gpt-4-turbo",
        "label": "GPT-4 Turbo",
        "provider": "OpenAI",
        "logo": "/thirdparty/logos/openai.svg",
        "description": "Enhanced GPT-4 with improved speed and capabilities",
        "enabled": True,
        "recommended": False,
        "compatible_runtimes": ["default"],
        "capabilities": {
            "vision": True,
            "function_calling": True,
            "max_tokens": 4096,
            "context_window": 128000,
        },
        "display_order": 5,
    },
    {
        "value": "kubiya/gpt-4o-mini",
        "label": "GPT-4o Mini",
        "provider": "OpenAI",
        "logo": "/thirdparty/logos/openai.svg",
        "description": "Cost-effective model for simpler tasks",
        "enabled": True,
        "recommended": False,
        "compatible_runtimes": ["default"],
        "capabilities": {
            "vision": True,
            "function_calling": True,
            "max_tokens": 16384,
            "context_window": 128000,
        },
        "display_order": 6,
    },
    {
        "value": "kubiya/gemini-pro",
        "label": "Gemini Pro",
        "provider": "Google",
        "logo": "/thirdparty/logos/google.svg",
        "description": "Google's powerful multimodal model",
        "enabled": True,
        "recommended": False,
        "compatible_runtimes": ["default"],
        "capabilities": {
            "vision": True,
            "function_calling": True,
            "max_tokens": 8192,
            "context_window": 1000000,
        },
        "display_order": 7,
    },
]


def seed_models(force: bool = False):
    """
    Seed the database with default models.

    Args:
        force: If True, update existing models. If False, skip existing models.
    """
    db = SessionLocal()
    try:
        created_count = 0
        updated_count = 0
        skipped_count = 0

        for model_data in DEFAULT_MODELS:
            # Check if model exists
            existing = db.query(LLMModel).filter(LLMModel.value == model_data["value"]).first()

            if existing:
                if force:
                    # Update existing model
                    for key, value in model_data.items():
                        setattr(existing, key, value)
                    existing.updated_at = datetime.utcnow()
                    print(f"✓ Updated: {model_data['label']} ({model_data['value']})")
                    updated_count += 1
                else:
                    print(f"⊙ Skipped (exists): {model_data['label']} ({model_data['value']})")
                    skipped_count += 1
            else:
                # Create new model
                new_model = LLMModel(**model_data)
                db.add(new_model)
                print(f"✓ Created: {model_data['label']} ({model_data['value']})")
                created_count += 1

        db.commit()

        print("\n" + "=" * 60)
        print("Seeding complete!")
        print(f" Created: {created_count}")
        print(f" Updated: {updated_count}")
        print(f" Skipped: {skipped_count}")
        print(f" Total: {len(DEFAULT_MODELS)}")
        print("=" * 60)

    except Exception as e:
        db.rollback()
        print(f"\n❌ Error seeding models: {e}")
        raise
    finally:
        db.close()


def clear_models():
    """Clear all models from the database (use with caution!)"""
    db = SessionLocal()
    try:
        count = db.query(LLMModel).delete()
        db.commit()
        print(f"✓ Cleared {count} models from database")
    except Exception as e:
        db.rollback()
        print(f"❌ Error clearing models: {e}")
        raise
    finally:
        db.close()


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(description="Seed LLM models database")
    parser.add_argument(
        "--force",
        action="store_true",
        help="Update existing models (default: skip existing)",
    )
    parser.add_argument(
        "--clear",
        action="store_true",
        help="Clear all models before seeding (DANGEROUS)",
    )

    args = parser.parse_args()

    if args.clear:
        confirm = input("⚠️ This will delete ALL models. Are you sure? (yes/no): ")
        if confirm.lower() == "yes":
            clear_models()
        else:
            print("Cancelled.")
            sys.exit(0)

    print("Seeding models...")
    print("=" * 60)
    seed_models(force=args.force)
```
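Beyond the CLI entry point shown above, `seed_models` and `clear_models` are plain functions over `SessionLocal`, so seeding can also be triggered programmatically, for example from a deployment or migration hook. A minimal sketch, assuming the database behind `control_plane_api.app.database` is reachable:

```python
from control_plane_api.scripts.seed_models import seed_models

# Insert any missing default models, leaving existing rows untouched
# (equivalent to `python -m control_plane_api.scripts.seed_models`).
seed_models()

# Also refresh rows that already exist
# (equivalent to passing --force on the command line).
seed_models(force=True)
```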
{kubiya_control_plane_api-0.1.0.dist-info/licenses → control_plane_api}/LICENSE: file without changes
{kubiya_control_plane_api-0.1.0.dist-info → kubiya_control_plane_api-0.3.4.dist-info}/WHEEL: file without changes