ctrlcode 0.1.0__py3-none-any.whl
- ctrlcode/__init__.py +8 -0
- ctrlcode/agents/__init__.py +29 -0
- ctrlcode/agents/cleanup.py +388 -0
- ctrlcode/agents/communication.py +439 -0
- ctrlcode/agents/observability.py +421 -0
- ctrlcode/agents/react_loop.py +297 -0
- ctrlcode/agents/registry.py +211 -0
- ctrlcode/agents/result_parser.py +242 -0
- ctrlcode/agents/workflow.py +723 -0
- ctrlcode/analysis/__init__.py +28 -0
- ctrlcode/analysis/ast_diff.py +163 -0
- ctrlcode/analysis/bug_detector.py +149 -0
- ctrlcode/analysis/code_graphs.py +329 -0
- ctrlcode/analysis/semantic.py +205 -0
- ctrlcode/analysis/static.py +183 -0
- ctrlcode/analysis/synthesizer.py +281 -0
- ctrlcode/analysis/tests.py +189 -0
- ctrlcode/cleanup/__init__.py +16 -0
- ctrlcode/cleanup/auto_merge.py +350 -0
- ctrlcode/cleanup/doc_gardening.py +388 -0
- ctrlcode/cleanup/pr_automation.py +330 -0
- ctrlcode/cleanup/scheduler.py +356 -0
- ctrlcode/config.py +380 -0
- ctrlcode/embeddings/__init__.py +6 -0
- ctrlcode/embeddings/embedder.py +192 -0
- ctrlcode/embeddings/vector_store.py +213 -0
- ctrlcode/fuzzing/__init__.py +24 -0
- ctrlcode/fuzzing/analyzer.py +280 -0
- ctrlcode/fuzzing/budget.py +112 -0
- ctrlcode/fuzzing/context.py +665 -0
- ctrlcode/fuzzing/context_fuzzer.py +506 -0
- ctrlcode/fuzzing/derived_orchestrator.py +732 -0
- ctrlcode/fuzzing/oracle_adapter.py +135 -0
- ctrlcode/linters/__init__.py +11 -0
- ctrlcode/linters/hand_rolled_utils.py +221 -0
- ctrlcode/linters/yolo_parsing.py +217 -0
- ctrlcode/metrics/__init__.py +6 -0
- ctrlcode/metrics/dashboard.py +283 -0
- ctrlcode/metrics/tech_debt.py +663 -0
- ctrlcode/paths.py +68 -0
- ctrlcode/permissions.py +179 -0
- ctrlcode/providers/__init__.py +15 -0
- ctrlcode/providers/anthropic.py +138 -0
- ctrlcode/providers/base.py +77 -0
- ctrlcode/providers/openai.py +197 -0
- ctrlcode/providers/parallel.py +104 -0
- ctrlcode/server.py +871 -0
- ctrlcode/session/__init__.py +6 -0
- ctrlcode/session/baseline.py +57 -0
- ctrlcode/session/manager.py +967 -0
- ctrlcode/skills/__init__.py +10 -0
- ctrlcode/skills/builtin/commit.toml +29 -0
- ctrlcode/skills/builtin/docs.toml +25 -0
- ctrlcode/skills/builtin/refactor.toml +33 -0
- ctrlcode/skills/builtin/review.toml +28 -0
- ctrlcode/skills/builtin/test.toml +28 -0
- ctrlcode/skills/loader.py +111 -0
- ctrlcode/skills/registry.py +139 -0
- ctrlcode/storage/__init__.py +19 -0
- ctrlcode/storage/history_db.py +708 -0
- ctrlcode/tools/__init__.py +220 -0
- ctrlcode/tools/bash.py +112 -0
- ctrlcode/tools/browser.py +352 -0
- ctrlcode/tools/executor.py +153 -0
- ctrlcode/tools/explore.py +486 -0
- ctrlcode/tools/mcp.py +108 -0
- ctrlcode/tools/observability.py +561 -0
- ctrlcode/tools/registry.py +193 -0
- ctrlcode/tools/todo.py +291 -0
- ctrlcode/tools/update.py +266 -0
- ctrlcode/tools/webfetch.py +147 -0
- ctrlcode-0.1.0.dist-info/METADATA +93 -0
- ctrlcode-0.1.0.dist-info/RECORD +75 -0
- ctrlcode-0.1.0.dist-info/WHEEL +4 -0
- ctrlcode-0.1.0.dist-info/entry_points.txt +3 -0
ctrlcode/agents/workflow.py
@@ -0,0 +1,723 @@
"""Multi-agent workflow execution."""

from typing import Any
from dataclasses import dataclass
import logging

from .communication import AgentCoordinator, AgentVerbosity

logger = logging.getLogger(__name__)


@dataclass
class TaskGraph:
    """Task graph with dependencies."""

    tasks: list[dict[str, Any]]
    parallel_groups: list[list[str]]
    risks: list[str]
    checkpoints: list[str]

    def get_task(self, task_id: str) -> dict[str, Any] | None:
        """
        Get task by ID.

        Args:
            task_id: Task identifier

        Returns:
            Task dict or None if not found
        """
        for task in self.tasks:
            if task["id"] == task_id:
                return task
        return None

    def get_parallel_group(self, group_index: int) -> list[str]:
        """
        Get task IDs in parallel group.

        Args:
            group_index: Index of parallel group

        Returns:
            List of task IDs
        """
        if 0 <= group_index < len(self.parallel_groups):
            return self.parallel_groups[group_index]
        return []


class MultiAgentWorkflow:
    """Orchestrates multi-agent task execution."""

    def __init__(self, coordinator: AgentCoordinator, event_callback=None):
        """
        Initialize workflow executor.

        Args:
            coordinator: AgentCoordinator instance
            event_callback: Optional async callback for workflow events
        """
        self.coordinator = coordinator
        self.bus = coordinator.bus
        self.event_callback = event_callback

    async def execute(self, user_intent: str) -> dict[str, Any]:
        """
        Execute multi-agent workflow for user intent.

        Args:
            user_intent: User's request/goal

        Returns:
            Workflow execution results
        """
        results = {
            "status": "in_progress",
            "user_intent": user_intent,
            "task_graph": None,
            "completed_tasks": [],
            "errors": [],
        }

        try:
            # Phase 1: Planning
            await self._emit_event("workflow_phase_change", {
                "phase": "planning",
                "progress": 0.0,
                "status": "Analyzing request and creating task breakdown..."
            })

            task_graph = await self._planning_phase(user_intent)
            results["task_graph"] = task_graph

            # Phase 2: Execution
            await self._emit_event("workflow_phase_change", {
                "phase": "execution",
                "progress": 0.25,
                "completed": 1,
                "total": 4,
                "status": "Executing tasks..."
            })

            completed_tasks = await self._execution_phase(task_graph)
            results["completed_tasks"] = completed_tasks

            # Phase 3: Review
            await self._emit_event("workflow_phase_change", {
                "phase": "review",
                "progress": 0.5,
                "completed": 2,
                "total": 4,
                "status": "Reviewing changes..."
            })

            review_result = await self._review_phase(completed_tasks)

            # Handle review feedback loop
            if review_result["status"] == "changes_requested":
                # Emit feedback
                await self._emit_event("workflow_review_feedback", {
                    "feedback": review_result.get("feedback", [])
                })

                # Go back to execution phase
                await self._emit_event("workflow_phase_change", {
                    "phase": "execution",
                    "progress": 0.5,
                    "status": "Addressing review feedback..."
                })

                completed_tasks = await self._handle_review_feedback(
                    review_result,
                    task_graph
                )
                results["completed_tasks"] = completed_tasks

                # Re-review
                await self._emit_event("workflow_phase_change", {
                    "phase": "review",
                    "progress": 0.65,
                    "status": "Re-reviewing changes..."
                })
                review_result = await self._review_phase(completed_tasks)

            results["review"] = review_result

            # Phase 4: Validation
            await self._emit_event("workflow_phase_change", {
                "phase": "validation",
                "progress": 0.75,
                "completed": 3,
                "total": 4,
                "status": "Running tests and validation..."
            })

            validation_result = await self._validation_phase(completed_tasks)
            results["validation"] = validation_result

            # Emit validation results
            await self._emit_event("workflow_observability_results", {
                "results": validation_result
            })

            # Determine final status
            if validation_result["status"] == "pass":
                await self._emit_event("workflow_phase_change", {
                    "phase": "complete",
                    "progress": 1.0,
                    "completed": 4,
                    "total": 4,
                    "status": "Workflow completed successfully"
                })
                results["status"] = "completed"
            else:
                await self._emit_event("workflow_phase_change", {
                    "phase": "complete",
                    "progress": 1.0,
                    "status": "Workflow completed with validation failures"
                })
                results["status"] = "failed"
                results["errors"].append(
                    f"Validation failed: {validation_result.get('error')}"
                )

        except Exception as e:
            await self._emit_event("workflow_phase_change", {
                "phase": "idle",
                "progress": 0.0,
                "status": f"Workflow error: {str(e)}"
            })
            results["status"] = "error"
            results["errors"].append(str(e))

        return results

    async def _emit_event(self, event_type: str, data: dict[str, Any]):
        """
        Emit workflow event via callback.

        Args:
            event_type: Type of event
            data: Event data
        """
        if self.event_callback:
            try:
                await self.event_callback(event_type, data)
            except Exception:
                # Don't let event emission failures break workflow
                pass

    async def _planning_phase(self, user_intent: str) -> TaskGraph:
        """
        Execute planning phase.

        Args:
            user_intent: User's request

        Returns:
            TaskGraph with tasks and dependencies
        """
        # Spawn planner agent
        planner_result = await self.coordinator.spawn_agent(
            agent_type="planner",
            task={
                "user_intent": user_intent,
                "context": {},
            },
            verbosity=AgentVerbosity.WORKFLOW
        )

        # Emit agent spawned event
        agent_id = planner_result.get("agent_id")
        await self._emit_event("workflow_agent_spawned", {
            "agent_id": agent_id,
            "type": "planner",
            "task": "Creating task breakdown"
        })

        # Handle error case
        if planner_result.get("status") == "error":
            await self._emit_event("workflow_error", {
                "phase": "planning",
                "error": planner_result.get("error")
            })
            logger.error(f"Planning phase failed: {planner_result.get('error')}")
            # Return empty task graph
            return TaskGraph(tasks=[], parallel_groups=[], risks=[], checkpoints=[])

        # Emit agent completion
        await self._emit_event("workflow_agent_completed", {
            "agent_id": agent_id
        })

        # Extract task graph from planner result (now structured)
        task_graph_data = planner_result.get("task_graph", {
            "tasks": [],
            "parallel_groups": [],
            "risks": [],
            "checkpoints": [],
        })

        # Check for parsing errors
        if "error" in task_graph_data:
            logger.warning(f"Task graph parsing had issues: {task_graph_data.get('error')}")

        task_graph = TaskGraph(
            tasks=task_graph_data.get("tasks", []),
            parallel_groups=task_graph_data.get("parallel_groups", []),
            risks=task_graph_data.get("risks", []),
            checkpoints=task_graph_data.get("checkpoints", [])
        )

        # Emit task graph created event
        await self._emit_event("workflow_task_graph_created", {
            "task_graph": {
                "tasks": task_graph.tasks,
                "parallel_groups": task_graph.parallel_groups,
                "risks": task_graph.risks,
                "checkpoints": task_graph.checkpoints,
            }
        })

        return task_graph

    async def _execution_phase(
        self,
        task_graph: TaskGraph
    ) -> list[dict[str, Any]]:
        """
        Execute all tasks from task graph.

        Args:
            task_graph: TaskGraph to execute

        Returns:
            List of completed task results
        """
        completed_tasks = []

        # Execute each parallel group sequentially
        for group_index, group in enumerate(task_graph.parallel_groups):
            if len(group) == 1:
                # Single task - execute sequentially
                task_id = group[0]
                task = task_graph.get_task(task_id)

                if task:
                    # Emit task status update
                    await self._emit_event("workflow_task_updated", {
                        "task_id": task_id,
                        "status": "in_progress"
                    })

                    # Spawn coder agent
                    result = await self.coordinator.spawn_agent(
                        agent_type="coder",
                        task=task
                    )

                    # Emit agent events
                    agent_id = result.get("agent_id")
                    await self._emit_event("workflow_agent_spawned", {
                        "agent_id": agent_id,
                        "type": "coder",
                        "task": task.get("description", task_id)
                    })

                    await self._emit_event("workflow_agent_updated", {
                        "agent_id": agent_id,
                        "status": "in_progress",
                        "task": task.get("description", task_id),
                        "progress": 0.5
                    })

                    completed_tasks.append(result)

                    # Emit completion events
                    await self._emit_event("workflow_agent_completed", {
                        "agent_id": agent_id
                    })

                    await self._emit_event("workflow_task_updated", {
                        "task_id": task_id,
                        "status": "completed"
                    })

            else:
                # Multiple tasks - execute in parallel
                tasks = [task_graph.get_task(tid) for tid in group]
                tasks = [t for t in tasks if t is not None]  # Filter None

                # Emit parallel execution start
                task_ids = [t["id"] for t in tasks]
                await self._emit_event("workflow_parallel_start", {
                    "task_ids": task_ids
                })

                # Update task statuses
                for task_id in task_ids:
                    await self._emit_event("workflow_task_updated", {
                        "task_id": task_id,
                        "status": "in_progress"
                    })

                agents_tasks = [
                    {"type": "coder", "task": task}
                    for task in tasks
                ]

                results = await self.coordinator.spawn_agents_parallel(
                    agents_tasks
                )

                # Emit agent events for each
                for i, result in enumerate(results):
                    agent_id = result.get("agent_id")
                    task_id = task_ids[i] if i < len(task_ids) else f"task-{i}"

                    await self._emit_event("workflow_agent_spawned", {
                        "agent_id": agent_id,
                        "type": "coder",
                        "task": tasks[i].get("description", task_id)
                    })

                    # Emit progress
                    await self._emit_event("workflow_parallel_progress", {
                        "task_id": task_id,
                        "progress": 1.0,
                        "description": tasks[i].get("description", task_id)
                    })

                    await self._emit_event("workflow_agent_completed", {
                        "agent_id": agent_id
                    })

                    await self._emit_event("workflow_task_updated", {
                        "task_id": task_id,
                        "status": "completed"
                    })

                completed_tasks.extend(results)

        return completed_tasks

    async def _review_phase(
        self,
        completed_tasks: list[dict[str, Any]]
    ) -> dict[str, Any]:
        """
        Execute review phase.

        Args:
            completed_tasks: List of completed tasks

        Returns:
            Review result
        """
        # Spawn reviewer agent
        reviewer_result = await self.coordinator.spawn_agent(
            agent_type="reviewer",
            task={
                "tasks": completed_tasks,
                "files_changed": self._extract_changed_files(completed_tasks),
            },
            verbosity=AgentVerbosity.WORKFLOW
        )

        # Emit reviewer agent events
        agent_id = reviewer_result.get("agent_id")
        await self._emit_event("workflow_agent_spawned", {
            "agent_id": agent_id,
            "type": "reviewer",
            "task": "Reviewing completed tasks"
        })

        # Handle error case
        if reviewer_result.get("status") == "error":
            await self._emit_event("workflow_error", {
                "phase": "review",
                "error": reviewer_result.get("error")
            })
            logger.error(f"Review phase failed: {reviewer_result.get('error')}")
            # Return inconclusive review
            return {
                "status": "inconclusive",
                "feedback": "Review failed",
                "error": reviewer_result.get("error")
            }

        await self._emit_event("workflow_agent_completed", {
            "agent_id": agent_id
        })

        # Extract structured review data
        review_data = reviewer_result.get("review", {})

        return {
            "status": review_data.get("status", "inconclusive"),
            "feedback": review_data.get("feedback", ""),
            "changes_required": review_data.get("changes_required", [])
        }

    async def _validation_phase(
        self,
        completed_tasks: list[dict[str, Any]]
    ) -> dict[str, Any]:
        """
        Execute validation phase.

        Args:
            completed_tasks: List of completed tasks

        Returns:
            Validation result
        """
        # Spawn executor agent
        executor_result = await self.coordinator.spawn_agent(
            agent_type="executor",
            task={
                "type": "verify_functionality",
                "tasks": completed_tasks,
            },
            verbosity=AgentVerbosity.WORKFLOW
        )

        # Emit executor agent events
        agent_id = executor_result.get("agent_id")
        await self._emit_event("workflow_agent_spawned", {
            "agent_id": agent_id,
            "type": "executor",
            "task": "Running validation tests"
        })

        await self._emit_event("workflow_agent_updated", {
            "agent_id": agent_id,
            "status": "in_progress",
            "task": "Executing tests and checks",
            "progress": 0.5
        })

        # Handle error case
        if executor_result.get("status") == "error":
            await self._emit_event("workflow_error", {
                "phase": "validation",
                "error": executor_result.get("error")
            })
            logger.error(f"Validation phase failed: {executor_result.get('error')}")
            return {
                "status": "fail",
                "error": executor_result.get("error"),
                "test_results": []
            }

        await self._emit_event("workflow_agent_completed", {
            "agent_id": agent_id
        })

        # Extract structured validation data
        validation_data = executor_result.get("validation", {})

        return {
            "status": "pass" if validation_data.get("validation_status") == "passed" else "fail",
            "test_results": validation_data.get("test_results", []),
            "output": validation_data.get("output", "")
        }

    async def _handle_review_feedback(
        self,
        review_result: dict[str, Any],
        task_graph: TaskGraph
    ) -> list[dict[str, Any]]:
        """
        Handle review feedback by re-executing tasks.

        Args:
            review_result: Review result with feedback
            task_graph: Original task graph

        Returns:
            Updated completed tasks
        """
        completed_tasks = []

        # Extract tasks that need changes
        feedback_items = review_result.get("feedback", [])

        for feedback_item in feedback_items:
            task_id = feedback_item.get("task_id")
            if not task_id:
                continue

            # Get original task
            task = task_graph.get_task(task_id)
            if not task:
                continue

            # Update feedback status to in_progress
            feedback_id = feedback_item.get("id")
            if feedback_id:
                await self._emit_event("workflow_review_feedback", {
                    "feedback": [{
                        **feedback_item,
                        "fix_status": "in_progress"
                    }]
                })

            # Add feedback to task context
            task["context"] = task.get("context", {})
            task["context"]["reviewer_feedback"] = feedback_item

            # Update task status
            await self._emit_event("workflow_task_updated", {
                "task_id": task_id,
                "status": "in_progress"
            })

            # Re-execute task
            result = await self.coordinator.spawn_agent(
                agent_type="coder",
                task=task
            )

            # Emit agent events
            agent_id = result.get("agent_id")
            await self._emit_event("workflow_agent_spawned", {
                "agent_id": agent_id,
                "type": "coder",
                "task": f"Addressing feedback for {task_id}"
            })

            await self._emit_event("workflow_agent_completed", {
                "agent_id": agent_id
            })

            await self._emit_event("workflow_task_updated", {
                "task_id": task_id,
                "status": "completed"
            })

            # Update feedback status to fixed
            if feedback_id:
                await self._emit_event("workflow_review_feedback", {
                    "feedback": [{
                        **feedback_item,
                        "fix_status": "fixed"
                    }]
                })

            completed_tasks.append(result)

        return completed_tasks

    def _extract_changed_files(
        self,
        completed_tasks: list[dict[str, Any]]
    ) -> list[str]:
        """
        Extract list of changed files from tasks.

        Args:
            completed_tasks: List of completed tasks

        Returns:
            List of file paths
        """
        files = set()

        for task in completed_tasks:
            task_files = task.get("task", {}).get("files", [])
            files.update(task_files)

        return list(files)


class WorkflowOrchestrator:
    """High-level orchestrator for managing workflows."""

    def __init__(
        self,
        agent_registry: Any,
        storage_path: Any,
        provider: Any,
        tool_registry: Any | None = None,
        event_callback=None
    ):
        """
        Initialize orchestrator.

        Args:
            agent_registry: AgentRegistry instance
            storage_path: Base path for agent storage
            provider: LLM provider instance
            tool_registry: Optional ToolRegistry for tool access
            event_callback: Optional async callback for workflow events
        """
        self.coordinator = AgentCoordinator(
            agent_registry,
            storage_path,
            provider,
            tool_registry
        )
        self.workflow = MultiAgentWorkflow(self.coordinator, event_callback)

    async def handle_user_request(
        self,
        user_intent: str,
        context: dict[str, Any] | None = None
    ) -> dict[str, Any]:
        """
        Handle user request with multi-agent workflow.

        Args:
            user_intent: User's request
            context: Optional additional context

        Returns:
            Workflow execution results
        """
        # Classify intent
        is_trivial = self._is_trivial_request(user_intent)

        if is_trivial:
            # Skip planning, execute directly
            result = await self.coordinator.spawn_agent(
                agent_type="coder",
                task={"description": user_intent}
            )
            return {
                "status": "completed",
                "workflow": "simple",
                "result": result,
            }

        else:
            # Full multi-agent workflow
            result = await self.workflow.execute(user_intent)
            return {
                "status": result["status"],
                "workflow": "multi_agent",
                "result": result,
            }

    def _is_trivial_request(self, user_intent: str) -> bool:
        """
        Determine if request is trivial (single-agent).

        Args:
            user_intent: User's request

        Returns:
            True if trivial, False otherwise
        """
        trivial_keywords = [
            "fix typo",
            "change color",
            "update readme",
            "add comment",
        ]

        user_intent_lower = user_intent.lower()

        return any(keyword in user_intent_lower for keyword in trivial_keywords)
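For orientation, the module's phase machinery can be exercised end to end without the rest of the package by substituting a stub for AgentCoordinator. The sketch below is illustrative only and not part of the wheel: StubCoordinator and on_event are hypothetical, and the canned result shapes ("task_graph", "review", "validation" keys) are inferred from how workflow.py reads agent results above. It assumes the wheel is installed so that ctrlcode.agents.workflow is importable.

    # Minimal smoke-test sketch (not from the package): drive MultiAgentWorkflow
    # with a stub coordinator and observe the event stream it emits.
    import asyncio
    from typing import Any

    from ctrlcode.agents.workflow import MultiAgentWorkflow


    class StubCoordinator:
        """Hypothetical stand-in for AgentCoordinator; returns canned results."""

        bus = None  # MultiAgentWorkflow.__init__ stores coordinator.bus

        async def spawn_agent(self, agent_type: str, task: dict[str, Any],
                              verbosity: Any = None) -> dict[str, Any]:
            # Result shapes inferred from the reads in workflow.py above.
            return {
                "agent_id": f"{agent_type}-1",
                "status": "ok",
                "task_graph": {"tasks": [], "parallel_groups": [],
                               "risks": [], "checkpoints": []},
                "review": {"status": "approved"},
                "validation": {"validation_status": "passed"},
            }

        async def spawn_agents_parallel(self, agents_tasks):
            return [await self.spawn_agent(a["type"], a["task"]) for a in agents_tasks]


    async def on_event(event_type: str, data: dict[str, Any]) -> None:
        # Matches the callback contract in _emit_event: awaited with
        # (event_type, data); any exception it raises is swallowed.
        print(f"{event_type}: {data.get('phase', data.get('status', ''))}")


    async def main() -> None:
        workflow = MultiAgentWorkflow(StubCoordinator(), event_callback=on_event)
        results = await workflow.execute("add input validation to the config loader")
        print(results["status"])  # "completed", given the canned results above


    if __name__ == "__main__":
        asyncio.run(main())

With these stubs the run walks planning, execution (an empty parallel_groups list, so no coder agents), review, and validation, and ends with status "completed"; a real deployment would instead construct WorkflowOrchestrator with the package's AgentRegistry and provider objects from ctrlcode/agents/registry.py and ctrlcode/providers/.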