up-cli 0.1.1__py3-none-any.whl → 0.5.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- up/__init__.py +1 -1
- up/ai_cli.py +229 -0
- up/cli.py +75 -4
- up/commands/agent.py +521 -0
- up/commands/bisect.py +343 -0
- up/commands/branch.py +350 -0
- up/commands/dashboard.py +248 -0
- up/commands/init.py +195 -6
- up/commands/learn.py +1741 -0
- up/commands/memory.py +545 -0
- up/commands/new.py +108 -10
- up/commands/provenance.py +267 -0
- up/commands/review.py +239 -0
- up/commands/start.py +1124 -0
- up/commands/status.py +360 -0
- up/commands/summarize.py +122 -0
- up/commands/sync.py +317 -0
- up/commands/vibe.py +304 -0
- up/context.py +421 -0
- up/core/__init__.py +69 -0
- up/core/checkpoint.py +479 -0
- up/core/provenance.py +364 -0
- up/core/state.py +678 -0
- up/events.py +512 -0
- up/git/__init__.py +37 -0
- up/git/utils.py +270 -0
- up/git/worktree.py +331 -0
- up/learn/__init__.py +155 -0
- up/learn/analyzer.py +227 -0
- up/learn/plan.py +374 -0
- up/learn/research.py +511 -0
- up/learn/utils.py +117 -0
- up/memory.py +1096 -0
- up/parallel.py +551 -0
- up/summarizer.py +407 -0
- up/templates/__init__.py +70 -2
- up/templates/config/__init__.py +502 -20
- up/templates/docs/SKILL.md +28 -0
- up/templates/docs/__init__.py +341 -0
- up/templates/docs/standards/HEADERS.md +24 -0
- up/templates/docs/standards/STRUCTURE.md +18 -0
- up/templates/docs/standards/TEMPLATES.md +19 -0
- up/templates/learn/__init__.py +567 -14
- up/templates/loop/__init__.py +546 -27
- up/templates/mcp/__init__.py +474 -0
- up/templates/projects/__init__.py +786 -0
- up/ui/__init__.py +14 -0
- up/ui/loop_display.py +650 -0
- up/ui/theme.py +137 -0
- up_cli-0.5.0.dist-info/METADATA +519 -0
- up_cli-0.5.0.dist-info/RECORD +55 -0
- up_cli-0.1.1.dist-info/METADATA +0 -186
- up_cli-0.1.1.dist-info/RECORD +0 -14
- {up_cli-0.1.1.dist-info → up_cli-0.5.0.dist-info}/WHEEL +0 -0
- {up_cli-0.1.1.dist-info → up_cli-0.5.0.dist-info}/entry_points.txt +0 -0
up/parallel.py
ADDED
|
@@ -0,0 +1,551 @@
|
|
|
1
|
+
"""Parallel task execution using Git worktrees.
|
|
2
|
+
|
|
3
|
+
This module enables running multiple AI tasks simultaneously,
|
|
4
|
+
each in its own isolated Git worktree. Tasks are verified
|
|
5
|
+
independently and merged to main when successful.
|
|
6
|
+
|
|
7
|
+
Uses the unified state system in .up/state.json for consistency.
|
|
8
|
+
"""
|
|
9
|
+
|
|
10
|
+
import json
|
|
11
|
+
import subprocess
|
|
12
|
+
import time
|
|
13
|
+
from concurrent.futures import ThreadPoolExecutor, as_completed
|
|
14
|
+
from dataclasses import dataclass, field
|
|
15
|
+
from datetime import datetime
|
|
16
|
+
from pathlib import Path
|
|
17
|
+
from typing import Optional, Callable, List
|
|
18
|
+
|
|
19
|
+
from rich.console import Console
|
|
20
|
+
from rich.progress import Progress, SpinnerColumn, TextColumn, BarColumn
|
|
21
|
+
from rich.table import Table
|
|
22
|
+
|
|
23
|
+
from up.git.worktree import (
|
|
24
|
+
create_worktree,
|
|
25
|
+
remove_worktree,
|
|
26
|
+
list_worktrees,
|
|
27
|
+
merge_worktree,
|
|
28
|
+
create_checkpoint,
|
|
29
|
+
count_commits_since,
|
|
30
|
+
WorktreeState,
|
|
31
|
+
)
|
|
32
|
+
from up.ai_cli import check_ai_cli, run_ai_task
|
|
33
|
+
from up.core.state import get_state_manager, AgentState
|
|
34
|
+
|
|
35
|
+
console = Console()
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
@dataclass
class TaskResult:
    """Result of a task execution.

    Produced by execute_task_in_worktree / verify_worktree and consumed by
    run_parallel_loop to decide whether a task's worktree gets merged.
    """
    # Task/user-story id from the PRD ("unknown" if the task dict had none).
    task_id: str
    # Whether the phase named below completed successfully.
    success: bool
    phase: str  # "executed", "verified", "merged", "failed"
    # Wall-clock seconds spent in the producing function.
    duration_seconds: float
    # Commits ahead of main in the worktree (only set on successful execution).
    commits: int = 0
    # Human-readable failure reason (truncated AI output or exception text).
    error: Optional[str] = None
    # Per-tool verification outcomes: {"tests": ..., "lint": ..., "type_check": ...}.
    test_results: Optional[dict] = None
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
class ParallelExecutionManager:
    """Facade over the unified state system for parallel-run bookkeeping.

    Supersedes the old ParallelState dataclass: all data lives in
    .up/state.json (via the shared state manager) instead of the legacy
    .parallel_state.json file.
    """

    def __init__(self, workspace: Optional[Path] = None):
        self.workspace = workspace or Path.cwd()
        self._state_manager = get_state_manager(self.workspace)

    def _persist(self):
        """Flush the unified state to disk after every mutation."""
        self._state_manager.save()

    @property
    def state(self):
        """The `parallel` section of the unified state."""
        return self._state_manager.state.parallel

    @property
    def iteration(self) -> int:
        """Current batch counter."""
        return self.state.current_batch

    @iteration.setter
    def iteration(self, value: int):
        self.state.current_batch = value
        self._persist()

    @property
    def parallel_limit(self) -> int:
        """Maximum number of concurrently executing tasks."""
        return self.state.max_workers

    @parallel_limit.setter
    def parallel_limit(self, value: int):
        self.state.max_workers = value
        self._persist()

    @property
    def active_worktrees(self) -> List[str]:
        """Task ids that currently have a live worktree."""
        return self.state.agents

    def add_active_worktree(self, task_id: str):
        """Register *task_id* as active; no-op if already registered."""
        if task_id in self.state.agents:
            return
        self.state.agents.append(task_id)
        self._persist()

    def remove_active_worktree(self, task_id: str):
        """Drop *task_id* from the active set; no-op if absent."""
        if task_id not in self.state.agents:
            return
        self.state.agents.remove(task_id)
        self._persist()

    def set_active(self, active: bool):
        """Record whether a parallel run is currently in progress."""
        self.state.active = active
        self._persist()

    def save(self):
        """Explicit save (for compatibility)."""
        self._persist()

    def record_task_complete(self, task_id: str):
        """Record a task completion in metrics."""
        self._state_manager.record_task_complete(task_id)

    def record_task_failed(self, task_id: str):
        """Record a task failure in metrics."""
        self._state_manager.record_task_failed(task_id)
|
|
113
|
+
|
|
114
|
+
|
|
115
|
+
def get_pending_tasks(prd_path: Path, limit: Optional[int] = None) -> list[dict]:
    """Get pending tasks from PRD file.

    Args:
        prd_path: Path to prd.json
        limit: Maximum tasks to return; None means no limit. An explicit
            0 returns an empty batch.

    Returns:
        List of user-story dicts whose "passes" flag is not truthy.
    """
    if not prd_path.exists():
        return []

    try:
        data = json.loads(prd_path.read_text())
    except (json.JSONDecodeError, OSError):
        # Malformed or unreadable PRD is treated as "nothing pending";
        # OSError previously escaped and crashed the caller.
        return []

    stories = data.get("userStories", [])
    pending = [s for s in stories if not s.get("passes", False)]

    # Check `is not None` rather than truthiness: previously limit=0
    # fell through and silently meant "no limit".
    if limit is not None:
        pending = pending[:limit]

    return pending
|
|
140
|
+
|
|
141
|
+
|
|
142
|
+
def execute_task_in_worktree(
    worktree_path: Path,
    task: dict,
    cli_name: str = "claude",
    timeout: int = 600
) -> TaskResult:
    """Execute a single task in its worktree.

    Flow: load the worktree's persisted state, record a start checkpoint,
    invoke the AI CLI with a prompt built from the task, commit whatever
    the AI changed, and return the outcome. State is saved to disk after
    every phase transition so progress survives a crash mid-task.

    Args:
        worktree_path: Path to the worktree
        task: Task dict from PRD
        cli_name: AI CLI to use
        timeout: Timeout in seconds

    Returns:
        TaskResult with execution outcome
    """
    task_id = task.get("id", "unknown")
    start_time = time.time()

    try:
        # Update state to executing
        state = WorktreeState.load(worktree_path)
        state.status = "executing"
        state.phase = "CHECKPOINT"
        state.save(worktree_path)

        # Create checkpoint (named marker before any AI changes; exact
        # semantics live in up.git.worktree.create_checkpoint)
        checkpoint = create_checkpoint(worktree_path, f"{task_id}-start")
        state.checkpoints.append({
            "name": checkpoint,
            "time": datetime.now().isoformat()
        })
        state.save(worktree_path)

        # Build implementation prompt
        prompt = _build_task_prompt(task)

        # Run AI implementation
        state.phase = "AI_IMPL"
        state.save(worktree_path)

        success, output = run_ai_task(
            workspace=worktree_path,
            prompt=prompt,
            cli_name=cli_name,
            timeout=timeout
        )

        # Log the invocation (output truncated to 500 chars) for later audit.
        state.ai_invocations.append({
            "success": success,
            "duration_seconds": time.time() - start_time,
            "output_preview": output[:500] if output else ""
        })
        state.save(worktree_path)

        if not success:
            state.status = "failed"
            state.error = output[:500] if output else "AI execution failed"
            state.save(worktree_path)

            # phase stays "executed": the execution phase ran but did not succeed.
            return TaskResult(
                task_id=task_id,
                success=False,
                phase="executed",
                duration_seconds=time.time() - start_time,
                error=state.error
            )

        # Commit AI changes
        # NOTE(review): return codes are deliberately not checked — if the
        # AI already committed (or changed nothing) the commit fails quietly
        # and the task still counts as executed. Presumably best-effort;
        # confirm before tightening.
        subprocess.run(
            ["git", "add", "-A"],
            cwd=worktree_path,
            capture_output=True
        )
        subprocess.run(
            ["git", "commit", "-m", f"feat({task_id}): {task.get('title', 'Implement task')}"],
            cwd=worktree_path,
            capture_output=True
        )

        state.status = "executed"
        state.phase = "VERIFY"
        state.save(worktree_path)

        # How many commits this worktree branch is ahead of main.
        commits = count_commits_since(worktree_path, "main")

        return TaskResult(
            task_id=task_id,
            success=True,
            phase="executed",
            duration_seconds=time.time() - start_time,
            commits=commits
        )

    except Exception as e:
        # Broad boundary catch: any unexpected error becomes a failed
        # TaskResult so a single task cannot crash the parallel batch.
        return TaskResult(
            task_id=task_id,
            success=False,
            phase="failed",
            duration_seconds=time.time() - start_time,
            error=str(e)
        )
|
|
245
|
+
|
|
246
|
+
|
|
247
|
+
def verify_worktree(worktree_path: Path) -> TaskResult:
    """Run verification (tests, lint, type check) in a worktree.

    Each tool records True/False for pass/fail, or None when the tool is
    not installed — None counts as a pass.

    Args:
        worktree_path: Path to the worktree

    Returns:
        TaskResult with verification outcome
    """
    start_time = time.time()

    try:
        state = WorktreeState.load(worktree_path)
        task_id = state.task_id
    except FileNotFoundError:
        # No persisted worktree state; fall back to the directory name.
        task_id = worktree_path.name
        state = None

    # Previously a missing tool raised FileNotFoundError from
    # subprocess.run and aborted verification; the helper now maps that
    # case to None ("tool not available") as the comment always claimed.
    test_results = {
        "tests": _run_verification_tool(["pytest", "-q", "--tb=no"], worktree_path),
        "lint": _run_verification_tool(["ruff", "check", "src/", "--quiet"], worktree_path),
        "type_check": _run_verification_tool(
            ["mypy", "src/", "--ignore-missing-imports", "--no-error-summary"],
            worktree_path,
        ),
    }

    # All must pass (None counts as pass - tool not available)
    passed = all(
        v is None or v is True
        for v in test_results.values()
    )

    if state:
        state.verification = test_results
        state.status = "passed" if passed else "failed"
        state.phase = "MERGE" if passed else "FAILED"
        state.save(worktree_path)

    return TaskResult(
        task_id=task_id,
        success=passed,
        phase="verified",
        duration_seconds=time.time() - start_time,
        test_results=test_results
    )


def _run_verification_tool(cmd: List[str], cwd: Path) -> Optional[bool]:
    """Run one verification command in *cwd*.

    Returns True if it exited 0, False if it failed, None when the
    executable is not installed on this machine.
    """
    try:
        result = subprocess.run(cmd, cwd=cwd, capture_output=True, text=True)
    except FileNotFoundError:
        # Tool not on PATH — verification for it is "not applicable".
        return None
    return result.returncode == 0
|
|
317
|
+
|
|
318
|
+
|
|
319
|
+
def _build_task_prompt(task: dict) -> str:
    """Compose the implementation prompt handed to the AI CLI for one task."""
    tid = task.get("id", "unknown")
    name = task.get("title", "")
    detail = task.get("description", name)
    acceptance = task.get("acceptanceCriteria", [])

    pieces = [f"Implement task {tid}: {name}\n\nDescription: {detail}\n\n"]

    # Optional acceptance-criteria section, one bullet per criterion.
    if acceptance:
        pieces.append("Acceptance Criteria:\n")
        pieces.extend(f" - {item}\n" for item in acceptance)
        pieces.append("\n")

    pieces.append(
        "Instructions:\n"
        "1. Implement the feature as described\n"
        "2. Add appropriate tests\n"
        "3. Ensure code passes linting\n"
        "4. Commit your changes\n"
        "\n"
        "Begin implementation."
    )

    return "".join(pieces)
|
|
347
|
+
|
|
348
|
+
|
|
349
|
+
def run_parallel_loop(
    workspace: Path,
    prd_path: Path,
    max_workers: int = 3,
    run_all: bool = False,
    timeout: int = 600,
    dry_run: bool = False
) -> dict:
    """Run the parallel product loop.

    Repeatedly pulls a batch of pending PRD tasks and runs each batch
    through four phases: create worktrees, execute with the AI CLI in
    parallel, verify (tests/lint/types), and merge passing worktrees
    back to main. Progress is recorded via the unified state system.

    Args:
        workspace: Project root directory
        prd_path: Path to prd.json
        max_workers: Number of parallel tasks
        run_all: Whether to run all tasks or just one batch
        timeout: AI timeout per task
        dry_run: Preview without executing

    Returns:
        Summary dict with "batches", "completed", "failed" and
        "total_duration" keys.
    """
    # Use unified state management
    state_mgr = ParallelExecutionManager(workspace)
    state_mgr.iteration += 1
    state_mgr.parallel_limit = max_workers
    state_mgr.set_active(True)

    summary = {
        "batches": 0,
        "completed": [],
        "failed": [],
        "total_duration": 0
    }

    start_time = time.time()

    try:
        while True:
            # Get pending tasks
            tasks = get_pending_tasks(prd_path, limit=max_workers)

            if not tasks:
                console.print("\n[green]✓[/] All tasks completed!")
                break

            summary["batches"] += 1
            console.print(f"\n[bold]Batch {summary['batches']}:[/] {len(tasks)} tasks")

            if dry_run:
                for task in tasks:
                    console.print(f" Would execute: {task.get('id')} - {task.get('title')}")
                # A dry run never marks tasks complete, so another pass
                # would fetch and preview the same batch forever — this
                # previously looped infinitely when run_all=True. Always
                # stop after one preview batch.
                break

            # Phase 1: Create worktrees
            console.print("\n[dim]Creating worktrees...[/]")
            worktrees = []
            for task in tasks:
                task_id = task.get("id")
                wt_path, wt_state = create_worktree(
                    task_id,
                    task.get("title", "")
                )
                worktrees.append({
                    "path": wt_path,
                    "state": wt_state,
                    "task": task
                })
                console.print(f" ✓ {wt_path}")
                state_mgr.add_active_worktree(task_id)

            # Phase 2: Execute in parallel
            console.print("\n[dim]Executing tasks...[/]")

            cli_name, cli_available = check_ai_cli()
            if not cli_available:
                console.print("[yellow]No AI CLI found. Skipping execution.[/]")
                for wt in worktrees:
                    # Remove the worktree AND its active-registry entry;
                    # the registry entry previously leaked on this path.
                    wt_task_id = wt["task"].get("id")
                    remove_worktree(wt_task_id)
                    state_mgr.remove_active_worktree(wt_task_id)
                break

            results = {}
            with ThreadPoolExecutor(max_workers=max_workers) as executor:
                futures = {
                    executor.submit(
                        execute_task_in_worktree,
                        wt["path"],
                        wt["task"],
                        cli_name,
                        timeout
                    ): wt["task"].get("id")
                    for wt in worktrees
                }

                for future in as_completed(futures):
                    task_id = futures[future]
                    try:
                        result = future.result()
                        results[task_id] = result
                        status = "✓" if result.success else "✗"
                        console.print(f" {status} {task_id}: {result.phase}")
                    except Exception as e:
                        # A crashed worker becomes a failed result so the
                        # rest of the batch keeps going.
                        results[task_id] = TaskResult(
                            task_id=task_id,
                            success=False,
                            phase="failed",
                            duration_seconds=0,
                            error=str(e)
                        )

            # Phase 3: Verify (only tasks whose execution succeeded)
            console.print("\n[dim]Verifying...[/]")
            for wt in worktrees:
                task_id = wt["task"].get("id")
                if results.get(task_id, TaskResult("", False, "failed", 0)).success:
                    verify_result = verify_worktree(wt["path"])
                    results[task_id] = verify_result

                    status = "✅" if verify_result.success else "❌"
                    test_info = ""
                    if verify_result.test_results:
                        test_info = f" (tests: {verify_result.test_results.get('tests', '?')})"
                    console.print(f" {status} {task_id}{test_info}")

            # Phase 4: Merge passed tasks
            console.print("\n[dim]Merging...[/]")
            for wt in worktrees:
                task_id = wt["task"].get("id")
                result = results.get(task_id)

                if result and result.success:
                    if merge_worktree(task_id):
                        console.print(f" ✓ {task_id} merged")
                        summary["completed"].append(task_id)
                        state_mgr.record_task_complete(task_id)

                        # Mark task complete in PRD
                        _mark_task_complete(prd_path, task_id)
                    else:
                        console.print(f" ✗ {task_id} merge failed")
                        summary["failed"].append(task_id)
                        state_mgr.record_task_failed(task_id)
                else:
                    console.print(f" - {task_id} skipped (not passed)")
                    summary["failed"].append(task_id)
                    state_mgr.record_task_failed(task_id)

                # Remove from active
                state_mgr.remove_active_worktree(task_id)

            if not run_all:
                break
    finally:
        # Mark parallel execution as inactive
        state_mgr.set_active(False)

    summary["total_duration"] = time.time() - start_time

    # Print summary
    _print_summary(summary)

    return summary
|
|
512
|
+
|
|
513
|
+
|
|
514
|
+
def _mark_task_complete(prd_path: Path, task_id: str):
    """Flag the matching PRD user story as passing and stamp today's date."""
    if not prd_path.exists():
        return

    try:
        data = json.loads(prd_path.read_text())
        stories = data.get("userStories", [])
        # Find the first story with this id (if any) and mark it done.
        match = next((s for s in stories if s.get("id") == task_id), None)
        if match is not None:
            match["passes"] = True
            match["completedAt"] = datetime.now().strftime("%Y-%m-%d")
        # The file is rewritten even when no story matched, as before.
        prd_path.write_text(json.dumps(data, indent=2))
    except (json.JSONDecodeError, IOError):
        # Best-effort: a broken PRD must not abort the merge phase.
        pass
|
|
529
|
+
|
|
530
|
+
|
|
531
|
+
def _print_summary(summary: dict):
    """Render the final run summary (counts, duration, task lists)."""
    divider = "═" * 50
    console.print("\n" + divider)
    console.print("[bold]SUMMARY[/]")
    console.print(divider)

    table = Table(show_header=False, box=None)
    table.add_column("Metric", style="dim")
    table.add_column("Value")

    metrics = [
        ("Batches", str(summary["batches"])),
        ("Completed", f"[green]{len(summary['completed'])}[/]"),
        ("Failed", f"[red]{len(summary['failed'])}[/]"),
        ("Duration", f"{summary['total_duration']:.1f}s"),
    ]
    for label, value in metrics:
        table.add_row(label, value)

    console.print(table)

    done = summary["completed"]
    if done:
        console.print(f"\n[green]Completed:[/] {', '.join(done)}")
    bad = summary["failed"]
    if bad:
        console.print(f"[red]Failed:[/] {', '.join(bad)}")
|