@graypark/loophaus 3.1.0 → 3.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bin/loophaus.mjs +81 -0
- package/commands/loop-plan.md +167 -17
- package/core/engine.mjs +23 -0
- package/core/merge-strategy.mjs +72 -0
- package/core/parallel-runner.mjs +94 -0
- package/core/policy.mjs +58 -0
- package/core/session.mjs +66 -0
- package/core/worktree.mjs +97 -0
- package/hooks/stop-hook.mjs +49 -0
- package/package.json +1 -1
- package/platforms/codex-cli/installer.mjs +24 -9
- package/platforms/kiro-cli/installer.mjs +12 -7
- package/skills/ralph-claude-interview/SKILL.md +5 -1
package/bin/loophaus.mjs
CHANGED
|
@@ -44,6 +44,10 @@ Usage:
|
|
|
44
44
|
npx @graypark/loophaus replay <trace-file> [--speed 2]
|
|
45
45
|
npx @graypark/loophaus compare <trace1> <trace2>
|
|
46
46
|
npx @graypark/loophaus loops
|
|
47
|
+
npx @graypark/loophaus worktree <create|remove|list>
|
|
48
|
+
npx @graypark/loophaus parallel <prd.json> [--count N] [--base branch]
|
|
49
|
+
npx @graypark/loophaus sessions
|
|
50
|
+
npx @graypark/loophaus resume <session-id>
|
|
47
51
|
npx @graypark/loophaus --version
|
|
48
52
|
|
|
49
53
|
Hosts:
|
|
@@ -371,6 +375,79 @@ async function runCompare() {
|
|
|
371
375
|
console.log("");
|
|
372
376
|
}
|
|
373
377
|
|
|
378
|
+
async function runWorktree() {
|
|
379
|
+
const sub = args[1];
|
|
380
|
+
const { createWorktree, removeWorktree, listWorktrees } = await import("../core/worktree.mjs");
|
|
381
|
+
|
|
382
|
+
switch (sub) {
|
|
383
|
+
case "create": {
|
|
384
|
+
const name = args[2];
|
|
385
|
+
const base = args[3] || "HEAD";
|
|
386
|
+
if (!name) { console.log("Usage: loophaus worktree create <name> [base-branch]"); return; }
|
|
387
|
+
const wt = await createWorktree(name, base);
|
|
388
|
+
console.log(`Created worktree: ${wt.name} at ${wt.path} (branch: ${wt.branch})`);
|
|
389
|
+
break;
|
|
390
|
+
}
|
|
391
|
+
case "remove": {
|
|
392
|
+
const name = args[2];
|
|
393
|
+
if (!name) { console.log("Usage: loophaus worktree remove <name>"); return; }
|
|
394
|
+
await removeWorktree(name);
|
|
395
|
+
console.log(`Removed worktree: ${name}`);
|
|
396
|
+
break;
|
|
397
|
+
}
|
|
398
|
+
case "list": {
|
|
399
|
+
const wts = await listWorktrees();
|
|
400
|
+
if (wts.length === 0) { console.log("No loophaus worktrees."); return; }
|
|
401
|
+
console.log("Worktrees");
|
|
402
|
+
console.log("\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500");
|
|
403
|
+
for (const wt of wts) {
|
|
404
|
+
console.log(` ${wt.name} ${wt.branch} ${wt.path}`);
|
|
405
|
+
}
|
|
406
|
+
break;
|
|
407
|
+
}
|
|
408
|
+
default:
|
|
409
|
+
console.log("Usage: loophaus worktree <create|remove|list>");
|
|
410
|
+
}
|
|
411
|
+
}
|
|
412
|
+
|
|
413
|
+
async function runSessions() {
|
|
414
|
+
const { listSessions } = await import("../core/session.mjs");
|
|
415
|
+
const sessions = await listSessions();
|
|
416
|
+
if (sessions.length === 0) { console.log("No saved sessions."); return; }
|
|
417
|
+
console.log("Sessions");
|
|
418
|
+
console.log("\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500");
|
|
419
|
+
for (const s of sessions) {
|
|
420
|
+
const age = Math.round((Date.now() - new Date(s.savedAt).getTime()) / 60000);
|
|
421
|
+
console.log(` ${s.sessionId} iter=${s.currentIteration || 0} ${age}m ago`);
|
|
422
|
+
}
|
|
423
|
+
}
|
|
424
|
+
|
|
425
|
+
async function runResume() {
|
|
426
|
+
const id = args[1];
|
|
427
|
+
if (!id) { console.log("Usage: loophaus resume <session-id>"); return; }
|
|
428
|
+
const { resumeSession } = await import("../core/session.mjs");
|
|
429
|
+
const state = await resumeSession(id);
|
|
430
|
+
if (!state) { console.log(`Session not found: ${id}`); return; }
|
|
431
|
+
console.log(`Resumed session ${id} at iteration ${state.currentIteration}`);
|
|
432
|
+
console.log(`Loop is now active. The stop hook will continue from here.`);
|
|
433
|
+
}
|
|
434
|
+
|
|
435
|
+
async function runParallelCmd() {
|
|
436
|
+
const prdPath = args[1] || "prd.json";
|
|
437
|
+
const count = parseInt(getFlag("--count") || "2", 10);
|
|
438
|
+
const base = getFlag("--base") || "HEAD";
|
|
439
|
+
|
|
440
|
+
const { runParallel } = await import("../core/parallel-runner.mjs");
|
|
441
|
+
const result = await runParallel({ prdPath, count, baseBranch: base });
|
|
442
|
+
console.log(result.message);
|
|
443
|
+
if (result.worktrees) {
|
|
444
|
+
console.log("\nWorktrees:");
|
|
445
|
+
for (const wt of result.worktrees) {
|
|
446
|
+
console.log(` ${wt.name} branch:${wt.branch} stories:[${wt.stories.join(",")}]`);
|
|
447
|
+
}
|
|
448
|
+
}
|
|
449
|
+
}
|
|
450
|
+
|
|
374
451
|
try {
|
|
375
452
|
switch (command) {
|
|
376
453
|
case "install": await runInstall(); break;
|
|
@@ -381,6 +458,10 @@ try {
|
|
|
381
458
|
case "watch": await runWatch(); break;
|
|
382
459
|
case "replay": await runReplay(); break;
|
|
383
460
|
case "compare": await runCompare(); break;
|
|
461
|
+
case "worktree": await runWorktree(); break;
|
|
462
|
+
case "parallel": await runParallelCmd(); break;
|
|
463
|
+
case "sessions": await runSessions(); break;
|
|
464
|
+
case "resume": await runResume(); break;
|
|
384
465
|
default:
|
|
385
466
|
if (command.startsWith("-")) {
|
|
386
467
|
await runInstall();
|
package/commands/loop-plan.md
CHANGED
|
@@ -1,44 +1,166 @@
|
|
|
1
1
|
---
|
|
2
|
-
description: "Plan and start loop via interactive interview"
|
|
2
|
+
description: "Plan and start loop via interactive interview — auto-parallelizes across worktrees"
|
|
3
3
|
argument-hint: "TASK_DESCRIPTION"
|
|
4
4
|
allowed-tools: ["Bash", "Read", "Write", "Edit", "Glob", "Grep", "Agent", "Skill"]
|
|
5
5
|
---
|
|
6
6
|
|
|
7
|
-
# /loop-plan —
|
|
7
|
+
# /loop-plan — Plan, Parallelize, Execute, Merge
|
|
8
|
+
|
|
9
|
+
End-to-end workflow: interview → PRD → parallel distribution → loop execution → merge.
|
|
10
|
+
The user runs `/loop-plan` once and gets a single merged branch with all work done.
|
|
11
|
+
|
|
12
|
+
---
|
|
8
13
|
|
|
9
14
|
## Phase 1: Discovery Interview
|
|
10
15
|
|
|
11
|
-
Ask
|
|
12
|
-
|
|
13
|
-
|
|
14
|
-
|
|
15
|
-
-
|
|
16
|
+
Ask 3-5 focused questions about $ARGUMENTS:
|
|
17
|
+
|
|
18
|
+
| Category | What to confirm |
|
|
19
|
+
|----------|----------------|
|
|
20
|
+
| Scope | Single feature? Multi-file? Full refactor? |
|
|
21
|
+
| Success criteria | What counts as "done"? |
|
|
22
|
+
| Verification | `npm test`, `npx tsc`, lint commands? |
|
|
23
|
+
| References | Existing code, patterns to follow? |
|
|
24
|
+
| Parallelism | Multiple services? Independent file groups? |
|
|
25
|
+
| Constraints | Must not break existing tests? Library restrictions? |
|
|
26
|
+
|
|
27
|
+
One round of questions only. Skip questions already answered in $ARGUMENTS.
|
|
16
28
|
|
|
17
29
|
## Phase 2: PRD Generation
|
|
18
30
|
|
|
19
|
-
Generate `prd.json
|
|
31
|
+
Generate `prd.json`:
|
|
20
32
|
|
|
21
33
|
```json
|
|
22
34
|
{
|
|
23
35
|
"title": "<project title>",
|
|
36
|
+
"description": "<1-2 sentence summary>",
|
|
24
37
|
"userStories": [
|
|
25
|
-
{
|
|
26
|
-
|
|
38
|
+
{
|
|
39
|
+
"id": "US-001",
|
|
40
|
+
"title": "<story title>",
|
|
41
|
+
"description": "<what to implement>",
|
|
42
|
+
"acceptanceCriteria": ["<criterion 1>", "<criterion 2>"],
|
|
43
|
+
"priority": 1,
|
|
44
|
+
"passes": false,
|
|
45
|
+
"group": "<ownership group — e.g., frontend, backend, auth, database>",
|
|
46
|
+
"testCommand": "<optional: npm test -- US-001>"
|
|
47
|
+
}
|
|
27
48
|
]
|
|
28
49
|
}
|
|
29
50
|
```
|
|
30
51
|
|
|
31
|
-
|
|
52
|
+
Rules:
|
|
53
|
+
- Right-size stories: each completable in 1-2 iterations
|
|
54
|
+
- Assign `group` to each story for parallel distribution
|
|
55
|
+
- Order by `priority` (dependencies first)
|
|
56
|
+
- Include `testCommand` when verification is possible
|
|
57
|
+
|
|
58
|
+
Present PRD to user for approval before proceeding.
|
|
59
|
+
|
|
60
|
+
## Phase 3: Parallelism Assessment
|
|
61
|
+
|
|
62
|
+
Score the task for parallel execution:
|
|
63
|
+
|
|
64
|
+
| Factor | Score |
|
|
65
|
+
|--------|-------|
|
|
66
|
+
| Stories span 3+ directories | +2 |
|
|
67
|
+
| Stories are independent (no shared state) | +2 |
|
|
68
|
+
| Multiple services (frontend/backend/auth) | +3 |
|
|
69
|
+
| 6+ stories total | +1 |
|
|
70
|
+
| Stories need full codebase context | -2 |
|
|
71
|
+
| Strict ordering required | -3 |
|
|
72
|
+
| Cross-file understanding needed | -1 |
|
|
73
|
+
|
|
74
|
+
**Decision:**
|
|
75
|
+
- Score >= 3 → **Parallel mode** (distribute across worktrees by `group`)
|
|
76
|
+
- Score < 3 → **Sequential mode** (single loop, stories in order)
|
|
77
|
+
|
|
78
|
+
Display the score and recommendation to the user. Proceed with the recommended mode unless the user overrides.
|
|
79
|
+
|
|
80
|
+
## Phase 4A: Parallel Execution (score >= 3)
|
|
81
|
+
|
|
82
|
+
### Step 1: Distribute stories by group
|
|
83
|
+
|
|
84
|
+
Group stories by their `group` field. Each group becomes a worktree.
|
|
85
|
+
|
|
86
|
+
```bash
|
|
87
|
+
# Create the current branch as base for worktrees
|
|
88
|
+
BASE_BRANCH=$(git branch --show-current)
|
|
89
|
+
```
|
|
90
|
+
|
|
91
|
+
### Step 2: Create worktrees and distribute PRDs
|
|
92
|
+
|
|
93
|
+
For each group, use the Agent tool to spawn a subagent in an isolated worktree:
|
|
94
|
+
|
|
95
|
+
```
|
|
96
|
+
Agent(
|
|
97
|
+
prompt: "Implement the following stories from prd.json in this worktree.
|
|
98
|
+
Work on one story at a time. For each story:
|
|
99
|
+
1. Read prd.json and pick the next story where passes=false
|
|
100
|
+
2. Implement the story
|
|
101
|
+
3. Verify with the test command if provided
|
|
102
|
+
4. Set passes=true in prd.json
|
|
103
|
+
5. Commit: git add -A && git commit -m 'feat: <story-id> <title>'
|
|
104
|
+
|
|
105
|
+
When ALL stories pass, output <promise>TASK COMPLETE</promise>.
|
|
106
|
+
|
|
107
|
+
Stories assigned to you:
|
|
108
|
+
<filtered stories for this group>",
|
|
109
|
+
isolation: "worktree",
|
|
110
|
+
run_in_background: true,
|
|
111
|
+
name: "<group-name>"
|
|
112
|
+
)
|
|
113
|
+
```
|
|
114
|
+
|
|
115
|
+
Launch ALL group agents in a single message (parallel execution).
|
|
116
|
+
|
|
117
|
+
### Step 3: Wait and collect results
|
|
118
|
+
|
|
119
|
+
Monitor agent completion. When all agents finish:
|
|
120
|
+
|
|
121
|
+
1. List completed worktrees and their branches
|
|
122
|
+
2. For each worktree branch, check if all assigned stories passed
|
|
123
|
+
|
|
124
|
+
### Step 4: Merge results
|
|
125
|
+
|
|
126
|
+
Merge all worktree branches back to the base branch using squash strategy:
|
|
127
|
+
|
|
128
|
+
```bash
|
|
129
|
+
git checkout $BASE_BRANCH
|
|
130
|
+
|
|
131
|
+
# For each completed worktree branch:
|
|
132
|
+
git merge --squash loophaus/<group-name>
|
|
133
|
+
git commit -m "feat: merge <group-name> stories"
|
|
134
|
+
```
|
|
135
|
+
|
|
136
|
+
If merge conflicts occur:
|
|
137
|
+
1. Report the conflict to the user
|
|
138
|
+
2. Suggest manual resolution
|
|
139
|
+
3. Do NOT force-resolve
|
|
140
|
+
|
|
141
|
+
### Step 5: Cleanup
|
|
32
142
|
|
|
33
|
-
|
|
143
|
+
```bash
|
|
144
|
+
# Remove worktrees
|
|
145
|
+
git worktree list | grep .loophaus/worktrees | awk '{print $1}' | xargs -I {} git worktree remove {} --force
|
|
146
|
+
# Remove branches
|
|
147
|
+
git branch | grep loophaus/ | xargs git branch -D
|
|
148
|
+
```
|
|
34
149
|
|
|
35
|
-
|
|
150
|
+
### Step 6: Final verification
|
|
151
|
+
|
|
152
|
+
Run the full test/verification command on the merged result.
|
|
153
|
+
Update the main `prd.json` with all stories marked as `passes: true`.
|
|
154
|
+
|
|
155
|
+
## Phase 4B: Sequential Execution (score < 3)
|
|
156
|
+
|
|
157
|
+
Single loop, no worktrees:
|
|
36
158
|
|
|
37
159
|
1. Create `.loophaus/state.json`:
|
|
38
160
|
```json
|
|
39
161
|
{
|
|
40
162
|
"active": true,
|
|
41
|
-
"prompt": "
|
|
163
|
+
"prompt": "Read prd.json. Pick next story where passes=false. Implement, verify, commit. Update progress.txt.",
|
|
42
164
|
"completionPromise": "TASK COMPLETE",
|
|
43
165
|
"maxIterations": <stories * 2 + 3>,
|
|
44
166
|
"currentIteration": 0,
|
|
@@ -47,9 +169,37 @@ After PRD approval, initialize the loop:
|
|
|
47
169
|
```
|
|
48
170
|
|
|
49
171
|
2. Start working on US-001 immediately.
|
|
172
|
+
3. Each iteration: implement one story, verify, commit, update prd.json.
|
|
173
|
+
4. Output `<promise>TASK COMPLETE</promise>` when ALL stories pass.
|
|
174
|
+
|
|
175
|
+
## Phase 5: Summary Report
|
|
176
|
+
|
|
177
|
+
After completion (parallel or sequential), output:
|
|
178
|
+
|
|
179
|
+
```
|
|
180
|
+
Loop Plan Complete
|
|
181
|
+
══════════════════
|
|
182
|
+
|
|
183
|
+
Mode: parallel (3 worktrees) | sequential
|
|
184
|
+
Stories: 7/7 done
|
|
185
|
+
Duration: ~15 minutes
|
|
186
|
+
Iterations: 12 total (across all workers)
|
|
187
|
+
|
|
188
|
+
Stories:
|
|
189
|
+
✓ US-001 Add login API (backend, 2 iterations)
|
|
190
|
+
✓ US-002 Add auth middleware (backend, 1 iteration)
|
|
191
|
+
✓ US-003 Add login UI (frontend, 3 iterations)
|
|
192
|
+
...
|
|
193
|
+
|
|
194
|
+
Branch: feature/auth-system (all work merged)
|
|
195
|
+
Verify: npm test ✓
|
|
196
|
+
```
|
|
50
197
|
|
|
51
198
|
## Rules
|
|
52
199
|
|
|
53
|
-
-
|
|
54
|
-
-
|
|
55
|
-
-
|
|
200
|
+
- ALWAYS present PRD for user approval before execution
|
|
201
|
+
- ALWAYS show parallelism score and recommendation
|
|
202
|
+
- If parallel: launch ALL agents simultaneously (single message with multiple Agent calls)
|
|
203
|
+
- If merge conflict: STOP and report. Do not auto-resolve.
|
|
204
|
+
- Use `<promise>TASK COMPLETE</promise>` ONLY when genuinely complete
|
|
205
|
+
- Update `progress.txt` with learnings after each story
|
package/core/engine.mjs
CHANGED
|
@@ -27,6 +27,19 @@ export function evaluateStopHook(input, state) {
|
|
|
27
27
|
};
|
|
28
28
|
}
|
|
29
29
|
|
|
30
|
+
if (input.policy_result && input.policy_result.shouldStop) {
|
|
31
|
+
nextState.active = false;
|
|
32
|
+
events.push({ event: "stop", reason: "policy_violation", violations: input.policy_result.violations });
|
|
33
|
+
const reasons = input.policy_result.violations.map(v => `${v.type}: ${v.current}/${v.limit}`).join(", ");
|
|
34
|
+
return {
|
|
35
|
+
decision: "allow",
|
|
36
|
+
nextState,
|
|
37
|
+
events,
|
|
38
|
+
output: null,
|
|
39
|
+
message: `Loop: policy violation (${reasons}).`,
|
|
40
|
+
};
|
|
41
|
+
}
|
|
42
|
+
|
|
30
43
|
if (nextState.completionPromise && input.last_assistant_text) {
|
|
31
44
|
if (extractPromise(input.last_assistant_text, nextState.completionPromise)) {
|
|
32
45
|
nextState.active = false;
|
|
@@ -57,6 +70,16 @@ export function evaluateStopHook(input, state) {
|
|
|
57
70
|
events.push({ event: "verify_failed", script: nextState.verifyScript, output: input.verify_result.output || "" });
|
|
58
71
|
}
|
|
59
72
|
|
|
73
|
+
if (input.test_results && input.test_results.length > 0) {
|
|
74
|
+
const allPassed = input.test_results.every(r => r.passed);
|
|
75
|
+
if (allPassed) {
|
|
76
|
+
events.push({ event: "test_result", status: "all_passed", results: input.test_results });
|
|
77
|
+
} else {
|
|
78
|
+
const failed = input.test_results.filter(r => !r.passed);
|
|
79
|
+
events.push({ event: "test_result", status: "some_failed", failed: failed.map(f => f.storyId) });
|
|
80
|
+
}
|
|
81
|
+
}
|
|
82
|
+
|
|
60
83
|
if (input.stop_hook_active === true) {
|
|
61
84
|
if (!input.has_pending_stories) {
|
|
62
85
|
nextState.active = false;
|
|
@@ -0,0 +1,72 @@
|
|
|
1
|
+
// core/merge-strategy.mjs
|
|
2
|
+
// Strategies for merging parallel worktree results back
|
|
3
|
+
|
|
4
|
+
import { execFile } from "node:child_process";
|
|
5
|
+
import { promisify } from "node:util";
|
|
6
|
+
|
|
7
|
+
const execFileAsync = promisify(execFile);
|
|
8
|
+
|
|
9
|
+
export const STRATEGIES = {
|
|
10
|
+
sequential: "Merge branches one by one in order",
|
|
11
|
+
"cherry-pick": "Cherry-pick specific commits from each branch",
|
|
12
|
+
squash: "Squash each branch into a single commit before merging",
|
|
13
|
+
};
|
|
14
|
+
|
|
15
|
+
export async function mergeSequential(branches, targetBranch = "main") {
|
|
16
|
+
const results = [];
|
|
17
|
+
for (const branch of branches) {
|
|
18
|
+
try {
|
|
19
|
+
await execFileAsync("git", ["merge", branch, "--no-edit"]);
|
|
20
|
+
results.push({ branch, status: "merged" });
|
|
21
|
+
} catch (err) {
|
|
22
|
+
results.push({ branch, status: "conflict", error: err.message });
|
|
23
|
+
try { await execFileAsync("git", ["merge", "--abort"]); } catch {}
|
|
24
|
+
break;
|
|
25
|
+
}
|
|
26
|
+
}
|
|
27
|
+
return results;
|
|
28
|
+
}
|
|
29
|
+
|
|
30
|
+
export async function mergeSquash(branches) {
|
|
31
|
+
const results = [];
|
|
32
|
+
for (const branch of branches) {
|
|
33
|
+
try {
|
|
34
|
+
await execFileAsync("git", ["merge", "--squash", branch]);
|
|
35
|
+
await execFileAsync("git", ["commit", "-m", `squash: merge ${branch}`]);
|
|
36
|
+
results.push({ branch, status: "squashed" });
|
|
37
|
+
} catch (err) {
|
|
38
|
+
results.push({ branch, status: "conflict", error: err.message });
|
|
39
|
+
try { await execFileAsync("git", ["merge", "--abort"]); } catch {}
|
|
40
|
+
break;
|
|
41
|
+
}
|
|
42
|
+
}
|
|
43
|
+
return results;
|
|
44
|
+
}
|
|
45
|
+
|
|
46
|
+
export async function mergeCherryPick(branches) {
|
|
47
|
+
const results = [];
|
|
48
|
+
for (const branch of branches) {
|
|
49
|
+
try {
|
|
50
|
+
const { stdout } = await execFileAsync("git", ["log", `main..${branch}`, "--format=%H", "--reverse"]);
|
|
51
|
+
const commits = stdout.trim().split("\n").filter(Boolean);
|
|
52
|
+
for (const commit of commits) {
|
|
53
|
+
await execFileAsync("git", ["cherry-pick", commit]);
|
|
54
|
+
}
|
|
55
|
+
results.push({ branch, status: "cherry-picked", commits: commits.length });
|
|
56
|
+
} catch (err) {
|
|
57
|
+
results.push({ branch, status: "conflict", error: err.message });
|
|
58
|
+
try { await execFileAsync("git", ["cherry-pick", "--abort"]); } catch {}
|
|
59
|
+
break;
|
|
60
|
+
}
|
|
61
|
+
}
|
|
62
|
+
return results;
|
|
63
|
+
}
|
|
64
|
+
|
|
65
|
+
export async function merge(strategy, branches, targetBranch) {
|
|
66
|
+
switch (strategy) {
|
|
67
|
+
case "sequential": return mergeSequential(branches, targetBranch);
|
|
68
|
+
case "squash": return mergeSquash(branches);
|
|
69
|
+
case "cherry-pick": return mergeCherryPick(branches);
|
|
70
|
+
default: throw new Error(`Unknown merge strategy: ${strategy}`);
|
|
71
|
+
}
|
|
72
|
+
}
|
|
@@ -0,0 +1,94 @@
|
|
|
1
|
+
// core/parallel-runner.mjs
|
|
2
|
+
// Parallel loop execution across worktrees
|
|
3
|
+
|
|
4
|
+
import { fork } from "node:child_process";
|
|
5
|
+
import { readFile, writeFile, mkdir } from "node:fs/promises";
|
|
6
|
+
import { join, resolve, dirname } from "node:path";
|
|
7
|
+
import { fileURLToPath } from "node:url";
|
|
8
|
+
import { createWorktree, removeWorktree, listWorktrees } from "./worktree.mjs";
|
|
9
|
+
|
|
10
|
+
const __filename = fileURLToPath(import.meta.url);
|
|
11
|
+
const HOOKS_DIR = resolve(dirname(__filename), "..", "hooks");
|
|
12
|
+
|
|
13
|
+
export function distributeStories(stories, n) {
|
|
14
|
+
const sorted = [...stories].sort((a, b) => (a.priority || 999) - (b.priority || 999));
|
|
15
|
+
const buckets = Array.from({ length: n }, () => []);
|
|
16
|
+
sorted.forEach((story, i) => buckets[i % n].push(story));
|
|
17
|
+
return buckets;
|
|
18
|
+
}
|
|
19
|
+
|
|
20
|
+
export async function runParallel({ prdPath, count = 2, baseBranch = "HEAD", cwd }) {
|
|
21
|
+
const raw = await readFile(prdPath, "utf-8");
|
|
22
|
+
const prd = JSON.parse(raw);
|
|
23
|
+
const pending = (prd.userStories || []).filter(s => !s.passes);
|
|
24
|
+
|
|
25
|
+
if (pending.length === 0) {
|
|
26
|
+
return { success: true, message: "No pending stories.", results: [] };
|
|
27
|
+
}
|
|
28
|
+
|
|
29
|
+
const effectiveCount = Math.min(count, pending.length);
|
|
30
|
+
const buckets = distributeStories(pending, effectiveCount);
|
|
31
|
+
|
|
32
|
+
const worktrees = [];
|
|
33
|
+
for (let i = 0; i < effectiveCount; i++) {
|
|
34
|
+
const name = `parallel-${i}`;
|
|
35
|
+
try {
|
|
36
|
+
const wt = await createWorktree(name, baseBranch);
|
|
37
|
+
worktrees.push({ ...wt, stories: buckets[i] });
|
|
38
|
+
} catch (err) {
|
|
39
|
+
for (const prev of worktrees) {
|
|
40
|
+
try { await removeWorktree(prev.name); } catch {}
|
|
41
|
+
}
|
|
42
|
+
throw new Error(`Failed to create worktree ${name}: ${err.message}`);
|
|
43
|
+
}
|
|
44
|
+
}
|
|
45
|
+
|
|
46
|
+
for (const wt of worktrees) {
|
|
47
|
+
const wtPrd = {
|
|
48
|
+
...prd,
|
|
49
|
+
userStories: wt.stories,
|
|
50
|
+
};
|
|
51
|
+
const wtPrdPath = join(wt.path, "prd.json");
|
|
52
|
+
await writeFile(wtPrdPath, JSON.stringify(wtPrd, null, 2), "utf-8");
|
|
53
|
+
|
|
54
|
+
const stateDir = join(wt.path, ".loophaus");
|
|
55
|
+
await mkdir(stateDir, { recursive: true });
|
|
56
|
+
await writeFile(join(stateDir, "state.json"), JSON.stringify({
|
|
57
|
+
active: true,
|
|
58
|
+
prompt: `Implement stories from prd.json. Work on one story at a time.`,
|
|
59
|
+
completionPromise: "TASK COMPLETE",
|
|
60
|
+
maxIterations: wt.stories.length * 2 + 3,
|
|
61
|
+
currentIteration: 0,
|
|
62
|
+
sessionId: "",
|
|
63
|
+
name: wt.name,
|
|
64
|
+
startedAt: new Date().toISOString(),
|
|
65
|
+
}, null, 2), "utf-8");
|
|
66
|
+
}
|
|
67
|
+
|
|
68
|
+
return {
|
|
69
|
+
success: true,
|
|
70
|
+
worktrees: worktrees.map(wt => ({
|
|
71
|
+
name: wt.name,
|
|
72
|
+
path: wt.path,
|
|
73
|
+
branch: wt.branch,
|
|
74
|
+
stories: wt.stories.map(s => s.id),
|
|
75
|
+
})),
|
|
76
|
+
message: `Created ${effectiveCount} parallel worktrees with ${pending.length} stories distributed.`,
|
|
77
|
+
};
|
|
78
|
+
}
|
|
79
|
+
|
|
80
|
+
export async function cleanupParallel() {
|
|
81
|
+
const worktrees = await listWorktrees();
|
|
82
|
+
const results = [];
|
|
83
|
+
for (const wt of worktrees) {
|
|
84
|
+
if (wt.name.startsWith("parallel-")) {
|
|
85
|
+
try {
|
|
86
|
+
await removeWorktree(wt.name);
|
|
87
|
+
results.push({ name: wt.name, removed: true });
|
|
88
|
+
} catch (err) {
|
|
89
|
+
results.push({ name: wt.name, removed: false, error: err.message });
|
|
90
|
+
}
|
|
91
|
+
}
|
|
92
|
+
}
|
|
93
|
+
return results;
|
|
94
|
+
}
|
package/core/policy.mjs
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
1
|
+
import { readFile } from "node:fs/promises";
|
|
2
|
+
import { join } from "node:path";
|
|
3
|
+
|
|
4
|
+
const DEFAULT_POLICY = {
|
|
5
|
+
id: "default",
|
|
6
|
+
conditions: [
|
|
7
|
+
{ type: "max_iterations", value: 20 },
|
|
8
|
+
],
|
|
9
|
+
};
|
|
10
|
+
|
|
11
|
+
export async function loadPolicy(cwd) {
|
|
12
|
+
const policyPath = join(cwd || process.cwd(), ".loophaus", "policy.json");
|
|
13
|
+
try {
|
|
14
|
+
const raw = await readFile(policyPath, "utf-8");
|
|
15
|
+
return JSON.parse(raw);
|
|
16
|
+
} catch {
|
|
17
|
+
return DEFAULT_POLICY;
|
|
18
|
+
}
|
|
19
|
+
}
|
|
20
|
+
|
|
21
|
+
export function evaluatePolicy(policy, state, context = {}) {
|
|
22
|
+
const violations = [];
|
|
23
|
+
|
|
24
|
+
for (const condition of policy.conditions || []) {
|
|
25
|
+
switch (condition.type) {
|
|
26
|
+
case "max_iterations":
|
|
27
|
+
if (state.currentIteration > condition.value) {
|
|
28
|
+
violations.push({ type: "max_iterations", limit: condition.value, current: state.currentIteration });
|
|
29
|
+
}
|
|
30
|
+
break;
|
|
31
|
+
case "max_cost":
|
|
32
|
+
if (context.totalCost && context.totalCost > condition.value) {
|
|
33
|
+
violations.push({ type: "max_cost", limit: condition.value, current: context.totalCost });
|
|
34
|
+
}
|
|
35
|
+
break;
|
|
36
|
+
case "max_time_minutes":
|
|
37
|
+
if (state.startedAt) {
|
|
38
|
+
const elapsed = (Date.now() - new Date(state.startedAt).getTime()) / 60000;
|
|
39
|
+
if (elapsed > condition.value) {
|
|
40
|
+
violations.push({ type: "max_time_minutes", limit: condition.value, current: Math.round(elapsed) });
|
|
41
|
+
}
|
|
42
|
+
}
|
|
43
|
+
break;
|
|
44
|
+
case "max_errors":
|
|
45
|
+
if (context.errorCount && context.errorCount > condition.value) {
|
|
46
|
+
violations.push({ type: "max_errors", limit: condition.value, current: context.errorCount });
|
|
47
|
+
}
|
|
48
|
+
break;
|
|
49
|
+
}
|
|
50
|
+
}
|
|
51
|
+
|
|
52
|
+
return {
|
|
53
|
+
shouldStop: violations.length > 0,
|
|
54
|
+
violations,
|
|
55
|
+
};
|
|
56
|
+
}
|
|
57
|
+
|
|
58
|
+
export { DEFAULT_POLICY };
|
package/core/session.mjs
ADDED
|
@@ -0,0 +1,66 @@
|
|
|
1
|
+
import { readFile, writeFile, readdir, mkdir } from "node:fs/promises";
|
|
2
|
+
import { join } from "node:path";
|
|
3
|
+
|
|
4
|
+
function getSessionsDir(cwd) {
|
|
5
|
+
return join(cwd || process.cwd(), ".loophaus", "sessions");
|
|
6
|
+
}
|
|
7
|
+
|
|
8
|
+
export async function saveCheckpoint(sessionId, data, cwd) {
|
|
9
|
+
const dir = getSessionsDir(cwd);
|
|
10
|
+
await mkdir(dir, { recursive: true });
|
|
11
|
+
const checkpoint = {
|
|
12
|
+
sessionId,
|
|
13
|
+
savedAt: new Date().toISOString(),
|
|
14
|
+
...data,
|
|
15
|
+
};
|
|
16
|
+
await writeFile(join(dir, `${sessionId}.json`), JSON.stringify(checkpoint, null, 2), "utf-8");
|
|
17
|
+
return checkpoint;
|
|
18
|
+
}
|
|
19
|
+
|
|
20
|
+
export async function loadCheckpoint(sessionId, cwd) {
|
|
21
|
+
const dir = getSessionsDir(cwd);
|
|
22
|
+
try {
|
|
23
|
+
const raw = await readFile(join(dir, `${sessionId}.json`), "utf-8");
|
|
24
|
+
return JSON.parse(raw);
|
|
25
|
+
} catch {
|
|
26
|
+
return null;
|
|
27
|
+
}
|
|
28
|
+
}
|
|
29
|
+
|
|
30
|
+
export async function listSessions(cwd) {
|
|
31
|
+
const dir = getSessionsDir(cwd);
|
|
32
|
+
try {
|
|
33
|
+
const files = await readdir(dir);
|
|
34
|
+
const sessions = [];
|
|
35
|
+
for (const file of files) {
|
|
36
|
+
if (!file.endsWith(".json")) continue;
|
|
37
|
+
try {
|
|
38
|
+
const raw = await readFile(join(dir, file), "utf-8");
|
|
39
|
+
const data = JSON.parse(raw);
|
|
40
|
+
sessions.push(data);
|
|
41
|
+
} catch { /* skip malformed */ }
|
|
42
|
+
}
|
|
43
|
+
return sessions.sort((a, b) => new Date(b.savedAt).getTime() - new Date(a.savedAt).getTime());
|
|
44
|
+
} catch {
|
|
45
|
+
return [];
|
|
46
|
+
}
|
|
47
|
+
}
|
|
48
|
+
|
|
49
|
+
export async function resumeSession(sessionId, cwd) {
|
|
50
|
+
const checkpoint = await loadCheckpoint(sessionId, cwd);
|
|
51
|
+
if (!checkpoint) return null;
|
|
52
|
+
|
|
53
|
+
const { write } = await import("../store/state-store.mjs");
|
|
54
|
+
const state = {
|
|
55
|
+
active: true,
|
|
56
|
+
prompt: checkpoint.prompt || "",
|
|
57
|
+
completionPromise: checkpoint.completionPromise || "TADA",
|
|
58
|
+
maxIterations: checkpoint.maxIterations || 20,
|
|
59
|
+
currentIteration: checkpoint.currentIteration || 0,
|
|
60
|
+
sessionId: checkpoint.sessionId,
|
|
61
|
+
name: checkpoint.name || "",
|
|
62
|
+
startedAt: checkpoint.startedAt || new Date().toISOString(),
|
|
63
|
+
};
|
|
64
|
+
await write(state, cwd, checkpoint.name);
|
|
65
|
+
return state;
|
|
66
|
+
}
|
|
@@ -0,0 +1,97 @@
|
|
|
1
|
+
// core/worktree.mjs
|
|
2
|
+
// Git worktree lifecycle management
|
|
3
|
+
|
|
4
|
+
import { execFile } from "node:child_process";
|
|
5
|
+
import { promisify } from "node:util";
|
|
6
|
+
import { mkdir, rm, access } from "node:fs/promises";
|
|
7
|
+
import { join } from "node:path";
|
|
8
|
+
|
|
9
|
+
const execFileAsync = promisify(execFile);
|
|
10
|
+
|
|
11
|
+
async function fileExists(p) {
|
|
12
|
+
try { await access(p); return true; } catch { return false; }
|
|
13
|
+
}
|
|
14
|
+
|
|
15
|
+
export async function getRepoRoot() {
|
|
16
|
+
try {
|
|
17
|
+
const { stdout } = await execFileAsync("git", ["rev-parse", "--show-toplevel"]);
|
|
18
|
+
return stdout.trim();
|
|
19
|
+
} catch {
|
|
20
|
+
return null;
|
|
21
|
+
}
|
|
22
|
+
}
|
|
23
|
+
|
|
24
|
+
export async function createWorktree(name, baseBranch = "HEAD") {
|
|
25
|
+
const root = await getRepoRoot();
|
|
26
|
+
if (!root) throw new Error("Not in a git repository");
|
|
27
|
+
|
|
28
|
+
const worktreePath = join(root, ".loophaus", "worktrees", name);
|
|
29
|
+
const branchName = `loophaus/${name}`;
|
|
30
|
+
|
|
31
|
+
if (await fileExists(worktreePath)) {
|
|
32
|
+
throw new Error(`Worktree already exists: ${name}`);
|
|
33
|
+
}
|
|
34
|
+
|
|
35
|
+
await mkdir(join(root, ".loophaus", "worktrees"), { recursive: true });
|
|
36
|
+
|
|
37
|
+
await execFileAsync("git", ["worktree", "add", "-b", branchName, worktreePath, baseBranch]);
|
|
38
|
+
|
|
39
|
+
return { name, path: worktreePath, branch: branchName };
|
|
40
|
+
}
|
|
41
|
+
|
|
42
|
+
export async function removeWorktree(name) {
|
|
43
|
+
const root = await getRepoRoot();
|
|
44
|
+
if (!root) throw new Error("Not in a git repository");
|
|
45
|
+
|
|
46
|
+
const worktreePath = join(root, ".loophaus", "worktrees", name);
|
|
47
|
+
|
|
48
|
+
if (!(await fileExists(worktreePath))) {
|
|
49
|
+
throw new Error(`Worktree not found: ${name}`);
|
|
50
|
+
}
|
|
51
|
+
|
|
52
|
+
await execFileAsync("git", ["worktree", "remove", worktreePath, "--force"]);
|
|
53
|
+
|
|
54
|
+
const branchName = `loophaus/${name}`;
|
|
55
|
+
try {
|
|
56
|
+
await execFileAsync("git", ["branch", "-D", branchName]);
|
|
57
|
+
} catch { /* branch may not exist */ }
|
|
58
|
+
|
|
59
|
+
return { name, removed: true };
|
|
60
|
+
}
|
|
61
|
+
|
|
62
|
+
export async function listWorktrees() {
|
|
63
|
+
const root = await getRepoRoot();
|
|
64
|
+
if (!root) return [];
|
|
65
|
+
|
|
66
|
+
try {
|
|
67
|
+
const { stdout } = await execFileAsync("git", ["worktree", "list", "--porcelain"]);
|
|
68
|
+
const entries = [];
|
|
69
|
+
let current = {};
|
|
70
|
+
|
|
71
|
+
for (const line of stdout.split("\n")) {
|
|
72
|
+
if (line.startsWith("worktree ")) {
|
|
73
|
+
if (current.path) entries.push(current);
|
|
74
|
+
current = { path: line.slice(9) };
|
|
75
|
+
} else if (line.startsWith("HEAD ")) {
|
|
76
|
+
current.head = line.slice(5);
|
|
77
|
+
} else if (line.startsWith("branch ")) {
|
|
78
|
+
current.branch = line.slice(7);
|
|
79
|
+
} else if (line === "bare") {
|
|
80
|
+
current.bare = true;
|
|
81
|
+
} else if (line === "") {
|
|
82
|
+
if (current.path) entries.push(current);
|
|
83
|
+
current = {};
|
|
84
|
+
}
|
|
85
|
+
}
|
|
86
|
+
|
|
87
|
+
const loophausDir = join(root, ".loophaus", "worktrees");
|
|
88
|
+
return entries.filter(e => e.path && e.path.startsWith(loophausDir)).map(e => ({
|
|
89
|
+
name: e.path.split("/").pop(),
|
|
90
|
+
path: e.path,
|
|
91
|
+
branch: e.branch || "",
|
|
92
|
+
head: e.head || "",
|
|
93
|
+
}));
|
|
94
|
+
} catch {
|
|
95
|
+
return [];
|
|
96
|
+
}
|
|
97
|
+
}
|
package/hooks/stop-hook.mjs
CHANGED
|
@@ -4,6 +4,32 @@ import { evaluateStopHook } from "../core/engine.mjs";
|
|
|
4
4
|
import { getLastAssistantText, hasPendingStories } from "../core/io-helpers.mjs";
|
|
5
5
|
import { read as readState, write as writeState } from "../store/state-store.mjs";
|
|
6
6
|
import { logEvents } from "../core/event-logger.mjs";
|
|
7
|
+
import { join } from "node:path";
|
|
8
|
+
|
|
9
|
+
/**
 * Best-effort runner for per-story verification commands declared in prd.json.
 *
 * Reads `<cwd>/prd.json`, and for every user story that declares a
 * `testCommand` and is not already marked `passes`, executes the command via
 * `sh -c` (60s timeout, story's cwd). Stories without a `testCommand`, or
 * already passing, are skipped.
 *
 * @param {string} cwd - directory containing prd.json; also the working
 *   directory for each test command.
 * @returns {Promise<Array<{storyId: *, passed: boolean, error?: string}>>}
 *   One entry per story that was actually run. Returns [] when prd.json is
 *   missing/unparseable or has no `userStories` array — deliberately silent
 *   so the stop hook stays best-effort.
 */
async function runStoryTests(cwd) {
  // Lazy-load stdlib deps in parallel; this keeps the hook's cold path cheap
  // when prd.json is absent.
  const [{ readFile }, { execFile }, { promisify }] = await Promise.all([
    import("node:fs/promises"),
    import("node:child_process"),
    import("node:util"),
  ]);
  const run = promisify(execFile);

  try {
    const raw = await readFile(join(cwd, "prd.json"), "utf-8");
    const { userStories } = JSON.parse(raw);
    if (!Array.isArray(userStories)) return [];

    const outcomes = [];
    for (const story of userStories) {
      // Only exercise stories that declare a command and are still pending.
      if (!story.testCommand || story.passes) continue;
      try {
        await run("sh", ["-c", story.testCommand], { cwd, timeout: 60_000 });
        outcomes.push({ storyId: story.id, passed: true });
      } catch (err) {
        // Command failed, timed out, or `sh` is unavailable — record and move on.
        outcomes.push({ storyId: story.id, passed: false, error: err.message });
      }
    }
    return outcomes;
  } catch {
    // Missing or invalid prd.json: nothing to report.
    return [];
  }
}
|
|
7
33
|
|
|
8
34
|
async function readStdin() {
|
|
9
35
|
const chunks = [];
|
|
@@ -39,11 +65,21 @@ async function main() {
|
|
|
39
65
|
}
|
|
40
66
|
}
|
|
41
67
|
|
|
68
|
+
// Run story tests if prd.json has testCommand fields
|
|
69
|
+
const testResults = await runStoryTests(cwd);
|
|
70
|
+
|
|
71
|
+
// Evaluate loop policy
|
|
72
|
+
const { loadPolicy, evaluatePolicy } = await import("../core/policy.mjs");
|
|
73
|
+
const policy = await loadPolicy(cwd);
|
|
74
|
+
const policyResult = evaluatePolicy(policy, state, { totalCost: 0, errorCount: 0 });
|
|
75
|
+
|
|
42
76
|
const input = {
|
|
43
77
|
...hookInput,
|
|
44
78
|
last_assistant_text: lastText,
|
|
45
79
|
has_pending_stories: pending,
|
|
46
80
|
verify_result: verifyResult,
|
|
81
|
+
test_results: testResults,
|
|
82
|
+
policy_result: policyResult,
|
|
47
83
|
};
|
|
48
84
|
|
|
49
85
|
const result = evaluateStopHook(input, state);
|
|
@@ -51,6 +87,19 @@ async function main() {
|
|
|
51
87
|
await writeState(result.nextState, cwd);
|
|
52
88
|
await logEvents(result.events, { adapter: "auto", loop_id: state.sessionId || "unknown" }, cwd);
|
|
53
89
|
|
|
90
|
+
// Save session checkpoint (best-effort)
|
|
91
|
+
try {
|
|
92
|
+
const { saveCheckpoint } = await import("../core/session.mjs");
|
|
93
|
+
await saveCheckpoint(result.nextState.sessionId || `auto-${Date.now()}`, {
|
|
94
|
+
prompt: result.nextState.prompt,
|
|
95
|
+
completionPromise: result.nextState.completionPromise,
|
|
96
|
+
maxIterations: result.nextState.maxIterations,
|
|
97
|
+
currentIteration: result.nextState.currentIteration,
|
|
98
|
+
name: result.nextState.name,
|
|
99
|
+
startedAt: result.nextState.startedAt,
|
|
100
|
+
}, cwd);
|
|
101
|
+
} catch { /* best-effort */ }
|
|
102
|
+
|
|
54
103
|
if (result.message) process.stderr.write(result.message + "\n");
|
|
55
104
|
if (result.output) process.stdout.write(JSON.stringify(result.output));
|
|
56
105
|
process.exit(0);
|
package/package.json
CHANGED
|
@@ -86,22 +86,37 @@ description: "Stop active loop"
|
|
|
86
86
|
"loop-plan": {
|
|
87
87
|
content: `---
|
|
88
88
|
name: loop-plan
|
|
89
|
-
description: "Plan and start loop via interactive interview"
|
|
89
|
+
description: "Plan and start loop via interactive interview — auto-parallelizes across worktrees"
|
|
90
90
|
argument-hint: "TASK_DESCRIPTION"
|
|
91
91
|
---
|
|
92
92
|
|
|
93
|
-
# /loop-plan —
|
|
93
|
+
# /loop-plan — Plan, Parallelize, Execute, Merge
|
|
94
94
|
|
|
95
95
|
## Phase 1: Discovery Interview
|
|
96
|
-
Ask 3-5 focused questions about
|
|
96
|
+
Ask 3-5 focused questions about scope, success criteria, verification commands, parallelism potential.
|
|
97
97
|
|
|
98
98
|
## Phase 2: PRD Generation
|
|
99
|
-
Generate \`prd.json\` with
|
|
100
|
-
|
|
101
|
-
## Phase 3:
|
|
102
|
-
|
|
103
|
-
|
|
104
|
-
|
|
99
|
+
Generate \`prd.json\` with user stories. Each story has: id, title, description, acceptanceCriteria, priority, passes, group (for parallel distribution), testCommand.
|
|
100
|
+
|
|
101
|
+
## Phase 3: Parallelism Assessment
|
|
102
|
+
Score the task: stories span 3+ dirs (+2), independent (+2), multiple services (+3), 6+ stories (+1), need full context (-2), strict ordering (-3).
|
|
103
|
+
Score >= 3: parallel mode (worktrees by group). Score < 3: sequential mode.
|
|
104
|
+
|
|
105
|
+
## Phase 4A: Parallel Execution (score >= 3)
|
|
106
|
+
1. Group stories by \`group\` field
|
|
107
|
+
2. For each group, create an isolated worktree and assign stories
|
|
108
|
+
3. Run all groups simultaneously
|
|
109
|
+
4. When all complete, merge branches back (squash strategy)
|
|
110
|
+
5. Run full verification on merged result
|
|
111
|
+
|
|
112
|
+
## Phase 4B: Sequential Execution (score < 3)
|
|
113
|
+
Create \`.loophaus/state.json\` and work through stories one at a time.
|
|
114
|
+
|
|
115
|
+
## Rules
|
|
116
|
+
- Present PRD for user approval before execution
|
|
117
|
+
- Show parallelism score and recommendation
|
|
118
|
+
- If merge conflict: STOP and report
|
|
119
|
+
- Use \`<promise>TASK COMPLETE</promise>\` ONLY when ALL stories pass
|
|
105
120
|
`,
|
|
106
121
|
},
|
|
107
122
|
"loop-pulse": {
|
|
@@ -64,22 +64,27 @@ description: "Stop active loop — use when user says 'stop loop', 'cancel loop'
|
|
|
64
64
|
"loop-plan": {
|
|
65
65
|
content: `---
|
|
66
66
|
name: loop-plan
|
|
67
|
-
description: "Plan and start loop via interactive interview — use when user says 'plan loop', 'interview', 'create PRD', 'plan task'"
|
|
67
|
+
description: "Plan and start loop via interactive interview with auto-parallelization — use when user says 'plan loop', 'interview', 'create PRD', 'plan task', 'parallelize'"
|
|
68
68
|
argument-hint: "TASK_DESCRIPTION"
|
|
69
69
|
---
|
|
70
70
|
|
|
71
|
-
#
|
|
71
|
+
# Plan, Parallelize, Execute, Merge
|
|
72
72
|
|
|
73
73
|
## Phase 1: Discovery Interview
|
|
74
|
-
Ask 3-5 focused questions about
|
|
74
|
+
Ask 3-5 focused questions about scope, success criteria, verification, parallelism.
|
|
75
75
|
|
|
76
76
|
## Phase 2: PRD Generation
|
|
77
|
-
Generate prd.json
|
|
77
|
+
Generate prd.json. Each story has: id, title, group (for parallel distribution), testCommand, passes.
|
|
78
78
|
|
|
79
|
-
## Phase 3:
|
|
80
|
-
|
|
79
|
+
## Phase 3: Parallelism Assessment
|
|
80
|
+
Score: independent stories (+2), multiple services (+3), 6+ stories (+1), strict ordering (-3).
|
|
81
|
+
Score >= 3: parallel (worktrees). Score < 3: sequential.
|
|
81
82
|
|
|
82
|
-
|
|
83
|
+
## Phase 4: Execution
|
|
84
|
+
Parallel: create worktrees per group, distribute stories, run simultaneously, merge back.
|
|
85
|
+
Sequential: single loop through stories in order.
|
|
86
|
+
|
|
87
|
+
Rules: present PRD for approval, show parallelism score, stop on merge conflicts.
|
|
83
88
|
`,
|
|
84
89
|
},
|
|
85
90
|
"loop-pulse": {
|
|
@@ -50,7 +50,9 @@ Ask **concise questions** for missing items. Max 3-5 per round, one round only.
|
|
|
50
50
|
| Cross-file understanding needed | -1 |
|
|
51
51
|
| Multiple services | +3 |
|
|
52
52
|
|
|
53
|
-
Score >= 3:
|
|
53
|
+
Score >= 3: **Auto-parallel mode.** Group stories by `group` field, create isolated worktrees per group using Agent tool with `isolation: "worktree"`, launch ALL groups simultaneously in a single message. After all agents complete, merge branches back with squash strategy.
|
|
54
|
+
|
|
55
|
+
Score < 3: Sequential mode. Single loop, stories in order.
|
|
54
56
|
|
|
55
57
|
### max-iterations
|
|
56
58
|
|
|
@@ -80,6 +82,8 @@ ralph-skills compatible format:
|
|
|
80
82
|
"acceptanceCriteria": ["Criterion 1", "Typecheck passes"],
|
|
81
83
|
"priority": 1,
|
|
82
84
|
"passes": false,
|
|
85
|
+
"group": "backend",
|
|
86
|
+
"testCommand": "npm test -- US-001",
|
|
83
87
|
"notes": ""
|
|
84
88
|
}
|
|
85
89
|
]
|