taskplane 0.0.1 → 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +2 -20
- package/bin/taskplane.mjs +706 -0
- package/dashboard/public/app.js +900 -0
- package/dashboard/public/index.html +92 -0
- package/dashboard/public/style.css +924 -0
- package/dashboard/server.cjs +531 -0
- package/extensions/task-orchestrator.ts +28 -0
- package/extensions/task-runner.ts +1923 -0
- package/extensions/taskplane/abort.ts +466 -0
- package/extensions/taskplane/config.ts +102 -0
- package/extensions/taskplane/discovery.ts +988 -0
- package/extensions/taskplane/engine.ts +758 -0
- package/extensions/taskplane/execution.ts +1752 -0
- package/extensions/taskplane/extension.ts +577 -0
- package/extensions/taskplane/formatting.ts +718 -0
- package/extensions/taskplane/git.ts +38 -0
- package/extensions/taskplane/index.ts +22 -0
- package/extensions/taskplane/merge.ts +795 -0
- package/extensions/taskplane/messages.ts +134 -0
- package/extensions/taskplane/persistence.ts +1121 -0
- package/extensions/taskplane/resume.ts +1092 -0
- package/extensions/taskplane/sessions.ts +92 -0
- package/extensions/taskplane/types.ts +1514 -0
- package/extensions/taskplane/waves.ts +900 -0
- package/extensions/taskplane/worktree.ts +1624 -0
- package/package.json +48 -3
- package/skills/create-taskplane-task/SKILL.md +326 -0
- package/skills/create-taskplane-task/references/context-template.md +78 -0
- package/skills/create-taskplane-task/references/prompt-template.md +246 -0
- package/templates/agents/task-merger.md +256 -0
- package/templates/agents/task-reviewer.md +81 -0
- package/templates/agents/task-worker.md +140 -0
- package/templates/config/task-orchestrator.yaml +89 -0
- package/templates/config/task-runner.yaml +99 -0
- package/templates/tasks/CONTEXT.md +31 -0
- package/templates/tasks/EXAMPLE-001-hello-world/PROMPT.md +90 -0
- package/templates/tasks/EXAMPLE-001-hello-world/STATUS.md +73 -0
|
@@ -0,0 +1,466 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Abort logic (graceful and hard)
|
|
3
|
+
* @module orch/abort
|
|
4
|
+
*/
|
|
5
|
+
import { writeFileSync, existsSync } from "fs";
|
|
6
|
+
import { execSync } from "child_process";
|
|
7
|
+
import { join, resolve } from "path";
|
|
8
|
+
|
|
9
|
+
import { execLog, tmuxHasSession, tmuxKillSession } from "./execution.ts";
|
|
10
|
+
import { deleteBatchState, parseOrchSessionNames, persistRuntimeState } from "./persistence.ts";
|
|
11
|
+
import type { AbortActionStep, AbortErrorCode, AbortLaneResult, AbortMode, AbortResult, AbortTargetSession, AllocatedLane, OrchBatchRuntimeState, PersistedBatchState } from "./types.ts";
|
|
12
|
+
|
|
13
|
+
// ── Abort Pure Functions ─────────────────────────────────────────────
|
|
14
|
+
|
|
15
|
+
/**
|
|
16
|
+
* Select and enrich target sessions for abort.
|
|
17
|
+
*
|
|
18
|
+
* Filters sessions to only `orch-lane-*` and `orch-merge-*` patterns,
|
|
19
|
+
* then enriches with task folder and worktree info from persisted or
|
|
20
|
+
* runtime state.
|
|
21
|
+
*
|
|
22
|
+
* Pure function: no side effects.
|
|
23
|
+
*
|
|
24
|
+
* @param allSessionNames - All TMUX session names matching the prefix
|
|
25
|
+
* @param persistedState - Loaded persisted state (null if unavailable)
|
|
26
|
+
* @param runtimeLanes - Current in-memory lanes (from orchBatchState)
|
|
27
|
+
* @param repoRoot - Repository root path for task folder resolution
|
|
28
|
+
* @returns Filtered and enriched target sessions
|
|
29
|
+
*/
|
|
30
|
+
export function selectAbortTargetSessions(
|
|
31
|
+
allSessionNames: string[],
|
|
32
|
+
persistedState: PersistedBatchState | null,
|
|
33
|
+
runtimeLanes: AllocatedLane[],
|
|
34
|
+
repoRoot: string,
|
|
35
|
+
prefix: string = "orch",
|
|
36
|
+
): AbortTargetSession[] {
|
|
37
|
+
// Filter to only lane and merge sessions for the exact orchestrator prefix.
|
|
38
|
+
const targetNames = allSessionNames.filter(name => {
|
|
39
|
+
const prefixWithDash = `${prefix}-`;
|
|
40
|
+
if (!name.startsWith(prefixWithDash)) return false;
|
|
41
|
+
const suffix = name.slice(prefixWithDash.length);
|
|
42
|
+
return suffix.startsWith("lane-") || suffix.startsWith("merge-");
|
|
43
|
+
});
|
|
44
|
+
|
|
45
|
+
// Build lookup from persisted state task records
|
|
46
|
+
const persistedLookup = new Map<string, { laneId: string; taskId: string; taskFolder: string }>();
|
|
47
|
+
if (persistedState) {
|
|
48
|
+
for (const task of persistedState.tasks) {
|
|
49
|
+
if (task.sessionName) {
|
|
50
|
+
persistedLookup.set(task.sessionName, {
|
|
51
|
+
laneId: `lane-${task.laneNumber}`,
|
|
52
|
+
taskId: task.taskId,
|
|
53
|
+
taskFolder: task.taskFolder,
|
|
54
|
+
});
|
|
55
|
+
}
|
|
56
|
+
}
|
|
57
|
+
}
|
|
58
|
+
|
|
59
|
+
// Build lookup from runtime lanes
|
|
60
|
+
const runtimeLookup = new Map<string, { laneId: string; taskId: string | null; worktreePath: string; taskFolder: string | null }>();
|
|
61
|
+
for (const lane of runtimeLanes) {
|
|
62
|
+
const currentTask = lane.tasks.length > 0 ? lane.tasks[0] : null;
|
|
63
|
+
runtimeLookup.set(lane.tmuxSessionName, {
|
|
64
|
+
laneId: lane.laneId,
|
|
65
|
+
taskId: currentTask?.taskId || null,
|
|
66
|
+
worktreePath: lane.worktreePath,
|
|
67
|
+
taskFolder: currentTask?.task.taskFolder || null,
|
|
68
|
+
});
|
|
69
|
+
}
|
|
70
|
+
|
|
71
|
+
return targetNames.map(sessionName => {
|
|
72
|
+
const runtime = runtimeLookup.get(sessionName);
|
|
73
|
+
const persisted = persistedLookup.get(sessionName);
|
|
74
|
+
|
|
75
|
+
const laneId = runtime?.laneId || persisted?.laneId || "unknown";
|
|
76
|
+
const taskId = runtime?.taskId || persisted?.taskId || null;
|
|
77
|
+
const worktreePath = runtime?.worktreePath || null;
|
|
78
|
+
const taskFolder = runtime?.taskFolder || persisted?.taskFolder || null;
|
|
79
|
+
|
|
80
|
+
// Resolve task folder path within the worktree
|
|
81
|
+
let taskFolderInWorktree: string | null = null;
|
|
82
|
+
if (taskFolder && worktreePath && repoRoot) {
|
|
83
|
+
const repoRootNorm = resolve(repoRoot).replace(/\\/g, "/");
|
|
84
|
+
const folderNorm = resolve(taskFolder).replace(/\\/g, "/");
|
|
85
|
+
let relativePath: string;
|
|
86
|
+
if (folderNorm.startsWith(repoRootNorm + "/")) {
|
|
87
|
+
relativePath = folderNorm.slice(repoRootNorm.length + 1);
|
|
88
|
+
} else {
|
|
89
|
+
relativePath = taskFolder;
|
|
90
|
+
}
|
|
91
|
+
taskFolderInWorktree = join(worktreePath, relativePath);
|
|
92
|
+
}
|
|
93
|
+
|
|
94
|
+
return {
|
|
95
|
+
sessionName,
|
|
96
|
+
laneId,
|
|
97
|
+
taskId,
|
|
98
|
+
taskFolderInWorktree,
|
|
99
|
+
worktreePath,
|
|
100
|
+
};
|
|
101
|
+
});
|
|
102
|
+
}
|
|
103
|
+
|
|
104
|
+
/**
|
|
105
|
+
* Plan the ordered list of abort actions based on mode.
|
|
106
|
+
*
|
|
107
|
+
* Pure function: no side effects.
|
|
108
|
+
*
|
|
109
|
+
* @param mode - Abort mode (graceful or hard)
|
|
110
|
+
* @param gracePeriodMs - Grace period in ms (graceful only, default 60000)
|
|
111
|
+
* @param pollIntervalMs - Poll interval in ms (graceful only, default 2000)
|
|
112
|
+
* @returns Ordered list of abort action steps
|
|
113
|
+
*/
|
|
114
|
+
export function planAbortActions(
|
|
115
|
+
mode: AbortMode,
|
|
116
|
+
gracePeriodMs: number = 60_000,
|
|
117
|
+
pollIntervalMs: number = 2_000,
|
|
118
|
+
): AbortActionStep[] {
|
|
119
|
+
if (mode === "hard") {
|
|
120
|
+
return [{ type: "kill-all" }];
|
|
121
|
+
}
|
|
122
|
+
return [
|
|
123
|
+
{ type: "write-wrapup" },
|
|
124
|
+
{ type: "poll-wait", gracePeriodMs, pollIntervalMs },
|
|
125
|
+
{ type: "kill-remaining" },
|
|
126
|
+
];
|
|
127
|
+
}
|
|
128
|
+
|
|
129
|
+
|
|
130
|
+
// ── Abort Orchestration Functions ────────────────────────────────────
|
|
131
|
+
|
|
132
|
+
/**
|
|
133
|
+
* Write wrap-up signal files to each lane's task folder.
|
|
134
|
+
*
|
|
135
|
+
* Writes both `.task-wrap-up` (primary) and `.wiggum-wrap-up` (legacy)
|
|
136
|
+
* for backward compatibility. Continues on partial failure — aggregates
|
|
137
|
+
* errors per lane.
|
|
138
|
+
*
|
|
139
|
+
* @param targets - Target sessions with resolved task folders
|
|
140
|
+
* @returns Updated target results with wrapUpWritten/wrapUpError
|
|
141
|
+
*/
|
|
142
|
+
export function writeWrapUpFiles(
|
|
143
|
+
targets: AbortTargetSession[],
|
|
144
|
+
): Array<{ sessionName: string; written: boolean; error: string | null }> {
|
|
145
|
+
const timestamp = new Date().toISOString();
|
|
146
|
+
const content = `Abort requested at ${timestamp}`;
|
|
147
|
+
const results: Array<{ sessionName: string; written: boolean; error: string | null }> = [];
|
|
148
|
+
|
|
149
|
+
for (const target of targets) {
|
|
150
|
+
if (!target.taskFolderInWorktree) {
|
|
151
|
+
// Skip child sessions (workers, reviewers) — only main lane sessions have task folders
|
|
152
|
+
// Also skip merge sessions (no task folder)
|
|
153
|
+
if (target.sessionName.endsWith("-worker") || target.sessionName.endsWith("-reviewer") || target.sessionName.includes("merge")) {
|
|
154
|
+
results.push({ sessionName: target.sessionName, written: false, error: null });
|
|
155
|
+
} else {
|
|
156
|
+
results.push({ sessionName: target.sessionName, written: false, error: "No task folder resolved" });
|
|
157
|
+
}
|
|
158
|
+
continue;
|
|
159
|
+
}
|
|
160
|
+
|
|
161
|
+
try {
|
|
162
|
+
const primaryPath = join(target.taskFolderInWorktree, ".task-wrap-up");
|
|
163
|
+
const legacyPath = join(target.taskFolderInWorktree, ".wiggum-wrap-up");
|
|
164
|
+
|
|
165
|
+
// Ensure directory exists
|
|
166
|
+
if (!existsSync(target.taskFolderInWorktree)) {
|
|
167
|
+
results.push({ sessionName: target.sessionName, written: false, error: `Task folder does not exist: ${target.taskFolderInWorktree}` });
|
|
168
|
+
continue;
|
|
169
|
+
}
|
|
170
|
+
|
|
171
|
+
writeFileSync(primaryPath, content, "utf-8");
|
|
172
|
+
writeFileSync(legacyPath, content, "utf-8");
|
|
173
|
+
results.push({ sessionName: target.sessionName, written: true, error: null });
|
|
174
|
+
} catch (err) {
|
|
175
|
+
results.push({
|
|
176
|
+
sessionName: target.sessionName,
|
|
177
|
+
written: false,
|
|
178
|
+
error: err instanceof Error ? err.message : String(err),
|
|
179
|
+
});
|
|
180
|
+
}
|
|
181
|
+
}
|
|
182
|
+
|
|
183
|
+
return results;
|
|
184
|
+
}
|
|
185
|
+
|
|
186
|
+
/**
|
|
187
|
+
* Wait for TMUX sessions to exit gracefully.
|
|
188
|
+
*
|
|
189
|
+
* Polls every `pollIntervalMs` until all sessions have exited or the
|
|
190
|
+
* grace period expires.
|
|
191
|
+
*
|
|
192
|
+
* @param sessionNames - Session names to monitor
|
|
193
|
+
* @param gracePeriodMs - Maximum time to wait
|
|
194
|
+
* @param pollIntervalMs - Polling interval
|
|
195
|
+
* @returns Object with exited and remaining session names
|
|
196
|
+
*/
|
|
197
|
+
export async function waitForSessionExit(
|
|
198
|
+
sessionNames: string[],
|
|
199
|
+
gracePeriodMs: number,
|
|
200
|
+
pollIntervalMs: number,
|
|
201
|
+
): Promise<{ exited: string[]; remaining: string[] }> {
|
|
202
|
+
const deadline = Date.now() + gracePeriodMs;
|
|
203
|
+
const exited: string[] = [];
|
|
204
|
+
const remaining = new Set(sessionNames);
|
|
205
|
+
|
|
206
|
+
while (Date.now() < deadline && remaining.size > 0) {
|
|
207
|
+
for (const name of [...remaining]) {
|
|
208
|
+
if (!tmuxHasSession(name)) {
|
|
209
|
+
remaining.delete(name);
|
|
210
|
+
exited.push(name);
|
|
211
|
+
}
|
|
212
|
+
}
|
|
213
|
+
if (remaining.size === 0) break;
|
|
214
|
+
await new Promise(r => setTimeout(r, pollIntervalMs));
|
|
215
|
+
}
|
|
216
|
+
|
|
217
|
+
return { exited, remaining: [...remaining] };
|
|
218
|
+
}
|
|
219
|
+
|
|
220
|
+
/**
|
|
221
|
+
* Kill orchestrator TMUX sessions.
|
|
222
|
+
*
|
|
223
|
+
* Kills each session and its children (worker, reviewer).
|
|
224
|
+
* Returns per-session kill results.
|
|
225
|
+
*
|
|
226
|
+
* @param sessionNames - Session names to kill
|
|
227
|
+
* @returns Per-session kill results
|
|
228
|
+
*/
|
|
229
|
+
export function killOrchSessions(
|
|
230
|
+
sessionNames: string[],
|
|
231
|
+
): Array<{ sessionName: string; killed: boolean; error: string | null }> {
|
|
232
|
+
const results: Array<{ sessionName: string; killed: boolean; error: string | null }> = [];
|
|
233
|
+
|
|
234
|
+
// Group into base sessions (lane/merge) and child sessions
|
|
235
|
+
const baseSessionNames = sessionNames.filter(name =>
|
|
236
|
+
!name.endsWith("-worker") && !name.endsWith("-reviewer"),
|
|
237
|
+
);
|
|
238
|
+
const childSessionNames = sessionNames.filter(name =>
|
|
239
|
+
name.endsWith("-worker") || name.endsWith("-reviewer"),
|
|
240
|
+
);
|
|
241
|
+
|
|
242
|
+
// Kill explicitly-targeted child sessions first.
|
|
243
|
+
for (const name of childSessionNames) {
|
|
244
|
+
const killed = tmuxKillSession(name);
|
|
245
|
+
results.push({
|
|
246
|
+
sessionName: name,
|
|
247
|
+
killed,
|
|
248
|
+
error: killed ? null : `Session '${name}' still alive after kill attempt`,
|
|
249
|
+
});
|
|
250
|
+
}
|
|
251
|
+
|
|
252
|
+
// Then kill base sessions (and defensively kill their children).
|
|
253
|
+
for (const name of baseSessionNames) {
|
|
254
|
+
// Best-effort child cleanup even if not explicitly targeted.
|
|
255
|
+
tmuxKillSession(`${name}-worker`);
|
|
256
|
+
tmuxKillSession(`${name}-reviewer`);
|
|
257
|
+
|
|
258
|
+
const killed = tmuxKillSession(name);
|
|
259
|
+
results.push({
|
|
260
|
+
sessionName: name,
|
|
261
|
+
killed,
|
|
262
|
+
error: killed ? null : `Session '${name}' still alive after kill attempt`,
|
|
263
|
+
});
|
|
264
|
+
}
|
|
265
|
+
|
|
266
|
+
return results;
|
|
267
|
+
}
|
|
268
|
+
|
|
269
|
+
/**
 * Execute a full abort operation.
 *
 * Phase/state transition ordering:
 * 1. Set phase to "stopped"
 * 2. Persist runtime state (so the state file reflects the stopped phase)
 * 3. List and select target sessions
 * 4. Execute the mode-specific flow (graceful: signal → wait → kill
 *    leftovers; hard: kill everything immediately)
 * 5. Delete the batch state file
 * 6. Return AbortResult
 *
 * Non-goal: does NOT delete worktrees/branches (preserved for inspection).
 *
 * @param mode - Abort mode (graceful or hard)
 * @param prefix - TMUX session prefix (e.g., "orch")
 * @param repoRoot - Repository root path
 * @param batchState - Current batch runtime state (mutated: phase set to stopped)
 * @param persistedState - Loaded persisted state (for session enrichment)
 * @param gracePeriodMs - Grace period for graceful abort (default 60000)
 * @param pollIntervalMs - Poll interval for graceful abort (default 2000)
 * @returns AbortResult with per-lane details
 */
export async function executeAbort(
  mode: AbortMode,
  prefix: string,
  repoRoot: string,
  batchState: OrchBatchRuntimeState,
  persistedState: PersistedBatchState | null,
  gracePeriodMs: number = 60_000,
  pollIntervalMs: number = 2_000,
): Promise<AbortResult> {
  const startTime = Date.now();
  const errors: Array<{ code: AbortErrorCode; message: string }> = [];

  // Step 1: Set phase to stopped (mutates caller-owned batchState)
  batchState.phase = "stopped";
  batchState.endedAt = Date.now();

  // Step 2: Persist state (best-effort — abort must continue even if persist fails)
  try {
    persistRuntimeState(
      `abort-${mode}`,
      batchState,
      [], // wavePlan not needed for abort persistence
      batchState.currentLanes,
      [], // taskOutcomes not needed
      null, // discovery not needed
      repoRoot,
    );
  } catch (err) {
    // Log and keep going — failing to persist must not block teardown.
    execLog("abort", batchState.batchId, `Failed to persist state during abort: ${err instanceof Error ? err.message : String(err)}`);
  }

  // Step 3: List all orch sessions
  let allSessionNames: string[];
  try {
    allSessionNames = parseOrchSessionNames(
      // Inner IIFE: any tmux failure (e.g. no server running) is mapped
      // to an empty listing rather than an exception.
      (() => {
        try {
          return execSync('tmux list-sessions -F "#{session_name}"', {
            encoding: "utf-8",
            timeout: 5000,
          });
        } catch {
          return "";
        }
      })(),
      prefix,
    );
  } catch (err) {
    errors.push({
      code: "ABORT_TMUX_LIST_FAILED",
      message: err instanceof Error ? err.message : String(err),
    });
    allSessionNames = [];
  }

  // Step 4: Select and enrich target sessions (lane-*/merge-* only)
  const targets = selectAbortTargetSessions(
    allSessionNames,
    persistedState,
    batchState.currentLanes,
    repoRoot,
    prefix,
  );

  const laneResults: AbortLaneResult[] = [];
  let gracefulExits = 0;
  let wrapUpFailures = 0;

  if (mode === "graceful") {
    // Step 5a: Write wrap-up files (signal lanes to finish up)
    const wrapUpResults = writeWrapUpFiles(targets);
    for (const wr of wrapUpResults) {
      if (wr.error) wrapUpFailures++;
    }
    if (wrapUpFailures > 0) {
      errors.push({
        code: "ABORT_WRAPUP_WRITE_FAILED",
        message: `Failed to write wrap-up files for ${wrapUpFailures} session(s)`,
      });
    }

    // Step 5b: Wait up to gracePeriodMs for sessions to exit on their own
    const allTargetNames = targets.map(t => t.sessionName);
    const waitResult = await waitForSessionExit(allTargetNames, gracePeriodMs, pollIntervalMs);
    gracefulExits = waitResult.exited.length;

    // Step 5c: Force-kill whatever is still alive after the grace period
    const killResultBySession = new Map<string, { killed: boolean; error: string | null }>();
    if (waitResult.remaining.length > 0) {
      const killResults = killOrchSessions(waitResult.remaining);
      for (const kr of killResults) {
        killResultBySession.set(kr.sessionName, { killed: kr.killed, error: kr.error });
      }
      const killFailures = killResults.filter(kr => !kr.killed);
      if (killFailures.length > 0) {
        errors.push({
          code: "ABORT_KILL_FAILED",
          message: `Failed to kill ${killFailures.length} session(s)`,
        });
      }
    }

    // Build lane results: a session counts as killed if it either exited
    // gracefully or was successfully force-killed.
    const exitedSet = new Set(waitResult.exited);
    for (const target of targets) {
      const wrapUp = wrapUpResults.find(wr => wr.sessionName === target.sessionName);
      const wasGraceful = exitedSet.has(target.sessionName);
      const killResult = killResultBySession.get(target.sessionName);
      const sessionKilled = wasGraceful || killResult?.killed === true;
      laneResults.push({
        sessionName: target.sessionName,
        laneId: target.laneId,
        taskId: target.taskId,
        taskFolderInWorktree: target.taskFolderInWorktree,
        wrapUpWritten: wrapUp?.written || false,
        wrapUpError: wrapUp?.error || null,
        sessionKilled,
        exitedGracefully: wasGraceful,
      });
    }
  } else {
    // Hard mode: kill all immediately — no wrap-up files, no grace period
    const allTargetNames = targets.map(t => t.sessionName);
    const killResults = killOrchSessions(allTargetNames);
    const killResultBySession = new Map<string, { killed: boolean; error: string | null }>();
    for (const kr of killResults) {
      killResultBySession.set(kr.sessionName, { killed: kr.killed, error: kr.error });
    }
    const killFailures = killResults.filter(kr => !kr.killed);
    if (killFailures.length > 0) {
      errors.push({
        code: "ABORT_KILL_FAILED",
        message: `Failed to kill ${killFailures.length} session(s)`,
      });
    }

    for (const target of targets) {
      const killResult = killResultBySession.get(target.sessionName);
      laneResults.push({
        sessionName: target.sessionName,
        laneId: target.laneId,
        taskId: target.taskId,
        taskFolderInWorktree: target.taskFolderInWorktree,
        wrapUpWritten: false,
        wrapUpError: null,
        sessionKilled: killResult?.killed === true,
        exitedGracefully: false,
      });
    }
  }

  // Step 6: Delete batch state file (the batch is over either way)
  let stateDeleted = false;
  try {
    deleteBatchState(repoRoot);
    stateDeleted = true;
  } catch (err) {
    errors.push({
      code: "ABORT_STATE_DELETE_FAILED",
      message: err instanceof Error ? err.message : String(err),
    });
  }

  return {
    mode,
    sessionsFound: targets.length,
    sessionsKilled: laneResults.filter(lr => lr.sessionKilled).length,
    gracefulExits,
    laneResults,
    wrapUpFailures,
    stateDeleted,
    errors,
    durationMs: Date.now() - startTime,
  };
}
|
|
466
|
+
|
|
@@ -0,0 +1,102 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Config loading from YAML
|
|
3
|
+
* @module orch/config
|
|
4
|
+
*/
|
|
5
|
+
import { readFileSync, existsSync } from "fs";
|
|
6
|
+
import { join } from "path";
|
|
7
|
+
import { parse as yamlParse } from "yaml";
|
|
8
|
+
|
|
9
|
+
import { DEFAULT_ORCHESTRATOR_CONFIG, DEFAULT_TASK_RUNNER_CONFIG } from "./types.ts";
|
|
10
|
+
import type { OrchestratorConfig, TaskArea, TaskRunnerConfig } from "./types.ts";
|
|
11
|
+
|
|
12
|
+
// ── Config Loading ───────────────────────────────────────────────────
|
|
13
|
+
|
|
14
|
+
/**
|
|
15
|
+
* Load orchestrator config from .pi/task-orchestrator.yaml.
|
|
16
|
+
* Merges with defaults for any missing fields.
|
|
17
|
+
*/
|
|
18
|
+
export function loadOrchestratorConfig(cwd: string): OrchestratorConfig {
|
|
19
|
+
const configPath = join(cwd, ".pi", "task-orchestrator.yaml");
|
|
20
|
+
if (!existsSync(configPath)) {
|
|
21
|
+
return { ...DEFAULT_ORCHESTRATOR_CONFIG };
|
|
22
|
+
}
|
|
23
|
+
try {
|
|
24
|
+
const raw = readFileSync(configPath, "utf-8");
|
|
25
|
+
const loaded = yamlParse(raw) as any;
|
|
26
|
+
return {
|
|
27
|
+
orchestrator: {
|
|
28
|
+
...DEFAULT_ORCHESTRATOR_CONFIG.orchestrator,
|
|
29
|
+
...loaded?.orchestrator,
|
|
30
|
+
},
|
|
31
|
+
dependencies: {
|
|
32
|
+
...DEFAULT_ORCHESTRATOR_CONFIG.dependencies,
|
|
33
|
+
...loaded?.dependencies,
|
|
34
|
+
},
|
|
35
|
+
assignment: {
|
|
36
|
+
...DEFAULT_ORCHESTRATOR_CONFIG.assignment,
|
|
37
|
+
...loaded?.assignment,
|
|
38
|
+
size_weights: {
|
|
39
|
+
...DEFAULT_ORCHESTRATOR_CONFIG.assignment.size_weights,
|
|
40
|
+
...loaded?.assignment?.size_weights,
|
|
41
|
+
},
|
|
42
|
+
},
|
|
43
|
+
pre_warm: {
|
|
44
|
+
...DEFAULT_ORCHESTRATOR_CONFIG.pre_warm,
|
|
45
|
+
...loaded?.pre_warm,
|
|
46
|
+
commands: {
|
|
47
|
+
...DEFAULT_ORCHESTRATOR_CONFIG.pre_warm.commands,
|
|
48
|
+
...loaded?.pre_warm?.commands,
|
|
49
|
+
},
|
|
50
|
+
always: loaded?.pre_warm?.always ?? DEFAULT_ORCHESTRATOR_CONFIG.pre_warm.always,
|
|
51
|
+
},
|
|
52
|
+
merge: {
|
|
53
|
+
...DEFAULT_ORCHESTRATOR_CONFIG.merge,
|
|
54
|
+
...loaded?.merge,
|
|
55
|
+
verify: loaded?.merge?.verify ?? DEFAULT_ORCHESTRATOR_CONFIG.merge.verify,
|
|
56
|
+
},
|
|
57
|
+
failure: {
|
|
58
|
+
...DEFAULT_ORCHESTRATOR_CONFIG.failure,
|
|
59
|
+
...loaded?.failure,
|
|
60
|
+
},
|
|
61
|
+
monitoring: {
|
|
62
|
+
...DEFAULT_ORCHESTRATOR_CONFIG.monitoring,
|
|
63
|
+
...loaded?.monitoring,
|
|
64
|
+
},
|
|
65
|
+
};
|
|
66
|
+
} catch {
|
|
67
|
+
return { ...DEFAULT_ORCHESTRATOR_CONFIG };
|
|
68
|
+
}
|
|
69
|
+
}
|
|
70
|
+
|
|
71
|
+
/**
|
|
72
|
+
* Load task-runner config from .pi/task-runner.yaml.
|
|
73
|
+
* Extracts only the fields the orchestrator needs: task_areas, reference_docs.
|
|
74
|
+
*/
|
|
75
|
+
export function loadTaskRunnerConfig(cwd: string): TaskRunnerConfig {
|
|
76
|
+
const configPath = join(cwd, ".pi", "task-runner.yaml");
|
|
77
|
+
if (!existsSync(configPath)) {
|
|
78
|
+
return { ...DEFAULT_TASK_RUNNER_CONFIG };
|
|
79
|
+
}
|
|
80
|
+
try {
|
|
81
|
+
const raw = readFileSync(configPath, "utf-8");
|
|
82
|
+
const loaded = yamlParse(raw) as any;
|
|
83
|
+
const taskAreas: Record<string, TaskArea> = {};
|
|
84
|
+
if (loaded?.task_areas) {
|
|
85
|
+
for (const [name, area] of Object.entries(loaded.task_areas)) {
|
|
86
|
+
const a = area as any;
|
|
87
|
+
taskAreas[name] = {
|
|
88
|
+
path: a?.path || "",
|
|
89
|
+
prefix: a?.prefix || "",
|
|
90
|
+
context: a?.context || "",
|
|
91
|
+
};
|
|
92
|
+
}
|
|
93
|
+
}
|
|
94
|
+
return {
|
|
95
|
+
task_areas: taskAreas,
|
|
96
|
+
reference_docs: loaded?.reference_docs || {},
|
|
97
|
+
};
|
|
98
|
+
} catch {
|
|
99
|
+
return { ...DEFAULT_TASK_RUNNER_CONFIG };
|
|
100
|
+
}
|
|
101
|
+
}
|
|
102
|
+
|