@os-eco/overstory-cli 0.8.5 → 0.8.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +2 -1
- package/agents/coordinator.md +52 -4
- package/package.json +1 -1
- package/src/commands/clean.test.ts +136 -0
- package/src/commands/clean.ts +198 -4
- package/src/commands/coordinator.test.ts +420 -1
- package/src/commands/coordinator.ts +173 -1
- package/src/commands/init.test.ts +137 -0
- package/src/commands/init.ts +57 -1
- package/src/commands/log.test.ts +10 -11
- package/src/commands/log.ts +31 -32
- package/src/commands/prime.ts +30 -5
- package/src/commands/sling.ts +312 -322
- package/src/commands/spec.ts +8 -2
- package/src/commands/stop.test.ts +127 -6
- package/src/commands/stop.ts +95 -43
- package/src/commands/watch.ts +29 -9
- package/src/config.test.ts +72 -0
- package/src/config.ts +26 -1
- package/src/index.ts +4 -1
- package/src/merge/resolver.test.ts +243 -19
- package/src/merge/resolver.ts +235 -95
- package/src/runtimes/pi.test.ts +118 -1
- package/src/runtimes/pi.ts +61 -12
- package/src/types.ts +17 -0
- package/src/watchdog/daemon.test.ts +257 -0
- package/src/watchdog/daemon.ts +66 -23
- package/src/worktree/manager.test.ts +65 -1
- package/src/worktree/manager.ts +36 -0
package/README.md
CHANGED
|
@@ -99,6 +99,7 @@ Every command supports `--json` where noted. Global flags: `-q`/`--quiet`, `--ti
|
|
|
99
99
|
| `ov coordinator send` | Fire-and-forget message to coordinator (`--subject`) |
|
|
100
100
|
| `ov coordinator ask` | Synchronous request/response to coordinator (`--subject`, `--timeout`) |
|
|
101
101
|
| `ov coordinator output` | Show recent coordinator output (`--lines`) |
|
|
102
|
+
| `ov coordinator check-complete` | Evaluate exit triggers, return completion status |
|
|
102
103
|
| `ov supervisor start` | **[DEPRECATED]** Start per-project supervisor agent |
|
|
103
104
|
| `ov supervisor stop` | **[DEPRECATED]** Stop supervisor |
|
|
104
105
|
| `ov supervisor status` | **[DEPRECATED]** Show supervisor state |
|
|
@@ -232,7 +233,7 @@ overstory/
|
|
|
232
233
|
config.ts Config loader + validation
|
|
233
234
|
errors.ts Custom error types
|
|
234
235
|
json.ts Standardized JSON envelope helpers
|
|
235
|
-
commands/ One file per CLI subcommand (
|
|
236
|
+
commands/ One file per CLI subcommand (35 commands)
|
|
236
237
|
agents.ts Agent discovery and querying
|
|
237
238
|
coordinator.ts Persistent orchestrator lifecycle
|
|
238
239
|
supervisor.ts Team lead management [DEPRECATED]
|
package/agents/coordinator.md
CHANGED
|
@@ -22,6 +22,7 @@ These are named failures. If you catch yourself doing any of these, stop and cor
|
|
|
22
22
|
- **UNNECESSARY_SPAWN** -- Spawning a lead for a trivially small task. If the objective is a single small change, a single lead is sufficient. Only spawn multiple leads for genuinely independent work streams.
|
|
23
23
|
- **OVERLAPPING_FILE_AREAS** -- Assigning overlapping file areas to multiple leads. Check existing agent file scopes via `ov status` before dispatching.
|
|
24
24
|
- **PREMATURE_MERGE** -- Merging a branch before the lead signals `merge_ready`. Always wait for the lead's explicit `merge_ready` mail. Watchdog completion nudges (e.g. "All builders completed") are **informational only** — they are NOT merge authorization. Only a typed `merge_ready` mail from the owning lead authorizes a merge.
|
|
25
|
+
- **PREMATURE_ISSUE_CLOSE** -- Closing a seeds issue before the lead has sent `merge_ready` AND the branch has been successfully merged. Builder completion alone does NOT authorize issue closure. The required sequence is strictly: lead sends `merge_ready` → coordinator merges branch → merge succeeds → then close the issue. Closing based on builder `worker_done` signals, group auto-close, or `ov status` showing agents completed is a bug. Always verify the merge step is complete first.
|
|
25
26
|
- **SILENT_ESCALATION_DROP** -- Receiving an escalation mail and not acting on it. Every escalation must be routed according to its severity.
|
|
26
27
|
- **ORPHANED_AGENTS** -- Dispatching leads and losing track of them. Every dispatched lead must be in a task group.
|
|
27
28
|
- **SCOPE_EXPLOSION** -- Decomposing into too many leads. Target 2-5 leads per batch. Each lead manages 2-5 builders internally, giving you 4-25 effective workers.
|
|
@@ -226,6 +227,12 @@ Coordinator (you, depth 0)
|
|
|
226
227
|
ov merge --branch <lead-branch> # then merge
|
|
227
228
|
```
|
|
228
229
|
**Do NOT merge based on watchdog nudges, `ov status` showing "completed" builders, or your own git inspection.** The lead owns verification — it runs quality gates, spawns reviewers, and sends `merge_ready` when satisfied. Wait for that mail.
|
|
230
|
+
|
|
231
|
+
After a successful merge, close the corresponding issue:
|
|
232
|
+
```bash
|
|
233
|
+
{{TRACKER_CLI}} close <task-id> --reason "Merged branch <lead-branch>"
|
|
234
|
+
```
|
|
235
|
+
**Do NOT close issues before their branches are merged.** Issue closure is the final step after merge confirmation, never before.
|
|
229
236
|
10. **Close the batch** when the group auto-completes or all issues are resolved:
|
|
230
237
|
- Verify all issues are closed: `{{TRACKER_CLI}} show <id>` for each.
|
|
231
238
|
- Clean up worktrees: `ov worktree clean --completed`.
|
|
@@ -281,14 +288,55 @@ Report to the human operator immediately. Critical escalations mean the automate
|
|
|
281
288
|
|
|
282
289
|
When a batch is complete (task group auto-closed, all issues resolved):
|
|
283
290
|
|
|
291
|
+
**CRITICAL: Never close an issue until its branch is merged.** The correct close sequence is:
|
|
292
|
+
1. Receive `merge_ready` from lead.
|
|
293
|
+
2. Run `ov merge --branch <branch> --dry-run` (check first), then `ov merge --branch <branch>`.
|
|
294
|
+
3. Verify merge succeeded (no error output, `merged` mail received or `ov status` confirms).
|
|
295
|
+
4. **Only then** close the issue: `{{TRACKER_CLI}} close <id> --reason "Merged branch <branch-name>"`.
|
|
296
|
+
|
|
284
297
|
1. Verify all issues are closed: run `{{TRACKER_CLI}} show <id>` for each issue in the group.
|
|
285
|
-
2. Verify all branches are merged: check `ov status` for unmerged branches.
|
|
298
|
+
2. Verify all branches are merged: check `ov status` for unmerged branches. If any branch is unmerged, do NOT proceed — wait for the lead's `merge_ready` signal.
|
|
286
299
|
3. Clean up worktrees: `ov worktree clean --completed`.
|
|
287
300
|
4. Record orchestration insights: `ml record <domain> --type <type> --classification <foundational|tactical|observational> --description "<insight>"`.
|
|
288
|
-
5.
|
|
289
|
-
|
|
301
|
+
5. Commit and sync state files: after all work is merged and issues are closed, commit any outstanding state changes so runtime state is not left uncommitted when the coordinator goes idle:
|
|
302
|
+
```bash
|
|
303
|
+
{{TRACKER_CLI}} sync
|
|
304
|
+
git add .overstory/ .mulch/
|
|
305
|
+
git diff --cached --quiet || git commit -m "chore: sync runtime state"
|
|
306
|
+
git push
|
|
307
|
+
```
|
|
308
|
+
6. Report to the human operator: summarize what was accomplished, what was merged, any issues encountered.
|
|
309
|
+
7. Check for follow-up work: `{{TRACKER_CLI}} ready` to see if new issues surfaced during the batch.
|
|
310
|
+
|
|
311
|
+
After processing each batch of mail and dispatching work, evaluate whether your exit conditions are met:
|
|
312
|
+
|
|
313
|
+
```bash
|
|
314
|
+
ov coordinator check-complete --json
|
|
315
|
+
```
|
|
316
|
+
|
|
317
|
+
The command evaluates configured `coordinator.exitTriggers` from config.yaml:
|
|
318
|
+
- **allAgentsDone**: all spawned agents in the current run have completed and branches merged
|
|
319
|
+
- **taskTrackerEmpty**: `{{TRACKER_CLI}} ready` returns no unblocked work
|
|
320
|
+
- **onShutdownSignal**: a shutdown message was received via mail
|
|
321
|
+
|
|
322
|
+
When ALL enabled triggers are met (`complete: true` in the JSON output):
|
|
323
|
+
|
|
324
|
+
1. Commit and sync state files so runtime state is not left uncommitted:
|
|
325
|
+
```bash
|
|
326
|
+
{{TRACKER_CLI}} sync
|
|
327
|
+
git add .overstory/ .mulch/
|
|
328
|
+
git diff --cached --quiet || git commit -m "chore: sync runtime state"
|
|
329
|
+
git push
|
|
330
|
+
```
|
|
331
|
+
2. Run `ov run complete` to mark the current run as finished.
|
|
332
|
+
3. Send a final status mail to the operator:
|
|
333
|
+
```bash
|
|
334
|
+
ov mail send --to operator --subject "Run complete" \
|
|
335
|
+
--body "All exit triggers met. Run completed." --type status
|
|
336
|
+
```
|
|
337
|
+
4. Stop processing. Do not spawn additional agents or process further mail.
|
|
290
338
|
|
|
291
|
-
|
|
339
|
+
If no exit triggers are configured (all false), the coordinator runs indefinitely until manually stopped. This is the default behavior for backward compatibility.
|
|
292
340
|
|
|
293
341
|
## persistence-and-context-recovery
|
|
294
342
|
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@os-eco/overstory-cli",
|
|
3
|
-
"version": "0.8.5",
|
|
3
|
+
"version": "0.8.6",
|
|
4
4
|
"description": "Multi-agent orchestration for AI coding agents — spawn workers in git worktrees via tmux, coordinate through SQLite mail, merge with tiered conflict resolution. Pluggable runtime adapters for Claude Code, Pi, and more.",
|
|
5
5
|
"author": "Jaymin West",
|
|
6
6
|
"license": "MIT",
|
|
@@ -77,6 +77,12 @@ describe("validation", () => {
|
|
|
77
77
|
test("no flags throws ValidationError", async () => {
|
|
78
78
|
await expect(cleanCommand({})).rejects.toThrow("No cleanup targets specified");
|
|
79
79
|
});
|
|
80
|
+
|
|
81
|
+
test("--agent and --all throws ValidationError", async () => {
|
|
82
|
+
await expect(cleanCommand({ agent: "my-builder", all: true })).rejects.toThrow(
|
|
83
|
+
"--agent and --all are mutually exclusive",
|
|
84
|
+
);
|
|
85
|
+
});
|
|
80
86
|
});
|
|
81
87
|
|
|
82
88
|
// === --all ===
|
|
@@ -656,3 +662,133 @@ describe("mulch health checks", () => {
|
|
|
656
662
|
expect(stdoutOutput).toBeDefined();
|
|
657
663
|
});
|
|
658
664
|
});
|
|
665
|
+
|
|
666
|
+
// === --agent ===
|
|
667
|
+
|
|
668
|
+
describe("--agent", () => {
|
|
669
|
+
function makeSession(overrides: Partial<AgentSession> = {}): AgentSession {
|
|
670
|
+
return {
|
|
671
|
+
id: "s1",
|
|
672
|
+
agentName: "test-builder",
|
|
673
|
+
capability: "builder",
|
|
674
|
+
worktreePath: join(tempDir, ".overstory", "worktrees", "test-builder"),
|
|
675
|
+
branchName: "overstory/test-builder/task-1",
|
|
676
|
+
taskId: "task-1",
|
|
677
|
+
tmuxSession: "overstory-test-project-test-builder",
|
|
678
|
+
state: "working",
|
|
679
|
+
pid: 99999,
|
|
680
|
+
parentAgent: null,
|
|
681
|
+
depth: 1,
|
|
682
|
+
runId: "run-123",
|
|
683
|
+
startedAt: new Date().toISOString(),
|
|
684
|
+
lastActivity: new Date().toISOString(),
|
|
685
|
+
escalationLevel: 0,
|
|
686
|
+
stalledSince: null,
|
|
687
|
+
transcriptPath: null,
|
|
688
|
+
...overrides,
|
|
689
|
+
};
|
|
690
|
+
}
|
|
691
|
+
|
|
692
|
+
function saveSession(session: AgentSession): void {
|
|
693
|
+
const { store } = openSessionStore(overstoryDir);
|
|
694
|
+
try {
|
|
695
|
+
store.upsert(session);
|
|
696
|
+
} finally {
|
|
697
|
+
store.close();
|
|
698
|
+
}
|
|
699
|
+
}
|
|
700
|
+
|
|
701
|
+
test("throws AgentError when agent not found", async () => {
|
|
702
|
+
await expect(cleanCommand({ agent: "nonexistent" })).rejects.toThrow("not found");
|
|
703
|
+
});
|
|
704
|
+
|
|
705
|
+
test("clears agent and logs directories", async () => {
|
|
706
|
+
const session = makeSession();
|
|
707
|
+
saveSession(session);
|
|
708
|
+
|
|
709
|
+
// Create agent and logs dirs with content
|
|
710
|
+
const agentDir = join(overstoryDir, "agents", "test-builder");
|
|
711
|
+
const logsDir = join(overstoryDir, "logs", "test-builder");
|
|
712
|
+
await mkdir(agentDir, { recursive: true });
|
|
713
|
+
await mkdir(logsDir, { recursive: true });
|
|
714
|
+
await writeFile(join(agentDir, "identity.yaml"), "name: test-builder");
|
|
715
|
+
await writeFile(join(logsDir, "session.log"), "log data");
|
|
716
|
+
|
|
717
|
+
await cleanCommand({ agent: "test-builder" });
|
|
718
|
+
|
|
719
|
+
// Dirs should be cleared (but still exist)
|
|
720
|
+
const agentEntries = await readdir(agentDir);
|
|
721
|
+
const logEntries = await readdir(logsDir);
|
|
722
|
+
expect(agentEntries).toHaveLength(0);
|
|
723
|
+
expect(logEntries).toHaveLength(0);
|
|
724
|
+
|
|
725
|
+
expect(stdoutOutput).toContain("Agent cleaned");
|
|
726
|
+
expect(stdoutOutput).toContain("test-builder");
|
|
727
|
+
});
|
|
728
|
+
|
|
729
|
+
test("marks agent session as completed", async () => {
|
|
730
|
+
const session = makeSession({ state: "working" });
|
|
731
|
+
saveSession(session);
|
|
732
|
+
|
|
733
|
+
await cleanCommand({ agent: "test-builder" });
|
|
734
|
+
|
|
735
|
+
const { store } = openSessionStore(overstoryDir);
|
|
736
|
+
const updated = store.getByName("test-builder");
|
|
737
|
+
store.close();
|
|
738
|
+
expect(updated?.state).toBe("completed");
|
|
739
|
+
});
|
|
740
|
+
|
|
741
|
+
test("logs synthetic session-end event for non-completed agent", async () => {
|
|
742
|
+
const session = makeSession({ state: "working" });
|
|
743
|
+
saveSession(session);
|
|
744
|
+
|
|
745
|
+
await cleanCommand({ agent: "test-builder" });
|
|
746
|
+
|
|
747
|
+
const eventsDbPath = join(overstoryDir, "events.db");
|
|
748
|
+
const eventStore = createEventStore(eventsDbPath);
|
|
749
|
+
const events = eventStore.getByAgent("test-builder");
|
|
750
|
+
eventStore.close();
|
|
751
|
+
|
|
752
|
+
const sessionEndEvents = events.filter((e) => e.eventType === "session_end");
|
|
753
|
+
expect(sessionEndEvents).toHaveLength(1);
|
|
754
|
+
const data = JSON.parse(sessionEndEvents[0]?.data ?? "{}");
|
|
755
|
+
expect(data.reason).toContain("clean --agent");
|
|
756
|
+
});
|
|
757
|
+
|
|
758
|
+
test("does not log session-end event for already-completed agent", async () => {
|
|
759
|
+
const session = makeSession({ state: "completed" });
|
|
760
|
+
saveSession(session);
|
|
761
|
+
|
|
762
|
+
await cleanCommand({ agent: "test-builder" });
|
|
763
|
+
|
|
764
|
+
const eventsDbPath = join(overstoryDir, "events.db");
|
|
765
|
+
if (existsSync(eventsDbPath)) {
|
|
766
|
+
const eventStore = createEventStore(eventsDbPath);
|
|
767
|
+
const events = eventStore.getByAgent("test-builder");
|
|
768
|
+
eventStore.close();
|
|
769
|
+
const sessionEndEvents = events.filter((e) => e.eventType === "session_end");
|
|
770
|
+
expect(sessionEndEvents).toHaveLength(0);
|
|
771
|
+
}
|
|
772
|
+
});
|
|
773
|
+
|
|
774
|
+
test("--agent + --json returns JSON with agent result", async () => {
|
|
775
|
+
const session = makeSession({ state: "working" });
|
|
776
|
+
saveSession(session);
|
|
777
|
+
|
|
778
|
+
await cleanCommand({ agent: "test-builder", json: true });
|
|
779
|
+
|
|
780
|
+
const result = JSON.parse(stdoutOutput);
|
|
781
|
+
expect(result).toHaveProperty("agent");
|
|
782
|
+
expect(result.agent).toHaveProperty("agentName", "test-builder");
|
|
783
|
+
expect(result.agent).toHaveProperty("markedCompleted");
|
|
784
|
+
});
|
|
785
|
+
|
|
786
|
+
test("handles missing agent/logs directories gracefully", async () => {
|
|
787
|
+
const session = makeSession({ state: "completed" });
|
|
788
|
+
saveSession(session);
|
|
789
|
+
|
|
790
|
+
// No agent or logs dirs — should not error
|
|
791
|
+
await cleanCommand({ agent: "test-builder" });
|
|
792
|
+
expect(stdoutOutput).toContain("Agent cleaned");
|
|
793
|
+
});
|
|
794
|
+
});
|
package/src/commands/clean.ts
CHANGED
|
@@ -23,7 +23,7 @@ import { existsSync } from "node:fs";
|
|
|
23
23
|
import { readdir, rm, unlink } from "node:fs/promises";
|
|
24
24
|
import { join } from "node:path";
|
|
25
25
|
import { loadConfig } from "../config.ts";
|
|
26
|
-
import { ValidationError } from "../errors.ts";
|
|
26
|
+
import { AgentError, ValidationError } from "../errors.ts";
|
|
27
27
|
import { createEventStore } from "../events/store.ts";
|
|
28
28
|
import { jsonOutput } from "../json.ts";
|
|
29
29
|
import { printHint, printSuccess } from "../logging/color.ts";
|
|
@@ -31,9 +31,16 @@ import { createMulchClient } from "../mulch/client.ts";
|
|
|
31
31
|
import { openSessionStore } from "../sessions/compat.ts";
|
|
32
32
|
import type { AgentSession, MulchDoctorResult, MulchPruneResult, MulchStatus } from "../types.ts";
|
|
33
33
|
import { listWorktrees, removeWorktree } from "../worktree/manager.ts";
|
|
34
|
-
import {
|
|
34
|
+
import {
|
|
35
|
+
isProcessAlive,
|
|
36
|
+
isSessionAlive,
|
|
37
|
+
killProcessTree,
|
|
38
|
+
killSession,
|
|
39
|
+
listSessions,
|
|
40
|
+
} from "../worktree/tmux.ts";
|
|
35
41
|
|
|
36
42
|
export interface CleanOptions {
|
|
43
|
+
agent?: string;
|
|
37
44
|
all?: boolean;
|
|
38
45
|
mail?: boolean;
|
|
39
46
|
sessions?: boolean;
|
|
@@ -395,6 +402,158 @@ async function checkMulchHealth(repoRoot: string): Promise<{
|
|
|
395
402
|
}
|
|
396
403
|
}
|
|
397
404
|
|
|
405
|
+
interface AgentCleanResult {
|
|
406
|
+
agentName: string;
|
|
407
|
+
tmuxKilled: boolean;
|
|
408
|
+
pidKilled: boolean;
|
|
409
|
+
worktreeRemoved: boolean;
|
|
410
|
+
branchDeleted: boolean;
|
|
411
|
+
agentDirCleared: boolean;
|
|
412
|
+
logsDirCleared: boolean;
|
|
413
|
+
sessionEndEventLogged: boolean;
|
|
414
|
+
markedCompleted: boolean;
|
|
415
|
+
}
|
|
416
|
+
|
|
417
|
+
/**
|
|
418
|
+
* Delete a git branch (best-effort).
|
|
419
|
+
*/
|
|
420
|
+
async function deleteBranch(repoRoot: string, branch: string): Promise<boolean> {
|
|
421
|
+
try {
|
|
422
|
+
const proc = Bun.spawn(["git", "branch", "-D", branch], {
|
|
423
|
+
cwd: repoRoot,
|
|
424
|
+
stdout: "pipe",
|
|
425
|
+
stderr: "pipe",
|
|
426
|
+
});
|
|
427
|
+
const exitCode = await proc.exited;
|
|
428
|
+
return exitCode === 0;
|
|
429
|
+
} catch {
|
|
430
|
+
return false;
|
|
431
|
+
}
|
|
432
|
+
}
|
|
433
|
+
|
|
434
|
+
/**
|
|
435
|
+
* Perform targeted cleanup of a single agent.
|
|
436
|
+
*
|
|
437
|
+
* Kills its tmux session or process, removes its worktree, deletes its branch,
|
|
438
|
+
* clears its agent and log directories, logs a synthetic session-end event,
|
|
439
|
+
* and marks the session as completed.
|
|
440
|
+
*/
|
|
441
|
+
async function cleanSingleAgent(
|
|
442
|
+
agentName: string,
|
|
443
|
+
overstoryDir: string,
|
|
444
|
+
projectRoot: string,
|
|
445
|
+
): Promise<AgentCleanResult> {
|
|
446
|
+
const result: AgentCleanResult = {
|
|
447
|
+
agentName,
|
|
448
|
+
tmuxKilled: false,
|
|
449
|
+
pidKilled: false,
|
|
450
|
+
worktreeRemoved: false,
|
|
451
|
+
branchDeleted: false,
|
|
452
|
+
agentDirCleared: false,
|
|
453
|
+
logsDirCleared: false,
|
|
454
|
+
sessionEndEventLogged: false,
|
|
455
|
+
markedCompleted: false,
|
|
456
|
+
};
|
|
457
|
+
|
|
458
|
+
const { store } = openSessionStore(overstoryDir);
|
|
459
|
+
let session: AgentSession | undefined;
|
|
460
|
+
try {
|
|
461
|
+
const found = store.getByName(agentName);
|
|
462
|
+
if (!found) {
|
|
463
|
+
throw new AgentError(`Agent "${agentName}" not found`, { agentName });
|
|
464
|
+
}
|
|
465
|
+
session = found;
|
|
466
|
+
|
|
467
|
+
// Log synthetic session-end event for non-completed agents
|
|
468
|
+
if (session.state !== "completed") {
|
|
469
|
+
try {
|
|
470
|
+
const eventsDbPath = join(overstoryDir, "events.db");
|
|
471
|
+
const eventStore = createEventStore(eventsDbPath);
|
|
472
|
+
try {
|
|
473
|
+
eventStore.insert({
|
|
474
|
+
runId: session.runId,
|
|
475
|
+
agentName: session.agentName,
|
|
476
|
+
sessionId: session.id,
|
|
477
|
+
eventType: "session_end",
|
|
478
|
+
toolName: null,
|
|
479
|
+
toolArgs: null,
|
|
480
|
+
toolDurationMs: null,
|
|
481
|
+
level: "info",
|
|
482
|
+
data: JSON.stringify({ reason: "clean --agent", capability: session.capability }),
|
|
483
|
+
});
|
|
484
|
+
result.sessionEndEventLogged = true;
|
|
485
|
+
} finally {
|
|
486
|
+
eventStore.close();
|
|
487
|
+
}
|
|
488
|
+
} catch {
|
|
489
|
+
// Best effort
|
|
490
|
+
}
|
|
491
|
+
}
|
|
492
|
+
|
|
493
|
+
const isHeadless = session.tmuxSession === "" && session.pid !== null;
|
|
494
|
+
|
|
495
|
+
// Kill tmux session or process
|
|
496
|
+
if (isHeadless && session.pid !== null) {
|
|
497
|
+
try {
|
|
498
|
+
if (isProcessAlive(session.pid)) {
|
|
499
|
+
await killProcessTree(session.pid);
|
|
500
|
+
result.pidKilled = true;
|
|
501
|
+
}
|
|
502
|
+
} catch {
|
|
503
|
+
// Best effort
|
|
504
|
+
}
|
|
505
|
+
} else if (session.tmuxSession) {
|
|
506
|
+
try {
|
|
507
|
+
if (await isSessionAlive(session.tmuxSession)) {
|
|
508
|
+
await killSession(session.tmuxSession);
|
|
509
|
+
result.tmuxKilled = true;
|
|
510
|
+
}
|
|
511
|
+
} catch {
|
|
512
|
+
// Best effort
|
|
513
|
+
}
|
|
514
|
+
}
|
|
515
|
+
|
|
516
|
+
// Remove worktree (force)
|
|
517
|
+
if (session.worktreePath) {
|
|
518
|
+
try {
|
|
519
|
+
await removeWorktree(projectRoot, session.worktreePath, {
|
|
520
|
+
force: true,
|
|
521
|
+
forceBranch: false,
|
|
522
|
+
});
|
|
523
|
+
result.worktreeRemoved = true;
|
|
524
|
+
} catch {
|
|
525
|
+
// Best effort
|
|
526
|
+
}
|
|
527
|
+
}
|
|
528
|
+
|
|
529
|
+
// Delete branch
|
|
530
|
+
if (session.branchName) {
|
|
531
|
+
result.branchDeleted = await deleteBranch(projectRoot, session.branchName);
|
|
532
|
+
}
|
|
533
|
+
|
|
534
|
+
// Mark completed
|
|
535
|
+
if (session.state !== "completed") {
|
|
536
|
+
store.updateState(agentName, "completed");
|
|
537
|
+
store.updateLastActivity(agentName);
|
|
538
|
+
result.markedCompleted = true;
|
|
539
|
+
}
|
|
540
|
+
} finally {
|
|
541
|
+
store.close();
|
|
542
|
+
}
|
|
543
|
+
|
|
544
|
+
// Clear agent identity directory
|
|
545
|
+
if (session) {
|
|
546
|
+
const agentDir = join(overstoryDir, "agents", agentName);
|
|
547
|
+
result.agentDirCleared = await clearDirectory(agentDir);
|
|
548
|
+
|
|
549
|
+
// Clear agent logs directory
|
|
550
|
+
const logsDir = join(overstoryDir, "logs", agentName);
|
|
551
|
+
result.logsDirCleared = await clearDirectory(logsDir);
|
|
552
|
+
}
|
|
553
|
+
|
|
554
|
+
return result;
|
|
555
|
+
}
|
|
556
|
+
|
|
398
557
|
/**
|
|
399
558
|
* Entry point for `ov clean [flags]`.
|
|
400
559
|
*
|
|
@@ -403,6 +562,15 @@ async function checkMulchHealth(repoRoot: string): Promise<{
|
|
|
403
562
|
export async function cleanCommand(opts: CleanOptions): Promise<void> {
|
|
404
563
|
const json = opts.json ?? false;
|
|
405
564
|
const all = opts.all ?? false;
|
|
565
|
+
const agentName = opts.agent;
|
|
566
|
+
|
|
567
|
+
// --agent and --all are mutually exclusive
|
|
568
|
+
if (agentName && all) {
|
|
569
|
+
throw new ValidationError(
|
|
570
|
+
"--agent and --all are mutually exclusive. Use --agent <name> for single-agent cleanup or --all for full cleanup.",
|
|
571
|
+
{ field: "flags" },
|
|
572
|
+
);
|
|
573
|
+
}
|
|
406
574
|
|
|
407
575
|
const doWorktrees = all || (opts.worktrees ?? false);
|
|
408
576
|
const doBranches = all || (opts.branches ?? false);
|
|
@@ -414,11 +582,19 @@ export async function cleanCommand(opts: CleanOptions): Promise<void> {
|
|
|
414
582
|
const doSpecs = all || (opts.specs ?? false);
|
|
415
583
|
|
|
416
584
|
const anySelected =
|
|
417
|
-
|
|
585
|
+
agentName ||
|
|
586
|
+
doWorktrees ||
|
|
587
|
+
doBranches ||
|
|
588
|
+
doMail ||
|
|
589
|
+
doSessions ||
|
|
590
|
+
doMetrics ||
|
|
591
|
+
doLogs ||
|
|
592
|
+
doAgents ||
|
|
593
|
+
doSpecs;
|
|
418
594
|
|
|
419
595
|
if (!anySelected) {
|
|
420
596
|
throw new ValidationError(
|
|
421
|
-
"No cleanup targets specified. Use --all for full cleanup, or individual flags (--mail, --sessions, --metrics, --logs, --worktrees, --branches, --agents, --specs).",
|
|
597
|
+
"No cleanup targets specified. Use --all for full cleanup, --agent <name> for single-agent cleanup, or individual flags (--mail, --sessions, --metrics, --logs, --worktrees, --branches, --agents, --specs).",
|
|
422
598
|
{ field: "flags" },
|
|
423
599
|
);
|
|
424
600
|
}
|
|
@@ -427,6 +603,24 @@ export async function cleanCommand(opts: CleanOptions): Promise<void> {
|
|
|
427
603
|
const root = config.project.root;
|
|
428
604
|
const overstoryDir = join(root, ".overstory");
|
|
429
605
|
|
|
606
|
+
// Per-agent cleanup: targeted single-agent cleanup
|
|
607
|
+
if (agentName) {
|
|
608
|
+
const agentResult = await cleanSingleAgent(agentName, overstoryDir, root);
|
|
609
|
+
if (json) {
|
|
610
|
+
jsonOutput("clean", { agent: agentResult });
|
|
611
|
+
} else {
|
|
612
|
+
printSuccess("Agent cleaned", agentName);
|
|
613
|
+
if (agentResult.tmuxKilled) process.stdout.write(` Tmux session killed\n`);
|
|
614
|
+
if (agentResult.pidKilled) process.stdout.write(` Process killed (PID)\n`);
|
|
615
|
+
if (agentResult.worktreeRemoved) process.stdout.write(` Worktree removed\n`);
|
|
616
|
+
if (agentResult.branchDeleted)
|
|
617
|
+
process.stdout.write(` Branch deleted: ${agentResult.agentName}\n`);
|
|
618
|
+
if (agentResult.agentDirCleared) process.stdout.write(` Cleared agents/${agentName}/\n`);
|
|
619
|
+
if (agentResult.logsDirCleared) process.stdout.write(` Cleared logs/${agentName}/\n`);
|
|
620
|
+
}
|
|
621
|
+
return;
|
|
622
|
+
}
|
|
623
|
+
|
|
430
624
|
const result: CleanResult = {
|
|
431
625
|
sessionEndEventsLogged: 0,
|
|
432
626
|
tmuxKilled: 0,
|