@os-eco/overstory-cli 0.6.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +381 -0
- package/agents/builder.md +137 -0
- package/agents/coordinator.md +263 -0
- package/agents/lead.md +301 -0
- package/agents/merger.md +160 -0
- package/agents/monitor.md +214 -0
- package/agents/reviewer.md +140 -0
- package/agents/scout.md +119 -0
- package/agents/supervisor.md +423 -0
- package/package.json +47 -0
- package/src/agents/checkpoint.test.ts +88 -0
- package/src/agents/checkpoint.ts +101 -0
- package/src/agents/hooks-deployer.test.ts +2040 -0
- package/src/agents/hooks-deployer.ts +607 -0
- package/src/agents/identity.test.ts +603 -0
- package/src/agents/identity.ts +384 -0
- package/src/agents/lifecycle.test.ts +196 -0
- package/src/agents/lifecycle.ts +183 -0
- package/src/agents/manifest.test.ts +746 -0
- package/src/agents/manifest.ts +354 -0
- package/src/agents/overlay.test.ts +676 -0
- package/src/agents/overlay.ts +308 -0
- package/src/beads/client.test.ts +217 -0
- package/src/beads/client.ts +202 -0
- package/src/beads/molecules.test.ts +338 -0
- package/src/beads/molecules.ts +198 -0
- package/src/commands/agents.test.ts +322 -0
- package/src/commands/agents.ts +287 -0
- package/src/commands/clean.test.ts +670 -0
- package/src/commands/clean.ts +618 -0
- package/src/commands/completions.test.ts +342 -0
- package/src/commands/completions.ts +887 -0
- package/src/commands/coordinator.test.ts +1530 -0
- package/src/commands/coordinator.ts +733 -0
- package/src/commands/costs.test.ts +1119 -0
- package/src/commands/costs.ts +564 -0
- package/src/commands/dashboard.test.ts +308 -0
- package/src/commands/dashboard.ts +838 -0
- package/src/commands/doctor.test.ts +294 -0
- package/src/commands/doctor.ts +213 -0
- package/src/commands/errors.test.ts +647 -0
- package/src/commands/errors.ts +248 -0
- package/src/commands/feed.test.ts +578 -0
- package/src/commands/feed.ts +361 -0
- package/src/commands/group.test.ts +262 -0
- package/src/commands/group.ts +511 -0
- package/src/commands/hooks.test.ts +458 -0
- package/src/commands/hooks.ts +253 -0
- package/src/commands/init.test.ts +347 -0
- package/src/commands/init.ts +650 -0
- package/src/commands/inspect.test.ts +670 -0
- package/src/commands/inspect.ts +431 -0
- package/src/commands/log.test.ts +1454 -0
- package/src/commands/log.ts +724 -0
- package/src/commands/logs.test.ts +379 -0
- package/src/commands/logs.ts +546 -0
- package/src/commands/mail.test.ts +1270 -0
- package/src/commands/mail.ts +771 -0
- package/src/commands/merge.test.ts +670 -0
- package/src/commands/merge.ts +355 -0
- package/src/commands/metrics.test.ts +444 -0
- package/src/commands/metrics.ts +143 -0
- package/src/commands/monitor.test.ts +191 -0
- package/src/commands/monitor.ts +390 -0
- package/src/commands/nudge.test.ts +230 -0
- package/src/commands/nudge.ts +372 -0
- package/src/commands/prime.test.ts +470 -0
- package/src/commands/prime.ts +381 -0
- package/src/commands/replay.test.ts +741 -0
- package/src/commands/replay.ts +360 -0
- package/src/commands/run.test.ts +431 -0
- package/src/commands/run.ts +351 -0
- package/src/commands/sling.test.ts +657 -0
- package/src/commands/sling.ts +661 -0
- package/src/commands/spec.test.ts +203 -0
- package/src/commands/spec.ts +168 -0
- package/src/commands/status.test.ts +430 -0
- package/src/commands/status.ts +398 -0
- package/src/commands/stop.test.ts +420 -0
- package/src/commands/stop.ts +151 -0
- package/src/commands/supervisor.test.ts +187 -0
- package/src/commands/supervisor.ts +535 -0
- package/src/commands/trace.test.ts +745 -0
- package/src/commands/trace.ts +325 -0
- package/src/commands/watch.test.ts +145 -0
- package/src/commands/watch.ts +247 -0
- package/src/commands/worktree.test.ts +786 -0
- package/src/commands/worktree.ts +311 -0
- package/src/config.test.ts +822 -0
- package/src/config.ts +829 -0
- package/src/doctor/agents.test.ts +454 -0
- package/src/doctor/agents.ts +396 -0
- package/src/doctor/config-check.test.ts +190 -0
- package/src/doctor/config-check.ts +183 -0
- package/src/doctor/consistency.test.ts +651 -0
- package/src/doctor/consistency.ts +294 -0
- package/src/doctor/databases.test.ts +290 -0
- package/src/doctor/databases.ts +218 -0
- package/src/doctor/dependencies.test.ts +184 -0
- package/src/doctor/dependencies.ts +175 -0
- package/src/doctor/logs.test.ts +251 -0
- package/src/doctor/logs.ts +295 -0
- package/src/doctor/merge-queue.test.ts +216 -0
- package/src/doctor/merge-queue.ts +144 -0
- package/src/doctor/structure.test.ts +291 -0
- package/src/doctor/structure.ts +198 -0
- package/src/doctor/types.ts +37 -0
- package/src/doctor/version.test.ts +136 -0
- package/src/doctor/version.ts +129 -0
- package/src/e2e/init-sling-lifecycle.test.ts +277 -0
- package/src/errors.ts +217 -0
- package/src/events/store.test.ts +660 -0
- package/src/events/store.ts +369 -0
- package/src/events/tool-filter.test.ts +330 -0
- package/src/events/tool-filter.ts +126 -0
- package/src/index.ts +316 -0
- package/src/insights/analyzer.test.ts +466 -0
- package/src/insights/analyzer.ts +203 -0
- package/src/logging/color.test.ts +142 -0
- package/src/logging/color.ts +71 -0
- package/src/logging/logger.test.ts +813 -0
- package/src/logging/logger.ts +266 -0
- package/src/logging/reporter.test.ts +259 -0
- package/src/logging/reporter.ts +109 -0
- package/src/logging/sanitizer.test.ts +190 -0
- package/src/logging/sanitizer.ts +57 -0
- package/src/mail/broadcast.test.ts +203 -0
- package/src/mail/broadcast.ts +92 -0
- package/src/mail/client.test.ts +773 -0
- package/src/mail/client.ts +223 -0
- package/src/mail/store.test.ts +705 -0
- package/src/mail/store.ts +387 -0
- package/src/merge/queue.test.ts +359 -0
- package/src/merge/queue.ts +231 -0
- package/src/merge/resolver.test.ts +1345 -0
- package/src/merge/resolver.ts +645 -0
- package/src/metrics/store.test.ts +667 -0
- package/src/metrics/store.ts +445 -0
- package/src/metrics/summary.test.ts +398 -0
- package/src/metrics/summary.ts +178 -0
- package/src/metrics/transcript.test.ts +356 -0
- package/src/metrics/transcript.ts +175 -0
- package/src/mulch/client.test.ts +671 -0
- package/src/mulch/client.ts +332 -0
- package/src/sessions/compat.test.ts +280 -0
- package/src/sessions/compat.ts +104 -0
- package/src/sessions/store.test.ts +873 -0
- package/src/sessions/store.ts +494 -0
- package/src/test-helpers.test.ts +124 -0
- package/src/test-helpers.ts +126 -0
- package/src/tracker/beads.ts +56 -0
- package/src/tracker/factory.test.ts +80 -0
- package/src/tracker/factory.ts +64 -0
- package/src/tracker/seeds.ts +182 -0
- package/src/tracker/types.ts +52 -0
- package/src/types.ts +724 -0
- package/src/watchdog/daemon.test.ts +1975 -0
- package/src/watchdog/daemon.ts +671 -0
- package/src/watchdog/health.test.ts +431 -0
- package/src/watchdog/health.ts +264 -0
- package/src/watchdog/triage.test.ts +164 -0
- package/src/watchdog/triage.ts +179 -0
- package/src/worktree/manager.test.ts +439 -0
- package/src/worktree/manager.ts +198 -0
- package/src/worktree/tmux.test.ts +1009 -0
- package/src/worktree/tmux.ts +509 -0
- package/templates/CLAUDE.md.tmpl +89 -0
- package/templates/hooks.json.tmpl +105 -0
- package/templates/overlay.md.tmpl +81 -0
package/src/commands/clean.ts
@@ -0,0 +1,618 @@
+/**
+ * CLI command: overstory clean [--all] [--mail] [--sessions] [--metrics]
+ * [--logs] [--worktrees] [--branches] [--agents] [--specs]
+ *
+ * Nuclear cleanup of overstory runtime state.
+ * --all does everything. Individual flags allow selective cleanup.
+ *
+ * Execution order for --all (processes → filesystem → databases):
+ * 0. Run mulch health checks (informational, non-destructive):
+ *    - Check domains approaching governance limits
+ *    - Run mulch prune --dry-run (report stale record counts)
+ *    - Run mulch doctor (report health issues)
+ * 1. Kill all overstory tmux sessions
+ * 2. Remove all worktrees
+ * 3. Delete orphaned overstory/* branches
+ * 4. Delete SQLite databases (mail.db, metrics.db)
+ * 5. Wipe sessions.db, merge-queue.db
+ * 6. Clear directory contents (logs/, agents/, specs/)
+ * 7. Delete nudge-state.json
+ */
+
+import { existsSync } from "node:fs";
+import { readdir, rm, unlink } from "node:fs/promises";
+import { join } from "node:path";
+import { loadConfig } from "../config.ts";
+import { ValidationError } from "../errors.ts";
+import { createEventStore } from "../events/store.ts";
+import { createMulchClient } from "../mulch/client.ts";
+import { openSessionStore } from "../sessions/compat.ts";
+import type { AgentSession, MulchDoctorResult, MulchPruneResult, MulchStatus } from "../types.ts";
+import { listWorktrees, removeWorktree } from "../worktree/manager.ts";
+import { killSession, listSessions } from "../worktree/tmux.ts";
+
+function hasFlag(args: string[], flag: string): boolean {
+  return args.includes(flag);
+}
+
+/**
+ * Load active agent sessions from SessionStore for session-end event logging.
+ * Returns sessions that are in an active state (booting, working, stalled).
+ *
+ * Checks for sessions.db or sessions.json existence first to avoid creating
+ * an empty database file as a side effect (which would interfere with
+ * the "Nothing to clean" detection later in the pipeline).
+ */
+function loadActiveSessions(overstoryDir: string): AgentSession[] {
+  try {
+    const dbPath = join(overstoryDir, "sessions.db");
+    const jsonPath = join(overstoryDir, "sessions.json");
+    if (!existsSync(dbPath) && !existsSync(jsonPath)) {
+      return [];
+    }
+    const { store } = openSessionStore(overstoryDir);
+    try {
+      return store.getActive();
+    } finally {
+      store.close();
+    }
+  } catch {
+    return [];
+  }
+}
+
+/**
+ * Log synthetic session-end events for all active agents before killing tmux sessions.
+ *
+ * When clean --all or --worktrees kills tmux sessions, the Stop hook never fires
+ * because the process is killed externally. This function writes session_end events
+ * to the EventStore with reason='clean' so observability records are complete.
+ */
+async function logSyntheticSessionEndEvents(overstoryDir: string): Promise<number> {
+  let logged = 0;
+  try {
+    const activeSessions = loadActiveSessions(overstoryDir);
+    if (activeSessions.length === 0) {
+      return 0;
+    }
+
+    const eventsDbPath = join(overstoryDir, "events.db");
+    const eventStore = createEventStore(eventsDbPath);
+    try {
+      for (const session of activeSessions) {
+        eventStore.insert({
+          runId: session.runId,
+          agentName: session.agentName,
+          sessionId: session.id,
+          eventType: "session_end",
+          toolName: null,
+          toolArgs: null,
+          toolDurationMs: null,
+          level: "info",
+          data: JSON.stringify({ reason: "clean", capability: session.capability }),
+        });
+        logged++;
+      }
+    } finally {
+      eventStore.close();
+    }
+  } catch {
+    // Best effort: event logging should not block cleanup
+  }
+  return logged;
+}
+
+interface CleanResult {
+  sessionEndEventsLogged: number;
+  tmuxKilled: number;
+  worktreesCleaned: number;
+  branchesDeleted: number;
+  mailWiped: boolean;
+  sessionsCleared: boolean;
+  mergeQueueCleared: boolean;
+  metricsWiped: boolean;
+  logsCleared: boolean;
+  agentsCleared: boolean;
+  specsCleared: boolean;
+  nudgeStateCleared: boolean;
+  currentRunCleared: boolean;
+  mulchHealth: {
+    checked: boolean;
+    domainsNearLimit: Array<{ domain: string; recordCount: number; warnThreshold: number }>;
+    stalePruneCandidates: number;
+    doctorIssues: number;
+    doctorWarnings: number;
+  } | null;
+}
+
+/**
+ * Kill overstory tmux sessions registered in THIS project's SessionStore.
+ *
+ * Project-scoped: only kills tmux sessions whose names appear in the
+ * project's sessions.db (or sessions.json). This prevents cross-project
+ * kills during dogfooding, where `bun test` might run inside a live swarm.
+ *
+ * Falls back to killing all "overstory-{projectName}-" prefixed tmux sessions
+ * only if the SessionStore is unavailable (graceful degradation for broken state).
+ */
+async function killAllTmuxSessions(overstoryDir: string, projectName: string): Promise<number> {
+  let killed = 0;
+  const projectPrefix = `overstory-${projectName}-`;
+  try {
+    const tmuxSessions = await listSessions();
+    const overStorySessions = tmuxSessions.filter((s) => s.name.startsWith(projectPrefix));
+    if (overStorySessions.length === 0) {
+      return 0;
+    }
+
+    // Build a set of tmux session names registered in this project's SessionStore.
+    const registeredNames = loadRegisteredTmuxNames(overstoryDir);
+
+    // If we got registered names, only kill those. Otherwise fall back to all
+    // overstory-{projectName}-* sessions.
+    const toKill =
+      registeredNames !== null
+        ? overStorySessions.filter((s) => registeredNames.has(s.name))
+        : overStorySessions;
+
+    for (const session of toKill) {
+      try {
+        await killSession(session.name);
+        killed++;
+      } catch {
+        // Best effort
+      }
+    }
+  } catch {
+    // tmux not available or no server running
+  }
+  return killed;
+}
+
+/**
+ * Load the set of tmux session names registered in this project's SessionStore.
+ *
+ * Returns null if the SessionStore cannot be opened (signals the caller to
+ * fall back to the legacy "kill all overstory-*" behavior).
+ */
+function loadRegisteredTmuxNames(overstoryDir: string): Set<string> | null {
+  try {
+    const dbPath = join(overstoryDir, "sessions.db");
+    const jsonPath = join(overstoryDir, "sessions.json");
+    if (!existsSync(dbPath) && !existsSync(jsonPath)) {
+      // No session data at all -- return empty set (not null).
+      // This is distinct from "store unavailable": it means the project
+      // has no registered sessions, so nothing should be killed.
+      return new Set();
+    }
+    const { store } = openSessionStore(overstoryDir);
+    try {
+      const allSessions = store.getAll();
+      return new Set(allSessions.map((s) => s.tmuxSession));
+    } finally {
+      store.close();
+    }
+  } catch {
+    // SessionStore is broken -- fall back to legacy behavior
+    return null;
+  }
+}
+
+/**
+ * Remove all overstory worktrees (force remove with branch deletion).
+ */
+async function cleanAllWorktrees(root: string): Promise<number> {
+  let cleaned = 0;
+  try {
+    const worktrees = await listWorktrees(root);
+    const overstoryWts = worktrees.filter((wt) => wt.branch.startsWith("overstory/"));
+    for (const wt of overstoryWts) {
+      try {
+        await removeWorktree(root, wt.path, { force: true, forceBranch: true });
+        cleaned++;
+      } catch {
+        // Best effort
+      }
+    }
+  } catch {
+    // No worktrees or git error
+  }
+  return cleaned;
+}
+
+/**
+ * Delete orphaned overstory/* branch refs not tied to a worktree.
+ */
+async function deleteOrphanedBranches(root: string): Promise<number> {
+  let deleted = 0;
+  try {
+    const proc = Bun.spawn(
+      ["git", "for-each-ref", "refs/heads/overstory/", "--format=%(refname:short)"],
+      { cwd: root, stdout: "pipe", stderr: "pipe" },
+    );
+    const stdout = await new Response(proc.stdout).text();
+    await proc.exited;
+
+    const branches = stdout
+      .trim()
+      .split("\n")
+      .filter((b) => b.length > 0);
+    for (const branch of branches) {
+      try {
+        const del = Bun.spawn(["git", "branch", "-D", branch], {
+          cwd: root,
+          stdout: "pipe",
+          stderr: "pipe",
+        });
+        const exitCode = await del.exited;
+        if (exitCode === 0) deleted++;
+      } catch {
+        // Best effort
+      }
+    }
+  } catch {
+    // Git error
+  }
+  return deleted;
+}
+
+/**
+ * Delete a SQLite database file and its WAL/SHM companions.
+ */
+async function wipeSqliteDb(dbPath: string): Promise<boolean> {
+  const extensions = ["", "-wal", "-shm"];
+  let wiped = false;
+  for (const ext of extensions) {
+    try {
+      await unlink(`${dbPath}${ext}`);
+      if (ext === "") wiped = true;
+    } catch {
+      // File may not exist
+    }
+  }
+  return wiped;
+}
+
+/**
+ * Reset a JSON file to an empty array.
+ */
+async function resetJsonFile(path: string): Promise<boolean> {
+  const file = Bun.file(path);
+  if (await file.exists()) {
+    await Bun.write(path, "[]\n");
+    return true;
+  }
+  return false;
+}
+
+/**
+ * Clear all entries inside a directory but keep the directory itself.
+ */
+async function clearDirectory(dirPath: string): Promise<boolean> {
+  try {
+    const entries = await readdir(dirPath);
+    for (const entry of entries) {
+      await rm(join(dirPath, entry), { recursive: true, force: true });
+    }
+    return entries.length > 0;
+  } catch {
+    // Directory may not exist
+    return false;
+  }
+}
+
+/**
+ * Delete a single file if it exists.
+ */
+async function deleteFile(path: string): Promise<boolean> {
+  try {
+    await unlink(path);
+    return true;
+  } catch {
+    return false;
+  }
+}
+
+/**
+ * Check mulch repository health and return diagnostic information.
+ *
+ * Governance limits warn threshold (based on mulch defaults):
+ * - Max records per domain: 500 (warn at 400 = 80%)
+ *
+ * This is informational only — no data is modified.
+ */
+async function checkMulchHealth(repoRoot: string): Promise<{
+  domainsNearLimit: Array<{ domain: string; recordCount: number; warnThreshold: number }>;
+  stalePruneCandidates: number;
+  doctorIssues: number;
+  doctorWarnings: number;
+} | null> {
+  try {
+    const mulch = createMulchClient(repoRoot);
+
+    // 1. Check domain sizes against governance limits
+    let status: MulchStatus;
+    try {
+      status = await mulch.status();
+    } catch {
+      // Mulch not available or no .mulch directory
+      return null;
+    }
+
+    const warnThreshold = 400; // 80% of 500 max
+    const domainsNearLimit = status.domains
+      .filter((d) => d.recordCount >= warnThreshold)
+      .map((d) => ({ domain: d.name, recordCount: d.recordCount, warnThreshold }));
+
+    // 2. Run prune --dry-run to count stale records
+    let pruneResult: MulchPruneResult;
+    try {
+      pruneResult = await mulch.prune({ dryRun: true });
+    } catch {
+      // Prune failed — skip this check
+      pruneResult = { success: false, command: "prune", dryRun: true, totalPruned: 0, results: [] };
+    }
+
+    const stalePruneCandidates = pruneResult.totalPruned;
+
+    // 3. Run doctor to check repository health
+    let doctorResult: MulchDoctorResult;
+    try {
+      doctorResult = await mulch.doctor({ fix: false });
+    } catch {
+      // Doctor failed — skip this check
+      doctorResult = {
+        success: false,
+        command: "doctor",
+        checks: [],
+        summary: { pass: 0, warn: 0, fail: 0 },
+      };
+    }
+
+    const doctorIssues = doctorResult.summary.fail;
+    const doctorWarnings = doctorResult.summary.warn;
+
+    return {
+      domainsNearLimit,
+      stalePruneCandidates,
+      doctorIssues,
+      doctorWarnings,
+    };
+  } catch {
+    // Mulch not available or other error — skip health checks
+    return null;
+  }
+}
+
+const CLEAN_HELP = `overstory clean — Wipe runtime state (nuclear cleanup)
+
+Usage: overstory clean [flags]
+
+Flags:
+  --all          Wipe everything (nuclear option)
+  --mail         Delete mail.db (all messages)
+  --sessions     Wipe sessions.db
+  --metrics      Delete metrics.db
+  --logs         Remove all agent logs
+  --worktrees    Remove all worktrees + kill tmux sessions
+  --branches     Delete all overstory/* branch refs
+  --agents       Remove agent identity files
+  --specs        Remove task spec files
+
+Options:
+  --json         Output as JSON
+  --help, -h     Show this help
+
+When --all is passed, ALL of the above are executed in safe order:
+  0. Run mulch health checks (informational, non-destructive):
+     - Check domains approaching governance limits (warn threshold: 400 records)
+     - Run mulch prune --dry-run (report stale record counts)
+     - Run mulch doctor (report health issues)
+  1. Kill all overstory tmux sessions (processes first)
+  2. Remove all worktrees
+  3. Delete orphaned branch refs
+  4. Wipe mail.db, metrics.db, sessions.db, merge-queue.db
+  5. Clear logs, agents, specs, nudge state`;
+
+export async function cleanCommand(args: string[]): Promise<void> {
+  if (hasFlag(args, "--help") || hasFlag(args, "-h")) {
+    process.stdout.write(`${CLEAN_HELP}\n`);
+    return;
+  }
+
+  const json = hasFlag(args, "--json");
+  const all = hasFlag(args, "--all");
+
+  const doWorktrees = all || hasFlag(args, "--worktrees");
+  const doBranches = all || hasFlag(args, "--branches");
+  const doMail = all || hasFlag(args, "--mail");
+  const doSessions = all || hasFlag(args, "--sessions");
+  const doMetrics = all || hasFlag(args, "--metrics");
+  const doLogs = all || hasFlag(args, "--logs");
+  const doAgents = all || hasFlag(args, "--agents");
+  const doSpecs = all || hasFlag(args, "--specs");
+
+  const anySelected =
+    doWorktrees || doBranches || doMail || doSessions || doMetrics || doLogs || doAgents || doSpecs;
+
+  if (!anySelected) {
+    throw new ValidationError(
+      "No cleanup targets specified. Use --all for full cleanup, or individual flags (--mail, --sessions, --metrics, --logs, --worktrees, --branches, --agents, --specs).",
+      { field: "flags" },
+    );
+  }
+
+  const config = await loadConfig(process.cwd());
+  const root = config.project.root;
+  const overstoryDir = join(root, ".overstory");
+
+  const result: CleanResult = {
+    sessionEndEventsLogged: 0,
+    tmuxKilled: 0,
+    worktreesCleaned: 0,
+    branchesDeleted: 0,
+    mailWiped: false,
+    sessionsCleared: false,
+    mergeQueueCleared: false,
+    metricsWiped: false,
+    logsCleared: false,
+    agentsCleared: false,
+    specsCleared: false,
+    nudgeStateCleared: false,
+    currentRunCleared: false,
+    mulchHealth: null,
+  };
+
+  // 0. Run mulch health checks BEFORE cleanup operations (when --all is set).
+  // This is informational only — no data is modified.
+  if (all) {
+    const healthCheck = await checkMulchHealth(root);
+    if (healthCheck) {
+      result.mulchHealth = {
+        checked: true,
+        domainsNearLimit: healthCheck.domainsNearLimit,
+        stalePruneCandidates: healthCheck.stalePruneCandidates,
+        doctorIssues: healthCheck.doctorIssues,
+        doctorWarnings: healthCheck.doctorWarnings,
+      };
+    }
+  }
+
+  // 1. Log synthetic session-end events BEFORE killing tmux sessions.
+  // When processes are killed externally, the Stop hook never fires,
+  // so session_end events would be lost without this step.
+  if (doWorktrees || all) {
+    result.sessionEndEventsLogged = await logSyntheticSessionEndEvents(overstoryDir);
+  }
+
+  // 2. Kill tmux sessions (must happen before worktree removal)
+  if (doWorktrees || all) {
+    result.tmuxKilled = await killAllTmuxSessions(overstoryDir, config.project.name);
+  }
+
+  // 3. Remove worktrees
+  if (doWorktrees) {
+    result.worktreesCleaned = await cleanAllWorktrees(root);
+  }
+
+  // 4. Delete orphaned branches
+  if (doBranches) {
+    result.branchesDeleted = await deleteOrphanedBranches(root);
+  }
+
+  // 5. Wipe databases
+  if (doMail) {
+    result.mailWiped = await wipeSqliteDb(join(overstoryDir, "mail.db"));
+  }
+  if (doMetrics) {
+    result.metricsWiped = await wipeSqliteDb(join(overstoryDir, "metrics.db"));
+  }
+
+  // 6. Wipe sessions.db + legacy sessions.json
+  if (doSessions) {
+    result.sessionsCleared = await wipeSqliteDb(join(overstoryDir, "sessions.db"));
+    // Also clean legacy sessions.json if it still exists
+    await resetJsonFile(join(overstoryDir, "sessions.json"));
+  }
+  if (all) {
+    result.mergeQueueCleared = await wipeSqliteDb(join(overstoryDir, "merge-queue.db"));
+  }
+
+  // 7. Clear directories
+  if (doLogs) {
+    result.logsCleared = await clearDirectory(join(overstoryDir, "logs"));
+  }
+  if (doAgents) {
+    result.agentsCleared = await clearDirectory(join(overstoryDir, "agents"));
+  }
+  if (doSpecs) {
+    result.specsCleared = await clearDirectory(join(overstoryDir, "specs"));
+  }
+
+  // 8. Delete nudge state + pending nudge markers + current-run.txt
+  if (all) {
+    result.nudgeStateCleared = await deleteFile(join(overstoryDir, "nudge-state.json"));
+    await clearDirectory(join(overstoryDir, "pending-nudges"));
+    result.currentRunCleared = await deleteFile(join(overstoryDir, "current-run.txt"));
+  }
+
+  // Output
+  if (json) {
+    process.stdout.write(`${JSON.stringify(result, null, "\t")}\n`);
+    return;
+  }
+
+  const lines: string[] = [];
+  if (result.sessionEndEventsLogged > 0) {
+    lines.push(
+      `Logged ${result.sessionEndEventsLogged} synthetic session-end event${result.sessionEndEventsLogged === 1 ? "" : "s"}`,
+    );
+  }
+  if (result.tmuxKilled > 0) {
+    lines.push(`Killed ${result.tmuxKilled} tmux session${result.tmuxKilled === 1 ? "" : "s"}`);
+  }
+  if (result.worktreesCleaned > 0) {
+    lines.push(
+      `Removed ${result.worktreesCleaned} worktree${result.worktreesCleaned === 1 ? "" : "s"}`,
+    );
+  }
+  if (result.branchesDeleted > 0) {
+    lines.push(
+      `Deleted ${result.branchesDeleted} orphaned branch${result.branchesDeleted === 1 ? "" : "es"}`,
+    );
+  }
+  if (result.mailWiped) lines.push("Wiped mail.db");
+  if (result.metricsWiped) lines.push("Wiped metrics.db");
+  if (result.sessionsCleared) lines.push("Wiped sessions.db");
+  if (result.mergeQueueCleared) lines.push("Wiped merge-queue.db");
+  if (result.logsCleared) lines.push("Cleared logs/");
+  if (result.agentsCleared) lines.push("Cleared agents/");
+  if (result.specsCleared) lines.push("Cleared specs/");
+  if (result.nudgeStateCleared) lines.push("Cleared nudge-state.json");
+  if (result.currentRunCleared) lines.push("Cleared current-run.txt");
+
+  // Mulch health diagnostics (shown before cleanup results)
+  if (result.mulchHealth?.checked) {
+    const health = result.mulchHealth;
+    const healthLines: string[] = [];
+
+    if (health.domainsNearLimit.length > 0) {
+      healthLines.push("\n⚠️ Mulch domains approaching governance limits:");
+      for (const d of health.domainsNearLimit) {
+        healthLines.push(
+          `  ${d.domain}: ${d.recordCount} records (warn threshold: ${d.warnThreshold})`,
+        );
+      }
+    }
+
+    if (health.stalePruneCandidates > 0) {
+      healthLines.push(
+        `\n📦 Stale records found: ${health.stalePruneCandidates} candidate${health.stalePruneCandidates === 1 ? "" : "s"} (run 'mulch prune' to remove)`,
+      );
+    }
+
+    if (health.doctorWarnings > 0 || health.doctorIssues > 0) {
+      healthLines.push(
+        `\n🩺 Mulch health check: ${health.doctorWarnings} warning${health.doctorWarnings === 1 ? "" : "s"}, ${health.doctorIssues} issue${health.doctorIssues === 1 ? "" : "s"} (run 'mulch doctor' for details)`,
+      );
+    }
+
+    if (healthLines.length > 0) {
+      for (const line of healthLines) {
+        process.stdout.write(`${line}\n`);
+      }
+    }
+  }
+
+  if (lines.length === 0) {
+    process.stdout.write("Nothing to clean.\n");
+  } else {
+    if (result.mulchHealth?.checked) {
+      process.stdout.write("\n--- Cleanup Results ---\n");
+    }
+    for (const line of lines) {
+      process.stdout.write(`${line}\n`);
+    }
+    process.stdout.write("\nClean complete.\n");
+  }
+}
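
For orientation, here is a minimal sketch of driving the exported `cleanCommand(args)` entry point directly, based only on the flags and error behavior visible in the diff above. The deep-import specifier and the surrounding driver script are assumptions for illustration, not a documented entry point of the package.

```ts
// Hypothetical driver script; the import path is an assumption, not part of
// the published @os-eco/overstory-cli API surface.
import { cleanCommand } from "@os-eco/overstory-cli/src/commands/clean.ts";

try {
  // Roughly equivalent to `overstory clean --worktrees --logs --json`:
  // logs synthetic session-end events, kills this project's registered tmux
  // sessions, removes overstory/* worktrees, clears .overstory/logs/, and
  // prints the CleanResult object as JSON.
  await cleanCommand(["--worktrees", "--logs", "--json"]);
} catch (err) {
  // Calling with no flags throws a ValidationError that lists the accepted
  // flags (--all, --mail, --sessions, --metrics, --logs, --worktrees, ...).
  console.error(err);
  process.exitCode = 1;
}
```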