@os-eco/overstory-cli 0.6.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +381 -0
- package/agents/builder.md +137 -0
- package/agents/coordinator.md +263 -0
- package/agents/lead.md +301 -0
- package/agents/merger.md +160 -0
- package/agents/monitor.md +214 -0
- package/agents/reviewer.md +140 -0
- package/agents/scout.md +119 -0
- package/agents/supervisor.md +423 -0
- package/package.json +47 -0
- package/src/agents/checkpoint.test.ts +88 -0
- package/src/agents/checkpoint.ts +101 -0
- package/src/agents/hooks-deployer.test.ts +2040 -0
- package/src/agents/hooks-deployer.ts +607 -0
- package/src/agents/identity.test.ts +603 -0
- package/src/agents/identity.ts +384 -0
- package/src/agents/lifecycle.test.ts +196 -0
- package/src/agents/lifecycle.ts +183 -0
- package/src/agents/manifest.test.ts +746 -0
- package/src/agents/manifest.ts +354 -0
- package/src/agents/overlay.test.ts +676 -0
- package/src/agents/overlay.ts +308 -0
- package/src/beads/client.test.ts +217 -0
- package/src/beads/client.ts +202 -0
- package/src/beads/molecules.test.ts +338 -0
- package/src/beads/molecules.ts +198 -0
- package/src/commands/agents.test.ts +322 -0
- package/src/commands/agents.ts +287 -0
- package/src/commands/clean.test.ts +670 -0
- package/src/commands/clean.ts +618 -0
- package/src/commands/completions.test.ts +342 -0
- package/src/commands/completions.ts +887 -0
- package/src/commands/coordinator.test.ts +1530 -0
- package/src/commands/coordinator.ts +733 -0
- package/src/commands/costs.test.ts +1119 -0
- package/src/commands/costs.ts +564 -0
- package/src/commands/dashboard.test.ts +308 -0
- package/src/commands/dashboard.ts +838 -0
- package/src/commands/doctor.test.ts +294 -0
- package/src/commands/doctor.ts +213 -0
- package/src/commands/errors.test.ts +647 -0
- package/src/commands/errors.ts +248 -0
- package/src/commands/feed.test.ts +578 -0
- package/src/commands/feed.ts +361 -0
- package/src/commands/group.test.ts +262 -0
- package/src/commands/group.ts +511 -0
- package/src/commands/hooks.test.ts +458 -0
- package/src/commands/hooks.ts +253 -0
- package/src/commands/init.test.ts +347 -0
- package/src/commands/init.ts +650 -0
- package/src/commands/inspect.test.ts +670 -0
- package/src/commands/inspect.ts +431 -0
- package/src/commands/log.test.ts +1454 -0
- package/src/commands/log.ts +724 -0
- package/src/commands/logs.test.ts +379 -0
- package/src/commands/logs.ts +546 -0
- package/src/commands/mail.test.ts +1270 -0
- package/src/commands/mail.ts +771 -0
- package/src/commands/merge.test.ts +670 -0
- package/src/commands/merge.ts +355 -0
- package/src/commands/metrics.test.ts +444 -0
- package/src/commands/metrics.ts +143 -0
- package/src/commands/monitor.test.ts +191 -0
- package/src/commands/monitor.ts +390 -0
- package/src/commands/nudge.test.ts +230 -0
- package/src/commands/nudge.ts +372 -0
- package/src/commands/prime.test.ts +470 -0
- package/src/commands/prime.ts +381 -0
- package/src/commands/replay.test.ts +741 -0
- package/src/commands/replay.ts +360 -0
- package/src/commands/run.test.ts +431 -0
- package/src/commands/run.ts +351 -0
- package/src/commands/sling.test.ts +657 -0
- package/src/commands/sling.ts +661 -0
- package/src/commands/spec.test.ts +203 -0
- package/src/commands/spec.ts +168 -0
- package/src/commands/status.test.ts +430 -0
- package/src/commands/status.ts +398 -0
- package/src/commands/stop.test.ts +420 -0
- package/src/commands/stop.ts +151 -0
- package/src/commands/supervisor.test.ts +187 -0
- package/src/commands/supervisor.ts +535 -0
- package/src/commands/trace.test.ts +745 -0
- package/src/commands/trace.ts +325 -0
- package/src/commands/watch.test.ts +145 -0
- package/src/commands/watch.ts +247 -0
- package/src/commands/worktree.test.ts +786 -0
- package/src/commands/worktree.ts +311 -0
- package/src/config.test.ts +822 -0
- package/src/config.ts +829 -0
- package/src/doctor/agents.test.ts +454 -0
- package/src/doctor/agents.ts +396 -0
- package/src/doctor/config-check.test.ts +190 -0
- package/src/doctor/config-check.ts +183 -0
- package/src/doctor/consistency.test.ts +651 -0
- package/src/doctor/consistency.ts +294 -0
- package/src/doctor/databases.test.ts +290 -0
- package/src/doctor/databases.ts +218 -0
- package/src/doctor/dependencies.test.ts +184 -0
- package/src/doctor/dependencies.ts +175 -0
- package/src/doctor/logs.test.ts +251 -0
- package/src/doctor/logs.ts +295 -0
- package/src/doctor/merge-queue.test.ts +216 -0
- package/src/doctor/merge-queue.ts +144 -0
- package/src/doctor/structure.test.ts +291 -0
- package/src/doctor/structure.ts +198 -0
- package/src/doctor/types.ts +37 -0
- package/src/doctor/version.test.ts +136 -0
- package/src/doctor/version.ts +129 -0
- package/src/e2e/init-sling-lifecycle.test.ts +277 -0
- package/src/errors.ts +217 -0
- package/src/events/store.test.ts +660 -0
- package/src/events/store.ts +369 -0
- package/src/events/tool-filter.test.ts +330 -0
- package/src/events/tool-filter.ts +126 -0
- package/src/index.ts +316 -0
- package/src/insights/analyzer.test.ts +466 -0
- package/src/insights/analyzer.ts +203 -0
- package/src/logging/color.test.ts +142 -0
- package/src/logging/color.ts +71 -0
- package/src/logging/logger.test.ts +813 -0
- package/src/logging/logger.ts +266 -0
- package/src/logging/reporter.test.ts +259 -0
- package/src/logging/reporter.ts +109 -0
- package/src/logging/sanitizer.test.ts +190 -0
- package/src/logging/sanitizer.ts +57 -0
- package/src/mail/broadcast.test.ts +203 -0
- package/src/mail/broadcast.ts +92 -0
- package/src/mail/client.test.ts +773 -0
- package/src/mail/client.ts +223 -0
- package/src/mail/store.test.ts +705 -0
- package/src/mail/store.ts +387 -0
- package/src/merge/queue.test.ts +359 -0
- package/src/merge/queue.ts +231 -0
- package/src/merge/resolver.test.ts +1345 -0
- package/src/merge/resolver.ts +645 -0
- package/src/metrics/store.test.ts +667 -0
- package/src/metrics/store.ts +445 -0
- package/src/metrics/summary.test.ts +398 -0
- package/src/metrics/summary.ts +178 -0
- package/src/metrics/transcript.test.ts +356 -0
- package/src/metrics/transcript.ts +175 -0
- package/src/mulch/client.test.ts +671 -0
- package/src/mulch/client.ts +332 -0
- package/src/sessions/compat.test.ts +280 -0
- package/src/sessions/compat.ts +104 -0
- package/src/sessions/store.test.ts +873 -0
- package/src/sessions/store.ts +494 -0
- package/src/test-helpers.test.ts +124 -0
- package/src/test-helpers.ts +126 -0
- package/src/tracker/beads.ts +56 -0
- package/src/tracker/factory.test.ts +80 -0
- package/src/tracker/factory.ts +64 -0
- package/src/tracker/seeds.ts +182 -0
- package/src/tracker/types.ts +52 -0
- package/src/types.ts +724 -0
- package/src/watchdog/daemon.test.ts +1975 -0
- package/src/watchdog/daemon.ts +671 -0
- package/src/watchdog/health.test.ts +431 -0
- package/src/watchdog/health.ts +264 -0
- package/src/watchdog/triage.test.ts +164 -0
- package/src/watchdog/triage.ts +179 -0
- package/src/worktree/manager.test.ts +439 -0
- package/src/worktree/manager.ts +198 -0
- package/src/worktree/tmux.test.ts +1009 -0
- package/src/worktree/tmux.ts +509 -0
- package/templates/CLAUDE.md.tmpl +89 -0
- package/templates/hooks.json.tmpl +105 -0
- package/templates/overlay.md.tmpl +81 -0
|
@@ -0,0 +1,332 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Mulch CLI client.
|
|
3
|
+
*
|
|
4
|
+
* Wraps the `mulch` command-line tool for structured expertise operations.
|
|
5
|
+
* Uses Bun.spawn — zero runtime dependencies.
|
|
6
|
+
*/
|
|
7
|
+
|
|
8
|
+
import { AgentError } from "../errors.ts";
|
|
9
|
+
import type {
|
|
10
|
+
MulchCompactResult,
|
|
11
|
+
MulchDiffResult,
|
|
12
|
+
MulchDoctorResult,
|
|
13
|
+
MulchLearnResult,
|
|
14
|
+
MulchPruneResult,
|
|
15
|
+
MulchReadyResult,
|
|
16
|
+
MulchStatus,
|
|
17
|
+
} from "../types.ts";
|
|
18
|
+
|
|
19
|
+
/**
 * Thin typed wrapper around the `mulch` CLI.
 *
 * Methods returning structured results (`status`, `diff`, `learn`, `prune`,
 * `doctor`, `ready`, `compact`) invoke the CLI with `--json` and parse stdout;
 * `prime`, `query`, and `search` return the raw CLI output as a string.
 */
export interface MulchClient {
	/** Generate a priming prompt, optionally scoped to specific domains. */
	prime(
		domains?: string[],
		format?: "markdown" | "xml" | "json",
		options?: {
			// File paths to scope the prompt to (passed via --files).
			files?: string[];
			// Domains to exclude (passed via --exclude-domain).
			excludeDomain?: string[];
		},
	): Promise<string>;

	/** Show domain statistics. */
	status(): Promise<MulchStatus>;

	/** Record an expertise entry for a domain. */
	record(
		domain: string,
		options: {
			// Record type; the only required field besides the domain.
			type: string;
			name?: string;
			description?: string;
			title?: string;
			rationale?: string;
			// Joined with commas into a single --tags argument.
			tags?: string[];
			classification?: string;
			// When true, mulch reads the record body from stdin (--stdin flag).
			stdin?: boolean;
			evidenceBead?: string;
			outcomeStatus?: "success" | "failure";
			// NOTE(review): units (seconds vs ms) are defined by the mulch CLI — confirm there.
			outcomeDuration?: number;
			outcomeTestResults?: string;
			outcomeAgent?: string;
		},
	): Promise<void>;

	/** Query expertise records, optionally scoped to a domain. */
	query(domain?: string): Promise<string>;

	/** Search records across all domains. */
	search(query: string, options?: { file?: string; sortByScore?: boolean }): Promise<string>;

	/** Show expertise record changes since a git ref. */
	diff(options?: { since?: string }): Promise<MulchDiffResult>;

	/** Show changed files and suggest domains for recording learnings. */
	learn(options?: { since?: string }): Promise<MulchLearnResult>;

	/** Remove unused or stale records. */
	prune(options?: { dryRun?: boolean }): Promise<MulchPruneResult>;

	/** Run health checks on mulch repository. */
	doctor(options?: { fix?: boolean }): Promise<MulchDoctorResult>;

	/** Show recently added or updated expertise records. */
	ready(options?: { limit?: number; domain?: string; since?: string }): Promise<MulchReadyResult>;

	/** Compact and optimize domain storage. */
	compact(
		domain?: string,
		options?: {
			analyze?: boolean;
			apply?: boolean;
			auto?: boolean;
			dryRun?: boolean;
			minGroup?: number;
			maxRecords?: number;
			yes?: boolean;
			// Joined with commas into a single --records argument.
			records?: string[];
		},
	): Promise<MulchCompactResult>;
}
|
|
89
|
+
|
|
90
|
+
/**
|
|
91
|
+
* Run a shell command and capture its output.
|
|
92
|
+
*/
|
|
93
|
+
async function runCommand(
|
|
94
|
+
cmd: string[],
|
|
95
|
+
cwd: string,
|
|
96
|
+
): Promise<{ stdout: string; stderr: string; exitCode: number }> {
|
|
97
|
+
const proc = Bun.spawn(cmd, {
|
|
98
|
+
cwd,
|
|
99
|
+
stdout: "pipe",
|
|
100
|
+
stderr: "pipe",
|
|
101
|
+
});
|
|
102
|
+
const stdout = await new Response(proc.stdout).text();
|
|
103
|
+
const stderr = await new Response(proc.stderr).text();
|
|
104
|
+
const exitCode = await proc.exited;
|
|
105
|
+
return { stdout, stderr, exitCode };
|
|
106
|
+
}
|
|
107
|
+
|
|
108
|
+
/**
|
|
109
|
+
* Create a MulchClient bound to the given working directory.
|
|
110
|
+
*
|
|
111
|
+
* @param cwd - Working directory where mulch commands should run
|
|
112
|
+
* @returns A MulchClient instance wrapping the mulch CLI
|
|
113
|
+
*/
|
|
114
|
+
export function createMulchClient(cwd: string): MulchClient {
|
|
115
|
+
async function runMulch(
|
|
116
|
+
args: string[],
|
|
117
|
+
context: string,
|
|
118
|
+
): Promise<{ stdout: string; stderr: string }> {
|
|
119
|
+
const { stdout, stderr, exitCode } = await runCommand(["mulch", ...args], cwd);
|
|
120
|
+
if (exitCode !== 0) {
|
|
121
|
+
throw new AgentError(`mulch ${context} failed (exit ${exitCode}): ${stderr.trim()}`);
|
|
122
|
+
}
|
|
123
|
+
return { stdout, stderr };
|
|
124
|
+
}
|
|
125
|
+
|
|
126
|
+
return {
|
|
127
|
+
async prime(domains, format, options) {
|
|
128
|
+
const args = ["prime"];
|
|
129
|
+
if (domains && domains.length > 0) {
|
|
130
|
+
args.push(...domains);
|
|
131
|
+
}
|
|
132
|
+
if (format) {
|
|
133
|
+
args.push("--format", format);
|
|
134
|
+
}
|
|
135
|
+
if (options?.files && options.files.length > 0) {
|
|
136
|
+
args.push("--files", ...options.files);
|
|
137
|
+
}
|
|
138
|
+
if (options?.excludeDomain && options.excludeDomain.length > 0) {
|
|
139
|
+
args.push("--exclude-domain", ...options.excludeDomain);
|
|
140
|
+
}
|
|
141
|
+
const { stdout } = await runMulch(args, "prime");
|
|
142
|
+
return stdout;
|
|
143
|
+
},
|
|
144
|
+
|
|
145
|
+
async status() {
|
|
146
|
+
const { stdout } = await runMulch(["status", "--json"], "status");
|
|
147
|
+
const trimmed = stdout.trim();
|
|
148
|
+
if (trimmed === "") {
|
|
149
|
+
return { domains: [] };
|
|
150
|
+
}
|
|
151
|
+
try {
|
|
152
|
+
return JSON.parse(trimmed) as MulchStatus;
|
|
153
|
+
} catch {
|
|
154
|
+
throw new AgentError(
|
|
155
|
+
`Failed to parse JSON output from mulch status: ${trimmed.slice(0, 200)}`,
|
|
156
|
+
);
|
|
157
|
+
}
|
|
158
|
+
},
|
|
159
|
+
|
|
160
|
+
async record(domain, options) {
|
|
161
|
+
const args = ["record", domain, "--type", options.type];
|
|
162
|
+
if (options.name) {
|
|
163
|
+
args.push("--name", options.name);
|
|
164
|
+
}
|
|
165
|
+
if (options.description) {
|
|
166
|
+
args.push("--description", options.description);
|
|
167
|
+
}
|
|
168
|
+
if (options.title) {
|
|
169
|
+
args.push("--title", options.title);
|
|
170
|
+
}
|
|
171
|
+
if (options.rationale) {
|
|
172
|
+
args.push("--rationale", options.rationale);
|
|
173
|
+
}
|
|
174
|
+
if (options.tags && options.tags.length > 0) {
|
|
175
|
+
args.push("--tags", options.tags.join(","));
|
|
176
|
+
}
|
|
177
|
+
if (options.classification) {
|
|
178
|
+
args.push("--classification", options.classification);
|
|
179
|
+
}
|
|
180
|
+
if (options.stdin) {
|
|
181
|
+
args.push("--stdin");
|
|
182
|
+
}
|
|
183
|
+
if (options.evidenceBead) {
|
|
184
|
+
args.push("--evidence-bead", options.evidenceBead);
|
|
185
|
+
}
|
|
186
|
+
if (options.outcomeStatus) {
|
|
187
|
+
args.push("--outcome-status", options.outcomeStatus);
|
|
188
|
+
}
|
|
189
|
+
if (options.outcomeDuration !== undefined) {
|
|
190
|
+
args.push("--outcome-duration", String(options.outcomeDuration));
|
|
191
|
+
}
|
|
192
|
+
if (options.outcomeTestResults) {
|
|
193
|
+
args.push("--outcome-test-results", options.outcomeTestResults);
|
|
194
|
+
}
|
|
195
|
+
if (options.outcomeAgent) {
|
|
196
|
+
args.push("--outcome-agent", options.outcomeAgent);
|
|
197
|
+
}
|
|
198
|
+
await runMulch(args, `record ${domain}`);
|
|
199
|
+
},
|
|
200
|
+
|
|
201
|
+
async query(domain) {
|
|
202
|
+
const args = ["query"];
|
|
203
|
+
if (domain) {
|
|
204
|
+
args.push(domain);
|
|
205
|
+
}
|
|
206
|
+
const { stdout } = await runMulch(args, "query");
|
|
207
|
+
return stdout;
|
|
208
|
+
},
|
|
209
|
+
|
|
210
|
+
async search(query, options) {
|
|
211
|
+
const args = ["search", query];
|
|
212
|
+
if (options?.file) args.push("--file", options.file);
|
|
213
|
+
if (options?.sortByScore) args.push("--sort-by-score");
|
|
214
|
+
const { stdout } = await runMulch(args, "search");
|
|
215
|
+
return stdout;
|
|
216
|
+
},
|
|
217
|
+
|
|
218
|
+
async diff(options) {
|
|
219
|
+
const args = ["diff", "--json"];
|
|
220
|
+
if (options?.since) {
|
|
221
|
+
args.push("--since", options.since);
|
|
222
|
+
}
|
|
223
|
+
const { stdout } = await runMulch(args, "diff");
|
|
224
|
+
const trimmed = stdout.trim();
|
|
225
|
+
try {
|
|
226
|
+
return JSON.parse(trimmed) as MulchDiffResult;
|
|
227
|
+
} catch {
|
|
228
|
+
throw new AgentError(`Failed to parse JSON from mulch diff: ${trimmed.slice(0, 200)}`);
|
|
229
|
+
}
|
|
230
|
+
},
|
|
231
|
+
|
|
232
|
+
async learn(options) {
|
|
233
|
+
const args = ["learn", "--json"];
|
|
234
|
+
if (options?.since) {
|
|
235
|
+
args.push("--since", options.since);
|
|
236
|
+
}
|
|
237
|
+
const { stdout } = await runMulch(args, "learn");
|
|
238
|
+
const trimmed = stdout.trim();
|
|
239
|
+
try {
|
|
240
|
+
return JSON.parse(trimmed) as MulchLearnResult;
|
|
241
|
+
} catch {
|
|
242
|
+
throw new AgentError(`Failed to parse JSON from mulch learn: ${trimmed.slice(0, 200)}`);
|
|
243
|
+
}
|
|
244
|
+
},
|
|
245
|
+
|
|
246
|
+
async prune(options) {
|
|
247
|
+
const args = ["prune", "--json"];
|
|
248
|
+
if (options?.dryRun) {
|
|
249
|
+
args.push("--dry-run");
|
|
250
|
+
}
|
|
251
|
+
const { stdout } = await runMulch(args, "prune");
|
|
252
|
+
const trimmed = stdout.trim();
|
|
253
|
+
try {
|
|
254
|
+
return JSON.parse(trimmed) as MulchPruneResult;
|
|
255
|
+
} catch {
|
|
256
|
+
throw new AgentError(`Failed to parse JSON from mulch prune: ${trimmed.slice(0, 200)}`);
|
|
257
|
+
}
|
|
258
|
+
},
|
|
259
|
+
|
|
260
|
+
async doctor(options) {
|
|
261
|
+
const args = ["doctor", "--json"];
|
|
262
|
+
if (options?.fix) {
|
|
263
|
+
args.push("--fix");
|
|
264
|
+
}
|
|
265
|
+
const { stdout } = await runMulch(args, "doctor");
|
|
266
|
+
const trimmed = stdout.trim();
|
|
267
|
+
try {
|
|
268
|
+
return JSON.parse(trimmed) as MulchDoctorResult;
|
|
269
|
+
} catch {
|
|
270
|
+
throw new AgentError(`Failed to parse JSON from mulch doctor: ${trimmed.slice(0, 200)}`);
|
|
271
|
+
}
|
|
272
|
+
},
|
|
273
|
+
|
|
274
|
+
async ready(options) {
|
|
275
|
+
const args = ["ready", "--json"];
|
|
276
|
+
if (options?.limit !== undefined) {
|
|
277
|
+
args.push("--limit", String(options.limit));
|
|
278
|
+
}
|
|
279
|
+
if (options?.domain) {
|
|
280
|
+
args.push("--domain", options.domain);
|
|
281
|
+
}
|
|
282
|
+
if (options?.since) {
|
|
283
|
+
args.push("--since", options.since);
|
|
284
|
+
}
|
|
285
|
+
const { stdout } = await runMulch(args, "ready");
|
|
286
|
+
const trimmed = stdout.trim();
|
|
287
|
+
try {
|
|
288
|
+
return JSON.parse(trimmed) as MulchReadyResult;
|
|
289
|
+
} catch {
|
|
290
|
+
throw new AgentError(`Failed to parse JSON from mulch ready: ${trimmed.slice(0, 200)}`);
|
|
291
|
+
}
|
|
292
|
+
},
|
|
293
|
+
|
|
294
|
+
async compact(domain, options) {
|
|
295
|
+
const args = ["compact", "--json"];
|
|
296
|
+
if (domain) {
|
|
297
|
+
args.push(domain);
|
|
298
|
+
}
|
|
299
|
+
if (options?.analyze) {
|
|
300
|
+
args.push("--analyze");
|
|
301
|
+
}
|
|
302
|
+
if (options?.apply) {
|
|
303
|
+
args.push("--apply");
|
|
304
|
+
}
|
|
305
|
+
if (options?.auto) {
|
|
306
|
+
args.push("--auto");
|
|
307
|
+
}
|
|
308
|
+
if (options?.dryRun) {
|
|
309
|
+
args.push("--dry-run");
|
|
310
|
+
}
|
|
311
|
+
if (options?.minGroup !== undefined) {
|
|
312
|
+
args.push("--min-group", String(options.minGroup));
|
|
313
|
+
}
|
|
314
|
+
if (options?.maxRecords !== undefined) {
|
|
315
|
+
args.push("--max-records", String(options.maxRecords));
|
|
316
|
+
}
|
|
317
|
+
if (options?.yes) {
|
|
318
|
+
args.push("--yes");
|
|
319
|
+
}
|
|
320
|
+
if (options?.records && options.records.length > 0) {
|
|
321
|
+
args.push("--records", options.records.join(","));
|
|
322
|
+
}
|
|
323
|
+
const { stdout } = await runMulch(args, domain ? `compact ${domain}` : "compact");
|
|
324
|
+
const trimmed = stdout.trim();
|
|
325
|
+
try {
|
|
326
|
+
return JSON.parse(trimmed) as MulchCompactResult;
|
|
327
|
+
} catch {
|
|
328
|
+
throw new AgentError(`Failed to parse JSON from mulch compact: ${trimmed.slice(0, 200)}`);
|
|
329
|
+
}
|
|
330
|
+
},
|
|
331
|
+
};
|
|
332
|
+
}
|
|
@@ -0,0 +1,280 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Tests for the session compat shim (JSON -> SQLite migration).
|
|
3
|
+
*
|
|
4
|
+
* Uses real filesystem and bun:sqlite. No mocks.
|
|
5
|
+
* Tests file-based migration behavior, so temp files are required (not :memory:).
|
|
6
|
+
*/
|
|
7
|
+
|
|
8
|
+
import { afterEach, beforeEach, describe, expect, test } from "bun:test";
import { mkdir, mkdtemp, rm, writeFile } from "node:fs/promises";
import { tmpdir } from "node:os";
import { join } from "node:path";
import { openSessionStore } from "./compat.ts";
|
|
13
|
+
|
|
14
|
+
let tempDir: string;
|
|
15
|
+
let overstoryDir: string;
|
|
16
|
+
|
|
17
|
+
beforeEach(async () => {
|
|
18
|
+
tempDir = await mkdtemp(join(tmpdir(), "overstory-compat-test-"));
|
|
19
|
+
overstoryDir = join(tempDir, ".overstory");
|
|
20
|
+
const { mkdir } = await import("node:fs/promises");
|
|
21
|
+
await mkdir(overstoryDir, { recursive: true });
|
|
22
|
+
});
|
|
23
|
+
|
|
24
|
+
afterEach(async () => {
|
|
25
|
+
await rm(tempDir, { recursive: true, force: true });
|
|
26
|
+
});
|
|
27
|
+
|
|
28
|
+
/** Create a sessions.json with the given entries. */
|
|
29
|
+
async function writeSessionsJson(sessions: Record<string, unknown>[]): Promise<void> {
|
|
30
|
+
const jsonPath = join(overstoryDir, "sessions.json");
|
|
31
|
+
await writeFile(jsonPath, `${JSON.stringify(sessions, null, "\t")}\n`, "utf-8");
|
|
32
|
+
}
|
|
33
|
+
|
|
34
|
+
/** A valid session object as it would appear in sessions.json. */
|
|
35
|
+
function makeJsonSession(overrides: Record<string, unknown> = {}): Record<string, unknown> {
|
|
36
|
+
return {
|
|
37
|
+
id: "session-001-test-agent",
|
|
38
|
+
agentName: "test-agent",
|
|
39
|
+
capability: "builder",
|
|
40
|
+
worktreePath: "/tmp/worktrees/test-agent",
|
|
41
|
+
branchName: "overstory/test-agent/task-1",
|
|
42
|
+
beadId: "task-1",
|
|
43
|
+
tmuxSession: "overstory-test-agent",
|
|
44
|
+
state: "working",
|
|
45
|
+
pid: 12345,
|
|
46
|
+
parentAgent: null,
|
|
47
|
+
depth: 0,
|
|
48
|
+
startedAt: "2026-01-15T10:00:00.000Z",
|
|
49
|
+
lastActivity: "2026-01-15T10:05:00.000Z",
|
|
50
|
+
escalationLevel: 0,
|
|
51
|
+
stalledSince: null,
|
|
52
|
+
...overrides,
|
|
53
|
+
};
|
|
54
|
+
}
|
|
55
|
+
|
|
56
|
+
// === Migration from sessions.json ===
|
|
57
|
+
|
|
58
|
+
// Covers the compat shim's decision matrix: fresh DB, JSON -> SQLite import,
// existing non-empty DB (no re-migration), empty DB alongside stale JSON,
// and malformed JSON inputs.
describe("openSessionStore", () => {
	test("creates empty DB when neither sessions.json nor sessions.db exist", () => {
		const { store, migrated } = openSessionStore(overstoryDir);

		expect(migrated).toBe(false);
		expect(store.getAll()).toEqual([]);
		store.close();
	});

	test("imports sessions from sessions.json when sessions.db does not exist", async () => {
		await writeSessionsJson([
			makeJsonSession({ agentName: "agent-a", id: "s-a" }),
			makeJsonSession({ agentName: "agent-b", id: "s-b", state: "completed" }),
		]);

		const { store, migrated } = openSessionStore(overstoryDir);

		expect(migrated).toBe(true);
		const all = store.getAll();
		expect(all).toHaveLength(2);

		// Per-session fields must survive the import, not just the row count.
		const agentA = store.getByName("agent-a");
		expect(agentA).not.toBeNull();
		expect(agentA?.id).toBe("s-a");
		expect(agentA?.state).toBe("working");

		const agentB = store.getByName("agent-b");
		expect(agentB).not.toBeNull();
		expect(agentB?.state).toBe("completed");

		store.close();
	});

	test("migration adds runId=null when sessions.json entries lack runId", async () => {
		// Write a session WITHOUT runId (old format)
		const oldFormatSession = makeJsonSession();
		(oldFormatSession as Record<string, unknown>).runId = undefined;

		await writeSessionsJson([oldFormatSession]);

		const { store, migrated } = openSessionStore(overstoryDir);

		expect(migrated).toBe(true);
		const session = store.getByName("test-agent");
		expect(session).not.toBeNull();
		// Missing runId in the old format must become an explicit null column.
		expect(session?.runId).toBeNull();

		store.close();
	});

	test("does not re-migrate when sessions.db already exists", async () => {
		// First call: create the DB with migration
		await writeSessionsJson([makeJsonSession({ agentName: "original", id: "s-1" })]);

		const { store: store1, migrated: migrated1 } = openSessionStore(overstoryDir);
		expect(migrated1).toBe(true);
		store1.close();

		// Modify sessions.json to add a new entry
		await writeSessionsJson([
			makeJsonSession({ agentName: "original", id: "s-1" }),
			makeJsonSession({ agentName: "new-agent", id: "s-2" }),
		]);

		// Second call: DB exists, so no migration
		const { store: store2, migrated: migrated2 } = openSessionStore(overstoryDir);
		expect(migrated2).toBe(false);

		// Should still have only the original session from the first migration
		const all = store2.getAll();
		expect(all).toHaveLength(1);
		expect(all[0]?.agentName).toBe("original");

		store2.close();
	});

	test("imports from sessions.json when sessions.db exists but is empty", async () => {
		// Simulate the scenario where sessions.db was created (e.g., by init) but
		// no sessions were ever written to it, while sessions.json has records from
		// a previous coordinator start that used the old code path (overstory-036f).
		const { store: emptyStore } = openSessionStore(overstoryDir);
		expect(emptyStore.getAll()).toHaveLength(0);
		emptyStore.close();

		// Now write records to sessions.json (simulating old code path)
		await writeSessionsJson([
			makeJsonSession({ agentName: "orphaned-coordinator", id: "s-orphan", state: "booting" }),
		]);

		// Re-open: DB exists but is empty, so JSON records should be imported
		const { store: store2, migrated } = openSessionStore(overstoryDir);
		expect(migrated).toBe(true);

		const all = store2.getAll();
		expect(all).toHaveLength(1);
		expect(all[0]?.agentName).toBe("orphaned-coordinator");
		expect(all[0]?.state).toBe("booting");
		store2.close();
	});

	test("handles empty sessions.json (no migration needed)", async () => {
		await writeSessionsJson([]);

		const { store, migrated } = openSessionStore(overstoryDir);

		// An empty array has nothing to import, so migrated stays false.
		expect(migrated).toBe(false);
		expect(store.getAll()).toEqual([]);
		store.close();
	});

	test("handles malformed sessions.json gracefully", async () => {
		// Unparseable JSON must not crash the shim; it falls back to an empty DB.
		const jsonPath = join(overstoryDir, "sessions.json");
		await writeFile(jsonPath, "this is not json", "utf-8");

		const { store, migrated } = openSessionStore(overstoryDir);

		expect(migrated).toBe(false);
		expect(store.getAll()).toEqual([]);
		store.close();
	});

	test("handles sessions.json with non-array content gracefully", async () => {
		// Valid JSON of the wrong shape (object, not array) is likewise ignored.
		const jsonPath = join(overstoryDir, "sessions.json");
		await writeFile(jsonPath, '{"not": "an array"}', "utf-8");

		const { store, migrated } = openSessionStore(overstoryDir);

		expect(migrated).toBe(false);
		expect(store.getAll()).toEqual([]);
		store.close();
	});
});
|
|
190
|
+
|
|
191
|
+
// === Data integrity after migration ===
|
|
192
|
+
|
|
193
|
+
// Verifies that migrated rows are complete (every JSON field round-trips) and
// that the resulting store behaves like a normal SessionStore afterwards.
describe("data integrity", () => {
	test("all fields from sessions.json are preserved in SQLite", async () => {
		// Use non-default values for every field so a dropped or swapped column
		// cannot pass by coincidence.
		const fullSession = makeJsonSession({
			id: "session-full",
			agentName: "full-agent",
			capability: "scout",
			worktreePath: "/tmp/worktrees/full-agent",
			branchName: "overstory/full-agent/task-42",
			beadId: "task-42",
			tmuxSession: "overstory-full-agent",
			state: "stalled",
			pid: 99999,
			parentAgent: "lead-agent",
			depth: 2,
			startedAt: "2026-02-01T08:00:00.000Z",
			lastActivity: "2026-02-01T09:00:00.000Z",
			escalationLevel: 3,
			stalledSince: "2026-02-01T08:50:00.000Z",
		});

		await writeSessionsJson([fullSession]);

		const { store } = openSessionStore(overstoryDir);
		const result = store.getByName("full-agent");

		expect(result).not.toBeNull();
		expect(result?.id).toBe("session-full");
		expect(result?.agentName).toBe("full-agent");
		expect(result?.capability).toBe("scout");
		expect(result?.worktreePath).toBe("/tmp/worktrees/full-agent");
		expect(result?.branchName).toBe("overstory/full-agent/task-42");
		expect(result?.beadId).toBe("task-42");
		expect(result?.tmuxSession).toBe("overstory-full-agent");
		expect(result?.state).toBe("stalled");
		expect(result?.pid).toBe(99999);
		expect(result?.parentAgent).toBe("lead-agent");
		expect(result?.depth).toBe(2);
		expect(result?.runId).toBeNull(); // Not in old JSON format
		expect(result?.startedAt).toBe("2026-02-01T08:00:00.000Z");
		expect(result?.lastActivity).toBe("2026-02-01T09:00:00.000Z");
		expect(result?.escalationLevel).toBe(3);
		expect(result?.stalledSince).toBe("2026-02-01T08:50:00.000Z");

		store.close();
	});

	test("migrated store supports all SessionStore operations", async () => {
		await writeSessionsJson([
			makeJsonSession({ agentName: "agent-a", id: "s-a", state: "working" }),
			makeJsonSession({ agentName: "agent-b", id: "s-b", state: "completed" }),
		]);

		const { store } = openSessionStore(overstoryDir);

		// getActive should return only "working" sessions
		const active = store.getActive();
		expect(active).toHaveLength(1);
		expect(active[0]?.agentName).toBe("agent-a");

		// updateState should work on migrated data
		store.updateState("agent-a", "completed");
		expect(store.getByName("agent-a")?.state).toBe("completed");

		// remove should work on migrated data
		store.remove("agent-b");
		expect(store.getByName("agent-b")).toBeNull();

		store.close();
	});

	test("multiple sessions with same agent name in JSON: last one wins", async () => {
		// This edge case shouldn't happen in practice, but test defensive behavior
		await writeSessionsJson([
			makeJsonSession({ agentName: "dupe", id: "s-1", state: "booting" }),
			makeJsonSession({ agentName: "dupe", id: "s-2", state: "working" }),
		]);

		const { store } = openSessionStore(overstoryDir);

		const all = store.getAll();
		expect(all).toHaveLength(1);
		// Last upsert wins (s-2)
		expect(all[0]?.id).toBe("s-2");
		expect(all[0]?.state).toBe("working");

		store.close();
	});
});
|