@delegance/claude-autopilot 5.2.2 → 6.2.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +1027 -1
- package/README.md +104 -17
- package/dist/src/adapters/council/claude.js +2 -1
- package/dist/src/adapters/council/openai.js +14 -7
- package/dist/src/adapters/deploy/_http.d.ts +43 -0
- package/dist/src/adapters/deploy/_http.js +99 -0
- package/dist/src/adapters/deploy/fly.d.ts +206 -0
- package/dist/src/adapters/deploy/fly.js +696 -0
- package/dist/src/adapters/deploy/generic.d.ts +39 -0
- package/dist/src/adapters/deploy/generic.js +98 -0
- package/dist/src/adapters/deploy/index.d.ts +15 -0
- package/dist/src/adapters/deploy/index.js +78 -0
- package/dist/src/adapters/deploy/render.d.ts +181 -0
- package/dist/src/adapters/deploy/render.js +550 -0
- package/dist/src/adapters/deploy/types.d.ts +221 -0
- package/dist/src/adapters/deploy/types.js +15 -0
- package/dist/src/adapters/deploy/vercel.d.ts +143 -0
- package/dist/src/adapters/deploy/vercel.js +426 -0
- package/dist/src/adapters/pricing.d.ts +36 -0
- package/dist/src/adapters/pricing.js +40 -0
- package/dist/src/adapters/review-engine/claude.js +2 -1
- package/dist/src/adapters/review-engine/codex.js +12 -8
- package/dist/src/adapters/review-engine/gemini.js +2 -1
- package/dist/src/adapters/review-engine/openai-compatible.js +2 -1
- package/dist/src/adapters/sdk-loader.d.ts +15 -0
- package/dist/src/adapters/sdk-loader.js +77 -0
- package/dist/src/cli/autopilot.d.ts +71 -0
- package/dist/src/cli/autopilot.js +735 -0
- package/dist/src/cli/brainstorm.d.ts +23 -0
- package/dist/src/cli/brainstorm.js +131 -0
- package/dist/src/cli/costs.d.ts +15 -1
- package/dist/src/cli/costs.js +99 -10
- package/dist/src/cli/deploy.d.ts +71 -0
- package/dist/src/cli/deploy.js +539 -0
- package/dist/src/cli/fix.d.ts +18 -0
- package/dist/src/cli/fix.js +105 -11
- package/dist/src/cli/help-text.d.ts +52 -0
- package/dist/src/cli/help-text.js +400 -0
- package/dist/src/cli/implement.d.ts +91 -0
- package/dist/src/cli/implement.js +196 -0
- package/dist/src/cli/index.js +784 -222
- package/dist/src/cli/json-envelope.d.ts +187 -0
- package/dist/src/cli/json-envelope.js +270 -0
- package/dist/src/cli/json-mode.d.ts +33 -0
- package/dist/src/cli/json-mode.js +201 -0
- package/dist/src/cli/migrate.d.ts +111 -0
- package/dist/src/cli/migrate.js +305 -0
- package/dist/src/cli/plan.d.ts +81 -0
- package/dist/src/cli/plan.js +149 -0
- package/dist/src/cli/pr.d.ts +106 -0
- package/dist/src/cli/pr.js +191 -19
- package/dist/src/cli/preflight.js +102 -1
- package/dist/src/cli/review.d.ts +27 -0
- package/dist/src/cli/review.js +126 -0
- package/dist/src/cli/runs-watch-renderer.d.ts +45 -0
- package/dist/src/cli/runs-watch-renderer.js +275 -0
- package/dist/src/cli/runs-watch.d.ts +41 -0
- package/dist/src/cli/runs-watch.js +395 -0
- package/dist/src/cli/runs.d.ts +122 -0
- package/dist/src/cli/runs.js +902 -0
- package/dist/src/cli/scan.d.ts +93 -0
- package/dist/src/cli/scan.js +166 -40
- package/dist/src/cli/spec.d.ts +66 -0
- package/dist/src/cli/spec.js +132 -0
- package/dist/src/cli/validate.d.ts +29 -0
- package/dist/src/cli/validate.js +131 -0
- package/dist/src/core/config/schema.d.ts +43 -0
- package/dist/src/core/config/schema.js +25 -0
- package/dist/src/core/config/types.d.ts +17 -0
- package/dist/src/core/council/runner.d.ts +10 -1
- package/dist/src/core/council/runner.js +25 -3
- package/dist/src/core/council/types.d.ts +7 -0
- package/dist/src/core/errors.d.ts +1 -1
- package/dist/src/core/errors.js +12 -0
- package/dist/src/core/logging/redaction.d.ts +13 -0
- package/dist/src/core/logging/redaction.js +20 -0
- package/dist/src/core/migrate/detector-rules.js +6 -0
- package/dist/src/core/migrate/schema-validator.js +22 -1
- package/dist/src/core/phases/static-rules.d.ts +5 -1
- package/dist/src/core/phases/static-rules.js +2 -5
- package/dist/src/core/run-state/budget.d.ts +88 -0
- package/dist/src/core/run-state/budget.js +141 -0
- package/dist/src/core/run-state/cli-internal.d.ts +21 -0
- package/dist/src/core/run-state/cli-internal.js +174 -0
- package/dist/src/core/run-state/events.d.ts +59 -0
- package/dist/src/core/run-state/events.js +504 -0
- package/dist/src/core/run-state/lock.d.ts +61 -0
- package/dist/src/core/run-state/lock.js +206 -0
- package/dist/src/core/run-state/phase-context.d.ts +60 -0
- package/dist/src/core/run-state/phase-context.js +108 -0
- package/dist/src/core/run-state/phase-registry.d.ts +137 -0
- package/dist/src/core/run-state/phase-registry.js +162 -0
- package/dist/src/core/run-state/phase-runner.d.ts +80 -0
- package/dist/src/core/run-state/phase-runner.js +447 -0
- package/dist/src/core/run-state/provider-readback.d.ts +130 -0
- package/dist/src/core/run-state/provider-readback.js +426 -0
- package/dist/src/core/run-state/replay-decision.d.ts +69 -0
- package/dist/src/core/run-state/replay-decision.js +144 -0
- package/dist/src/core/run-state/resolve-engine.d.ts +100 -0
- package/dist/src/core/run-state/resolve-engine.js +190 -0
- package/dist/src/core/run-state/resume-preflight.d.ts +66 -0
- package/dist/src/core/run-state/resume-preflight.js +116 -0
- package/dist/src/core/run-state/run-phase-with-lifecycle.d.ts +73 -0
- package/dist/src/core/run-state/run-phase-with-lifecycle.js +186 -0
- package/dist/src/core/run-state/runs.d.ts +57 -0
- package/dist/src/core/run-state/runs.js +288 -0
- package/dist/src/core/run-state/snapshot.d.ts +14 -0
- package/dist/src/core/run-state/snapshot.js +114 -0
- package/dist/src/core/run-state/state.d.ts +40 -0
- package/dist/src/core/run-state/state.js +164 -0
- package/dist/src/core/run-state/types.d.ts +278 -0
- package/dist/src/core/run-state/types.js +13 -0
- package/dist/src/core/run-state/ulid.d.ts +11 -0
- package/dist/src/core/run-state/ulid.js +95 -0
- package/dist/src/core/schema-alignment/extractor/index.d.ts +1 -1
- package/dist/src/core/schema-alignment/extractor/index.js +2 -2
- package/dist/src/core/schema-alignment/extractor/prisma.d.ts +13 -1
- package/dist/src/core/schema-alignment/extractor/prisma.js +65 -10
- package/dist/src/core/schema-alignment/git-history.d.ts +19 -0
- package/dist/src/core/schema-alignment/git-history.js +53 -0
- package/dist/src/core/static-rules/rules/brand-tokens.js +2 -2
- package/dist/src/core/static-rules/rules/schema-alignment.js +14 -4
- package/package.json +9 -5
- package/scripts/autoregress.ts +3 -2
- package/skills/claude-autopilot.md +1 -1
- package/skills/make-interfaces-feel-better/SKILL.md +104 -0
- package/skills/migrate/SKILL.md +193 -47
- package/skills/simplify-ui/SKILL.md +103 -0
- package/skills/ui/SKILL.md +117 -0
- package/skills/ui-ux-pro-max/SKILL.md +90 -0
|
@@ -0,0 +1,504 @@
|
|
|
1
|
+
// src/core/run-state/events.ts
|
|
2
|
+
//
|
|
3
|
+
// Append-only event log + state replay. Implements the persistence protocol
|
|
4
|
+
// from the v6 spec — open(O_APPEND) + write + fsync(fd) for every event;
|
|
5
|
+
// monotonic seq assigned by the holding writer; partial-write detection on
|
|
6
|
+
// read with auto-emission of `run.recovery` on the next append.
|
|
7
|
+
//
|
|
8
|
+
// Spec: docs/specs/v6-run-state-engine.md "Persistence protocol", "Run
|
|
9
|
+
// lifecycle", "Failure modes".
|
|
10
|
+
import * as fs from 'node:fs';
|
|
11
|
+
import * as path from 'node:path';
|
|
12
|
+
import { GuardrailError } from "../errors.js";
|
|
13
|
+
import { updateLockSeq } from "./lock.js";
|
|
14
|
+
import { RUN_STATE_SCHEMA_VERSION, } from "./types.js";
|
|
15
|
+
/** File name of the append-only ndjson event log inside each run directory. */
const EVENTS_FILE = 'events.ndjson';
/** Optional sidecar that records the highest seq we've successfully written.
 * Lets us assign the next seq in O(1) instead of rescanning the tail of the
 * log. The log itself is still authoritative — if the sidecar disagrees, we
 * trust the log. */
const SEQ_SIDECAR = '.seq';
/** Marker that lives next to events.ndjson when the last read detected a
 * truncated tail. The next `appendEvent` consumes the marker, emits a
 * recovery event, and clears it. */
const PARTIAL_WRITE_MARKER = '.partial-write';
|
|
25
|
+
/** Absolute path of the append-only event log inside `runDir`. */
export function eventsPath(runDir) {
    const logPath = path.join(runDir, EVENTS_FILE);
    return logPath;
}
|
|
28
|
+
/** Absolute path of the optional `.seq` sidecar inside `runDir`. */
function seqSidecarPath(runDir) {
    const sidecarPath = path.join(runDir, SEQ_SIDECAR);
    return sidecarPath;
}
|
|
31
|
+
/** Absolute path of the partial-write marker file inside `runDir`. */
function partialMarkerPath(runDir) {
    const markerPath = path.join(runDir, PARTIAL_WRITE_MARKER);
    return markerPath;
}
|
|
34
|
+
/**
 * Read the highest-known seq from the `.seq` sidecar.
 *
 * Returns null when the sidecar is missing, empty, or does not parse to a
 * non-negative integer — callers then fall back to rescanning
 * events.ndjson, which is always authoritative.
 */
function readSeqSidecar(runDir) {
    const sidecar = seqSidecarPath(runDir);
    if (!fs.existsSync(sidecar)) {
        return null;
    }
    const text = fs.readFileSync(sidecar, 'utf8').trim();
    if (text === '') {
        return null;
    }
    const value = Number.parseInt(text, 10);
    if (Number.isFinite(value) && value >= 0) {
        return value;
    }
    return null;
}
|
|
44
|
+
/**
 * Persist the highest-appended seq to the `.seq` sidecar. Best-effort by
 * design: any write failure is swallowed, because the log itself stays
 * authoritative and the next open simply rescans it.
 */
function writeSeqSidecar(runDir, seq) {
    const target = seqSidecarPath(runDir);
    try {
        fs.writeFileSync(target, String(seq), 'utf8');
    }
    catch {
        // Swallowed on purpose — a stale/missing sidecar only costs a rescan.
    }
}
|
|
53
|
+
/** Stream all events from disk for one run directory.
 *
 * Returns `{ events, truncatedTail, maxSeq }` where:
 * - `events` — parsed event objects, filtered first by `opts.fromSeq`
 *   (inclusive lower bound on `seq`) and THEN by `opts.tail` (keep only the
 *   last N survivors) — note the ordering of the two filters.
 * - `truncatedTail` — true when the file does not end in '\n', i.e. the last
 *   line is a partial write; that fragment is dropped, not parsed.
 * - `maxSeq` — highest numeric `seq` across ALL parsed events, computed
 *   before any fromSeq/tail filtering.
 *
 * Side effect: on a truncated tail, writes the `.partial-write` marker so
 * the next `appendEvent` emits a `run.recovery` event (best-effort).
 *
 * Does NOT throw on the excluded truncated tail; any OTHER line that fails
 * to parse is mid-log corruption and raises a `partial_write`
 * GuardrailError, because that is unrecoverable here. */
export function readEvents(runDir, opts = {}) {
    const p = eventsPath(runDir);
    if (!fs.existsSync(p)) {
        return { events: [], truncatedTail: false, maxSeq: 0 };
    }
    const raw = fs.readFileSync(p, 'utf8');
    if (!raw)
        return { events: [], truncatedTail: false, maxSeq: 0 };
    // A well-formed ndjson file ends in '\n'. If the last char isn't '\n',
    // the file was truncated mid-write and the trailing fragment is junk.
    const endsWithNewline = raw.endsWith('\n');
    const lines = raw.split('\n');
    // After split, an ndjson file ending in '\n' produces a trailing '' that
    // we drop; a truncated file produces a non-empty trailing fragment that
    // we must also drop AND signal as truncated.
    let truncatedTail = false;
    let lastIdx = lines.length - 1;
    if (lines[lastIdx] === '') {
        // Normal case — trailing newline.
        lastIdx -= 1;
    }
    else if (!endsWithNewline) {
        truncatedTail = true;
        lastIdx -= 1; // exclude the partial fragment from the parse loop below
    }
    const events = [];
    let maxSeq = 0;
    for (let i = 0; i <= lastIdx; i++) {
        const line = lines[i];
        if (!line)
            continue; // skip blank lines defensively
        let parsed;
        try {
            parsed = JSON.parse(line);
        }
        catch (err) {
            // The truncated tail (when present) is already excluded from the loop
            // by the `lastIdx -= 1` decrement above, so any parse failure here is
            // real mid-file corruption. Caught by Cursor Bugbot on PR #86 (MEDIUM):
            // the prior `i === lastIdx && !endsWithNewline` heuristic also matched
            // the LAST processed (well-terminated) line of a tail-truncated file
            // and silently swallowed genuine corruption on it.
            throw new GuardrailError(`events.ndjson: corrupt JSON at line ${i + 1}`, {
                code: 'partial_write',
                provider: 'run-state',
                details: { runDir, line: i + 1, error: err.message },
            });
        }
        if (typeof parsed.seq === 'number' && parsed.seq > maxSeq)
            maxSeq = parsed.seq;
        events.push(parsed);
    }
    // Persist the partial-write marker so the next append knows to emit
    // a recovery event. We do this here on read because read is cheap and
    // happens once at writer-startup; appending a marker mid-read is racy
    // only in the multi-writer case which our advisory lock disallows.
    if (truncatedTail) {
        try {
            fs.writeFileSync(partialMarkerPath(runDir), '1', 'utf8');
        }
        catch {
            // intentionally swallowed — recovery is best-effort
        }
    }
    let result = events;
    if (typeof opts.fromSeq === 'number') {
        const fromSeq = opts.fromSeq;
        result = result.filter(e => e.seq >= fromSeq);
    }
    if (typeof opts.tail === 'number' && opts.tail > 0) {
        result = result.slice(-opts.tail);
    }
    return { events: result, truncatedTail, maxSeq };
}
|
|
131
|
+
/**
 * Highest seq currently on disk for this run. Consults the `.seq` sidecar
 * first (O(1)); when it is absent or invalid, falls back to a full scan of
 * events.ndjson, which is always authoritative.
 */
export function readMaxSeq(runDir) {
    const cached = readSeqSidecar(runDir);
    if (cached === null) {
        return readEvents(runDir).maxSeq;
    }
    return cached;
}
|
|
139
|
+
/** Append a single event to events.ndjson. Strict ordering:
 * 1. open(O_APPEND), write line, fsync(fd), close.
 * 2. Update sidecar with new seq (best-effort).
 *
 * Returns the fully-formed RunEvent that landed on disk (with seq, ts,
 * schema_version, etc. filled in).
 *
 * This is the ONLY supported way to append. Bypassing it with raw fs writes
 * will desync the seq sidecar and may break recovery. */
export function appendEvent(runDir, input, opts) {
    fs.mkdirSync(runDir, { recursive: true });
    // If the previous open detected a truncated tail, drop a recovery event
    // FIRST so consumers see exactly one signal of the gap before any further
    // payload events. We clear the marker BEFORE the recovery append so a
    // failure inside that append can't leave the marker behind and cause a
    // duplicate recovery emission on the next call.
    // The tail bytes (the partial JSON without a trailing newline) MUST be
    // truncated off before we append, otherwise the next event line gets
    // glued onto the corrupt bytes and we end up with a permanently broken
    // log even after recovery.
    const markerPath = partialMarkerPath(runDir);
    if (fs.existsSync(markerPath)) {
        try {
            fs.unlinkSync(markerPath);
        }
        catch { /* ignore — marker is advisory */ }
        truncateToLastNewline(runDir);
        appendEventInner(runDir, {
            event: 'run.recovery',
            reason: 'recovered-from-partial-write',
        }, opts);
    }
    return appendEventInner(runDir, input, opts);
}
|
|
172
|
+
/**
 * Discard any trailing bytes after the last '\n' in events.ndjson so the
 * next appended event starts on a fresh line. Used during partial-write
 * recovery. Best-effort throughout — if anything here fails, the appender
 * still emits JSON (possibly on a malformed line) and the seq-gap check
 * surfaces the problem on the next replay.
 */
function truncateToLastNewline(runDir) {
    const logPath = eventsPath(runDir);
    let contents;
    try {
        contents = fs.readFileSync(logPath, 'utf8');
    }
    catch {
        return; // no log file, nothing to repair
    }
    if (contents.length === 0 || contents.endsWith('\n')) {
        return; // already well-terminated
    }
    const cut = contents.lastIndexOf('\n');
    if (cut < 0) {
        // The whole file is a single partial fragment — wipe it entirely.
        try {
            fs.writeFileSync(logPath, '', 'utf8');
        }
        catch { /* best-effort */ }
        invalidateSeqSidecar(runDir);
        return;
    }
    // Keep everything up to and including the final '\n'.
    try {
        fs.writeFileSync(logPath, contents.slice(0, cut + 1), 'utf8');
    }
    catch { /* best-effort */ }
    // The .seq sidecar may still reference a seq that only existed in the
    // discarded fragment, which would look like a phantom gap on the next
    // append → foldEvents throws corrupted_state, breaking the very recovery
    // path. Drop it so readMaxSeq rescans the (now correct) file.
    // Caught by Cursor Bugbot on PR #86 (LOW).
    invalidateSeqSidecar(runDir);
}
|
|
211
|
+
/** Remove the `.seq` sidecar so the next readMaxSeq rescans the log. */
function invalidateSeqSidecar(runDir) {
    try {
        fs.unlinkSync(seqSidecarPath(runDir));
    }
    catch {
        // A missing sidecar (or any unlink failure) is fine — the log is
        // authoritative and will simply be rescanned.
    }
}
|
|
217
|
+
/**
 * Durably append one event line. Assigns seq = (max on disk) + 1, stamps
 * schema_version / ts / runId / writerId, then performs the persistence
 * protocol: open(O_APPEND) → write → fsync(fd) → close. Afterwards updates
 * the seq sidecar and lock metadata (both best-effort). Returns the
 * fully-formed event exactly as written.
 */
function appendEventInner(runDir, input, opts) {
    const runId = opts.runId ?? path.basename(runDir);
    const prevSeq = readMaxSeq(runDir);
    const seq = prevSeq + 1;
    const fullEvent = {
        schema_version: RUN_STATE_SCHEMA_VERSION,
        ts: new Date().toISOString(),
        runId,
        seq,
        writerId: opts.writerId,
        // Spread last: the caller's payload wins on key collisions.
        ...input,
    };
    const line = JSON.stringify(fullEvent) + '\n';
    const fd = fs.openSync(eventsPath(runDir), 'a');
    try {
        fs.writeSync(fd, line);
        fs.fsyncSync(fd); // durability point — the event is committed here
    }
    finally {
        fs.closeSync(fd);
    }
    writeSeqSidecar(runDir, seq);
    updateLockSeq(runDir, seq);
    return fullEvent;
}
|
|
242
|
+
// ----------------------------------------------------------------------------
|
|
243
|
+
// State replay. Folds events.ndjson into a RunState. Used both as:
|
|
244
|
+
// a) the recovery path when state.json is missing/corrupt, and
|
|
245
|
+
// b) a sanity check for tests / `runs doctor`.
|
|
246
|
+
// ----------------------------------------------------------------------------
|
|
247
|
+
/** Build a fresh "pending" phase record for `name` at position `index`. */
const EMPTY_PHASE_SHELL = (name, index) => {
    return {
        schema_version: RUN_STATE_SCHEMA_VERSION,
        name,
        index,
        status: 'pending',
        idempotent: false,
        hasSideEffects: false,
        costUSD: 0,
        attempts: 0,
        artifacts: [],
        externalRefs: [],
    };
};
|
|
259
|
+
/** Replay events.ndjson into a fresh RunState snapshot. The events file is
 * the source of truth — this is always callable; if the file is missing or
 * empty, the result is a minimal "pending" state with no phases.
 *
 * Throws GuardrailError(corrupted_state) if the log has internal
 * contradictions that prevent a coherent snapshot (e.g. seq gaps,
 * phase.success without a prior phase.start), OR if the persisted
 * `schema_version` falls outside this binary's supported window
 * (`RUN_STATE_MIN_SUPPORTED_SCHEMA_VERSION..RUN_STATE_MAX_SUPPORTED_SCHEMA_VERSION`).
 * Per v6.2.2 spec — the prior shape would fail with a cryptic
 * `cannot read property 'phases' of undefined` instead of an actionable
 * "this run dir is from a newer/older version" message. */
export function replayState(runDir) {
    const { events } = readEvents(runDir);
    // v6.2.2 — version-window check. The bounds live in `state.ts` per the
    // spec, but a top-level import would close a cycle (state.ts already
    // imports `replayState` from this file). The values are derived from
    // RUN_STATE_SCHEMA_VERSION (already imported above), so we recompute them
    // locally — `state.ts` exposes the same shape via re-export.
    const minSupported = 1;
    const maxSupported = RUN_STATE_SCHEMA_VERSION;
    // Surface schema_version from the durable record. Each event carries the
    // writer's `schema_version`; the run.start event is the canonical
    // observation point because it's always first and always present after
    // createRun. If no events exist (fresh empty dir) we have nothing to check
    // and fall through to the existing empty-state path inside foldEvents.
    // Non-numeric schema_version is tolerated (skipped) rather than rejected.
    if (events.length > 0) {
        const observed = events[0].schema_version;
        if (typeof observed === 'number' &&
            (observed < minSupported || observed > maxSupported)) {
            throw new GuardrailError(`run dir at ${runDir} has schema_version ${observed}; this binary supports schema_version ${minSupported}..${maxSupported}. ` +
                `Use the version of claude-autopilot that created this run dir, or delete the run dir to start fresh.`, {
                code: 'corrupted_state',
                provider: 'run-state',
                details: {
                    runDir,
                    observed,
                    minSupported,
                    maxSupported,
                },
            });
        }
    }
    return foldEvents(runDir, events);
}
|
|
304
|
+
/**
 * Fold an ordered event list into a RunState snapshot.
 *
 * Validates that seq is exactly 1..N (no gaps, no duplicates), seeds the
 * state from the `run.start` event, then applies every event in order via
 * `applyEvent`. When no `run.start` exists yet, returns a minimal pending
 * stub (the window between mkdir and the first append in createRun).
 *
 * Throws GuardrailError(code 'corrupted_state') on seq gaps.
 */
export function foldEvents(runDir, events) {
    // Verify monotonic seq (no gaps, no duplicates) — the whole replay
    // contract depends on this. A gap means a writer crashed between
    // assigning seq and fsync; we treat that as corrupted_state and force
    // the user to acknowledge.
    for (let i = 0; i < events.length; i++) {
        const expected = i + 1;
        const got = events[i].seq;
        if (got !== expected) {
            throw new GuardrailError(`events.ndjson: seq gap at line ${i + 1} — expected ${expected}, got ${got}`, {
                code: 'corrupted_state',
                provider: 'run-state',
                details: { runDir, line: i + 1, expected, got },
            });
        }
    }
    // Find the run.start to seed the state.
    const startEvent = events.find(e => e.event === 'run.start');
    if (!startEvent) {
        // No start event yet — return a stub. Used during the brief window
        // between mkdir and the first appendEvent call in createRun.
        return {
            schema_version: RUN_STATE_SCHEMA_VERSION,
            runId: path.basename(runDir),
            startedAt: new Date(0).toISOString(),
            status: 'pending',
            phases: [],
            currentPhaseIdx: 0,
            totalCostUSD: 0,
            lastEventSeq: 0,
            writerId: { pid: 0, hostHash: '' },
            cwd: '',
        };
    }
    if (startEvent.event !== 'run.start') {
        // Defensive — unreachable at runtime (the find above guarantees the
        // match); kept so TS narrows `startEvent` to the run.start variant.
        throw new GuardrailError(`events.ndjson: first event is not run.start (got ${startEvent.event})`, {
            code: 'corrupted_state',
            provider: 'run-state',
            details: { runDir, firstEvent: startEvent.event },
        });
    }
    const phases = startEvent.phases.map((name, idx) => EMPTY_PHASE_SHELL(name, idx));
    const state = {
        schema_version: RUN_STATE_SCHEMA_VERSION,
        runId: startEvent.runId,
        startedAt: startEvent.ts,
        status: 'pending',
        phases,
        currentPhaseIdx: 0,
        totalCostUSD: 0,
        // Seeded with the final seq; applyEvent keeps this in sync per event.
        lastEventSeq: events.length > 0 ? events[events.length - 1].seq : 0,
        writerId: startEvent.writerId,
        cwd: '',
        config: startEvent.config,
    };
    for (const ev of events) {
        applyEvent(state, ev);
    }
    return state;
}
|
|
365
|
+
/**
 * Fetch the phase record at `idx`, growing the phase array with pending
 * stubs if an event references an index beyond the registered phases
 * (defensive — shouldn't happen in normal flow, but keeps recovery
 * lenient). When the stored name disagrees with the event's, the event
 * wins.
 */
function getPhase(state, idx, name) {
    for (let next = state.phases.length; next <= idx; next++) {
        state.phases.push(EMPTY_PHASE_SHELL(name, next));
    }
    const phase = state.phases[idx];
    if (phase.name !== name) {
        phase.name = name; // accept rename if event disagrees with stub
    }
    return phase;
}
|
|
378
|
+
/**
 * Apply one event to `state` in place. Called once per event, in seq order,
 * by foldEvents. Always records the event's seq in `state.lastEventSeq`,
 * then dispatches on the event's discriminant tag.
 */
function applyEvent(state, ev) {
    state.lastEventSeq = ev.seq;
    switch (ev.event) {
        case 'run.start':
            // Already seeded above; nothing to do (idempotent here).
            state.status = 'pending';
            break;
        case 'run.complete':
            state.status = ev.status;
            state.endedAt = ev.ts;
            // totalCostUSD is also tallied per-phase; ev.totalCostUSD is the
            // writer's authoritative running total.
            state.totalCostUSD = ev.totalCostUSD;
            break;
        case 'run.warning':
        case 'run.recovery':
        case 'index.rebuilt':
        case 'lock.takeover':
        case 'budget.check':
            // Pure observability; no state mutation needed. The runner reads
            // events.ndjson directly to compute actualSoFar — replay does not
            // need to track budget decisions for state-correctness purposes.
            break;
        case 'phase.start': {
            state.status = 'running';
            state.currentPhaseIdx = ev.phaseIdx;
            const p = getPhase(state, ev.phaseIdx, ev.phase);
            p.status = 'running';
            p.idempotent = ev.idempotent;
            p.hasSideEffects = ev.hasSideEffects;
            p.startedAt = ev.ts;
            p.attempts = ev.attempt;
            break;
        }
        case 'phase.success': {
            const p = getPhase(state, ev.phaseIdx, ev.phase);
            p.status = 'succeeded';
            p.endedAt = ev.ts;
            p.durationMs = ev.durationMs;
            // Copy, don't alias — the event object must stay immutable.
            p.artifacts = ev.artifacts.slice();
            // If this was the last phase, the next event should be run.complete;
            // we don't presume that here.
            break;
        }
        case 'phase.failed': {
            const p = getPhase(state, ev.phaseIdx, ev.phase);
            p.status = 'failed';
            p.endedAt = ev.ts;
            p.durationMs = ev.durationMs;
            p.lastError = ev.error;
            state.status = 'paused';
            break;
        }
        case 'phase.aborted': {
            const p = getPhase(state, ev.phaseIdx, ev.phase);
            p.status = 'aborted';
            p.endedAt = ev.ts;
            state.status = 'aborted';
            break;
        }
        case 'phase.cost': {
            const p = getPhase(state, ev.phaseIdx, ev.phase);
            p.costUSD += ev.costUSD;
            state.totalCostUSD += ev.costUSD;
            break;
        }
        case 'phase.externalRef': {
            const p = getPhase(state, ev.phaseIdx, ev.phase);
            const ref = ev.ref;
            // Dedup by kind+id to keep replays idempotent on multiple emits.
            const dup = p.externalRefs.find(r => r.kind === ref.kind && r.id === ref.id);
            if (!dup)
                p.externalRefs.push(ref);
            break;
        }
        case 'phase.needs-human': {
            const p = getPhase(state, ev.phaseIdx, ev.phase);
            p.status = 'failed'; // surfaces as paused at the run level
            p.lastError = `needs-human: ${ev.reason}`;
            state.status = 'paused';
            break;
        }
        case 'replay.override': {
            // Phase 6 — purely advisory in the snapshot fold (the override itself
            // happened at decision time; the subsequent phase.start/.success or
            // .failed events drive state changes). We capture it on the phase's
            // meta so `runs show` can surface that an override was applied.
            const p = getPhase(state, ev.phaseIdx, ev.phase);
            const meta = (p.meta ?? {});
            const list = Array.isArray(meta.replayOverrides) ? meta.replayOverrides : [];
            list.push({ ts: ev.ts, reason: ev.reason, refsConsulted: ev.refsConsulted });
            meta.replayOverrides = list;
            p.meta = meta;
            break;
        }
        default: {
            // Exhaustiveness check. Adding a new event variant without updating
            // this switch will produce a TS error here at compile time.
            const _exhaustive = ev;
            void _exhaustive;
        }
    }
}
|
|
481
|
+
/**
 * Project a RunState into the compact row shape consumed by `runs list`.
 * Pure — no IO, no mutation of `state`. Optional fields (endedAt,
 * lastPhase, recovered) are omitted entirely rather than set to undefined.
 */
export function stateToIndexEntry(state, recovered = false) {
    // "Last phase" = the name of the final phase that has progressed beyond
    // 'pending' (i.e. the most recently advanced one).
    let lastTouched;
    for (const phase of state.phases) {
        if (phase.status !== 'pending') {
            lastTouched = phase.name;
        }
    }
    const entry = {
        runId: state.runId,
        status: state.status,
        startedAt: state.startedAt,
        totalCostUSD: state.totalCostUSD,
    };
    if (state.endedAt !== undefined) {
        entry.endedAt = state.endedAt;
    }
    if (lastTouched !== undefined) {
        entry.lastPhase = lastTouched;
    }
    if (recovered) {
        entry.recovered = true;
    }
    return entry;
}
|
|
504
|
+
//# sourceMappingURL=events.js.map
|
|
@@ -0,0 +1,61 @@
|
|
|
1
|
+
import type { RunEventInput, WriterId } from './types.ts';
|
|
2
|
+
interface LockMeta {
|
|
3
|
+
writerId: WriterId;
|
|
4
|
+
acquiredAt: string;
|
|
5
|
+
/** Last seq the writer confirmed it had appended. Optional — useful for
|
|
6
|
+
* takeover paths that want to resume the seq counter without rescanning
|
|
7
|
+
* events.ndjson. */
|
|
8
|
+
lastSeq?: number;
|
|
9
|
+
}
|
|
10
|
+
/** Hash the hostname so we never persist raw machine identity. */
|
|
11
|
+
export declare function makeWriterId(): WriterId;
|
|
12
|
+
/** True iff a process with the given PID is alive on THIS host. We refuse
|
|
13
|
+
* to make a determination for off-host PIDs (different hostHash) and treat
|
|
14
|
+
* them as alive — better to bail with `lock_held` than to silently steal a
|
|
15
|
+
* lock owned by another machine sharing a network filesystem. */
|
|
16
|
+
export declare function isPidAlive(writerId: WriterId | null): boolean;
|
|
17
|
+
export interface AcquireRunLockOptions {
|
|
18
|
+
/** Override proper-lockfile stale ms. Default 10_000. */
|
|
19
|
+
stale?: number;
|
|
20
|
+
/** Retry config, forwarded to proper-lockfile. Default: no retries (we
|
|
21
|
+
* want fail-fast on contention so the caller can surface an actionable
|
|
22
|
+
* error). Set to a number / OperationOptions for blocking acquires. */
|
|
23
|
+
retries?: number;
|
|
24
|
+
/** Override the writerId. Tests use this to simulate cross-process owners
|
|
25
|
+
* without forking. Production callers should let it default. */
|
|
26
|
+
writerId?: WriterId;
|
|
27
|
+
}
|
|
28
|
+
export interface RunLockHandle {
|
|
29
|
+
writerId: WriterId;
|
|
30
|
+
/** Releases the lock. Idempotent. */
|
|
31
|
+
release: () => Promise<void>;
|
|
32
|
+
}
|
|
33
|
+
/** Acquire the per-run advisory lock. Throws GuardrailError(lock_held) if
|
|
34
|
+
* another writer owns it. The caller is expected to hold the returned
|
|
35
|
+
* handle for the duration of the run and call `release()` on shutdown. */
|
|
36
|
+
export declare function acquireRunLock(runDir: string, opts?: AcquireRunLockOptions): Promise<RunLockHandle>;
|
|
37
|
+
/** Update the lastSeq field in the lock metadata. Best-effort; never throws.
|
|
38
|
+
* The events.ndjson is the source of truth, so a missed update is harmless. */
|
|
39
|
+
export declare function updateLockSeq(runDir: string, lastSeq: number): void;
|
|
40
|
+
/** Non-blocking peek at who currently owns the lock. Returns null if no
|
|
41
|
+
* metadata is present (which generally means no live writer either, but
|
|
42
|
+
* callers should not infer aliveness from that alone). */
|
|
43
|
+
export declare function peekLockOwner(runDir: string): LockMeta | null;
|
|
44
|
+
/** Forcibly take ownership. Returns the `lock.takeover` event the caller
|
|
45
|
+
* should append (the events log is sequenced by the appender, so this
|
|
46
|
+
* function deliberately does NOT write to events.ndjson itself).
|
|
47
|
+
*
|
|
48
|
+
* Throws GuardrailError(lock_held) if the previous writer is still alive
|
|
49
|
+
* per `isPidAlive` — taking over a live writer would corrupt the log.
|
|
50
|
+
*
|
|
51
|
+
* After this call returns, the caller should:
|
|
52
|
+
* 1. Append the returned event via `appendEvent`.
|
|
53
|
+
* 2. Call `acquireRunLock` to obtain the new handle.
|
|
54
|
+
* Both steps run after takeover. We do not auto-acquire here so the
|
|
55
|
+
* caller can decide on its own retry / stale-ms strategy.
|
|
56
|
+
*/
|
|
57
|
+
export declare function forceTakeover(runDir: string, reason: string): RunEventInput & {
|
|
58
|
+
event: 'lock.takeover';
|
|
59
|
+
};
|
|
60
|
+
export {};
|
|
61
|
+
//# sourceMappingURL=lock.d.ts.map
|