@delegance/claude-autopilot 6.2.2 → 7.2.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. package/CHANGELOG.md +886 -0
  2. package/README.md +10 -1
  3. package/bin/_launcher.js +38 -23
  4. package/dist/src/cli/autopilot.d.ts +4 -0
  5. package/dist/src/cli/autopilot.js +15 -0
  6. package/dist/src/cli/dashboard/index.d.ts +5 -0
  7. package/dist/src/cli/dashboard/index.js +49 -0
  8. package/dist/src/cli/dashboard/login.d.ts +22 -0
  9. package/dist/src/cli/dashboard/login.js +260 -0
  10. package/dist/src/cli/dashboard/logout.d.ts +12 -0
  11. package/dist/src/cli/dashboard/logout.js +45 -0
  12. package/dist/src/cli/dashboard/status.d.ts +30 -0
  13. package/dist/src/cli/dashboard/status.js +65 -0
  14. package/dist/src/cli/dashboard/upload.d.ts +16 -0
  15. package/dist/src/cli/dashboard/upload.js +48 -0
  16. package/dist/src/cli/engine-flag-deprecation.d.ts +14 -0
  17. package/dist/src/cli/engine-flag-deprecation.js +20 -0
  18. package/dist/src/cli/help-text.d.ts +1 -1
  19. package/dist/src/cli/help-text.js +44 -28
  20. package/dist/src/cli/index.d.ts +2 -1
  21. package/dist/src/cli/index.js +72 -17
  22. package/dist/src/cli/scaffold.d.ts +39 -0
  23. package/dist/src/cli/scaffold.js +287 -0
  24. package/dist/src/cli/setup.d.ts +30 -0
  25. package/dist/src/cli/setup.js +137 -0
  26. package/dist/src/core/run-state/events.js +10 -2
  27. package/dist/src/core/run-state/resolve-engine.d.ts +26 -81
  28. package/dist/src/core/run-state/resolve-engine.js +39 -155
  29. package/dist/src/core/run-state/run-phase-with-lifecycle.d.ts +5 -9
  30. package/dist/src/core/run-state/run-phase-with-lifecycle.js +26 -19
  31. package/dist/src/core/run-state/state.d.ts +1 -1
  32. package/dist/src/core/run-state/types.d.ts +8 -2
  33. package/dist/src/core/run-state/types.js +8 -2
  34. package/dist/src/dashboard/auto-upload.d.ts +26 -0
  35. package/dist/src/dashboard/auto-upload.js +107 -0
  36. package/dist/src/dashboard/config.d.ts +22 -0
  37. package/dist/src/dashboard/config.js +109 -0
  38. package/dist/src/dashboard/upload/canonical.d.ts +3 -0
  39. package/dist/src/dashboard/upload/canonical.js +16 -0
  40. package/dist/src/dashboard/upload/chain.d.ts +9 -0
  41. package/dist/src/dashboard/upload/chain.js +27 -0
  42. package/dist/src/dashboard/upload/snapshot.d.ts +23 -0
  43. package/dist/src/dashboard/upload/snapshot.js +66 -0
  44. package/dist/src/dashboard/upload/uploader.d.ts +54 -0
  45. package/dist/src/dashboard/upload/uploader.js +330 -0
  46. package/package.json +18 -3
  47. package/scripts/test-runner.mjs +4 -0
@@ -0,0 +1,9 @@
1
+ export declare const ZERO_HASH: string;
2
+ export declare function hashChunk(prevHashHex: string, body: Buffer): string;
3
+ /**
4
+ * Compute the chain root for an ordered sequence of chunks.
5
+ * Unambiguous loop form (per spec): prev=ZERO_HASH; for seq in 0..N-1:
6
+ * h[seq] = sha256(chunk[seq] || prev); prev = h[seq]; root = prev.
7
+ */
8
+ export declare function computeChainRoot(chunks: Buffer[]): string;
9
+ //# sourceMappingURL=chain.d.ts.map
@@ -0,0 +1,27 @@
// Parity copy of apps/web/lib/upload/chain.ts.
// CLI ↔ web hash agreement is asserted in tests/dashboard/parity.test.ts.
import { createHash } from 'node:crypto';

/** Chain seed: 64 zero hex chars (i.e. 32 zero bytes). */
export const ZERO_HASH = '0'.repeat(64);

// Hoisted so the validity check doesn't rebuild the regex per call.
const LOWER_HEX_64 = /^[0-9a-f]{64}$/;

/**
 * Link one chunk into the chain: sha256(body || bytes(prevHashHex)), hex.
 * The previous hash is appended as raw bytes, not hex text.
 */
export function hashChunk(prevHashHex, body) {
    if (!LOWER_HEX_64.test(prevHashHex)) {
        throw new Error(`hashChunk: prev hash must be 64 lowercase hex chars`);
    }
    return createHash('sha256')
        .update(body)
        .update(Buffer.from(prevHashHex, 'hex'))
        .digest('hex');
}
/**
 * Compute the chain root for an ordered sequence of chunks.
 * Unambiguous loop form (per spec): prev=ZERO_HASH; for seq in 0..N-1:
 * h[seq] = sha256(chunk[seq] || prev); prev = h[seq]; root = prev.
 * An empty sequence therefore yields ZERO_HASH.
 */
export function computeChainRoot(chunks) {
    return chunks.reduce((prev, chunk) => hashChunk(prev, chunk), ZERO_HASH);
}
//# sourceMappingURL=chain.js.map
@@ -0,0 +1,23 @@
1
+ export interface SnapshotPaths {
2
+ snapshotDir: string;
3
+ events: string;
4
+ state: string;
5
+ }
6
+ export interface SnapshotResult extends SnapshotPaths {
7
+ /** Bytes of the snapshot events file. May be 0. */
8
+ eventsBytes: number;
9
+ }
10
+ export declare class SnapshotMismatchError extends Error {
11
+ constructor(message: string);
12
+ }
13
+ /**
14
+ * Copy events.ndjson + state.json from `runDir` to `runDir/.upload-snapshot/`.
15
+ *
16
+ * Defense in depth: stat-before/stat-after on the source files, fail
17
+ * loudly if size or mtime changes during copy. Per spec, snapshot is
18
+ * post-`run.complete` only (writers are flushed) so this should never
19
+ * fire — but if it does, abort rather than upload a torn read.
20
+ */
21
+ export declare function snapshotRun(runDir: string): Promise<SnapshotResult>;
22
+ export declare function deleteSnapshot(runDir: string): Promise<void>;
23
+ //# sourceMappingURL=snapshot.d.ts.map
@@ -0,0 +1,66 @@
1
+ // Snapshot-before-upload — copy events.ndjson + state.json to
2
+ // <runDir>/.upload-snapshot/ atomically before chunking begins.
3
+ // The uploader then reads only the snapshot, so the writer streaming new
4
+ // events into events.ndjson can't shift bytes mid-upload.
5
+ import { promises as fs } from 'node:fs';
6
+ import * as path from 'node:path';
/**
 * Raised when a source file's size or mtime changes while it is being
 * copied into the upload snapshot (torn-read guard in snapshotRun).
 */
export class SnapshotMismatchError extends Error {
    // Named so logs/handlers show "SnapshotMismatchError: …".
    name = 'SnapshotMismatchError';
    constructor(message) {
        super(message);
    }
}
/**
 * Copy events.ndjson + state.json from `runDir` to `runDir/.upload-snapshot/`.
 *
 * Defense in depth: stat-before/stat-after on the source files, fail
 * loudly if size or mtime changes during copy. Per spec, snapshot is
 * post-`run.complete` only (writers are flushed) so this should never
 * fire — but if it does, abort rather than upload a torn read.
 *
 * @param runDir run directory containing events.ndjson (required) and
 *   state.json (optional — may not exist yet for an in-flight run).
 * @returns snapshot paths plus the byte size of the snapshotted events file.
 * @throws SnapshotMismatchError if a source file changed mid-copy.
 */
export async function snapshotRun(runDir) {
    const eventsSrc = path.join(runDir, 'events.ndjson');
    const stateSrc = path.join(runDir, 'state.json');
    const snapshotDir = path.join(runDir, '.upload-snapshot');
    const eventsDst = path.join(snapshotDir, 'events.ndjson');
    const stateDst = path.join(snapshotDir, 'state.json');
    const eventsBefore = await fs.stat(eventsSrc);
    let stateBefore = null;
    try {
        stateBefore = await fs.stat(stateSrc);
    }
    catch {
        // state.json may not exist for an in-flight run; we still snapshot events.
    }
    await fs.mkdir(snapshotDir, { recursive: true });
    await fs.copyFile(eventsSrc, eventsDst);
    if (stateBefore) {
        await fs.copyFile(stateSrc, stateDst);
    }
    const eventsAfter = await fs.stat(eventsSrc);
    if (eventsAfter.size !== eventsBefore.size || eventsAfter.mtimeMs !== eventsBefore.mtimeMs) {
        throw new SnapshotMismatchError(`events.ndjson changed during snapshot (size ${eventsBefore.size}->${eventsAfter.size}, mtime ${eventsBefore.mtimeMs}->${eventsAfter.mtimeMs})`);
    }
    if (stateBefore) {
        const stateAfter = await fs.stat(stateSrc);
        if (stateAfter.size !== stateBefore.size || stateAfter.mtimeMs !== stateBefore.mtimeMs) {
            // Fix: include mtime in the message — the guard also trips on
            // mtime-only changes, and the old size-only message hid that.
            throw new SnapshotMismatchError(`state.json changed during snapshot (size ${stateBefore.size}->${stateAfter.size}, mtime ${stateBefore.mtimeMs}->${stateAfter.mtimeMs})`);
        }
    }
    return {
        snapshotDir,
        events: eventsDst,
        state: stateDst,
        eventsBytes: eventsAfter.size,
    };
}
/** Remove `<runDir>/.upload-snapshot`; safe to call when it doesn't exist. */
export async function deleteSnapshot(runDir) {
    const target = path.join(runDir, '.upload-snapshot');
    try {
        await fs.rm(target, { recursive: true, force: true });
    }
    catch {
        // idempotent — ignore races with concurrent cleanup
    }
}
66
+ //# sourceMappingURL=snapshot.js.map
@@ -0,0 +1,54 @@
1
+ export interface UploadOptions {
2
+ signal?: AbortSignal;
3
+ baseUrl?: string;
4
+ apiKey: string;
5
+ onProgress?: (event: ProgressEvent) => void;
6
+ /** Test seam — substitute fetch impl. Defaults to global fetch. */
7
+ fetchImpl?: typeof fetch;
8
+ }
9
+ export type ProgressEvent = {
10
+ kind: 'snapshot';
11
+ bytes: number;
12
+ } | {
13
+ kind: 'session';
14
+ resumed: boolean;
15
+ nextExpectedSeq: number;
16
+ } | {
17
+ kind: 'chunk-uploaded';
18
+ seq: number;
19
+ total: number;
20
+ } | {
21
+ kind: 'finalized';
22
+ };
23
+ export interface UploadResult {
24
+ ok: boolean;
25
+ url?: string;
26
+ skipped?: boolean;
27
+ error?: string;
28
+ }
29
+ export declare class UploadError extends Error {
30
+ readonly status: number | null;
31
+ constructor(message: string, status?: number | null);
32
+ }
33
+ /**
34
+ * Phase 3 — thrown when /api/upload-session returns 402 with a structured
35
+ * `limit_reached` payload. Auto-upload entry point detects this subclass
36
+ * and prints a friendly message without retrying or overriding the run's
37
+ * exit code.
38
+ */
39
+ export declare class UploadLimitError extends UploadError {
40
+ readonly payload: {
41
+ limit: string;
42
+ current: number;
43
+ max: number;
44
+ upgrade_url: string;
45
+ };
46
+ constructor(message: string, payload: {
47
+ limit: string;
48
+ current: number;
49
+ max: number;
50
+ upgrade_url: string;
51
+ });
52
+ }
53
+ export declare function uploadRun(runId: string, runDir: string, opts: UploadOptions): Promise<UploadResult>;
54
+ //# sourceMappingURL=uploader.d.ts.map
@@ -0,0 +1,330 @@
1
+ // CLI uploader — snapshot, chunk, retry, finalize.
2
+ //
3
+ // Flow:
4
+ // 1. Empty events.ndjson check → skip upload (Phase 2.2 returns 422 on
5
+ // expectedChunkCount=0; never call it).
6
+ // 2. Snapshot events.ndjson + state.json to <runDir>/.upload-snapshot/.
7
+ // 3. Bootstrap session: GET dashboard upload-session for resume; if 404
8
+ // mint fresh via POST /api/upload-session (Phase 2.2 endpoint, accepts
9
+ // Bearer clp_<key> via resolveCaller).
10
+ // 4. PUT each chunk with x-chunk-prev-hash; retry transient 5xx.
11
+ // 5. POST /api/runs/:runId/finalize with chainRoot + state.
12
+ // 6. On success, delete the snapshot dir.
13
+ import { promises as fs } from 'node:fs';
14
+ import * as path from 'node:path';
15
+ import { hashChunk, ZERO_HASH } from "./chain.js";
16
+ import { sha256OfCanonical } from "./canonical.js";
17
+ import { snapshotRun, deleteSnapshot, SnapshotMismatchError } from "./snapshot.js";
const CHUNK_BYTES = 1024 * 1024; // 1 MiB matches server MAX_CHUNK_BYTES
const DEFAULT_RETRY_DELAYS_MS = [1000, 4000, 16000, 64000];
/**
 * Resolve the retry backoff schedule (ms between attempts).
 * Test seam — CLAUDE_AUTOPILOT_UPLOAD_RETRY_MS may hold a comma-separated
 * override so transient-failure assertions don't add minutes to the suite.
 * Entries that are not finite non-negative integers are dropped.
 */
function resolveRetryDelays() {
    const raw = process.env.CLAUDE_AUTOPILOT_UPLOAD_RETRY_MS;
    if (!raw) {
        return DEFAULT_RETRY_DELAYS_MS;
    }
    const delays = [];
    for (const part of raw.split(',')) {
        const value = Number.parseInt(part.trim(), 10);
        if (Number.isFinite(value) && value >= 0) {
            delays.push(value);
        }
    }
    return delays;
}
/**
 * Error for any non-recoverable upload failure. `status` carries the HTTP
 * status when the failure came from a response, else null.
 */
export class UploadError extends Error {
    status;
    constructor(message, status = null) {
        super(message);
        // Consistency fix: set `name` like SnapshotMismatchError does, so
        // logs show "UploadError: …" instead of the generic "Error: …".
        this.name = 'UploadError';
        this.status = status;
    }
}
/**
 * Phase 3 — thrown when /api/upload-session returns 402 with a structured
 * `limit_reached` payload. Auto-upload entry point detects this subclass
 * and prints a friendly message without retrying or overriding the run's
 * exit code.
 */
export class UploadLimitError extends UploadError {
    payload;
    constructor(message, payload) {
        super(message, 402);
        this.name = 'UploadLimitError';
        this.payload = payload;
    }
}
/** Dashboard base URL: explicit option wins, then env override, then prod. */
function resolveBaseUrl(opts) {
    if (opts.baseUrl != null) {
        return opts.baseUrl;
    }
    const fromEnv = process.env.AUTOPILOT_DASHBOARD_BASE_URL;
    return fromEnv ?? 'https://autopilot.dev';
}
/**
 * Throw immediately when the abort signal has fired. Re-throws the
 * signal's reason if it is an Error so callers see the original cause;
 * otherwise throws a generic "upload aborted" error. No-op when the
 * signal is missing or still live.
 */
function checkAborted(signal) {
    if (!signal?.aborted) {
        return;
    }
    const { reason } = signal;
    if (reason instanceof Error) {
        throw reason;
    }
    throw new Error('upload aborted');
}
/**
 * Sleep for `ms`, abortable via `signal`. Rejects with Error('aborted')
 * when the signal fires before the timer.
 *
 * Fix: the previous version never removed its 'abort' listener after the
 * timer resolved, so each retry cycle leaked one listener on a long-lived
 * signal (and could eventually trip Node's max-listeners warning). The
 * listener is now detached on the resolve path.
 */
async function delay(ms, signal) {
    return new Promise((resolve, reject) => {
        const onAbort = () => {
            clearTimeout(t);
            reject(new Error('aborted'));
        };
        const t = setTimeout(() => {
            signal?.removeEventListener('abort', onAbort);
            resolve();
        }, ms);
        if (signal) {
            if (signal.aborted)
                onAbort();
            else
                signal.addEventListener('abort', onAbort, { once: true });
        }
    });
}
/**
 * Read `filePath` fully into an ordered array of Buffers of at most
 * CHUNK_BYTES each (the final chunk may be shorter; an empty file yields
 * an empty array). Throws UploadError if the OS returns a short read.
 */
async function readChunks(filePath) {
    const handle = await fs.open(filePath, 'r');
    try {
        const { size } = await handle.stat();
        const chunks = [];
        for (let offset = 0; offset < size; offset += CHUNK_BYTES) {
            const length = Math.min(CHUNK_BYTES, size - offset);
            const buf = Buffer.alloc(length);
            const { bytesRead } = await handle.read(buf, 0, length, offset);
            if (bytesRead !== length) {
                throw new UploadError(`short read at offset ${offset}: ${bytesRead}/${length}`);
            }
            chunks.push(buf);
        }
        return chunks;
    }
    finally {
        await handle.close();
    }
}
/**
 * fetch with bounded retries and backoff from resolveRetryDelays().
 * Network errors always retry (unless the signal aborted, in which case
 * they rethrow immediately); 5xx responses retry only when
 * `is5xxRetryable` is set. The final attempt's response is returned (even
 * if 5xx) or its error rethrown.
 */
async function fetchWithRetry(url, init, fetchImpl, signal, is5xxRetryable) {
    const delays = resolveRetryDelays();
    let lastErr = null;
    for (let attempt = 0; attempt <= delays.length; attempt++) {
        checkAborted(signal);
        const hasBudget = attempt < delays.length;
        try {
            const res = await fetchImpl(url, init);
            const retryable5xx = is5xxRetryable && res.status >= 500 && res.status < 600;
            if (retryable5xx && hasBudget) {
                await delay(delays[attempt], signal);
                continue;
            }
            return res;
        }
        catch (err) {
            if (signal?.aborted) {
                throw err;
            }
            lastErr = err;
            if (!hasBudget) {
                throw err;
            }
            await delay(delays[attempt], signal);
        }
    }
    // Unreachable in practice (final attempt returns or throws above).
    throw lastErr instanceof Error ? lastErr : new UploadError('exhausted retries');
}
/**
 * Obtain an upload session for `runId`: try resuming an existing session
 * first (GET dashboard endpoint); on 404, mint a fresh one via the Phase
 * 2.2 POST /api/upload-session. Throws UploadLimitError on a structured
 * 402 (plan cap) and UploadError on any other non-success status. A
 * freshly minted session is normalized to nextExpectedSeq = 0.
 */
async function bootstrapSession(baseUrl, apiKey, runId, expectedChunkCount, expectedBytes, fetchImpl, signal) {
    const auth = { authorization: `Bearer ${apiKey}` };
    // Resume path first.
    const resumeUrl = `${baseUrl}/api/dashboard/runs/${encodeURIComponent(runId)}/upload-session`;
    const resumeRes = await fetchWithRetry(resumeUrl, { method: 'GET', headers: auth, signal }, fetchImpl, signal, true);
    if (resumeRes.status === 200) {
        return { session: await resumeRes.json(), resumed: true };
    }
    if (resumeRes.status !== 404) {
        const detail = await resumeRes.text().catch(() => '');
        throw new UploadError(`resume bootstrap failed: ${resumeRes.status} ${detail}`, resumeRes.status);
    }
    // Mint fresh via Phase 2.2 endpoint.
    const mintRes = await fetchWithRetry(`${baseUrl}/api/upload-session`, {
        method: 'POST',
        headers: { ...auth, 'content-type': 'application/json' },
        body: JSON.stringify({ runId, expectedChunkCount, expectedBytes }),
        signal,
    }, fetchImpl, signal, true);
    if (mintRes.status === 402) {
        // Phase 3 — structured 402 means we hit a runs/storage cap. Surface
        // as a typed error so the auto-upload caller can print a friendly
        // message without retrying or overriding the run's exit code.
        const parsed = await mintRes.json().catch(() => ({}));
        const limit = parsed.limit ?? 'unknown';
        const current = parsed.current ?? 0;
        const max = parsed.max ?? 0;
        const upgradeUrl = parsed.upgrade_url ?? '';
        throw new UploadLimitError(`upload rejected — ${limit} cap reached (${current}/${max}). Upgrade at ${upgradeUrl}`, { limit, current, max, upgrade_url: upgradeUrl });
    }
    if (mintRes.status !== 201) {
        const detail = await mintRes.text().catch(() => '');
        throw new UploadError(`mint failed: ${mintRes.status} ${detail}`, mintRes.status);
    }
    const minted = await mintRes.json();
    return {
        session: { ...minted, session: { ...minted.session, nextExpectedSeq: 0 } },
        resumed: false,
    };
}
/**
 * Upload one run's events + state to the dashboard.
 *
 * Flow: skip-if-empty → snapshot → bootstrap session (resume or mint) →
 * PUT chunks with prev-hash chaining → finalize with the chain root →
 * delete the snapshot. Failures are returned as `{ ok: false, error }`
 * rather than thrown, with two exceptions handled in the catch block:
 * UploadLimitError re-propagates (plan cap — caller prints the friendly
 * message) and SnapshotMismatchError maps to a failed result.
 *
 * @param runId  run identifier, interpolated into dashboard API routes.
 * @param runDir directory holding events.ndjson and (optionally) state.json.
 * @param opts   credentials, base URL, abort signal, progress callback.
 */
export async function uploadRun(runId, runDir, opts) {
    const fetchImpl = opts.fetchImpl ?? fetch;
    const baseUrl = resolveBaseUrl(opts);
    const signal = opts.signal;
    try {
        // (1) Empty events check — skip cleanly so server's 422 isn't tripped.
        const eventsPath = path.join(runDir, 'events.ndjson');
        let eventsStat;
        try {
            eventsStat = await fs.stat(eventsPath);
        }
        catch {
            // Missing events file: nothing to upload; report a clean skip.
            return { ok: true, skipped: true };
        }
        if (eventsStat.size === 0) {
            return { ok: true, skipped: true };
        }
        // (2) Snapshot — the uploader reads only the copy, so a writer
        // streaming new events can't shift bytes mid-upload.
        checkAborted(signal);
        const snap = await snapshotRun(runDir);
        opts.onProgress?.({ kind: 'snapshot', bytes: snap.eventsBytes });
        const chunks = await readChunks(snap.events);
        const expectedChunkCount = chunks.length;
        // (3) Bootstrap. Phase 3 — pass expectedBytes for storage cap preflight.
        checkAborted(signal);
        const { session, resumed } = await bootstrapSession(baseUrl, opts.apiKey, runId, expectedChunkCount, snap.eventsBytes, fetchImpl, signal);
        const startSeq = session.session.nextExpectedSeq ?? 0;
        opts.onProgress?.({ kind: 'session', resumed, nextExpectedSeq: startSeq });
        // (4) Stream chunks. Walk the chain forward from seq 0 even when
        // resuming so prev-hash for seq=startSeq is correct.
        let prev = ZERO_HASH;
        for (let i = 0; i < startSeq; i++) {
            const chunk = chunks[i];
            if (!chunk)
                throw new UploadError(`missing chunk at seq ${i} during prefix replay`);
            prev = hashChunk(prev, chunk);
        }
        let token = session.uploadToken;
        // chainRoot tracks the hash of the last chunk linked so far; when
        // startSeq === chunks.length (fully uploaded already) it is the
        // replayed prefix root.
        let chainRoot = prev;
        let reauthAttempts = 0; // bugbot HIGH — bound the 401 re-bootstrap retry
        const MAX_REAUTH_ATTEMPTS = 1;
        for (let seq = startSeq; seq < chunks.length; seq++) {
            checkAborted(signal);
            const chunk = chunks[seq];
            if (!chunk)
                throw new UploadError(`missing chunk at seq ${seq}`);
            const thisHash = hashChunk(prev, chunk);
            const url = `${baseUrl}/api/runs/${encodeURIComponent(runId)}/events/${seq}`;
            const init = {
                method: 'PUT',
                headers: {
                    authorization: `Bearer ${token}`,
                    'content-type': 'application/octet-stream',
                    // Server verifies this chain link before accepting the chunk.
                    'x-chunk-prev-hash': prev,
                },
                body: chunk,
                signal,
            };
            const res = await fetchWithRetry(url, init, fetchImpl, signal, true);
            if (res.status === 200 || res.status === 201) {
                // Chunk accepted — advance the chain.
                prev = thisHash;
                chainRoot = thisHash;
                opts.onProgress?.({ kind: 'chunk-uploaded', seq, total: chunks.length });
                continue;
            }
            if (res.status === 401) {
                // bugbot HIGH — bound retries. Token might be expired, OR the API
                // key is revoked (bootstrap succeeds but minted tokens are still
                // 401). Without a counter, the loop spins forever.
                if (reauthAttempts >= MAX_REAUTH_ATTEMPTS) {
                    const text = await res.text().catch(() => '');
                    throw new UploadError(`chunk ${seq} unauthorized after ${reauthAttempts} re-bootstrap attempt(s); check API key validity. ${text}`, res.status);
                }
                reauthAttempts++;
                const reboot = await bootstrapSession(baseUrl, opts.apiKey, runId, expectedChunkCount, snap.eventsBytes, fetchImpl, signal);
                token = reboot.session.uploadToken;
                // Replay the same seq with the fresh token.
                seq -= 1;
                continue;
            }
            if (res.status === 409) {
                // Duplicate chunk content with matching hash is treated as success
                // by the server (RPC path); treat as success here too if hash agrees.
                const text = await res.text().catch(() => '');
                if (/duplicate/i.test(text)) {
                    prev = thisHash;
                    chainRoot = thisHash;
                    opts.onProgress?.({ kind: 'chunk-uploaded', seq, total: chunks.length });
                    continue;
                }
                throw new UploadError(`chunk ${seq} rejected: ${res.status} ${text}`, res.status);
            }
            const text = await res.text().catch(() => '');
            throw new UploadError(`chunk ${seq} failed: ${res.status} ${text}`, res.status);
        }
        // (5) Finalize.
        checkAborted(signal);
        let stateJson = {};
        try {
            const raw = await fs.readFile(snap.state, 'utf-8');
            stateJson = JSON.parse(raw);
        }
        catch {
            // state.json absent or unparsable — finalize with an empty state.
            stateJson = {};
        }
        // sha256 not strictly needed here — server recomputes — but include for parity.
        void sha256OfCanonical(stateJson);
        const finalizeUrl = `${baseUrl}/api/runs/${encodeURIComponent(runId)}/finalize`;
        const finalRes = await fetchWithRetry(finalizeUrl, {
            method: 'POST',
            headers: {
                authorization: `Bearer ${token}`,
                'content-type': 'application/json',
            },
            body: JSON.stringify({
                chainRoot,
                expectedChunkCount,
                stateJson,
            }),
            signal,
        }, fetchImpl, signal, true);
        if (finalRes.status !== 200) {
            const text = await finalRes.text().catch(() => '');
            throw new UploadError(`finalize failed: ${finalRes.status} ${text}`, finalRes.status);
        }
        opts.onProgress?.({ kind: 'finalized' });
        // (6) Cleanup snapshot — only after a successful finalize, so a
        // failed upload can resume from the same snapshot bytes.
        await deleteSnapshot(runDir);
        return {
            ok: true,
            url: `${baseUrl}/runs/${encodeURIComponent(runId)}`,
        };
    }
    catch (err) {
        // Phase 3 — let UploadLimitError bubble so the auto-upload entry point
        // can print the friendly message + preserve the run's exit code.
        if (err instanceof UploadLimitError) {
            throw err;
        }
        if (err instanceof SnapshotMismatchError) {
            return { ok: false, error: `snapshot mismatch: ${err.message}` };
        }
        // 'aborted' is the message used by delay()'s rejection on signal abort.
        if (err.message === 'aborted') {
            return { ok: false, error: 'aborted' };
        }
        return { ok: false, error: err.message ?? String(err) };
    }
}
330
+ //# sourceMappingURL=uploader.js.map
package/package.json CHANGED
@@ -1,7 +1,10 @@
1
1
  {
2
2
  "name": "@delegance/claude-autopilot",
3
- "version": "6.2.2",
3
+ "version": "7.2.1",
4
4
  "type": "module",
5
+ "publishConfig": {
6
+ "tag": "next"
7
+ },
5
8
  "description": "Autonomous development pipeline for Claude Code: brainstorm → spec → plan → implement → migrate → validate → PR → review → merge. Multi-model, local-first, every phase a skill you can intervene in.",
6
9
  "keywords": [
7
10
  "claude-autopilot",
@@ -15,6 +18,10 @@
15
18
  "pipeline"
16
19
  ],
17
20
  "license": "MIT",
21
+ "workspaces": [
22
+ "apps/*",
23
+ "packages/*"
24
+ ],
18
25
  "repository": {
19
26
  "type": "git",
20
27
  "url": "https://github.com/axledbetter/claude-autopilot.git"
@@ -53,20 +60,27 @@
53
60
  "scripts": {
54
61
  "test": "node scripts/test-runner.mjs",
55
62
  "test:adapters:live": "node --test --import=tsx tests/adapters/live/vercel.cert.ts tests/adapters/live/fly.cert.ts tests/adapters/live/render.cert.ts",
63
+ "test:rls": "node --test --import=tsx tests/rls/*.test.ts",
56
64
  "typecheck": "tsc --noEmit",
57
65
  "build": "tsc -p tsconfig.build.json && node scripts/post-build-rewrite-imports.mjs",
58
66
  "prepublishOnly": "npm run build && npm test",
59
- "autoregress": "tsx scripts/autoregress.ts"
67
+ "autoregress": "tsx scripts/autoregress.ts",
68
+ "db:start": "bash scripts/db/start-supabase.sh",
69
+ "db:stop": "bash scripts/db/stop-supabase.sh",
70
+ "db:reset": "bash scripts/db/reset-supabase.sh"
60
71
  },
61
72
  "dependencies": {
73
+ "@supabase/supabase-js": "^2.97.0",
62
74
  "ajv": "^8",
63
75
  "ajv-formats": "^3.0.1",
76
+ "canonicalize": "^3.0.0",
64
77
  "dotenv": ">=16",
65
78
  "js-yaml": "^4",
66
79
  "minimatch": ">=9",
67
80
  "proper-lockfile": "^4.1.2",
68
81
  "shell-quote": "^1.8.3",
69
- "tsx": ">=4"
82
+ "tsx": ">=4",
83
+ "ulid": "^3.0.2"
70
84
  },
71
85
  "optionalDependencies": {
72
86
  "@anthropic-ai/sdk": "^0.91.1",
@@ -79,6 +93,7 @@
79
93
  "@types/node": "^25",
80
94
  "@types/proper-lockfile": "^4.1.4",
81
95
  "@types/shell-quote": "^1.7.5",
96
+ "supabase": "^2.20.0",
82
97
  "typescript": "^6"
83
98
  },
84
99
  "peerDependencies": {
@@ -4,6 +4,10 @@ import { spawnSync } from 'node:child_process';
4
4
 
5
5
  const files = [];
6
6
  for await (const f of glob('tests/**/*.test.ts')) {
7
+ // RLS tests require a live Supabase stack + env credentials; they run
8
+ // from a dedicated workflow (.github/workflows/db-tests.yml) via
9
+ // `npm run test:rls`, not from the general test runner.
10
+ if (f.startsWith('tests/rls/') || f.startsWith('tests\\rls\\')) continue;
7
11
  files.push(f);
8
12
  }
9
13
  files.sort();