surrogate-protocol-mcp 2.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE.md +30 -0
- package/README.md +198 -0
- package/dist/__tests__/gates.test.d.ts +2 -0
- package/dist/__tests__/gates.test.d.ts.map +1 -0
- package/dist/__tests__/gates.test.js +342 -0
- package/dist/__tests__/gates.test.js.map +1 -0
- package/dist/__tests__/observability.test.d.ts +2 -0
- package/dist/__tests__/observability.test.d.ts.map +1 -0
- package/dist/__tests__/observability.test.js +382 -0
- package/dist/__tests__/observability.test.js.map +1 -0
- package/dist/__tests__/state.test.d.ts +2 -0
- package/dist/__tests__/state.test.d.ts.map +1 -0
- package/dist/__tests__/state.test.js +415 -0
- package/dist/__tests__/state.test.js.map +1 -0
- package/dist/agent-runner.d.ts +3 -0
- package/dist/agent-runner.d.ts.map +1 -0
- package/dist/agent-runner.js +419 -0
- package/dist/agent-runner.js.map +1 -0
- package/dist/cli.d.ts +3 -0
- package/dist/cli.d.ts.map +1 -0
- package/dist/cli.js +763 -0
- package/dist/cli.js.map +1 -0
- package/dist/mcp-server.d.ts +3 -0
- package/dist/mcp-server.d.ts.map +1 -0
- package/dist/mcp-server.js +1382 -0
- package/dist/mcp-server.js.map +1 -0
- package/dist/utils/agent.d.ts +42 -0
- package/dist/utils/agent.d.ts.map +1 -0
- package/dist/utils/agent.js +231 -0
- package/dist/utils/agent.js.map +1 -0
- package/dist/utils/artifacts.d.ts +92 -0
- package/dist/utils/artifacts.d.ts.map +1 -0
- package/dist/utils/artifacts.js +338 -0
- package/dist/utils/artifacts.js.map +1 -0
- package/dist/utils/capsule.d.ts +28 -0
- package/dist/utils/capsule.d.ts.map +1 -0
- package/dist/utils/capsule.js +462 -0
- package/dist/utils/capsule.js.map +1 -0
- package/dist/utils/diagnostics.d.ts +62 -0
- package/dist/utils/diagnostics.d.ts.map +1 -0
- package/dist/utils/diagnostics.js +404 -0
- package/dist/utils/diagnostics.js.map +1 -0
- package/dist/utils/gates.d.ts +78 -0
- package/dist/utils/gates.d.ts.map +1 -0
- package/dist/utils/gates.js +661 -0
- package/dist/utils/gates.js.map +1 -0
- package/dist/utils/logging.d.ts +53 -0
- package/dist/utils/logging.d.ts.map +1 -0
- package/dist/utils/logging.js +187 -0
- package/dist/utils/logging.js.map +1 -0
- package/dist/utils/project-gates.d.ts +73 -0
- package/dist/utils/project-gates.d.ts.map +1 -0
- package/dist/utils/project-gates.js +520 -0
- package/dist/utils/project-gates.js.map +1 -0
- package/dist/utils/reconcile.d.ts +50 -0
- package/dist/utils/reconcile.d.ts.map +1 -0
- package/dist/utils/reconcile.js +262 -0
- package/dist/utils/reconcile.js.map +1 -0
- package/dist/utils/state.d.ts +241 -0
- package/dist/utils/state.d.ts.map +1 -0
- package/dist/utils/state.js +865 -0
- package/dist/utils/state.js.map +1 -0
- package/dist/utils/worktree.d.ts +27 -0
- package/dist/utils/worktree.d.ts.map +1 -0
- package/dist/utils/worktree.js +275 -0
- package/dist/utils/worktree.js.map +1 -0
- package/package.json +65 -0
|
@@ -0,0 +1,865 @@
|
|
|
1
|
+
// surrogate-tools/src/utils/state.ts
|
|
2
|
+
// Safe YAML state management - LLMs NEVER touch this file directly
|
|
3
|
+
import * as fs from 'fs';
|
|
4
|
+
import * as path from 'path';
|
|
5
|
+
import * as yaml from 'yaml';
|
|
6
|
+
// Resolve project root from current working directory
export const PROJECT_ROOT = process.cwd();
// All orchestrator state lives under .surrogate/ relative to the CWD.
const SURROGATE_DIR = '.surrogate';
// Single YAML document that is the authoritative shared state ("flight recorder").
const FLIGHT_RECORDER = path.join(SURROGATE_DIR, 'flight_recorder.yaml');
// Dated YAML files holding completed/failed records evicted from the recorder.
const ARCHIVE_DIR = path.join(SURROGATE_DIR, 'archive');
const STATUS_DIR = path.join(SURROGATE_DIR, 'status'); // Sharded agent status files
|
|
12
|
+
/**
 * Validate that evidence is sufficient for the error type.
 *
 * Baseline rule: the evidence object must carry process evidence
 * (exit_code and/or stderr_tail) or a log_path. On top of that, some
 * error types demand a specific field (gate name, conflicting files,
 * or the missing command).
 *
 * @param failure - { error_type, message, evidence }
 * @returns { valid: true } or { valid: false, error: string }
 */
export function validateFailureEvidence(failure) {
    const { error_type, evidence } = failure;
    // No process evidence AND no log path -> reject outright.
    if (evidence.exit_code === undefined &&
        evidence.stderr_tail === undefined &&
        evidence.log_path === undefined) {
        return {
            valid: false,
            error: 'Evidence required: provide exit_code/stderr_tail OR log_path',
        };
    }
    // Per-type requirements, expressed as guard clauses.
    if (error_type === 'GATE_FAILED' && !evidence.gate_name) {
        return { valid: false, error: 'GATE_FAILED requires evidence.gate_name' };
    }
    if (error_type === 'MERGE_CONFLICT' &&
        (!evidence.conflicting_files || evidence.conflicting_files.length === 0)) {
        return { valid: false, error: 'MERGE_CONFLICT requires evidence.conflicting_files' };
    }
    if (error_type === 'TOOLING_MISSING' && !evidence.command) {
        return { valid: false, error: 'TOOLING_MISSING requires evidence.command' };
    }
    return { valid: true };
}
|
|
46
|
+
// ═══════════════════════════════════════════════════════════════════════════
|
|
47
|
+
// STATE OPERATIONS
|
|
48
|
+
// ═══════════════════════════════════════════════════════════════════════════
|
|
49
|
+
const LOCK_FILE = path.join(SURROGATE_DIR, 'flight_recorder.lock');
const LOCK_TIMEOUT_MS = 10000; // 10 seconds max wait
const LOCK_RETRY_MS = 50; // Retry every 50ms
const LOCK_STALE_MS = 60000; // Consider lock stale after 60 seconds
/**
 * Advisory lock for YAML state file (WS1-3).
 * Prevents race conditions during concurrent read-modify-write operations.
 *
 * Spins for up to LOCK_TIMEOUT_MS, each pass attempting an exclusive
 * create ('wx') of the lock file. A lock older than LOCK_STALE_MS is
 * assumed abandoned and removed. Returns { acquired, release }; callers
 * must invoke release() in a finally block (see updateState).
 *
 * NOTE(review): the stale-check/unlink/create sequence is not atomic —
 * two processes may both unlink a stale lock; the subsequent 'wx' create
 * still guarantees only one wins the new lock.
 */
function acquireLock() {
    ensureDirectories();
    const startTime = Date.now();
    while (Date.now() - startTime < LOCK_TIMEOUT_MS) {
        try {
            // Check for stale lock (judged by file mtime, not content)
            if (fs.existsSync(LOCK_FILE)) {
                const lockStat = fs.statSync(LOCK_FILE);
                const lockAge = Date.now() - lockStat.mtimeMs;
                if (lockAge > LOCK_STALE_MS) {
                    console.error(`[State] Removing stale lock (age: ${Math.round(lockAge / 1000)}s)`);
                    fs.unlinkSync(LOCK_FILE);
                }
            }
            // Try to create lock file exclusively ('wx' throws EEXIST if it already exists)
            const fd = fs.openSync(LOCK_FILE, 'wx');
            fs.writeSync(fd, JSON.stringify({ pid: process.pid, time: new Date().toISOString() }));
            fs.closeSync(fd);
            return {
                acquired: true,
                release: () => {
                    try {
                        if (fs.existsSync(LOCK_FILE)) {
                            fs.unlinkSync(LOCK_FILE);
                        }
                    }
                    catch {
                        // Ignore release errors
                    }
                },
            };
        }
        catch (error) {
            if (error.code === 'EEXIST') {
                // Lock exists, wait and retry (random jitter de-synchronizes contenders)
                const waitTime = LOCK_RETRY_MS + Math.random() * LOCK_RETRY_MS;
                // Synchronous sleep: Atomics.wait on a throwaway SharedArrayBuffer
                // blocks this thread for waitTime ms without busy-spinning.
                Atomics.wait(new Int32Array(new SharedArrayBuffer(4)), 0, 0, waitTime);
            }
            else {
                throw error;
            }
        }
    }
    // Timed out: report failure with a no-op release so callers can still call it.
    console.error('[State] Failed to acquire lock within timeout');
    return { acquired: false, release: () => { } };
}
|
|
103
|
+
/**
 * Idempotently create the on-disk directory layout used by the state system
 * (.surrogate plus its archive/, status/ and worktrees/ subdirectories).
 */
export function ensureDirectories() {
    const required = [
        SURROGATE_DIR,
        ARCHIVE_DIR,
        STATUS_DIR,
        path.join(SURROGATE_DIR, 'worktrees'),
    ];
    for (const dir of required) {
        // recursive:true makes this a no-op when the directory already exists.
        fs.mkdirSync(dir, { recursive: true });
    }
}
|
|
111
|
+
/**
 * Load the flight-recorder state from disk.
 * Missing file -> bootstrap a fresh state. Unparseable YAML -> back up the
 * corrupt file (WS1-3: never silently discard it), then bootstrap fresh.
 */
export function loadState() {
    ensureDirectories();
    if (!fs.existsSync(FLIGHT_RECORDER)) {
        return createInitialState();
    }
    try {
        return yaml.parse(fs.readFileSync(FLIGHT_RECORDER, 'utf8'));
    }
    catch (error) {
        // WS1-3: Don't silently reset - backup corrupt file
        const backupPath = `${FLIGHT_RECORDER}.corrupt.${Date.now()}`;
        console.error(`[State] YAML parse failed, backing up to ${backupPath}`);
        try {
            fs.copyFileSync(FLIGHT_RECORDER, backupPath);
        }
        catch {
            // Best-effort backup; continue regardless.
        }
        console.error('[State] Creating fresh state after corruption');
        return createInitialState();
    }
}
|
|
134
|
+
/**
 * Persist the flight-recorder state atomically.
 * Stamps last_updated, bumps context_version, spills overflow completed
 * records to the archive, validates, then writes via temp-file + rename so
 * readers never observe a partially written document.
 */
export function saveState(state) {
    ensureDirectories();
    // Update metadata
    state.meta.last_updated = new Date().toISOString();
    state.meta.context_version++;
    // Auto-archive if too many completed surrogates
    archiveIfNeeded(state);
    // Validate before saving (throws on duplicate surrogate IDs)
    validateState(state);
    // Atomic write (write to temp, then rename)
    const tempPath = `${FLIGHT_RECORDER}.tmp`;
    fs.writeFileSync(tempPath, yaml.stringify(state, { lineWidth: 0 }));
    fs.renameSync(tempPath, FLIGHT_RECORDER);
}
|
|
148
|
+
/**
 * Thread-safe state update with advisory lock (WS1-3).
 * Prevents race conditions when multiple processes update state concurrently.
 *
 * @param updater - Mutates the loaded state in place; its return value is ignored.
 * @returns The state object after the updater ran and the state was persisted.
 * @throws Error when the advisory lock cannot be acquired within the timeout.
 */
export function updateState(updater) {
    const lock = acquireLock();
    if (!lock.acquired) {
        throw new Error('Failed to acquire state lock - another process may be updating');
    }
    try {
        // Read-modify-write under the lock; saveState bumps version metadata.
        const state = loadState();
        updater(state);
        saveState(state);
        return state;
    }
    finally {
        // Always drop the lock, even if the updater or save throws.
        lock.release();
    }
}
|
|
167
|
+
// ═══════════════════════════════════════════════════════════════════════════
|
|
168
|
+
// SURROGATE OPERATIONS
|
|
169
|
+
// ═══════════════════════════════════════════════════════════════════════════
|
|
170
|
+
/**
 * Register a new surrogate: record it as INITIALIZING, claim its exclusive
 * file locks in the global lock table, and bump the spawn counter.
 */
export function addSurrogate(id, role, task, worktreePath, branch, fileLocks) {
    // Normalize file locks to canonical format (WS1-2) before touching state.
    const normalizedLocks = canonicalLockPaths(fileLocks);
    return updateState(state => {
        const now = new Date().toISOString();
        // Register the surrogate itself.
        state.active_surrogates.push({
            id,
            role,
            task_description: task,
            status: 'INITIALIZING',
            worktree: {
                path: worktreePath,
                branch,
                created: now,
            },
            file_locks: {
                exclusive: normalizedLocks, // Store normalized paths
                read_only: [],
            },
            dependencies: [],
            test_gate: {
                test_written: false,
                test_failing: false,
            },
            started: now,
            last_heartbeat: now,
        });
        // Claim each normalized path in the global lock table.
        for (const file of normalizedLocks) {
            state.file_locks[file] = id;
        }
        state.statistics.total_spawned++;
    });
}
|
|
206
|
+
/**
 * Record a status change (and fresh heartbeat) for an active surrogate.
 * No-op when the id is not in the active list.
 */
export function updateSurrogateStatus(id, status) {
    return updateState(state => {
        const target = state.active_surrogates.find(s => s.id === id);
        if (!target) {
            return;
        }
        target.status = status;
        target.last_heartbeat = new Date().toISOString();
    });
}
|
|
215
|
+
/**
 * Move a surrogate from the active list to recent_completed with status
 * COMPLETE, release its exclusive file locks, bump statistics, and remove
 * its sharded status file.
 *
 * @param id - Surrogate ID (must be in active_surrogates)
 * @param artifacts - Artifact summary recorded on the completed entry
 * @param prUrl - Pull-request URL recorded on the completed entry
 * @throws Error when the surrogate is not found in the active list
 */
export function completeSurrogateState(id, artifacts, prUrl) {
    return updateState(state => {
        const surrogateIndex = state.active_surrogates.findIndex(s => s.id === id);
        if (surrogateIndex === -1) {
            throw new Error(`Surrogate ${id} not found`);
        }
        const surrogate = state.active_surrogates[surrogateIndex];
        // Create completed record (copy of the active entry + completion fields)
        const completed = {
            ...surrogate,
            status: 'COMPLETE',
            completed_at: new Date().toISOString(),
            pr_url: prUrl,
            artifacts,
            report_path: `.surrogate/reports/${id}-report.yaml`,
        };
        // Move to recent_completed
        state.recent_completed.push(completed);
        // Remove from active
        state.active_surrogates.splice(surrogateIndex, 1);
        // Release file locks held by this surrogate
        surrogate.file_locks.exclusive.forEach(file => {
            delete state.file_locks[file];
        });
        // Update statistics
        state.statistics.total_completed++;
        // Cleanup agent status file (sharded state)
        cleanupAgentStatusFile(id);
    });
}
|
|
245
|
+
/**
 * P2-1: Typed fail_surrogate with evidence requirement.
 * Rejects failures without proper evidence.
 *
 * Maps the error_type to a terminal status, writes a structured YAML failure
 * report, moves the surrogate from active to recent_completed, releases its
 * locks, bumps statistics, and removes its sharded status file.
 *
 * @param id - Surrogate ID
 * @param failure - Typed failure with error_type, message, and evidence
 * @throws Error if evidence is insufficient or the surrogate is not active
 */
export function failSurrogateState(id, failure) {
    // Validate evidence first (before taking the state lock)
    const validation = validateFailureEvidence(failure);
    if (!validation.valid) {
        throw new Error(`EVIDENCE_REQUIRED: ${validation.error}. Cannot fail surrogate without proper evidence.`);
    }
    return updateState(state => {
        const surrogateIndex = state.active_surrogates.findIndex(s => s.id === id);
        if (surrogateIndex === -1) {
            throw new Error(`Surrogate ${id} not found`);
        }
        const surrogate = state.active_surrogates[surrogateIndex];
        // Map error_type to appropriate status (default FAILED)
        let status = 'FAILED';
        if (failure.error_type === 'TIMEOUT')
            status = 'TIMEOUT';
        else if (failure.error_type === 'MERGE_CONFLICT')
            status = 'MERGE_CONFLICT';
        else if (failure.error_type === 'AUTH' || failure.error_type === 'TOOLING_MISSING')
            status = 'CONFIG_ERROR';
        // Create failed record (artifacts intentionally empty on failure)
        const failed = {
            ...surrogate,
            status,
            completed_at: new Date().toISOString(),
            artifacts: { files_created: [], files_modified: [], tests_added: [] },
            report_path: `.surrogate/reports/${id}-failed.yaml`,
            error_details: failure.message,
        };
        // Write structured failure report (P2-1: includes all evidence)
        const reportPath = path.join(SURROGATE_DIR, 'reports', `${id}-failed.yaml`);
        fs.mkdirSync(path.dirname(reportPath), { recursive: true });
        fs.writeFileSync(reportPath, yaml.stringify({
            id,
            error_type: failure.error_type,
            message: failure.message,
            evidence: failure.evidence,
            failed_at: new Date().toISOString(),
            status,
        }));
        // Move to recent_completed
        state.recent_completed.push(failed);
        // Remove from active
        state.active_surrogates.splice(surrogateIndex, 1);
        // Release file locks held by this surrogate
        surrogate.file_locks.exclusive.forEach(file => {
            delete state.file_locks[file];
        });
        // Update statistics
        state.statistics.total_failed++;
        // Cleanup agent status file (sharded state)
        cleanupAgentStatusFile(id);
    });
}
|
|
307
|
+
/**
 * Legacy fail_surrogate for backward compatibility.
 * @deprecated Use failSurrogateState with a typed failure instead.
 */
export function failSurrogateStateLegacy(id, reason) {
    // Wrap the free-form reason in the typed shape; point the evidence at the
    // surrogate's log file so validateFailureEvidence accepts it.
    return failSurrogateState(id, {
        error_type: 'TASK_IMPOSSIBLE',
        message: reason,
        evidence: {
            log_path: `.surrogate/logs/${id}.log`,
        },
    });
}
|
|
322
|
+
// ═══════════════════════════════════════════════════════════════════════════
|
|
323
|
+
// LOCK OPERATIONS
|
|
324
|
+
// ═══════════════════════════════════════════════════════════════════════════
|
|
325
|
+
/**
 * Canonical lock path normalization (WS1-2).
 * Ensures consistent path format regardless of input style:
 * - Converts backslashes to forward slashes (Windows compatibility)
 * - Removes a leading ./
 * - Removes trailing slashes
 * - Collapses repeated slashes
 * - Normalizes to lowercase on Windows (case-insensitive filesystems)
 *
 * Fix: the original chain also ran .replace(/^\.\\/, ''), but that pattern
 * could never match — all backslashes are already converted to '/' by the
 * first replace — so the dead step is removed. Behavior is unchanged.
 */
export function canonicalLockPath(filePath) {
    let normalized = filePath
        .replace(/\\/g, '/') // Backslash -> forward slash
        .replace(/^\.\//, '') // Remove leading ./ (covers .\ after the step above)
        .replace(/\/+$/, '') // Remove trailing slashes
        .replace(/\/+/g, '/'); // Collapse multiple slashes
    // On Windows, normalize to lowercase for case-insensitive matching
    if (process.platform === 'win32') {
        normalized = normalized.toLowerCase();
    }
    return normalized;
}
|
|
346
|
+
/**
 * Normalize an array of file paths to canonical lock format.
 * Delegates each entry to canonicalLockPath; input order is preserved.
 */
export function canonicalLockPaths(files) {
    const normalized = [];
    for (const file of files) {
        normalized.push(canonicalLockPath(file));
    }
    return normalized;
}
|
|
352
|
+
/**
 * Check a set of requested files against the global lock table.
 * Comparison happens on canonical paths, but conflict entries echo the
 * caller's original spelling of each path.
 */
export function checkLockConflict(files) {
    const state = loadState();
    const conflicts = canonicalLockPaths(files)
        .map((normalized, index) => ({ normalized, original: files[index] }))
        .filter(({ normalized }) => state.file_locks[normalized])
        .map(({ normalized, original }) => ({
            file: original,
            owner: state.file_locks[normalized],
        }));
    return { hasConflict: conflicts.length > 0, conflicts };
}
|
|
364
|
+
/** Snapshot of the global lock table (canonical path -> owning surrogate id). */
export function getLockedFiles() {
    const { file_locks } = loadState();
    return file_locks;
}
|
|
367
|
+
// ═══════════════════════════════════════════════════════════════════════════
|
|
368
|
+
// ARCHIVING
|
|
369
|
+
// ═══════════════════════════════════════════════════════════════════════════
|
|
370
|
+
/**
 * Trim state.recent_completed so the flight recorder stays small.
 * Two triggers: a hard cap on record count, and a rough token budget
 * (serialized chars / 4). Evicted records go to the dated archive file.
 *
 * Fix: the token-budget path previously shed at most ONE record per save,
 * so a grossly oversized state could remain over budget indefinitely. It
 * now keeps shedding (always leaving at least one recent record) until the
 * estimate drops back under budget.
 */
function archiveIfNeeded(state) {
    const MAX_RECENT = 5;
    const MAX_TOKENS_ESTIMATE = 2000;
    // Hard cap on number of recent completed records.
    while (state.recent_completed.length > MAX_RECENT) {
        const oldest = state.recent_completed.shift();
        if (oldest) {
            appendToArchive(oldest);
        }
    }
    // Rough token estimate (chars / 4); keep at least one recent record.
    while (state.recent_completed.length > 1 &&
        yaml.stringify(state).length / 4 > MAX_TOKENS_ESTIMATE) {
        const oldest = state.recent_completed.shift();
        if (oldest) {
            appendToArchive(oldest);
        }
    }
}
|
|
389
|
+
/** Append one completed/failed surrogate record to today's archive file. */
function appendToArchive(surrogate) {
    const date = new Date().toISOString().split('T')[0];
    const archivePath = path.join(ARCHIVE_DIR, `history-${date}.yaml`);
    // Load the existing day-file if present, tolerating an empty document.
    const records = fs.existsSync(archivePath)
        ? (yaml.parse(fs.readFileSync(archivePath, 'utf8')) || [])
        : [];
    records.push(surrogate);
    fs.writeFileSync(archivePath, yaml.stringify(records));
}
|
|
399
|
+
// ═══════════════════════════════════════════════════════════════════════════
|
|
400
|
+
// VALIDATION
|
|
401
|
+
// ═══════════════════════════════════════════════════════════════════════════
|
|
402
|
+
/**
 * Sanity-check state before it is persisted.
 * - Throws on duplicate surrogate IDs (would double-book file locks).
 * - Self-heals orphaned locks (owner no longer active) with a warning.
 *
 * Fix: duplicate detection was O(n^2) (indexOf inside filter) and could
 * list the same ID multiple times in the error; it now uses a single Set
 * pass and reports each duplicate ID once.
 */
function validateState(state) {
    // Check for duplicate surrogate IDs
    const seen = new Set();
    const duplicates = new Set();
    for (const { id } of state.active_surrogates) {
        if (seen.has(id)) {
            duplicates.add(id);
        }
        seen.add(id);
    }
    if (duplicates.size > 0) {
        throw new Error(`Duplicate surrogate IDs detected: ${[...duplicates].join(', ')}`);
    }
    // Check for orphaned locks (seen already holds every active id)
    for (const [file, owner] of Object.entries(state.file_locks)) {
        if (!seen.has(owner)) {
            console.warn(`Warning: Orphaned lock on ${file} by non-existent surrogate ${owner}`);
            delete state.file_locks[file];
        }
    }
}
|
|
418
|
+
/**
 * Build and persist the default flight-recorder document.
 * Called when loadState() finds no recorder on disk (or after a corrupt one
 * was backed up). Note: the trailing saveState() immediately stamps
 * last_updated and bumps context_version from 1 to 2.
 */
function createInitialState() {
    const state = {
        meta: {
            project: path.basename(process.cwd()),
            initialized: new Date().toISOString(),
            last_updated: new Date().toISOString(),
            context_version: 1,
        },
        // Mission is unset until the orchestrator defines an objective.
        mission: {
            objective: null,
            decomposition: [],
        },
        active_surrogates: [],
        recent_completed: [],
        file_locks: {},
        constraints: {
            // Paths agents must never touch (includes the recorder itself).
            forbidden_paths: ['node_modules/*', '.env*', '.git/*', '.surrogate/flight_recorder.yaml'],
            // Paths requiring explicit acknowledgment (see checkProtectedFiles).
            protected_paths: ['package.json', 'package-lock.json', 'yarn.lock', 'pnpm-lock.yaml'],
            required_patterns: [],
        },
        config: {
            heartbeat_ttl_minutes: 5, // Auto-kill after 5 min silence
            scoped_tests: true, // Use --findRelatedTests by default
            cleanup_on_complete: true, // Clean worktree after successful merge
        },
        statistics: {
            total_spawned: 0,
            total_completed: 0,
            total_failed: 0,
            total_merged: 0,
        },
    };
    saveState(state);
    return state;
}
|
|
453
|
+
/**
 * Read all agent status files from .surrogate/status/.
 * Each agent writes only to its own file (no race condition).
 * Corrupt files are logged and skipped; a directory read error yields an
 * empty map.
 *
 * @returns Map of surrogate_id -> parsed status object
 */
function readAgentStatusFiles() {
    const statusMap = new Map();
    ensureDirectories();
    if (!fs.existsSync(STATUS_DIR)) {
        return statusMap;
    }
    let files;
    try {
        files = fs.readdirSync(STATUS_DIR).filter(f => f.endsWith('.json'));
    }
    catch {
        // Directory read error - return empty
        return statusMap;
    }
    for (const file of files) {
        try {
            const status = JSON.parse(fs.readFileSync(path.join(STATUS_DIR, file), 'utf8'));
            statusMap.set(status.surrogate_id, status);
        }
        catch {
            // Skip corrupted files
            console.error(`[State] Failed to read status file: ${file}`);
        }
    }
    return statusMap;
}
|
|
482
|
+
/**
 * Remove a surrogate's sharded status file once it completes or fails.
 * Failures to delete are logged, never thrown.
 */
export function cleanupAgentStatusFile(surrogateId) {
    const statusFile = path.join(STATUS_DIR, `${surrogateId}.json`);
    if (!fs.existsSync(statusFile)) {
        return;
    }
    try {
        fs.unlinkSync(statusFile);
    }
    catch {
        console.error(`[State] Failed to cleanup status file for ${surrogateId}`);
    }
}
|
|
496
|
+
/**
 * Build a read-only summary of the orchestrator's current state.
 * Overlays each active surrogate's YAML record with real-time data from its
 * sharded status file (if one exists), then flags surrogates whose last
 * heartbeat is older than the configured TTL as stale.
 */
export function getStatusSummary() {
    const state = loadState();
    const agentStatuses = readAgentStatusFiles(); // Real-time from agents
    const now = Date.now();
    // TTL comes from config; defaults to 5 minutes when unset.
    const ttlMs = (state.config?.heartbeat_ttl_minutes || 5) * 60 * 1000;
    // Merge YAML state with real-time agent status
    const mergedSurrogates = state.active_surrogates.map(s => {
        const agentStatus = agentStatuses.get(s.id);
        // If agent has written a status file, use its real-time data
        if (agentStatus) {
            return {
                ...s,
                status: agentStatus.status,
                last_heartbeat: agentStatus.last_heartbeat,
            };
        }
        return s;
    });
    // Detect stale surrogates (no heartbeat within TTL)
    // Uses MERGED data (real-time from agent files)
    const staleSurrogates = mergedSurrogates
        .filter(s => {
            const lastHeartbeat = new Date(s.last_heartbeat).getTime();
            return (now - lastHeartbeat) > ttlMs;
        })
        .map(s => s.id);
    return {
        mission: state.mission.objective,
        context_version: state.meta.context_version,
        active_count: mergedSurrogates.length,
        // Condensed per-surrogate view (id/role/status/task/locks only).
        active_surrogates: mergedSurrogates.map(s => ({
            id: s.id,
            role: s.role,
            status: s.status,
            task: s.task_description,
            locks: s.file_locks.exclusive,
        })),
        locked_files: Object.keys(state.file_locks),
        recent_completed_count: state.recent_completed.length,
        statistics: state.statistics,
        stale_surrogates: staleSurrogates, // Surrogates that need cleanup
    };
}
|
|
539
|
+
/**
 * Emergency force release locks for a surrogate (WS3-3).
 * WARNING: This is a destructive operation - does not properly clean up state
 * (the surrogate stays in the active list; only its locks are cleared).
 *
 * @param surrogateId - Owner whose locks should be dropped
 * @returns { released, errors } - paths freed and any problems encountered
 */
export function forceReleaseLocks(surrogateId) {
    // Accumulators are captured by the updater closure below.
    const released = [];
    const errors = [];
    try {
        updateState(state => {
            const surrogate = state.active_surrogates.find(s => s.id === surrogateId);
            if (!surrogate) {
                // Record the problem instead of throwing so state is still saved.
                errors.push(`Surrogate '${surrogateId}' not found in active surrogates`);
                return;
            }
            // Release all file locks owned by this surrogate
            for (const [file, owner] of Object.entries(state.file_locks)) {
                if (owner === surrogateId) {
                    delete state.file_locks[file];
                    released.push(file);
                }
            }
            // Clear the surrogate's lock list
            surrogate.file_locks.exclusive = [];
            surrogate.file_locks.read_only = [];
        });
    }
    catch (error) {
        // e.g. advisory-lock acquisition failure from updateState
        errors.push(error instanceof Error ? error.message : String(error));
    }
    return { released, errors };
}
|
|
570
|
+
/**
 * Check if any of the given files is protected (requires explicit lock
 * acknowledgment). A file matches when it equals a protected path exactly
 * or ends with "/<protected path>" (i.e. lives in a subdirectory).
 */
export function checkProtectedFiles(files) {
    const protectedPaths = loadState().constraints?.protected_paths || [];
    const isProtected = (file) =>
        protectedPaths.some(p => file === p || file.endsWith(`/${p}`));
    const protectedFiles = files.filter(isProtected);
    return { hasProtected: protectedFiles.length > 0, protected: protectedFiles };
}
|
|
577
|
+
/**
 * Check if a process is alive by PID.
 * Signal 0 performs an existence/permission check without delivering a
 * signal; any error (ESRCH, ERANGE, EPERM on some platforms) maps to false.
 */
function isProcessAlive(pid) {
    try {
        process.kill(pid, 0);
    }
    catch {
        return false;
    }
    return true;
}
|
|
591
|
+
/**
 * Read the result marker (.surrogate-result.json) from a worktree.
 * Returns the parsed object, or null when the marker is absent or
 * unreadable/unparseable.
 */
function readResultMarkerFromWorktree(worktreePath) {
    const markerPath = path.join(worktreePath, '.surrogate-result.json');
    if (!fs.existsSync(markerPath)) {
        return null;
    }
    try {
        return JSON.parse(fs.readFileSync(markerPath, 'utf8'));
    }
    catch {
        // Half-written or corrupt marker: treat as "no result yet".
        return null;
    }
}
|
|
607
|
+
/**
 * Scan for orphaned worktrees: directories under .surrogate/worktrees whose
 * name matches no active surrogate id. Unreadable directory -> empty list.
 */
function findOrphanedWorktrees(state) {
    const worktreesDir = path.join(SURROGATE_DIR, 'worktrees');
    if (!fs.existsSync(worktreesDir)) {
        return [];
    }
    const activeIds = new Set(state.active_surrogates.map(s => s.id));
    try {
        return fs.readdirSync(worktreesDir, { withFileTypes: true })
            .filter(entry => entry.isDirectory() && !activeIds.has(entry.name))
            .map(entry => entry.name);
    }
    catch {
        // Ignore read errors
        return [];
    }
}
|
|
630
|
+
/**
 * Scan for orphaned status files: *.json under .surrogate/status whose
 * basename matches no active surrogate id. Unreadable directory -> empty list.
 */
function findOrphanedStatusFiles(state) {
    if (!fs.existsSync(STATUS_DIR)) {
        return [];
    }
    const activeIds = new Set(state.active_surrogates.map(s => s.id));
    try {
        return fs.readdirSync(STATUS_DIR)
            .filter(f => f.endsWith('.json'))
            .map(f => f.replace('.json', ''))
            .filter(id => !activeIds.has(id));
    }
    catch {
        // Ignore read errors
        return [];
    }
}
|
|
653
|
+
/**
 * Reconcile a single surrogate's recorded state against reality.
 * Gathers evidence (worktree existence, result marker, status file, process
 * liveness, heartbeat age) and derives an actual_status:
 *   COMPLETED/FAILED - result marker present (success flag decides which)
 *   ORPHANED         - worktree directory is gone
 *   ZOMBIE           - known PID is dead and no marker was written
 *   STALE            - heartbeat older than 5 minutes
 *   RUNNING          - none of the above
 *
 * NOTE(review): the dryRun parameter is currently unused here — this
 * function only inspects, it never mutates state.
 */
function reconcileSurrogate(surrogate, dryRun) {
    // Start from an all-false evidence record; fields are filled in below.
    const check = {
        id: surrogate.id,
        status_in_yaml: surrogate.status,
        actual_status: 'RUNNING',
        has_result_marker: false,
        has_status_file: false,
        process_alive: false,
        worktree_exists: false,
    };
    // Check if worktree exists
    const worktreePath = surrogate.worktree.path;
    check.worktree_exists = fs.existsSync(worktreePath);
    // Check for result marker in worktree
    if (check.worktree_exists) {
        const marker = readResultMarkerFromWorktree(worktreePath);
        if (marker) {
            check.has_result_marker = true;
            check.marker_data = {
                success: marker.success,
                error_type: marker.error_type,
                error_message: marker.error_message,
                duration_ms: marker.duration_ms,
            };
        }
    }
    // Check for status file
    const statusFile = path.join(STATUS_DIR, `${surrogate.id}.json`);
    check.has_status_file = fs.existsSync(statusFile);
    // Check if process is alive (if we have a PID)
    const pid = surrogate.agent_pid;
    if (pid) {
        check.process_alive = isProcessAlive(pid);
    }
    // Determine actual status (evidence precedence: marker > missing worktree
    // > dead process > stale heartbeat > running)
    if (check.has_result_marker) {
        // Agent wrote a result marker - it's done
        check.actual_status = check.marker_data?.success ? 'COMPLETED' : 'FAILED';
        check.issue = `Result marker exists (success=${check.marker_data?.success}) but surrogate still in active list`;
    }
    else if (!check.worktree_exists) {
        // Worktree is gone but surrogate is still active
        check.actual_status = 'ORPHANED';
        check.issue = 'Worktree does not exist but surrogate is in active list';
    }
    else if (pid && !check.process_alive) {
        // Process died without writing a marker
        check.actual_status = 'ZOMBIE';
        check.issue = `Process ${pid} is not running and no result marker was written`;
    }
    else {
        // Check for stale heartbeat (hard-coded 5 minutes, independent of
        // config.heartbeat_ttl_minutes used elsewhere)
        const ttlMs = 5 * 60 * 1000; // 5 minutes
        const lastHeartbeat = new Date(surrogate.last_heartbeat).getTime();
        const now = Date.now();
        if ((now - lastHeartbeat) > ttlMs) {
            check.actual_status = 'STALE';
            check.issue = `No heartbeat for ${Math.round((now - lastHeartbeat) / 60000)} minutes`;
        }
        else {
            check.actual_status = 'RUNNING';
        }
    }
    return check;
}
|
|
721
|
+
/**
 * P2-2: Main reconcile function.
 * Scans all surrogates and corrects state based on evidence from markers and reports.
 *
 * Phases:
 *   1. Run a reality check on each active surrogate.
 *   2-4. Collect orphaned status files, worktrees and locks.
 *   5. Unless dryRun, apply corrective transitions and clean up orphans.
 *
 * @param dryRun - If true, don't modify state, just report what would be done
 * @returns ReconcileResult with all findings and actions taken
 */
export function reconcileState(dryRun = false) {
    const result = {
        timestamp: new Date().toISOString(),
        checks: [],
        orphaned_status_files: [],
        orphaned_worktrees: [],
        orphaned_locks: [],
        actions_taken: [],
        state_was_corrected: false,
        summary: '',
    };
    const state = loadState();
    // 1. Check each active surrogate
    for (const surrogate of state.active_surrogates) {
        result.checks.push(reconcileSurrogate(surrogate, dryRun));
    }
    // 2. Find orphaned status files
    result.orphaned_status_files = findOrphanedStatusFiles(state);
    // 3. Find orphaned worktrees
    result.orphaned_worktrees = findOrphanedWorktrees(state);
    // 4. Find orphaned locks (locks without active surrogate)
    const activeIds = new Set(state.active_surrogates.map(s => s.id));
    for (const [file, owner] of Object.entries(state.file_locks)) {
        if (!activeIds.has(owner)) {
            result.orphaned_locks.push({ file, owner });
        }
    }
    // 5. Take corrective actions (unless dry run)
    if (!dryRun) {
        for (const check of result.checks) {
            applyReconcileCorrection(check, result);
        }
        cleanupOrphanedStatusFiles(result);
        releaseOrphanedLocks(result);
    }
    // Generate summary
    const issueCount = result.checks.filter(c => c.actual_status !== 'RUNNING').length;
    const orphanCount = result.orphaned_status_files.length +
        result.orphaned_worktrees.length +
        result.orphaned_locks.length;
    if (issueCount === 0 && orphanCount === 0) {
        result.summary = 'State is consistent - no issues found';
    }
    else if (dryRun) {
        result.summary = `Found ${issueCount} surrogate issues, ${orphanCount} orphaned resources (dry run - no changes made)`;
    }
    else {
        result.summary = `Corrected ${result.actions_taken.length} issues`;
    }
    return result;
}
/**
 * Apply the corrective state transition implied by one reconcile check.
 * Mutates check.action_taken and records the action on the result.
 * Errors from the state-transition helpers are captured on the check
 * rather than aborting the reconcile pass.
 */
function applyReconcileCorrection(check, result) {
    if (check.actual_status === 'COMPLETED' && check.marker_data?.success) {
        // Agent completed successfully - mark as complete
        try {
            completeSurrogateState(check.id, {
                files_created: [],
                files_modified: [],
                tests_added: [],
            });
            check.action_taken = 'Marked as COMPLETE based on result marker';
            result.actions_taken.push(`Completed ${check.id}`);
            result.state_was_corrected = true;
        }
        catch (error) {
            check.action_taken = `Failed to complete: ${error}`;
        }
    }
    else if (check.actual_status === 'FAILED' && check.has_result_marker) {
        // Agent failed - mark as failed with evidence from marker
        try {
            const errorType = mapMarkerErrorTypeToFailureType(check.marker_data?.error_type);
            failSurrogateState(check.id, {
                error_type: errorType,
                message: check.marker_data?.error_message || 'Agent failed (from reconcile)',
                evidence: {
                    log_path: `.surrogate/logs/${check.id}.log`,
                },
            });
            check.action_taken = 'Marked as FAILED based on result marker';
            result.actions_taken.push(`Failed ${check.id} (${errorType})`);
            result.state_was_corrected = true;
        }
        catch (error) {
            check.action_taken = `Failed to mark as failed: ${error}`;
        }
    }
    else if (check.actual_status === 'ZOMBIE' || check.actual_status === 'ORPHANED') {
        // Process died or worktree gone - mark as failed
        try {
            failSurrogateState(check.id, {
                error_type: 'RUNTIME_ERROR',
                message: check.issue || `Surrogate ${check.actual_status.toLowerCase()} - detected by reconcile`,
                evidence: {
                    log_path: `.surrogate/logs/${check.id}.log`,
                },
            });
            check.action_taken = `Marked as FAILED (${check.actual_status})`;
            result.actions_taken.push(`Failed ${check.id} (${check.actual_status})`);
            result.state_was_corrected = true;
        }
        catch (error) {
            check.action_taken = `Failed to mark as failed: ${error}`;
        }
    }
}
/** Delete status files that no longer belong to any active surrogate. */
function cleanupOrphanedStatusFiles(result) {
    for (const id of result.orphaned_status_files) {
        try {
            fs.unlinkSync(path.join(STATUS_DIR, `${id}.json`));
            result.actions_taken.push(`Deleted orphaned status file: ${id}`);
            result.state_was_corrected = true;
        }
        catch {
            // Ignore cleanup errors
        }
    }
}
/** Release file locks whose owning surrogate is no longer active. */
function releaseOrphanedLocks(result) {
    if (result.orphaned_locks.length === 0) {
        return;
    }
    updateState(s => {
        for (const lock of result.orphaned_locks) {
            delete s.file_locks[lock.file];
        }
    });
    result.actions_taken.push(`Released ${result.orphaned_locks.length} orphaned locks`);
    result.state_was_corrected = true;
}
|
|
853
|
+
/**
 * Map marker error types to TypedFailure error types.
 * Unknown or missing marker types fall back to 'RUNTIME_ERROR'.
 */
function mapMarkerErrorTypeToFailureType(markerType) {
    // A Map (rather than a plain object) keeps prototype keys such as
    // 'toString' out of the lookup, matching the default-case behavior
    // of a switch statement exactly.
    const failureTypeByMarker = new Map([
        ['TIMEOUT', 'TIMEOUT'],
        ['CONFIG_ERROR', 'AUTH'],
        ['SPAWN_FAILED', 'TOOLING_MISSING'],
        ['EXECUTION_ERROR', 'RUNTIME_ERROR'],
    ]);
    return failureTypeByMarker.get(markerType) ?? 'RUNTIME_ERROR';
}
|
|
865
|
+
//# sourceMappingURL=state.js.map
|