principles-disciple 1.8.2 → 1.8.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/openclaw.plugin.json +4 -4
- package/package.json +1 -1
- package/templates/langs/en/skills/ai-sprint-orchestration/EXAMPLES.md +63 -0
- package/templates/langs/en/skills/ai-sprint-orchestration/REFERENCE.md +136 -0
- package/templates/langs/en/skills/ai-sprint-orchestration/SKILL.md +67 -0
- package/templates/langs/en/skills/ai-sprint-orchestration/references/agent-registry.json +214 -0
- package/templates/langs/en/skills/ai-sprint-orchestration/references/specs/bugfix-complex-template.json +107 -0
- package/templates/langs/en/skills/ai-sprint-orchestration/references/specs/feature-complex-template.json +107 -0
- package/templates/langs/en/skills/ai-sprint-orchestration/references/specs/workflow-validation-minimal-verify.json +105 -0
- package/templates/langs/en/skills/ai-sprint-orchestration/references/specs/workflow-validation-minimal.json +108 -0
- package/templates/langs/en/skills/ai-sprint-orchestration/references/workflow-v1-acceptance-checklist.md +58 -0
- package/templates/langs/en/skills/ai-sprint-orchestration/references/workflow-v1.4-work-unit-handoff.md +190 -0
- package/templates/langs/en/skills/ai-sprint-orchestration/runtime/.gitignore +2 -0
- package/templates/langs/en/skills/ai-sprint-orchestration/scripts/lib/archive.mjs +310 -0
- package/templates/langs/en/skills/ai-sprint-orchestration/scripts/lib/contract-enforcement.mjs +683 -0
- package/templates/langs/en/skills/ai-sprint-orchestration/scripts/lib/decision.mjs +604 -0
- package/templates/langs/en/skills/ai-sprint-orchestration/scripts/lib/state-store.mjs +32 -0
- package/templates/langs/en/skills/ai-sprint-orchestration/scripts/lib/task-specs.mjs +707 -0
- package/templates/langs/en/skills/ai-sprint-orchestration/scripts/run.mjs +3419 -0
- package/templates/langs/zh/skills/ai-sprint-orchestration/EXAMPLES.md +63 -0
- package/templates/langs/zh/skills/ai-sprint-orchestration/REFERENCE.md +136 -0
- package/templates/langs/zh/skills/ai-sprint-orchestration/SKILL.md +67 -0
- package/templates/langs/zh/skills/ai-sprint-orchestration/references/agent-registry.json +214 -0
- package/templates/langs/zh/skills/ai-sprint-orchestration/references/specs/bugfix-complex-template.json +107 -0
- package/templates/langs/zh/skills/ai-sprint-orchestration/references/specs/feature-complex-template.json +107 -0
- package/templates/langs/zh/skills/ai-sprint-orchestration/references/specs/workflow-validation-minimal-verify.json +105 -0
- package/templates/langs/zh/skills/ai-sprint-orchestration/references/specs/workflow-validation-minimal.json +108 -0
- package/templates/langs/zh/skills/ai-sprint-orchestration/references/workflow-v1-acceptance-checklist.md +58 -0
- package/templates/langs/zh/skills/ai-sprint-orchestration/references/workflow-v1.4-work-unit-handoff.md +190 -0
- package/templates/langs/zh/skills/ai-sprint-orchestration/runtime/.gitignore +2 -0
- package/templates/langs/zh/skills/ai-sprint-orchestration/scripts/lib/archive.mjs +310 -0
- package/templates/langs/zh/skills/ai-sprint-orchestration/scripts/lib/contract-enforcement.mjs +683 -0
- package/templates/langs/zh/skills/ai-sprint-orchestration/scripts/lib/decision.mjs +604 -0
- package/templates/langs/zh/skills/ai-sprint-orchestration/scripts/lib/state-store.mjs +32 -0
- package/templates/langs/zh/skills/ai-sprint-orchestration/scripts/lib/task-specs.mjs +707 -0
- package/templates/langs/zh/skills/ai-sprint-orchestration/scripts/run.mjs +3419 -0
- package/templates/langs/zh/skills/ai-sprint-orchestration/test/archive.test.mjs +230 -0
- package/templates/langs/zh/skills/ai-sprint-orchestration/test/contract-enforcement.test.mjs +672 -0
- package/templates/langs/zh/skills/ai-sprint-orchestration/test/decision.test.mjs +1321 -0
- package/templates/langs/zh/skills/ai-sprint-orchestration/test/run.test.mjs +1419 -0
|
@@ -0,0 +1,1419 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* run.mjs git/worktree/merge-gate tests.
|
|
3
|
+
*
|
|
4
|
+
* Tests P1-1 (merge gate targetBranch vs worktree.branchName), P1-2 (worktree legal git params),
|
|
5
|
+
* and P2 (cleanupWorktree correct cwd) via source analysis.
|
|
6
|
+
*
|
|
7
|
+
* Run: node --test test/run.test.mjs
|
|
8
|
+
*/
|
|
9
|
+
import test from 'node:test';
|
|
10
|
+
import assert from 'node:assert/strict';
|
|
11
|
+
import fs from 'fs';
|
|
12
|
+
import path from 'path';
|
|
13
|
+
import os from 'os';
|
|
14
|
+
import { fileURLToPath } from 'url';
|
|
15
|
+
import { decideAndPersist, formatRoleValidation, getIsolationDir, findIsolationReport, collectIsolationArtifacts, isIsolationCollectAllowed, ISOLATION_COLLECT_ALLOWLIST } from '../scripts/run.mjs';
|
|
16
|
+
import { getTaskSpec, buildStageBrief } from '../scripts/lib/task-specs.mjs';
|
|
17
|
+
|
|
18
|
+
const __dirname = path.dirname(fileURLToPath(import.meta.url));
|
|
19
|
+
const runMjsPath = path.join(__dirname, '..', 'scripts', 'run.mjs');
|
|
20
|
+
const archiveMjsPath = path.join(__dirname, '..', 'scripts', 'lib', 'archive.mjs');
|
|
21
|
+
const SOURCE = fs.readFileSync(runMjsPath, 'utf8');
|
|
22
|
+
const ARCHIVE_SOURCE = fs.readFileSync(archiveMjsPath, 'utf8');
|
|
23
|
+
|
|
24
|
+
function getFuncBody(funcName) {
|
|
25
|
+
const start = SOURCE.indexOf(`function ${funcName}`);
|
|
26
|
+
if (start === -1) throw new Error(`Function ${funcName} not found`);
|
|
27
|
+
const end = SOURCE.indexOf('\nfunction ', start + 1);
|
|
28
|
+
return SOURCE.slice(start, end === -1 ? SOURCE.length : end);
|
|
29
|
+
}
|
|
30
|
+
|
|
31
|
+
function getArchiveFuncBody(funcName) {
|
|
32
|
+
const start = ARCHIVE_SOURCE.indexOf(`function ${funcName}`);
|
|
33
|
+
if (start === -1) throw new Error(`Archive function ${funcName} not found`);
|
|
34
|
+
const end = ARCHIVE_SOURCE.indexOf('\nfunction ', start + 1);
|
|
35
|
+
return ARCHIVE_SOURCE.slice(start, end === -1 ? ARCHIVE_SOURCE.length : end);
|
|
36
|
+
}
|
|
37
|
+
|
|
38
|
+
// ---------------------------------------------------------------------------
|
|
39
|
+
// P1-1: merge gate — targetBranch (spec.branch) vs worktree.branchName
|
|
40
|
+
// ---------------------------------------------------------------------------
|
|
41
|
+
|
|
42
|
+
test('P1-1: merge gate does NOT compare against origin/HEAD', () => {
|
|
43
|
+
const body = getFuncBody('runMergeGateCheck');
|
|
44
|
+
const hasOriginHead = /rev-parse.*`\$\{remote\}\/HEAD`|rev-parse.*origin\/HEAD/.test(body);
|
|
45
|
+
assert.equal(hasOriginHead, false,
|
|
46
|
+
'merge gate must NOT compare against origin/HEAD');
|
|
47
|
+
});
|
|
48
|
+
|
|
49
|
+
test('P1-1: merge gate uses targetBranch from spec, NOT worktree.branchName', () => {
|
|
50
|
+
const body = getFuncBody('runMergeGateCheck');
|
|
51
|
+
|
|
52
|
+
// targetBranch must be derived from spec.branch, never worktree.branchName
|
|
53
|
+
// worktree.branchName = sprint/<runId>/<stage> (internal, never pushed to remote)
|
|
54
|
+
// targetBranch = spec.branch ?? 'main' (the real PR branch)
|
|
55
|
+
assert.ok(/targetBranch/.test(body), 'must use targetBranch variable');
|
|
56
|
+
|
|
57
|
+
// Verify: targetBranch = spec.branch ?? 'main' (NO worktree.branchName fallback)
|
|
58
|
+
const hasSpecBranchOnly = /targetBranch\s*=\s*spec\.branch\s*\?\?\s*['"]main['"]/.test(body) ||
|
|
59
|
+
/targetBranch\s*=\s*spec\.branch/.test(body);
|
|
60
|
+
assert.ok(hasSpecBranchOnly, 'targetBranch must be set from spec.branch, not worktree.branchName');
|
|
61
|
+
|
|
62
|
+
// Verify: the line that defines targetBranch does NOT contain worktree
|
|
63
|
+
const targetBranchDefIdx = body.indexOf('const targetBranch');
|
|
64
|
+
if (targetBranchDefIdx !== -1) {
|
|
65
|
+
const defLineEnd = body.indexOf(';', targetBranchDefIdx);
|
|
66
|
+
const defLine = body.slice(targetBranchDefIdx, defLineEnd);
|
|
67
|
+
const usesWorktree = /worktree/.test(defLine);
|
|
68
|
+
assert.equal(usesWorktree, false,
|
|
69
|
+
'targetBranch definition must NOT reference worktree.branchName');
|
|
70
|
+
}
|
|
71
|
+
});
|
|
72
|
+
|
|
73
|
+
test('P1-1: merge gate result includes targetBranch, not worktree branchName', () => {
|
|
74
|
+
const body = getFuncBody('runMergeGateCheck');
|
|
75
|
+
const resultMatch = body.match(/const result\s*=\s*\{[^}]+\}/);
|
|
76
|
+
assert.ok(resultMatch, 'should find result object');
|
|
77
|
+
assert.ok(/targetBranch/.test(resultMatch[0]), 'result must include targetBranch field');
|
|
78
|
+
assert.ok(!/branchName[^N]/.test(resultMatch[0]) || /targetBranch/.test(resultMatch[0]),
|
|
79
|
+
'result field should be targetBranch, not the internal worktree branchName');
|
|
80
|
+
});
|
|
81
|
+
|
|
82
|
+
test('P1-1: merge gate sha-mismatch halt reason uses targetBranch, not worktree branchName', () => {
|
|
83
|
+
const haltIdx = SOURCE.indexOf("type: 'merge_gate_sha_mismatch'");
|
|
84
|
+
assert.ok(haltIdx !== -1, 'sha-mismatch halt reason must use targetBranch');
|
|
85
|
+
const section = SOURCE.slice(haltIdx, haltIdx + 500);
|
|
86
|
+
|
|
87
|
+
// Must use targetBranch in details, not worktree's internal branchName
|
|
88
|
+
assert.ok(/targetBranch/.test(section) || /mergeGate\.targetBranch/.test(section),
|
|
89
|
+
'halt details must reference mergeGate.targetBranch');
|
|
90
|
+
// Must not use mergeGate.branchName (the old field name)
|
|
91
|
+
const usesOldField = /mergeGate\.branchName/.test(section);
|
|
92
|
+
assert.equal(usesOldField, false,
|
|
93
|
+
'halt reason must use targetBranch, not branchName');
|
|
94
|
+
});
|
|
95
|
+
|
|
96
|
+
test('P1-1: merge gate fetches specific targetBranch refspec', () => {
|
|
97
|
+
const body = getFuncBody('runMergeGateCheck');
|
|
98
|
+
// Fetch must use targetBranch, not worktree.branchName
|
|
99
|
+
const fetchWithTargetBranch = /fetch.*targetBranch|refspec.*targetBranch|refs\/heads\/\$\{targetBranch\}/.test(body);
|
|
100
|
+
assert.ok(fetchWithTargetBranch, 'git fetch must use targetBranch in refspec');
|
|
101
|
+
});
|
|
102
|
+
|
|
103
|
+
test('P1-1: fetch-failed merge gate uses mergePending, not a halt type', () => {
|
|
104
|
+
const advanceBody = getFuncBody('advanceState');
|
|
105
|
+
assert.ok(/mergePending/.test(advanceBody), 'fetch-failed path should record mergePending');
|
|
106
|
+
assert.equal(/merge_gate_branch_not_on_remote/.test(advanceBody), false,
|
|
107
|
+
'fetch-failed path should no longer assign a halt type');
|
|
108
|
+
});
|
|
109
|
+
|
|
110
|
+
test('P1-1: merge gate halt does not mention "remote HEAD"', () => {
|
|
111
|
+
const haltIdx = SOURCE.indexOf("type: fetchFailed ? 'merge_gate_branch_not_on_remote'");
|
|
112
|
+
const section = SOURCE.slice(haltIdx, haltIdx + 600);
|
|
113
|
+
const mentionsRemoteHead = /remote\s+HEAD/.test(section);
|
|
114
|
+
assert.equal(mentionsRemoteHead, false,
|
|
115
|
+
'halt details must not say "remote HEAD"');
|
|
116
|
+
});
|
|
117
|
+
|
|
118
|
+
test('P1-1 NEW: worktree branch != targetBranch — merge gate still uses targetBranch', () => {
|
|
119
|
+
// Even when worktree.branchName = sprint/<runId>/<stage>
|
|
120
|
+
// the merge gate comparison must use spec.branch (the real PR branch)
|
|
121
|
+
const body = getFuncBody('runMergeGateCheck');
|
|
122
|
+
|
|
123
|
+
// Verify: targetBranch is set BEFORE the fetch call
|
|
124
|
+
const targetBranchDefIdx = body.indexOf('const targetBranch');
|
|
125
|
+
const fetchIdx = body.indexOf("'fetch'");
|
|
126
|
+
assert.ok(targetBranchDefIdx !== -1 && targetBranchDefIdx < fetchIdx,
|
|
127
|
+
'targetBranch must be defined before the fetch call');
|
|
128
|
+
|
|
129
|
+
// Verify: worktree.branchName is NOT in the targetBranch assignment line
|
|
130
|
+
const targetBranchLine = body.slice(targetBranchDefIdx, body.indexOf(';', targetBranchDefIdx));
|
|
131
|
+
const usesWorktreeInTarget = /worktree/.test(targetBranchLine);
|
|
132
|
+
assert.equal(usesWorktreeInTarget, false,
|
|
133
|
+
'targetBranch must NOT derive from worktree.branchName — it uses spec.branch only');
|
|
134
|
+
});
|
|
135
|
+
|
|
136
|
+
test('P1-1 NEW: targetBranch missing from remote → fetchFailed=true', () => {
|
|
137
|
+
const body = getFuncBody('runMergeGateCheck');
|
|
138
|
+
// When branch doesn't exist on remote, fetchResult.status !== 0
|
|
139
|
+
// Must set fetchFailed: true in that case
|
|
140
|
+
const hasFetchFailedFlag = /fetchFailed:\s*true/.test(body);
|
|
141
|
+
assert.ok(hasFetchFailedFlag, 'missing remote branch must set fetchFailed: true');
|
|
142
|
+
});
|
|
143
|
+
|
|
144
|
+
test('P1-1 NEW: missing remote branch completes with mergePending instead of halting', () => {
|
|
145
|
+
const body = getFuncBody('advanceState');
|
|
146
|
+
assert.ok(/mergePending/.test(body),
|
|
147
|
+
'advanceState must mark mergePending for fetchFailed merge gate results');
|
|
148
|
+
assert.equal(/state\.status\s*=\s*'halted'[\s\S]{0,200}fetchFailed/.test(body), false,
|
|
149
|
+
'fetchFailed merge gate path must not halt the sprint');
|
|
150
|
+
});
|
|
151
|
+
|
|
152
|
+
test('P1-1 NEW: targetBranch defaults to main when spec.branch absent', () => {
|
|
153
|
+
const body = getFuncBody('runMergeGateCheck');
|
|
154
|
+
// targetBranch = spec.branch ?? 'main' — safe fallback to 'main'
|
|
155
|
+
const hasMainFallback = /targetBranch\s*=\s*spec\.branch\s*\?\?\s*['"]main['"]/.test(body);
|
|
156
|
+
assert.ok(hasMainFallback, 'targetBranch must default to "main" when spec.branch is absent');
|
|
157
|
+
});
|
|
158
|
+
|
|
159
|
+
// ---------------------------------------------------------------------------
|
|
160
|
+
// P1-2: worktree add legal git params
|
|
161
|
+
// ---------------------------------------------------------------------------
|
|
162
|
+
|
|
163
|
+
test('P1-2: ensureWorktree worktree add uses baseRef (git ref), not baseWorkspace (path)', () => {
|
|
164
|
+
const body = getFuncBody('ensureWorktree');
|
|
165
|
+
|
|
166
|
+
// Must define baseRef via resolveBaseRef function (not direct assignment)
|
|
167
|
+
const hasResolveBaseRef = /const baseRef\s*=\s*resolveBaseRef\(\)/.test(body) ||
|
|
168
|
+
/baseRef\s*=\s*resolveBaseRef/.test(body);
|
|
169
|
+
assert.ok(hasResolveBaseRef, 'must define baseRef via resolveBaseRef function');
|
|
170
|
+
|
|
171
|
+
const argsMatch = body.match(/worktree',\s*'add'[^;]*?\](?:\s*,|\s*\{)/);
|
|
172
|
+
if (argsMatch) {
|
|
173
|
+
const argsStr = argsMatch[0];
|
|
174
|
+
assert.equal(/baseWorkspace/.test(argsStr), false,
|
|
175
|
+
'git worktree add args must not include baseWorkspace (path); use baseRef');
|
|
176
|
+
}
|
|
177
|
+
assert.ok(/worktree.*add.*-b.*baseRef/.test(body) ||
|
|
178
|
+
/baseRef.*\]/.test(body.slice(body.indexOf('worktree add'))),
|
|
179
|
+
'git worktree add should use baseRef as the starting-point (commit-ish) argument');
|
|
180
|
+
});
|
|
181
|
+
|
|
182
|
+
test('P1-2: ensureWorktree has NO illegal fallback "git worktree add <path> <branchName>"', () => {
|
|
183
|
+
const body = getFuncBody('ensureWorktree');
|
|
184
|
+
|
|
185
|
+
const hasIllegalFallback = /git.*worktree.*add.*\$worktreePath.*\$branchName/.test(body) ||
|
|
186
|
+
/git.*worktree.*add.*worktreePath.*branchName[^A-Z]/.test(body);
|
|
187
|
+
assert.equal(hasIllegalFallback, false,
|
|
188
|
+
'must not have fallback "git worktree add <path> <branchName>" — branchName does not exist yet');
|
|
189
|
+
});
|
|
190
|
+
|
|
191
|
+
test('P1-2: ensureWorktree worktreeInfo includes baseWorkspace for cleanup', () => {
|
|
192
|
+
const body = getFuncBody('ensureWorktree');
|
|
193
|
+
|
|
194
|
+
const infoMatch = body.match(/const worktreeInfo\s*=\s*\{[\s\S]*?\};/);
|
|
195
|
+
assert.ok(infoMatch, 'should find worktreeInfo object');
|
|
196
|
+
assert.ok(/baseWorkspace/.test(infoMatch[0]),
|
|
197
|
+
'worktreeInfo must include baseWorkspace for cleanupWorktree git cwd');
|
|
198
|
+
});
|
|
199
|
+
|
|
200
|
+
// ---------------------------------------------------------------------------
|
|
201
|
+
// P2: cleanupWorktree correct cwd
|
|
202
|
+
// ---------------------------------------------------------------------------
|
|
203
|
+
|
|
204
|
+
test('P2: cleanupWorktree does NOT use path.dirname(worktreePath) as cwd', () => {
|
|
205
|
+
const body = getFuncBody('cleanupWorktree');
|
|
206
|
+
|
|
207
|
+
const usesDirnameWorktreePath = /cwd:\s*path\.dirname\(worktreePath\)/.test(body);
|
|
208
|
+
assert.equal(usesDirnameWorktreePath, false,
|
|
209
|
+
'cleanupWorktree must NOT use path.dirname(worktreePath) as git cwd');
|
|
210
|
+
});
|
|
211
|
+
|
|
212
|
+
test('P2: cleanupWorktree uses baseWorkspace as git cwd', () => {
|
|
213
|
+
const body = getFuncBody('cleanupWorktree');
|
|
214
|
+
|
|
215
|
+
assert.ok(/baseWorkspace/.test(body),
|
|
216
|
+
'cleanupWorktree must use baseWorkspace from state.worktree as git cwd');
|
|
217
|
+
|
|
218
|
+
const gitCwdUsesBaseWorkspace = /cwd:\s*gitCwd/.test(body) ||
|
|
219
|
+
/cwd:\s*baseWorkspace/.test(body) ||
|
|
220
|
+
/gitCwd.*baseWorkspace/.test(body);
|
|
221
|
+
assert.ok(gitCwdUsesBaseWorkspace,
|
|
222
|
+
'git worktree remove must use baseWorkspace as cwd');
|
|
223
|
+
});
|
|
224
|
+
|
|
225
|
+
test('P2: cleanupWorktree destructures baseWorkspace from state.worktree', () => {
|
|
226
|
+
const body = getFuncBody('cleanupWorktree');
|
|
227
|
+
const destructures = /\{[^}]*worktreePath[^}]*baseWorkspace[^}]*\}/.test(body) ||
|
|
228
|
+
/baseWorkspace/.test(body.slice(0, body.indexOf('spawnSync')));
|
|
229
|
+
assert.ok(destructures, 'cleanupWorktree must destructure baseWorkspace from state.worktree');
|
|
230
|
+
});
|
|
231
|
+
|
|
232
|
+
// ---------------------------------------------------------------------------
|
|
233
|
+
// captureGitStatus
|
|
234
|
+
// ---------------------------------------------------------------------------
|
|
235
|
+
|
|
236
|
+
test('captureGitStatus: remoteBranch derived from worktree.branchName, not hardcoded null', () => {
|
|
237
|
+
const body = getFuncBody('captureGitStatus');
|
|
238
|
+
|
|
239
|
+
const infoMatch = body.match(/const gitStatus\s*=\s*\{[\s\S]*?\};/);
|
|
240
|
+
assert.ok(infoMatch, 'should find gitStatus object');
|
|
241
|
+
const infoStr = infoMatch[0];
|
|
242
|
+
|
|
243
|
+
const hasHardcodedNull = /remoteBranch:\s*null(?!,)/.test(infoStr);
|
|
244
|
+
assert.equal(hasHardcodedNull, false,
|
|
245
|
+
'remoteBranch should be set from worktree.branchName, not hardcoded null');
|
|
246
|
+
});
|
|
247
|
+
|
|
248
|
+
// ---------------------------------------------------------------------------
|
|
249
|
+
// Protected file validation
|
|
250
|
+
// ---------------------------------------------------------------------------
|
|
251
|
+
|
|
252
|
+
test('protectedArtifacts: excludes run-level timeline and latest-summary to avoid orchestrator self-writes', () => {
|
|
253
|
+
const body = getFuncBody('protectedArtifacts');
|
|
254
|
+
assert.equal(/latest-summary\.md/.test(body), false,
|
|
255
|
+
'protectedArtifacts must not include latest-summary.md');
|
|
256
|
+
assert.equal(/timeline\.md/.test(body), false,
|
|
257
|
+
'protectedArtifacts must not include timeline.md');
|
|
258
|
+
});
|
|
259
|
+
|
|
260
|
+
test('protectedArtifacts: still protects decision.md and scorecard.json', () => {
|
|
261
|
+
const body = getFuncBody('protectedArtifacts');
|
|
262
|
+
assert.ok(/paths\.decisionPath/.test(body),
|
|
263
|
+
'protectedArtifacts must continue protecting decision.md');
|
|
264
|
+
assert.ok(/paths\.scorecardPath/.test(body),
|
|
265
|
+
'protectedArtifacts must continue protecting scorecard.json');
|
|
266
|
+
});
|
|
267
|
+
|
|
268
|
+
test('protectedArtifacts: still protects sprint.json run state', () => {
|
|
269
|
+
const body = getFuncBody('protectedArtifacts');
|
|
270
|
+
assert.ok(/sprint\.json/.test(body),
|
|
271
|
+
'protectedArtifacts must continue protecting sprint.json');
|
|
272
|
+
});
|
|
273
|
+
|
|
274
|
+
test('protected write detection call sites pass runDir for severity classification', () => {
|
|
275
|
+
const runReviewerBody = getFuncBody('runReviewerRole');
|
|
276
|
+
assert.ok(/detectProtectedWriteViolation\(protectedFiles,\s*protectedSnapshot,\s*runDir\)/.test(runReviewerBody),
|
|
277
|
+
'reviewer protected write detection must pass runDir');
|
|
278
|
+
|
|
279
|
+
const executeStageBody = getFuncBody('executeStage');
|
|
280
|
+
assert.ok(/detectProtectedWriteViolation\(protectedFiles,\s*protectedSnapshot,\s*runDir\)/.test(executeStageBody),
|
|
281
|
+
'stage-level protected write detection must pass runDir');
|
|
282
|
+
});
|
|
283
|
+
|
|
284
|
+
// ---------------------------------------------------------------------------
|
|
285
|
+
// Process tree cleanup and role state bookkeeping
|
|
286
|
+
// ---------------------------------------------------------------------------
|
|
287
|
+
|
|
288
|
+
test('process cleanup: timeout path uses terminateProcessTree instead of proc.kill', () => {
|
|
289
|
+
const body = getFuncBody('runAgentAsync');
|
|
290
|
+
assert.ok(/terminateProcessTree\(proc\?\.pid/.test(body),
|
|
291
|
+
'runAgentAsync timeout must terminate the full process tree');
|
|
292
|
+
assert.equal(/proc\?\.kill\(\)/.test(body), false,
|
|
293
|
+
'runAgentAsync timeout must not rely on proc.kill() only');
|
|
294
|
+
});
|
|
295
|
+
|
|
296
|
+
test('process cleanup: terminateProcessTree uses taskkill /T /F on Windows', () => {
|
|
297
|
+
const body = getFuncBody('terminateProcessTree');
|
|
298
|
+
assert.ok(/taskkill/.test(body), 'terminateProcessTree must use taskkill on Windows');
|
|
299
|
+
assert.ok(/\/T/.test(body) && /\/F/.test(body),
|
|
300
|
+
'terminateProcessTree must kill the full descendant tree with /T /F');
|
|
301
|
+
});
|
|
302
|
+
|
|
303
|
+
test('role bookkeeping: reviewer path records spawned pid into role state', () => {
|
|
304
|
+
const body = getFuncBody('runReviewerRole');
|
|
305
|
+
assert.ok(/onSpawn:\s*\(pid\)\s*=>\s*updateRoleState\(paths,\s*role,\s*\{\s*lastPid:\s*pid\s*\}\)/.test(body),
|
|
306
|
+
'runReviewerRole must persist reviewer pid for stale cleanup');
|
|
307
|
+
});
|
|
308
|
+
|
|
309
|
+
test('stale and abort cleanup: recorded role processes are cleaned up', () => {
|
|
310
|
+
const reconcileBody = getFuncBody('reconcileRunState');
|
|
311
|
+
const abortBody = getFuncBody('abortRun');
|
|
312
|
+
assert.ok(/cleanupRecordedRoleProcesses/.test(reconcileBody),
|
|
313
|
+
'reconcileRunState must clean up recorded role processes');
|
|
314
|
+
assert.ok(/cleanupRecordedRoleProcesses/.test(abortBody),
|
|
315
|
+
'abortRun must clean up recorded role processes');
|
|
316
|
+
});
|
|
317
|
+
|
|
318
|
+
test('global reviewer: required report must be non-empty, not just present', () => {
|
|
319
|
+
const body = getFuncBody('decideAndPersist');
|
|
320
|
+
assert.ok(/reportExistsAndNonEmpty\(globalReviewerPath\)/.test(body),
|
|
321
|
+
'required global reviewer report must be validated as non-empty');
|
|
322
|
+
});
|
|
323
|
+
|
|
324
|
+
test('global reviewer: empty report must not be read as decision input', () => {
|
|
325
|
+
const body = getFuncBody('decideAndPersist');
|
|
326
|
+
assert.ok(/reportExistsAndNonEmpty\(globalReviewerPath\)/.test(body),
|
|
327
|
+
'decision input must only load non-empty global reviewer reports');
|
|
328
|
+
});
|
|
329
|
+
|
|
330
|
+
test('global reviewer runtime: failed reviewer does not get auto-promoted to completed without a report', () => {
|
|
331
|
+
const body = getFuncBody('executeStage');
|
|
332
|
+
assert.ok(/if \(reportExistsAndNonEmpty\(grReportPath\)\) \{\s*updateRoleState\(paths, 'global_reviewer', \{\s*status: 'completed'/.test(body.replace(/\r?\n/g, ' ')),
|
|
333
|
+
'global reviewer should only be marked completed when a non-empty report exists');
|
|
334
|
+
});
|
|
335
|
+
|
|
336
|
+
test('resume path reconciles stale runs before switching back to running', () => {
|
|
337
|
+
const body = getFuncBody('loadOrInitState');
|
|
338
|
+
assert.ok(/const state = reconcileRunState\(runDir, readJson\(sprintFile\)\)/.test(body),
|
|
339
|
+
'resume should reconcile stale state before changing status to running');
|
|
340
|
+
});
|
|
341
|
+
|
|
342
|
+
test('main loop does not redundantly reconcile after loadOrInitState', () => {
|
|
343
|
+
const body = getFuncBody('main');
|
|
344
|
+
assert.equal(/reconcileRunState\(runDir, state\)/.test(body), false,
|
|
345
|
+
'main should rely on loadOrInitState reconciliation instead of repeating it');
|
|
346
|
+
});
|
|
347
|
+
|
|
348
|
+
test('cleanup bookkeeping: recorded role pids are only cleared after successful termination', () => {
|
|
349
|
+
const body = getFuncBody('cleanupRecordedRoleProcesses');
|
|
350
|
+
assert.ok(/const terminated = terminateProcessTree/.test(body),
|
|
351
|
+
'cleanup should capture termination success');
|
|
352
|
+
assert.ok(/if \(terminated\)/.test(body),
|
|
353
|
+
'cleanup should only clear pid bookkeeping after successful termination');
|
|
354
|
+
});
|
|
355
|
+
|
|
356
|
+
test('cleanup bookkeeping: worktree is only cleared after successful git removal', () => {
|
|
357
|
+
const body = getFuncBody('cleanupWorktree');
|
|
358
|
+
assert.ok(/if \(result\.status === 0\)/.test(body),
|
|
359
|
+
'cleanupWorktree should check git exit status');
|
|
360
|
+
assert.ok(/state\.worktree = null/.test(body),
|
|
361
|
+
'cleanupWorktree must still clear state on success');
|
|
362
|
+
});
|
|
363
|
+
|
|
364
|
+
test('archive filter excludes prompt scratch, runtime scratch, and worktrees', () => {
|
|
365
|
+
const body = getArchiveFuncBody('shouldArchiveEntry');
|
|
366
|
+
assert.ok(/\.ai-sprint-prompt-/.test(body), 'archive filter must exclude prompt scratch files');
|
|
367
|
+
assert.ok(/worktrees/.test(body), 'archive filter must exclude worktree directories');
|
|
368
|
+
assert.ok(/runtime/.test(body), 'archive filter should exclude runtime scratch directories');
|
|
369
|
+
});
|
|
370
|
+
|
|
371
|
+
test('archive capture uses latest stage git-status.json instead of repo-wide git diff', () => {
|
|
372
|
+
const body = getArchiveFuncBody('captureGitInfo');
|
|
373
|
+
assert.ok(/findLatestStageGitStatus/.test(body),
|
|
374
|
+
'archive capture should read stage-local git-status.json');
|
|
375
|
+
assert.equal(/spawnSync\('git'/.test(body), false,
|
|
376
|
+
'archive capture should not shell out to repo-wide git commands anymore');
|
|
377
|
+
});
|
|
378
|
+
|
|
379
|
+
// ---------------------------------------------------------------------------
|
|
380
|
+
// P1-3: Base ref selection robustness
|
|
381
|
+
// ---------------------------------------------------------------------------
|
|
382
|
+
|
|
383
|
+
test('P1-3: ensureWorktree has robust base ref fallback chain', () => {
|
|
384
|
+
const body = getFuncBody('ensureWorktree');
|
|
385
|
+
|
|
386
|
+
// Must have a resolveBaseRef or similar function with multiple candidates
|
|
387
|
+
const hasResolveFunction = /resolveBaseRef|candidates\s*=\s*\[/.test(body);
|
|
388
|
+
assert.ok(hasResolveFunction, 'ensureWorktree must have a resolve function with fallback candidates');
|
|
389
|
+
|
|
390
|
+
// Must try at least: spec.branch, origin/{branch}, HEAD, main
|
|
391
|
+
const hasLocalBranch = /spec\.branch|baseBranch/.test(body);
|
|
392
|
+
const hasRemoteBranch = /origin\/.*branch/.test(body);
|
|
393
|
+
const hasHead = /HEAD/.test(body);
|
|
394
|
+
const hasMain = /main/.test(body);
|
|
395
|
+
|
|
396
|
+
assert.ok(hasLocalBranch, 'must try local branch from spec');
|
|
397
|
+
assert.ok(hasRemoteBranch, 'must try remote branch origin/{branch}');
|
|
398
|
+
assert.ok(hasHead, 'must fallback to HEAD');
|
|
399
|
+
assert.ok(hasMain, 'must have final fallback to main');
|
|
400
|
+
});
|
|
401
|
+
|
|
402
|
+
test('P1-3: base ref resolution logs which candidate was selected', () => {
|
|
403
|
+
const body = getFuncBody('ensureWorktree');
|
|
404
|
+
|
|
405
|
+
// Should log when using a fallback (not the first choice)
|
|
406
|
+
const logsFallback = /appendTimeline.*Base ref resolved to|appendTimeline.*resolved to/.test(body);
|
|
407
|
+
assert.ok(logsFallback, 'must log when using a fallback base ref');
|
|
408
|
+
});
|
|
409
|
+
|
|
410
|
+
// ---------------------------------------------------------------------------
|
|
411
|
+
// Dynamic timeout: progress detection
|
|
412
|
+
// ---------------------------------------------------------------------------
|
|
413
|
+
|
|
414
|
+
test('dynamic timeout: checkProgressEvidence checks worklog mtime', () => {
|
|
415
|
+
const body = getFuncBody('checkProgressEvidence');
|
|
416
|
+
assert.ok(/worklogPath/.test(body), 'must check worklog path');
|
|
417
|
+
assert.ok(/mtimeMs|stat\.mtime/.test(body), 'must check file modification time');
|
|
418
|
+
});
|
|
419
|
+
|
|
420
|
+
test('dynamic timeout: checkProgressEvidence checks stdout growth', () => {
|
|
421
|
+
const body = getFuncBody('checkProgressEvidence');
|
|
422
|
+
assert.ok(/lastStdoutLength|currentStdout/.test(body), 'must accept stdout length params');
|
|
423
|
+
assert.ok(/stdout.*length|length.*stdout/.test(body), 'must compare stdout lengths');
|
|
424
|
+
});
|
|
425
|
+
|
|
426
|
+
test('dynamic timeout: checkProgressEvidence checks git changes', () => {
|
|
427
|
+
const body = getFuncBody('checkProgressEvidence');
|
|
428
|
+
assert.ok(/git.*diff|diff.*name-only/.test(body), 'must check git diff for recent changes');
|
|
429
|
+
assert.ok(/worktreePath/.test(body), 'must use worktreePath for git diff');
|
|
430
|
+
});
|
|
431
|
+
|
|
432
|
+
test('dynamic timeout: runAgentWithProgressCheck exists and has soft/hard timeout params', () => {
|
|
433
|
+
const body = getFuncBody('runAgentWithProgressCheck');
|
|
434
|
+
assert.ok(/softTimeoutRatio|softTimeoutSeconds/.test(body), 'must have soft timeout param');
|
|
435
|
+
assert.ok(/hardTimeoutSeconds|hardTimeout|hardTimer/.test(body), 'must have hard timeout param');
|
|
436
|
+
assert.ok(/extensionSeconds|maxExtensions/.test(body), 'must have extension params');
|
|
437
|
+
});
|
|
438
|
+
|
|
439
|
+
test('dynamic timeout: progress check extends timeout when progress detected', () => {
|
|
440
|
+
const body = getFuncBody('runAgentWithProgressCheck');
|
|
441
|
+
|
|
442
|
+
// Must check for progress and conditionally extend
|
|
443
|
+
const hasProgressCheck = /checkProgressEvidence/.test(body);
|
|
444
|
+
const hasExtension = /extensionsUsed\+\+|extensionsUsed\s*\+=\s*1/.test(body);
|
|
445
|
+
|
|
446
|
+
assert.ok(hasProgressCheck, 'must call checkProgressEvidence');
|
|
447
|
+
assert.ok(hasExtension, 'must increment extensionsUsed when progress detected');
|
|
448
|
+
});
|
|
449
|
+
|
|
450
|
+
test('dynamic timeout: logs extension events to timeline', () => {
|
|
451
|
+
const body = getFuncBody('runAgentWithProgressCheck');
|
|
452
|
+
assert.ok(/appendTimeline.*extend|appendTimeline.*progress/.test(body),
|
|
453
|
+
'must log extension events to timeline');
|
|
454
|
+
});
|
|
455
|
+
|
|
456
|
+
test('dynamic timeout: respects maxExtensions limit', () => {
|
|
457
|
+
const body = getFuncBody('runAgentWithProgressCheck');
|
|
458
|
+
assert.ok(/extensionsUsed\s*<\s*maxExtensions|extensionsUsed.*>=.*maxExtensions/.test(body),
|
|
459
|
+
'must check extensionsUsed against maxExtensions');
|
|
460
|
+
});
|
|
461
|
+
|
|
462
|
+
test('dynamic timeout: extension window scales with timeoutSeconds instead of fixed 300s', () => {
|
|
463
|
+
const body = getFuncBody('runAgentWithProgressCheck');
|
|
464
|
+
assert.ok(/scaledExtensionSeconds/.test(body),
|
|
465
|
+
'runAgentWithProgressCheck should derive a scaled extension window');
|
|
466
|
+
assert.ok(/Math\.max\(30,\s*Math\.min\(300,\s*Math\.floor\(timeoutSeconds \/ 2\)\)\)/.test(body),
|
|
467
|
+
'extension window should scale from timeoutSeconds with bounded defaults');
|
|
468
|
+
});
|
|
469
|
+
|
|
470
|
+
// ---------------------------------------------------------------------------
|
|
471
|
+
// Isolation Report: iflow writes to tmp/sprint-agent/{runId}/{stage}-{role}/
|
|
472
|
+
// Phase 2: Uses runId directly, not fragile timestamp extraction
|
|
473
|
+
// ---------------------------------------------------------------------------
|
|
474
|
+
|
|
475
|
+
test('isolation: getIsolationDir uses runId for unique isolation path', () => {
  const runId = '2026-04-03T12-34-56-789Z-test-task';
  const dir = getIsolationDir(runId, 'implement', 'producer');
  // Path must be rooted under tmp/sprint-agent and keyed by runId + stage-role.
  assert.ok(dir.includes('tmp'), 'must include tmp');
  assert.ok(dir.includes('sprint-agent'), 'must include sprint-agent');
  assert.ok(dir.includes(runId), 'must include runId');
  assert.ok(dir.includes('implement-producer'), 'must include stage-role');
});

test('isolation: different runs have different isolation directories', () => {
  const first = getIsolationDir('2026-04-03T12-34-56-789Z-task-a', 'implement', 'producer');
  const second = getIsolationDir('2026-04-03T12-34-56-789Z-task-b', 'implement', 'producer');
  assert.notEqual(first, second, 'different runIds must produce different isolation dirs');
});

test('isolation: findIsolationReport returns null when runId is null', () => {
  const found = findIsolationReport({ runId: null, stageName: 'implement', role: 'producer', reportFilename: 'producer.md' });
  assert.equal(found, null, 'must return null when runId is null');
});

test('isolation: findIsolationReport returns null for non-existent isolation', () => {
  const found = findIsolationReport({ runId: 'nonexistent-run-id', stageName: 'implement', role: 'producer', reportFilename: 'producer.md' });
  assert.equal(found, null, 'must return null for non-existent isolation dir');
});
|
|
501
|
+
|
|
502
|
+
test('isolation: findIsolationReport finds report in isolation directory', () => {
  const scratch = fs.mkdtempSync(path.join(os.tmpdir(), 'isolation-test-'));
  try {
    const runId = 'test-run-id';
    const stageName = 'implement';
    const role = 'producer';
    const isolationDir = getIsolationDir(runId, stageName, role);

    // Seed the isolation directory with a minimal report file.
    fs.mkdirSync(isolationDir, { recursive: true });
    fs.writeFileSync(path.join(isolationDir, 'producer.md'), `# Report\n\n## SUMMARY\nTask completed.\n`);

    const found = findIsolationReport({ runId, stageName, role, reportFilename: 'producer.md' });
    assert.ok(found, 'must find report in isolation dir');
    assert.ok(found.includes('producer.md'), 'result path must include report filename');
  } finally {
    fs.rmSync(scratch, { recursive: true, force: true });
  }
});

test('isolation: isIsolationCollectAllowed respects allowlist', () => {
  // Report files that must pass the allowlist.
  for (const name of ['producer.md', 'reviewer-a.md', 'reviewer-b.md', 'global-reviewer.md', 'report.md']) {
    assert.equal(isIsolationCollectAllowed(name), true, `${name} must be allowed`);
  }
  // Non-allowlist files must be rejected.
  for (const name of ['session.log', 'state.json', 'tmp.txt']) {
    assert.equal(isIsolationCollectAllowed(name), false, `${name} must NOT be allowed`);
  }
});
|
|
534
|
+
|
|
535
|
+
test('isolation: collectIsolationArtifacts only collects allowed files', () => {
  const scratch = fs.mkdtempSync(path.join(os.tmpdir(), 'isolation-test-'));
  try {
    const runId = 'test-collect-allowed';
    const stageName = 'implement';
    const role = 'producer';
    const isolationDir = getIsolationDir(runId, stageName, role);
    const stageDir = path.join(scratch, 'stages', 'implement');

    fs.mkdirSync(isolationDir, { recursive: true });
    fs.mkdirSync(stageDir, { recursive: true });

    // Seed the isolation dir with an allowlisted report.
    fs.writeFileSync(path.join(isolationDir, 'producer.md'), `# Report\n\n## SUMMARY\nTask completed.\n`);

    const result = collectIsolationArtifacts({
      runId,
      stageName,
      role,
      stageDir,
      reportFilename: 'producer.md',
      runDir: scratch,
    });

    assert.deepEqual(result.collected, ['producer.md'], 'producer.md must be collected');
    assert.ok(fs.existsSync(path.join(stageDir, 'producer.md')), 'report must exist in stage dir');
  } finally {
    fs.rmSync(scratch, { recursive: true, force: true });
  }
});

test('isolation: collectIsolationArtifacts skips non-allowlist files', () => {
  const scratch = fs.mkdtempSync(path.join(os.tmpdir(), 'isolation-test-'));
  try {
    const runId = 'test-collect-skip';
    const stageName = 'implement';
    const role = 'producer';
    const isolationDir = getIsolationDir(runId, stageName, role);
    const stageDir = path.join(scratch, 'stages', 'implement');

    fs.mkdirSync(isolationDir, { recursive: true });
    fs.mkdirSync(stageDir, { recursive: true });

    // Request collection of a file that is deliberately NOT allowlisted.
    const result = collectIsolationArtifacts({
      runId,
      stageName,
      role,
      stageDir,
      reportFilename: 'session.log', // NOT in allowlist
      runDir: scratch,
    });

    assert.deepEqual(result.collected, [], 'session.log must NOT be collected');
    assert.deepEqual(result.skipped, ['session.log'], 'session.log must be in skipped list');
  } finally {
    fs.rmSync(scratch, { recursive: true, force: true });
  }
});
|
|
595
|
+
|
|
596
|
+
test('isolation: different runs do NOT share isolation lookup', () => {
  const scratch = fs.mkdtempSync(path.join(os.tmpdir(), 'isolation-test-'));
  try {
    const stageName = 'implement';
    const role = 'producer';

    // Seed an isolation report for run-1 only.
    const firstRunDir = getIsolationDir('run-1', stageName, role);
    fs.mkdirSync(firstRunDir, { recursive: true });
    fs.writeFileSync(path.join(firstRunDir, 'producer.md'), `# Report\n\n## SUMMARY\nRun 1 report.\n`);

    // run-1 resolves its own report...
    const foundForRun1 = findIsolationReport({ runId: 'run-1', stageName, role, reportFilename: 'producer.md' });
    assert.ok(foundForRun1, 'run-1 must find its isolation report');

    // ...while run-2 must see nothing.
    const foundForRun2 = findIsolationReport({ runId: 'run-2', stageName, role, reportFilename: 'producer.md' });
    assert.equal(foundForRun2, null, 'run-2 must NOT find run-1 isolation report');
  } finally {
    fs.rmSync(scratch, { recursive: true, force: true });
  }
});

test('isolation: ISOLATION_COLLECT_ALLOWLIST contains expected files', () => {
  assert.ok(Array.isArray(ISOLATION_COLLECT_ALLOWLIST), 'allowlist must be an array');
  for (const name of ['producer.md', 'reviewer-a.md', 'reviewer-b.md', 'global-reviewer.md', 'report.md']) {
    assert.ok(ISOLATION_COLLECT_ALLOWLIST.includes(name), `must include ${name}`);
  }
  // Non-report files must stay off the allowlist.
  for (const name of ['session.log', 'state.json']) {
    assert.equal(ISOLATION_COLLECT_ALLOWLIST.includes(name), false, `must NOT include ${name}`);
  }
});
|
|
632
|
+
|
|
633
|
+
// ---------------------------------------------------------------------------
|
|
634
|
+
// Validation Schema & Persistence Regression Tests
|
|
635
|
+
// ---------------------------------------------------------------------------
|
|
636
|
+
|
|
637
|
+
test('validation schema: decision.md uses role-level errorSummary, not .errors', () => {
  const source = getFuncBody('decideAndPersist');
  // Role validation objects expose errorSummary/missingSections/invalidFields;
  // a `.errors` field does not exist — guard against regressions to that shape.
  assert.equal(
    /producer\.errors|reviewerA\.errors|reviewerB\.errors/.test(source), false,
    'decision.md must NOT read non-existent .errors field on role validation objects');
  assert.ok(
    /formatRoleValidation|errorSummary|missingSections|invalidFields/.test(source),
    'decision.md must use canonical validation fields: errorSummary, missingSections, invalidFields');
});
|
|
646
|
+
|
|
647
|
+
test('validation schema: scorecard.json uses errorSummary not errors array', () => {
  const body = getFuncBody('decideAndPersist');
  // Check the validation object in scorecard
  const validationMatch = body.match(/validation:\s*\{[^}]+\}/);
  assert.ok(validationMatch, 'scorecard must have validation object');
  // Must have errorSummary at top level.
  // NOTE: use the [?] character class for the optional-chaining '?' — consistent
  // with the role-level test below. The previous spelling '\?\.\errorSummary'
  // relied on the Annex B identity escape '\e' (a SyntaxError under the 'u'
  // flag); the match behavior here is identical.
  assert.ok(/errorSummary:\s*decision\.validation[?]\.errorSummary/.test(body),
    'scorecard validation must include top-level errorSummary');
  // Must NOT have legacy errors array as primary source
  assert.equal(/errors:\s*decision\.validation[?]\.errors\s*\?\?/.test(body), false,
    'scorecard should not use legacy errors array as validation source');
});
|
|
659
|
+
|
|
660
|
+
test('validation schema: scorecard includes role-level validation objects', () => {
  const source = getFuncBody('decideAndPersist');
  // Every role must be forwarded with its full validation object.
  // Use [?] character class to match literal question mark in optional chaining.
  for (const role of ['producer', 'reviewerA', 'reviewerB', 'globalReviewer']) {
    const pattern = new RegExp(`${role}:\\s*decision\\.validation[?]\\.${role}`);
    assert.ok(pattern.test(source), `scorecard must include ${role} validation`);
  }
});

test('persistence: outputQuality written to decision.md and scorecard.json', () => {
  const source = getFuncBody('decideAndPersist');
  // decision.md side.
  assert.ok(/Output Quality.*decision\.outputQuality/.test(source),
    'decision.md must include Output Quality field');
  // scorecard.json side.
  assert.ok(/outputQuality:\s*decision\.outputQuality/.test(source),
    'scorecard must include outputQuality field');
});

test('persistence: qualityReasons written to decision.md and scorecard.json', () => {
  const source = getFuncBody('decideAndPersist');
  // decision.md side.
  assert.ok(/qualityReasons|Quality Reasons/.test(source),
    'decision.md must include Quality Reasons section');
  // scorecard.json side.
  assert.ok(/qualityReasons:\s*decision\.qualityReasons/.test(source),
    'scorecard must include qualityReasons field');
});

test('formatRoleValidation: helper function exists and uses canonical fields', () => {
  const source = getFuncBody('formatRoleValidation');
  assert.ok(/missingSections/.test(source), 'formatRoleValidation must use missingSections');
  assert.ok(/invalidFields/.test(source), 'formatRoleValidation must use invalidFields');
  assert.ok(/valid/.test(source), 'formatRoleValidation must check valid status');
});
|
|
700
|
+
|
|
701
|
+
// ---------------------------------------------------------------------------
|
|
702
|
+
// BEHAVIOR-LEVEL PERSISTENCE TESTS
|
|
703
|
+
// Tests that verify actual file writes, not source structure
|
|
704
|
+
// ---------------------------------------------------------------------------
|
|
705
|
+
|
|
706
|
+
/**
 * Creates a fresh, uniquely named temporary directory for one test run.
 * @returns {string} Absolute path of the new directory.
 */
function createTempDir() {
  return fs.mkdtempSync(path.join(os.tmpdir(), 'run-test-'));
}
|
|
710
|
+
|
|
711
|
+
/**
 * Best-effort recursive removal of a temp directory. Failures are deliberately
 * swallowed so cleanup never masks a test's own assertion failure.
 * @param {string} dirPath - Directory to delete.
 */
function cleanupTempDir(dirPath) {
  try {
    fs.rmSync(dirPath, { recursive: true, force: true });
  } catch {
    // Ignore: the directory may already be gone or locked; nothing to do.
  }
}
|
|
716
|
+
|
|
717
|
+
/**
 * Builds a minimal orchestrator state object for decideAndPersist tests.
 * @param {number} [currentRound=1] - Round counter within the current stage.
 * @param {number} [maxRoundsPerStage=3] - Round budget per stage.
 * @param {string} [taskId='test-task'] - Task identifier.
 * @returns {{currentRound: number, maxRoundsPerStage: number, status: string, taskId: string}}
 */
function createMockState(currentRound = 1, maxRoundsPerStage = 3, taskId = 'test-task') {
  return { status: 'running', taskId, currentRound, maxRoundsPerStage };
}
|
|
725
|
+
|
|
726
|
+
/**
 * Writes a minimal task-spec JSON file under `${tmp}/specs` and returns its path.
 * The default spec carries implement-stage section requirements; `extra` fields
 * are merged over the defaults (later keys win).
 * @param {string} tmp - Root temp directory for the test.
 * @param {string} [taskId='test-task'] - Task id; also the spec file basename.
 * @param {object} [extra={}] - Extra top-level spec fields.
 * @returns {string} Path of the written spec file.
 */
function createTempSpec(tmp, taskId = 'test-task', extra = {}) {
  const specDir = path.join(tmp, 'specs');
  fs.mkdirSync(specDir, { recursive: true });
  const implement = {
    requiredProducerSections: ['SUMMARY', 'CHANGES'],
    requiredReviewerSections: ['VERDICT', 'BLOCKERS'],
  };
  const specPath = path.join(specDir, `${taskId}.json`);
  fs.writeFileSync(specPath, JSON.stringify({ taskId, stageCriteria: { implement }, ...extra }));
  return specPath;
}
|
|
743
|
+
|
|
744
|
+
/**
 * Returns a producer report fixture that satisfies the full implement-stage
 * contract: every required section is present and every CONTRACT item is DONE.
 * @returns {string} Markdown report text.
 */
function createValidProducer() {
  const report = `# Producer Report

## SUMMARY
Task completed successfully.

## CHANGES
- Fixed bug in validation logic
- Added new test cases

## EVIDENCE
- All tests pass
- Code review completed

## CODE_EVIDENCE
- src/fix.ts: Fixed validation schema

## KEY_EVENTS
- Bug identified and fixed
- Tests added

## HYPOTHESIS_MATRIX
| Hypothesis | Status | Evidence |
|------------|--------|----------|
| Bug in validation | Confirmed | Fixed in src/fix.ts |

## CONTRACT
- [x] DONE: fix-validation: Fix validation schema issues
- [x] DONE: add-tests: Add regression tests

CHECKS: evidence=gathered;tests=passing

## OPEN_RISKS
- None identified`;
  return report;
}
|
|
779
|
+
|
|
780
|
+
/**
 * Returns a reviewer report fixture with all required sections.
 * @param {string} [verdict='APPROVE'] - Verdict to embed; APPROVE yields an
 *   empty BLOCKERS section, anything else reports a blocker.
 * @returns {string} Markdown report text.
 */
function createValidReviewer(verdict = 'APPROVE') {
  const blockersLine = verdict === 'APPROVE' ? '- None.' : '- Issue found in implementation';
  return `# Reviewer Report

## VERDICT
VERDICT: ${verdict}

## BLOCKERS
${blockersLine}

## FINDINGS
- Code quality is good
- Tests are comprehensive

## CODE_EVIDENCE
- Reviewed src/fix.ts

## HYPOTHESIS_MATRIX
| Hypothesis | Status | Evidence |
|------------|--------|----------|
| Bug fixed | Confirmed | Tests pass |

## NEXT_FOCUS
Continue with next task

CHECKS: criteria=met;blockers=0

## CHECKS
- All checks passed`;
}
|
|
809
|
+
|
|
810
|
+
/**
 * Returns a producer report that deliberately violates the contract:
 * SUMMARY, EVIDENCE, CODE_EVIDENCE, KEY_EVENTS, HYPOTHESIS_MATRIX, CHECKS and
 * OPEN_RISKS are all missing, and the single CONTRACT item is still TODO.
 * @returns {string} Markdown report text (ends with a trailing newline).
 */
function createInvalidProducer() {
  const report = `# Producer Report

## CHANGES
- Some changes made

## CONTRACT
- [ ] TODO: missing-deliverable: This deliverable is not done
`;
  return report;
}
|
|
821
|
+
|
|
822
|
+
/**
 * Returns a reviewer report that deliberately violates the contract:
 * VERDICT, BLOCKERS, CODE_EVIDENCE, HYPOTHESIS_MATRIX, NEXT_FOCUS and CHECKS
 * are all missing — only FINDINGS is present.
 * @returns {string} Markdown report text (ends with a trailing newline).
 */
function createInvalidReviewer() {
  const report = `# Reviewer Report

## FINDINGS
- Found some issues
`;
  return report;
}
|
|
830
|
+
|
|
831
|
+
test('behavior: decision.md contains validation details when report is invalid', () => {
  const runRoot = createTempDir();
  try {
    const stageDir = path.join(runRoot, 'stages', 'implement');
    fs.mkdirSync(stageDir, { recursive: true });

    const specPath = createTempSpec(runRoot);
    const [producerPath, reviewerAPath, reviewerBPath, decisionPath, scorecardPath] =
      ['producer.md', 'reviewer-a.md', 'reviewer-b.md', 'decision.md', 'scorecard.json']
        .map((name) => path.join(stageDir, name));

    // Invalid producer, valid reviewers: the failure must come from the producer.
    fs.writeFileSync(producerPath, createInvalidProducer());
    fs.writeFileSync(reviewerAPath, createValidReviewer('APPROVE'));
    fs.writeFileSync(reviewerBPath, createValidReviewer('APPROVE'));

    decideAndPersist({
      runDir: runRoot,
      stageName: 'implement',
      stageDir,
      decisionPath,
      scorecardPath,
      producerPath,
      reviewerAPath,
      reviewerBPath,
      globalReviewerPath: null,
      state: { ...createMockState(), specPath },
    });

    assert.ok(fs.existsSync(decisionPath), 'decision.md should be written');
    const decision = fs.readFileSync(decisionPath, 'utf8');

    // Role-level detail must survive into the rendered decision markdown.
    assert.ok(/## Validation/.test(decision), 'decision.md must have Validation section');
    assert.ok(/Contract Valid: false/.test(decision), 'decision.md must show Contract Valid: false for invalid reports');
    assert.ok(/Producer: \[FAIL\]/.test(decision), 'decision.md must show Producer validation failed');
    assert.ok(/missing: SUMMARY/.test(decision), 'decision.md must show missing SUMMARY section');
  } finally {
    cleanupTempDir(runRoot);
  }
});
|
|
875
|
+
|
|
876
|
+
test('behavior: missing reviewer report persists reviewerFailures details', () => {
  const runRoot = createTempDir();
  try {
    const stageDir = path.join(runRoot, 'stages', 'implement');
    fs.mkdirSync(stageDir, { recursive: true });

    const specPath = createTempSpec(runRoot);
    const [producerPath, reviewerAPath, reviewerBPath, decisionPath, scorecardPath] =
      ['producer.md', 'reviewer-a.md', 'reviewer-b.md', 'decision.md', 'scorecard.json']
        .map((name) => path.join(stageDir, name));

    // reviewer-b.md is deliberately never written; its failure arrives via reviewerFailures.
    fs.writeFileSync(producerPath, createValidProducer());
    fs.writeFileSync(reviewerAPath, createValidReviewer('APPROVE'));

    decideAndPersist({
      runDir: runRoot,
      stageName: 'implement',
      stageDir,
      decisionPath,
      scorecardPath,
      producerPath,
      reviewerAPath,
      reviewerBPath,
      globalReviewerPath: null,
      reviewerFailures: [
        {
          role: 'reviewer_b',
          summary: 'primary=opencode/test-model: Agent opencode failed with status 1 | fallback=iflow/glm-4.7: timed out',
          attempts: [
            { label: 'primary', agent: 'opencode', model: 'test-model', error: 'Agent opencode failed with status 1', timedOut: false },
            { label: 'fallback', agent: 'iflow', model: 'glm-4.7', error: 'Agent iflow timed out', timedOut: true },
          ],
        },
      ],
      state: { ...createMockState(), specPath },
    });

    const decision = fs.readFileSync(decisionPath, 'utf8');
    const scorecard = JSON.parse(fs.readFileSync(scorecardPath, 'utf8'));

    assert.ok(decision.includes('reviewer_b'), 'decision should mention missing reviewer_b');
    assert.ok(decision.includes('primary=opencode/test-model'), 'decision should include reviewer failure summary');
    assert.ok(Array.isArray(scorecard.reviewerFailures), 'scorecard should persist reviewerFailures');
    assert.equal(scorecard.reviewerFailures[0].role, 'reviewer_b');
    assert.ok(scorecard.reviewerFailures[0].summary.includes('fallback=iflow/glm-4.7'));
  } finally {
    cleanupTempDir(runRoot);
  }
});
|
|
927
|
+
|
|
928
|
+
test('behavior: scorecard.json validation.errorSummary matches role-level validation', () => {
  const runRoot = createTempDir();
  try {
    const stageDir = path.join(runRoot, 'stages', 'implement');
    fs.mkdirSync(stageDir, { recursive: true });

    const specPath = createTempSpec(runRoot);
    const [producerPath, reviewerAPath, reviewerBPath, decisionPath, scorecardPath] =
      ['producer.md', 'reviewer-a.md', 'reviewer-b.md', 'decision.md', 'scorecard.json']
        .map((name) => path.join(stageDir, name));

    // Invalid producer, valid reviewers: producer drives the validation failure.
    fs.writeFileSync(producerPath, createInvalidProducer());
    fs.writeFileSync(reviewerAPath, createValidReviewer('APPROVE'));
    fs.writeFileSync(reviewerBPath, createValidReviewer('APPROVE'));

    decideAndPersist({
      runDir: runRoot,
      stageName: 'implement',
      stageDir,
      decisionPath,
      scorecardPath,
      producerPath,
      reviewerAPath,
      reviewerBPath,
      globalReviewerPath: null,
      state: { ...createMockState(), specPath },
    });

    assert.ok(fs.existsSync(scorecardPath), 'scorecard.json should be written');
    const scorecard = JSON.parse(fs.readFileSync(scorecardPath, 'utf8'));

    // Validation object shape: top-level flags plus a per-role breakdown.
    assert.ok(scorecard.validation, 'scorecard must have validation object');
    assert.equal(scorecard.validation.valid, false, 'validation.valid must be false for invalid reports');
    assert.ok(scorecard.validation.errorSummary, 'validation must have errorSummary when invalid');
    assert.ok(scorecard.validation.producer, 'validation must have producer object');
    assert.equal(scorecard.validation.producer.valid, false, 'producer validation must be false');
    assert.ok(Array.isArray(scorecard.validation.producer.missingSections), 'producer must have missingSections array');
    assert.ok(scorecard.validation.producer.missingSections.includes('SUMMARY'), 'missingSections must include SUMMARY');

    // The aggregated summary must agree with the role-level detail.
    assert.ok(
      scorecard.validation.errorSummary.includes('SUMMARY') || scorecard.validation.producer.errorSummary?.includes('SUMMARY'),
      'errorSummary must reference the missing section'
    );
  } finally {
    cleanupTempDir(runRoot);
  }
});
|
|
981
|
+
|
|
982
|
+
test('behavior: outputQuality and qualityReasons persisted to files', () => {
  const runRoot = createTempDir();
  try {
    const stageDir = path.join(runRoot, 'stages', 'implement');
    fs.mkdirSync(stageDir, { recursive: true });

    const specPath = createTempSpec(runRoot);
    const [producerPath, reviewerAPath, reviewerBPath, decisionPath, scorecardPath] =
      ['producer.md', 'reviewer-a.md', 'reviewer-b.md', 'decision.md', 'scorecard.json']
        .map((name) => path.join(stageDir, name));

    // All reports valid: decision should persist quality data regardless.
    fs.writeFileSync(producerPath, createValidProducer());
    fs.writeFileSync(reviewerAPath, createValidReviewer('APPROVE'));
    fs.writeFileSync(reviewerBPath, createValidReviewer('APPROVE'));

    decideAndPersist({
      runDir: runRoot,
      stageName: 'implement',
      stageDir,
      decisionPath,
      scorecardPath,
      producerPath,
      reviewerAPath,
      reviewerBPath,
      globalReviewerPath: null,
      state: { ...createMockState(), specPath },
    });

    // decision.md side.
    const decision = fs.readFileSync(decisionPath, 'utf8');
    assert.ok(/Output Quality:/.test(decision), 'decision.md must have Output Quality field');

    // scorecard.json side.
    const scorecard = JSON.parse(fs.readFileSync(scorecardPath, 'utf8'));
    assert.ok('outputQuality' in scorecard, 'scorecard must have outputQuality field');
    assert.ok(Array.isArray(scorecard.qualityReasons), 'scorecard must have qualityReasons array');
  } finally {
    cleanupTempDir(runRoot);
  }
});
|
|
1025
|
+
|
|
1026
|
+
test('behavior: handoff.json generated when outcome is revise', () => {
  const runRoot = createTempDir();
  try {
    const stageDir = path.join(runRoot, 'stages', 'implement');
    fs.mkdirSync(stageDir, { recursive: true });

    const specPath = createTempSpec(runRoot);
    const [producerPath, reviewerAPath, reviewerBPath, decisionPath, scorecardPath, handoffPath] =
      ['producer.md', 'reviewer-a.md', 'reviewer-b.md', 'decision.md', 'scorecard.json', 'handoff.json']
        .map((name) => path.join(stageDir, name));

    // Valid producer, but both reviewers demand revision.
    fs.writeFileSync(producerPath, createValidProducer());
    fs.writeFileSync(reviewerAPath, createValidReviewer('REVISE'));
    fs.writeFileSync(reviewerBPath, createValidReviewer('REVISE'));

    decideAndPersist({
      runDir: runRoot,
      stageName: 'implement',
      stageDir,
      decisionPath,
      scorecardPath,
      producerPath,
      reviewerAPath,
      reviewerBPath,
      globalReviewerPath: null,
      state: { ...createMockState(), specPath },
    });

    assert.ok(fs.existsSync(handoffPath), 'handoff.json should be written when outcome is revise');

    const handoff = JSON.parse(fs.readFileSync(handoffPath, 'utf8'));
    assert.ok(Array.isArray(handoff.blockers), 'handoff must have blockers array');
    assert.equal(handoff.stageName, 'implement', 'handoff must have correct stageName');
    assert.equal(handoff.round, 1, 'handoff must have correct round');
    assert.ok(handoff.generatedAt, 'handoff must have generatedAt timestamp');
  } finally {
    cleanupTempDir(runRoot);
  }
});
|
|
1070
|
+
|
|
1071
|
+
test('behavior: handoff.json includes work-unit metadata and compact carry-forward summary', () => {
  const runRoot = createTempDir();
  try {
    const stageDir = path.join(runRoot, 'stages', 'implement');
    fs.mkdirSync(stageDir, { recursive: true });

    // Spec carries a single implement-stage work unit for the handoff to pick up.
    const specPath = createTempSpec(runRoot, 'work-unit-task', {
      id: 'work-unit-task',
      title: 'Work unit task',
      workUnits: {
        implement: [
          {
            workUnitId: 'wu-1',
            workUnitGoal: 'Tighten continuation context',
            allowedFiles: ['scripts/ai-sprint-orchestrator/run.mjs'],
            unitChecks: ['node --test run.test.mjs'],
            unitDeliverables: ['carry-forward summary'],
            unitSummary: 'Initial unit summary',
            carryForwardSummary: 'Initial carry-forward summary',
          },
        ],
      },
    });
    const [producerPath, reviewerAPath, reviewerBPath, decisionPath, scorecardPath, handoffPath] =
      ['producer.md', 'reviewer-a.md', 'reviewer-b.md', 'decision.md', 'scorecard.json', 'handoff.json']
        .map((name) => path.join(stageDir, name));

    fs.writeFileSync(producerPath, createValidProducer());
    fs.writeFileSync(reviewerAPath, createValidReviewer('REVISE'));
    fs.writeFileSync(reviewerBPath, createValidReviewer('REVISE'));

    decideAndPersist({
      runDir: runRoot,
      stageName: 'implement',
      stageDir,
      decisionPath,
      scorecardPath,
      producerPath,
      reviewerAPath,
      reviewerBPath,
      globalReviewerPath: null,
      state: { ...createMockState(), specPath, currentWorkUnitIndex: 0 },
    });

    const handoff = JSON.parse(fs.readFileSync(handoffPath, 'utf8'));
    assert.equal(handoff.workUnitId, 'wu-1');
    assert.equal(handoff.workUnitGoal, 'Tighten continuation context');
    assert.deepEqual(handoff.allowedFiles, ['scripts/ai-sprint-orchestrator/run.mjs']);
    assert.ok(typeof handoff.unitSummary === 'string' && handoff.unitSummary.length > 0);
    assert.ok(typeof handoff.carryForwardSummary === 'string' && handoff.carryForwardSummary.includes('Goal: Tighten continuation context'));
  } finally {
    cleanupTempDir(runRoot);
  }
});
|
|
1128
|
+
|
|
1129
|
+
// Verifies getTaskSpec enforces the minimum work-unit contract: a unit with
// empty unitSummary/carryForwardSummary must be rejected with an error whose
// message mentions the contract.
test('work-unit contract: getTaskSpec rejects missing minimum work-unit fields', () => {
  const tmp = createTempDir();
  try {
    const specPath = createTempSpec(tmp, 'invalid-work-unit', {
      id: 'invalid-work-unit',
      title: 'Invalid work unit',
      workUnits: {
        implement: [
          {
            workUnitId: 'wu-1',
            workUnitGoal: 'Incomplete unit',
            allowedFiles: ['scripts/ai-sprint-orchestrator/run.mjs'],
            unitChecks: ['node --test run.test.mjs'],
            unitDeliverables: ['handoff'],
            // Empty summaries violate the minimum contract and must throw.
            unitSummary: '',
            carryForwardSummary: '',
          },
        ],
      },
    });

    assert.throws(
      () => getTaskSpec('invalid-work-unit', specPath),
      /minimum work-unit contract/i,
    );
  } finally {
    cleanupTempDir(tmp);
  }
});
// Verifies buildStageBrief uses the compact carry-forward summary from the
// previous unit as the default continuation context, rather than embedding
// the (potentially very long) prior decision text.
test('work-unit brief prefers compact carry-forward summary over prior decision text', () => {
  const spec = {
    title: 'Workflow v1.4 foundation',
    stageGoals: { implement: ['Connect work-unit brief'] },
    stageCriteria: { implement: {} },
    context: ['Stay within workflow-only scope.'],
    workUnits: {
      implement: [
        {
          workUnitId: 'wu-brief',
          workUnitGoal: 'Use compact carry-forward first',
          allowedFiles: ['scripts/ai-sprint-orchestrator/lib/task-specs.mjs'],
          unitChecks: ['node --test decision.test.mjs'],
          unitDeliverables: ['brief update'],
          unitSummary: 'Default unit summary',
          carryForwardSummary: 'Default compact carry-forward',
        },
      ],
    },
  };

  const brief = buildStageBrief(
    spec,
    'implement',
    2,
    // Prior decision text — must NOT be the primary continuation context.
    'VERY LONG PRIOR DECISION SHOULD NOT BE PRIMARY',
    {
      carryForwardSummary: 'Compact carry-forward summary wins.',
      unitSummary: 'Unit summary from previous unit.',
    },
    { workUnitIndex: 0 },
  );

  assert.ok(brief.includes('## Active Work Unit'));
  assert.ok(brief.includes('workUnitId: wu-brief'));
  assert.ok(brief.includes('Compact carry-forward summary wins.'));
  assert.ok(brief.includes('Use this compact carry-forward summary as the default continuation context.'));
  assert.equal(brief.includes('VERY LONG PRIOR DECISION SHOULD NOT BE PRIMARY'), false);
});
// Verifies that when both reviewers APPROVE (outcome: advance),
// decideAndPersist does NOT write a handoff.json file.
test('behavior: handoff.json NOT generated when outcome is advance', () => {
  const tmp = createTempDir();
  try {
    const stageDir = path.join(tmp, 'stages', 'implement');
    fs.mkdirSync(stageDir, { recursive: true });

    const specPath = createTempSpec(tmp);
    const producerPath = path.join(stageDir, 'producer.md');
    const reviewerAPath = path.join(stageDir, 'reviewer-a.md');
    const reviewerBPath = path.join(stageDir, 'reviewer-b.md');
    const decisionPath = path.join(stageDir, 'decision.md');
    const scorecardPath = path.join(stageDir, 'scorecard.json');
    const handoffPath = path.join(stageDir, 'handoff.json');

    // Write valid reports with approvals
    fs.writeFileSync(producerPath, createValidProducer());
    fs.writeFileSync(reviewerAPath, createValidReviewer('APPROVE'));
    fs.writeFileSync(reviewerBPath, createValidReviewer('APPROVE'));

    decideAndPersist({
      runDir: tmp,
      stageName: 'implement',
      stageDir,
      decisionPath,
      scorecardPath,
      producerPath,
      reviewerAPath,
      reviewerBPath,
      globalReviewerPath: null,
      state: { ...createMockState(), specPath },
    });

    // Verify handoff.json was NOT written
    assert.equal(fs.existsSync(handoffPath), false, 'handoff.json should NOT be written when outcome is advance');
  } finally {
    cleanupTempDir(tmp);
  }
});
// Verifies formatRoleValidation renders an [OK] line for a valid role and a
// [FAIL] line listing missing sections and invalid fields for an invalid one.
test('behavior: formatRoleValidation produces correct output', () => {
  // Test valid case
  const validResult = formatRoleValidation('Producer', { valid: true });
  assert.deepEqual(validResult, ['- Producer: [OK]']);

  // Test invalid case with missing sections
  const invalidResult = formatRoleValidation('Producer', {
    valid: false,
    missingSections: ['SUMMARY', 'CHANGES'],
    invalidFields: ['CHECKS: invalid format'],
  });
  assert.ok(invalidResult.length > 0, 'should produce output for invalid result');
  assert.ok(invalidResult[0].includes('[FAIL]'), 'should show failure marker');
  assert.ok(invalidResult[0].includes('missing: SUMMARY, CHANGES'), 'should show missing sections');
  assert.ok(invalidResult[0].includes('invalid: CHECKS: invalid format'), 'should show invalid fields');
});
// Verifies decision.md produced by decideAndPersist contains every expected
// section: the Decision header plus Summary, Validation, Blockers, Metrics,
// and Files sections.
test('behavior: decision.md has correct structure with all sections', () => {
  const tmp = createTempDir();
  try {
    const stageDir = path.join(tmp, 'stages', 'implement');
    fs.mkdirSync(stageDir, { recursive: true });

    const specPath = createTempSpec(tmp);
    const producerPath = path.join(stageDir, 'producer.md');
    const reviewerAPath = path.join(stageDir, 'reviewer-a.md');
    const reviewerBPath = path.join(stageDir, 'reviewer-b.md');
    const decisionPath = path.join(stageDir, 'decision.md');
    const scorecardPath = path.join(stageDir, 'scorecard.json');

    fs.writeFileSync(producerPath, createValidProducer());
    fs.writeFileSync(reviewerAPath, createValidReviewer('APPROVE'));
    fs.writeFileSync(reviewerBPath, createValidReviewer('APPROVE'));

    decideAndPersist({
      runDir: tmp,
      stageName: 'implement',
      stageDir,
      decisionPath,
      scorecardPath,
      producerPath,
      reviewerAPath,
      reviewerBPath,
      globalReviewerPath: null,
      state: { ...createMockState(), specPath },
    });

    const decision = fs.readFileSync(decisionPath, 'utf8');

    // Verify all expected sections exist
    assert.ok(/^# Decision/m.test(decision), 'must have Decision header');
    assert.ok(/## Summary/.test(decision), 'must have Summary section');
    assert.ok(/## Validation/.test(decision), 'must have Validation section');
    assert.ok(/## Blockers/.test(decision), 'must have Blockers section');
    assert.ok(/## Metrics/.test(decision), 'must have Metrics section');
    assert.ok(/## Files/.test(decision), 'must have Files section');
  } finally {
    cleanupTempDir(tmp);
  }
});
// Verifies that for a NEEDS_WORK outcome (one REVISE vote) decision.md
// includes a "Next Run Recommendation" section with a recommendation type.
test('persistence: nextRunRecommendation written to decision.md', () => {
  const tmp = createTempDir();
  try {
    const stageDir = path.join(tmp, 'stages', 'implement');
    fs.mkdirSync(stageDir, { recursive: true });

    const specPath = createTempSpec(tmp);
    const producerPath = path.join(stageDir, 'producer.md');
    const reviewerAPath = path.join(stageDir, 'reviewer-a.md');
    const reviewerBPath = path.join(stageDir, 'reviewer-b.md');
    const decisionPath = path.join(stageDir, 'decision.md');
    const scorecardPath = path.join(stageDir, 'scorecard.json');

    fs.writeFileSync(producerPath, createValidProducer());
    fs.writeFileSync(reviewerAPath, createValidReviewer('REVISE')); // Will produce NEEDS_WORK
    fs.writeFileSync(reviewerBPath, createValidReviewer('APPROVE'));

    decideAndPersist({
      runDir: tmp,
      stageName: 'implement',
      stageDir,
      decisionPath,
      scorecardPath,
      producerPath,
      reviewerAPath,
      reviewerBPath,
      globalReviewerPath: null,
      state: { ...createMockState(), specPath },
    });

    const decision = fs.readFileSync(decisionPath, 'utf8');
    // For NEEDS_WORK outcome, nextRunRecommendation should be present
    assert.ok(/## Next Run Recommendation/.test(decision), 'must have Next Run Recommendation section');
    assert.ok(/Type:/.test(decision), 'must have recommendation type');
  } finally {
    cleanupTempDir(tmp);
  }
});
// Verifies that for a NEEDS_WORK outcome the scorecard.json written by
// decideAndPersist carries a non-null nextRunRecommendation object with a
// type string and a reasons array.
test('persistence: nextRunRecommendation written to scorecard.json', () => {
  const tmp = createTempDir();
  try {
    const stageDir = path.join(tmp, 'stages', 'implement');
    fs.mkdirSync(stageDir, { recursive: true });

    const specPath = createTempSpec(tmp);
    const producerPath = path.join(stageDir, 'producer.md');
    const reviewerAPath = path.join(stageDir, 'reviewer-a.md');
    const reviewerBPath = path.join(stageDir, 'reviewer-b.md');
    const decisionPath = path.join(stageDir, 'decision.md');
    const scorecardPath = path.join(stageDir, 'scorecard.json');

    fs.writeFileSync(producerPath, createValidProducer());
    // One REVISE vote drives the NEEDS_WORK outcome.
    fs.writeFileSync(reviewerAPath, createValidReviewer('REVISE'));
    fs.writeFileSync(reviewerBPath, createValidReviewer('APPROVE'));

    decideAndPersist({
      runDir: tmp,
      stageName: 'implement',
      stageDir,
      decisionPath,
      scorecardPath,
      producerPath,
      reviewerAPath,
      reviewerBPath,
      globalReviewerPath: null,
      state: { ...createMockState(), specPath },
    });

    const scorecard = JSON.parse(fs.readFileSync(scorecardPath, 'utf8'));
    assert.ok(scorecard.nextRunRecommendation !== undefined, 'scorecard must have nextRunRecommendation field');
    assert.ok(scorecard.nextRunRecommendation !== null, 'nextRunRecommendation should not be null');
    assert.ok(scorecard.nextRunRecommendation.type, 'nextRunRecommendation must have type');
    assert.ok(Array.isArray(scorecard.nextRunRecommendation.reasons), 'nextRunRecommendation must have reasons array');
  } finally {
    cleanupTempDir(tmp);
  }
});
// Verifies the acceptance checklist reference file exists, uses the current
// CLI flags (no deprecated --spec), and covers the run-result recording
// fields and all four failure classifications.
test('acceptance checklist: file exists and has correct content', () => {
  const checklistPath = path.resolve(__dirname, '..', 'references', 'workflow-v1-acceptance-checklist.md');
  assert.ok(fs.existsSync(checklistPath), 'acceptance checklist file must exist');

  const content = fs.readFileSync(checklistPath, 'utf8');
  // Must not contain the deprecated --spec flag (replaced by --task / --task-spec)
  assert.ok(!content.includes('--spec '), 'must NOT use deprecated --spec flag');
  // Must contain the correct command patterns
  assert.ok(content.includes('--task'), 'must use --task flag');
  // Must have run result recording structure
  assert.ok(content.includes('runId') || content.includes('run-id') || content.includes('Run ID'), 'must have run-id field');
  assert.ok(content.includes('outputQuality'), 'must mention outputQuality');
  assert.ok(content.includes('nextRunRecommendation'), 'must mention nextRunRecommendation');
  // Must have complete failure classification
  assert.ok(content.includes('workflow bug'), 'must classify workflow bug');
  assert.ok(content.includes('agent behavior'), 'must classify agent behavior');
  assert.ok(content.includes('environment'), 'must classify environment issues');
  assert.ok(content.includes('sample-spec') || content.includes('spec issue'), 'must classify spec issues');
});
// Source-level guard: the preflight logic in run.mjs must probe for the
// acpx launcher, not shell out to `which` with agent names.
test('preflight check validates acpx not agent names', () => {
  // The preflight code in run.mjs should check that acpx is available,
  // NOT that agent names like "iflow" or "claude" exist as shell binaries.
  const runPath = path.resolve(__dirname, '..', 'scripts', 'run.mjs');
  const content = fs.readFileSync(runPath, 'utf8');
  // Should NOT use "which" with agent names
  assert.ok(!content.includes("'which', [agentName]"), 'preflight must not check agent names with which');
  assert.ok(!content.includes("'which', [agent"), 'preflight must not check agent binary with which');
  // Should check acpx availability instead
  assert.ok(content.includes('acpx'), 'preflight should reference acpx');
});
// Source-level guard: cleanupAcpxOrphans in run.mjs must derive its
// workspace from spec.workspace (or a worktree), never from the directory
// containing the spec file.
test('cleanupAcpxOrphans does not fallback to spec directory', () => {
  const runPath = path.resolve(__dirname, '..', 'scripts', 'run.mjs');
  const content = fs.readFileSync(runPath, 'utf8');
  // Must NOT use path.dirname(state.specPath) as workspace fallback
  assert.ok(!content.includes('path.dirname(state.specPath)'),
    'cleanupAcpxOrphans must not fallback to spec file directory');
  // Must use workspace or worktree only
  assert.ok(content.includes('spec.workspace') || content.includes('spec?.workspace'),
    'cleanupAcpxOrphans should reference spec.workspace');
});