cclaw-cli 6.13.0 → 6.13.1
- package/dist/artifact-linter/tdd.d.ts +19 -6
- package/dist/artifact-linter/tdd.js +148 -64
- package/dist/content/hooks.js +40 -0
- package/dist/content/skills.js +15 -12
- package/dist/content/stages/tdd.js +7 -7
- package/dist/content/start-command.js +6 -3
- package/dist/delegation.d.ts +9 -0
- package/dist/delegation.js +49 -1
- package/dist/install.js +27 -2
- package/dist/internal/plan-split-waves.d.ts +46 -0
- package/dist/internal/plan-split-waves.js +225 -6
- package/package.json +1 -1
package/dist/artifact-linter/tdd.d.ts
CHANGED
@@ -1,4 +1,4 @@
-import type { DelegationEntry } from "../delegation.js";
+import type { DelegationEntry, DelegationEvent } from "../delegation.js";
 import { type LintFinding, type StageLintContext } from "./shared.js";
 /**
  * v6.11.0 — TDD stage linter.
@@ -52,15 +52,28 @@ interface RedCheckpointResult {
     ok: boolean;
     details: string;
 }
+/**
+ * v6.13.1 — detect single-slice dispatch when the merged wave plan
+ * requires parallel ready slice-implementer fan-out.
+ */
+export declare function evaluateWavePlanDispatchIgnored(params: {
+    artifactsDir: string;
+    planMarkdown: string;
+    runEvents: DelegationEvent[];
+    runId: string;
+    slices: Map<string, DelegationEntry[]>;
+    legacyContinuation: boolean;
+}): Promise<LintFinding | null>;
 /**
  * v6.12.0 Phase W — RED checkpoint enforcement. The wave protocol
  * requires ALL Phase A REDs to land before ANY Phase B GREEN starts.
  * The rule is enforced on a per-wave basis, where a wave is defined by
- *
- *
- *
- * `phase=red` events with no other-phase
- * fires only when the implicit wave has
+ * the managed `## Parallel Execution Plan` block in `05-plan.md` and/or
+ * `<artifacts-dir>/wave-plans/wave-NN.md` files. When no wave manifest
+ * exists, the linter falls back to a conservative implicit detection: a
+ * wave is a contiguous run of `phase=red` events with no other-phase
+ * events between them; the rule fires only when the implicit wave has
+ * 2+ members.
  *
  * @param waveMembers Optional explicit wave manifest. Map key is wave
  * name (e.g. `"W-01"`); value is the set of slice ids in that wave.
package/dist/artifact-linter/tdd.js
CHANGED
@@ -1,6 +1,7 @@
 import fs from "node:fs/promises";
 import path from "node:path";
-import { readDelegationLedger, readDelegationEvents } from "../delegation.js";
+import { loadTddReadySlicePool, readDelegationLedger, readDelegationEvents, selectReadySlices } from "../delegation.js";
+import { mergeParallelWaveDefinitions, parseParallelExecutionPlanWaves, parseWavePlanDirectory } from "../internal/plan-split-waves.js";
 import { evaluateInvestigationTrace, sectionBodyByName } from "./shared.js";
 const SLICE_SUMMARY_START = "<!-- auto-start: tdd-slice-summary -->";
 const SLICE_SUMMARY_END = "<!-- auto-end: tdd-slice-summary -->";
@@ -26,8 +27,17 @@ const SLICES_INDEX_END = "<!-- auto-end: slices-index -->";
  * via `## Slices Index`.
  */
 export async function lintTddStage(ctx) {
-    const { projectRoot, discoveryMode, raw, absFile, sections, findings, parsedFrontmatter, worktreeExecutionMode } = ctx;
+    const { projectRoot, discoveryMode, raw, absFile, sections, findings, parsedFrontmatter, worktreeExecutionMode, legacyContinuation } = ctx;
     void parsedFrontmatter;
+    const artifactsDir = path.dirname(absFile);
+    const planPath = path.join(artifactsDir, "05-plan.md");
+    let planRaw = "";
+    try {
+        planRaw = await fs.readFile(planPath, "utf8");
+    }
+    catch {
+        planRaw = "";
+    }
     evaluateInvestigationTrace(ctx, "Watched-RED Proof");
     const delegationLedger = await readDelegationLedger(ctx.projectRoot);
     const activeRunEntries = delegationLedger.entries.filter((entry) => entry.stage === "tdd" && entry.runId === delegationLedger.runId);
@@ -176,7 +186,7 @@ export async function lintTddStage(ctx) {
     // (size >= 2). Sequential per-slice runs (red→green→refactor in a
     // tight loop) form size-1 implicit waves and are unaffected.
     if (eventsActive) {
-        const waveManifest = await
+        const waveManifest = await readMergedWaveManifestForCheckpoint(artifactsDir, planRaw);
         const checkpointResult = evaluateRedCheckpoint(slicesByEvents, waveManifest);
         if (!checkpointResult.ok) {
             findings.push({
@@ -203,52 +213,69 @@ export async function lintTddStage(ctx) {
     }
     const { events: jsonlEvents, fanInAudits } = await readDelegationEvents(projectRoot);
     const runEvents = jsonlEvents.filter((e) => e.runId === delegationLedger.runId);
+    if (eventsActive && planRaw.length > 0) {
+        const ignoredWave = await evaluateWavePlanDispatchIgnored({
+            artifactsDir,
+            planMarkdown: planRaw,
+            runEvents,
+            runId: delegationLedger.runId,
+            slices: slicesByEvents,
+            legacyContinuation
+        });
+        if (ignoredWave) {
+            findings.push(ignoredWave);
+        }
+    }
     if (eventsActive && worktreeExecutionMode === "worktree-first") {
         const terminalPhases = new Set([
-            "green",
             "refactor",
             "refactor-deferred",
             "resolve-conflict"
         ]);
-        const
+        const missingGreenMeta = new Set();
         for (const ev of runEvents) {
             if (ev.stage !== "tdd" || ev.agent !== "slice-implementer")
                 continue;
-            if (ev.status !== "completed"
+            if (ev.status !== "completed" || ev.phase !== "green")
                 continue;
-            if (
+            if (typeof ev.sliceId !== "string")
                 continue;
             const tok = ev.claimToken?.trim() ?? "";
-
-
+            const lane = ev.ownerLaneId?.trim() ?? "";
+            const lease = ev.leasedUntil?.trim() ?? "";
+            if (tok.length === 0 || lane.length === 0 || lease.length === 0) {
+                missingGreenMeta.add(ev.sliceId);
             }
         }
-        if (
+        if (missingGreenMeta.size > 0) {
             findings.push({
-                section: "
+                section: "tdd_slice_lane_metadata_missing",
                 required: true,
-                rule: "Worktree-first:
+                rule: "Worktree-first: every completed slice-implementer phase=green row must record claimToken, ownerLaneId (--lane-id), and leasedUntil (--lease-until).",
                 found: false,
-                details: `Slices missing
+                details: `Slices missing one or more lane fields on GREEN: ${[...missingGreenMeta].sort().join(", ")}. Remediation: include --claim-token, --lane-id, and --lease-until on every slice-implementer --phase green delegation-record write (schedule through completion); the hook fails fast with dispatch_lane_metadata_missing when they are omitted.`
             });
         }
-        const
+        const missingClaim = new Set();
         for (const ev of runEvents) {
             if (ev.stage !== "tdd" || ev.agent !== "slice-implementer")
                 continue;
-            if (ev.status !== "completed"
+            if (ev.status !== "completed" && ev.status !== "failed")
                 continue;
-            if (!ev.
-
+            if (!ev.phase || !terminalPhases.has(ev.phase))
+                continue;
+            const tok = ev.claimToken?.trim() ?? "";
+            if (tok.length === 0 && typeof ev.sliceId === "string") {
+                missingClaim.add(ev.sliceId);
             }
         }
-        if (
+        if (missingClaim.size > 0) {
             findings.push({
-                section: "
+                section: "tdd_slice_claim_token_missing",
                 required: true,
-                rule: "Worktree-first:
+                rule: "Worktree-first: terminal slice-implementer rows (refactor / refactor-deferred / resolve-conflict) must echo --claim-token. Remediation: pass the same --claim-token used on the scheduled row for every completed/failed terminal phase.",
                 found: false,
-                details: `Slices missing
+                details: `Slices missing claim token on non-GREEN terminal rows: ${[...missingClaim].join(", ")}.`
             });
         }
         const conflictSlices = [
@@ -338,7 +365,6 @@ export async function lintTddStage(ctx) {
     const completedSliceImplementers = activeRunEntries.filter((entry) => entry.agent === "slice-implementer" && entry.status === "completed");
     const fanOutDetected = completedSliceImplementers.length > 1;
     if (fanOutDetected) {
-        const artifactsDir = path.dirname(absFile);
         const cohesionContractMarkdownPath = path.join(artifactsDir, "cohesion-contract.md");
         const cohesionContractJsonPath = path.join(artifactsDir, "cohesion-contract.json");
         let cohesionContractFound = true;
@@ -418,7 +444,6 @@ export async function lintTddStage(ctx) {
     // Phase S — sharded slice files. Validate per-slice file presence
     // and required headings. `tdd-slices/` is optional; missing folder
     // simply means main-only mode (legacy fallback).
-    const artifactsDir = path.dirname(absFile);
     const slicesDir = path.join(artifactsDir, "tdd-slices");
     const sliceFiles = await listSliceFiles(slicesDir);
     for (const sliceFile of sliceFiles) {
@@ -694,15 +719,110 @@ export function evaluateSliceImplementerCoverage(slices) {
     }
     return { missing };
 }
+async function readMergedWaveManifestForCheckpoint(artifactsDir, planMarkdown) {
+    try {
+        const merged = mergeParallelWaveDefinitions(parseParallelExecutionPlanWaves(planMarkdown), await parseWavePlanDirectory(artifactsDir));
+        if (merged.length === 0)
+            return null;
+        const map = new Map();
+        for (const w of merged) {
+            map.set(w.waveId, new Set(w.members.map((m) => m.sliceId)));
+        }
+        return map.size > 0 ? map : null;
+    }
+    catch {
+        return null;
+    }
+}
+function sliceRefactorTerminal(sliceId, slices) {
+    const rows = slices.get(sliceId);
+    if (!rows)
+        return false;
+    return rows.some((e) => e.agent === "slice-implementer" &&
+        (e.phase === "refactor" || e.phase === "refactor-deferred") &&
+        (e.status === "completed" || e.status === "failed"));
+}
+/**
+ * v6.13.1 — detect single-slice dispatch when the merged wave plan
+ * requires parallel ready slice-implementer fan-out.
+ */
+export async function evaluateWavePlanDispatchIgnored(params) {
+    let merged;
+    try {
+        merged = mergeParallelWaveDefinitions(parseParallelExecutionPlanWaves(params.planMarkdown), await parseWavePlanDirectory(params.artifactsDir));
+    }
+    catch {
+        return null;
+    }
+    if (merged.length === 0)
+        return null;
+    let pool;
+    try {
+        pool = await loadTddReadySlicePool(params.planMarkdown, params.artifactsDir, {
+            legacyParallelDefaultSerial: params.legacyContinuation
+        });
+    }
+    catch {
+        return null;
+    }
+    if (pool.length === 0)
+        return null;
+    const completedUnitIds = new Set();
+    for (const u of pool) {
+        if (sliceRefactorTerminal(u.sliceId, params.slices)) {
+            completedUnitIds.add(u.unitId);
+        }
+    }
+    const scoped = params.runEvents.filter((e) => e.runId === params.runId);
+    const tail = scoped.slice(-20);
+    const implInTail = new Set();
+    for (const e of tail) {
+        if (e.agent === "slice-implementer" && typeof e.sliceId === "string" && e.sliceId.length > 0) {
+            implInTail.add(e.sliceId);
+        }
+    }
+    if (implInTail.size !== 1)
+        return null;
+    for (const wave of merged) {
+        const waveSliceSet = new Set(wave.members.map((m) => m.sliceId));
+        const wavePool = pool.filter((u) => waveSliceSet.has(u.sliceId));
+        if (wavePool.length < 2)
+            continue;
+        const waveIncomplete = wave.members.some((m) => !sliceRefactorTerminal(m.sliceId, params.slices));
+        if (!waveIncomplete)
+            continue;
+        const ready = selectReadySlices(wavePool, {
+            cap: Math.max(32, wavePool.length),
+            completedUnitIds,
+            activePathHolders: [],
+            legacyContinuation: params.legacyContinuation
+        });
+        if (ready.length < 2)
+            continue;
+        const only = [...implInTail][0];
+        const missed = ready.map((r) => r.sliceId).filter((s) => s !== only);
+        if (missed.length === 0)
+            continue;
+        return {
+            section: "tdd_wave_plan_ignored",
+            required: true,
+            rule: "When the Parallel Execution Plan (or wave-plans/) defines an open wave with two or more ready parallelizable slices, the controller must fan out slice-implementer work for each ready slice instead of serializing to one slice only.",
+            found: false,
+            details: `Wave ${wave.waveId}: scheduler-ready members ${ready.map((r) => r.sliceId).join(", ")}; last 20 delegation events show slice-implementer only for ${only}. Missed parallel dispatch: ${missed.join(", ")}. Remediation: load \`05-plan.md\` (Parallel Execution Plan) and \`wave-plans/\` before routing, launch the wave (AskQuestion only when waveCount>=2 and single-slice is a real alternative), then dispatch GREEN+DOC for every ready slice with mandatory worktree-first flags on GREEN.`
+        };
+    }
+    return null;
+}
 /**
  * v6.12.0 Phase W — RED checkpoint enforcement. The wave protocol
  * requires ALL Phase A REDs to land before ANY Phase B GREEN starts.
  * The rule is enforced on a per-wave basis, where a wave is defined by
- *
- *
- *
- * `phase=red` events with no other-phase
- * fires only when the implicit wave has
+ * the managed `## Parallel Execution Plan` block in `05-plan.md` and/or
+ * `<artifacts-dir>/wave-plans/wave-NN.md` files. When no wave manifest
+ * exists, the linter falls back to a conservative implicit detection: a
+ * wave is a contiguous run of `phase=red` events with no other-phase
+ * events between them; the rule fires only when the implicit wave has
+ * 2+ members.
  *
  * @param waveMembers Optional explicit wave manifest. Map key is wave
  * name (e.g. `"W-01"`); value is the set of slice ids in that wave.
@@ -783,42 +903,6 @@ export function evaluateRedCheckpoint(slices, waveMembers = null) {
         "Dispatch ALL Phase A test-author --phase red calls in one message, verify every phase=red event lands with non-empty evidenceRefs, and only then dispatch Phase B slice-implementer --phase green + slice-documenter --phase doc fan-out."
     };
 }
-/**
- * Read explicit wave manifest from `<artifacts-dir>/wave-plans/wave-NN.md`
- * files. Returns a map from wave name to the set of slice ids it
- * contains. Slice ids are extracted via `S-<digits>` regex matches in
- * each wave file. Returns null when no wave files exist or all are
- * empty/unparseable.
- */
-async function readWaveManifest(artifactsDir) {
-    const wavePlansDir = path.join(artifactsDir, "wave-plans");
-    let entries = [];
-    try {
-        entries = await fs.readdir(wavePlansDir);
-    }
-    catch {
-        return null;
-    }
-    const waves = new Map();
-    for (const name of entries) {
-        const match = /^wave-(\d+)\.md$/u.exec(name);
-        if (!match)
-            continue;
-        const wavePath = path.join(wavePlansDir, name);
-        let body = "";
-        try {
-            body = await fs.readFile(wavePath, "utf8");
-        }
-        catch {
-            continue;
-        }
-        const ids = extractSliceIdsFromBody(body);
-        if (ids.length === 0)
-            continue;
-        waves.set(`W-${match[1]}`, new Set(ids));
-    }
-    return waves.size > 0 ? waves : null;
-}
 const LEGACY_PER_SLICE_SECTIONS = [
     "Test Discovery",
     "RED Evidence",
package/dist/content/hooks.js
CHANGED
@@ -294,6 +294,19 @@ async function readRunId(root) {
     }
 }
 
+async function readWorktreeExecutionModeInline(root) {
+    try {
+        const raw = await fs.readFile(path.join(root, RUNTIME_ROOT, "state", "flow-state.json"), "utf8");
+        const parsed = JSON.parse(raw);
+        if (parsed && parsed.worktreeExecutionMode === "worktree-first") {
+            return "worktree-first";
+        }
+        return "single-tree";
+    } catch {
+        return "single-tree";
+    }
+}
+
 async function readDelegationEvents(root) {
     try {
         const raw = await fs.readFile(path.join(root, RUNTIME_ROOT, "state", "delegation-events.jsonl"), "utf8");
@@ -1265,6 +1278,33 @@ async function main() {
         }
     }
 
+    if (
+        clean.stage === "tdd" &&
+        clean.agent === "slice-implementer" &&
+        clean.phase === "green" &&
+        (await readWorktreeExecutionModeInline(root)) === "worktree-first"
+    ) {
+        const tok = typeof clean.claimToken === "string" ? clean.claimToken.trim() : "";
+        const lane = typeof clean.ownerLaneId === "string" ? clean.ownerLaneId.trim() : "";
+        const lease = typeof clean.leasedUntil === "string" ? clean.leasedUntil.trim() : "";
+        if (tok.length === 0 || lane.length === 0 || lease.length === 0) {
+            const missing = [];
+            if (tok.length === 0) missing.push("--claim-token");
+            if (lane.length === 0) missing.push("--lane-id");
+            if (lease.length === 0) missing.push("--lease-until");
+            emitErrorJson(
+                "dispatch_lane_metadata_missing",
+                {
+                    missing,
+                    remediation:
+                        "worktree-first mode requires --claim-token, --lane-id, and --lease-until on every slice-implementer --phase green delegation-record write (from scheduled through completed)."
+                },
+                json
+            );
+            return;
+        }
+    }
+
     await persistEntry(root, runId, clean, event);
     process.stdout.write(JSON.stringify({ ok: true, event }, null, 2) + "\n");
 }
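The guard above fails fast before the event is persisted. A standalone restatement of the same check — useful for testing a payload before invoking the hook; the `GreenRecord` interface is an illustrative subset of the real record shape:

```ts
// Standalone sketch of the lane-metadata guard: in worktree-first mode a TDD
// slice-implementer phase=green record must carry all three lane fields.
interface GreenRecord {
    stage: string;
    agent: string;
    phase: string;
    claimToken?: string;
    ownerLaneId?: string;
    leasedUntil?: string;
}

function missingLaneFlags(rec: GreenRecord): string[] {
    // The hook only enforces this on tdd / slice-implementer / green rows.
    if (rec.stage !== "tdd" || rec.agent !== "slice-implementer" || rec.phase !== "green") {
        return [];
    }
    const missing: string[] = [];
    if (!rec.claimToken?.trim()) missing.push("--claim-token");
    if (!rec.ownerLaneId?.trim()) missing.push("--lane-id");
    if (!rec.leasedUntil?.trim()) missing.push("--lease-until");
    return missing;
}

// missingLaneFlags({ stage: "tdd", agent: "slice-implementer", phase: "green" })
//   -> ["--claim-token", "--lane-id", "--lease-until"]
//   (the hook then emits dispatch_lane_metadata_missing instead of persisting)
```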
package/dist/content/skills.js
CHANGED
@@ -4,6 +4,7 @@ import { FLOW_STAGES } from "../types.js";
 import { behaviorAnchorFor, stageExamples } from "./examples.js";
 import { INVESTIGATION_DISCIPLINE_BLOCK } from "./templates.js";
 import { reviewStackAwareRoutes, reviewStackAwareRoutingSummary, stageAutoSubagentDispatch, stageSchema, stageTrackRenderContext } from "./stage-schema.js";
+import { renderTrackTerminology } from "./track-render-context.js";
 import { referencePatternsForStage } from "./reference-patterns.js";
 import { harnessDelegationRecipes } from "../harness-adapters.js";
 const VERIFICATION_STAGES = ["tdd", "review", "ship"];
@@ -196,16 +197,20 @@ ONE slice = THREE dispatches, in this order. Do not skip, do not collapse.
 The file-overlap scheduler auto-allows parallel dispatch because \`claimedPaths\` are disjoint. Fire BOTH calls in the same message — never serialize independent work.
 4. **REFACTOR** — \`Task("slice-implementer --slice S-<id> --phase refactor")\` OR \`--phase refactor-deferred --refactor-rationale '<why>'\`.
 
+**Rule 1 (v6.13.1):** Before any slice-routing question, read \`<artifacts-dir>/05-plan.md\` (managed \`## Parallel Execution Plan\`) **and** list \`<artifacts-dir>/wave-plans/wave-NN.md\`. Merge mentally: Parallel Execution Plan first, wave files second; duplicate slices with conflicting wave membership are invalid. If the merged plan shows a wave with **two or more** scheduler-ready slices, issue **exactly one** \`AskQuestion\`: \`Launch wave W-NN with N parallel lanes (S-a, S-b, ...)?\` with default option **launch wave** and alternate **single-slice instead**. Do not ask "which slice next?" when that question is redundant (single ready slice or no wave). After **launch wave** confirmation, execute RED checkpoint → parallel GREEN+DOC → per-lane REFACTOR without further routing asks. After **single-slice instead**, fall back to the legacy single-slice ritual. **Wave dispatch resume:** if part of the wave is already done, parallelize only the remaining members.
+
 **FORBIDDEN:**
 - Controller writing GREEN production code. ALL GREEN goes through \`slice-implementer\` — linter rule \`tdd_slice_implementer_missing\` blocks the gate.
 - Controller writing per-slice prose into legacy \`06-tdd.md\` sections (Test Discovery / RED Evidence / GREEN Evidence / Watched-RED Proof / Vertical Slice Cycle / Per-Slice Review / Failure Analysis / Acceptance Mapping). \`slice-documenter\` owns \`tdd-slices/S-<id>.md\` — \`tdd_slice_documenter_missing\` blocks the gate.
 - Hand-editing auto-render blocks between \`auto-start: tdd-slice-summary\` / \`auto-start: slices-index\` markers — overwritten every lint.
 
-Delegation-record signature
+Delegation-record signature (extend with lane metadata for every GREEN row in \`worktree-first\`):
+
+\`node .cclaw/hooks/delegation-record.mjs --stage=tdd --agent=slice-implementer --mode=mandatory --status=scheduled --span-id=<id> --dispatch-id=<id> --dispatch-surface=<surface> --agent-definition-path=<path> --slice=S-1 --phase=green --paths=src/a.ts --claim-token=<opaque> --lane-id=<lane> --lease-until=<iso8601> --json\`
 
-## Wave Batch Mode (v6.
+## Wave Batch Mode (v6.13.1+)
 
-
+**Triggers:** managed \`## Parallel Execution Plan\` in \`05-plan.md\` **or** any \`<artifacts-dir>/wave-plans/wave-NN.md\`, OR 2+ slices with disjoint \`claimedPaths\`. Cap = 5 \`slice-implementer\` lanes (10 subagents incl. paired documenters) via \`MAX_PARALLEL_SLICE_IMPLEMENTERS\`. **Preconditions:** Load both sources before routing. Worktree-first: every GREEN delegation-record MUST include \`--claim-token\`, \`--lane-id\`, \`--lease-until\` (hook exits \`2\`, \`dispatch_lane_metadata_missing\` otherwise).
 
 **Phase A — RED checkpoint** — ONE message, all test-authors:
 \`\`\`
@@ -215,20 +220,18 @@ Task("test-author --slice S-3 --phase red")
 \`\`\`
 Wait for ALL Phase A REDs to land with non-empty \`evidenceRefs\` before Phase B. Linter \`tdd_red_checkpoint_violation\` (required: true) blocks any wave where a \`phase=green\` \`completedTs\` precedes the wave's last \`phase=red\` \`completedTs\`.
 
-**Phase B — GREEN+DOC fan-out** — ONE message,
+**Phase B — GREEN+DOC fan-out** — ONE message; pair per slice (repeat for each lane, flags unique per lane):
+
 \`\`\`
-Task("slice-implementer --slice S-1 --phase green --paths <
+Task("slice-implementer --slice S-1 --phase green --paths <prod> --claim-token=<t> --lane-id=<lane-1> --lease-until=<iso>")
 Task("slice-documenter --slice S-1 --phase doc --paths <artifacts-dir>/tdd-slices/S-1.md")
-Task("slice-implementer --slice S-2 --phase green --paths <S-2 prod>")
-Task("slice-documenter --slice S-2 --phase doc --paths <artifacts-dir>/tdd-slices/S-2.md")
 \`\`\`
-Launch ALL Phase B pairs in ONE message. **Never serialize independent work.**
 
-
+Launch every slice's pair in that same message. **Never serialize independent work.**
 
-**
+**Phase C — REFACTOR per slice** — after GREEN+DOC lands, dispatch refactor/refactor-deferred per slice. **Fan-in (worktree-first):** echo claim/lane/lease on completed GREEN rows; stage-complete runs deterministic \`git apply --3way\` (no \`-X ours/theirs\`). Conflicts: \`slice-implementer --phase resolve-conflict\`. With 2+ lanes, still dispatch \`integration-overseer\` before review.
 
-**slice-documenter
+**slice-documenter:** record the \`phase=doc\` row in the same message as GREEN; write a **provisional** row in \`tdd-slices/S-<id>.md\` immediately at dispatch, then **finalize** that file after the matching \`slice-implementer\` \`phase=green\` event lands (evidence-backed prose, not guesswork before GREEN exists).
 
 `;
 }
@@ -655,7 +658,7 @@ If you are about to violate the Iron Law, STOP. No amount of urgency, partial pr
 
 </EXTREMELY-IMPORTANT>
 
-${tddTopOfSkillBlock(stage)}${quickStartBlock(stage, track)}
+${renderTrackTerminology(tddTopOfSkillBlock(stage), trackContext)}${quickStartBlock(stage, track)}
 
 ${STAGE_LANGUAGE_POLICY_POINTER}
 ## Philosophy
package/dist/content/stages/tdd.js
CHANGED
@@ -37,29 +37,29 @@ export const TDD = {
     },
     executionModel: {
         checklist: [
-            "
+            "**Wave dispatch (v6.13.1):** Before routing, read the Parallel Execution Plan (managed block in the track planning artifact) and `<artifacts-dir>/wave-plans/`. Multi-ready waves: one AskQuestion (launch wave vs single-slice); then RED checkpoint, parallel GREEN+DOC with worktree-first flags, per-lane REFACTOR. Resume partial waves by parallelizing remaining members only (see top-of-skill `## Wave Batch Mode`).",
+            "Select vertical slice — the active wave plan (or single ready slice) defines work. Do not ask \"which slice next?\" when the plan already resolves it. Before starting, read `.cclaw/state/ralph-loop.json` (`loopIteration`, `acClosed[]`, `redOpenSlices[]`) so you skip cycles already closed. If `redOpenSlices[]` is non-empty, repair or explicitly park those slices before opening a new RED.",
             "Map to acceptance criterion — identify the specific spec criterion this test proves.",
             "Discover the test surface — inspect existing tests, fixtures, helpers, test commands, and nearby assertions before authoring RED. Reuse the local test style unless the slice genuinely needs a new pattern.",
             "Run a system-wide impact check — name callbacks, state transitions, interfaces, schemas, CLI/config/API contracts, persistence, or event boundaries that this slice can affect. Add RED coverage for each affected public contract or record why it is out of scope.",
             "Source/test preflight — before production edits, classify planned paths using test-path patterns; verify the RED touches a test path and the GREEN touches only source paths needed for the failing behavior.",
             "Use the mandatory `test-author` delegation for RED — after discovery and impact check, dispatch with `--slice S-<id> --phase red`. Produce failing behavior tests only (no production edits) and let the harness record the dispatch via the generated `delegation-record` hook. Set `CCLAW_ACTIVE_AGENT=tdd-red` when the harness supports phase labels.",
             "RED: do NOT hand-edit `## Watched-RED Proof`, `## Vertical Slice Cycle`, or `## RED Evidence` markdown tables. The linter auto-renders them from `delegation-events.jsonl` slice phase rows; manual edits inside the auto-render markers are overwritten on the next lint.",
-            "Dispatch the `slice-implementer` for GREEN with `--slice S-<id> --phase green` and explicit `--paths` so the file-overlap scheduler can auto-allow parallel slices.
+            "Dispatch the `slice-implementer` for GREEN with `--slice S-<id> --phase green` and explicit `--paths` so the file-overlap scheduler can auto-allow parallel slices. When `flow-state.json::worktreeExecutionMode` is `worktree-first`, **mandatory** flags on every GREEN delegation-record row: `--claim-token=<opaque> --lane-id=<lane> --lease-until=<iso8601>`. Attach an evidence ref so the Vertical Slice Cycle row is well-formed. Set `CCLAW_ACTIVE_AGENT=tdd-green` when the harness supports phase labels.",
             "GREEN: Run full suite — execute ALL tests, not just the ones you wrote. The full suite must be GREEN.",
             "GREEN: Verify no regressions — if any existing test breaks, fix the regression before proceeding.",
             "Run verification-before-completion discipline for the slice — capture a fresh test command, explicit PASS/FAIL status, and a config-aware ref (commit SHA when VCS is present/required, or no-vcs attestation when allowed).",
             "REFACTOR: re-dispatch the `slice-implementer` (or `test-author`) with `--phase refactor` once GREEN holds, OR `--phase refactor-deferred --refactor-rationale \"<why>\"` to close the slice without a refactor pass. Both options are recorded as a delegation event; the linter accepts either as REFACTOR coverage. Set `CCLAW_ACTIVE_AGENT=tdd-refactor` when the harness supports phase labels.",
-            "DOC (parallel, mandatory v6.12.0): dispatch `slice-documenter --slice S-<id> --phase doc --paths <artifacts-dir>/tdd-slices/S-<id>.md` IN PARALLEL with `slice-implementer --phase green` for the same slice — ONE message with TWO concurrent Task calls. The documenter only writes `tdd-slices/S-<id>.md`, so its `--paths` are disjoint from the implementer's production paths and the file-overlap scheduler auto-allows the parallel dispatch.
-            "## Wave Batch Mode (v6.13+) — (A) RED checkpoint across all wave members before any GREEN in that wave. (B) Parallel implementer+documenter fan-out across multiple slices when paths are disjoint and claims/leases are valid (`--claim-token`, `--lane-id`, `--lease-until`). (C) Per-lane REFACTOR or refactor-deferred. (D) Deterministic git fan-in at TDD stage-complete merges lane diffs with `git apply --3way`; unresolved conflicts block advance until `--phase resolve-conflict` succeeds.",
+            "DOC (parallel, mandatory v6.12.0): dispatch `slice-documenter --slice S-<id> --phase doc --paths <artifacts-dir>/tdd-slices/S-<id>.md` IN PARALLEL with `slice-implementer --phase green` for the same slice — ONE message with TWO concurrent Task calls. The documenter only writes `tdd-slices/S-<id>.md`, so its `--paths` are disjoint from the implementer's production paths and the file-overlap scheduler auto-allows the parallel dispatch. **Provisional-then-finalize:** append a provisional row/section in `tdd-slices/S-<id>.md` at dispatch time, then finalize that artifact after the matching `phase=green` event records evidence (never treat guesses as final before GREEN lands). Linter rule `tdd_slice_documenter_missing` blocks the gate when the `phase=doc` event is absent (regardless of `discoveryMode`).",
             "**slice-documenter writes per-slice prose** (test discovery, system-wide impact check, RED/GREEN/REFACTOR notes, acceptance mapping, failure analysis) into `tdd-slices/S-<id>.md`. Controller does NOT touch this content. When logging a `green` row, attach the closed acceptance-criterion IDs in `acIds` so Ralph Loop status counts them.",
             "Annotate traceability — link to the active track's source: plan task ID + spec criterion on standard/medium, or spec acceptance item / bug reproduction slice on quick.",
             "**Boundary with review (do NOT escalate single-slice findings to whole-diff review).** `tdd.Per-Slice Review` OWNS severity-classified findings WITHIN one slice (correctness, edge cases, regression). `review` OWNS whole-diff Layer 1 (spec compliance) plus Layer 2 (cross-slice integration, security sweep, dependency/version audit, observability). When a single-slice finding genuinely needs whole-diff escalation, surface it in `06-tdd.md > Per-Slice Review` first; review will cite it (not re-classify) and the cross-artifact-duplication linter requires matching severity/disposition.",
             "Per-Slice Review (conditional) — if the slice meets any trigger (touchCount >= filesChangedThreshold, touchPaths match touchTriggers, or highRisk=true), append a `## Per-Slice Review` entry for this slice before moving on (see the dedicated section below).",
-            "Repeat for each slice — return to
+            "Repeat for each slice — when not in multi-slice wave mode, return to wave-plan discovery; otherwise continue the active wave until members close.",
         ],
         interactionProtocol: [
-            "Pick one vertical slice at a time
-            "Slice implementers are sequential
+            "Pick one vertical slice at a time **only when** the merged wave plan leaves a single scheduler-ready slice or the operator chose single-slice mode. Parallel implementers are allowed when (a) lanes touch non-overlapping files (the file-overlap scheduler auto-allows parallel when `--paths` are disjoint), and (b) an `integration-overseer` is dispatched after the parallel lanes and writes cohesion-evidence into the artifact before the gate is marked passed.",
+            "Slice implementers are sequential only when the plan serializes work; prefer wave-parallel GREEN+DOC when the Parallel Execution Plan marks multiple ready members.",
             "Controller owns orchestration. For each slice S-<id>, dispatch in this order: (1) `test-author --slice S-<id> --phase red` (RED-only, no production edits), (2) `slice-implementer --slice S-<id> --phase green --paths <comma-separated>` for GREEN, (3) re-dispatch `--phase refactor` or `--phase refactor-deferred --refactor-rationale \"<why>\"` to close REFACTOR. Each dispatch records a row in `delegation-events.jsonl` and the linter auto-derives the Watched-RED + Vertical Slice Cycle tables from those rows. Do NOT hand-edit those tables.",
             "Before writing RED tests, discover relevant existing tests and commands so the new test extends the suite instead of fighting it.",
             "Before implementation, perform a system-wide impact check across callbacks, state, interfaces, schemas, and external contracts touched by the slice.",
package/dist/content/start-command.js
CHANGED
@@ -115,6 +115,7 @@ If during any stage the agent discovers evidence that contradicts the initial Ph
 2. If flow state is missing → guide the user to run \`npx cclaw-cli init\` and stop.
 3. If flow state is only a fresh init placeholder (\`completedStages: []\`, all \`passed\` arrays empty, and no \`00-idea.md\`) → stop and ask for \`/cc <prompt>\` to start a tracked run. Do not create a brainstorm state implicitly.
 4. Otherwise check current stage gates, resume if incomplete, and advance if complete.
+5. **TDD wave dispatch (v6.13.1):** When \`currentStage\` is \`tdd\`, read \`${RUNTIME_ROOT}/artifacts/05-plan.md\` Parallel Execution Plan block and \`${RUNTIME_ROOT}/artifacts/wave-plans/\` **before** any slice-routing question. If an open wave still has multiple ready slices, resume parallel dispatch for the **remaining** members only (do not restart completed slices).
 
 ## Headless mode (CI/automation only)
 
@@ -207,9 +208,11 @@ Progress the tracked flow only when one exists:
 2. If missing, guide the user to run \`npx cclaw-cli init\` and stop.
 3. If it is only a fresh init placeholder (\`completedStages: []\`, no passed gates, and no \`${RUNTIME_ROOT}/artifacts/00-idea.md\`), stop and ask for \`/cc <prompt>\` to start a tracked run. Do not silently create a brainstorm run.
 4. Check gates for \`currentStage\`.
-5.
-6. If
-7. If
+5. **TDD (v6.13.1):** When \`currentStage\` is \`tdd\`, read \`${RUNTIME_ROOT}/artifacts/05-plan.md\` (managed \`## Parallel Execution Plan\` between \`parallel-exec-managed\` markers) and scan \`${RUNTIME_ROOT}/artifacts/wave-plans/wave-NN.md\` **before** asking which slice runs next. Merge sources in controller memory: Parallel Execution Plan first, wave files second; the same slice must not disagree across sources.
+6. **Wave dispatch resume:** If a wave is partially closed (some members already past GREEN/REFACTOR), continue with the remaining members in parallel; never redo finished lanes.
+7. If incomplete → load current stage skill and execute.
+8. If complete → advance to next stage and execute.
+9. If flow is done → report completion.
 
 ## Public flow habit
 
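The skill text above describes the wave-vs-single routing decision in prose. A rough sketch of that rule — the `ParsedParallelWave` shape comes from `plan-split-waves.d.ts` below, while the "ready" filtering (dependencies, path claims) is elided here because `selectReadySlices` owns it:

```ts
// Sketch of the routing rule: ask exactly one question only when an open wave
// still has two or more members; otherwise no routing question is needed.
interface ParsedParallelWave {
    waveId: string;
    members: { sliceId: string; unitId: string }[];
}

function routingQuestion(waves: ParsedParallelWave[], closed: Set<string>): string | null {
    for (const wave of waves) {
        const remaining = wave.members.filter((m) => !closed.has(m.sliceId));
        if (remaining.length >= 2) {
            const ids = remaining.map((m) => m.sliceId).join(", ");
            return `Launch wave ${wave.waveId} with ${remaining.length} parallel lanes (${ids})?`;
        }
    }
    return null; // single ready slice or no wave: proceed without asking
}
```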
package/dist/delegation.d.ts
CHANGED
@@ -2,6 +2,7 @@ import { type SubagentFallback } from "./harness-adapters.js";
 import { type MandatoryDelegationTaskClass } from "./content/stage-schema.js";
 import type { FlowStage } from "./types.js";
 import { type FlowState } from "./flow-state.js";
+import { type ParseImplementationUnitParallelOptions, type ParsedParallelWave } from "./internal/plan-split-waves.js";
 export type DelegationMode = "mandatory" | "proactive";
 export type DelegationStatus = "scheduled" | "launched" | "acknowledged" | "completed" | "failed" | "waived" | "stale";
 export declare const DELEGATION_DISPATCH_SURFACES: readonly ["claude-task", "cursor-task", "opencode-agent", "codex-agent", "generic-task", "role-switch", "manual"];
@@ -390,6 +391,14 @@ export interface SelectReadySlicesOptions {
  * `claimedPaths` intersections with already-selected units and active holders.
  */
 export declare function selectReadySlices(units: ReadySliceUnit[], opts: SelectReadySlicesOptions): ReadySliceUnit[];
+/**
+ * v6.13.1 — build scheduler rows from merged parallel wave definitions + plan units.
+ */
+export declare function readySliceUnitsFromMergedWaves(mergedWaves: ParsedParallelWave[], planMarkdown: string, options?: ParseImplementationUnitParallelOptions): ReadySliceUnit[];
+/**
+ * v6.13.1 — load merged wave plan (Parallel Execution Plan block + wave-plans/) and map to `ReadySliceUnit[]`.
+ */
+export declare function loadTddReadySlicePool(planMarkdown: string, artifactsDir: string, options?: ParseImplementationUnitParallelOptions): Promise<ReadySliceUnit[]>;
 /**
  * v6.10.0 (P1) — when scheduling a `slice-implementer` on a TDD stage,
  * compare `claimedPaths` against every currently active span on the
package/dist/delegation.js
CHANGED
@@ -9,7 +9,7 @@ import { HARNESS_ADAPTERS } from "./harness-adapters.js";
 import { readFlowState } from "./runs.js";
 import { mandatoryAgentsFor, stageSchema } from "./content/stage-schema.js";
 import { effectiveWorktreeExecutionMode } from "./flow-state.js";
-import { compareCanonicalUnitIds } from "./internal/plan-split-waves.js";
+import { compareCanonicalUnitIds, mergeParallelWaveDefinitions, parseImplementationUnitParallelFields, parseImplementationUnits, parseParallelExecutionPlanWaves, parseWavePlanDirectory } from "./internal/plan-split-waves.js";
 const execFileAsync = promisify(execFile);
 const TERMINAL_DELEGATION_STATUSES = new Set(["completed", "failed", "waived", "stale"]);
 export const DELEGATION_DISPATCH_SURFACES = [
@@ -730,6 +730,54 @@ export function selectReadySlices(units, opts) {
     }
     return selected;
 }
+/**
+ * v6.13.1 — build scheduler rows from merged parallel wave definitions + plan units.
+ */
+export function readySliceUnitsFromMergedWaves(mergedWaves, planMarkdown, options) {
+    const units = parseImplementationUnits(planMarkdown);
+    const metaByUnit = new Map(units.map((u) => {
+        const m = parseImplementationUnitParallelFields(u, options);
+        return [m.unitId, m];
+    }));
+    const sliceSet = new Set();
+    for (const w of mergedWaves) {
+        for (const m of w.members) {
+            sliceSet.add(m.sliceId);
+        }
+    }
+    const out = [];
+    for (const sliceId of [...sliceSet].sort((a, b) => a.localeCompare(b))) {
+        const member = mergedWaves.flatMap((w) => w.members).find((x) => x.sliceId === sliceId);
+        if (!member)
+            continue;
+        const meta = metaByUnit.get(member.unitId);
+        if (!meta) {
+            out.push({
+                unitId: member.unitId,
+                sliceId,
+                dependsOn: [],
+                claimedPaths: [],
+                parallelizable: true
+            });
+            continue;
+        }
+        out.push({
+            unitId: meta.unitId,
+            sliceId,
+            dependsOn: meta.dependsOn,
+            claimedPaths: meta.claimedPaths,
+            parallelizable: meta.parallelizable
+        });
+    }
+    return out;
+}
+/**
+ * v6.13.1 — load merged wave plan (Parallel Execution Plan block + wave-plans/) and map to `ReadySliceUnit[]`.
+ */
+export async function loadTddReadySlicePool(planMarkdown, artifactsDir, options) {
+    const merged = mergeParallelWaveDefinitions(parseParallelExecutionPlanWaves(planMarkdown), await parseWavePlanDirectory(artifactsDir));
+    return readySliceUnitsFromMergedWaves(merged, planMarkdown, options);
+}
 function readMaxParallelOverrideFromEnv() {
     const raw = process.env.CCLAW_MAX_PARALLEL_SLICE_IMPLEMENTERS;
     if (typeof raw !== "string" || raw.trim().length === 0)
package/dist/install.js
CHANGED
@@ -25,7 +25,7 @@ import { LANGUAGE_RULE_PACK_DIR, LEGACY_LANGUAGE_RULE_PACK_FOLDERS } from "./con
 import { RESEARCH_PLAYBOOKS } from "./content/research-playbooks.js";
 import { SUBAGENT_CONTEXT_SKILLS } from "./content/subagent-context-skills.js";
 import { CCLAW_AGENTS } from "./content/core-agents.js";
-import { createInitialFlowState } from "./flow-state.js";
+import { createInitialFlowState, effectiveWorktreeExecutionMode } from "./flow-state.js";
 import { ensureDir, exists, writeFileSafe } from "./fs-utils.js";
 import { ManagedResourceSession, setActiveManagedResourceSession } from "./managed-resources.js";
 import { ensureGitignore, removeGitignorePatterns } from "./gitignore.js";
@@ -34,7 +34,7 @@ import { validateHookDocument } from "./hook-schema.js";
 import { detectHarnesses } from "./init-detect.js";
 import { classifyCodexHooksFlag, codexConfigPath, readCodexConfig } from "./codex-feature-flag.js";
 import { CorruptFlowStateError, ensureRunSystem, readFlowState, writeFlowState } from "./runs.js";
-import { PLAN_SPLIT_DEFAULT_WAVE_SIZE, buildParallelExecutionPlanSection, planArtifactLacksV613ParallelMetadata, upsertParallelExecutionPlanSection } from "./internal/plan-split-waves.js";
+import { PLAN_SPLIT_DEFAULT_WAVE_SIZE, buildParallelExecutionPlanSection, formatNextParallelWaveSyncHint, mergeParallelWaveDefinitions, parseParallelExecutionPlanWaves, parseWavePlanDirectory, planArtifactLacksV613ParallelMetadata, upsertParallelExecutionPlanSection } from "./internal/plan-split-waves.js";
 import { FLOW_STAGES } from "./types.js";
 const OPENCODE_PLUGIN_REL_PATH = ".opencode/plugins/cclaw-plugin.mjs";
 const CURSOR_RULE_REL_PATH = ".cursor/rules/cclaw-workflow.mdc";
@@ -1135,6 +1135,28 @@ async function assertExpectedHarnessShims(projectRoot, harnesses) {
         }
     }
 }
+async function maybeLogParallelWaveDispatchHint(projectRoot) {
+    const flowPath = runtimePath(projectRoot, "state", "flow-state.json");
+    if (!(await exists(flowPath)))
+        return;
+    try {
+        const state = await readFlowState(projectRoot);
+        if (effectiveWorktreeExecutionMode(state) !== "worktree-first")
+            return;
+        const planPath = runtimePath(projectRoot, "artifacts", "05-plan.md");
+        if (!(await exists(planPath)))
+            return;
+        const planRaw = await fs.readFile(planPath, "utf8");
+        const merged = mergeParallelWaveDefinitions(parseParallelExecutionPlanWaves(planRaw), await parseWavePlanDirectory(runtimePath(projectRoot, "artifacts")));
+        const hint = formatNextParallelWaveSyncHint(merged);
+        if (hint) {
+            process.stdout.write(`cclaw: ${hint}\n`);
+        }
+    }
+    catch {
+        // best-effort note only
+    }
+}
 async function materializeRuntime(projectRoot, config, forceStateReset, operation = "sync") {
     await warnStaleInitSentinel(projectRoot, operation);
     const sentinelPath = await writeInitSentinel(projectRoot, operation);
@@ -1175,6 +1197,9 @@ async function materializeRuntime(projectRoot, config, forceStateReset, operatio
     await assertExpectedHarnessShims(projectRoot, harnesses);
     await writeCursorWorkflowRule(projectRoot, harnesses);
     await ensureGitignore(projectRoot);
+    if (operation === "sync" || operation === "upgrade") {
+        await maybeLogParallelWaveDispatchHint(projectRoot);
+    }
     await managedSession.commit();
     await fs.unlink(sentinelPath).catch(() => undefined);
 }
package/dist/internal/plan-split-waves.d.ts
CHANGED
@@ -31,6 +31,52 @@ export interface PlanSplitWavesArgs {
 }
 export declare const PLAN_SPLIT_DEFAULT_WAVE_SIZE = 5;
 export declare const PLAN_SPLIT_SMALL_PLAN_THRESHOLD = 50;
+/** v6.13.1 — member line in Parallel Execution Plan or wave-NN.md */
+export interface ParsedParallelWaveMember {
+    sliceId: string;
+    unitId: string;
+}
+export interface ParsedParallelWave {
+    waveId: string;
+    members: ParsedParallelWaveMember[];
+}
+export declare class WavePlanDuplicateSliceError extends Error {
+    constructor(message: string);
+}
+export declare class WavePlanMergeConflictError extends Error {
+    constructor(message: string);
+}
+/**
+ * Raw body between parallel execution managed markers (no markers included).
+ */
+export declare function extractParallelExecutionManagedBody(planMarkdown: string): string | null;
+/**
+ * Members list after `Members:` in Parallel Execution Plan / wave-NN headers.
+ * Supports markdown bold `**Members:**` (colon between Members and closing `**`)
+ * and plain `Members:`.
+ */
+export declare function extractMembersListFromLine(trimmedLine: string): string | null;
+/**
+ * Parse `## Parallel Execution Plan` managed block for wave headings and Members lines.
+ * Malformed member tokens are skipped. Duplicate slice ids in one plan source throw.
+ */
+export declare function parseParallelExecutionPlanWaves(planMarkdown: string): ParsedParallelWave[];
+/**
+ * Parse a single wave-NN.md: prefer a `Members:` line in the header; otherwise
+ * collect distinct S-N tokens in the first lines (legacy).
+ */
+export declare function parseWavePlanFileBody(body: string, waveId: string): ParsedParallelWave;
+export declare function parseWavePlanDirectory(artifactsDir: string): Promise<ParsedParallelWave[]>;
+/**
+ * Merge wave definitions: managed Parallel Execution Plan first, then wave-NN.md.
+ * Same slice must map to the same wave id and unit id in both sources or a
+ * `WavePlanMergeConflictError` is thrown.
+ */
+export declare function mergeParallelWaveDefinitions(primary: ParsedParallelWave[], secondary: ParsedParallelWave[]): ParsedParallelWave[];
+/**
+ * One-line operator hint after sync when a multi-member wave exists.
+ */
+export declare function formatNextParallelWaveSyncHint(merged: ParsedParallelWave[]): string | null;
 export interface ParsedImplementationUnit {
     id: string;
     /**
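The merge contract declared above is strict: a slice may appear in both sources only with identical wave and unit ids. A minimal sketch of the conflict path (import path assumed; the error message shape is traced from the implementation below):

```ts
// Sketch only — same slice bound to two different waves must throw.
import {
    mergeParallelWaveDefinitions,
    WavePlanMergeConflictError,
} from "cclaw-cli/dist/internal/plan-split-waves.js"; // import path assumed

const fromPlan = [{ waveId: "W-01", members: [{ sliceId: "S-1", unitId: "U-1" }] }];
const fromFiles = [{ waveId: "W-02", members: [{ sliceId: "S-1", unitId: "U-1" }] }];
try {
    mergeParallelWaveDefinitions(fromPlan, fromFiles);
} catch (err) {
    if (err instanceof WavePlanMergeConflictError) {
        // "slice S-1: conflicting wave plan sources (wave W-01 vs W-02, unit U-1 vs U-1)"
        console.error(err.message);
    }
}
```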
package/dist/internal/plan-split-waves.js
CHANGED
@@ -10,6 +10,215 @@ const WAVE_MANAGED_START = "<!-- wave-split-managed-start -->";
 const WAVE_MANAGED_END = "<!-- wave-split-managed-end -->";
 const PARALLEL_EXEC_MANAGED_START = "<!-- parallel-exec-managed-start -->";
 const PARALLEL_EXEC_MANAGED_END = "<!-- parallel-exec-managed-end -->";
+export class WavePlanDuplicateSliceError extends Error {
+    constructor(message) {
+        super(message);
+        this.name = "WavePlanDuplicateSliceError";
+    }
+}
+export class WavePlanMergeConflictError extends Error {
+    constructor(message) {
+        super(message);
+        this.name = "WavePlanMergeConflictError";
+    }
+}
+/**
+ * Raw body between parallel execution managed markers (no markers included).
+ */
+export function extractParallelExecutionManagedBody(planMarkdown) {
+    const startIdx = planMarkdown.indexOf(PARALLEL_EXEC_MANAGED_START);
+    const endIdx = planMarkdown.indexOf(PARALLEL_EXEC_MANAGED_END);
+    if (startIdx < 0 || endIdx <= startIdx)
+        return null;
+    return planMarkdown.slice(startIdx + PARALLEL_EXEC_MANAGED_START.length, endIdx).trim();
+}
+function tokenToSliceAndUnit(token) {
+    const t = token.trim().replace(/^[`"'[\]()]+|[`"'[\]()]+$/gu, "");
+    const u = /^U-(\d+)$/u.exec(t);
+    if (u) {
+        const n = u[1];
+        return { unitId: `U-${n}`, sliceId: `S-${n}` };
+    }
+    const s = /^S-(\d+)$/u.exec(t);
+    if (s) {
+        const n = s[1];
+        return { unitId: `U-${n}`, sliceId: `S-${n}` };
+    }
+    return null;
+}
+/**
+ * Members list after `Members:` in Parallel Execution Plan / wave-NN headers.
+ * Supports markdown bold `**Members:**` (colon between Members and closing `**`)
+ * and plain `Members:`.
+ */
+export function extractMembersListFromLine(trimmedLine) {
+    const bold = /^[-*]?\s*\*\*Members:\*\*\s*(.+)$/iu.exec(trimmedLine);
+    if (bold)
+        return bold[1].trim();
+    const plain = /^[-*]?\s*Members\s*:\s*(.+)$/iu.exec(trimmedLine);
+    if (plain)
+        return plain[1].trim();
+    return null;
+}
+/**
+ * Parse `## Parallel Execution Plan` managed block for wave headings and Members lines.
+ * Malformed member tokens are skipped. Duplicate slice ids in one plan source throw.
+ */
+export function parseParallelExecutionPlanWaves(planMarkdown) {
+    const body = extractParallelExecutionManagedBody(planMarkdown);
+    if (!body)
+        return [];
+    const lines = body.split(/\r?\n/u);
+    const waves = [];
+    let current = null;
+    const seenSlices = new Set();
+    const flushCurrent = () => {
+        if (current && current.members.length > 0) {
+            waves.push(current);
+        }
+    };
+    for (const rawLine of lines) {
+        const trimmed = rawLine.trim();
+        const waveMatch = /^###\s+Wave\s+(\d+)\s*$/iu.exec(trimmed);
+        if (waveMatch) {
+            flushCurrent();
+            const n = waveMatch[1];
+            current = { waveId: `W-${n.padStart(2, "0")}`, members: [] };
+            continue;
+        }
+        const membersCsv = extractMembersListFromLine(trimmed);
+        if (membersCsv !== null && current) {
+            const parts = membersCsv
+                .split(/,/u)
+                .map((p) => p.trim())
+                .filter((p) => p.length > 0);
+            for (const part of parts) {
+                const ids = tokenToSliceAndUnit(part);
+                if (!ids)
+                    continue;
+                if (seenSlices.has(ids.sliceId)) {
+                    throw new WavePlanDuplicateSliceError(`duplicate slice ${ids.sliceId} in Parallel Execution Plan managed block`);
+                }
+                seenSlices.add(ids.sliceId);
+                current.members.push(ids);
+            }
+        }
+    }
+    flushCurrent();
+    return waves;
+}
+/**
+ * Parse a single wave-NN.md: prefer a `Members:` line in the header; otherwise
+ * collect distinct S-N tokens in the first lines (legacy).
+ */
+export function parseWavePlanFileBody(body, waveId) {
+    const members = [];
+    const seen = new Set();
+    const headLines = body.split(/\r?\n/u).slice(0, 120);
+    let membersCsv = null;
+    for (const raw of headLines) {
+        membersCsv = extractMembersListFromLine(raw.trim());
+        if (membersCsv !== null)
+            break;
+    }
+    if (membersCsv !== null) {
+        for (const part of membersCsv.split(/,/u)) {
+            const ids = tokenToSliceAndUnit(part);
+            if (!ids)
+                continue;
+            if (seen.has(ids.sliceId)) {
+                throw new WavePlanDuplicateSliceError(`duplicate slice ${ids.sliceId} in ${waveId} wave file`);
+            }
+            seen.add(ids.sliceId);
+            members.push(ids);
+        }
+    }
+    if (members.length === 0) {
+        const regex = /\b(S-\d+)\b/gu;
+        let match;
+        while ((match = regex.exec(body)) !== null) {
+            const ids = tokenToSliceAndUnit(match[1]);
+            if (!ids)
+                continue;
+            if (seen.has(ids.sliceId))
+                continue;
+            seen.add(ids.sliceId);
+            members.push(ids);
+        }
+    }
+    return { waveId, members };
+}
+export async function parseWavePlanDirectory(artifactsDir) {
+    const wavePlansDir = path.join(artifactsDir, "wave-plans");
+    let entries = [];
+    try {
+        entries = await fs.readdir(wavePlansDir);
+    }
+    catch {
+        return [];
+    }
+    const out = [];
+    for (const name of [...entries].sort()) {
+        const match = /^wave-(\d+)\.md$/u.exec(name);
+        if (!match)
+            continue;
+        const waveId = `W-${match[1].padStart(2, "0")}`;
+        const body = await fs.readFile(path.join(wavePlansDir, name), "utf8");
+        const wave = parseWavePlanFileBody(body, waveId);
+        if (wave.members.length > 0) {
+            out.push(wave);
+        }
+    }
+    return out;
+}
+/**
+ * Merge wave definitions: managed Parallel Execution Plan first, then wave-NN.md.
+ * Same slice must map to the same wave id and unit id in both sources or a
+ * `WavePlanMergeConflictError` is thrown.
+ */
+export function mergeParallelWaveDefinitions(primary, secondary) {
+    const byWave = new Map();
+    const sliceBinding = new Map();
+    const addWaves = (waves) => {
+        for (const wave of waves) {
+            let memMap = byWave.get(wave.waveId);
+            if (!memMap) {
+                memMap = new Map();
+                byWave.set(wave.waveId, memMap);
+            }
+            for (const member of wave.members) {
+                const prev = sliceBinding.get(member.sliceId);
+                if (prev) {
+                    if (prev.waveId !== wave.waveId || prev.unitId !== member.unitId) {
+                        throw new WavePlanMergeConflictError(`slice ${member.sliceId}: conflicting wave plan sources (wave ${prev.waveId} vs ${wave.waveId}, unit ${prev.unitId} vs ${member.unitId})`);
+                    }
+                }
+                else {
+                    sliceBinding.set(member.sliceId, { waveId: wave.waveId, unitId: member.unitId });
+                }
+                memMap.set(member.sliceId, member);
+            }
+        }
+    };
+    addWaves(primary);
+    addWaves(secondary);
+    return [...byWave.entries()]
+        .sort(([a], [b]) => a.localeCompare(b))
+        .map(([wid, memMap]) => ({
+            waveId: wid,
+            members: [...memMap.values()].sort((p, q) => p.sliceId.localeCompare(q.sliceId))
+        }));
+}
+/**
+ * One-line operator hint after sync when a multi-member wave exists.
+ */
+export function formatNextParallelWaveSyncHint(merged) {
+    const candidate = merged.find((w) => w.members.length >= 2);
+    if (!candidate)
+        return null;
+    const ids = candidate.members.map((m) => m.sliceId).join(", ");
+    return `Parallel Execution Plan: ${candidate.waveId} has ${candidate.members.length} parallel members (${ids}).`;
+}
 /**
  * Parse v6.13 parallel-metadata bullets from an implementation unit body.
  * Missing keys use conservative defaults (`dependsOn: []`, `parallelizable: true`
@@ -18,12 +227,17 @@ const PARALLEL_EXEC_MANAGED_END = "<!-- parallel-exec-managed-end -->";
 export function parseImplementationUnitParallelFields(unit, options) {
     const text = unit.body;
     const pick = (label) => {
-        const
+        const esc = label.replace(/[.*+?^${}()|[\]\\]/gu, "\\$&");
+        const bold = new RegExp(`^[-*]\\s*\\*\\*${esc}:\\*\\*\\s*(.*)$`, "imu");
+        const legacy = new RegExp(`^[-*]\\s*\\*{0,2}${esc}\\*{0,2}\\s*:\\s*(.*)$`, "imu");
         for (const rawLine of text.split(/\r?\n/u)) {
             const line = rawLine.trim();
-            const
-            if (
-                return
+            const mb = bold.exec(line);
+            if (mb)
+                return mb[1]?.trim();
+            const ml = legacy.exec(line);
+            if (ml)
+                return ml[1]?.trim();
         }
         return undefined;
     };
@@ -53,8 +267,13 @@ export function parseImplementationUnitParallelFields(unit, options) {
     return { unitId: id, dependsOn, claimedPaths, parallelizable, riskTier, lane };
 }
 function unitBodyHasV613ParallelBullet(body, label) {
-    const
-
+    const esc = label.replace(/[.*+?^${}()|[\]\\]/gu, "\\$&");
+    const bold = new RegExp(`^[-*]\\s*\\*\\*${esc}:\\*\\*`, "imu");
+    const legacy = new RegExp(`^[-*]\\s*\\*{0,2}${esc}\\*{0,2}\\s*:`, "imu");
+    return body.split(/\r?\n/u).some((raw) => {
+        const line = raw.trim();
+        return bold.test(line) || legacy.test(line);
+    });
 }
 /**
  * True when the plan has implementation units but any unit is missing v6.13.0