cclaw-cli 6.14.2 → 6.14.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -285,13 +285,25 @@ export async function lintTddStage(ctx) {
285
285
  "refactor-deferred",
286
286
  "resolve-conflict"
287
287
  ]);
288
- // v6.14.2 — under `legacyContinuation: true` AND a stamped
289
- // boundary, exempt closed slices that NEVER recorded ANY of the
290
- // three worktree-first metadata fields. This is the "all-or-
291
- // nothing legacy" rule from v6.14.2 Fix 3: partial-metadata
292
- // slices stay flagged (a real bug), but slices that pre-date
293
- // the worktree-first flip get amnesty.
294
- const sliceWorktreeMetaState = computeSliceWorktreeMetaState(runEvents);
288
+ // v6.14.3 — under `legacyContinuation: true` AND a stamped
289
+ // boundary, exempt every slice closed at or before
290
+ // `tddWorktreeCutoverSliceId`. The cutover boundary itself is the
291
+ // contract: slices at or before the boundary were closed before the
292
+ // worktree-first metadata mandate took effect, so we trust the
293
+ // boundary as authoritative and do not require the slice to have
294
+ // recorded zero metadata across all rows.
295
+ //
296
+ // The earlier v6.14.2 "all-or-nothing" rule rejected the common
297
+ // hox-shape pattern where the GREEN row carries claim/lane/lease
298
+ // (added on the v6.14.x worktree-first flip) but a later
299
+ // `refactor-deferred` terminal row does not. That partial-
300
+ // metadata layout is the operator-visible signature of the
301
+ // failure mode this exemption was introduced to fix; flagging it
302
+ // again under a different code defeated the entire migration.
303
+ //
304
+ // Operators who want a strict gate can opt out by clearing
305
+ // `legacyContinuation` (or omitting `tddWorktreeCutoverSliceId`)
306
+ // — both fields are explicit, persisted, and operator-editable.
295
307
  const isExemptLegacySlice = (sliceId) => {
296
308
  if (!legacyContinuation)
297
309
  return false;
@@ -300,14 +312,7 @@ export async function lintTddStage(ctx) {
300
312
  const n = parseSliceNumber(sliceId);
301
313
  if (n === null)
302
314
  return false;
303
- if (n > worktreeCutoverBoundary)
304
- return false;
305
- const meta = sliceWorktreeMetaState.get(sliceId);
306
- if (!meta)
307
- return true; // no slice-implementer rows at all → fully legacy
308
- // Exempt only when the slice carries ZERO worktree fields across
309
- // all rows. Partial metadata stays flagged.
310
- return !meta.anyMeta;
315
+ return n <= worktreeCutoverBoundary;
311
316
  };
312
317
  const missingGreenMeta = new Set();
313
318
  const exemptedGreenMeta = new Set();
@@ -1334,35 +1339,19 @@ function pickEventTs(rows) {
1334
1339
  return undefined;
1335
1340
  }
1336
1341
  /**
1337
- * v6.14.2 — for each slice id appearing in `slice-implementer` rows of
1338
- * the active run, record whether ANY row carried at least one of the
1339
- * three worktree-first metadata fields (`claimToken`, `ownerLaneId`,
1340
- * `leasedUntil`). Used by `isExemptLegacySlice` to enforce the "all-or-
1341
- * nothing legacy" rule: only slices with NO worktree fields anywhere
1342
- * in their rows qualify for the legacyContinuation amnesty.
1343
- */
1344
- function computeSliceWorktreeMetaState(events) {
1345
- const out = new Map();
1346
- for (const ev of events) {
1347
- if (ev.stage !== "tdd" || ev.agent !== "slice-implementer")
1348
- continue;
1349
- if (typeof ev.sliceId !== "string")
1350
- continue;
1351
- const tok = ev.claimToken?.trim() ?? "";
1352
- const lane = ev.ownerLaneId?.trim() ?? "";
1353
- const lease = ev.leasedUntil?.trim() ?? "";
1354
- const anyHere = tok.length > 0 || lane.length > 0 || lease.length > 0;
1355
- const prev = out.get(ev.sliceId) ?? { anyMeta: false };
1356
- out.set(ev.sliceId, { anyMeta: prev.anyMeta || anyHere });
1357
- }
1358
- return out;
1359
- }
1360
- /**
1361
- * v6.14.2 — slices whose terminal `refactor` / `refactor-deferred` /
1362
- * `resolve-conflict` row recorded a `completedTs` that PRECEDES the
1363
- * latest `leasedUntil` for the same slice. The lease was never
1364
- * reclaimed but the wave closed in time; the missing audit row is
1365
- * advisory bookkeeping, not a correctness failure.
1342
+ * v6.14.2 — slices whose terminal closure event recorded a `completedTs`
1343
+ * that PRECEDES the latest `leasedUntil` for the same slice. The lease
1344
+ * was never reclaimed but the wave closed in time; the missing audit
1345
+ * row is advisory bookkeeping, not a correctness failure.
1346
+ *
1347
+ * v6.14.4 — also recognize stream-mode (per-slice checkpoint) closure:
1348
+ * a `phase=green status=completed` row carrying `refactorOutcome`
1349
+ * (`inline` or `deferred`) IS the slice closure. Without this, every
1350
+ * stream-mode slice incorrectly fired `tdd_lease_expired_unreclaimed`
1351
+ * once its lease expired, even though the slice was already closed
1352
+ * (this mirrors the same predicate already used in
1353
+ * `src/internal/wave-status.ts` for `closedSlices` tracking — same
1354
+ * decision, just now applied to lease-closure detection).
1366
1355
  */
1367
1356
  function computeClosedBeforeLeaseExpiry(events) {
1368
1357
  const terminalPhases = new Set([
@@ -1372,6 +1361,15 @@ function computeClosedBeforeLeaseExpiry(events) {
1372
1361
  ]);
1373
1362
  const lastLease = new Map();
1374
1363
  const earliestTerminal = new Map();
1364
+ const recordTerminal = (sliceId, completedTs) => {
1365
+ const ts = Date.parse(completedTs);
1366
+ if (!Number.isFinite(ts))
1367
+ return;
1368
+ const prev = earliestTerminal.get(sliceId);
1369
+ if (prev === undefined || ts < prev) {
1370
+ earliestTerminal.set(sliceId, ts);
1371
+ }
1372
+ };
1375
1373
  for (const ev of events) {
1376
1374
  if (ev.stage !== "tdd" || ev.agent !== "slice-implementer")
1377
1375
  continue;
@@ -1386,16 +1384,21 @@ function computeClosedBeforeLeaseExpiry(events) {
1386
1384
  }
1387
1385
  }
1388
1386
  }
1389
- if (ev.status === "completed" &&
1390
- typeof ev.phase === "string" &&
1391
- terminalPhases.has(ev.phase) &&
1392
- typeof ev.completedTs === "string") {
1393
- const ts = Date.parse(ev.completedTs);
1394
- if (Number.isFinite(ts)) {
1395
- const prev = earliestTerminal.get(ev.sliceId);
1396
- if (prev === undefined || ts < prev) {
1397
- earliestTerminal.set(ev.sliceId, ts);
1398
- }
1387
+ if (ev.status !== "completed" || typeof ev.completedTs !== "string") {
1388
+ continue;
1389
+ }
1390
+ if (typeof ev.phase !== "string")
1391
+ continue;
1392
+ if (terminalPhases.has(ev.phase)) {
1393
+ recordTerminal(ev.sliceId, ev.completedTs);
1394
+ continue;
1395
+ }
1396
+ // v6.14.4 — stream-mode closure: GREEN-only with refactorOutcome
1397
+ // folded inline IS the slice's terminal row.
1398
+ if (ev.phase === "green" && ev.refactorOutcome) {
1399
+ const mode = ev.refactorOutcome.mode;
1400
+ if (mode === "inline" || mode === "deferred") {
1401
+ recordTerminal(ev.sliceId, ev.completedTs);
1399
1402
  }
1400
1403
  }
1401
1404
  }
package/dist/install.js CHANGED
@@ -939,8 +939,20 @@ async function applyTddCutoverIfNeeded(projectRoot) {
939
939
  if (typeof obj.tddCutoverSliceId === "string" && obj.tddCutoverSliceId.length > 0) {
940
940
  return;
941
941
  }
942
- obj.tddCutoverSliceId = cutoverSliceId;
943
- await writeFileSafe(flowStatePath, `${JSON.stringify(obj, null, 2)}\n`, { mode: 0o600 });
942
+ // v6.14.3 — refresh the SHA256 sidecar by writing through
943
+ // `writeFlowState`. The previous direct `writeFileSafe` invocation
944
+ // left the sidecar stale, so the very next guarded hook on a synced
945
+ // legacy project rejected its own `tddCutoverSliceId` stamp.
946
+ try {
947
+ const state = await readFlowState(projectRoot);
948
+ await writeFlowState(projectRoot, { ...state, tddCutoverSliceId: cutoverSliceId }, {
949
+ allowReset: true,
950
+ writerSubsystem: "sync-v6.12-tdd-cutover-stamp"
951
+ });
952
+ }
953
+ catch {
954
+ // Best-effort: corrupt/missing state is handled elsewhere on sync.
955
+ }
944
956
  }
945
957
  const V613_LEGACY_PLAN_BANNER = "<!-- legacy-continuation: predates v6.13 parallel metadata. New units MAY add dependsOn/claimedPaths/parallelizable; existing units treated as best-effort serial. -->";
946
958
  /**
@@ -1091,9 +1103,16 @@ async function applyV614DefaultsIfNeeded(projectRoot) {
1091
1103
  if (summary.length === 0) {
1092
1104
  return null;
1093
1105
  }
1094
- const merged = { ...obj, ...updates };
1106
+ // v6.14.3 refresh the SHA256 sidecar in lockstep so guarded reads
1107
+ // (verify-current-state, advance-stage, etc.) don't trip a guard
1108
+ // mismatch immediately after `cclaw-cli sync`/`upgrade` writes the
1109
+ // v6.14.2 stream-style defaults.
1095
1110
  try {
1096
- await writeFileSafe(flowStatePath, `${JSON.stringify(merged, null, 2)}\n`, { mode: 0o600 });
1111
+ const state = await readFlowState(projectRoot);
1112
+ await writeFlowState(projectRoot, { ...state, ...updates }, {
1113
+ allowReset: true,
1114
+ writerSubsystem: "sync-v6.14.2-stream-defaults"
1115
+ });
1097
1116
  }
1098
1117
  catch {
1099
1118
  return null;
@@ -1204,10 +1223,18 @@ async function applyV6142WorktreeCutoverIfNeeded(projectRoot) {
1204
1223
  }
1205
1224
  if (!stamped)
1206
1225
  return null;
1207
- const merged = { ...obj, tddWorktreeCutoverSliceId: stamped };
1226
+ // v6.14.3 go through `writeFlowState` so the SHA256 sidecar
1227
+ // (`.cclaw/.flow-state.guard.json`) is refreshed in lockstep with
1228
+ // the on-disk flow-state.json. The previous v6.14.2 implementation
1229
+ // wrote the field via `writeFileSafe` directly, which left the
1230
+ // sidecar pointing at the pre-stamp digest; the next guarded hook
1231
+ // (e.g. `cclaw internal verify-current-state`) then failed with
1232
+ // `flow-state guard mismatch` and demanded a manual repair.
1208
1233
  try {
1209
- await writeFileSafe(flowStatePath, `${JSON.stringify(merged, null, 2)}\n`, {
1210
- mode: 0o600
1234
+ const state = await readFlowState(projectRoot);
1235
+ await writeFlowState(projectRoot, { ...state, tddWorktreeCutoverSliceId: stamped }, {
1236
+ allowReset: true,
1237
+ writerSubsystem: "sync-v6.14.2-worktree-cutover-stamp"
1211
1238
  });
1212
1239
  }
1213
1240
  catch {
@@ -57,8 +57,48 @@ export declare function extractParallelExecutionManagedBody(planMarkdown: string
57
57
  */
58
58
  export declare function extractMembersListFromLine(trimmedLine: string): string | null;
59
59
  /**
60
- * Parse `## Parallel Execution Plan` managed block for wave headings and Members lines.
61
- * Malformed member tokens are skipped. Duplicate slice ids in one plan source throw.
60
+ * v6.14.4 extract a `(sliceId, unitId)` pair from a markdown table data
61
+ * row whose first column is an `S-NN` token. Used by the wave parser to
62
+ * recognize the table-format Parallel Execution Plan that hox-shape
63
+ * projects emit alongside (or instead of) the legacy `**Members:**`
64
+ * bullet line.
65
+ *
66
+ * Rules:
67
+ * - The line must start with `|` (after trimming).
68
+ * - Column 1 (after stripping markdown noise) must match `^S-(\d+)$` —
69
+ * header rows (`| sliceId | …`) and separator rows (`|---|---|…`) are
70
+ * silently skipped.
71
+ * - Column 2, when present and non-empty, becomes the `unitId`
72
+ * verbatim (after stripping whitespace + backticks/quotes/brackets).
73
+ * This preserves the hox convention of recording task ids
74
+ * (`T-010`, `T-008a`, …) in the `unit` column without forcing a
75
+ * `U-NN` derivation.
76
+ * - When column 2 is absent or empty, fall back to the legacy
77
+ * `S-NN → U-NN` derivation so the existing `**Members:**` parser path
78
+ * stays bit-identical for non-table plans.
79
+ */
80
+ export declare function parseTableRowMember(trimmedLine: string): ParsedParallelWaveMember | null;
81
+ /**
82
+ * Parse `## Parallel Execution Plan` managed block for wave headings and
83
+ * member declarations. Recognizes BOTH the legacy `**Members:**` /
84
+ * `Members:` line shape AND the markdown-table shape
85
+ * (`| sliceId | unit | dependsOn | …`) used by hox-shape projects and by
86
+ * any plan written by `cclaw-cli sync` after v6.13.x.
87
+ *
88
+ * Wave headings accepted (case-insensitive, trailing text allowed):
89
+ * - `### Wave 04`
90
+ * - `### Wave W-04`
91
+ * - `### Wave W-04 — после успешного fan-in W-03 (5 lanes …)`
92
+ *
93
+ * Within a single wave the parser dedupes by `sliceId`: if the same
94
+ * slice appears in both `**Members:**` and a table row, the first
95
+ * occurrence wins (line-order). Cross-wave duplicates still throw
96
+ * `WavePlanDuplicateSliceError`.
97
+ *
98
+ * Malformed member tokens are skipped. Empty waves (heading present
99
+ * but neither a Members line nor any matching `| S-NN |` row found
100
+ * before the next heading) are RETURNED with `members: []` so callers
101
+ * can surface the boundary; classification is up to the caller.
62
102
  */
63
103
  export declare function parseParallelExecutionPlanWaves(planMarkdown: string): ParsedParallelWave[];
64
104
  /**
@@ -61,8 +61,73 @@ export function extractMembersListFromLine(trimmedLine) {
61
61
  return null;
62
62
  }
63
63
  /**
64
- * Parse `## Parallel Execution Plan` managed block for wave headings and Members lines.
65
- * Malformed member tokens are skipped. Duplicate slice ids in one plan source throw.
64
+ * v6.14.4 — extract a `(sliceId, unitId)` pair from a markdown table data
65
+ * row whose first column is an `S-NN` token. Used by the wave parser to
66
+ * recognize the table-format Parallel Execution Plan that hox-shape
67
+ * projects emit alongside (or instead of) the legacy `**Members:**`
68
+ * bullet line.
69
+ *
70
+ * Rules:
71
+ * - The line must start with `|` (after trimming).
72
+ * - Column 1 (after stripping markdown noise) must match `^S-(\d+)$` —
73
+ * header rows (`| sliceId | …`) and separator rows (`|---|---|…`) are
74
+ * silently skipped.
75
+ * - Column 2, when present and non-empty, becomes the `unitId`
76
+ * verbatim (after stripping whitespace + backticks/quotes/brackets).
77
+ * This preserves the hox convention of recording task ids
78
+ * (`T-010`, `T-008a`, …) in the `unit` column without forcing a
79
+ * `U-NN` derivation.
80
+ * - When column 2 is absent or empty, fall back to the legacy
81
+ * `S-NN → U-NN` derivation so the existing `**Members:**` parser path
82
+ * stays bit-identical for non-table plans.
83
+ */
84
+ export function parseTableRowMember(trimmedLine) {
85
+ if (!trimmedLine.startsWith("|"))
86
+ return null;
87
+ const inner = trimmedLine.replace(/^\|/u, "").replace(/\|\s*$/u, "");
88
+ if (inner.length === 0)
89
+ return null;
90
+ const cells = inner.split("|").map((cell) => cell.trim());
91
+ if (cells.length === 0)
92
+ return null;
93
+ const stripDecorations = (raw) => raw.replace(/^[`"'[\]()]+|[`"'[\]()]+$/gu, "").trim();
94
+ const col1 = stripDecorations(cells[0]);
95
+ const sliceMatch = /^S-(\d+)$/u.exec(col1);
96
+ if (!sliceMatch)
97
+ return null;
98
+ const sliceNum = sliceMatch[1];
99
+ const sliceId = `S-${sliceNum}`;
100
+ let unitId = `U-${sliceNum}`;
101
+ if (cells.length >= 2) {
102
+ const col2 = stripDecorations(cells[1]);
103
+ if (col2.length > 0) {
104
+ const normalized = tokenToSliceAndUnit(col2);
105
+ unitId = normalized ? normalized.unitId : col2;
106
+ }
107
+ }
108
+ return { sliceId, unitId };
109
+ }
110
+ /**
111
+ * Parse `## Parallel Execution Plan` managed block for wave headings and
112
+ * member declarations. Recognizes BOTH the legacy `**Members:**` /
113
+ * `Members:` line shape AND the markdown-table shape
114
+ * (`| sliceId | unit | dependsOn | …`) used by hox-shape projects and by
115
+ * any plan written by `cclaw-cli sync` after v6.13.x.
116
+ *
117
+ * Wave headings accepted (case-insensitive, trailing text allowed):
118
+ * - `### Wave 04`
119
+ * - `### Wave W-04`
120
+ * - `### Wave W-04 — после успешного fan-in W-03 (5 lanes …)`
121
+ *
122
+ * Within a single wave the parser dedupes by `sliceId`: if the same
123
+ * slice appears in both `**Members:**` and a table row, the first
124
+ * occurrence wins (line-order). Cross-wave duplicates still throw
125
+ * `WavePlanDuplicateSliceError`.
126
+ *
127
+ * Malformed member tokens are skipped. Empty waves (heading present
128
+ * but neither a Members line nor any matching `| S-NN |` row found
129
+ * before the next heading) are RETURNED with `members: []` so callers
130
+ * can surface the boundary; classification is up to the caller.
66
131
  */
67
132
  export function parseParallelExecutionPlanWaves(planMarkdown) {
68
133
  const body = extractParallelExecutionManagedBody(planMarkdown);
@@ -72,22 +137,60 @@ export function parseParallelExecutionPlanWaves(planMarkdown) {
72
137
  const waves = [];
73
138
  let current = null;
74
139
  const seenSlices = new Set();
140
+ let inWaveSlicesSeen = new Set();
75
141
  const flushCurrent = () => {
76
- if (current && current.members.length > 0) {
142
+ if (current) {
77
143
  waves.push(current);
78
144
  }
79
145
  };
146
+ /**
147
+ * Strict add: throw on duplicates within the same wave OR across waves.
148
+ * Used for the `**Members:**` path so v6.13.1's duplicate-detection
149
+ * contract is preserved bit-identically.
150
+ */
151
+ const addMemberStrict = (member) => {
152
+ if (!current)
153
+ return;
154
+ if (inWaveSlicesSeen.has(member.sliceId) ||
155
+ seenSlices.has(member.sliceId)) {
156
+ throw new WavePlanDuplicateSliceError(`duplicate slice ${member.sliceId} in Parallel Execution Plan managed block`);
157
+ }
158
+ seenSlices.add(member.sliceId);
159
+ inWaveSlicesSeen.add(member.sliceId);
160
+ current.members.push(member);
161
+ };
162
+ /**
163
+ * Lenient add: silently dedupe duplicates within the same wave (so the
164
+ * documented "Members + table both present" case keeps the Members
165
+ * declaration as authoritative); still throw on cross-wave duplicates
166
+ * to surface real plan-authoring bugs.
167
+ */
168
+ const addMemberDedupInWave = (member) => {
169
+ if (!current)
170
+ return;
171
+ if (inWaveSlicesSeen.has(member.sliceId))
172
+ return;
173
+ if (seenSlices.has(member.sliceId)) {
174
+ throw new WavePlanDuplicateSliceError(`duplicate slice ${member.sliceId} in Parallel Execution Plan managed block`);
175
+ }
176
+ seenSlices.add(member.sliceId);
177
+ inWaveSlicesSeen.add(member.sliceId);
178
+ current.members.push(member);
179
+ };
80
180
  for (const rawLine of lines) {
81
181
  const trimmed = rawLine.trim();
82
- const waveMatch = /^###\s+Wave\s+(\d+)\s*$/iu.exec(trimmed);
182
+ const waveMatch = /^###\s+Wave\s+(?:W-)?(\d+)\b/iu.exec(trimmed);
83
183
  if (waveMatch) {
84
184
  flushCurrent();
85
185
  const n = waveMatch[1];
86
186
  current = { waveId: `W-${n.padStart(2, "0")}`, members: [] };
187
+ inWaveSlicesSeen = new Set();
87
188
  continue;
88
189
  }
190
+ if (!current)
191
+ continue;
89
192
  const membersCsv = extractMembersListFromLine(trimmed);
90
- if (membersCsv !== null && current) {
193
+ if (membersCsv !== null) {
91
194
  const parts = membersCsv
92
195
  .split(/,/u)
93
196
  .map((p) => p.trim())
@@ -96,12 +199,13 @@ export function parseParallelExecutionPlanWaves(planMarkdown) {
96
199
  const ids = tokenToSliceAndUnit(part);
97
200
  if (!ids)
98
201
  continue;
99
- if (seenSlices.has(ids.sliceId)) {
100
- throw new WavePlanDuplicateSliceError(`duplicate slice ${ids.sliceId} in Parallel Execution Plan managed block`);
101
- }
102
- seenSlices.add(ids.sliceId);
103
- current.members.push(ids);
202
+ addMemberStrict(ids);
104
203
  }
204
+ continue;
205
+ }
206
+ const tableMember = parseTableRowMember(trimmed);
207
+ if (tableMember) {
208
+ addMemberDedupInWave(tableMember);
105
209
  }
106
210
  }
107
211
  flushCurrent();
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "cclaw-cli",
3
- "version": "6.14.2",
3
+ "version": "6.14.4",
4
4
  "description": "Installer-first flow toolkit for coding agents",
5
5
  "type": "module",
6
6
  "bin": {