cclaw-cli 7.7.1 → 8.1.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +211 -134
- package/dist/artifact-frontmatter.d.ts +51 -0
- package/dist/artifact-frontmatter.js +131 -0
- package/dist/artifact-paths.d.ts +7 -27
- package/dist/artifact-paths.js +20 -249
- package/dist/cancel.d.ts +16 -0
- package/dist/cancel.js +66 -0
- package/dist/cli.d.ts +2 -27
- package/dist/cli.js +107 -511
- package/dist/compound.d.ts +26 -0
- package/dist/compound.js +96 -0
- package/dist/config.d.ts +14 -51
- package/dist/config.js +23 -359
- package/dist/constants.d.ts +11 -18
- package/dist/constants.js +19 -106
- package/dist/content/antipatterns.d.ts +1 -0
- package/dist/content/antipatterns.js +109 -0
- package/dist/content/artifact-templates.d.ts +10 -0
- package/dist/content/artifact-templates.js +550 -0
- package/dist/content/cancel-command.d.ts +2 -2
- package/dist/content/cancel-command.js +25 -17
- package/dist/content/core-agents.d.ts +9 -233
- package/dist/content/core-agents.js +39 -768
- package/dist/content/decision-protocol.d.ts +1 -12
- package/dist/content/decision-protocol.js +27 -20
- package/dist/content/examples.d.ts +8 -42
- package/dist/content/examples.js +293 -425
- package/dist/content/idea-command.d.ts +2 -0
- package/dist/content/idea-command.js +38 -0
- package/dist/content/iron-laws.d.ts +4 -138
- package/dist/content/iron-laws.js +18 -197
- package/dist/content/meta-skill.d.ts +1 -3
- package/dist/content/meta-skill.js +57 -134
- package/dist/content/node-hooks.d.ts +12 -8
- package/dist/content/node-hooks.js +188 -838
- package/dist/content/recovery.d.ts +8 -0
- package/dist/content/recovery.js +179 -0
- package/dist/content/reference-patterns.d.ts +4 -13
- package/dist/content/reference-patterns.js +260 -389
- package/dist/content/research-playbooks.d.ts +8 -8
- package/dist/content/research-playbooks.js +108 -121
- package/dist/content/review-loop.d.ts +6 -192
- package/dist/content/review-loop.js +29 -731
- package/dist/content/skills.d.ts +8 -38
- package/dist/content/skills.js +681 -732
- package/dist/content/specialist-prompts/architect.d.ts +1 -0
- package/dist/content/specialist-prompts/architect.js +225 -0
- package/dist/content/specialist-prompts/brainstormer.d.ts +1 -0
- package/dist/content/specialist-prompts/brainstormer.js +168 -0
- package/dist/content/specialist-prompts/index.d.ts +2 -0
- package/dist/content/specialist-prompts/index.js +14 -0
- package/dist/content/specialist-prompts/planner.d.ts +1 -0
- package/dist/content/specialist-prompts/planner.js +182 -0
- package/dist/content/specialist-prompts/reviewer.d.ts +1 -0
- package/dist/content/specialist-prompts/reviewer.js +193 -0
- package/dist/content/specialist-prompts/security-reviewer.d.ts +1 -0
- package/dist/content/specialist-prompts/security-reviewer.js +133 -0
- package/dist/content/specialist-prompts/slice-builder.d.ts +1 -0
- package/dist/content/specialist-prompts/slice-builder.js +232 -0
- package/dist/content/stage-playbooks.d.ts +8 -0
- package/dist/content/stage-playbooks.js +404 -0
- package/dist/content/start-command.d.ts +2 -12
- package/dist/content/start-command.js +221 -207
- package/dist/flow-state.d.ts +21 -178
- package/dist/flow-state.js +67 -170
- package/dist/fs-utils.d.ts +6 -26
- package/dist/fs-utils.js +29 -162
- package/dist/gitignore.d.ts +2 -1
- package/dist/gitignore.js +51 -34
- package/dist/harness-detect.d.ts +10 -0
- package/dist/harness-detect.js +29 -0
- package/dist/harness-prompt.d.ts +26 -0
- package/dist/harness-prompt.js +142 -0
- package/dist/install.d.ts +35 -15
- package/dist/install.js +238 -1347
- package/dist/knowledge-store.d.ts +19 -163
- package/dist/knowledge-store.js +56 -590
- package/dist/logger.d.ts +8 -3
- package/dist/logger.js +13 -4
- package/dist/orchestrator-routing.d.ts +29 -0
- package/dist/orchestrator-routing.js +156 -0
- package/dist/run-persistence.d.ts +7 -118
- package/dist/run-persistence.js +29 -845
- package/dist/runtime/run-hook.entry.d.ts +1 -3
- package/dist/runtime/run-hook.entry.js +19 -4
- package/dist/runtime/run-hook.mjs +13 -1024
- package/dist/types.d.ts +25 -261
- package/dist/types.js +8 -36
- package/package.json +6 -3
- package/dist/artifact-linter/brainstorm.d.ts +0 -2
- package/dist/artifact-linter/brainstorm.js +0 -353
- package/dist/artifact-linter/design.d.ts +0 -18
- package/dist/artifact-linter/design.js +0 -444
- package/dist/artifact-linter/findings-dedup.d.ts +0 -56
- package/dist/artifact-linter/findings-dedup.js +0 -232
- package/dist/artifact-linter/plan.d.ts +0 -2
- package/dist/artifact-linter/plan.js +0 -826
- package/dist/artifact-linter/review-army.d.ts +0 -49
- package/dist/artifact-linter/review-army.js +0 -520
- package/dist/artifact-linter/review.d.ts +0 -2
- package/dist/artifact-linter/review.js +0 -113
- package/dist/artifact-linter/scope.d.ts +0 -2
- package/dist/artifact-linter/scope.js +0 -158
- package/dist/artifact-linter/shared.d.ts +0 -637
- package/dist/artifact-linter/shared.js +0 -2163
- package/dist/artifact-linter/ship.d.ts +0 -2
- package/dist/artifact-linter/ship.js +0 -250
- package/dist/artifact-linter/spec.d.ts +0 -2
- package/dist/artifact-linter/spec.js +0 -176
- package/dist/artifact-linter/tdd.d.ts +0 -118
- package/dist/artifact-linter/tdd.js +0 -1404
- package/dist/artifact-linter.d.ts +0 -15
- package/dist/artifact-linter.js +0 -517
- package/dist/codex-feature-flag.d.ts +0 -58
- package/dist/codex-feature-flag.js +0 -193
- package/dist/content/closeout-guidance.d.ts +0 -14
- package/dist/content/closeout-guidance.js +0 -44
- package/dist/content/diff-command.d.ts +0 -1
- package/dist/content/diff-command.js +0 -43
- package/dist/content/harness-doc.d.ts +0 -1
- package/dist/content/harness-doc.js +0 -65
- package/dist/content/hook-events.d.ts +0 -9
- package/dist/content/hook-events.js +0 -23
- package/dist/content/hook-manifest.d.ts +0 -81
- package/dist/content/hook-manifest.js +0 -156
- package/dist/content/hooks.d.ts +0 -11
- package/dist/content/hooks.js +0 -1972
- package/dist/content/idea.d.ts +0 -60
- package/dist/content/idea.js +0 -416
- package/dist/content/language-policy.d.ts +0 -2
- package/dist/content/language-policy.js +0 -13
- package/dist/content/learnings.d.ts +0 -6
- package/dist/content/learnings.js +0 -141
- package/dist/content/observe.d.ts +0 -19
- package/dist/content/observe.js +0 -86
- package/dist/content/opencode-plugin.d.ts +0 -1
- package/dist/content/opencode-plugin.js +0 -635
- package/dist/content/review-prompts.d.ts +0 -1
- package/dist/content/review-prompts.js +0 -104
- package/dist/content/runtime-shared-snippets.d.ts +0 -8
- package/dist/content/runtime-shared-snippets.js +0 -80
- package/dist/content/session-hooks.d.ts +0 -7
- package/dist/content/session-hooks.js +0 -107
- package/dist/content/skills-elicitation.d.ts +0 -1
- package/dist/content/skills-elicitation.js +0 -167
- package/dist/content/stage-command.d.ts +0 -2
- package/dist/content/stage-command.js +0 -17
- package/dist/content/stage-schema.d.ts +0 -117
- package/dist/content/stage-schema.js +0 -955
- package/dist/content/stages/_lint-metadata/index.d.ts +0 -2
- package/dist/content/stages/_lint-metadata/index.js +0 -97
- package/dist/content/stages/brainstorm.d.ts +0 -2
- package/dist/content/stages/brainstorm.js +0 -184
- package/dist/content/stages/design.d.ts +0 -2
- package/dist/content/stages/design.js +0 -288
- package/dist/content/stages/index.d.ts +0 -8
- package/dist/content/stages/index.js +0 -11
- package/dist/content/stages/plan.d.ts +0 -2
- package/dist/content/stages/plan.js +0 -191
- package/dist/content/stages/review.d.ts +0 -2
- package/dist/content/stages/review.js +0 -240
- package/dist/content/stages/schema-types.d.ts +0 -203
- package/dist/content/stages/schema-types.js +0 -1
- package/dist/content/stages/scope.d.ts +0 -2
- package/dist/content/stages/scope.js +0 -254
- package/dist/content/stages/ship.d.ts +0 -2
- package/dist/content/stages/ship.js +0 -159
- package/dist/content/stages/spec.d.ts +0 -2
- package/dist/content/stages/spec.js +0 -170
- package/dist/content/stages/tdd.d.ts +0 -4
- package/dist/content/stages/tdd.js +0 -273
- package/dist/content/state-contracts.d.ts +0 -1
- package/dist/content/state-contracts.js +0 -63
- package/dist/content/status-command.d.ts +0 -4
- package/dist/content/status-command.js +0 -109
- package/dist/content/subagent-context-skills.d.ts +0 -4
- package/dist/content/subagent-context-skills.js +0 -279
- package/dist/content/subagents.d.ts +0 -3
- package/dist/content/subagents.js +0 -997
- package/dist/content/templates.d.ts +0 -26
- package/dist/content/templates.js +0 -1692
- package/dist/content/track-render-context.d.ts +0 -18
- package/dist/content/track-render-context.js +0 -53
- package/dist/content/tree-command.d.ts +0 -1
- package/dist/content/tree-command.js +0 -64
- package/dist/content/utility-skills.d.ts +0 -30
- package/dist/content/utility-skills.js +0 -160
- package/dist/content/view-command.d.ts +0 -2
- package/dist/content/view-command.js +0 -92
- package/dist/delegation.d.ts +0 -649
- package/dist/delegation.js +0 -1539
- package/dist/early-loop.d.ts +0 -70
- package/dist/early-loop.js +0 -302
- package/dist/execution-topology.d.ts +0 -44
- package/dist/execution-topology.js +0 -95
- package/dist/gate-evidence.d.ts +0 -85
- package/dist/gate-evidence.js +0 -631
- package/dist/harness-adapters.d.ts +0 -151
- package/dist/harness-adapters.js +0 -756
- package/dist/harness-selection.d.ts +0 -31
- package/dist/harness-selection.js +0 -214
- package/dist/hook-schema.d.ts +0 -6
- package/dist/hook-schema.js +0 -114
- package/dist/hook-schemas/claude-hooks.v1.json +0 -10
- package/dist/hook-schemas/codex-hooks.v1.json +0 -10
- package/dist/hook-schemas/cursor-hooks.v1.json +0 -13
- package/dist/init-detect.d.ts +0 -2
- package/dist/init-detect.js +0 -50
- package/dist/internal/advance-stage/advance.d.ts +0 -89
- package/dist/internal/advance-stage/advance.js +0 -655
- package/dist/internal/advance-stage/cancel-run.d.ts +0 -8
- package/dist/internal/advance-stage/cancel-run.js +0 -19
- package/dist/internal/advance-stage/flow-state-coercion.d.ts +0 -3
- package/dist/internal/advance-stage/flow-state-coercion.js +0 -81
- package/dist/internal/advance-stage/helpers.d.ts +0 -14
- package/dist/internal/advance-stage/helpers.js +0 -145
- package/dist/internal/advance-stage/hook.d.ts +0 -8
- package/dist/internal/advance-stage/hook.js +0 -40
- package/dist/internal/advance-stage/parsers.d.ts +0 -72
- package/dist/internal/advance-stage/parsers.js +0 -357
- package/dist/internal/advance-stage/proactive-delegation-trace.d.ts +0 -24
- package/dist/internal/advance-stage/proactive-delegation-trace.js +0 -56
- package/dist/internal/advance-stage/review-loop.d.ts +0 -16
- package/dist/internal/advance-stage/review-loop.js +0 -199
- package/dist/internal/advance-stage/rewind.d.ts +0 -14
- package/dist/internal/advance-stage/rewind.js +0 -108
- package/dist/internal/advance-stage/start-flow.d.ts +0 -13
- package/dist/internal/advance-stage/start-flow.js +0 -241
- package/dist/internal/advance-stage/verify.d.ts +0 -21
- package/dist/internal/advance-stage/verify.js +0 -185
- package/dist/internal/advance-stage.d.ts +0 -7
- package/dist/internal/advance-stage.js +0 -138
- package/dist/internal/cohesion-contract-stub.d.ts +0 -24
- package/dist/internal/cohesion-contract-stub.js +0 -148
- package/dist/internal/compound-readiness.d.ts +0 -23
- package/dist/internal/compound-readiness.js +0 -102
- package/dist/internal/detect-public-api-changes.d.ts +0 -5
- package/dist/internal/detect-public-api-changes.js +0 -45
- package/dist/internal/detect-supply-chain-changes.d.ts +0 -6
- package/dist/internal/detect-supply-chain-changes.js +0 -138
- package/dist/internal/early-loop-status.d.ts +0 -7
- package/dist/internal/early-loop-status.js +0 -93
- package/dist/internal/envelope-validate.d.ts +0 -7
- package/dist/internal/envelope-validate.js +0 -66
- package/dist/internal/flow-state-repair.d.ts +0 -20
- package/dist/internal/flow-state-repair.js +0 -104
- package/dist/internal/plan-split-waves.d.ts +0 -190
- package/dist/internal/plan-split-waves.js +0 -764
- package/dist/internal/runtime-integrity.d.ts +0 -7
- package/dist/internal/runtime-integrity.js +0 -268
- package/dist/internal/slice-commit.d.ts +0 -7
- package/dist/internal/slice-commit.js +0 -619
- package/dist/internal/tdd-loop-status.d.ts +0 -14
- package/dist/internal/tdd-loop-status.js +0 -68
- package/dist/internal/tdd-red-evidence.d.ts +0 -7
- package/dist/internal/tdd-red-evidence.js +0 -153
- package/dist/internal/waiver-grant.d.ts +0 -62
- package/dist/internal/waiver-grant.js +0 -294
- package/dist/internal/wave-status.d.ts +0 -74
- package/dist/internal/wave-status.js +0 -506
- package/dist/managed-resources.d.ts +0 -53
- package/dist/managed-resources.js +0 -313
- package/dist/policy.d.ts +0 -10
- package/dist/policy.js +0 -167
- package/dist/retro-gate.d.ts +0 -9
- package/dist/retro-gate.js +0 -47
- package/dist/run-archive.d.ts +0 -61
- package/dist/run-archive.js +0 -391
- package/dist/runs.d.ts +0 -2
- package/dist/runs.js +0 -2
- package/dist/stack-detection.d.ts +0 -116
- package/dist/stack-detection.js +0 -489
- package/dist/streaming/event-stream.d.ts +0 -31
- package/dist/streaming/event-stream.js +0 -114
- package/dist/tdd-cycle.d.ts +0 -107
- package/dist/tdd-cycle.js +0 -289
- package/dist/tdd-verification-evidence.d.ts +0 -17
- package/dist/tdd-verification-evidence.js +0 -122
- package/dist/track-heuristics.d.ts +0 -27
- package/dist/track-heuristics.js +0 -154
- package/dist/util/slice-id.d.ts +0 -58
- package/dist/util/slice-id.js +0 -89
- package/dist/worktree-manager.d.ts +0 -20
- package/dist/worktree-manager.js +0 -108
|
@@ -1,826 +0,0 @@
|
|
|
1
|
-
import { evaluateInvestigationTrace, evaluateLayeredDocumentReviewStatus, extractAcceptanceCriterionIdsFromMarkdown, extractAuthoredBody, extractH2Sections, headingPresent, sectionBodyByName, collectPatternHits, PLACEHOLDER_PATTERNS, extractDecisionIds, SCOPE_REDUCTION_PATTERNS } from "./shared.js";
|
|
2
|
-
import { resolveArtifactPath as resolveStageArtifactPath } from "../artifact-paths.js";
|
|
3
|
-
import { exists } from "../fs-utils.js";
|
|
4
|
-
import { FORBIDDEN_PLACEHOLDER_TOKENS, CONFIDENCE_FINDING_REGEX_SOURCE } from "../content/skills.js";
|
|
5
|
-
import fs from "node:fs/promises";
|
|
6
|
-
import path from "node:path";
|
|
7
|
-
import { PLAN_SPLIT_SMALL_PLAN_THRESHOLD, parseImplementationUnits, parseImplementationUnitParallelFields, parseParallelExecutionPlanWaves } from "../internal/plan-split-waves.js";
|
|
8
|
-
import { compareSliceIds, parseSliceId } from "../util/slice-id.js";
|
|
9
|
-
import { execFile } from "node:child_process";
|
|
10
|
-
import { promisify } from "node:util";
|
|
11
|
-
import { loadStackAdapter } from "../stack-detection.js";
|
|
12
|
-
import { readConfig, resolveExecutionStrictness, resolveExecutionTopology, resolvePlanMicroTaskPolicy, resolvePlanSliceGranularity } from "../config.js";
|
|
13
|
-
const execFileAsync = promisify(execFile);
// Delimiters of the machine-managed parallel-execution block inside the plan artifact.
const PARALLEL_EXEC_MANAGED_START = "<!-- parallel-exec-managed-start -->";
const PARALLEL_EXEC_MANAGED_END = "<!-- parallel-exec-managed-end -->";
// Id shapes recognized by the linter: T-012 / T-012a / T-012.3, U-7 / U-7b, AC-3.
const TASK_ID_PATTERN = /\bT-\d{3}[a-z]?(?:\.\d{1,3})?\b/giu;
const UNIT_ID_PATTERN = /\bU-\d+(?:[a-z][a-z0-9]*)?\b/giu;
const ACCEPTANCE_ID_PATTERN = /\bAC-\d+\b/giu;
// Lanes a plan wave row may legally declare.
const PLAN_LANE_WHITELIST = new Set([
    "production",
    "test",
    "docs",
    "infra",
    "scaffold",
    "migration"
]);
|
|
27
|
-
/**
 * Collect every distinct task id (T-NNN with optional letter suffix and
 * optional .NNN sub-part) that appears anywhere in a markdown body.
 *
 * The `plan_parallel_exec_full_coverage` linter uses this to compare the
 * authored task set (from `## Task List`) against the wave-claimed task
 * set (inside `<!-- parallel-exec-managed-start -->`).
 *
 * @param {string} body - Markdown text to scan.
 * @returns {Set<string>} Distinct task ids, in the case they were written.
 */
function extractTaskIds(body) {
    const found = new Set();
    for (const hit of body.matchAll(TASK_ID_PATTERN)) {
        found.add(hit[0]);
    }
    return found;
}
|
|
41
|
-
/**
 * Collect every distinct implementation-unit id (U-<n> with optional
 * letter suffix) from a markdown body, normalized to upper case.
 *
 * @param {string} body - Markdown text to scan.
 * @returns {Set<string>} Upper-cased unit ids.
 */
function extractUnitIds(body) {
    const found = new Set();
    for (const hit of body.matchAll(UNIT_ID_PATTERN)) {
        found.add(hit[0].toUpperCase());
    }
    return found;
}
|
|
48
|
-
/**
 * Find AC-N / T-NNN co-occurrences. Any single line that mentions at
 * least one acceptance-criterion id and at least one task id yields one
 * link per (AC, task) pair on that line.
 *
 * @param {string} body - Markdown text, scanned line by line.
 * @returns {Array<{acId: string, taskId: string}>} All pairs; AC ids upper-cased, task ids as written.
 */
function extractAcceptanceTaskLinks(body) {
    const pairs = [];
    for (const line of body.split(/\r?\n/u)) {
        const acceptanceIds = [...line.matchAll(ACCEPTANCE_ID_PATTERN)].map((m) => m[0].toUpperCase());
        if (acceptanceIds.length === 0) {
            continue;
        }
        const taskIds = [...line.matchAll(TASK_ID_PATTERN)].map((m) => m[0]);
        for (const acId of acceptanceIds) {
            for (const taskId of taskIds) {
                pairs.push({ acId, taskId });
            }
        }
    }
    return pairs;
}
|
|
63
|
-
/**
 * Slice out the text between the parallel-exec managed comment markers,
 * or return "" when either marker is missing or they appear out of
 * order. The TDD wave parser uses the same delimiters; keeping the
 * lookup local avoids cross-package import cycles in the linter.
 *
 * @param {string} planMarkdown - Full plan artifact text.
 * @returns {string} Inner body, or "" when no well-formed block exists.
 */
function extractParallelExecManagedBody(planMarkdown) {
    const start = planMarkdown.indexOf(PARALLEL_EXEC_MANAGED_START);
    if (start === -1) {
        return "";
    }
    const end = planMarkdown.indexOf(PARALLEL_EXEC_MANAGED_END);
    if (end <= start) {
        // Covers both a missing end marker (-1) and a reversed pair.
        return "";
    }
    return planMarkdown.slice(start + PARALLEL_EXEC_MANAGED_START.length, end);
}
|
|
77
|
-
/**
 * Canonicalize a path token from a markdown table cell: trim whitespace,
 * strip surrounding backticks, and drop a leading "./" prefix.
 *
 * @param {string} raw - Cell text, possibly backtick-quoted.
 * @returns {string} Normalized repo-relative path token.
 */
function normalizePathToken(raw) {
    const unquoted = raw.trim().replace(/^`|`$/gu, "");
    return unquoted.replace(/^\.\/+/u, "");
}
|
|
80
|
-
/**
 * Split a markdown table row into trimmed cell strings. The leading
 * pipe and any trailing pipe (plus trailing whitespace) are removed
 * before splitting on "|".
 *
 * @param {string} trimmedLine - An already-trimmed line starting with "|".
 * @returns {string[]} Cell contents, each trimmed.
 */
function parsePipeRow(trimmedLine) {
    const withoutLead = trimmedLine.replace(/^\|/u, "");
    const inner = withoutLead.replace(/\|\s*$/u, "");
    return inner.split("|").map((cell) => cell.trim());
}
|
|
84
|
-
/**
 * Map normalized header names to their column index. Each header cell
 * is lower-cased and stripped of non-alphanumerics ("Slice ID" ->
 * "sliceid"); blank keys are skipped and the first occurrence of a
 * duplicate key wins.
 *
 * @param {string[]} cells - Header-row cells.
 * @returns {Map<string, number>} Normalized name -> column index.
 */
function headerIndexByName(cells) {
    const byName = new Map();
    cells.forEach((cell, column) => {
        const key = cell.toLowerCase().replace(/[^a-z0-9]/gu, "");
        if (key.length > 0 && !byName.has(key)) {
            byName.set(key, column);
        }
    });
    return byName;
}
|
|
94
|
-
/**
 * Parse the managed parallel-execution block of a plan into per-wave
 * metadata. Each `### Wave N` heading opens a new wave; subsequent pipe
 * rows become slice rows, and every line inside a wave is also kept
 * verbatim as a note. A data row is accepted when its first cell parses
 * as a slice id or matches `U-<n>` (unit ids are mapped onto synthetic
 * `S-<n>` ids).
 *
 * @param {string} planMarkdown - Full plan artifact text.
 * @returns {Array<{waveId: string, rows: object[], notes: string[]}>} Parsed waves in document order.
 */
function parseParallelWaveTableMetadata(planMarkdown) {
    const managedBody = extractParallelExecManagedBody(planMarkdown);
    if (managedBody.trim().length === 0) {
        return [];
    }
    const waves = [];
    let wave = null;
    let headerIdx = null;
    const flush = () => {
        if (wave) {
            waves.push(wave);
        }
    };
    for (const rawLine of managedBody.split(/\r?\n/u)) {
        const trimmed = rawLine.trim();
        // A wave heading closes the previous wave and resets the header map.
        const heading = /^###\s+Wave\s+(?:W-)?(\d+)\b/iu.exec(trimmed);
        if (heading) {
            flush();
            wave = {
                waveId: `W-${heading[1].padStart(2, "0")}`,
                rows: [],
                notes: []
            };
            headerIdx = null;
            continue;
        }
        if (!wave) {
            continue;
        }
        // Notes retain every line inside the wave, tables included.
        wave.notes.push(trimmed);
        if (!trimmed.startsWith("|")) {
            continue;
        }
        const cells = parsePipeRow(trimmed);
        if (cells.length === 0) {
            continue;
        }
        const firstCell = cells[0].toLowerCase();
        if (["sliceid", "slice id", "unitid", "unit id", "unit"].includes(firstCell)) {
            headerIdx = headerIndexByName(cells);
            continue;
        }
        // Skip markdown alignment rows such as | --- | :---: |.
        if (cells.every((cell) => /^:?-{3,}:?$/u.test(cell))) {
            continue;
        }
        const sliceCell = cells[0].replace(/^`|`$/gu, "").trim();
        const parsedSlice = parseSliceId(sliceCell);
        const parsedUnit = /^U-(\d+(?:[a-z][a-z0-9]*)?)$/iu.exec(sliceCell);
        if (!parsedSlice && !parsedUnit) {
            continue;
        }
        const sliceId = parsedSlice?.id ?? `S-${parsedUnit[1].toLowerCase()}`;
        // Without a header row, fall back to "unit lives in column 1".
        const idx = headerIdx ?? new Map();
        const unitIdx = idx.get("unit") ?? idx.get("taskid") ?? 1;
        const pathsIdx = idx.get("claimedpaths");
        const parallelizableIdx = idx.get("parallelizable");
        const laneIdx = idx.get("lane");
        const dependsOnIdx = idx.get("dependson");
        const rawPaths = pathsIdx === undefined ? "" : (cells[pathsIdx] ?? "");
        const claimedPaths = rawPaths.length === 0
            ? []
            : rawPaths
                .split(",")
                .map((token) => normalizePathToken(token))
                .filter((token) => token.length > 0);
        const rawParallel = parallelizableIdx === undefined
            ? ""
            : (cells[parallelizableIdx] ?? "").toLowerCase();
        let parallelizable = null;
        if (rawParallel === "true" || rawParallel === "yes") {
            parallelizable = true;
        }
        else if (rawParallel === "false" || rawParallel === "no") {
            parallelizable = false;
        }
        const laneRaw = laneIdx === undefined ? "" : (cells[laneIdx] ?? "").trim().toLowerCase();
        const rawDeps = dependsOnIdx === undefined ? "" : (cells[dependsOnIdx] ?? "");
        const dependsOn = rawDeps.length === 0
            ? []
            : rawDeps
                .replace(/^\[|\]$/gu, "")
                .split(/[,\s]+/u)
                .map((token) => token.trim().replace(/^`|`$/gu, ""))
                .map((token) => parseSliceId(token)?.id ?? "")
                .filter((id) => id.length > 0);
        wave.rows.push({
            sliceId,
            unit: (cells[unitIdx] ?? "").trim(),
            claimedPaths,
            parallelizable,
            lane: laneRaw.length > 0 ? laneRaw : null,
            dependsOn
        });
    }
    flush();
    return waves;
}
|
|
187
|
-
/**
 * True when a wave's free-form notes signal sequential execution:
 * either an explicit "mode: sequential" directive (which also catches
 * forms like "mode: sequentially") or a bare "sequential"/"serial"
 * word anywhere in the notes.
 *
 * @param {{notes: string[]}} wave - Parsed wave metadata.
 * @returns {boolean} Whether the wave should run sequentially.
 */
function waveHasSequentialModeHint(wave) {
    const text = wave.notes.join("\n").toLowerCase();
    if (/mode\s*:\s*sequential/iu.test(text)) {
        return true;
    }
    return /\bsequential\b/iu.test(text) || /\bserial\b/iu.test(text);
}
|
|
191
|
-
/**
 * Capture the set of repo-relative paths tracked at HEAD, with
 * backslashes normalized to forward slashes. Returns an empty set when
 * the project root is not a git repo or `git ls-files` fails — the
 * wiring linter degrades to "no aggregator required" in that case
 * rather than crashing the whole stage check.
 *
 * @param {string} projectRoot - Directory in which to run git.
 * @returns {Promise<Set<string>>} Tracked paths, "/"-separated.
 */
async function readHeadFiles(projectRoot) {
    try {
        // -z yields NUL-separated output so paths containing spaces or
        // newlines survive the split intact.
        const { stdout } = await execFileAsync("git", ["ls-files", "-z"], {
            cwd: projectRoot,
            maxBuffer: 64 * 1024 * 1024
        });
        const tracked = new Set();
        for (const entry of stdout.split("\u0000")) {
            const candidate = entry.trim();
            if (candidate.length > 0) {
                tracked.add(candidate.replace(/\\/gu, "/"));
            }
        }
        return tracked;
    }
    catch {
        // Deliberate best-effort: any git failure degrades to "no files".
        return new Set();
    }
}
|
|
213
|
-
/**
 * Flatten wave rows into a slice-id -> row lookup. When the same slice
 * id appears in multiple waves, the last occurrence wins.
 *
 * @param {Array<{rows: Array<{sliceId: string}>}>} waves - Parsed waves.
 * @returns {{bySliceId: Map<string, object>}} The claim graph.
 */
function buildSliceClaimGraph(waves) {
    const bySliceId = new Map();
    for (const { rows } of waves) {
        for (const row of rows) {
            bySliceId.set(row.sliceId, row);
        }
    }
    return { bySliceId };
}
|
|
222
|
-
/**
 * Walk the dependsOn graph from `sliceId` and return every transitive
 * predecessor slice id. Ids absent from the graph are still reported
 * once discovered but are not expanded further, and a `visiting` set
 * guards against cycles so a malformed plan cannot hang the linter.
 *
 * @param {string} sliceId - Slice id to start the walk from.
 * @param {{bySliceId: Map<string, {dependsOn: string[]}>}} graph - Claim graph from buildSliceClaimGraph.
 * @returns {Set<string>} Normalized predecessor slice ids.
 */
function transitivePredecessors(sliceId, graph) {
    const predecessors = new Set();
    const visiting = new Set();
    const pending = [sliceId];
    while (pending.length > 0) {
        const node = pending.pop();
        if (visiting.has(node)) {
            continue;
        }
        visiting.add(node);
        const row = graph.bySliceId.get(node);
        if (!row) {
            continue;
        }
        for (const dep of row.dependsOn) {
            // Re-normalize in case the plan wrote a loose id form.
            const normalized = parseSliceId(dep)?.id ?? dep;
            if (!predecessors.has(normalized)) {
                predecessors.add(normalized);
                pending.push(normalized);
            }
        }
    }
    return predecessors;
}
|
|
250
|
-
export async function lintPlanStage(ctx) {
|
|
251
|
-
const { projectRoot, track, raw, absFile, sections, findings, parsedFrontmatter, brainstormShortCircuitBody, brainstormShortCircuitActivated, staleDiagramAuditEnabled, isTrivialOverride } = ctx;
|
|
252
|
-
const config = await readConfig(projectRoot).catch(() => null);
|
|
253
|
-
const executionStrictness = resolveExecutionStrictness(config);
|
|
254
|
-
const executionTopology = resolveExecutionTopology(config);
|
|
255
|
-
const planSliceGranularity = resolvePlanSliceGranularity(config);
|
|
256
|
-
const planMicroTaskPolicy = resolvePlanMicroTaskPolicy(config);
|
|
257
|
-
evaluateInvestigationTrace(ctx, "Implementation Units");
|
|
258
|
-
const strictPlanGuards = parsedFrontmatter.hasFrontmatter ||
|
|
259
|
-
headingPresent(sections, "Plan Quality Scan") ||
|
|
260
|
-
headingPresent(sections, "Locked Decision Coverage");
|
|
261
|
-
const taskListBody = sectionBodyByName(sections, "Task List") ?? raw;
|
|
262
|
-
const placeholderHits = collectPatternHits(taskListBody, PLACEHOLDER_PATTERNS);
|
|
263
|
-
findings.push({
|
|
264
|
-
section: "Plan Quality Scan: Placeholders",
|
|
265
|
-
required: strictPlanGuards,
|
|
266
|
-
rule: "Task List must not contain placeholders (TODO/TBD/FIXME/<fill-in>/<your-*-here>/xxx/ellipsis).",
|
|
267
|
-
found: placeholderHits.length === 0,
|
|
268
|
-
details: placeholderHits.length === 0
|
|
269
|
-
? "No placeholder tokens detected in Task List."
|
|
270
|
-
: `Detected placeholder token(s) in Task List: ${placeholderHits.join(", ")}.`
|
|
271
|
-
});
|
|
272
|
-
const scopeArtifact = await resolveStageArtifactPath("scope", {
|
|
273
|
-
projectRoot,
|
|
274
|
-
track,
|
|
275
|
-
intent: "read"
|
|
276
|
-
});
|
|
277
|
-
const scopeRaw = (await exists(scopeArtifact.absPath))
|
|
278
|
-
? await fs.readFile(scopeArtifact.absPath, "utf8")
|
|
279
|
-
: "";
|
|
280
|
-
const scopeDecisionIds = extractDecisionIds(scopeRaw);
|
|
281
|
-
const missingDecisionRefs = scopeDecisionIds.filter((id) => !raw.includes(id));
|
|
282
|
-
findings.push({
|
|
283
|
-
section: "Locked Decision Traceability",
|
|
284
|
-
required: strictPlanGuards && scopeDecisionIds.length > 0,
|
|
285
|
-
rule: "Every locked decision ID (D-XX) in scope must be referenced in plan.",
|
|
286
|
-
found: missingDecisionRefs.length === 0,
|
|
287
|
-
details: scopeDecisionIds.length === 0
|
|
288
|
-
? "No D-XX IDs found in scope artifact; traceability check skipped."
|
|
289
|
-
: missingDecisionRefs.length === 0
|
|
290
|
-
? `All ${scopeDecisionIds.length} scope decision IDs are referenced in plan.`
|
|
291
|
-
: `Missing scope decision reference(s) in plan: ${missingDecisionRefs.join(", ")}.`
|
|
292
|
-
});
|
|
293
|
-
const reductionHits = collectPatternHits(taskListBody, SCOPE_REDUCTION_PATTERNS);
|
|
294
|
-
findings.push({
|
|
295
|
-
section: "Plan Quality Scan: Scope Reduction",
|
|
296
|
-
required: strictPlanGuards && scopeDecisionIds.length > 0,
|
|
297
|
-
rule: "Task List must not include scope-reduction language when locked decisions exist.",
|
|
298
|
-
found: reductionHits.length === 0,
|
|
299
|
-
details: scopeDecisionIds.length === 0
|
|
300
|
-
? "No locked decisions found in scope artifact; scope-reduction scan is advisory."
|
|
301
|
-
: reductionHits.length === 0
|
|
302
|
-
? "No scope-reduction phrases detected in Task List."
|
|
303
|
-
: `Detected scope-reduction phrase(s) in Task List: ${reductionHits.join(", ")}.`
|
|
304
|
-
});
|
|
305
|
-
const authoredTaskIds = extractTaskIds(taskListBody);
|
|
306
|
-
const acceptanceMappingBody = sectionBodyByName(sections, "Acceptance Mapping") ?? "";
|
|
307
|
-
const acTaskLinks = [
|
|
308
|
-
...extractAcceptanceTaskLinks(taskListBody),
|
|
309
|
-
...extractAcceptanceTaskLinks(acceptanceMappingBody)
|
|
310
|
-
];
|
|
311
|
-
const mappedTaskToAcs = new Map();
|
|
312
|
-
const mappedAcToTasks = new Map();
|
|
313
|
-
for (const link of acTaskLinks) {
|
|
314
|
-
const taskSet = mappedTaskToAcs.get(link.taskId) ?? new Set();
|
|
315
|
-
taskSet.add(link.acId);
|
|
316
|
-
mappedTaskToAcs.set(link.taskId, taskSet);
|
|
317
|
-
const acSet = mappedAcToTasks.get(link.acId) ?? new Set();
|
|
318
|
-
acSet.add(link.taskId);
|
|
319
|
-
mappedAcToTasks.set(link.acId, acSet);
|
|
320
|
-
}
|
|
321
|
-
const tasksMissingAc = [...authoredTaskIds].filter((taskId) => !mappedTaskToAcs.has(taskId));
|
|
322
|
-
let specAcIds = [];
|
|
323
|
-
const specArtifact = await resolveStageArtifactPath("spec", {
|
|
324
|
-
projectRoot,
|
|
325
|
-
track,
|
|
326
|
-
intent: "read"
|
|
327
|
-
});
|
|
328
|
-
if (await exists(specArtifact.absPath)) {
|
|
329
|
-
try {
|
|
330
|
-
const specRaw = await fs.readFile(specArtifact.absPath, "utf8");
|
|
331
|
-
const specSections = extractH2Sections(specRaw);
|
|
332
|
-
const acceptanceBody = sectionBodyByName(specSections, "Acceptance Criteria") ?? specRaw;
|
|
333
|
-
specAcIds = extractAcceptanceCriterionIdsFromMarkdown(acceptanceBody);
|
|
334
|
-
}
|
|
335
|
-
catch {
|
|
336
|
-
specAcIds = [];
|
|
337
|
-
}
|
|
338
|
-
}
|
|
339
|
-
const acsMissingTask = specAcIds.filter((acId) => !mappedAcToTasks.has(acId));
|
|
340
|
-
const mappingFound = authoredTaskIds.size > 0 &&
|
|
341
|
-
tasksMissingAc.length === 0 &&
|
|
342
|
-
acsMissingTask.length === 0;
|
|
343
|
-
findings.push({
|
|
344
|
-
section: "plan_acceptance_mapped",
|
|
345
|
-
required: authoredTaskIds.size > 0,
|
|
346
|
-
rule: "Every T-NNN task must reference >=1 AC-N, and every AC-N from spec must be referenced by >=1 plan task.",
|
|
347
|
-
found: mappingFound,
|
|
348
|
-
details: authoredTaskIds.size === 0
|
|
349
|
-
? "Task List has no T-NNN ids; acceptance mapping check skipped."
|
|
350
|
-
: tasksMissingAc.length > 0
|
|
351
|
-
? `Task(s) missing AC mapping: ${tasksMissingAc.join(", ")}. Add AC-N references in Task List or Acceptance Mapping.`
|
|
352
|
-
: acsMissingTask.length > 0
|
|
353
|
-
? `Spec AC(s) missing task coverage: ${acsMissingTask.join(", ")}.`
|
|
354
|
-
: specAcIds.length === 0
|
|
355
|
-
? `Mapped ${authoredTaskIds.size} task(s) to AC ids; spec artifact AC list is empty or unavailable.`
|
|
356
|
-
: `Mapped ${authoredTaskIds.size} task(s) across ${specAcIds.length} spec AC(s).`
|
|
357
|
-
});
|
|
358
|
-
// Universal Layer 2.5 structural checks (superpowers writing-plans + ce-plan).
|
|
359
|
-
// Plan-wide placeholder scan (broader than Task List) using the
|
|
360
|
-
// FORBIDDEN_PLACEHOLDER_TOKENS list shared with the cross-cutting block.
|
|
361
|
-
const planHeaderBody = sectionBodyByName(sections, "Plan Header");
|
|
362
|
-
if (planHeaderBody !== null) {
|
|
363
|
-
const required = ["Goal:", "Architecture:", "Tech Stack:"];
|
|
364
|
-
const missing = required.filter((token) => !new RegExp(token.replace(":", "\\s*:"), "iu").test(planHeaderBody));
|
|
365
|
-
findings.push({
|
|
366
|
-
section: "Plan Header Coverage",
|
|
367
|
-
required: true,
|
|
368
|
-
rule: "Plan Header must include Goal, Architecture, and Tech Stack lines.",
|
|
369
|
-
found: missing.length === 0,
|
|
370
|
-
details: missing.length === 0
|
|
371
|
-
? "Plan Header covers Goal/Architecture/Tech Stack."
|
|
372
|
-
: `Plan Header is missing field(s): ${missing.join(", ")}.`
|
|
373
|
-
});
|
|
374
|
-
}
|
|
375
|
-
const unitBlocks = raw.match(/###\s+Implementation Unit\s+U-\d+/giu) ?? [];
|
|
376
|
-
if (unitBlocks.length > 0) {
|
|
377
|
-
const requiredKeys = ["Goal:", "Files", "Approach:", "Test scenarios:", "Verification:"];
|
|
378
|
-
const blockBodies = raw.split(/(?=###\s+Implementation Unit\s+U-\d+)/iu).slice(1);
|
|
379
|
-
const validBlocks = blockBodies.filter((block) => requiredKeys.every((key) => new RegExp(key.replace(":", "\\s*:"), "iu").test(block)));
|
|
380
|
-
findings.push({
|
|
381
|
-
section: "Implementation Unit Shape",
|
|
382
|
-
required: true,
|
|
383
|
-
rule: "Each `### Implementation Unit U-<n>` must include Goal, Files, Approach, Test scenarios, Verification.",
|
|
384
|
-
found: validBlocks.length === unitBlocks.length,
|
|
385
|
-
details: validBlocks.length === unitBlocks.length
|
|
386
|
-
? `All ${unitBlocks.length} implementation unit(s) include the required fields.`
|
|
387
|
-
: `${unitBlocks.length - validBlocks.length} implementation unit(s) are missing required fields.`
|
|
388
|
-
});
|
|
389
|
-
}
|
|
390
|
-
const allPlaceholderTokens = FORBIDDEN_PLACEHOLDER_TOKENS.map((token) => token.toLowerCase());
|
|
391
|
-
const authoredBody = extractAuthoredBody(raw);
|
|
392
|
-
const lowerRaw = authoredBody.toLowerCase();
|
|
393
|
-
const planWidePlaceholderHits = allPlaceholderTokens.filter((token) => lowerRaw.includes(token));
|
|
394
|
-
// Strip the "## NO PLACEHOLDERS Rule" section (which lists tokens) and
|
|
395
|
-
// any acknowledgement text from the scan to avoid false positives where
|
|
396
|
-
// the plan deliberately references the rule by name.
|
|
397
|
-
const placeholderRuleSection = sectionBodyByName(sections, "NO PLACEHOLDERS Rule");
|
|
398
|
-
const ruleScanBody = (placeholderRuleSection ?? "").toLowerCase();
|
|
399
|
-
const ruleAcceptedHits = ruleScanBody.length > 0
|
|
400
|
-
? allPlaceholderTokens.filter((token) => ruleScanBody.includes(token))
|
|
401
|
-
: [];
|
|
402
|
-
const filteredPlanHits = planWidePlaceholderHits.filter((token) => {
|
|
403
|
-
// If the only occurrence is in the rule section, ignore it.
|
|
404
|
-
if (!ruleAcceptedHits.includes(token))
|
|
405
|
-
return true;
|
|
406
|
-
const occurrencesElsewhere = lowerRaw.split(token).length - 1
|
|
407
|
-
- (ruleScanBody.split(token).length - 1);
|
|
408
|
-
return occurrencesElsewhere > 0;
|
|
409
|
-
});
|
|
410
|
-
findings.push({
|
|
411
|
-
section: "Plan-wide Placeholder Scan",
|
|
412
|
-
required: false,
|
|
413
|
-
rule: "Plan should not contain forbidden placeholder tokens outside the NO PLACEHOLDERS rule section.",
|
|
414
|
-
found: filteredPlanHits.length === 0,
|
|
415
|
-
details: filteredPlanHits.length === 0
|
|
416
|
-
? "No forbidden placeholder tokens detected outside the rule section."
|
|
417
|
-
: `Detected forbidden token(s) elsewhere in plan: ${filteredPlanHits.join(", ")}.`
|
|
418
|
-
});
|
|
419
|
-
// advisory `plan_too_large_no_waves`. Fires when a
|
|
420
|
-
// standard-track plan has more than the wave-split threshold of
|
|
421
|
-
// implementation units AND the wave-plans/ directory is empty.
|
|
422
|
-
// Linter advisories never block stage-complete (`required: false`),
|
|
423
|
-
// so the agent gets a nudge to run `cclaw-cli internal plan-split-waves`
|
|
424
|
-
// without the plan stage failing.
|
|
425
|
-
try {
|
|
426
|
-
const planUnits = parseImplementationUnits(raw);
|
|
427
|
-
if (planUnits.length > PLAN_SPLIT_SMALL_PLAN_THRESHOLD) {
|
|
428
|
-
const artifactsDir = path.dirname(absFile);
|
|
429
|
-
const wavePlansDir = path.join(artifactsDir, "wave-plans");
|
|
430
|
-
let wavePlansHasContent = false;
|
|
431
|
-
try {
|
|
432
|
-
const dirEntries = await fs.readdir(wavePlansDir);
|
|
433
|
-
wavePlansHasContent = dirEntries.some((name) => /^wave-\d+\.md$/u.test(name));
|
|
434
|
-
}
|
|
435
|
-
catch {
|
|
436
|
-
wavePlansHasContent = false;
|
|
437
|
-
}
|
|
438
|
-
if (!wavePlansHasContent) {
|
|
439
|
-
findings.push({
|
|
440
|
-
section: "plan_too_large_no_waves",
|
|
441
|
-
required: false,
|
|
442
|
-
rule: "Plans with > 50 implementation units benefit from being split into manageable waves via `cclaw-cli internal plan-split-waves`.",
|
|
443
|
-
found: false,
|
|
444
|
-
details: `Plan has ${planUnits.length} implementation unit(s) (threshold ${PLAN_SPLIT_SMALL_PLAN_THRESHOLD}) and no wave-plans/ directory yet. ` +
|
|
445
|
-
"Run `cclaw-cli internal plan-split-waves` to break this plan into manageable waves; the linter is advisory only and will not block stage-complete."
|
|
446
|
-
});
|
|
447
|
-
}
|
|
448
|
-
}
|
|
449
|
-
}
|
|
450
|
-
catch {
|
|
451
|
-
// Parser errors should never block the linter — the advisory is
|
|
452
|
-
// purely a nudge.
|
|
453
|
-
}
|
|
454
|
-
const handoffBody = sectionBodyByName(sections, "Execution Handoff");
|
|
455
|
-
if (handoffBody !== null) {
|
|
456
|
-
const ok = /(subagent-driven|inline executor)/iu.test(handoffBody);
|
|
457
|
-
findings.push({
|
|
458
|
-
section: "Execution Handoff Posture",
|
|
459
|
-
required: true,
|
|
460
|
-
rule: "Execution Handoff must declare a posture (Subagent-Driven or Inline executor).",
|
|
461
|
-
found: ok,
|
|
462
|
-
details: ok
|
|
463
|
-
? "Execution Handoff posture declared."
|
|
464
|
-
: "Execution Handoff is missing a posture declaration (Subagent-Driven or Inline executor)."
|
|
465
|
-
});
|
|
466
|
-
}
|
|
467
|
-
const planCalibratedBody = sectionBodyByName(sections, "Calibrated Findings");
|
|
468
|
-
if (planCalibratedBody !== null) {
|
|
469
|
-
const isEmpty = /none this stage|none\b/iu.test(planCalibratedBody);
|
|
470
|
-
const findingRegex = new RegExp(CONFIDENCE_FINDING_REGEX_SOURCE, "iu");
|
|
471
|
-
const validRows = planCalibratedBody
|
|
472
|
-
.split("\n")
|
|
473
|
-
.filter((line) => /^[-*]\s+\[/u.test(line.trim()))
|
|
474
|
-
.filter((line) => findingRegex.test(line));
|
|
475
|
-
const ok = isEmpty || validRows.length >= 1;
|
|
476
|
-
findings.push({
|
|
477
|
-
section: "Plan Calibrated Finding Format",
|
|
478
|
-
required: false,
|
|
479
|
-
rule: "Calibrated Findings should either declare `None this stage` or include at least one line in `[P1|P2|P3] (confidence: <n>/10) <path>[:<line>] — <description>` format.",
|
|
480
|
-
found: ok,
|
|
481
|
-
details: isEmpty
|
|
482
|
-
? "No calibrated findings recorded for this plan stage."
|
|
483
|
-
: ok
|
|
484
|
-
? `Detected ${validRows.length} calibrated plan finding(s).`
|
|
485
|
-
: "No calibrated findings detected in canonical format."
|
|
486
|
-
});
|
|
487
|
-
}
|
|
488
|
-
const regressionIronBody = sectionBodyByName(sections, "Regression Iron Rule");
|
|
489
|
-
if (regressionIronBody !== null) {
|
|
490
|
-
const acknowledged = /iron\s+rule\s+acknowledged\s*:\s*yes\b/iu.test(regressionIronBody);
|
|
491
|
-
findings.push({
|
|
492
|
-
section: "Plan Regression Iron Rule Acknowledgement",
|
|
493
|
-
required: false,
|
|
494
|
-
rule: "Regression Iron Rule should include `Iron rule acknowledged: yes`.",
|
|
495
|
-
found: acknowledged,
|
|
496
|
-
details: acknowledged
|
|
497
|
-
? "Regression Iron Rule is explicitly acknowledged."
|
|
498
|
-
: "Regression Iron Rule section is present but missing `Iron rule acknowledged: yes`."
|
|
499
|
-
});
|
|
500
|
-
}
|
|
501
|
-
const layeredDocumentReview = evaluateLayeredDocumentReviewStatus(sections, CONFIDENCE_FINDING_REGEX_SOURCE);
|
|
502
|
-
if (layeredDocumentReview !== null) {
|
|
503
|
-
findings.push({
|
|
504
|
-
section: "Document Reviewer Structured Findings",
|
|
505
|
-
required: true,
|
|
506
|
-
rule: "When Layered review references coherence-reviewer/scope-guardian-reviewer/feasibility-reviewer, include explicit reviewer status plus calibrated finding lines.",
|
|
507
|
-
found: layeredDocumentReview.missingStructured.length === 0,
|
|
508
|
-
details: layeredDocumentReview.missingStructured.length === 0
|
|
509
|
-
? `Structured findings present for reviewers: ${layeredDocumentReview.triggeredReviewers.join(", ")}.`
|
|
510
|
-
: `Missing status or calibrated findings for: ${layeredDocumentReview.missingStructured.join(", ")}.`
|
|
511
|
-
});
|
|
512
|
-
findings.push({
|
|
513
|
-
section: "document-review.fail_without_waiver",
|
|
514
|
-
required: true,
|
|
515
|
-
rule: "[P1] document-review.fail_without_waiver — reviewer FAIL/PARTIAL requires fix evidence or explicit waiver.",
|
|
516
|
-
found: layeredDocumentReview.failOrPartialWithoutWaiver.length === 0,
|
|
517
|
-
details: layeredDocumentReview.failOrPartialWithoutWaiver.length === 0
|
|
518
|
-
? "No unwaived FAIL/PARTIAL reviewer statuses detected."
|
|
519
|
-
: `Unwaived FAIL/PARTIAL statuses: ${layeredDocumentReview.failOrPartialWithoutWaiver.join(", ")}.`
|
|
520
|
-
});
|
|
521
|
-
}
|
|
522
|
-
const planUnits = parseImplementationUnits(raw);
|
|
523
|
-
const authoredTaskIdsForShape = extractTaskIds(sectionBodyByName(sections, "Task List") ?? "");
|
|
524
|
-
const microtaskOnlyPlan = authoredTaskIdsForShape.size > 1 &&
|
|
525
|
-
planUnits.length === 0 &&
|
|
526
|
-
executionTopology !== "strict-micro" &&
|
|
527
|
-
planSliceGranularity !== "strict-micro";
|
|
528
|
-
const strictMicroPolicy = executionStrictness === "strict" ||
|
|
529
|
-
executionTopology === "strict-micro" ||
|
|
530
|
-
planSliceGranularity === "strict-micro" ||
|
|
531
|
-
planMicroTaskPolicy === "strict";
|
|
532
|
-
const microtaskOnlyAdvisoryApplies = microtaskOnlyPlan &&
|
|
533
|
-
!strictMicroPolicy &&
|
|
534
|
-
(executionStrictness === "fast" || executionStrictness === "balanced");
|
|
535
|
-
findings.push({
|
|
536
|
-
section: "plan_microtask_only_advisory",
|
|
537
|
-
required: false,
|
|
538
|
-
rule: "Balanced/fast execution should plan feature-atomic implementation units/slices with internal 2-5 minute TDD steps; reserve one-task-one-slice microtask plans for `execution.topology: strict-micro`, `execution.strictness: strict`, or `plan.microTaskPolicy: strict`.",
|
|
539
|
-
found: !microtaskOnlyAdvisoryApplies,
|
|
540
|
-
details: microtaskOnlyAdvisoryApplies
|
|
541
|
-
? `Task List has ${authoredTaskIdsForShape.size} tiny task id(s) but no Implementation Units. In execution.strictness=${executionStrictness} with plan.microTaskPolicy=${planMicroTaskPolicy}, group related tasks into U-* feature-atomic slices with internal RED/GREEN/REFACTOR steps, or set execution.topology=strict-micro / plan.microTaskPolicy=strict for high-risk micro-slice execution.`
|
|
542
|
-
: strictMicroPolicy
|
|
543
|
-
? "Strict micro-slice posture is configured; microtask-only planning is allowed."
|
|
544
|
-
: "Plan includes implementation units or does not look microtask-only."
|
|
545
|
-
});
|
|
546
|
-
const parallelMetaApplies = strictPlanGuards && planUnits.length > 0;
|
|
547
|
-
if (parallelMetaApplies) {
|
|
548
|
-
const metaRulesRequired = true;
|
|
549
|
-
const missingDepends = [];
|
|
550
|
-
const missingPaths = [];
|
|
551
|
-
const missingParallelMeta = [];
|
|
552
|
-
for (const unit of planUnits) {
|
|
553
|
-
const id = unit.id;
|
|
554
|
-
if (!/\bdependsOn\s*:/iu.test(unit.body)) {
|
|
555
|
-
missingDepends.push(id);
|
|
556
|
-
}
|
|
557
|
-
if (!/\bclaimedPaths\s*:/iu.test(unit.body)) {
|
|
558
|
-
missingPaths.push(id);
|
|
559
|
-
}
|
|
560
|
-
if (!/\bparallelizable\s*:/iu.test(unit.body) || !/\briskTier\s*:/iu.test(unit.body)) {
|
|
561
|
-
missingParallelMeta.push(id);
|
|
562
|
-
}
|
|
563
|
-
}
|
|
564
|
-
findings.push({
|
|
565
|
-
section: "plan_units_missing_dependsOn",
|
|
566
|
-
required: metaRulesRequired,
|
|
567
|
-
rule: "Every implementation unit must declare `dependsOn:` — use comma-separated unit ids or `none`.",
|
|
568
|
-
found: missingDepends.length === 0,
|
|
569
|
-
details: missingDepends.length === 0
|
|
570
|
-
? "All implementation units declare dependsOn."
|
|
571
|
-
: `Missing dependsOn on: ${missingDepends.join(", ")}. Remediation: add a bullet \`- **dependsOn:** U-2, U-3\` or \`- **dependsOn:** none\`.`
|
|
572
|
-
});
|
|
573
|
-
findings.push({
|
|
574
|
-
section: "plan_units_missing_claimedPaths",
|
|
575
|
-
required: metaRulesRequired,
|
|
576
|
-
rule: "Every implementation unit must declare explicit `claimedPaths:` predictions for parallel scheduling.",
|
|
577
|
-
found: missingPaths.length === 0,
|
|
578
|
-
details: missingPaths.length === 0
|
|
579
|
-
? "All implementation units declare claimedPaths."
|
|
580
|
-
: `Missing claimedPaths on: ${missingPaths.join(", ")}. Remediation: add \`- **claimedPaths:** path/a, path/b\` (repo-relative globs or files).`
|
|
581
|
-
});
|
|
582
|
-
findings.push({
|
|
583
|
-
section: "plan_units_missing_parallel_metadata",
|
|
584
|
-
required: metaRulesRequired,
|
|
585
|
-
rule: "Every implementation unit must declare `parallelizable:` and `riskTier:` (low|standard|high).",
|
|
586
|
-
found: missingParallelMeta.length === 0,
|
|
587
|
-
details: missingParallelMeta.length === 0
|
|
588
|
-
? "All implementation units carry parallelizable + riskTier."
|
|
589
|
-
: `Missing parallel metadata on: ${missingParallelMeta.join(", ")}. Remediation: add \`- **parallelizable:** true|false\` and \`- **riskTier:** low|standard|high\`.`
|
|
590
|
-
});
|
|
591
|
-
const parallelizableCount = planUnits.filter((u) => parseImplementationUnitParallelFields(u).parallelizable).length;
|
|
592
|
-
const advisorySerial = parallelizableCount === 0 && planUnits.length > 1;
|
|
593
|
-
findings.push({
|
|
594
|
-
section: "plan_no_parallel_lanes_detected",
|
|
595
|
-
required: false,
|
|
596
|
-
rule: "When multiple independent units exist, consider marking at least one `parallelizable: true` with disjoint claimedPaths.",
|
|
597
|
-
found: !advisorySerial,
|
|
598
|
-
details: advisorySerial
|
|
599
|
-
? "All units are marked parallelizable false; scheduler will serialize. If surfaces are independent, opt units into parallelism explicitly."
|
|
600
|
-
: "Parallel-ready units detected or plan is single-unit."
|
|
601
|
-
});
|
|
602
|
-
}
|
|
603
|
-
// plan_parallel_exec_full_coverage + atomic wave metadata checks.
|
|
604
|
-
// Every T-NNN task listed in the
|
|
605
|
-
// plan's Task List must be assigned to a slice inside the
|
|
606
|
-
// <!-- parallel-exec-managed-start --> block. Without this, TDD
|
|
607
|
-
// cannot fan out work the plan never authored as waves; the previous
|
|
608
|
-
// failure mode was `stage-complete tdd` succeeding when only the
|
|
609
|
-
// first batch of tasks had been wave-assigned.
|
|
610
|
-
//
|
|
611
|
-
// Spike rows (`S-N`) live in the same Task List but are excluded
|
|
612
|
-
// because they are wall-clock spikes that produce evidence files
|
|
613
|
-
// and are not part of the regular slice fan-out. A task is also
|
|
614
|
-
// excluded when it appears under a `## Deferred Tasks` (or
|
|
615
|
-
// `## Backlog`) heading inside the plan with an explicit reason.
|
|
616
|
-
if (strictPlanGuards) {
|
|
617
|
-
const taskListSection = sectionBodyByName(sections, "Task List") ?? "";
|
|
618
|
-
const authoredTaskIds = extractTaskIds(taskListSection);
|
|
619
|
-
const authoredUnitIds = new Set(planUnits.map((unit) => unit.id.toUpperCase()));
|
|
620
|
-
// Collect deferred / backlog task ids so they don't trigger the
|
|
621
|
-
// "uncovered" finding. Both heading variants are accepted.
|
|
622
|
-
const deferredBody = (sectionBodyByName(sections, "Deferred Tasks") ?? "") +
|
|
623
|
-
"\n" +
|
|
624
|
-
(sectionBodyByName(sections, "Backlog") ?? "");
|
|
625
|
-
const deferredIds = extractTaskIds(deferredBody);
|
|
626
|
-
const parallelExecBody = extractParallelExecManagedBody(raw);
|
|
627
|
-
const claimedIds = extractTaskIds(parallelExecBody);
|
|
628
|
-
const claimedUnitIds = extractUnitIds(parallelExecBody);
|
|
629
|
-
try {
|
|
630
|
-
for (const wave of parseParallelExecutionPlanWaves(raw)) {
|
|
631
|
-
for (const member of wave.members) {
|
|
632
|
-
if (/^U-\d+(?:[a-z][a-z0-9]*)?$/iu.test(member.unitId)) {
|
|
633
|
-
claimedUnitIds.add(member.unitId.toUpperCase());
|
|
634
|
-
}
|
|
635
|
-
}
|
|
636
|
-
}
|
|
637
|
-
}
|
|
638
|
-
catch {
|
|
639
|
-
// Duplicate/malformed wave plans are reported by the wave parser/status
|
|
640
|
-
// path; this coverage gate falls back to raw token extraction.
|
|
641
|
-
}
|
|
642
|
-
const useImplementationUnitCoverage = authoredUnitIds.size > 0;
|
|
643
|
-
const uncovered = [];
|
|
644
|
-
if (useImplementationUnitCoverage) {
|
|
645
|
-
for (const id of authoredUnitIds) {
|
|
646
|
-
if (claimedUnitIds.has(id))
|
|
647
|
-
continue;
|
|
648
|
-
uncovered.push(id);
|
|
649
|
-
}
|
|
650
|
-
}
|
|
651
|
-
else {
|
|
652
|
-
for (const id of authoredTaskIds) {
|
|
653
|
-
if (claimedIds.has(id))
|
|
654
|
-
continue;
|
|
655
|
-
if (deferredIds.has(id))
|
|
656
|
-
continue;
|
|
657
|
-
uncovered.push(id);
|
|
658
|
-
}
|
|
659
|
-
}
|
|
660
|
-
uncovered.sort();
|
|
661
|
-
const blockPresent = parallelExecBody.length > 0;
|
|
662
|
-
const coverageTargetPresent = useImplementationUnitCoverage || authoredTaskIds.size > 0;
|
|
663
|
-
const coverageTargetLabel = useImplementationUnitCoverage
|
|
664
|
-
? "implementation unit"
|
|
665
|
-
: "task id";
|
|
666
|
-
findings.push({
|
|
667
|
-
section: "plan_parallel_exec_full_coverage",
|
|
668
|
-
required: coverageTargetPresent,
|
|
669
|
-
rule: "Every feature-atomic Implementation Unit (`U-*`) must be assigned to at least one slice/wave inside the `<!-- parallel-exec-managed-start -->` block. Legacy strict-micro plans without units may instead cover every non-deferred `T-NNN` task. TDD cannot fan out waves the plan never authored.",
|
|
670
|
-
found: coverageTargetPresent && blockPresent && uncovered.length === 0,
|
|
671
|
-
details: !coverageTargetPresent
|
|
672
|
-
? "No Implementation Units or T-NNN task ids found; full-coverage check skipped."
|
|
673
|
-
: !blockPresent
|
|
674
|
-
? "`<!-- parallel-exec-managed-start -->` block is missing or empty. Author the Parallel Execution Plan with W-02..W-N covering every implementation unit/slice before plan-final-approval."
|
|
675
|
-
: uncovered.length === 0
|
|
676
|
-
? useImplementationUnitCoverage
|
|
677
|
-
? `Parallel Execution Plan covers all ${authoredUnitIds.size} implementation unit(s); internal ${authoredTaskIds.size} T-NNN step(s) remain inside those units.`
|
|
678
|
-
: `Parallel Execution Plan covers all ${authoredTaskIds.size} authored task id(s); ${deferredIds.size} task id(s) are explicitly deferred.`
|
|
679
|
-
: `Uncovered ${coverageTargetLabel}(s) — author waves for: ${uncovered.slice(0, 25).join(", ")}${uncovered.length > 25 ? `, … (${uncovered.length - 25} more)` : ""}. ${useImplementationUnitCoverage ? "Add U-* rows/members inside <!-- parallel-exec-managed-start -->." : "Either add slices for them inside <!-- parallel-exec-managed-start --> or move them under `## Deferred Tasks` with a reason."}`
|
|
680
|
-
});
|
|
681
|
-
const waveMeta = parseParallelWaveTableMetadata(raw);
|
|
682
|
-
const pathConflicts = [];
|
|
683
|
-
for (const wave of waveMeta) {
|
|
684
|
-
const rows = wave.rows;
|
|
685
|
-
for (let i = 0; i < rows.length; i += 1) {
|
|
686
|
-
for (let j = i + 1; j < rows.length; j += 1) {
|
|
687
|
-
const left = rows[i];
|
|
688
|
-
const right = rows[j];
|
|
689
|
-
const rightPathSet = new Set(right.claimedPaths);
|
|
690
|
-
const overlap = left.claimedPaths.filter((p) => rightPathSet.has(p));
|
|
691
|
-
if (overlap.length === 0)
|
|
692
|
-
continue;
|
|
693
|
-
pathConflicts.push(`${wave.waveId} ${left.sliceId}<->${right.sliceId} overlap: ${overlap.join(", ")}`);
|
|
694
|
-
}
|
|
695
|
-
}
|
|
696
|
-
}
|
|
697
|
-
findings.push({
|
|
698
|
-
section: "plan_wave_paths_disjoint",
|
|
699
|
-
required: coverageTargetPresent,
|
|
700
|
-
rule: "Slices within the same wave must keep `claimedPaths` disjoint so TDD can safely fan out parallel slice-builders.",
|
|
701
|
-
found: coverageTargetPresent && blockPresent && pathConflicts.length === 0,
|
|
702
|
-
details: !coverageTargetPresent
|
|
703
|
-
? "No Implementation Units or T-NNN task ids found; disjoint-path wave check skipped."
|
|
704
|
-
: !blockPresent
|
|
705
|
-
? "`<!-- parallel-exec-managed-start -->` block is missing or empty; cannot validate wave path disjointness."
|
|
706
|
-
: pathConflicts.length === 0
|
|
707
|
-
? "All parsed same-wave slice rows have disjoint claimedPaths."
|
|
708
|
-
: `Overlapping claimedPaths detected: ${pathConflicts.slice(0, 12).join(" | ")}${pathConflicts.length > 12 ? ` | … (${pathConflicts.length - 12} more)` : ""}.`
|
|
709
|
-
});
|
|
710
|
-
const invalidLanes = [];
|
|
711
|
-
for (const wave of waveMeta) {
|
|
712
|
-
for (const row of wave.rows) {
|
|
713
|
-
if (!row.lane)
|
|
714
|
-
continue;
|
|
715
|
-
if (!PLAN_LANE_WHITELIST.has(row.lane)) {
|
|
716
|
-
invalidLanes.push(`${wave.waveId}/${row.sliceId}:${row.lane}`);
|
|
717
|
-
}
|
|
718
|
-
}
|
|
719
|
-
}
|
|
720
|
-
findings.push({
|
|
721
|
-
section: "plan_lane_meaningful",
|
|
722
|
-
required: false,
|
|
723
|
-
rule: "When a lane is declared, it must be one of: production, test, docs, infra, scaffold, migration.",
|
|
724
|
-
found: invalidLanes.length === 0,
|
|
725
|
-
details: invalidLanes.length === 0
|
|
726
|
-
? "All declared lane values are either omitted or in the approved lane whitelist."
|
|
727
|
-
: `Invalid lane value(s): ${invalidLanes.join(", ")}. Remove lane or use a whitelisted value.`
|
|
728
|
-
});
|
|
729
|
-
const inconsistentParallelizable = [];
|
|
730
|
-
for (const wave of waveMeta) {
|
|
731
|
-
const hasSerialSlice = wave.rows.some((row) => row.parallelizable === false);
|
|
732
|
-
if (!hasSerialSlice)
|
|
733
|
-
continue;
|
|
734
|
-
if (!waveHasSequentialModeHint(wave)) {
|
|
735
|
-
const serialSlices = wave.rows
|
|
736
|
-
.filter((row) => row.parallelizable === false)
|
|
737
|
-
.map((row) => row.sliceId)
|
|
738
|
-
.join(", ");
|
|
739
|
-
inconsistentParallelizable.push(`${wave.waveId} [${serialSlices}]`);
|
|
740
|
-
}
|
|
741
|
-
}
|
|
742
|
-
findings.push({
|
|
743
|
-
section: "plan_parallelizable_consistency",
|
|
744
|
-
required: false,
|
|
745
|
-
rule: "Waves containing `parallelizable: false` slices should be explicitly marked sequential in wave notes/mode.",
|
|
746
|
-
found: inconsistentParallelizable.length === 0,
|
|
747
|
-
details: inconsistentParallelizable.length === 0
|
|
748
|
-
? "No serial slices were found outside a sequentially-labeled wave context."
|
|
749
|
-
: `Serial slice(s) found without sequential wave mode hints in: ${inconsistentParallelizable.join(", ")}. Add a wave mode/note indicating sequential execution.`
|
|
750
|
-
});
|
|
751
|
-
const mermaidBlocks = raw.match(/```mermaid[\s\S]*?```/giu) ?? [];
|
|
752
|
-
const hasParallelExecMermaid = mermaidBlocks.some((block) => /(flowchart|gantt)/iu.test(block) && /\bW-\d+\b/iu.test(block) && /\bS-\d+(?:[a-z][a-z0-9]*)?\b/iu.test(block));
|
|
753
|
-
findings.push({
|
|
754
|
-
section: "plan_parallel_exec_mermaid_present",
|
|
755
|
-
required: false,
|
|
756
|
-
rule: "Plan should include a mermaid flowchart/gantt for parallel waves and slice dependencies to make fanout shape visually reviewable.",
|
|
757
|
-
found: hasParallelExecMermaid,
|
|
758
|
-
details: hasParallelExecMermaid
|
|
759
|
-
? "Mermaid visualization for parallel execution waves is present."
|
|
760
|
-
: "No mermaid parallel-execution visualization found (advisory). Add a ` ```mermaid ` flowchart or gantt with W-* and S-* nodes."
|
|
761
|
-
});
|
|
762
|
-
// 7.6.0 — plan_module_introducing_slice_wires_root.
|
|
763
|
-
// Stack-aware: stack-adapter exposes a `wiringAggregator` contract
|
|
764
|
-
// for stacks where introducing a new module file requires a
|
|
765
|
-
// sibling aggregator update (Rust lib.rs, Python __init__.py,
|
|
766
|
-
// optional Node-TS index.ts). For each NEW path in a slice's
|
|
767
|
-
// claim, if the adapter says an aggregator is required, the
|
|
768
|
-
// aggregator path must appear in the slice's own claim or in any
|
|
769
|
-
// transitive predecessor's claim within the same flow.
|
|
770
|
-
//
|
|
771
|
-
// For unknown stacks (Go, Java, Ruby, Swift, .NET, Elixir, …)
|
|
772
|
-
// the adapter returns `wiringAggregator: undefined`, so this
|
|
773
|
-
// gate is a no-op and `found: true`.
|
|
774
|
-
const stackAdapter = await loadStackAdapter(projectRoot);
|
|
775
|
-
const headFiles = await readHeadFiles(projectRoot);
|
|
776
|
-
const wiringIssues = [];
|
|
777
|
-
if (stackAdapter.wiringAggregator) {
|
|
778
|
-
const claimGraph = buildSliceClaimGraph(waveMeta);
|
|
779
|
-
for (const wave of waveMeta) {
|
|
780
|
-
for (const row of [...wave.rows].sort((a, b) => compareSliceIds(a.sliceId, b.sliceId))) {
|
|
781
|
-
const predecessors = transitivePredecessors(row.sliceId, claimGraph);
|
|
782
|
-
const predecessorClaims = new Set();
|
|
783
|
-
for (const predId of predecessors) {
|
|
784
|
-
const predRow = claimGraph.bySliceId.get(predId);
|
|
785
|
-
if (!predRow)
|
|
786
|
-
continue;
|
|
787
|
-
for (const claim of predRow.claimedPaths) {
|
|
788
|
-
predecessorClaims.add(normalizePathToken(claim));
|
|
789
|
-
}
|
|
790
|
-
}
|
|
791
|
-
const ownClaims = new Set(row.claimedPaths.map(normalizePathToken));
|
|
792
|
-
for (const rawClaim of row.claimedPaths) {
|
|
793
|
-
const claim = normalizePathToken(rawClaim);
|
|
794
|
-
if (claim.length === 0)
|
|
795
|
-
continue;
|
|
796
|
-
// Only NEW paths (not present at HEAD) require an
|
|
797
|
-
// aggregator update — existing modules are already wired.
|
|
798
|
-
if (headFiles.size > 0 && headFiles.has(claim))
|
|
799
|
-
continue;
|
|
800
|
-
const required = stackAdapter.wiringAggregator.resolveAggregatorFor(claim, { headFiles });
|
|
801
|
-
if (!required)
|
|
802
|
-
continue;
|
|
803
|
-
const aggregatorPath = normalizePathToken(required);
|
|
804
|
-
if (ownClaims.has(aggregatorPath))
|
|
805
|
-
continue;
|
|
806
|
-
if (predecessorClaims.has(aggregatorPath))
|
|
807
|
-
continue;
|
|
808
|
-
wiringIssues.push(`${wave.waveId}/${row.sliceId} introduces ${claim} but wiring aggregator ${aggregatorPath} is not in its claim or any predecessor's claim`);
|
|
809
|
-
}
|
|
810
|
-
}
|
|
811
|
-
}
|
|
812
|
-
}
|
|
813
|
-
const wiringApplies = stackAdapter.wiringAggregator !== undefined;
|
|
814
|
-
findings.push({
|
|
815
|
-
section: "plan_module_introducing_slice_wires_root",
|
|
816
|
-
required: coverageTargetPresent && wiringApplies,
|
|
817
|
-
rule: "When a slice introduces a new module file, the stack-adapter's wiring aggregator (e.g. Rust `lib.rs`, Python `__init__.py`, Node-TS barrel `index.*` when present) must be in the same slice's claim or in a transitive predecessor's claim, otherwise the new module is dead code and RED can't be expressed.",
|
|
818
|
-
found: !wiringApplies || wiringIssues.length === 0,
|
|
819
|
-
details: !wiringApplies
|
|
820
|
-
? `Stack adapter (id=${stackAdapter.id}) does not declare a wiring aggregator; gate is a no-op for this stack.`
|
|
821
|
-
: wiringIssues.length === 0
|
|
822
|
-
? `Stack adapter (id=${stackAdapter.id}) wiring aggregator coverage verified across all wave slices.`
|
|
823
|
-
: `Wiring aggregator coverage gaps: ${wiringIssues.slice(0, 12).join(" | ")}${wiringIssues.length > 12 ? ` | … (${wiringIssues.length - 12} more)` : ""}.`
|
|
824
|
-
});
|
|
825
|
-
}
|
|
826
|
-
}
|