create-sdd-project 0.17.2 → 0.18.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/lib/adapt-agents.js +4 -2
- package/lib/init-generator.js +59 -8
- package/lib/init-wizard.js +17 -2
- package/lib/meta.js +5 -4
- package/lib/scanner.js +151 -25
- package/package.json +1 -1
- package/template/.claude/commands/audit-merge.md +156 -2
- package/template/.gemini/commands/audit-merge-instructions.md +153 -2
package/lib/adapt-agents.js
CHANGED
|
@@ -227,7 +227,8 @@ function adaptAgentContentForProjectType(dest, config, replaceInFileFn) {
|
|
|
227
227
|
// WORKFLOW_CORE_PROJECT_TYPE_RULES table above so upgrade-generator.js
|
|
228
228
|
// can apply the same rules in-memory (smart-diff fallback comparison).
|
|
229
229
|
// pr-template.md + AGENTS.md + base-standards.mdc remain inline because
|
|
230
|
-
// they're not workflow-core files (pr-template is v0.17.
|
|
230
|
+
// they're not workflow-core files (pr-template is out of scope for v0.17.1 —
|
|
231
|
+
// see dev/ROADMAP.md "Known follow-ups" item 2).
|
|
231
232
|
const wfRules = WORKFLOW_CORE_PROJECT_TYPE_RULES[config.projectType];
|
|
232
233
|
if (wfRules) {
|
|
233
234
|
for (const dir of toolDirs) {
|
|
@@ -243,7 +244,8 @@ function adaptAgentContentForProjectType(dest, config, replaceInFileFn) {
|
|
|
243
244
|
[', `ui-ux-designer`', ''],
|
|
244
245
|
]);
|
|
245
246
|
for (const dir of toolDirs) {
|
|
246
|
-
// pr-template: remove ui-components from checklist (
|
|
247
|
+
// pr-template: remove ui-components from checklist (out of scope for
|
|
248
|
+
// v0.17.1 — see dev/ROADMAP.md "Known follow-ups" item 2)
|
|
247
249
|
replaceInFileFn(path.join(dest, dir, 'skills', 'development-workflow', 'references', 'pr-template.md'), [
|
|
248
250
|
[' / ui-components.md', ''],
|
|
249
251
|
]);
|
package/lib/init-generator.js
CHANGED
|
@@ -232,10 +232,32 @@ function generateInit(config) {
|
|
|
232
232
|
console.log(' These files were generated from project analysis. Adjust patterns');
|
|
233
233
|
console.log(' and conventions to match your team\'s actual practices.');
|
|
234
234
|
|
|
235
|
-
// Test coverage note
|
|
236
|
-
|
|
235
|
+
// Test coverage note — v0.17.3: broaden signal detection across root,
|
|
236
|
+
// backend, and frontend tests (covers E2E-only setups and monorepos
|
|
237
|
+
// with workspace-level tests). Only show the note when NO test signal
|
|
238
|
+
// exists anywhere.
|
|
239
|
+
const hasAnyTestSignal =
|
|
240
|
+
scan.tests.framework !== 'none' ||
|
|
241
|
+
scan.backendTests.framework !== 'none' ||
|
|
242
|
+
scan.frontendTests.framework !== 'none' ||
|
|
243
|
+
scan.tests.e2eFramework !== null ||
|
|
244
|
+
scan.tests.testFiles > 0 ||
|
|
245
|
+
scan.backendTests.testFiles > 0 ||
|
|
246
|
+
scan.frontendTests.testFiles > 0;
|
|
247
|
+
const maxCoverageRank = (c) =>
|
|
248
|
+
c === 'high' ? 3 : c === 'medium' ? 2 : c === 'low' ? 1 : 0;
|
|
249
|
+
const bestCoverage = Math.max(
|
|
250
|
+
maxCoverageRank(scan.tests.estimatedCoverage),
|
|
251
|
+
maxCoverageRank(scan.backendTests.estimatedCoverage),
|
|
252
|
+
maxCoverageRank(scan.frontendTests.estimatedCoverage),
|
|
253
|
+
);
|
|
254
|
+
if (!hasAnyTestSignal || bestCoverage <= 1 /* none or low */) {
|
|
237
255
|
console.log('');
|
|
238
|
-
const fileCount =
|
|
256
|
+
const fileCount = Math.max(
|
|
257
|
+
scan.tests.testFiles,
|
|
258
|
+
scan.backendTests.testFiles,
|
|
259
|
+
scan.frontendTests.testFiles,
|
|
260
|
+
);
|
|
239
261
|
if (fileCount === 0) {
|
|
240
262
|
console.log(' 📝 No test files detected.');
|
|
241
263
|
} else {
|
|
@@ -458,8 +480,11 @@ function adaptBackendStandards(template, scan) {
|
|
|
458
480
|
const db = scan.backend.db;
|
|
459
481
|
const lang = scan.language === 'typescript' ? 'TypeScript' : 'JavaScript';
|
|
460
482
|
|
|
461
|
-
|
|
462
|
-
|
|
483
|
+
// v0.17.3: consume backendTests instead of tests so that monorepos with
|
|
484
|
+
// workspace-only test frameworks (e.g., fx: vitest in packages/api) emit
|
|
485
|
+
// the correct Testing line. Single-package: backendTests === tests.
|
|
486
|
+
const testFramework = scan.backendTests.framework !== 'none'
|
|
487
|
+
? capitalizeFramework(scan.backendTests.framework)
|
|
463
488
|
: 'Not configured';
|
|
464
489
|
|
|
465
490
|
let stackLines = [
|
|
@@ -570,9 +595,14 @@ function adaptFrontendStandards(template, scan) {
|
|
|
570
595
|
const state = scan.frontend.state ? `, ${scan.frontend.state}` : '';
|
|
571
596
|
const lang = scan.language === 'typescript' ? 'TypeScript' : 'JavaScript';
|
|
572
597
|
|
|
598
|
+
// v0.17.3: consume frontendTests (see backend equivalent above).
|
|
599
|
+
const frontendTestFramework = scan.frontendTests.framework !== 'none'
|
|
600
|
+
? capitalizeFramework(scan.frontendTests.framework)
|
|
601
|
+
: 'Not configured';
|
|
602
|
+
|
|
573
603
|
content = content.replace(
|
|
574
604
|
/## Technology Stack\n\n[\s\S]*?(?=\n## Project Structure)/,
|
|
575
|
-
`## Technology Stack\n\n- **Framework**: ${framework}\n- **Language**: ${lang}\n- **Styling**: ${styling}${components ? `\n- **Components**: ${components.slice(2)}` : ''}${state ? `\n- **State Management**: ${state.slice(2)}` : ''}\n- **Testing**: ${
|
|
605
|
+
`## Technology Stack\n\n- **Framework**: ${framework}\n- **Language**: ${lang}\n- **Styling**: ${styling}${components ? `\n- **Components**: ${components.slice(2)}` : ''}${state ? `\n- **State Management**: ${state.slice(2)}` : ''}\n- **Testing**: ${frontendTestFramework}\n\n`
|
|
576
606
|
);
|
|
577
607
|
|
|
578
608
|
// Update Project Structure
|
|
@@ -848,8 +878,29 @@ function configureProductTracker(template, scan) {
|
|
|
848
878
|
content = content.replace('| backend | pending', `| ${featureType} | pending`);
|
|
849
879
|
}
|
|
850
880
|
|
|
851
|
-
// Add retrofit testing as first feature if coverage is low
|
|
852
|
-
|
|
881
|
+
// Add retrofit testing as first feature if coverage is low — v0.17.3
|
|
882
|
+
// broaden the gate: consider backend and frontend test coverage, not just
|
|
883
|
+
// root-level. Avoids false F001 recommendation on monorepos that have
|
|
884
|
+
// extensive tests in workspaces. Per Gemini round-3 CRITICAL: also gate
|
|
885
|
+
// on the broader "any test signal" disjunction so E2E-only setups
|
|
886
|
+
// (Playwright/Cypress at root, no unit tests anywhere) don't trigger
|
|
887
|
+
// F001 either — symmetric with the console-warning gate above.
|
|
888
|
+
const rankCoverage = (c) =>
|
|
889
|
+
c === 'high' ? 3 : c === 'medium' ? 2 : c === 'low' ? 1 : 0;
|
|
890
|
+
const bestCov = Math.max(
|
|
891
|
+
rankCoverage(scan.tests.estimatedCoverage),
|
|
892
|
+
rankCoverage(scan.backendTests.estimatedCoverage),
|
|
893
|
+
rankCoverage(scan.frontendTests.estimatedCoverage),
|
|
894
|
+
);
|
|
895
|
+
const hasAnyTestSignal =
|
|
896
|
+
scan.tests.framework !== 'none' ||
|
|
897
|
+
scan.backendTests.framework !== 'none' ||
|
|
898
|
+
scan.frontendTests.framework !== 'none' ||
|
|
899
|
+
scan.tests.e2eFramework !== null ||
|
|
900
|
+
scan.tests.testFiles > 0 ||
|
|
901
|
+
scan.backendTests.testFiles > 0 ||
|
|
902
|
+
scan.frontendTests.testFiles > 0;
|
|
903
|
+
if (!hasAnyTestSignal || bestCov <= 1 /* none or low */) {
|
|
853
904
|
// Use regex to match the F001 placeholder row resiliently (handles column changes)
|
|
854
905
|
content = content.replace(
|
|
855
906
|
/\| F001 \|[^\n]*\n/,
|
package/lib/init-wizard.js
CHANGED
|
@@ -41,8 +41,23 @@ function formatScanSummary(scanResult) {
|
|
|
41
41
|
};
|
|
42
42
|
lines.push(` Architecture: ${patternLabels[scanResult.srcStructure.pattern] || 'Unknown'}`);
|
|
43
43
|
|
|
44
|
-
|
|
45
|
-
|
|
44
|
+
// v0.17.3: collect distinct non-'none' frameworks across root, backend,
|
|
45
|
+
// and frontend tests. For mixed monorepos (e.g., fx: vitest in api, jest
|
|
46
|
+
// in web), display joined (e.g., "vitest + jest") so the summary reflects
|
|
47
|
+
// what adapt-functions will actually write. Avoids the v1.1 UX issue
|
|
48
|
+
// where OR-precedence picked root-hoisted jest over workspace vitest.
|
|
49
|
+
const uniqueFrameworks = Array.from(new Set([
|
|
50
|
+
scanResult.tests.framework,
|
|
51
|
+
scanResult.backendTests.framework,
|
|
52
|
+
scanResult.frontendTests.framework,
|
|
53
|
+
].filter((f) => f !== 'none')));
|
|
54
|
+
if (uniqueFrameworks.length > 0) {
|
|
55
|
+
const totalFiles = Math.max(
|
|
56
|
+
scanResult.tests.testFiles,
|
|
57
|
+
scanResult.backendTests.testFiles,
|
|
58
|
+
scanResult.frontendTests.testFiles,
|
|
59
|
+
);
|
|
60
|
+
lines.push(` Tests: ${uniqueFrameworks.join(' + ')} (${totalFiles} test files)`);
|
|
46
61
|
} else {
|
|
47
62
|
lines.push(' Tests: None detected');
|
|
48
63
|
}
|
package/lib/meta.js
CHANGED
|
@@ -236,9 +236,10 @@ function writeMeta(dest, hashes) {
|
|
|
236
236
|
* - 6 workflow-core files (development-workflow SKILL.md + ticket-template.md
|
|
237
237
|
* + merge-checklist.md, × 2 tools) — filtered by aiTools
|
|
238
238
|
*
|
|
239
|
-
* Out of scope for v0.17.1 (deferred
|
|
240
|
-
*
|
|
241
|
-
* and all references/ files except the 3
|
|
239
|
+
* Out of scope for v0.17.1 (deferred — see dev/ROADMAP.md "Known follow-ups"
|
|
240
|
+
* item 2): bug-workflow/SKILL.md, health-check/SKILL.md, pm-orchestrator/SKILL.md,
|
|
241
|
+
* project-memory/SKILL.md, and all references/ files except the 3
|
|
242
|
+
* development-workflow ones above. Did not land in the v0.17.2 scanner hotfix.
|
|
242
243
|
*/
|
|
243
244
|
function expectedSmartDiffTrackedPaths(aiTools, projectType) {
|
|
244
245
|
const paths = new Set();
|
|
@@ -273,7 +274,7 @@ function expectedSmartDiffTrackedPaths(aiTools, projectType) {
|
|
|
273
274
|
|
|
274
275
|
// v0.17.1: development-workflow skill core files — filtered by aiTools.
|
|
275
276
|
// bug-workflow, health-check, pm-orchestrator, project-memory are OUT OF
|
|
276
|
-
// SCOPE for v0.17.1 (deferred
|
|
277
|
+
// SCOPE for v0.17.1 (deferred — see dev/ROADMAP.md "Known follow-ups" item 2).
|
|
277
278
|
for (const dir of toolDirs) {
|
|
278
279
|
paths.add(`${dir}/skills/development-workflow/SKILL.md`);
|
|
279
280
|
paths.add(`${dir}/skills/development-workflow/references/ticket-template.md`);
|
package/lib/scanner.js
CHANGED
|
@@ -46,62 +46,187 @@ function scan(projectDir) {
|
|
|
46
46
|
const frontend = detectFrontend(projectDir, pkg);
|
|
47
47
|
const isMonorepo = detectMonorepo(projectDir, pkg);
|
|
48
48
|
|
|
49
|
-
|
|
50
|
-
|
|
51
|
-
|
|
49
|
+
let language = detectLanguage(projectDir);
|
|
50
|
+
let srcStructure = detectArchitecture(projectDir, pkg);
|
|
51
|
+
const rootTests = detectTests(projectDir, pkg);
|
|
52
|
+
let backendTests = rootTests;
|
|
53
|
+
let frontendTests = rootTests;
|
|
54
|
+
|
|
55
|
+
// v0.17.3: single-pass monorepo enumeration.
|
|
56
|
+
//
|
|
57
|
+
// Combines v0.17.2's framework promotion (runs only when root lacks a
|
|
58
|
+
// framework, gated by `!backend.framework` / `!frontend.framework`) with
|
|
59
|
+
// v0.17.3's auxiliary-field promotion (language, architecture, tests,
|
|
60
|
+
// frontend styling/components/state) into a single loop that always runs
|
|
61
|
+
// in monorepos.
|
|
62
|
+
//
|
|
63
|
+
// Rationale for always-enumerate: `workspaceSource` is only set when
|
|
64
|
+
// v0.17.2 actually promoted a framework from a workspace (root lacked it).
|
|
65
|
+
// If root has the framework hoisted (e.g., `"next": "^14.2.29"` at root in
|
|
66
|
+
// a Next.js monorepo), v0.17.2's loop never runs → `workspaceSource` is
|
|
67
|
+
// null → auxiliary detection has no workspace handle. v0.17.3 solves this
|
|
68
|
+
// by tracking `primaryBackendWs` / `primaryFrontendWs` independently:
|
|
69
|
+
// always populated for monorepos with a detectable backend/frontend
|
|
70
|
+
// workspace, regardless of where the framework came from. See
|
|
71
|
+
// dev/v0.17.3-plan.md D5 for full rationale + round-1/round-2 review trail.
|
|
72
|
+
let primaryBackendWs = null;
|
|
73
|
+
let primaryFrontendWs = null;
|
|
74
|
+
if (isMonorepo) {
|
|
52
75
|
const workspaces = enumerateWorkspaces(projectDir, pkg);
|
|
53
76
|
for (const wsRel of workspaces) {
|
|
54
77
|
const wsAbs = path.join(projectDir, ...wsRel.split('/'));
|
|
55
78
|
const wsPkg = readPackageJson(wsAbs);
|
|
56
|
-
|
|
79
|
+
|
|
80
|
+
if (!primaryBackendWs) {
|
|
57
81
|
const wsBackend = detectBackend(wsAbs, wsPkg);
|
|
58
82
|
if (wsBackend.framework) {
|
|
59
|
-
|
|
60
|
-
//
|
|
61
|
-
//
|
|
62
|
-
|
|
63
|
-
|
|
64
|
-
|
|
65
|
-
|
|
66
|
-
|
|
67
|
-
if (wsBackend[field] !== null && wsBackend[field] !== undefined) {
|
|
68
|
-
backend[field] = wsBackend[field];
|
|
83
|
+
primaryBackendWs = wsRel;
|
|
84
|
+
// v0.17.2 promotion — only when root lacked framework. Preserves
|
|
85
|
+
// v0.17.2 semantics byte-equivalent.
|
|
86
|
+
if (!backend.framework) {
|
|
87
|
+
for (const field of Object.keys(wsBackend)) {
|
|
88
|
+
if (wsBackend[field] !== null && wsBackend[field] !== undefined) {
|
|
89
|
+
backend[field] = wsBackend[field];
|
|
90
|
+
}
|
|
69
91
|
}
|
|
92
|
+
backend.workspaceSource = wsRel;
|
|
70
93
|
}
|
|
71
|
-
backend.workspaceSource = wsRel;
|
|
72
94
|
}
|
|
73
95
|
}
|
|
74
|
-
|
|
96
|
+
|
|
97
|
+
if (!primaryFrontendWs) {
|
|
75
98
|
const wsFrontend = detectFrontend(wsAbs, wsPkg);
|
|
76
99
|
if (wsFrontend.framework) {
|
|
77
|
-
|
|
78
|
-
|
|
79
|
-
|
|
100
|
+
primaryFrontendWs = wsRel;
|
|
101
|
+
if (!frontend.framework) {
|
|
102
|
+
for (const field of Object.keys(wsFrontend)) {
|
|
103
|
+
if (wsFrontend[field] !== null && wsFrontend[field] !== undefined) {
|
|
104
|
+
frontend[field] = wsFrontend[field];
|
|
105
|
+
}
|
|
80
106
|
}
|
|
107
|
+
frontend.workspaceSource = wsRel;
|
|
81
108
|
}
|
|
82
|
-
frontend.workspaceSource = wsRel;
|
|
83
109
|
}
|
|
84
110
|
}
|
|
85
|
-
|
|
111
|
+
|
|
112
|
+
if (primaryBackendWs && primaryFrontendWs) break;
|
|
86
113
|
}
|
|
114
|
+
|
|
115
|
+
// v0.17.3 auxiliary detection — runs whenever a primary workspace was
|
|
116
|
+
// identified, independent of v0.17.2 promotion.
|
|
117
|
+
if (primaryBackendWs) {
|
|
118
|
+
const wsAbs = path.join(projectDir, ...primaryBackendWs.split('/'));
|
|
119
|
+
const wsPkg = readPackageJson(wsAbs);
|
|
120
|
+
// D1: scalar language merge — TypeScript wins over JavaScript, never
|
|
121
|
+
// demote. 'javascript' is detectLanguage's default; if the workspace
|
|
122
|
+
// returns 'typescript', promote.
|
|
123
|
+
if (detectLanguage(wsAbs) === 'typescript') language = 'typescript';
|
|
124
|
+
// D2: architecture per-field merge.
|
|
125
|
+
// - pattern: workspace wins if it detected ANY known pattern (any
|
|
126
|
+
// non-'unknown' value). Strict non-demotion: workspace 'unknown'
|
|
127
|
+
// never demotes a known root pattern. Workspace known-vs-known
|
|
128
|
+
// resolves to workspace because the workspace is the authoritative
|
|
129
|
+
// source for the primary backend's organization (root pattern in
|
|
130
|
+
// monorepos is often a false signal from the listing fallback when
|
|
131
|
+
// no src/ exists).
|
|
132
|
+
// - dirs: workspace wins when it has any dirs (root's empty []
|
|
133
|
+
// carries no info, and root in monorepos lists workspace dirs not
|
|
134
|
+
// source dirs which is misleading).
|
|
135
|
+
// - boolean hasX flags: OR-merge (true wins, never demoted from true).
|
|
136
|
+
const wsArch = detectArchitecture(wsAbs, wsPkg);
|
|
137
|
+
if (wsArch.pattern !== 'unknown') srcStructure.pattern = wsArch.pattern;
|
|
138
|
+
if (wsArch.dirs && wsArch.dirs.length > 0) srcStructure.dirs = wsArch.dirs;
|
|
139
|
+
for (const field of [
|
|
140
|
+
'hasControllers',
|
|
141
|
+
'hasRoutes',
|
|
142
|
+
'hasModels',
|
|
143
|
+
'hasServices',
|
|
144
|
+
'hasDomain',
|
|
145
|
+
'hasMiddleware',
|
|
146
|
+
'hasFeatures',
|
|
147
|
+
'hasHandlers',
|
|
148
|
+
]) {
|
|
149
|
+
if (wsArch[field] === true) srcStructure[field] = true;
|
|
150
|
+
}
|
|
151
|
+
// D3: backend tests per-field merge (preserves root-level E2E).
|
|
152
|
+
backendTests = mergeWorkspaceTests(rootTests, detectTests(wsAbs, wsPkg));
|
|
153
|
+
}
|
|
154
|
+
|
|
155
|
+
if (primaryFrontendWs) {
|
|
156
|
+
const wsAbs = path.join(projectDir, ...primaryFrontendWs.split('/'));
|
|
157
|
+
const wsPkg = readPackageJson(wsAbs);
|
|
158
|
+
// D4 (revised): promote auxiliary frontend fields (styling,
|
|
159
|
+
// components, state) from the primary frontend workspace,
|
|
160
|
+
// independent of whether the framework was promoted at root.
|
|
161
|
+
const wsFrontendAux = detectFrontend(wsAbs, wsPkg);
|
|
162
|
+
for (const field of ['styling', 'components', 'state']) {
|
|
163
|
+
if (wsFrontendAux[field] !== null && wsFrontendAux[field] !== undefined) {
|
|
164
|
+
frontend[field] = wsFrontendAux[field];
|
|
165
|
+
}
|
|
166
|
+
}
|
|
167
|
+
frontendTests = mergeWorkspaceTests(rootTests, detectTests(wsAbs, wsPkg));
|
|
168
|
+
}
|
|
169
|
+
|
|
170
|
+
// D5 observability: export primary workspace identifiers for diagnostics
|
|
171
|
+
// and smoke-test assertions. `workspaceSource` (v0.17.2) remains set only
|
|
172
|
+
// when promotion happened; `primaryWorkspace` (v0.17.3) is set whenever a
|
|
173
|
+
// workspace was used for aux detection.
|
|
174
|
+
if (primaryBackendWs) backend.primaryWorkspace = primaryBackendWs;
|
|
175
|
+
if (primaryFrontendWs) frontend.primaryWorkspace = primaryFrontendWs;
|
|
87
176
|
}
|
|
88
177
|
|
|
89
178
|
return {
|
|
90
179
|
projectName: pkg.name || path.basename(projectDir),
|
|
91
180
|
description: pkg.description || '',
|
|
92
|
-
language
|
|
181
|
+
language,
|
|
93
182
|
backend,
|
|
94
183
|
frontend,
|
|
95
184
|
isMonorepo,
|
|
96
185
|
rootDirs: listRootDirs(projectDir),
|
|
97
|
-
srcStructure
|
|
98
|
-
tests:
|
|
186
|
+
srcStructure,
|
|
187
|
+
tests: rootTests,
|
|
188
|
+
backendTests,
|
|
189
|
+
frontendTests,
|
|
99
190
|
existingDocs: detectExistingDocs(projectDir),
|
|
100
191
|
gitBranch: detectGitBranch(projectDir),
|
|
101
192
|
hasGit: fs.existsSync(path.join(projectDir, '.git')),
|
|
102
193
|
};
|
|
103
194
|
}
|
|
104
195
|
|
|
196
|
+
/**
|
|
197
|
+
* v0.17.3: per-field merge of workspace test detection results into a
|
|
198
|
+
* new object based on root-level test detection.
|
|
199
|
+
*
|
|
200
|
+
* Preserves root-level signals the workspace doesn't see:
|
|
201
|
+
* - e2eFramework (Playwright/Cypress typically installed once at root)
|
|
202
|
+
* - framework (if workspace found no unit framework, keep root's default
|
|
203
|
+
* or hoisted value)
|
|
204
|
+
*
|
|
205
|
+
* Promotes workspace signals that override root:
|
|
206
|
+
* - framework: workspace wins if it detected a non-'none' unit framework
|
|
207
|
+
* - hasConfig: logical OR (either source)
|
|
208
|
+
* - testFiles / testDirs / estimatedCoverage: workspace authoritative when
|
|
209
|
+
* it saw any test files. (Note: `countFilesRecursive` walks from
|
|
210
|
+
* projectDir up to depth 6 traversing into packages/*, so rootTests
|
|
211
|
+
* already aggregates workspace counts. A comparison like
|
|
212
|
+
* `wsTests.testFiles > rootTests.testFiles` would be dead code — see
|
|
213
|
+
* dev/v0.17.3-plan.md D3 v1.2 revision.)
|
|
214
|
+
*/
|
|
215
|
+
function mergeWorkspaceTests(rootTests, wsTests) {
|
|
216
|
+
const merged = { ...rootTests };
|
|
217
|
+
if (wsTests.framework !== 'none') {
|
|
218
|
+
merged.framework = wsTests.framework;
|
|
219
|
+
merged.hasConfig = wsTests.hasConfig || rootTests.hasConfig;
|
|
220
|
+
}
|
|
221
|
+
if (wsTests.testFiles > 0) {
|
|
222
|
+
merged.testFiles = wsTests.testFiles;
|
|
223
|
+
merged.testDirs = wsTests.testDirs;
|
|
224
|
+
merged.estimatedCoverage = wsTests.estimatedCoverage;
|
|
225
|
+
}
|
|
226
|
+
if (wsTests.e2eFramework) merged.e2eFramework = wsTests.e2eFramework;
|
|
227
|
+
return merged;
|
|
228
|
+
}
|
|
229
|
+
|
|
105
230
|
/**
|
|
106
231
|
* v0.17.1: enumerate workspace paths declared in `pkg.workspaces`.
|
|
107
232
|
*
|
|
@@ -112,7 +237,8 @@ function scan(projectDir) {
|
|
|
112
237
|
* - Single-wildcard patterns: `"packages/*"` (expand immediate subdirs)
|
|
113
238
|
*
|
|
114
239
|
* Does NOT support: `**` recursive patterns, `!exclude` negation, or
|
|
115
|
-
* `pnpm-workspace.yaml` — all deferred
|
|
240
|
+
* `pnpm-workspace.yaml` — all deferred (see dev/ROADMAP.md "Known follow-ups"
|
|
241
|
+
* item 3). Did not land in the v0.17.2 scanner hotfix.
|
|
116
242
|
*
|
|
117
243
|
* Returns a deterministic, deduplicated array of POSIX-style relative
|
|
118
244
|
* workspace paths. Ordering: outer = declaration order of patterns; inner
|
package/package.json
CHANGED
|
@@ -48,13 +48,130 @@ If DIVERGED, flag as FAIL with instruction to merge target branch first.
|
|
|
48
48
|
|
|
49
49
|
Run only if `git diff origin/<target-branch>..HEAD --name-only` shows `.json` files in seed-data or fixtures directories.
|
|
50
50
|
|
|
51
|
+
### Drift Checks (added v0.18.0) — ADVISORY, not blocking
|
|
52
|
+
|
|
53
|
+
Eleven empirically-validated drift patterns. Failures are NOT blockers for the compliance verdict, but MUST be refreshed before requesting user authorization (the user will otherwise catch them during audit and send the PR back). Each check has a concrete shell recipe — use BSD-grep-compatible regex (no `\K`).
|
|
54
|
+
|
|
55
|
+
**12. P1 — PR body test count stale.** The PR body's "npm test" line should match the terminal test count in the ticket (AC / DoD / Completion Log last entry). Agents commonly open the PR at Step 4 and add tests during Step 5 review — the PR body number becomes stale.
|
|
56
|
+
```bash
|
|
57
|
+
PR_BODY=$(gh pr view --json body -q .body)
|
|
58
|
+
PR_TESTS=$(echo "$PR_BODY" | grep -iE "(npm test|tests?.*(pass|green))" | grep -oE "[0-9]+/[0-9]+" | head -1)
|
|
59
|
+
TICKET_TESTS=$(grep -iE "(npm test|tests?.*(pass|green))" "$TICKET" | grep -oE "[0-9]+/[0-9]+" | tail -1)
|
|
60
|
+
[ -n "$PR_TESTS" ] && [ -n "$TICKET_TESTS" ] && [ "$PR_TESTS" != "$TICKET_TESTS" ] && flag "P1 drift: PR body $PR_TESTS vs ticket $TICKET_TESTS"
|
|
61
|
+
```
|
|
62
|
+
|
|
63
|
+
**13. P2 — Merge Checklist Evidence rows aspirational.** Rows marked `[x]` with future-tense Evidence ("will land", "to be created", "pending", "next commit", "TBD") — the row claims done but the work hasn't happened yet.
|
|
64
|
+
```bash
|
|
65
|
+
awk '/^## Merge Checklist Evidence/,/^## /' "$TICKET" \
|
|
66
|
+
| grep -E '^\|.*\[x\].*(to be |will |pending|TBD|Will be |to be created|next commit|aspirational)' \
|
|
67
|
+
&& flag "P2 drift: aspirational row(s) found"
|
|
68
|
+
```
|
|
69
|
+
|
|
70
|
+
**14. P3 — Post-merge actions not logged** (only fires if PR is MERGED and ticket Status is Done). Items marked as post-merge operator actions (AC / DoD / Test plan unchecked with post-merge keywords) should have a Completion Log row documenting execution.
|
|
71
|
+
```bash
|
|
72
|
+
# Strip checkbox prefix before comparison; use grep -Fq fixed-string match.
|
|
73
|
+
grep -E "^- \[ \].*(post-merge|operator|prod rollout|pending verification)" "$TICKET" \
|
|
74
|
+
| sed -E 's/^- \[ \] //' > /tmp/pm_items.txt
|
|
75
|
+
COMPLETION=$(awk '/^## Completion Log/,/^## Merge Checklist/' "$TICKET")
|
|
76
|
+
while IFS= read -r item; do
|
|
77
|
+
[ -z "$item" ] && continue
|
|
78
|
+
KEY=$(echo "$item" | cut -c1-40)
|
|
79
|
+
echo "$COMPLETION" | grep -Fq "$KEY" || flag "P3 drift: post-merge '$item' not in Completion Log"
|
|
80
|
+
done < /tmp/pm_items.txt
|
|
81
|
+
```
|
|
82
|
+
|
|
83
|
+
**15. P4 — Remote branch orphan after "deleted".** Workflow Step 6 claims `[x] branch deleted` but origin still has the branch.
|
|
84
|
+
```bash
|
|
85
|
+
BRANCH=$(grep -E "^\*\*[Bb]ranch:\*\*" "$TICKET" | head -1 | sed -E 's/^\*\*[Bb]ranch:\*\*[[:space:]]*([^[:space:]|]+).*/\1/')
|
|
86
|
+
git fetch origin --prune --quiet
|
|
87
|
+
git ls-remote --heads origin "$BRANCH" 2>/dev/null | grep -q refs/heads && flag "P4 drift: remote branch $BRANCH still exists (run: git push origin --delete $BRANCH)"
|
|
88
|
+
```
|
|
89
|
+
|
|
90
|
+
**16. P5 — Frozen ticket Status post-merge.** Scan all tickets in `docs/tickets/`; flag any with Status ≠ Done whose ticket-ID appears in `git log --all --grep`. Multi-word Status values like "Ready for Merge" must be handled (use `sed -E` char class, not `\w+`).
|
|
91
|
+
```bash
|
|
92
|
+
FROZEN_COUNT=0
|
|
93
|
+
for t in docs/tickets/*.md; do
|
|
94
|
+
status=$(grep -E "^\*\*Status:\*\*" "$t" | head -1 | sed -E 's/^\*\*Status:\*\*[[:space:]]*([A-Za-z ]+)[[:space:]]*\|.*/\1/' | sed -E 's/[[:space:]]+$//')
|
|
95
|
+
[ "$status" = "Done" ] && continue
|
|
96
|
+
ticket_id=$(basename "$t" .md | sed -E 's/-[a-z].*//')
|
|
97
|
+
git log --all --oneline --grep="$ticket_id" | grep -q . && FROZEN_COUNT=$((FROZEN_COUNT+1))
|
|
98
|
+
done
|
|
99
|
+
[ "$FROZEN_COUNT" -ge 2 ] && flag "P5 drift (SYSTEMIC): $FROZEN_COUNT frozen tickets — Status not updated post-merge"
|
|
100
|
+
[ "$FROZEN_COUNT" -eq 1 ] && flag "P5 drift: 1 frozen ticket"
|
|
101
|
+
```
|
|
102
|
+
|
|
103
|
+
**17. P6 — AC count off-by-N.** Merge Checklist Evidence row 1 claim ("all N marked" / "AC: X/Y") diverges from actual count of `[x]` + `[ ]` in `## Acceptance Criteria`.
|
|
104
|
+
```bash
|
|
105
|
+
ACTUAL=$(awk '/^## Acceptance Criteria/,/^## Definition of Done/' "$TICKET" | grep -cE "^- \[[x ]\]")
|
|
106
|
+
CLAIMED=$(grep -oE 'all [0-9]+ marked|AC: [0-9]+/[0-9]+' "$TICKET" | head -1 | grep -oE "[0-9]+" | head -1)
|
|
107
|
+
[ -n "$CLAIMED" ] && [ "$CLAIMED" != "$ACTUAL" ] && [ $((ACTUAL - CLAIMED)) -ge 2 -o $((CLAIMED - ACTUAL)) -ge 2 ] \
|
|
108
|
+
&& flag "P6 drift: claim '$CLAIMED' vs actual AC count $ACTUAL"
|
|
109
|
+
```
|
|
110
|
+
|
|
111
|
+
**18. P7 — Test count drift within ticket (final-sections only).** Only flag AC / DoD / tracker Active-Session numbers diverging from Completion Log terminal. Intermediate rows are legitimate.
|
|
112
|
+
```bash
|
|
113
|
+
TERMINAL=$(awk '/^## Completion Log/,/^## Merge Checklist/' "$TICKET" | grep -iE "(test|pass|green)" | grep -oE "[0-9]+/[0-9]+" | tail -1)
|
|
114
|
+
AC=$(awk '/^## Acceptance Criteria/,/^## Definition of Done/' "$TICKET")
|
|
115
|
+
DOD=$(awk '/^## Definition of Done/,/^## Workflow Checklist/' "$TICKET")
|
|
116
|
+
FINAL_NUMS=$(printf '%s\n%s\n' "$AC" "$DOD" | grep -iE "(test|pass|green)" | grep -oE "[0-9]+/[0-9]+" | sort -u)
|
|
117
|
+
for n in $FINAL_NUMS; do
|
|
118
|
+
[ -n "$TERMINAL" ] && [ "$n" != "$TERMINAL" ] && flag "P7 drift: final-section count $n vs terminal $TERMINAL"
|
|
119
|
+
done
|
|
120
|
+
```
|
|
121
|
+
|
|
122
|
+
**19. P8 — Completion Log gap vs Workflow Checklist.** Each `[x]` Step N in Workflow should have ≥1 Completion Log row mentioning "Step N". Use `while-read` on unique step numbers (not `for-in` which splits on whitespace).
|
|
123
|
+
```bash
|
|
124
|
+
WORKFLOW=$(awk '/^## Workflow Checklist/,/^## Completion Log/' "$TICKET")
|
|
125
|
+
COMPLETION=$(awk '/^## Completion Log/,/^## Merge Checklist/' "$TICKET")
|
|
126
|
+
CHECKED_STEPS=$(echo "$WORKFLOW" | grep -E "^- \[x\] Step [0-9]+:" | sed -E 's/^- \[x\] Step ([0-9]+):.*/\1/' | sort -u)
|
|
127
|
+
while read -r step_num; do
|
|
128
|
+
[ -z "$step_num" ] && continue
|
|
129
|
+
echo "$COMPLETION" | grep -qE "Step[[:space:]]+$step_num([^0-9]|$)" || flag "P8 drift: Step $step_num [x] but no Completion Log entry"
|
|
130
|
+
done <<< "$CHECKED_STEPS"
|
|
131
|
+
```
|
|
132
|
+
|
|
133
|
+
**20. P9 — Tracker header "Last Updated" stale.** The `**Last Updated:**` header and the `**Active Feature:**` detail should agree on step number (e.g., both say 5/6). Mismatch suggests the header wasn't refreshed after state transitions.
|
|
134
|
+
```bash
|
|
135
|
+
TRACKER=docs/project_notes/product-tracker.md
|
|
136
|
+
HEADER_STEP=$(grep -oE 'Step [0-9]+/6' "$TRACKER" | head -1)
|
|
137
|
+
DETAIL_STEP=$(grep -A 1 '^\*\*Active Feature:\*\*' "$TRACKER" | grep -oE 'Step [0-9]+/6' | head -1)
|
|
138
|
+
[ -n "$HEADER_STEP" ] && [ -n "$DETAIL_STEP" ] && [ "$HEADER_STEP" != "$DETAIL_STEP" ] \
|
|
139
|
+
&& flag "P9 drift: tracker header says $HEADER_STEP, Active Feature says $DETAIL_STEP"
|
|
140
|
+
```
|
|
141
|
+
|
|
142
|
+
**21. P10 — Duplicate Completion Log rows.** Hash `date | action | first-80-of-notes`. Duplicates suggest copy-paste error during editing.
|
|
143
|
+
```bash
|
|
144
|
+
awk -F'|' '/^\| [0-9]{4}-[0-9]{2}-[0-9]{2}/ {
|
|
145
|
+
key = $2 "|" $3 "|" substr($4, 1, 80)
|
|
146
|
+
gsub(/^[[:space:]]+|[[:space:]]+$/, "", key)
|
|
147
|
+
print key
|
|
148
|
+
}' "$TICKET" | sort | uniq -d \
|
|
149
|
+
| while read -r dup; do flag "P10 drift: duplicate Completion Log row: $dup"; done
|
|
150
|
+
```
|
|
151
|
+
|
|
152
|
+
**22. P11 — Tracker Features table status vs ticket Status mismatch.** Ticket Status=Ready for Merge / Review → tracker expects `in-progress`. Ticket Status=Done → tracker expects `done`. Mismatch means one side wasn't updated after the state change.
|
|
153
|
+
```bash
|
|
154
|
+
TICKET_STATUS=$(grep -E "^\*\*Status:\*\*" "$TICKET" | head -1 | sed -E 's/^\*\*Status:\*\*[[:space:]]*([A-Za-z ]+)[[:space:]]*\|.*/\1/' | sed -E 's/[[:space:]]+$//')
|
|
155
|
+
FEATURE_ID=$(basename "$TICKET" .md | sed -E 's/-[a-z].*//')
|
|
156
|
+
TRACKER_STATUS=$(grep -F "$FEATURE_ID" docs/project_notes/product-tracker.md | grep -oE "\| (in-progress|done|pending|blocked) \|" | head -1 | sed -E 's/\| ([a-z-]+) \|/\1/')
|
|
157
|
+
case "$TICKET_STATUS" in
|
|
158
|
+
"Ready for Merge"|"Review"|"In Progress"|"Planning"|"Spec") EXPECTED="in-progress" ;;
|
|
159
|
+
"Done") EXPECTED="done" ;;
|
|
160
|
+
*) EXPECTED="" ;;
|
|
161
|
+
esac
|
|
162
|
+
[ -n "$EXPECTED" ] && [ -n "$TRACKER_STATUS" ] && [ "$TRACKER_STATUS" != "$EXPECTED" ] \
|
|
163
|
+
&& flag "P11 drift: ticket Status='$TICKET_STATUS' expects tracker='$EXPECTED' but tracker='$TRACKER_STATUS'"
|
|
164
|
+
```
|
|
165
|
+
|
|
51
166
|
### Output Format
|
|
52
167
|
|
|
53
|
-
Report
|
|
168
|
+
Report two tables — one for **structural (blocking)** compliance, one for **drift (advisory)**. Emit two verdicts plus a combined summary line.
|
|
54
169
|
|
|
55
170
|
```
|
|
56
171
|
## Merge Compliance Audit — [FEATURE-ID]
|
|
57
172
|
|
|
173
|
+
### Structural (1-11) — blocking merge gate
|
|
174
|
+
|
|
58
175
|
| # | Check | Status | Detail |
|
|
59
176
|
|---|-------|:------:|--------|
|
|
60
177
|
| 1 | Ticket Status | PASS | "Ready for Merge" |
|
|
@@ -69,7 +186,31 @@ Report as a compliance table:
|
|
|
69
186
|
| 10 | Working Tree | PASS | Clean |
|
|
70
187
|
| 11 | Data Files | PASS | N/A — no JSON seed files |
|
|
71
188
|
|
|
72
|
-
**
|
|
189
|
+
**STRUCTURAL: READY FOR MERGE** (or **STRUCTURAL: NEEDS FIX — N blockers**)
|
|
190
|
+
|
|
191
|
+
### Drift (12-22) — advisory, refresh before user authorization
|
|
192
|
+
|
|
193
|
+
| # | Pattern | Status | Detail |
|
|
194
|
+
|---|---------|:------:|--------|
|
|
195
|
+
| 12 | P1 PR body test count stale | PASS | matches ticket terminal |
|
|
196
|
+
| 13 | P2 Aspirational Evidence rows | PASS | all rows past-tense |
|
|
197
|
+
| 14 | P3 Post-merge actions logged | PASS | N/A pre-merge |
|
|
198
|
+
| 15 | P4 Remote branch orphan | PASS | not checked pre-merge |
|
|
199
|
+
| 16 | P5 Frozen ticket Status | PASS | 0 frozen |
|
|
200
|
+
| 17 | P6 AC count off-by-N | PASS | claim matches actual |
|
|
201
|
+
| 18 | P7 Intra-ticket test drift | PASS | final sections = terminal |
|
|
202
|
+
| 19 | P8 Completion Log gap | PASS | every [x] step has narrative |
|
|
203
|
+
| 20 | P9 Tracker header stale | PASS | header = detail |
|
|
204
|
+
| 21 | P10 Duplicate log rows | PASS | no duplicates |
|
|
205
|
+
| 22 | P11 Tracker status mismatch | PASS | in-progress for Ready for Merge |
|
|
206
|
+
|
|
207
|
+
**DRIFT: CLEAN** (or **DRIFT: N advisories — refresh before merge**)
|
|
208
|
+
|
|
209
|
+
### Combined verdict
|
|
210
|
+
|
|
211
|
+
- Both PASS → **READY FOR MERGE** (compliance 11/11, drift clean)
|
|
212
|
+
- Structural fail → **NEEDS FIX — N structural blockers** (any drift noted separately)
|
|
213
|
+
- Structural pass + drift advisories → **READY FOR MERGE PENDING DRIFT CLEANUP — N advisories**
|
|
73
214
|
```
|
|
74
215
|
|
|
75
216
|
### If issues are found
|
|
@@ -81,6 +222,19 @@ Fix them directly:
|
|
|
81
222
|
- Merge base diverged → `git merge origin/<target-branch>` and resolve conflicts
|
|
82
223
|
- Data file issues → fix the data
|
|
83
224
|
|
|
225
|
+
**Drift advisories (12-22) fixes:**
|
|
226
|
+
- **P1 (PR body test count stale)** → edit PR body "Quality Gates" / "npm test" line to match ticket terminal count; add "(+N new tests)" delta note
|
|
227
|
+
- **P2 (Aspirational Evidence)** → rewrite `[x]` rows with past-tense text + commit SHA + concrete numbers
|
|
228
|
+
- **P3 (Post-merge action unlogged)** → add a Completion Log row documenting the post-merge execution with date + action + empirical result
|
|
229
|
+
- **P4 (Remote branch orphan)** → `git push origin --delete <branch>` after confirming merge succeeded
|
|
230
|
+
- **P5 (Frozen ticket Status)** → update each ticket's `**Status:**` field from "In Progress"/"Ready for Merge" to `Done`; this often belongs in a docs-only tracker-sync PR if the cycle is retroactive
|
|
231
|
+
- **P6 (AC count off-by-N)** → recount AC items; update the Merge Checklist Evidence row 1 claim to match actual
|
|
232
|
+
- **P7 (Intra-ticket test drift)** → refresh AC / DoD / tracker numbers to match the Completion Log terminal entry
|
|
233
|
+
- **P8 (Completion Log gap)** → add a Completion Log row per missing Step with agent verdict + commit SHA
|
|
234
|
+
- **P9 (Tracker header stale)** → update `**Last Updated:**` line step reference to match Active Feature detail
|
|
235
|
+
- **P10 (Duplicate log rows)** → remove duplicate rows
|
|
236
|
+
- **P11 (Tracker status mismatch)** → sync tracker Features row status to ticket header Status
|
|
237
|
+
|
|
84
238
|
After fixing, re-run the audit to confirm all checks pass.
|
|
85
239
|
|
|
86
240
|
## Notes
|
|
@@ -48,13 +48,127 @@ If DIVERGED, flag as FAIL with instruction to merge target branch first.
|
|
|
48
48
|
|
|
49
49
|
Run only if `git diff origin/<target-branch>..HEAD --name-only` shows `.json` files in seed-data or fixtures directories.
|
|
50
50
|
|
|
51
|
+
### Drift Checks (added v0.18.0) — ADVISORY, not blocking
|
|
52
|
+
|
|
53
|
+
Eleven empirically-validated drift patterns. Failures are NOT blockers for the compliance verdict, but MUST be refreshed before requesting user authorization. Use BSD-grep-compatible regex (no `\K`).
|
|
54
|
+
|
|
55
|
+
**12. P1 — PR body test count stale.** Ratio must co-occur with test/pass/green marker to avoid AC/DoD ratios (14/14, 7/7).
|
|
56
|
+
```bash
|
|
57
|
+
PR_BODY=$(gh pr view --json body -q .body)
|
|
58
|
+
PR_TESTS=$(echo "$PR_BODY" | grep -iE "(npm test|tests?.*(pass|green))" | grep -oE "[0-9]+/[0-9]+" | head -1)
|
|
59
|
+
TICKET_TESTS=$(grep -iE "(npm test|tests?.*(pass|green))" "$TICKET" | grep -oE "[0-9]+/[0-9]+" | tail -1)
|
|
60
|
+
[ -n "$PR_TESTS" ] && [ -n "$TICKET_TESTS" ] && [ "$PR_TESTS" != "$TICKET_TESTS" ] && flag "P1: PR body $PR_TESTS vs ticket $TICKET_TESTS"
|
|
61
|
+
```
|
|
62
|
+
|
|
63
|
+
**13. P2 — Merge Checklist Evidence aspirational.** `[x]` rows with future-tense text.
|
|
64
|
+
```bash
|
|
65
|
+
awk 'f && /^## /{exit} /^## Merge Checklist Evidence/{f=1} f' "$TICKET" \
|
|
66
|
+
| grep -E '^\|.*\[x\].*(to be |will |pending|TBD|Will be |to be created|next commit|aspirational)' \
|
|
67
|
+
&& flag "P2: aspirational row(s)"
|
|
68
|
+
```
|
|
69
|
+
|
|
70
|
+
**14. P3 — Post-merge actions not logged** (post-merge only).
|
|
71
|
+
```bash
|
|
72
|
+
grep -E "^- \[ \].*(post-merge|operator|prod rollout|pending verification)" "$TICKET" \
|
|
73
|
+
| sed -E 's/^- \[ \] //' > /tmp/pm_items.txt
|
|
74
|
+
COMPLETION=$(awk '/^## Completion Log/,/^## Merge Checklist/' "$TICKET")
|
|
75
|
+
while IFS= read -r item; do
|
|
76
|
+
[ -z "$item" ] && continue
|
|
77
|
+
KEY=$(echo "$item" | cut -c1-40)
|
|
78
|
+
echo "$COMPLETION" | grep -Fq "$KEY" || flag "P3: '$item' not logged"
|
|
79
|
+
done < /tmp/pm_items.txt
|
|
80
|
+
```
|
|
81
|
+
|
|
82
|
+
**15. P4 — Remote branch orphan.**
|
|
83
|
+
```bash
|
|
84
|
+
BRANCH=$(grep -E "^\*\*[Bb]ranch:\*\*" "$TICKET" | head -1 | sed -E 's/^\*\*[Bb]ranch:\*\*[[:space:]]*([^[:space:]|]+).*/\1/')
|
|
85
|
+
git fetch origin --prune --quiet
|
|
86
|
+
git ls-remote --heads origin "$BRANCH" 2>/dev/null | grep -q refs/heads && flag "P4: branch $BRANCH still on origin"
|
|
87
|
+
```
|
|
88
|
+
|
|
89
|
+
**16. P5 — Frozen ticket Status post-merge.** Multi-word status via sed char class, not `\w+`.
|
|
90
|
+
```bash
|
|
91
|
+
FROZEN_COUNT=0
|
|
92
|
+
for t in docs/tickets/*.md; do
|
|
93
|
+
status=$(grep -E "^\*\*Status:\*\*" "$t" | head -1 | sed -E 's/^\*\*Status:\*\*[[:space:]]*([A-Za-z ]+)[[:space:]]*\|.*/\1/' | sed -E 's/[[:space:]]+$//')
|
|
94
|
+
[ "$status" = "Done" ] && continue
|
|
95
|
+
ticket_id=$(basename "$t" .md | sed -E 's/-[a-z].*//')
|
|
96
|
+
git log --all --oneline --grep="$ticket_id" | grep -q . && FROZEN_COUNT=$((FROZEN_COUNT+1))
|
|
97
|
+
done
|
|
98
|
+
[ "$FROZEN_COUNT" -ge 2 ] && flag "P5 SYSTEMIC: $FROZEN_COUNT frozen tickets"
|
|
99
|
+
[ "$FROZEN_COUNT" -eq 1 ] && flag "P5: 1 frozen ticket"
|
|
100
|
+
```
|
|
101
|
+
|
|
102
|
+
**17. P6 — AC count off-by-N.**
|
|
103
|
+
```bash
|
|
104
|
+
ACTUAL=$(awk '/^## Acceptance Criteria/,/^## Definition of Done/' "$TICKET" | grep -cE "^- \[[x ]\]")
|
|
105
|
+
CLAIMED=$(grep -oE 'all [0-9]+ marked|AC: [0-9]+/[0-9]+' "$TICKET" | head -1 | grep -oE "[0-9]+" | head -1)
|
|
106
|
+
[ -n "$CLAIMED" ] && [ "$CLAIMED" != "$ACTUAL" ] && { [ $((ACTUAL - CLAIMED)) -ge 2 ] || [ $((CLAIMED - ACTUAL)) -ge 2 ]; } \
|
|
107
|
+
&& flag "P6: claim $CLAIMED vs actual $ACTUAL"
|
|
108
|
+
```
|
|
109
|
+
|
|
110
|
+
**18. P7 — Test count drift within ticket (final-sections only).**
|
|
111
|
+
```bash
|
|
112
|
+
TERMINAL=$(awk '/^## Completion Log/,/^## Merge Checklist/' "$TICKET" | grep -iE "(test|pass|green)" | grep -oE "[0-9]+/[0-9]+" | tail -1)
|
|
113
|
+
AC=$(awk '/^## Acceptance Criteria/,/^## Definition of Done/' "$TICKET")
|
|
114
|
+
DOD=$(awk '/^## Definition of Done/,/^## Workflow Checklist/' "$TICKET")
|
|
115
|
+
for n in $(printf '%s\n%s\n' "$AC" "$DOD" | grep -iE "(test|pass|green)" | grep -oE "[0-9]+/[0-9]+" | sort -u); do
|
|
116
|
+
[ -n "$TERMINAL" ] && [ "$n" != "$TERMINAL" ] && flag "P7: final $n vs terminal $TERMINAL"
|
|
117
|
+
done
|
|
118
|
+
```
|
|
119
|
+
|
|
120
|
+
**19. P8 — Completion Log gap vs Workflow Checklist.**
|
|
121
|
+
```bash
|
|
122
|
+
WORKFLOW=$(awk '/^## Workflow Checklist/,/^## Completion Log/' "$TICKET")
|
|
123
|
+
COMPLETION=$(awk '/^## Completion Log/,/^## Merge Checklist/' "$TICKET")
|
|
124
|
+
CHECKED_STEPS=$(echo "$WORKFLOW" | grep -E "^- \[x\] Step [0-9]+:" | sed -E 's/^- \[x\] Step ([0-9]+):.*/\1/' | sort -u)
|
|
125
|
+
while read -r step_num; do
|
|
126
|
+
[ -z "$step_num" ] && continue
|
|
127
|
+
echo "$COMPLETION" | grep -qE "Step[[:space:]]+$step_num([^0-9]|$)" || flag "P8: Step $step_num [x] but no log entry"
|
|
128
|
+
done <<< "$CHECKED_STEPS"
|
|
129
|
+
```
|
|
130
|
+
|
|
131
|
+
**20. P9 — Tracker header stale.**
|
|
132
|
+
```bash
|
|
133
|
+
TRACKER=docs/project_notes/product-tracker.md
|
|
134
|
+
HEADER_STEP=$(grep -oE 'Step [0-9]+/6' "$TRACKER" | head -1)
|
|
135
|
+
DETAIL_STEP=$(grep -A 1 '^\*\*Active Feature:\*\*' "$TRACKER" | grep -oE 'Step [0-9]+/6' | head -1)
|
|
136
|
+
[ -n "$HEADER_STEP" ] && [ -n "$DETAIL_STEP" ] && [ "$HEADER_STEP" != "$DETAIL_STEP" ] \
|
|
137
|
+
&& flag "P9: header $HEADER_STEP vs detail $DETAIL_STEP"
|
|
138
|
+
```
|
|
139
|
+
|
|
140
|
+
**21. P10 — Duplicate Completion Log rows.**
|
|
141
|
+
```bash
|
|
142
|
+
awk -F'|' '/^\| [0-9]{4}-[0-9]{2}-[0-9]{2}/ {
|
|
143
|
+
key = $2 "|" $3 "|" substr($4, 1, 80)
|
|
144
|
+
gsub(/^[[:space:]]+|[[:space:]]+$/, "", key)
|
|
145
|
+
print key
|
|
146
|
+
}' "$TICKET" | sort | uniq -d | while read -r dup; do flag "P10: duplicate row: $dup"; done
|
|
147
|
+
```
|
|
148
|
+
|
|
149
|
+
**22. P11 — Tracker Features table status vs ticket Status mismatch.**
|
|
150
|
+
```bash
|
|
151
|
+
TICKET_STATUS=$(grep -E "^\*\*Status:\*\*" "$TICKET" | head -1 | sed -E 's/^\*\*Status:\*\*[[:space:]]*([A-Za-z ]+)[[:space:]]*\|.*/\1/' | sed -E 's/[[:space:]]+$//')
|
|
152
|
+
FEATURE_ID=$(basename "$TICKET" .md | sed -E 's/-[a-z].*//')
|
|
153
|
+
TRACKER_STATUS=$(grep -F "$FEATURE_ID" docs/project_notes/product-tracker.md | grep -oE "\| (in-progress|done|pending|blocked) \|" | head -1 | sed -E 's/\| ([a-z-]+) \|/\1/')
|
|
154
|
+
case "$TICKET_STATUS" in
|
|
155
|
+
"Ready for Merge"|"Review"|"In Progress"|"Planning"|"Spec") EXPECTED="in-progress" ;;
|
|
156
|
+
"Done") EXPECTED="done" ;;
|
|
157
|
+
*) EXPECTED="" ;;
|
|
158
|
+
esac
|
|
159
|
+
[ -n "$EXPECTED" ] && [ -n "$TRACKER_STATUS" ] && [ "$TRACKER_STATUS" != "$EXPECTED" ] \
|
|
160
|
+
&& flag "P11: Status='$TICKET_STATUS' expects tracker='$EXPECTED' but tracker='$TRACKER_STATUS'"
|
|
161
|
+
```
|
|
162
|
+
|
|
51
163
|
### Output Format
|
|
52
164
|
|
|
53
|
-
Report
|
|
165
|
+
Report two tables — one for **structural (blocking)** compliance, one for **drift (advisory)**. Emit two verdicts plus a combined summary line.
|
|
54
166
|
|
|
55
167
|
```
|
|
56
168
|
## Merge Compliance Audit — [FEATURE-ID]
|
|
57
169
|
|
|
170
|
+
### Structural (1-11) — blocking merge gate
|
|
171
|
+
|
|
58
172
|
| # | Check | Status | Detail |
|
|
59
173
|
|---|-------|:------:|--------|
|
|
60
174
|
| 1 | Ticket Status | PASS | "Ready for Merge" |
|
|
@@ -69,7 +183,31 @@ Report as a compliance table:
|
|
|
69
183
|
| 10 | Working Tree | PASS | Clean |
|
|
70
184
|
| 11 | Data Files | PASS | N/A — no JSON seed files |
|
|
71
185
|
|
|
72
|
-
**
|
|
186
|
+
**STRUCTURAL: READY FOR MERGE** (or **STRUCTURAL: NEEDS FIX — N blockers**)
|
|
187
|
+
|
|
188
|
+
### Drift (12-22) — advisory, refresh before user authorization
|
|
189
|
+
|
|
190
|
+
| # | Pattern | Status | Detail |
|
|
191
|
+
|---|---------|:------:|--------|
|
|
192
|
+
| 12 | P1 PR body test count stale | PASS | matches ticket terminal |
|
|
193
|
+
| 13 | P2 Aspirational Evidence rows | PASS | all past-tense |
|
|
194
|
+
| 14 | P3 Post-merge actions logged | PASS | N/A pre-merge |
|
|
195
|
+
| 15 | P4 Remote branch orphan | PASS | not checked pre-merge |
|
|
196
|
+
| 16 | P5 Frozen ticket Status | PASS | 0 frozen |
|
|
197
|
+
| 17 | P6 AC count off-by-N | PASS | claim matches actual |
|
|
198
|
+
| 18 | P7 Intra-ticket test drift | PASS | final = terminal |
|
|
199
|
+
| 19 | P8 Completion Log gap | PASS | every [x] step has narrative |
|
|
200
|
+
| 20 | P9 Tracker header stale | PASS | header = detail |
|
|
201
|
+
| 21 | P10 Duplicate log rows | PASS | no duplicates |
|
|
202
|
+
| 22 | P11 Tracker status mismatch | PASS | status consistent |
|
|
203
|
+
|
|
204
|
+
**DRIFT: CLEAN** (or **DRIFT: N advisories — refresh before merge**)
|
|
205
|
+
|
|
206
|
+
### Combined verdict
|
|
207
|
+
|
|
208
|
+
- Both PASS → **READY FOR MERGE**
|
|
209
|
+
- Structural fail → **NEEDS FIX — N blockers**
|
|
210
|
+
- Structural pass + drift advisories → **READY FOR MERGE PENDING DRIFT CLEANUP — N advisories**
|
|
73
211
|
```
|
|
74
212
|
|
|
75
213
|
### If issues are found
|
|
@@ -81,6 +219,19 @@ Fix them directly:
|
|
|
81
219
|
- Merge base diverged → `git merge origin/<target-branch>` and resolve conflicts
|
|
82
220
|
- Data file issues → fix the data
|
|
83
221
|
|
|
222
|
+
**Drift advisories (12-22) fixes:**
|
|
223
|
+
- **P1** → edit PR body npm test line to match ticket terminal count
|
|
224
|
+
- **P2** → rewrite `[x]` rows with past-tense + commit SHA
|
|
225
|
+
- **P3** → add Completion Log row for each post-merge execution
|
|
226
|
+
- **P4** → `git push origin --delete <branch>` after merge
|
|
227
|
+
- **P5** → update ticket Status from "In Progress"/"Ready for Merge" to `Done`
|
|
228
|
+
- **P6** → recount ACs and update Merge Checklist row 1 claim
|
|
229
|
+
- **P7** → sync AC/DoD/tracker numbers to Completion Log terminal
|
|
230
|
+
- **P8** → add Completion Log row per missing Step with agent verdict + commit SHA
|
|
231
|
+
- **P9** → refresh `**Last Updated:**` step reference
|
|
232
|
+
- **P10** → remove duplicate rows
|
|
233
|
+
- **P11** → sync tracker Features row status to ticket header Status
|
|
234
|
+
|
|
84
235
|
After fixing, re-run the audit to confirm all checks pass.
|
|
85
236
|
|
|
86
237
|
## Notes
|