@hyperdrive.bot/bmad-workflow 1.0.18 → 1.0.19
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/commands/config/show.js +8 -2
- package/dist/commands/decompose.js +26 -5
- package/dist/commands/epics/create.d.ts +1 -0
- package/dist/commands/mcp/add.d.ts +16 -0
- package/dist/commands/mcp/add.js +77 -0
- package/dist/commands/mcp/credential/get.d.ts +14 -0
- package/dist/commands/mcp/credential/get.js +35 -0
- package/dist/commands/mcp/credential/list.d.ts +17 -0
- package/dist/commands/mcp/credential/list.js +67 -0
- package/dist/commands/mcp/credential/remove.d.ts +18 -0
- package/dist/commands/mcp/credential/remove.js +84 -0
- package/dist/commands/mcp/credential/set.d.ts +16 -0
- package/dist/commands/mcp/credential/set.js +41 -0
- package/dist/commands/mcp/credential/validate.d.ts +12 -0
- package/dist/commands/mcp/credential/validate.js +150 -0
- package/dist/commands/mcp/list.d.ts +17 -0
- package/dist/commands/mcp/list.js +80 -0
- package/dist/commands/mcp/logs.d.ts +15 -0
- package/dist/commands/mcp/logs.js +64 -0
- package/dist/commands/mcp/preset.d.ts +15 -0
- package/dist/commands/mcp/preset.js +84 -0
- package/dist/commands/mcp/remove.d.ts +14 -0
- package/dist/commands/mcp/remove.js +36 -0
- package/dist/commands/mcp/start.d.ts +12 -0
- package/dist/commands/mcp/start.js +80 -0
- package/dist/commands/mcp/status.d.ts +30 -0
- package/dist/commands/mcp/status.js +180 -0
- package/dist/commands/mcp/stop.d.ts +12 -0
- package/dist/commands/mcp/stop.js +47 -0
- package/dist/commands/stories/create.d.ts +1 -0
- package/dist/commands/stories/develop.d.ts +1 -0
- package/dist/commands/stories/qa.js +5 -2
- package/dist/commands/stories/review.d.ts +124 -0
- package/dist/commands/stories/review.js +516 -0
- package/dist/commands/workflow.d.ts +8 -0
- package/dist/commands/workflow.js +110 -2
- package/dist/mcp/types.d.ts +99 -0
- package/dist/mcp/types.js +7 -0
- package/dist/mcp/utils/docker-utils.d.ts +56 -0
- package/dist/mcp/utils/docker-utils.js +108 -0
- package/dist/mcp/utils/template-loader.d.ts +21 -0
- package/dist/mcp/utils/template-loader.js +60 -0
- package/dist/models/agent-options.d.ts +10 -1
- package/dist/models/workflow-config.d.ts +77 -0
- package/dist/models/workflow-result.d.ts +7 -0
- package/dist/services/agents/claude-agent-runner.js +19 -3
- package/dist/services/file-system/path-resolver.d.ts +10 -0
- package/dist/services/file-system/path-resolver.js +12 -0
- package/dist/services/mcp/mcp-config-manager.d.ts +54 -0
- package/dist/services/mcp/mcp-config-manager.js +146 -0
- package/dist/services/mcp/mcp-context-injector.d.ts +92 -0
- package/dist/services/mcp/mcp-context-injector.js +168 -0
- package/dist/services/mcp/mcp-credential-manager.d.ts +48 -0
- package/dist/services/mcp/mcp-credential-manager.js +124 -0
- package/dist/services/mcp/mcp-health-checker.d.ts +56 -0
- package/dist/services/mcp/mcp-health-checker.js +162 -0
- package/dist/services/mcp/types/health-types.d.ts +31 -0
- package/dist/services/mcp/types/health-types.js +7 -0
- package/dist/services/orchestration/dependency-graph-executor.js +1 -1
- package/dist/services/orchestration/task-decomposition-service.d.ts +2 -1
- package/dist/services/orchestration/task-decomposition-service.js +90 -36
- package/dist/services/orchestration/workflow-orchestrator.d.ts +54 -2
- package/dist/services/orchestration/workflow-orchestrator.js +303 -17
- package/dist/services/review/ai-review-scanner.d.ts +66 -0
- package/dist/services/review/ai-review-scanner.js +142 -0
- package/dist/services/review/coderabbit-scanner.d.ts +25 -0
- package/dist/services/review/coderabbit-scanner.js +31 -0
- package/dist/services/review/index.d.ts +20 -0
- package/dist/services/review/index.js +15 -0
- package/dist/services/review/lint-scanner.d.ts +46 -0
- package/dist/services/review/lint-scanner.js +172 -0
- package/dist/services/review/review-config.d.ts +62 -0
- package/dist/services/review/review-config.js +91 -0
- package/dist/services/review/review-phase-executor.d.ts +69 -0
- package/dist/services/review/review-phase-executor.js +152 -0
- package/dist/services/review/review-queue.d.ts +98 -0
- package/dist/services/review/review-queue.js +174 -0
- package/dist/services/review/review-reporter.d.ts +94 -0
- package/dist/services/review/review-reporter.js +386 -0
- package/dist/services/review/scanner-factory.d.ts +42 -0
- package/dist/services/review/scanner-factory.js +60 -0
- package/dist/services/review/self-heal-loop.d.ts +58 -0
- package/dist/services/review/self-heal-loop.js +132 -0
- package/dist/services/review/severity-classifier.d.ts +17 -0
- package/dist/services/review/severity-classifier.js +314 -0
- package/dist/services/review/tech-debt-tracker.d.ts +52 -0
- package/dist/services/review/tech-debt-tracker.js +245 -0
- package/dist/services/review/types.d.ts +93 -0
- package/dist/services/review/types.js +23 -0
- package/dist/services/validation/config-validator.d.ts +84 -0
- package/dist/services/validation/config-validator.js +78 -0
- package/dist/utils/credential-utils.d.ts +14 -0
- package/dist/utils/credential-utils.js +19 -0
- package/dist/utils/duration.d.ts +41 -0
- package/dist/utils/duration.js +89 -0
- package/dist/utils/shared-flags.d.ts +1 -0
- package/dist/utils/shared-flags.js +11 -2
- package/package.json +4 -2
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
/**
 * Severity Classifier
 *
 * Pure function that parses raw scanner output from multiple sources
 * (structured AI, ESLint JSON, tsc errors) into a unified, deduplicated
 * ClassifiedIssue[] array.
 *
 * Design: zero side effects, deterministic output, no I/O.
 */
import type { ClassifiedIssue, RawReviewOutput } from './types.js';
/**
 * Classify raw review outputs into a unified, deduplicated, deterministically-ordered array.
 *
 * Outputs whose `source` is not recognized are parsed with the structured-AI
 * parser as a fallback; malformed blocks/lines are silently skipped rather
 * than causing a throw.
 *
 * @param outputs - Raw scanner outputs to classify
 * @returns Deduplicated classified issues sorted by severity DESC, file ASC, line ASC
 */
export declare function classify(outputs: RawReviewOutput[]): ClassifiedIssue[];
|
|
@@ -0,0 +1,314 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Severity Classifier
|
|
3
|
+
*
|
|
4
|
+
* Pure function that parses raw scanner output from multiple sources
|
|
5
|
+
* (structured AI, ESLint JSON, tsc errors) into a unified, deduplicated
|
|
6
|
+
* ClassifiedIssue[] array.
|
|
7
|
+
*
|
|
8
|
+
* Design: zero side effects, deterministic output, no I/O.
|
|
9
|
+
*/
|
|
10
|
+
import { Severity } from './types.js';
|
|
11
|
+
/**
 * Severity rank for sorting (higher = more severe).
 * Used both for severity-DESC ordering in sortIssues and for keeping the
 * higher severity when duplicates are merged in deduplicate.
 */
const SEVERITY_RANK = {
    [Severity.CRITICAL]: 4,
    [Severity.HIGH]: 3,
    [Severity.MEDIUM]: 2,
    [Severity.LOW]: 1,
};
|
|
18
|
+
/**
 * Classify raw review outputs into a unified, deduplicated, deterministically-ordered array.
 *
 * @param outputs - Raw scanner outputs to classify
 * @returns Deduplicated classified issues sorted by severity DESC, file ASC, line ASC
 */
export function classify(outputs) {
    // Parse every raw output and flatten into a single issue list,
    // then collapse duplicates and impose the deterministic ordering.
    const combined = outputs.flatMap((output) => parseOutput(output));
    return sortIssues(deduplicate(combined));
}
|
|
33
|
+
/**
 * Route a raw output to the appropriate parser based on source.
 */
function parseOutput(output) {
    switch (output.source.toLowerCase()) {
        case 'ai':
        case 'claude-ai':
        case 'coderabbit':
            return parseStructuredAI(output.raw);
        case 'lint':
        case 'eslint':
            return parseEslintJson(output.raw);
        case 'tsc':
        case 'typescript':
            return parseTscErrors(output.raw);
        default:
            // Unknown source — attempt structured AI format as fallback
            return parseStructuredAI(output.raw);
    }
}
|
|
50
|
+
// ─── Structured AI Format Parser ─────────────────────────────────────────────
/**
 * Parse structured AI review output.
 *
 * Format: blocks separated by `---`, each containing:
 *   SEVERITY: HIGH
 *   FILE: src/foo.ts
 *   LINE: 42
 *   ISSUE: Description
 *   FIX: Suggested fix
 *
 * Special case: "REVIEW_PASS: No issues found" → empty array.
 */
function parseStructuredAI(raw) {
    const text = raw.trim();
    // Empty output and the REVIEW_PASS sentinel both mean "no issues".
    if (text.length === 0 || text.includes('REVIEW_PASS:')) {
        return [];
    }
    // Split on standalone `---` separators, parse each block, and drop
    // empty fragments as well as malformed blocks (parse → undefined).
    return text
        .split(/^---$/m)
        .map((fragment) => fragment.trim())
        .filter((fragment) => fragment.length !== 0)
        .map((fragment) => parseStructuredBlock(fragment))
        .filter((issue) => issue !== undefined);
}
|
|
81
|
+
/**
 * Parse a single structured AI block into a ClassifiedIssue.
 * Returns undefined for malformed blocks (missing required fields,
 * non-numeric LINE, or unrecognized SEVERITY).
 */
function parseStructuredBlock(block) {
    const fields = {
        severity: extractField(block, 'SEVERITY'),
        file: extractField(block, 'FILE'),
        lineStr: extractField(block, 'LINE'),
        issue: extractField(block, 'ISSUE'),
        fix: extractField(block, 'FIX'),
    };
    // SEVERITY, FILE, LINE, and ISSUE are required; FIX is optional.
    if (!fields.severity || !fields.file || !fields.lineStr || !fields.issue) {
        return undefined;
    }
    const line = Number.parseInt(fields.lineStr, 10);
    const severity = mapSeverityString(fields.severity);
    if (Number.isNaN(line) || !severity) {
        return undefined;
    }
    // Build the result incrementally so `fix` only appears when present,
    // preserving property order: file, fix?, issue, line, severity.
    const result = { file: fields.file };
    if (fields.fix) {
        result.fix = fields.fix;
    }
    result.issue = fields.issue;
    result.line = line;
    result.severity = severity;
    return result;
}
|
|
110
|
+
/**
 * Extract a field value from a structured block line.
 * e.g., "SEVERITY: HIGH" → "HIGH"
 * Matching is line-anchored and case-insensitive; returns undefined
 * when the field is absent.
 */
function extractField(block, fieldName) {
    const match = block.match(new RegExp(`^${fieldName}:\\s*(.+)$`, 'mi'));
    return match === null ? undefined : match[1].trim();
}
|
|
119
|
+
/**
 * Map a severity string to the Severity enum.
 * Comparison is case-insensitive and whitespace-tolerant; unknown values
 * yield undefined.
 */
function mapSeverityString(value) {
    const table = {
        CRITICAL: Severity.CRITICAL,
        HIGH: Severity.HIGH,
        MEDIUM: Severity.MEDIUM,
        LOW: Severity.LOW,
    };
    return table[value.toUpperCase().trim()];
}
|
|
134
|
+
/**
 * Parse ESLint JSON output.
 *
 * Format: array of { filePath, messages: [{ severity, line, message, ruleId }] }
 * Severity mapping: 2 (error) → HIGH, 1 (warning) → MEDIUM
 * Returns [] for empty input, invalid JSON, or non-array payloads.
 */
function parseEslintJson(raw) {
    const text = raw.trim();
    if (text.length === 0) {
        return [];
    }
    let results;
    try {
        results = JSON.parse(text);
    }
    catch {
        // Unparseable output is treated as "nothing to report".
        return [];
    }
    if (!Array.isArray(results)) {
        return [];
    }
    const issues = [];
    for (const fileResult of results) {
        const file = fileResult.filePath ?? '';
        for (const msg of fileResult.messages ?? []) {
            const severity = mapEslintSeverity(msg.severity);
            // Skip messages with severities we don't track (e.g. 0 / off).
            if (!severity) {
                continue;
            }
            const ruleSuffix = msg.ruleId ? ` (${msg.ruleId})` : '';
            issues.push({
                file,
                issue: `${msg.message ?? 'Unknown ESLint issue'}${ruleSuffix}`,
                line: msg.line ?? 0,
                severity,
            });
        }
    }
    return issues;
}
|
|
174
|
+
/**
 * Map ESLint numeric severity to Severity enum.
 * 2 (error) → HIGH, 1 (warning) → MEDIUM, anything else → undefined.
 */
function mapEslintSeverity(severity) {
    switch (severity) {
        case 2:
            return Severity.HIGH;
        case 1:
            return Severity.MEDIUM;
        default:
            return undefined;
    }
}
|
|
184
|
+
// ─── tsc Error Format Parser ─────────────────────────────────────────────────
/**
 * Regex for tsc error lines:
 *   file(line,col): error TSxxxx: message
 */
const TSC_ERROR_REGEX = /^(.+?)\((\d+),\d+\):\s*error\s+(TS\d+):\s*(.+)$/;
/**
 * Parse tsc --noEmit --pretty false output.
 *
 * All tsc errors map to HIGH severity; non-matching lines are ignored.
 */
function parseTscErrors(raw) {
    const text = raw.trim();
    if (text.length === 0) {
        return [];
    }
    const issues = [];
    for (const rawLine of text.split('\n')) {
        const match = TSC_ERROR_REGEX.exec(rawLine.trim());
        if (match) {
            issues.push({
                file: match[1],
                issue: `${match[3]}: ${match[4]}`,
                line: Number.parseInt(match[2], 10),
                severity: Severity.HIGH,
            });
        }
    }
    return issues;
}
|
|
216
|
+
// ─── Deduplication ───────────────────────────────────────────────────────────
/**
 * Deduplicate issues by file + line + similar issue text.
 * When duplicates differ in severity, keep the higher one.
 * Preserve fix field from whichever duplicate has one.
 */
function deduplicate(issues) {
    const merged = [];
    for (const candidate of issues) {
        // Linear scan is required: similarity is fuzzy, so a hash key
        // cannot be used here.
        const match = merged.find((existing) => existing.file === candidate.file &&
            existing.line === candidate.line &&
            isSimilarText(existing.issue, candidate.issue));
        if (match === undefined) {
            merged.push({ ...candidate });
            continue;
        }
        // Duplicate found: upgrade severity if the newcomer ranks higher.
        if (SEVERITY_RANK[candidate.severity] > SEVERITY_RANK[match.severity]) {
            match.severity = candidate.severity;
        }
        // Carry over a fix suggestion when only the newcomer has one.
        if (candidate.fix && !match.fix) {
            match.fix = candidate.fix;
        }
    }
    return merged;
}
|
|
243
|
+
/**
 * Check if two issue texts are similar enough to be considered duplicates.
 *
 * Strategy: normalize both strings, then check if one is a substring of the
 * other, or if their Levenshtein distance is less than 30% of the shorter
 * string's length.
 */
function isSimilarText(a, b) {
    const left = normalizeText(a);
    const right = normalizeText(b);
    // Exact match or containment in either direction counts as similar.
    if (left === right || left.includes(right) || right.includes(left)) {
        return true;
    }
    const shorterLength = left.length <= right.length ? left.length : right.length;
    if (shorterLength === 0) {
        return false;
    }
    // Edit-distance fallback for near-identical wording.
    return levenshtein(left, right) < shorterLength * 0.3;
}
|
|
264
|
+
/**
 * Normalize text for comparison: collapse whitespace, trim, lowercase.
 */
function normalizeText(text) {
    const collapsed = text.replace(/\s+/g, ' ');
    return collapsed.trim().toLowerCase();
}
|
|
270
|
+
/**
 * Levenshtein distance between two strings.
 * Single-row dynamic programming: O(min-memory) with a saved diagonal
 * instead of keeping two full rows.
 */
function levenshtein(a, b) {
    if (a.length === 0) {
        return b.length;
    }
    if (b.length === 0) {
        return a.length;
    }
    // row[j] holds the distance between a[0..i) and b[0..j).
    const row = Array.from({ length: b.length + 1 }, (_, j) => j);
    for (let i = 1; i <= a.length; i++) {
        let diagonal = row[0]; // value of row[j-1] from the previous iteration
        row[0] = i;
        for (let j = 1; j <= b.length; j++) {
            const above = row[j];
            const substitutionCost = a[i - 1] === b[j - 1] ? 0 : 1;
            row[j] = Math.min(above + 1, row[j - 1] + 1, diagonal + substitutionCost);
            diagonal = above;
        }
    }
    return row[b.length];
}
|
|
297
|
+
// ─── Sorting ─────────────────────────────────────────────────────────────────
/**
 * Sort issues deterministically: severity DESC, file ASC, line ASC.
 * Returns a new array; the input is not mutated.
 */
function sortIssues(issues) {
    const bySeverityFileLine = (a, b) => {
        if (a.severity !== b.severity) {
            // Higher rank (more severe) sorts first.
            return SEVERITY_RANK[b.severity] - SEVERITY_RANK[a.severity];
        }
        // Within a severity: file path, then line number.
        return a.file.localeCompare(b.file) || a.line - b.line;
    };
    return [...issues].sort(bySeverityFileLine);
}
|
|
@@ -0,0 +1,52 @@
|
|
|
1
|
+
/**
 * Tech Debt Tracker
 *
 * Documents MEDIUM-severity issues from AI code reviews as tech debt entries.
 * Appends entries to story markdown files and aggregates them in session-level
 * backlog files. Deduplicates by file + line + issue text.
 */
import type pino from 'pino';
import type { ClassifiedIssue } from './types.js';
/**
 * Documents MEDIUM-severity issues as tech debt in story files and session backlogs.
 *
 * Both public methods are no-ops (aside from debug logging) when the input
 * contains no MEDIUM issues or when every issue is already documented.
 */
export declare class TechDebtTracker {
    private readonly logger;
    constructor(logger: pino.Logger);
    /**
     * Append MEDIUM-severity issues to a story markdown file under a
     * `### Tech Debt (MEDIUM)` section. Deduplicates against both the
     * input array and entries already present in the file.
     *
     * A missing story file is not an error: a new file is created.
     *
     * @param issues - Classified issues (will be filtered to MEDIUM only)
     * @param storyFile - Absolute path to the story markdown file
     */
    appendToStory(issues: ClassifiedIssue[], storyFile: string): Promise<void>;
    /**
     * Aggregate MEDIUM-severity issues into a session-level backlog file.
     * Creates `{sessionDir}/review/tech-debt-backlog.md`.
     *
     * @param issues - Classified issues (will be filtered to MEDIUM only)
     * @param sessionDir - Absolute path to the session directory
     * @param storyId - Story identifier for grouping (optional)
     */
    appendToSession(issues: ClassifiedIssue[], sessionDir: string, storyId?: string): Promise<void>;
    /**
     * Remove duplicate issues from the input array.
     * Key: file + line + issue text.
     */
    private deduplicateIssues;
    /**
     * Filter issues to only MEDIUM severity.
     */
    private filterMedium;
    /**
     * Find the end of a markdown section (up to the next heading or EOF).
     * Returns the index position where new content can be inserted.
     */
    private findSectionEnd;
    /**
     * Update the total issue count in the backlog header.
     */
    private updateTotalCount;
}
|
|
@@ -0,0 +1,245 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Tech Debt Tracker
|
|
3
|
+
*
|
|
4
|
+
* Documents MEDIUM-severity issues from AI code reviews as tech debt entries.
|
|
5
|
+
* Appends entries to story markdown files and aggregates them in session-level
|
|
6
|
+
* backlog files. Deduplicates by file + line + issue text.
|
|
7
|
+
*/
|
|
8
|
+
import { mkdir, readFile, writeFile } from 'node:fs/promises';
|
|
9
|
+
import { join } from 'node:path';
|
|
10
|
+
import { Severity } from './types.js';
|
|
11
|
+
/** Section header used in story markdown files */
const TECH_DEBT_HEADER = '### Tech Debt (MEDIUM)';
/**
 * Regex to parse a tech debt entry line: `- **file:line** — description`.
 * Intentionally unflagged (no /g), so repeated .test/.exec calls carry no
 * lastIndex state between invocations.
 */
const ENTRY_REGEX = /^- \*\*(.+?):(\d+)\*\* — (.+)$/;
|
|
15
|
+
/**
 * Builds a deduplication key from an issue's identifying fields.
 * Key shape: "file:line:issue".
 */
function deduplicationKey(file, line, issue) {
    return [file, line, issue].join(':');
}
|
|
21
|
+
/**
 * Formats a single ClassifiedIssue as a markdown list entry:
 * `- **file:line** — description`
 */
function formatEntry(issue) {
    const location = `${issue.file}:${issue.line}`;
    return `- **${location}** — ${issue.issue}`;
}
|
|
27
|
+
/**
 * Parses existing tech debt entries from markdown content.
 * Scans the entire document for entry lines matching the bold-path format.
 * Returns a Set of deduplication keys for entries already present.
 */
function parseExistingEntries(content) {
    const keys = new Set();
    for (const rawLine of content.split('\n')) {
        const match = ENTRY_REGEX.exec(rawLine.trim());
        if (match === null) {
            continue;
        }
        const [, file, lineStr, issue] = match;
        keys.add(deduplicationKey(file, Number.parseInt(lineStr, 10), issue));
    }
    return keys;
}
|
|
43
|
+
/**
 * Documents MEDIUM-severity issues as tech debt in story files and session backlogs.
 *
 * Both public methods are no-ops (aside from debug logging) when the input
 * has no MEDIUM issues or when every issue is already documented.
 */
export class TechDebtTracker {
    // Structured logger; code uses only debug/info/warn with (obj, msg) shape.
    logger;
    /**
     * @param logger - Logger used for progress and diagnostic output.
     */
    constructor(logger) {
        this.logger = logger;
    }
    /**
     * Append MEDIUM-severity issues to a story markdown file under a
     * `### Tech Debt (MEDIUM)` section. Deduplicates against both the
     * input array and entries already present in the file.
     *
     * @param issues - Classified issues (will be filtered to MEDIUM only)
     * @param storyFile - Absolute path to the story markdown file
     */
    async appendToStory(issues, storyFile) {
        const mediumIssues = this.filterMedium(issues);
        if (mediumIssues.length === 0) {
            this.logger.debug('No MEDIUM issues to document in story');
            return;
        }
        const deduplicated = this.deduplicateIssues(mediumIssues);
        if (deduplicated.length === 0) {
            return;
        }
        let content;
        try {
            content = await readFile(storyFile, 'utf-8');
        }
        catch {
            // Missing story file is not fatal — start from empty content.
            this.logger.warn({ storyFile }, 'Story file not found, creating new file');
            content = '';
        }
        // Drop issues whose entries already exist anywhere in the file.
        const existingKeys = parseExistingEntries(content);
        const newIssues = deduplicated.filter((issue) => !existingKeys.has(deduplicationKey(issue.file, issue.line, issue.issue)));
        if (newIssues.length === 0) {
            this.logger.debug('All MEDIUM issues already documented in story');
            return;
        }
        const entries = newIssues.map((issue) => formatEntry(issue));
        if (content.includes(TECH_DEBT_HEADER)) {
            // Insert at the end of the existing section (before the next heading).
            const headerIndex = content.indexOf(TECH_DEBT_HEADER);
            const insertionPoint = this.findSectionEnd(content, headerIndex + TECH_DEBT_HEADER.length);
            content = content.slice(0, insertionPoint) + entries.join('\n') + '\n' + content.slice(insertionPoint);
        }
        else {
            // No section yet — append one at the end of the file.
            const separator = content.endsWith('\n') ? '\n' : '\n\n';
            content = content + separator + TECH_DEBT_HEADER + '\n' + entries.join('\n') + '\n';
        }
        await writeFile(storyFile, content, 'utf-8');
        this.logger.info({ count: newIssues.length, storyFile }, 'Documented tech debt in story');
    }
    /**
     * Aggregate MEDIUM-severity issues into a session-level backlog file.
     * Creates `{sessionDir}/review/tech-debt-backlog.md`.
     *
     * @param issues - Classified issues (will be filtered to MEDIUM only)
     * @param sessionDir - Absolute path to the session directory
     * @param storyId - Story identifier for grouping (optional)
     */
    async appendToSession(issues, sessionDir, storyId) {
        const mediumIssues = this.filterMedium(issues);
        if (mediumIssues.length === 0) {
            this.logger.debug('No MEDIUM issues to document in session backlog');
            return;
        }
        const deduplicated = this.deduplicateIssues(mediumIssues);
        if (deduplicated.length === 0) {
            return;
        }
        const reviewDir = join(sessionDir, 'review');
        await mkdir(reviewDir, { recursive: true });
        const backlogPath = join(reviewDir, 'tech-debt-backlog.md');
        let existingContent = '';
        try {
            existingContent = await readFile(backlogPath, 'utf-8');
        }
        catch {
            // File doesn't exist yet — will create below.
        }
        // Drop issues already recorded in the backlog.
        const existingKeys = parseExistingEntries(existingContent);
        const newIssues = deduplicated.filter((issue) => !existingKeys.has(deduplicationKey(issue.file, issue.line, issue.issue)));
        if (newIssues.length === 0) {
            this.logger.debug('All MEDIUM issues already documented in session backlog');
            return;
        }
        const entries = newIssues.map((issue) => formatEntry(issue));
        let content;
        let logMessage = 'Documented tech debt in session backlog';
        if (existingContent.length === 0) {
            // Create new backlog file with header; the count is already
            // correct, so no updateTotalCount pass is needed here.
            const lines = [
                '# Tech Debt Backlog',
                '',
                `> Generated: ${new Date().toISOString()}`,
                `> Total issues: ${newIssues.length}`,
                '',
            ];
            if (storyId) {
                lines.push(`## ${storyId}`, '');
            }
            lines.push(...entries, '');
            content = lines.join('\n');
        }
        else if (storyId && existingContent.includes(`## ${storyId}`)) {
            // Insert into the existing story section, then refresh the count.
            const sectionIndex = existingContent.indexOf(`## ${storyId}`);
            const insertionPoint = this.findSectionEnd(existingContent, sectionIndex + `## ${storyId}`.length);
            content = existingContent.slice(0, insertionPoint) + entries.join('\n') + '\n' + existingContent.slice(insertionPoint);
            content = this.updateTotalCount(content);
            logMessage = 'Appended tech debt to session backlog';
        }
        else {
            // Append at the end — under a new story heading when one was given.
            const appendContent = storyId
                ? `\n## ${storyId}\n` + entries.join('\n') + '\n'
                : entries.join('\n') + '\n';
            content = this.updateTotalCount(existingContent + appendContent);
        }
        await writeFile(backlogPath, content, 'utf-8');
        this.logger.info({ count: newIssues.length, sessionDir }, logMessage);
    }
    /**
     * Remove duplicate issues from the input array.
     * Key: file + line + issue text.
     */
    deduplicateIssues(issues) {
        const seen = new Set();
        const result = [];
        for (const issue of issues) {
            const key = deduplicationKey(issue.file, issue.line, issue.issue);
            if (!seen.has(key)) {
                seen.add(key);
                result.push(issue);
            }
        }
        return result;
    }
    /**
     * Filter issues to only MEDIUM severity.
     */
    filterMedium(issues) {
        return issues.filter((issue) => issue.severity === Severity.MEDIUM);
    }
    /**
     * Find the end of a markdown section (up to the next heading or EOF).
     * Returns the index position where new content can be inserted.
     *
     * @param content - Full document text
     * @param startAfter - Index just past the section's heading text
     */
    findSectionEnd(content, startAfter) {
        const lines = content.slice(startAfter).split('\n');
        let offset = startAfter;
        for (let i = 0; i < lines.length; i++) {
            // Line 0 is the remainder of the heading line itself — always skip.
            // A `#`–`###` heading marks the start of the next section; the
            // regex requires "# " so a separate non-empty check is unnecessary.
            if (i > 0 && /^#{1,3} /.test(lines[i].trim())) {
                return offset;
            }
            offset += lines[i].length + 1; // +1 for the '\n' removed by split
        }
        return offset;
    }
    /**
     * Update the `> Total issues: N` header line to match the number of
     * entry lines currently present in the document.
     */
    updateTotalCount(content) {
        // Count entry lines one line at a time; ENTRY_REGEX is anchored and
        // non-global, so a per-line test is the reliable way to count.
        let count = 0;
        for (const line of content.split('\n')) {
            if (ENTRY_REGEX.test(line.trim())) {
                count++;
            }
        }
        return content.replace(/^> Total issues: \d+$/m, `> Total issues: ${count}`);
    }
}
|