@arvorco/relentless 0.3.1 → 0.4.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/commands/relentless.convert.md +25 -0
- package/.claude/skills/analyze/SKILL.md +113 -40
- package/.claude/skills/analyze/templates/analysis-report.md +138 -0
- package/.claude/skills/checklist/SKILL.md +143 -51
- package/.claude/skills/checklist/templates/checklist.md +43 -11
- package/.claude/skills/clarify/SKILL.md +70 -11
- package/.claude/skills/constitution/SKILL.md +61 -3
- package/.claude/skills/constitution/templates/constitution.md +241 -160
- package/.claude/skills/constitution/templates/prompt.md +150 -20
- package/.claude/skills/convert/SKILL.md +248 -0
- package/.claude/skills/implement/SKILL.md +82 -34
- package/.claude/skills/plan/SKILL.md +136 -27
- package/.claude/skills/plan/templates/plan.md +92 -9
- package/.claude/skills/specify/SKILL.md +110 -19
- package/.claude/skills/specify/templates/spec.md +40 -5
- package/.claude/skills/tasks/SKILL.md +75 -1
- package/.claude/skills/tasks/templates/tasks.md +5 -4
- package/CHANGELOG.md +63 -1
- package/MANUAL.md +40 -0
- package/README.md +262 -10
- package/bin/relentless.ts +292 -5
- package/package.json +2 -2
- package/relentless/config.json +46 -2
- package/relentless/constitution.md +2 -2
- package/relentless/prompt.md +97 -18
- package/src/agents/amp.ts +53 -13
- package/src/agents/claude.ts +70 -15
- package/src/agents/codex.ts +73 -14
- package/src/agents/droid.ts +68 -14
- package/src/agents/exec.ts +96 -0
- package/src/agents/gemini.ts +59 -16
- package/src/agents/opencode.ts +188 -9
- package/src/cli/fallback-order.ts +210 -0
- package/src/cli/index.ts +63 -0
- package/src/cli/mode-flag.ts +198 -0
- package/src/cli/review-flags.ts +192 -0
- package/src/config/loader.ts +16 -1
- package/src/config/schema.ts +157 -2
- package/src/execution/runner.ts +144 -21
- package/src/init/scaffolder.ts +285 -25
- package/src/prd/parser.ts +92 -1
- package/src/prd/types.ts +136 -0
- package/src/review/index.ts +92 -0
- package/src/review/prompt.ts +293 -0
- package/src/review/runner.ts +337 -0
- package/src/review/tasks/docs.ts +529 -0
- package/src/review/tasks/index.ts +80 -0
- package/src/review/tasks/lint.ts +436 -0
- package/src/review/tasks/quality.ts +760 -0
- package/src/review/tasks/security.ts +452 -0
- package/src/review/tasks/test.ts +456 -0
- package/src/review/tasks/typecheck.ts +323 -0
- package/src/review/types.ts +139 -0
- package/src/routing/cascade.ts +310 -0
- package/src/routing/classifier.ts +338 -0
- package/src/routing/estimate.ts +270 -0
- package/src/routing/fallback.ts +512 -0
- package/src/routing/index.ts +124 -0
- package/src/routing/registry.ts +501 -0
- package/src/routing/report.ts +570 -0
- package/src/routing/router.ts +287 -0
- package/src/tui/App.tsx +2 -0
- package/src/tui/TUIRunner.tsx +103 -8
- package/src/tui/components/CurrentStory.tsx +23 -1
- package/src/tui/hooks/useTUI.ts +1 -0
- package/src/tui/types.ts +9 -0
|
@@ -0,0 +1,760 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Quality Micro-Task
|
|
3
|
+
*
|
|
4
|
+
* Scans changed files for dead code, duplication, and complexity issues.
|
|
5
|
+
*
|
|
6
|
+
* Features:
|
|
7
|
+
* - Retrieves changed files from git diff
|
|
8
|
+
* - Detects unused exports (dead code)
|
|
9
|
+
* - Detects code duplication (>20 similar tokens)
|
|
10
|
+
* - Detects high function complexity (>10)
|
|
11
|
+
* - Generates fix tasks for high-impact issues
|
|
12
|
+
* - Duplication is advisory only (no fix tasks)
|
|
13
|
+
* - Supports @relentless-ignore-quality comment
|
|
14
|
+
*
|
|
15
|
+
* @module src/review/tasks/quality
|
|
16
|
+
*/
|
|
17
|
+
|
|
18
|
+
import type { ReviewTaskResult, FixTask } from "../types";
|
|
19
|
+
|
|
20
|
+
/**
 * Types of quality issues the scanner can report.
 */
export type QualityIssueType = "dead_code" | "duplication" | "high_complexity";

/**
 * A detected quality issue.
 *
 * Only the fields relevant to the issue's `type` are populated:
 * `symbol` for dead_code, `functionName`/`score` for high_complexity,
 * `similarity`/`files` for duplication.
 */
export interface QualityIssue {
  /** Type of quality issue */
  type: QualityIssueType;
  /** File path where found */
  file: string;
  /** Line number (1-based, optional) */
  line?: number;
  /** Description of the issue */
  message: string;
  /** Symbol name (for dead code) */
  symbol?: string;
  /** Function name (for complexity) */
  functionName?: string;
  /** Complexity score (for high_complexity) */
  score?: number;
  /** Similarity percentage (for duplication) */
  similarity?: number;
  /** Related files (for duplication — both paths of the similar pair) */
  files?: string[];
}

/**
 * Extended result type for the quality micro-task.
 */
export interface QualityResult extends ReviewTaskResult {
  /** The command that was executed */
  command: string;
  /** Number of files scanned */
  scannedFiles: number;
  /** Detected quality issues */
  issues?: QualityIssue[];
  /** Number of dead code issues */
  deadCodeCount: number;
  /** Number of duplication issues */
  duplications: number;
  /** Number of complexity issues */
  complexityIssues: number;
  /** Overall quality score (0-100; 100 = no issues) */
  overallQualityScore: number;
  /** Human-readable summary */
  summary?: string;
}

/**
 * Options for running the quality scan.
 * The reader hooks exist so tests can inject file content instead of
 * touching the filesystem.
 */
export interface QualityOptions {
  /** Working directory for the command */
  cwd?: string;
  /** Custom file reader for testing */
  readFile?: (path: string) => Promise<string>;
  /** Custom all-files reader for testing (for dead code detection) */
  readAllFiles?: () => Promise<Map<string, string>>;
}
|
|
82
|
+
|
|
83
|
+
/**
 * File extensions treated as scannable source code.
 */
const CODE_EXTENSIONS = [".ts", ".tsx", ".js", ".jsx", ".mjs", ".cjs"];

/**
 * Regexes counting cyclomatic-complexity decision points.
 * Matches: if, else if, while, for, case, catch, &&, ||, ?:
 *
 * NOTE(review): `\bif\s*\(` also matches the `if` inside `else if (`, so an
 * else-if is counted twice (once here, once by the dedicated pattern) —
 * confirm this inflation is intended for the heuristic.
 * NOTE(review): the ternary pattern can also match TypeScript optional
 * properties / type annotations (`x?: T`), slightly inflating scores.
 */
const COMPLEXITY_PATTERNS = [
  /\bif\s*\(/g,
  /\belse\s+if\s*\(/g,
  /\bwhile\s*\(/g,
  /\bfor\s*\(/g,
  /\bcase\s+[^:]+:/g,
  /\bcatch\s*\(/g,
  /\?\s*[^:]+\s*:/g, // ternary
  /&&/g,
  /\|\|/g,
];
|
|
103
|
+
|
|
104
|
+
/**
|
|
105
|
+
* Export patterns
|
|
106
|
+
*/
|
|
107
|
+
const EXPORT_PATTERNS = [
|
|
108
|
+
{ pattern: /export\s+function\s+(\w+)/g, isDefault: false },
|
|
109
|
+
{ pattern: /export\s+const\s+(\w+)/g, isDefault: false },
|
|
110
|
+
{ pattern: /export\s+let\s+(\w+)/g, isDefault: false },
|
|
111
|
+
{ pattern: /export\s+var\s+(\w+)/g, isDefault: false },
|
|
112
|
+
{ pattern: /export\s+class\s+(\w+)/g, isDefault: false },
|
|
113
|
+
{ pattern: /export\s+type\s+(\w+)/g, isDefault: false },
|
|
114
|
+
{ pattern: /export\s+interface\s+(\w+)/g, isDefault: false },
|
|
115
|
+
{ pattern: /export\s+enum\s+(\w+)/g, isDefault: false },
|
|
116
|
+
{ pattern: /export\s+default\s+/g, isDefault: true },
|
|
117
|
+
];
|
|
118
|
+
|
|
119
|
+
/**
 * Opt-out marker: any file whose content contains this comment anywhere is
 * skipped entirely by the quality scan.
 */
const IGNORE_PATTERN = /@relentless-ignore-quality/;
|
|
123
|
+
|
|
124
|
+
/**
|
|
125
|
+
* Check if a file should be scanned
|
|
126
|
+
*/
|
|
127
|
+
function shouldScanFile(path: string): boolean {
|
|
128
|
+
return CODE_EXTENSIONS.some((ext) => path.endsWith(ext));
|
|
129
|
+
}
|
|
130
|
+
|
|
131
|
+
/**
|
|
132
|
+
* Check if file has ignore comment
|
|
133
|
+
*/
|
|
134
|
+
function hasIgnoreComment(content: string): boolean {
|
|
135
|
+
return IGNORE_PATTERN.test(content);
|
|
136
|
+
}
|
|
137
|
+
|
|
138
|
+
/**
|
|
139
|
+
* Calculate cyclomatic complexity for a function body
|
|
140
|
+
*/
|
|
141
|
+
function calculateComplexity(code: string): number {
|
|
142
|
+
let complexity = 1; // Base complexity
|
|
143
|
+
|
|
144
|
+
for (const pattern of COMPLEXITY_PATTERNS) {
|
|
145
|
+
const matches = code.match(pattern);
|
|
146
|
+
if (matches) {
|
|
147
|
+
complexity += matches.length;
|
|
148
|
+
}
|
|
149
|
+
}
|
|
150
|
+
|
|
151
|
+
return complexity;
|
|
152
|
+
}
|
|
153
|
+
|
|
154
|
+
/**
|
|
155
|
+
* Extract function bodies from code
|
|
156
|
+
*/
|
|
157
|
+
function extractFunctions(
|
|
158
|
+
code: string
|
|
159
|
+
): Array<{ name: string; body: string; line: number }> {
|
|
160
|
+
const functions: Array<{ name: string; body: string; line: number }> = [];
|
|
161
|
+
const lines = code.split("\n");
|
|
162
|
+
|
|
163
|
+
// Simple function extraction - find function declarations and extract bodies
|
|
164
|
+
let currentFunction: { name: string; startLine: number; braceCount: number } | null =
|
|
165
|
+
null;
|
|
166
|
+
let functionBody = "";
|
|
167
|
+
|
|
168
|
+
for (let i = 0; i < lines.length; i++) {
|
|
169
|
+
const line = lines[i];
|
|
170
|
+
|
|
171
|
+
if (!currentFunction) {
|
|
172
|
+
// Look for function start
|
|
173
|
+
// Regular function
|
|
174
|
+
const funcMatch = line.match(/function\s+(\w+)\s*\([^)]*\)\s*\{?/);
|
|
175
|
+
if (funcMatch) {
|
|
176
|
+
currentFunction = {
|
|
177
|
+
name: funcMatch[1],
|
|
178
|
+
startLine: i + 1,
|
|
179
|
+
braceCount: (line.match(/\{/g) || []).length - (line.match(/\}/g) || []).length,
|
|
180
|
+
};
|
|
181
|
+
functionBody = line;
|
|
182
|
+
if (currentFunction.braceCount === 0 && line.includes("{")) {
|
|
183
|
+
// Single line function or starting
|
|
184
|
+
currentFunction.braceCount = 1;
|
|
185
|
+
}
|
|
186
|
+
continue;
|
|
187
|
+
}
|
|
188
|
+
|
|
189
|
+
// Arrow function
|
|
190
|
+
const arrowMatch = line.match(
|
|
191
|
+
/(?:const|let|var)\s+(\w+)\s*=\s*(?:\([^)]*\)|[^=])\s*=>/
|
|
192
|
+
);
|
|
193
|
+
if (arrowMatch) {
|
|
194
|
+
currentFunction = {
|
|
195
|
+
name: arrowMatch[1],
|
|
196
|
+
startLine: i + 1,
|
|
197
|
+
braceCount: (line.match(/\{/g) || []).length - (line.match(/\}/g) || []).length,
|
|
198
|
+
};
|
|
199
|
+
functionBody = line;
|
|
200
|
+
continue;
|
|
201
|
+
}
|
|
202
|
+
|
|
203
|
+
// Class method
|
|
204
|
+
const methodMatch = line.match(/^\s*(\w+)\s*\([^)]*\)\s*\{/);
|
|
205
|
+
if (methodMatch && !line.includes("function") && !line.includes("=>")) {
|
|
206
|
+
currentFunction = {
|
|
207
|
+
name: methodMatch[1],
|
|
208
|
+
startLine: i + 1,
|
|
209
|
+
braceCount: (line.match(/\{/g) || []).length - (line.match(/\}/g) || []).length,
|
|
210
|
+
};
|
|
211
|
+
functionBody = line;
|
|
212
|
+
continue;
|
|
213
|
+
}
|
|
214
|
+
} else {
|
|
215
|
+
// Continue collecting function body
|
|
216
|
+
functionBody += "\n" + line;
|
|
217
|
+
currentFunction.braceCount +=
|
|
218
|
+
(line.match(/\{/g) || []).length - (line.match(/\}/g) || []).length;
|
|
219
|
+
|
|
220
|
+
if (currentFunction.braceCount <= 0) {
|
|
221
|
+
// Function ended
|
|
222
|
+
functions.push({
|
|
223
|
+
name: currentFunction.name,
|
|
224
|
+
body: functionBody,
|
|
225
|
+
line: currentFunction.startLine,
|
|
226
|
+
});
|
|
227
|
+
currentFunction = null;
|
|
228
|
+
functionBody = "";
|
|
229
|
+
}
|
|
230
|
+
}
|
|
231
|
+
}
|
|
232
|
+
|
|
233
|
+
return functions;
|
|
234
|
+
}
|
|
235
|
+
|
|
236
|
+
/**
|
|
237
|
+
* Analyze code complexity
|
|
238
|
+
*
|
|
239
|
+
* @param content - File content to analyze
|
|
240
|
+
* @param filePath - Path to the file (for context)
|
|
241
|
+
* @returns Array of high complexity issues (complexity > 10)
|
|
242
|
+
*/
|
|
243
|
+
export function analyzeComplexity(
|
|
244
|
+
content: string,
|
|
245
|
+
filePath: string
|
|
246
|
+
): QualityIssue[] {
|
|
247
|
+
const issues: QualityIssue[] = [];
|
|
248
|
+
const functions = extractFunctions(content);
|
|
249
|
+
|
|
250
|
+
for (const func of functions) {
|
|
251
|
+
const complexity = calculateComplexity(func.body);
|
|
252
|
+
|
|
253
|
+
if (complexity > 10) {
|
|
254
|
+
issues.push({
|
|
255
|
+
type: "high_complexity",
|
|
256
|
+
file: filePath,
|
|
257
|
+
line: func.line,
|
|
258
|
+
message: `Function ${func.name} has cyclomatic complexity of ${complexity}`,
|
|
259
|
+
functionName: func.name,
|
|
260
|
+
score: complexity,
|
|
261
|
+
});
|
|
262
|
+
}
|
|
263
|
+
}
|
|
264
|
+
|
|
265
|
+
return issues;
|
|
266
|
+
}
|
|
267
|
+
|
|
268
|
+
/**
|
|
269
|
+
* Extract exports from a file
|
|
270
|
+
*/
|
|
271
|
+
function extractExports(content: string): string[] {
|
|
272
|
+
const exports: string[] = [];
|
|
273
|
+
|
|
274
|
+
for (const { pattern, isDefault } of EXPORT_PATTERNS) {
|
|
275
|
+
pattern.lastIndex = 0;
|
|
276
|
+
let match;
|
|
277
|
+
while ((match = pattern.exec(content)) !== null) {
|
|
278
|
+
const symbol = isDefault ? "default" : match[1];
|
|
279
|
+
if (symbol && !exports.includes(symbol)) {
|
|
280
|
+
exports.push(symbol);
|
|
281
|
+
}
|
|
282
|
+
}
|
|
283
|
+
}
|
|
284
|
+
|
|
285
|
+
return exports;
|
|
286
|
+
}
|
|
287
|
+
|
|
288
|
+
/**
|
|
289
|
+
* Check if a symbol is used in any file
|
|
290
|
+
*/
|
|
291
|
+
function isSymbolUsed(
|
|
292
|
+
symbol: string,
|
|
293
|
+
sourceFile: string,
|
|
294
|
+
files: Map<string, string>
|
|
295
|
+
): boolean {
|
|
296
|
+
for (const [filePath, content] of files) {
|
|
297
|
+
if (filePath === sourceFile) continue;
|
|
298
|
+
|
|
299
|
+
// Check for import
|
|
300
|
+
const importPattern = new RegExp(
|
|
301
|
+
`import\\s*\\{[^}]*\\b${symbol}\\b[^}]*\\}\\s*from`,
|
|
302
|
+
"g"
|
|
303
|
+
);
|
|
304
|
+
if (importPattern.test(content)) {
|
|
305
|
+
return true;
|
|
306
|
+
}
|
|
307
|
+
|
|
308
|
+
// Check for re-export
|
|
309
|
+
const reexportPattern = new RegExp(
|
|
310
|
+
`export\\s*\\{[^}]*\\b${symbol}\\b[^}]*\\}\\s*from`,
|
|
311
|
+
"g"
|
|
312
|
+
);
|
|
313
|
+
if (reexportPattern.test(content)) {
|
|
314
|
+
return true;
|
|
315
|
+
}
|
|
316
|
+
|
|
317
|
+
// Check for default import
|
|
318
|
+
if (symbol === "default") {
|
|
319
|
+
const defaultImportPattern = new RegExp(
|
|
320
|
+
`import\\s+\\w+\\s+from\\s+['"]\\..*${sourceFile.replace(/\.[^.]+$/, "")}`,
|
|
321
|
+
"g"
|
|
322
|
+
);
|
|
323
|
+
if (defaultImportPattern.test(content)) {
|
|
324
|
+
return true;
|
|
325
|
+
}
|
|
326
|
+
}
|
|
327
|
+
}
|
|
328
|
+
|
|
329
|
+
return false;
|
|
330
|
+
}
|
|
331
|
+
|
|
332
|
+
/**
|
|
333
|
+
* Detect unused exports
|
|
334
|
+
*
|
|
335
|
+
* @param files - Map of file paths to their content
|
|
336
|
+
* @param changedFiles - Array of changed file paths to check
|
|
337
|
+
* @returns Array of dead code issues
|
|
338
|
+
*/
|
|
339
|
+
export function detectUnusedExports(
|
|
340
|
+
files: Map<string, string>,
|
|
341
|
+
changedFiles: string[]
|
|
342
|
+
): QualityIssue[] {
|
|
343
|
+
const issues: QualityIssue[] = [];
|
|
344
|
+
|
|
345
|
+
for (const filePath of changedFiles) {
|
|
346
|
+
const content = files.get(filePath);
|
|
347
|
+
if (!content) continue;
|
|
348
|
+
|
|
349
|
+
const exports = extractExports(content);
|
|
350
|
+
|
|
351
|
+
for (const symbol of exports) {
|
|
352
|
+
if (!isSymbolUsed(symbol, filePath, files)) {
|
|
353
|
+
issues.push({
|
|
354
|
+
type: "dead_code",
|
|
355
|
+
file: filePath,
|
|
356
|
+
message: `Export '${symbol}' is not used anywhere in the codebase`,
|
|
357
|
+
symbol,
|
|
358
|
+
});
|
|
359
|
+
}
|
|
360
|
+
}
|
|
361
|
+
}
|
|
362
|
+
|
|
363
|
+
return issues;
|
|
364
|
+
}
|
|
365
|
+
|
|
366
|
+
/**
|
|
367
|
+
* Simple tokenizer for duplication detection
|
|
368
|
+
*/
|
|
369
|
+
function tokenize(content: string): string[] {
|
|
370
|
+
// Remove comments
|
|
371
|
+
const noComments = content
|
|
372
|
+
.replace(/\/\*[\s\S]*?\*\//g, "")
|
|
373
|
+
.replace(/\/\/.*/g, "");
|
|
374
|
+
|
|
375
|
+
// Split into tokens (words, operators, etc.)
|
|
376
|
+
const tokens = noComments.match(/\w+|[^\s\w]/g) || [];
|
|
377
|
+
return tokens;
|
|
378
|
+
}
|
|
379
|
+
|
|
380
|
+
/**
|
|
381
|
+
* Calculate token similarity between two arrays
|
|
382
|
+
*/
|
|
383
|
+
function calculateSimilarity(tokens1: string[], tokens2: string[]): number {
|
|
384
|
+
if (tokens1.length === 0 || tokens2.length === 0) return 0;
|
|
385
|
+
|
|
386
|
+
const set1 = new Set(tokens1);
|
|
387
|
+
const set2 = new Set(tokens2);
|
|
388
|
+
|
|
389
|
+
let intersection = 0;
|
|
390
|
+
for (const token of set1) {
|
|
391
|
+
if (set2.has(token)) {
|
|
392
|
+
intersection++;
|
|
393
|
+
}
|
|
394
|
+
}
|
|
395
|
+
|
|
396
|
+
const union = new Set([...tokens1, ...tokens2]).size;
|
|
397
|
+
return (intersection / union) * 100;
|
|
398
|
+
}
|
|
399
|
+
|
|
400
|
+
/**
|
|
401
|
+
* Detect code duplication
|
|
402
|
+
*
|
|
403
|
+
* @param files - Map of file paths to their content
|
|
404
|
+
* @param changedFiles - Array of changed file paths to check
|
|
405
|
+
* @returns Array of duplication issues
|
|
406
|
+
*/
|
|
407
|
+
export function detectDuplication(
|
|
408
|
+
files: Map<string, string>,
|
|
409
|
+
changedFiles: string[]
|
|
410
|
+
): QualityIssue[] {
|
|
411
|
+
const issues: QualityIssue[] = [];
|
|
412
|
+
const tokenizedFiles: Array<{ path: string; tokens: string[] }> = [];
|
|
413
|
+
|
|
414
|
+
// Tokenize all changed files
|
|
415
|
+
for (const filePath of changedFiles) {
|
|
416
|
+
const content = files.get(filePath);
|
|
417
|
+
if (!content) continue;
|
|
418
|
+
|
|
419
|
+
const tokens = tokenize(content);
|
|
420
|
+
if (tokens.length >= 20) {
|
|
421
|
+
tokenizedFiles.push({ path: filePath, tokens });
|
|
422
|
+
}
|
|
423
|
+
}
|
|
424
|
+
|
|
425
|
+
// Compare each pair of files
|
|
426
|
+
for (let i = 0; i < tokenizedFiles.length; i++) {
|
|
427
|
+
for (let j = i + 1; j < tokenizedFiles.length; j++) {
|
|
428
|
+
const file1 = tokenizedFiles[i];
|
|
429
|
+
const file2 = tokenizedFiles[j];
|
|
430
|
+
|
|
431
|
+
// Compare tokens
|
|
432
|
+
const similarity = calculateSimilarity(file1.tokens, file2.tokens);
|
|
433
|
+
|
|
434
|
+
// If > 70% similarity and both have > 20 tokens, flag as duplication
|
|
435
|
+
if (
|
|
436
|
+
similarity > 70 &&
|
|
437
|
+
file1.tokens.length > 20 &&
|
|
438
|
+
file2.tokens.length > 20
|
|
439
|
+
) {
|
|
440
|
+
issues.push({
|
|
441
|
+
type: "duplication",
|
|
442
|
+
file: file1.path,
|
|
443
|
+
message: `High similarity (${similarity.toFixed(1)}%) detected between files`,
|
|
444
|
+
similarity,
|
|
445
|
+
files: [file1.path, file2.path],
|
|
446
|
+
});
|
|
447
|
+
}
|
|
448
|
+
}
|
|
449
|
+
}
|
|
450
|
+
|
|
451
|
+
return issues;
|
|
452
|
+
}
|
|
453
|
+
|
|
454
|
+
/**
|
|
455
|
+
* Generate human-readable summary
|
|
456
|
+
*/
|
|
457
|
+
function generateSummary(
|
|
458
|
+
issues: QualityIssue[],
|
|
459
|
+
scannedFiles: number
|
|
460
|
+
): string {
|
|
461
|
+
if (issues.length === 0) {
|
|
462
|
+
return `${scannedFiles} file${scannedFiles !== 1 ? "s" : ""} scanned, no quality issues found`;
|
|
463
|
+
}
|
|
464
|
+
|
|
465
|
+
const byType: Record<QualityIssueType, number> = {
|
|
466
|
+
dead_code: 0,
|
|
467
|
+
duplication: 0,
|
|
468
|
+
high_complexity: 0,
|
|
469
|
+
};
|
|
470
|
+
|
|
471
|
+
for (const issue of issues) {
|
|
472
|
+
byType[issue.type]++;
|
|
473
|
+
}
|
|
474
|
+
|
|
475
|
+
const parts: string[] = [];
|
|
476
|
+
parts.push(`${scannedFiles} file${scannedFiles !== 1 ? "s" : ""} scanned`);
|
|
477
|
+
|
|
478
|
+
const issueParts: string[] = [];
|
|
479
|
+
if (byType.dead_code > 0) issueParts.push(`${byType.dead_code} dead code`);
|
|
480
|
+
if (byType.duplication > 0)
|
|
481
|
+
issueParts.push(`${byType.duplication} duplication`);
|
|
482
|
+
if (byType.high_complexity > 0)
|
|
483
|
+
issueParts.push(`${byType.high_complexity} complexity`);
|
|
484
|
+
|
|
485
|
+
if (issueParts.length > 0) {
|
|
486
|
+
parts.push(issueParts.join(", "));
|
|
487
|
+
}
|
|
488
|
+
|
|
489
|
+
return parts.join(", ");
|
|
490
|
+
}
|
|
491
|
+
|
|
492
|
+
/**
|
|
493
|
+
* Calculate overall quality score (0-100)
|
|
494
|
+
*/
|
|
495
|
+
function calculateQualityScore(
|
|
496
|
+
issues: QualityIssue[],
|
|
497
|
+
scannedFiles: number
|
|
498
|
+
): number {
|
|
499
|
+
if (scannedFiles === 0) return 100;
|
|
500
|
+
|
|
501
|
+
let score = 100;
|
|
502
|
+
|
|
503
|
+
for (const issue of issues) {
|
|
504
|
+
switch (issue.type) {
|
|
505
|
+
case "dead_code":
|
|
506
|
+
score -= 5;
|
|
507
|
+
break;
|
|
508
|
+
case "duplication":
|
|
509
|
+
score -= 3;
|
|
510
|
+
break;
|
|
511
|
+
case "high_complexity":
|
|
512
|
+
score -= issue.score && issue.score > 20 ? 10 : 5;
|
|
513
|
+
break;
|
|
514
|
+
}
|
|
515
|
+
}
|
|
516
|
+
|
|
517
|
+
return Math.max(0, Math.min(100, score));
|
|
518
|
+
}
|
|
519
|
+
|
|
520
|
+
/**
|
|
521
|
+
* Create a fix task from a quality issue
|
|
522
|
+
*/
|
|
523
|
+
function createFixTask(issue: QualityIssue): FixTask {
|
|
524
|
+
let description: string;
|
|
525
|
+
|
|
526
|
+
switch (issue.type) {
|
|
527
|
+
case "dead_code":
|
|
528
|
+
description = `Remove unused export: ${issue.symbol}`;
|
|
529
|
+
break;
|
|
530
|
+
case "high_complexity":
|
|
531
|
+
description = `Refactor function ${issue.functionName}: complexity ${issue.score}`;
|
|
532
|
+
break;
|
|
533
|
+
default:
|
|
534
|
+
description = issue.message;
|
|
535
|
+
}
|
|
536
|
+
|
|
537
|
+
return {
|
|
538
|
+
type: "quality_fix",
|
|
539
|
+
file: issue.file,
|
|
540
|
+
line: issue.line,
|
|
541
|
+
description,
|
|
542
|
+
priority: "medium",
|
|
543
|
+
};
|
|
544
|
+
}
|
|
545
|
+
|
|
546
|
+
/**
 * Run the quality micro-task.
 *
 * Retrieves changed files from git diff, analyzes them for quality issues
 * (complexity per file, then dead code and duplication across files), and
 * generates fix tasks for high-impact problems.
 *
 * @param options - Options including working directory and custom file readers
 * @returns QualityResult with success status, issues, and fix tasks
 *
 * @example
 * ```typescript
 * const result = await runQuality({ cwd: "/path/to/project" });
 * if (!result.success) {
 *   console.log(`${result.issues?.length} quality issues found`);
 *   result.fixTasks.forEach(task => console.log(task.description));
 * }
 * ```
 */
export async function runQuality(
  options: QualityOptions = {}
): Promise<QualityResult> {
  const cwd = options.cwd || process.cwd();
  const command = "git diff --name-only HEAD~1";
  const startTime = Date.now();

  try {
    // Get list of changed files (Bun-specific subprocess API).
    const proc = Bun.spawn(["git", "diff", "--name-only", "HEAD~1"], {
      cwd,
      stdout: "pipe",
      stderr: "pipe",
    });

    await proc.exited;
    const stdout = await proc.stdout.text();
    // stderr is captured but not used since git diff errors are rare.
    // NOTE(review): the exit code is also unchecked — e.g. in a repo with a
    // single commit, HEAD~1 does not exist, git fails, stdout is empty, and
    // the scan silently reports success with 0 files. Confirm intended.
    await proc.stderr.text();

    // Parse changed files: one path per line, keep only scannable code files.
    const changedFiles = stdout
      .split("\n")
      .map((f) => f.trim())
      .filter((f) => f.length > 0 && shouldScanFile(f));

    const duration = Date.now() - startTime;

    // If no code files changed, return success
    if (changedFiles.length === 0) {
      return {
        taskType: "quality",
        success: true,
        errorCount: 0,
        warningCount: 0,
        fixTasks: [],
        duration,
        command,
        scannedFiles: 0,
        issues: [],
        deadCodeCount: 0,
        duplications: 0,
        complexityIssues: 0,
        overallQualityScore: 100,
        summary: "0 files scanned, no code files in diff",
      };
    }

    // Read all files for analysis (reader hooks are injectable for tests).
    let allFiles: Map<string, string>;
    if (options.readAllFiles) {
      allFiles = await options.readAllFiles();
    } else {
      allFiles = new Map();
      for (const filePath of changedFiles) {
        try {
          let content: string;
          if (options.readFile) {
            content = await options.readFile(filePath);
          } else {
            const file = Bun.file(`${cwd}/${filePath}`);
            content = await file.text();
          }
          allFiles.set(filePath, content);
        } catch {
          // Skip files that can't be read
          continue;
        }
      }
    }

    const allIssues: QualityIssue[] = [];
    const filesToAnalyze: string[] = [];

    // Analyze each changed file
    for (const filePath of changedFiles) {
      try {
        // Prefer content already loaded above; fall back to reading it now
        // (covers the readAllFiles path, which may omit a changed file).
        let content: string;
        if (allFiles.has(filePath)) {
          content = allFiles.get(filePath)!;
        } else if (options.readFile) {
          content = await options.readFile(filePath);
          allFiles.set(filePath, content);
        } else {
          const file = Bun.file(`${cwd}/${filePath}`);
          content = await file.text();
          allFiles.set(filePath, content);
        }

        // Skip files with ignore comment
        if (hasIgnoreComment(content)) {
          continue;
        }

        // Track files that should be analyzed
        filesToAnalyze.push(filePath);

        // Analyze complexity
        const complexityIssues = analyzeComplexity(content, filePath);
        allIssues.push(...complexityIssues);
      } catch {
        // File unparseable, skip silently
        continue;
      }
    }

    // Detect unused exports (only for non-ignored files)
    const deadCodeIssues = detectUnusedExports(allFiles, filesToAnalyze);
    allIssues.push(...deadCodeIssues);

    // Detect duplication (only for non-ignored files)
    const duplicationIssues = detectDuplication(allFiles, filesToAnalyze);
    allIssues.push(...duplicationIssues);

    // Count issues by type
    const deadCodeCount = allIssues.filter((i) => i.type === "dead_code").length;
    const duplications = allIssues.filter((i) => i.type === "duplication").length;
    const complexityIssues = allIssues.filter(
      (i) => i.type === "high_complexity"
    ).length;

    // Generate fix tasks for high-impact issues
    const fixableIssues = allIssues.filter((issue) => {
      // Dead code always gets fix tasks
      if (issue.type === "dead_code") return true;
      // Complexity > 20 gets fix tasks
      if (issue.type === "high_complexity" && issue.score && issue.score > 20)
        return true;
      // Duplication is advisory only
      return false;
    });

    // Limit to top 10 most impactful.
    // NOTE(review): sort mutates fixableIssues in place; harmless here since
    // the filtered array is not reused afterwards.
    const sortedIssues = fixableIssues.sort((a, b) => {
      // Higher complexity first
      if (a.type === "high_complexity" && b.type === "high_complexity") {
        return (b.score || 0) - (a.score || 0);
      }
      // Complexity before dead code
      if (a.type === "high_complexity") return -1;
      if (b.type === "high_complexity") return 1;
      return 0;
    });

    const topIssues = sortedIssues.slice(0, 10);
    const fixTasks = topIssues.map(createFixTask);

    // Calculate quality score
    const overallQualityScore = calculateQualityScore(
      allIssues,
      changedFiles.length
    );

    // Determine success - fail if there are high complexity issues > 20
    const criticalComplexityIssues = allIssues.filter(
      (i) => i.type === "high_complexity" && i.score && i.score > 20
    );
    const success = criticalComplexityIssues.length === 0;

    return {
      taskType: "quality",
      success,
      errorCount: criticalComplexityIssues.length,
      warningCount: allIssues.length - criticalComplexityIssues.length,
      fixTasks,
      duration: Date.now() - startTime,
      command,
      scannedFiles: changedFiles.length,
      issues: allIssues,
      deadCodeCount,
      duplications,
      complexityIssues,
      overallQualityScore,
      summary: generateSummary(allIssues, changedFiles.length),
    };
  } catch (error) {
    // Any unexpected failure (spawn error, file I/O, etc.) yields a failed
    // result rather than throwing to the caller.
    const duration = Date.now() - startTime;
    const errorMessage = error instanceof Error ? error.message : String(error);

    return {
      taskType: "quality",
      success: false,
      errorCount: 1,
      warningCount: 0,
      fixTasks: [],
      duration,
      command,
      scannedFiles: 0,
      issues: [],
      deadCodeCount: 0,
      duplications: 0,
      complexityIssues: 0,
      overallQualityScore: 0,
      error: `Quality scan failed: ${errorMessage}`,
    };
  }
}
|