@wbern/obscene 2.1.1 → 2.2.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +41 -0
- package/dist/cli.js +32 -1207
- package/package.json +1 -1
package/README.md
CHANGED
|
@@ -202,6 +202,47 @@ When the analyzed file set has no measurable cyclomatic complexity (every scanne
|
|
|
202
202
|
|
|
203
203
|
`fileCount` counts files *after* exclusion (`.obsignore` and `--exclude` patterns are already applied). Treat HOT/WARM/COOL as relative groupings rather than risk labels when `totalComplexity` is 0.
|
|
204
204
|
|
|
205
|
+
### Confidence
|
|
206
|
+
|
|
207
|
+
Each ranking and the coupling table carry an epistemic confidence stamp so the tool never oversells a thin sample:
|
|
208
|
+
|
|
209
|
+
| Level | Meaning |
|
|
210
|
+
|-------|---------|
|
|
211
|
+
| `INCONCLUSIVE` | Sample is below the weak floor — the ranking is suppressed (routed to `skipped` in JSON). |
|
|
212
|
+
| `WEAK` | Above the floor but too few samples for stable rank ordering. Treat as suggestive, not actionable. |
|
|
213
|
+
| `PLAUSIBLE` | Sample supports the ranking. Findings are worth reviewing. |
|
|
214
|
+
| `ACCEPTABLE` | Ceiling. Sample is large enough that the ranking is stable. **Never** asserts the code itself is good or bad. |
|
|
215
|
+
|
|
216
|
+
The thresholds are engineering judgment, not paper-prescribed. The defect/coupling floor of 5 commits matches code-maat's `--min-revs` default ([Adam Tornhill](https://github.com/adamtornhill/code-maat)); CodeScene's documented temporal-coupling default filters files with fewer than 10 commits. Upper tiers (plausible, acceptable) are scaled from there.
|
|
217
|
+
|
|
218
|
+
| Dimension | Sample metric | Weak / Plausible / Acceptable | Note |
|
|
219
|
+
|-----------|---------------|-------------------------------|------|
|
|
220
|
+
| Complexity | files with measurable complexity | 3 / 10 / 30 | Any rank ordering needs ≥ 3 items to be meaningful |
|
|
221
|
+
| Nesting | files with depth ≥ 3 | 3 / 10 / 30 | Depth-3 cut matches Campbell's compounding-nesting-penalty intuition (SonarSource 2018) |
|
|
222
|
+
| Defects | total `fix:` commits in window | 5 / 15 / 50 | Floor matches code-maat `--min-revs 5` |
|
|
223
|
+
| Authors | distinct authors on the most-touched file | 2 / 4 / 8 | Bird et al. (FSE 2011) shows minor contributors correlate with defects, but the floor is engineering judgment |
|
|
224
|
+
| Coupling | commits in window | 5 / 30 / 100 | Floor matches code-maat `--min-revs 5` |
|
|
225
|
+
| Composite (RRF) | number of input rankings | min-of-inputs over per-dimension confidences | Reciprocal Rank Fusion (Cormack et al., SIGIR 2009); `min` ensures the composite can never claim more confidence than its weakest input |
|
|
226
|
+
|
|
227
|
+
I want to be transparent: an earlier release of this section over-attributed thresholds to specific papers. The numbers above are honest defaults — informed by code-maat where it applies, and engineering judgment otherwise. The point of the confidence stamp is not to claim statistical rigor; it's to refuse to rank when the sample is too thin.
|
|
228
|
+
|
|
229
|
+
Every confidence stamp in JSON exposes its inputs so the rating is auditable:
|
|
230
|
+
|
|
231
|
+
```json
|
|
232
|
+
"confidence": {
|
|
233
|
+
"level": "plausible",
|
|
234
|
+
"reason": "42 fix: commits across 12 files (PLAUSIBLE sample size).",
|
|
235
|
+
"inputs": {
|
|
236
|
+
"metric": "fixCommits",
|
|
237
|
+
"value": 42,
|
|
238
|
+
"thresholds": { "weak": 5, "plausible": 15, "acceptable": 50 }
|
|
239
|
+
},
|
|
240
|
+
"source": "code-maat's --min-revs default of 5 (Adam Tornhill); higher tiers are engineering judgment. Gall et al. (IWPSE 2003) and Hassan (ICSE 2009) study co-change and change-entropy but do not prescribe a specific commit-count floor."
|
|
241
|
+
}
|
|
242
|
+
```
|
|
243
|
+
|
|
244
|
+
`ACCEPTABLE` is the deliberate ceiling — even with thousands of commits, the rankings remain candidates for review, not verdicts on code quality.
|
|
245
|
+
|
|
205
246
|
## Example output
|
|
206
247
|
|
|
207
248
|
```
|
package/dist/cli.js
CHANGED
|
@@ -1,1208 +1,33 @@
|
|
|
1
1
|
#!/usr/bin/env node
|
|
2
|
-
|
|
3
|
-
|
|
4
|
-
|
|
5
|
-
|
|
6
|
-
|
|
7
|
-
|
|
8
|
-
|
|
9
|
-
|
|
10
|
-
var
|
|
11
|
-
function
|
|
12
|
-
|
|
13
|
-
|
|
14
|
-
|
|
15
|
-
|
|
16
|
-
|
|
17
|
-
|
|
18
|
-
|
|
19
|
-
|
|
20
|
-
|
|
21
|
-
|
|
22
|
-
|
|
23
|
-
|
|
24
|
-
|
|
25
|
-
|
|
26
|
-
|
|
27
|
-
|
|
28
|
-
|
|
29
|
-
|
|
30
|
-
|
|
31
|
-
|
|
32
|
-
|
|
33
|
-
|
|
34
|
-
{ pattern: "test-helpers.*", comment: "Test helper files" },
|
|
35
|
-
{ pattern: "__tests__/**", comment: "Test directories" },
|
|
36
|
-
{ pattern: "__mocks__/**", comment: "Mock directories" },
|
|
37
|
-
{ pattern: "*.stories.*", comment: "Storybook stories" },
|
|
38
|
-
{ pattern: "*.d.ts", comment: "TypeScript declaration files" }
|
|
39
|
-
]
|
|
40
|
-
},
|
|
41
|
-
{
|
|
42
|
-
title: "Lock files and package manifests",
|
|
43
|
-
patterns: [
|
|
44
|
-
{ pattern: "package.json", comment: "npm package manifest" },
|
|
45
|
-
{ pattern: "package-lock.json", comment: "npm lock file" },
|
|
46
|
-
{ pattern: "pnpm-lock.yaml", comment: "pnpm lock file" },
|
|
47
|
-
{ pattern: "yarn.lock", comment: "Yarn lock file" },
|
|
48
|
-
{ pattern: "bun.lock", comment: "Bun lock file" }
|
|
49
|
-
]
|
|
50
|
-
}
|
|
51
|
-
];
|
|
52
|
-
var HOT_CUMULATIVE = 0.5;
|
|
53
|
-
var WARM_CUMULATIVE = 0.8;
|
|
54
|
-
var MIN_FIX_COMMITS = 5;
|
|
55
|
-
var MIN_FILES_WITH_FIXES = 3;
|
|
56
|
-
function isExcluded(location, patterns) {
|
|
57
|
-
return patterns.some((p) => p.test(location));
|
|
58
|
-
}
|
|
59
|
-
function globToRegex(pattern) {
|
|
60
|
-
const escaped = pattern.replace(/[.+^${}()|[\]\\]/g, "\\$&").replace(/\*\*/g, "\u27E8GLOBSTAR\u27E9").replace(/\*/g, "[^/]*").replace(/⟨GLOBSTAR⟩/g, ".*").replace(/\?/g, ".");
|
|
61
|
-
return new RegExp(escaped);
|
|
62
|
-
}
|
|
63
|
-
function normalizePath(p) {
|
|
64
|
-
const forwardSlash = p.replaceAll("\\", "/");
|
|
65
|
-
return forwardSlash.startsWith("./") ? forwardSlash.slice(2) : forwardSlash;
|
|
66
|
-
}
|
|
67
|
-
function runScc(excludes = []) {
|
|
68
|
-
const patterns = excludes.map(globToRegex);
|
|
69
|
-
let raw;
|
|
70
|
-
try {
|
|
71
|
-
raw = execSync("scc --by-file --format json --no-cocomo --no-gen", {
|
|
72
|
-
maxBuffer: 50 * 1024 * 1024,
|
|
73
|
-
stdio: ["pipe", "pipe", "pipe"]
|
|
74
|
-
});
|
|
75
|
-
} catch (err) {
|
|
76
|
-
if (err && typeof err === "object" && "code" in err && err.code === "ENOENT") {
|
|
77
|
-
throw new Error(
|
|
78
|
-
"scc not found. Install it: https://github.com/boyter/scc#install"
|
|
79
|
-
);
|
|
80
|
-
}
|
|
81
|
-
throw err;
|
|
82
|
-
}
|
|
83
|
-
const languages = JSON.parse(raw.toString());
|
|
84
|
-
const files = [];
|
|
85
|
-
for (const lang of languages) {
|
|
86
|
-
for (const f of lang.Files) {
|
|
87
|
-
const normalized = normalizePath(f.Location);
|
|
88
|
-
if (isExcluded(normalized, patterns)) continue;
|
|
89
|
-
files.push({
|
|
90
|
-
file: normalized,
|
|
91
|
-
code: f.Code,
|
|
92
|
-
lines: f.Lines,
|
|
93
|
-
complexity: f.Complexity,
|
|
94
|
-
comments: f.Comment,
|
|
95
|
-
complexityDensity: f.Code > 0 ? Math.round(f.Complexity / f.Code * 100) / 100 : 0
|
|
96
|
-
});
|
|
97
|
-
}
|
|
98
|
-
}
|
|
99
|
-
return files.sort((a, b) => b.complexity - a.complexity);
|
|
100
|
-
}
|
|
101
|
-
function gitFileCount(gitArgs, errorMessage) {
|
|
102
|
-
let raw;
|
|
103
|
-
try {
|
|
104
|
-
raw = execSync(gitArgs, {
|
|
105
|
-
maxBuffer: 50 * 1024 * 1024,
|
|
106
|
-
stdio: ["pipe", "pipe", "pipe"]
|
|
107
|
-
});
|
|
108
|
-
} catch {
|
|
109
|
-
throw new Error(errorMessage);
|
|
110
|
-
}
|
|
111
|
-
const counts = /* @__PURE__ */ new Map();
|
|
112
|
-
for (const line of raw.toString().split("\n")) {
|
|
113
|
-
const trimmed = normalizePath(line.trim());
|
|
114
|
-
if (!trimmed) continue;
|
|
115
|
-
counts.set(trimmed, (counts.get(trimmed) ?? 0) + 1);
|
|
116
|
-
}
|
|
117
|
-
return counts;
|
|
118
|
-
}
|
|
119
|
-
function getChurn(months) {
|
|
120
|
-
return gitFileCount(
|
|
121
|
-
`git log --since="${months} months ago" --format="" --name-only`,
|
|
122
|
-
"Not a git repository or git is not installed."
|
|
123
|
-
);
|
|
124
|
-
}
|
|
125
|
-
function getDefects(months) {
|
|
126
|
-
return gitFileCount(
|
|
127
|
-
`git log --since="${months} months ago" --grep="^fix" --format="" --name-only`,
|
|
128
|
-
"Not a git repository or git is not installed."
|
|
129
|
-
);
|
|
130
|
-
}
|
|
131
|
-
function getAuthors(months) {
|
|
132
|
-
let raw;
|
|
133
|
-
try {
|
|
134
|
-
raw = execSync(
|
|
135
|
-
`git log --since="${months} months ago" --format="COMMIT_SEP%n%aN" --name-only`,
|
|
136
|
-
{ maxBuffer: 50 * 1024 * 1024, stdio: ["pipe", "pipe", "pipe"] }
|
|
137
|
-
);
|
|
138
|
-
} catch {
|
|
139
|
-
throw new Error("Not a git repository or git is not installed.");
|
|
140
|
-
}
|
|
141
|
-
const authorSets = /* @__PURE__ */ new Map();
|
|
142
|
-
const blocks = raw.toString().split("COMMIT_SEP\n");
|
|
143
|
-
for (const block of blocks) {
|
|
144
|
-
if (!block.trim()) continue;
|
|
145
|
-
const lines = block.split("\n");
|
|
146
|
-
const author = lines[0].trim();
|
|
147
|
-
if (!author || author.endsWith("[bot]")) continue;
|
|
148
|
-
for (let i = 1; i < lines.length; i++) {
|
|
149
|
-
const file = normalizePath(lines[i].trim());
|
|
150
|
-
if (!file) continue;
|
|
151
|
-
let set = authorSets.get(file);
|
|
152
|
-
if (!set) {
|
|
153
|
-
set = /* @__PURE__ */ new Set();
|
|
154
|
-
authorSets.set(file, set);
|
|
155
|
-
}
|
|
156
|
-
set.add(author);
|
|
157
|
-
}
|
|
158
|
-
}
|
|
159
|
-
const counts = /* @__PURE__ */ new Map();
|
|
160
|
-
for (const [file, set] of authorSets) {
|
|
161
|
-
counts.set(file, set.size);
|
|
162
|
-
}
|
|
163
|
-
return counts;
|
|
164
|
-
}
|
|
165
|
-
var MAX_FILES_PER_COMMIT = 20;
|
|
166
|
-
function getCoChanges(months, excludes = []) {
|
|
167
|
-
const patterns = excludes.map(globToRegex);
|
|
168
|
-
let raw;
|
|
169
|
-
try {
|
|
170
|
-
raw = execSync(
|
|
171
|
-
`git log --since="${months} months ago" --format="COMMIT_SEP%n" --name-only`,
|
|
172
|
-
{ maxBuffer: 50 * 1024 * 1024, stdio: ["pipe", "pipe", "pipe"] }
|
|
173
|
-
);
|
|
174
|
-
} catch {
|
|
175
|
-
throw new Error("Not a git repository or git is not installed.");
|
|
176
|
-
}
|
|
177
|
-
const cochanges = /* @__PURE__ */ new Map();
|
|
178
|
-
const commits = raw.toString().split("COMMIT_SEP\n");
|
|
179
|
-
for (const commit of commits) {
|
|
180
|
-
if (!commit.trim()) continue;
|
|
181
|
-
const seen = /* @__PURE__ */ new Set();
|
|
182
|
-
for (const line of commit.split("\n")) {
|
|
183
|
-
const trimmed = normalizePath(line.trim());
|
|
184
|
-
if (!trimmed) continue;
|
|
185
|
-
if (!isExcluded(trimmed, patterns)) {
|
|
186
|
-
seen.add(trimmed);
|
|
187
|
-
}
|
|
188
|
-
}
|
|
189
|
-
const files = [...seen];
|
|
190
|
-
if (files.length < 2 || files.length > MAX_FILES_PER_COMMIT) continue;
|
|
191
|
-
for (let i = 0; i < files.length; i++) {
|
|
192
|
-
for (let j = i + 1; j < files.length; j++) {
|
|
193
|
-
const [a, b] = files[i] < files[j] ? [files[i], files[j]] : [files[j], files[i]];
|
|
194
|
-
const dirA = a.includes("/") ? a.slice(0, a.lastIndexOf("/")) : "";
|
|
195
|
-
const dirB = b.includes("/") ? b.slice(0, b.lastIndexOf("/")) : "";
|
|
196
|
-
if (dirA === dirB) continue;
|
|
197
|
-
const key = `${a}\0${b}`;
|
|
198
|
-
cochanges.set(key, (cochanges.get(key) ?? 0) + 1);
|
|
199
|
-
}
|
|
200
|
-
}
|
|
201
|
-
}
|
|
202
|
-
return cochanges;
|
|
203
|
-
}
|
|
204
|
-
function assignTiers(items, totalScore) {
|
|
205
|
-
let cumulative = 0;
|
|
206
|
-
for (const item of items) {
|
|
207
|
-
item.percentOfTotal = Math.round(item.score / totalScore * 1e3) / 10;
|
|
208
|
-
cumulative += item.score;
|
|
209
|
-
const cumulativeShare = cumulative / totalScore;
|
|
210
|
-
if (cumulativeShare <= HOT_CUMULATIVE) {
|
|
211
|
-
item.tier = "hot";
|
|
212
|
-
} else if (cumulativeShare <= WARM_CUMULATIVE) {
|
|
213
|
-
item.tier = "warm";
|
|
214
|
-
} else {
|
|
215
|
-
item.tier = "cool";
|
|
216
|
-
}
|
|
217
|
-
}
|
|
218
|
-
}
|
|
219
|
-
var RANKING_DEFS = [
|
|
220
|
-
{
|
|
221
|
-
key: "complexity",
|
|
222
|
-
label: "Complexity \xD7 Churn",
|
|
223
|
-
scoreFormula: "complexity \xD7 churn"
|
|
224
|
-
},
|
|
225
|
-
{
|
|
226
|
-
key: "nesting",
|
|
227
|
-
label: "Nesting \xD7 Churn",
|
|
228
|
-
scoreFormula: "maxNesting \xD7 churn"
|
|
229
|
-
},
|
|
230
|
-
{
|
|
231
|
-
key: "defects",
|
|
232
|
-
label: "Fix Activity \xD7 Churn",
|
|
233
|
-
scoreFormula: "fixes \xD7 churn"
|
|
234
|
-
},
|
|
235
|
-
{
|
|
236
|
-
key: "authors",
|
|
237
|
-
label: "Authors \xD7 Churn",
|
|
238
|
-
scoreFormula: "authors \xD7 churn"
|
|
239
|
-
}
|
|
240
|
-
];
|
|
241
|
-
function computeRanking(files, churn, metricExtractor, densityExtractor) {
|
|
242
|
-
const scored = files.map((f) => {
|
|
243
|
-
const fileChurn = churn.get(f.file) ?? 0;
|
|
244
|
-
const metricValue = metricExtractor(f);
|
|
245
|
-
return {
|
|
246
|
-
file: f.file,
|
|
247
|
-
score: metricValue * fileChurn,
|
|
248
|
-
percentOfTotal: 0,
|
|
249
|
-
tier: "cool",
|
|
250
|
-
churn: fileChurn,
|
|
251
|
-
metricValue,
|
|
252
|
-
metricDensity: densityExtractor ? densityExtractor(f) : void 0
|
|
253
|
-
};
|
|
254
|
-
}).filter((e) => e.score > 0).sort((a, b) => b.score - a.score);
|
|
255
|
-
const totalScore = scored.reduce((sum, e) => sum + e.score, 0);
|
|
256
|
-
if (totalScore === 0) return [];
|
|
257
|
-
assignTiers(scored, totalScore);
|
|
258
|
-
return scored;
|
|
259
|
-
}
|
|
260
|
-
function computeAllRankings(files, churn, defects, nestingDepths, authors, top) {
|
|
261
|
-
const extractors = {
|
|
262
|
-
complexity: {
|
|
263
|
-
extract: (f) => f.complexity,
|
|
264
|
-
density: (f) => f.complexityDensity
|
|
265
|
-
},
|
|
266
|
-
nesting: {
|
|
267
|
-
extract: (f) => nestingDepths.get(f.file) ?? 0
|
|
268
|
-
},
|
|
269
|
-
defects: {
|
|
270
|
-
extract: (f) => defects.get(f.file) ?? 0,
|
|
271
|
-
density: (f) => {
|
|
272
|
-
const d = defects.get(f.file) ?? 0;
|
|
273
|
-
return f.code > 0 ? Math.round(d / f.code * 1e4) / 1e4 : 0;
|
|
274
|
-
}
|
|
275
|
-
},
|
|
276
|
-
authors: {
|
|
277
|
-
extract: (f) => authors.get(f.file) ?? 0
|
|
278
|
-
}
|
|
279
|
-
};
|
|
280
|
-
const skipped = {};
|
|
281
|
-
const totalFixCommits = [...defects.values()].reduce((s, v) => s + v, 0);
|
|
282
|
-
const filesWithFixes = defects.size;
|
|
283
|
-
if (totalFixCommits < MIN_FIX_COMMITS || filesWithFixes < MIN_FILES_WITH_FIXES) {
|
|
284
|
-
skipped.defects = {
|
|
285
|
-
reason: `insufficient data (${totalFixCommits} fix: commits across ${filesWithFixes} files, need ${MIN_FIX_COMMITS}+ commits across ${MIN_FILES_WITH_FIXES}+ files)`,
|
|
286
|
-
suggestion: "Adopt conventional commits with fix: prefix. See conventionalcommits.org"
|
|
287
|
-
};
|
|
288
|
-
}
|
|
289
|
-
let maxAuthors = 0;
|
|
290
|
-
for (const count of authors.values()) {
|
|
291
|
-
if (count > maxAuthors) maxAuthors = count;
|
|
292
|
-
}
|
|
293
|
-
if (maxAuthors <= 1) {
|
|
294
|
-
skipped.authors = {
|
|
295
|
-
reason: "all files have the same author count \u2014 no variance to rank"
|
|
296
|
-
};
|
|
297
|
-
}
|
|
298
|
-
const rankings = {};
|
|
299
|
-
for (const def of RANKING_DEFS) {
|
|
300
|
-
if (skipped[def.key]) continue;
|
|
301
|
-
const ext = extractors[def.key];
|
|
302
|
-
const allEntries = computeRanking(files, churn, ext.extract, ext.density);
|
|
303
|
-
if (allEntries.length === 0) continue;
|
|
304
|
-
const limited = top > 0 ? allEntries.slice(0, top) : allEntries;
|
|
305
|
-
const tierCounts = {
|
|
306
|
-
hot: 0,
|
|
307
|
-
warm: 0,
|
|
308
|
-
cool: 0
|
|
309
|
-
};
|
|
310
|
-
for (const e of allEntries) {
|
|
311
|
-
tierCounts[e.tier]++;
|
|
312
|
-
}
|
|
313
|
-
rankings[def.key] = {
|
|
314
|
-
label: def.label,
|
|
315
|
-
scoreFormula: def.scoreFormula,
|
|
316
|
-
totalScore: allEntries.reduce((sum, e) => sum + e.score, 0),
|
|
317
|
-
tierCounts,
|
|
318
|
-
totalEntries: allEntries.length,
|
|
319
|
-
showing: limited.length,
|
|
320
|
-
entries: limited
|
|
321
|
-
};
|
|
322
|
-
}
|
|
323
|
-
return { rankings, skipped };
|
|
324
|
-
}
|
|
325
|
-
function getTrackedFiles() {
|
|
326
|
-
let raw;
|
|
327
|
-
try {
|
|
328
|
-
raw = execSync("git ls-files", {
|
|
329
|
-
maxBuffer: 50 * 1024 * 1024,
|
|
330
|
-
stdio: ["pipe", "pipe", "pipe"]
|
|
331
|
-
});
|
|
332
|
-
} catch {
|
|
333
|
-
throw new Error("Not a git repository or git is not installed.");
|
|
334
|
-
}
|
|
335
|
-
const set = /* @__PURE__ */ new Set();
|
|
336
|
-
for (const line of raw.toString().split("\n")) {
|
|
337
|
-
const trimmed = normalizePath(line.trim());
|
|
338
|
-
if (trimmed) set.add(trimmed);
|
|
339
|
-
}
|
|
340
|
-
return set;
|
|
341
|
-
}
|
|
342
|
-
function computeCoupling(cochanges, churn, complexityMap, minCochanges, trackedFiles) {
|
|
343
|
-
const entries = [];
|
|
344
|
-
for (const [key, count] of cochanges) {
|
|
345
|
-
if (count < minCochanges) continue;
|
|
346
|
-
const [file1, file2] = key.split("\0");
|
|
347
|
-
const churn1 = churn.get(file1) ?? 0;
|
|
348
|
-
const churn2 = churn.get(file2) ?? 0;
|
|
349
|
-
const minChurn = Math.min(churn1, churn2);
|
|
350
|
-
const degree = minChurn > 0 ? Math.round(count / minChurn * 1e3) / 10 : 0;
|
|
351
|
-
const totalComplexity = (complexityMap.get(file1) ?? 0) + (complexityMap.get(file2) ?? 0);
|
|
352
|
-
const entry = {
|
|
353
|
-
file1,
|
|
354
|
-
file2,
|
|
355
|
-
cochanges: count,
|
|
356
|
-
degree,
|
|
357
|
-
totalComplexity,
|
|
358
|
-
couplingScore: count,
|
|
359
|
-
percentOfTotal: 0,
|
|
360
|
-
tier: "cool"
|
|
361
|
-
};
|
|
362
|
-
const maxChurn = Math.max(churn1, churn2);
|
|
363
|
-
if (count > 0 && maxChurn > 0 && count / maxChurn >= 0.9) {
|
|
364
|
-
entry.lockstep = true;
|
|
365
|
-
}
|
|
366
|
-
if (trackedFiles) {
|
|
367
|
-
if (!trackedFiles.has(file1)) entry.file1Deleted = true;
|
|
368
|
-
if (!trackedFiles.has(file2)) entry.file2Deleted = true;
|
|
369
|
-
}
|
|
370
|
-
entries.push(entry);
|
|
371
|
-
}
|
|
372
|
-
entries.sort((a, b) => b.couplingScore - a.couplingScore);
|
|
373
|
-
const totalScore = entries.reduce((sum, e) => sum + e.couplingScore, 0);
|
|
374
|
-
if (totalScore === 0) return [];
|
|
375
|
-
const adapted = entries.map((e) => ({
|
|
376
|
-
...e,
|
|
377
|
-
score: e.couplingScore
|
|
378
|
-
}));
|
|
379
|
-
assignTiers(adapted, totalScore);
|
|
380
|
-
for (let i = 0; i < entries.length; i++) {
|
|
381
|
-
entries[i].percentOfTotal = adapted[i].percentOfTotal;
|
|
382
|
-
entries[i].tier = adapted[i].tier;
|
|
383
|
-
}
|
|
384
|
-
return entries;
|
|
385
|
-
}
|
|
386
|
-
function getNestingDepths(filePaths) {
|
|
387
|
-
const depths = /* @__PURE__ */ new Map();
|
|
388
|
-
for (const filePath of filePaths) {
|
|
389
|
-
let content;
|
|
390
|
-
try {
|
|
391
|
-
content = readFileSync(filePath, "utf-8");
|
|
392
|
-
} catch {
|
|
393
|
-
depths.set(filePath, 0);
|
|
394
|
-
continue;
|
|
395
|
-
}
|
|
396
|
-
const leadings = [];
|
|
397
|
-
const deltaCounts = /* @__PURE__ */ new Map();
|
|
398
|
-
let prevSpaceWidth = 0;
|
|
399
|
-
for (const line of content.split("\n")) {
|
|
400
|
-
if (!line.trim()) continue;
|
|
401
|
-
const match = line.match(/^(\s+)/);
|
|
402
|
-
if (!match) {
|
|
403
|
-
prevSpaceWidth = 0;
|
|
404
|
-
continue;
|
|
405
|
-
}
|
|
406
|
-
const leading = match[1];
|
|
407
|
-
leadings.push(leading);
|
|
408
|
-
if (leading.includes(" ")) {
|
|
409
|
-
continue;
|
|
410
|
-
}
|
|
411
|
-
const width = leading.length;
|
|
412
|
-
const delta = width - prevSpaceWidth;
|
|
413
|
-
if (delta > 0) {
|
|
414
|
-
deltaCounts.set(delta, (deltaCounts.get(delta) ?? 0) + 1);
|
|
415
|
-
}
|
|
416
|
-
prevSpaceWidth = width;
|
|
417
|
-
}
|
|
418
|
-
let indentUnit = 4;
|
|
419
|
-
let bestCount = 0;
|
|
420
|
-
for (const [delta, count] of deltaCounts) {
|
|
421
|
-
if (count > bestCount || count === bestCount && delta < indentUnit) {
|
|
422
|
-
bestCount = count;
|
|
423
|
-
indentUnit = delta;
|
|
424
|
-
}
|
|
425
|
-
}
|
|
426
|
-
let maxDepth = 0;
|
|
427
|
-
for (const leading of leadings) {
|
|
428
|
-
let depth = 0;
|
|
429
|
-
for (const ch of leading) {
|
|
430
|
-
if (ch === " ") {
|
|
431
|
-
depth += 1;
|
|
432
|
-
} else if (ch === " ") {
|
|
433
|
-
depth += 1 / indentUnit;
|
|
434
|
-
}
|
|
435
|
-
}
|
|
436
|
-
depth = Math.floor(depth);
|
|
437
|
-
if (depth > maxDepth) maxDepth = depth;
|
|
438
|
-
}
|
|
439
|
-
depths.set(filePath, maxDepth);
|
|
440
|
-
}
|
|
441
|
-
return depths;
|
|
442
|
-
}
|
|
443
|
-
var INIT_DIR_RULES = [
|
|
444
|
-
{
|
|
445
|
-
dir: ".github",
|
|
446
|
-
pattern: ".github/**",
|
|
447
|
-
comment: "GitHub Actions and workflows"
|
|
448
|
-
},
|
|
449
|
-
{
|
|
450
|
-
dir: ".circleci",
|
|
451
|
-
pattern: ".circleci/**",
|
|
452
|
-
comment: "CircleCI configuration"
|
|
453
|
-
},
|
|
454
|
-
{ dir: ".husky", pattern: ".husky/**", comment: "Git hooks" },
|
|
455
|
-
{ dir: ".vscode", pattern: ".vscode/**", comment: "VS Code settings" },
|
|
456
|
-
{ dir: ".idea", pattern: ".idea/**", comment: "JetBrains settings" },
|
|
457
|
-
{
|
|
458
|
-
dir: "scripts",
|
|
459
|
-
pattern: "scripts/**",
|
|
460
|
-
comment: "Build and utility scripts"
|
|
461
|
-
},
|
|
462
|
-
{ dir: "docs", pattern: "docs/**", comment: "Documentation" },
|
|
463
|
-
{ dir: "docker", pattern: "docker/**", comment: "Docker configuration" },
|
|
464
|
-
{
|
|
465
|
-
dir: "fixtures",
|
|
466
|
-
pattern: "fixtures/**",
|
|
467
|
-
comment: "Test fixtures"
|
|
468
|
-
},
|
|
469
|
-
{
|
|
470
|
-
dir: "vendor",
|
|
471
|
-
pattern: "vendor/**",
|
|
472
|
-
comment: "Vendored dependencies"
|
|
473
|
-
}
|
|
474
|
-
];
|
|
475
|
-
var INIT_FILE_RULES = [
|
|
476
|
-
{
|
|
477
|
-
test: /\.generated\./,
|
|
478
|
-
pattern: "*.generated.*",
|
|
479
|
-
comment: "Generated code"
|
|
480
|
-
},
|
|
481
|
-
{ test: /\.gen\.[^.]+$/, pattern: "*.gen.*", comment: "Generated code" },
|
|
482
|
-
{
|
|
483
|
-
test: /\.config\.\w/,
|
|
484
|
-
pattern: "*.config.*",
|
|
485
|
-
comment: "Configuration files"
|
|
486
|
-
},
|
|
487
|
-
{
|
|
488
|
-
test: /(?:^|\/)\.gitlab-ci/,
|
|
489
|
-
pattern: ".gitlab-ci*",
|
|
490
|
-
comment: "GitLab CI configuration"
|
|
491
|
-
},
|
|
492
|
-
{
|
|
493
|
-
test: /^\.claude\/commands\//,
|
|
494
|
-
pattern: ".claude/commands/**",
|
|
495
|
-
comment: "Claude Code slash commands (often generated from sources)"
|
|
496
|
-
},
|
|
497
|
-
{
|
|
498
|
-
test: /^\.opencode\/commands\//,
|
|
499
|
-
pattern: ".opencode/commands/**",
|
|
500
|
-
comment: "OpenCode slash commands (often generated from sources)"
|
|
501
|
-
},
|
|
502
|
-
{
|
|
503
|
-
test: /^\.cursor\/rules\//,
|
|
504
|
-
pattern: ".cursor/rules/**",
|
|
505
|
-
comment: "Cursor rules (often generated from sources)"
|
|
506
|
-
}
|
|
507
|
-
];
|
|
508
|
-
function detectIgnorePatterns() {
|
|
509
|
-
const trackedFiles = getTrackedFiles();
|
|
510
|
-
const patterns = [];
|
|
511
|
-
const topDirs = /* @__PURE__ */ new Set();
|
|
512
|
-
for (const f of trackedFiles) {
|
|
513
|
-
const slash = f.indexOf("/");
|
|
514
|
-
if (slash > 0) topDirs.add(f.slice(0, slash));
|
|
515
|
-
}
|
|
516
|
-
for (const rule of INIT_DIR_RULES) {
|
|
517
|
-
if (topDirs.has(rule.dir)) {
|
|
518
|
-
patterns.push({ pattern: rule.pattern, comment: rule.comment });
|
|
519
|
-
}
|
|
520
|
-
}
|
|
521
|
-
for (const rule of INIT_FILE_RULES) {
|
|
522
|
-
for (const f of trackedFiles) {
|
|
523
|
-
if (rule.test.test(f)) {
|
|
524
|
-
patterns.push({ pattern: rule.pattern, comment: rule.comment });
|
|
525
|
-
break;
|
|
526
|
-
}
|
|
527
|
-
}
|
|
528
|
-
}
|
|
529
|
-
return patterns;
|
|
530
|
-
}
|
|
531
|
-
function formatIgnoreFile(detectedPatterns, universalGroups = UNIVERSAL_IGNORE_GROUPS) {
|
|
532
|
-
const lines = [
|
|
533
|
-
"# Generated by obscene init",
|
|
534
|
-
"# Edit this file to customize which files are excluded from analysis.",
|
|
535
|
-
"# Patterns use glob syntax (same as .gitignore).",
|
|
536
|
-
"# See: https://github.com/wbern/obscene#ignore-files",
|
|
537
|
-
""
|
|
538
|
-
];
|
|
539
|
-
for (const group of universalGroups) {
|
|
540
|
-
lines.push(`# ${group.title}`);
|
|
541
|
-
for (const p of group.patterns) {
|
|
542
|
-
lines.push(p.pattern);
|
|
543
|
-
}
|
|
544
|
-
lines.push("");
|
|
545
|
-
}
|
|
546
|
-
if (detectedPatterns.length > 0) {
|
|
547
|
-
lines.push("# Project-specific patterns");
|
|
548
|
-
for (const p of detectedPatterns) {
|
|
549
|
-
lines.push(`# ${p.comment}`);
|
|
550
|
-
lines.push(p.pattern);
|
|
551
|
-
}
|
|
552
|
-
lines.push("");
|
|
553
|
-
}
|
|
554
|
-
return lines.join("\n");
|
|
555
|
-
}
|
|
556
|
-
var RRF_K = 10;
|
|
557
|
-
function computeComposite(rankings, churn, top) {
|
|
558
|
-
const totalDimensions = Object.keys(rankings).length;
|
|
559
|
-
const fileScores = /* @__PURE__ */ new Map();
|
|
560
|
-
for (const ranking of Object.values(rankings)) {
|
|
561
|
-
for (let i = 0; i < ranking.entries.length; i++) {
|
|
562
|
-
const file = ranking.entries[i].file;
|
|
563
|
-
const rrf = 1 / (RRF_K + i + 1);
|
|
564
|
-
const existing = fileScores.get(file);
|
|
565
|
-
if (existing) {
|
|
566
|
-
existing.score += rrf;
|
|
567
|
-
existing.dims += 1;
|
|
568
|
-
} else {
|
|
569
|
-
fileScores.set(file, { score: rrf, dims: 1 });
|
|
570
|
-
}
|
|
571
|
-
}
|
|
572
|
-
}
|
|
573
|
-
const entries = [];
|
|
574
|
-
for (const [file, data] of fileScores) {
|
|
575
|
-
entries.push({
|
|
576
|
-
file,
|
|
577
|
-
score: Math.round(data.score * 1e4) / 1e4,
|
|
578
|
-
percentOfTotal: 0,
|
|
579
|
-
tier: "cool",
|
|
580
|
-
churn: churn.get(file) ?? 0,
|
|
581
|
-
dimensionCount: data.dims
|
|
582
|
-
});
|
|
583
|
-
}
|
|
584
|
-
entries.sort((a, b) => b.score - a.score);
|
|
585
|
-
const totalScore = entries.reduce((sum, e) => sum + e.score, 0);
|
|
586
|
-
if (totalScore === 0) {
|
|
587
|
-
return {
|
|
588
|
-
label: "Combined",
|
|
589
|
-
scoreFormula: "reciprocal rank fusion across all dimensions",
|
|
590
|
-
totalScore: 0,
|
|
591
|
-
tierCounts: { hot: 0, warm: 0, cool: 0 },
|
|
592
|
-
totalDimensions,
|
|
593
|
-
totalEntries: 0,
|
|
594
|
-
showing: 0,
|
|
595
|
-
entries: []
|
|
596
|
-
};
|
|
597
|
-
}
|
|
598
|
-
assignTiers(entries, totalScore);
|
|
599
|
-
const limited = top > 0 ? entries.slice(0, top) : entries;
|
|
600
|
-
const tierCounts = { hot: 0, warm: 0, cool: 0 };
|
|
601
|
-
for (const e of entries) {
|
|
602
|
-
tierCounts[e.tier]++;
|
|
603
|
-
}
|
|
604
|
-
return {
|
|
605
|
-
label: "Combined",
|
|
606
|
-
scoreFormula: "reciprocal rank fusion across all dimensions",
|
|
607
|
-
totalScore: Math.round(totalScore * 1e4) / 1e4,
|
|
608
|
-
tierCounts,
|
|
609
|
-
totalDimensions,
|
|
610
|
-
totalEntries: entries.length,
|
|
611
|
-
showing: limited.length,
|
|
612
|
-
entries: limited
|
|
613
|
-
};
|
|
614
|
-
}
|
|
615
|
-
|
|
616
|
-
// src/format.ts
|
|
617
|
-
import pc2 from "picocolors";
|
|
618
|
-
|
|
619
|
-
// src/color.ts
|
|
620
|
-
import pc from "picocolors";
|
|
621
|
-
var ANSI_RE = /\x1b\[[0-9;]*m/g;
|
|
622
|
-
function isWide(cp) {
|
|
623
|
-
return (
|
|
624
|
-
// CJK Radicals through Katakana (U+2E80–U+30FF) + CJK Symbols (U+3000–U+303F)
|
|
625
|
-
cp >= 11904 && cp <= 12543 || // Enclosed CJK Letters + CJK Compatibility (U+3200–U+33FF)
|
|
626
|
-
cp >= 12800 && cp <= 13311 || // CJK Extension A (U+3400–U+4DBF) + CJK Unified Ideographs (U+4E00–U+9FFF)
|
|
627
|
-
cp >= 13312 && cp <= 40959 || // Hangul Syllables (U+AC00–U+D7AF)
|
|
628
|
-
cp >= 44032 && cp <= 55215 || // CJK Compatibility Ideographs (U+F900–U+FAFF)
|
|
629
|
-
cp >= 63744 && cp <= 64255 || // Fullwidth Forms (U+FF01–U+FF60, U+FFE0–U+FFE6)
|
|
630
|
-
cp >= 65281 && cp <= 65376 || cp >= 65504 && cp <= 65510 || // Miscellaneous Symbols (U+2600–U+26FF) — includes ☀, ⚡, etc.
|
|
631
|
-
cp >= 9728 && cp <= 9983 || // Emoji and symbol blocks in supplementary planes (U+1F300–U+1FAFF)
|
|
632
|
-
cp >= 127744 && cp <= 129791 || // CJK Extension B+ and supplementary ideographs (U+20000–U+2FA1F)
|
|
633
|
-
cp >= 131072 && cp <= 195103
|
|
634
|
-
);
|
|
635
|
-
}
|
|
636
|
-
function visualWidth(s) {
|
|
637
|
-
const stripped = s.replace(ANSI_RE, "");
|
|
638
|
-
let width = 0;
|
|
639
|
-
for (const ch of stripped) {
|
|
640
|
-
const cp = ch.codePointAt(0);
|
|
641
|
-
if (cp === 65038 || cp === 65039) continue;
|
|
642
|
-
width += isWide(cp) ? 2 : 1;
|
|
643
|
-
}
|
|
644
|
-
return width;
|
|
645
|
-
}
|
|
646
|
-
function padRight(s, n) {
|
|
647
|
-
const w = visualWidth(s);
|
|
648
|
-
return w >= n ? s : s + " ".repeat(n - w);
|
|
649
|
-
}
|
|
650
|
-
function padLeft(s, n) {
|
|
651
|
-
const w = visualWidth(s);
|
|
652
|
-
return w >= n ? s : " ".repeat(n - w) + s;
|
|
653
|
-
}
|
|
654
|
-
function truncate(s, max) {
|
|
655
|
-
if (max <= 0) return "";
|
|
656
|
-
if (s.length <= max) return s;
|
|
657
|
-
if (max === 1) return "\u2026";
|
|
658
|
-
const remaining = max - 1;
|
|
659
|
-
const tail = Math.ceil(remaining * 0.6);
|
|
660
|
-
const head = remaining - tail;
|
|
661
|
-
return `${s.slice(0, head)}\u2026${s.slice(s.length - tail)}`;
|
|
662
|
-
}
|
|
663
|
-
function tierLabel(tier) {
|
|
664
|
-
if (tier === "hot") return pc.red("\u{1F525} HOT ");
|
|
665
|
-
if (tier === "warm") return pc.yellow("\u2600\uFE0F WARM");
|
|
666
|
-
return pc.blue("\u{1F9CA} COOL");
|
|
667
|
-
}
|
|
668
|
-
function colorRow(tier, text) {
|
|
669
|
-
if (tier === "hot") return pc.red(text);
|
|
670
|
-
if (tier === "warm") return pc.yellow(text);
|
|
671
|
-
return pc.blue(text);
|
|
672
|
-
}
|
|
673
|
-
function tierSummary(tierCounts, showing, total) {
|
|
674
|
-
const lines = [];
|
|
675
|
-
lines.push(
|
|
676
|
-
`Tiers: ${pc.red(`${tierCounts.hot} HOT`)}, ${pc.yellow(`${tierCounts.warm} WARM`)}, ${pc.blue(`${tierCounts.cool} COOL`)}`
|
|
677
|
-
);
|
|
678
|
-
lines.push(`Showing: ${showing} of ${total}`);
|
|
679
|
-
return lines;
|
|
680
|
-
}
|
|
681
|
-
|
|
682
|
-
// src/format.ts
|
|
683
|
-
var RANKING_LABELS_BY_KEY = Object.fromEntries(
|
|
684
|
-
RANKING_DEFS.map((d) => [d.key, d.label])
|
|
685
|
-
);
|
|
686
|
-
function formatReportTable(output) {
|
|
687
|
-
const lines = [];
|
|
688
|
-
const { summary, files } = output;
|
|
689
|
-
lines.push(
|
|
690
|
-
`Complexity Report \u2014 ${summary.fileCount} files, ${summary.totalComplexity} total complexity`
|
|
691
|
-
);
|
|
692
|
-
lines.push(
|
|
693
|
-
`Showing: ${summary.showing} | Avg complexity/file: ${summary.avgComplexityPerFile}`
|
|
694
|
-
);
|
|
695
|
-
lines.push("");
|
|
696
|
-
lines.push(
|
|
697
|
-
padRight("File", 60) + padLeft("Code", 8) + padLeft("Complexity", 12) + padLeft("Density", 9) + padLeft("Comments", 10)
|
|
698
|
-
);
|
|
699
|
-
lines.push("\u2500".repeat(99));
|
|
700
|
-
for (const f of files) {
|
|
701
|
-
lines.push(
|
|
702
|
-
padRight(truncate(f.file, 58), 60) + padLeft(String(f.code), 8) + padLeft(String(f.complexity), 12) + padLeft(f.complexityDensity.toFixed(2), 9) + padLeft(String(f.comments), 10)
|
|
703
|
-
);
|
|
704
|
-
}
|
|
705
|
-
lines.push("");
|
|
706
|
-
lines.push(
|
|
707
|
-
pc2.dim(
|
|
708
|
-
"Complexity=cyclomatic branch/loop count | Density=complexity/code | Comments=comment lines"
|
|
709
|
-
)
|
|
710
|
-
);
|
|
711
|
-
lines.push(
|
|
712
|
-
pc2.dim(
|
|
713
|
-
"High complexity is expected for parsers, state machines, and business logic. Compare density across files, not raw values."
|
|
714
|
-
)
|
|
715
|
-
);
|
|
716
|
-
lines.push(pc2.dim("Docs: https://github.com/wbern/obscene#metrics"));
|
|
717
|
-
return lines.join("\n");
|
|
718
|
-
}
|
|
719
|
-
/**
 * Build the column layout for one ranking table: four base columns
 * (File/Score/%/Churn), the metric-specific columns for `key`, and a
 * trailing Tier column. Unknown keys get only base + Tier.
 * @param {string} key - ranking key (complexity | nesting | defects | authors)
 * @returns {Array<{header: string, width: number, align: string, value: Function}>}
 */
function getRankingColumns(key) {
  // Tiny factory keeps every column definition on one line.
  const col = (header, width, align, value) => ({ header, width, align, value });
  const base = [
    col("File", 50, "left", (e) => truncate(e.file, 48)),
    col("Score", 8, "right", (e) => e.score.toLocaleString()),
    col("%", 7, "right", (e) => e.percentOfTotal.toFixed(1)),
    col("Churn", 7, "right", (e) => String(e.churn))
  ];
  const metricCols = {
    complexity: [
      col("Cmplx", 7, "right", (e) => String(e.metricValue)),
      col("Dens", 7, "right", (e) => (e.metricDensity ?? 0).toFixed(2))
    ],
    nesting: [
      col("Nest", 6, "right", (e) => String(e.metricValue))
    ],
    defects: [
      col("Fixes", 6, "right", (e) => String(e.metricValue)),
      col("FxDns", 7, "right", (e) => (e.metricDensity ?? 0).toFixed(4))
    ],
    authors: [
      col("Auth", 6, "right", (e) => String(e.metricValue))
    ]
  };
  const tierCol = col("Tier", 12, "right", (e) => tierLabel(e.tier));
  return [...base, ...metricCols[key] ?? [], tierCol];
}
|
|
800
|
-
// Emoji prefix per ranking key, used in table titles:
// \u{1F9EC} DNA (complexity), \u{1F4CF} ruler (nesting),
// \u{1F527} wrench (defects), \u{1F465} people (authors).
var METRIC_EMOJI = {
  complexity: "\u{1F9EC}",
  nesting: "\u{1F4CF}",
  defects: "\u{1F527}",
  authors: "\u{1F465}"
};
|
|
806
|
-
/**
 * Render one metric-x-churn ranking as a fixed-width table section.
 * @param {string} key - ranking key (complexity | nesting | defects | authors)
 * @param {object} ranking - ranking payload (label, totalScore, entries, ...)
 * @param {string | undefined} description - dimmed guide text under the title
 * @returns {string[]} lines for this section (caller joins/separates them)
 */
function formatRankingTable(key, ranking, description) {
  const emoji = METRIC_EMOJI[key];
  const prefix = emoji ? `${emoji} ` : "";
  // Keep the churn-cycle emoji glued to the word CHURN in the title.
  const title = ranking.label.toUpperCase().replace("CHURN", "\u{1F504} CHURN");
  const lines = [
    `${prefix}${title} \u2014 Total score: ${ranking.totalScore.toLocaleString()}`
  ];
  if (description) {
    lines.push(...description.split("\n").map((line) => pc2.dim(line)));
  }
  lines.push(
    ...tierSummary(ranking.tierCounts, ranking.showing, ranking.totalEntries)
  );
  lines.push("");
  const cols = getRankingColumns(key);
  // One helper handles both header and data cells per column alignment.
  const cell = (c, text) =>
    c.align === "left" ? padRight(text, c.width) : padLeft(text, c.width);
  lines.push(cols.map((c) => cell(c, c.header)).join(""));
  const totalWidth = cols.reduce((sum, c) => sum + c.width, 0);
  lines.push("\u2500".repeat(totalWidth));
  for (const entry of ranking.entries) {
    const rawRow = cols.map((c) => cell(c, c.value(entry))).join("");
    lines.push(colorRow(entry.tier, rawRow));
  }
  return lines;
}
|
|
840
|
-
/**
 * Render the `hotspots` command output: one section per ranking, plus
 * skipped-ranking notices and dimmed interpretation footnotes.
 *
 * Fix: the zero-complexity corpus condition was computed twice with
 * inconsistent guards (`corpus && ...` at the top, `corpus !== void 0 && ...`
 * at the bottom — the latter throws a TypeError when corpus is null).
 * It is now computed once with a single null-safe check.
 * @param {object} output - hotspots payload ({ churnWindow, rankings, corpus, skipped, guide })
 * @returns {string} full table text, newline-joined
 */
function formatHotspotsTable(output) {
  const lines = [];
  const { churnWindow, rankings, corpus } = output;
  lines.push(`Hotspots \u2014 ${churnWindow} churn window`);
  // `!= null` covers both null and undefined safely.
  const zeroComplexityCorpus = corpus != null && corpus.fileCount > 0 && corpus.totalComplexity === 0;
  if (zeroComplexityCorpus) {
    lines.push("");
    lines.push(
      pc2.yellow(
        "Note: no measurable code complexity detected across this corpus (cyclomatic = 0)."
      )
    );
    lines.push(
      pc2.yellow(
        "Rankings reflect size and churn only \u2014 HOT/WARM/COOL are relative groupings, not risk labels."
      )
    );
  }
  lines.push("");
  const keys = Object.keys(rankings);
  for (let i = 0; i < keys.length; i++) {
    const key = keys[i];
    lines.push(...formatRankingTable(key, rankings[key], output.guide[key]));
    // Dotted separator between sections, but not after the last one.
    if (i < keys.length - 1) {
      lines.push("");
      lines.push("\xB7 \xB7 \xB7");
      lines.push("");
    }
  }
  if (output.skipped) {
    for (const [key, info] of Object.entries(output.skipped)) {
      lines.push("");
      // Fall back to a capitalized "<Key> x Churn" label for unknown keys.
      const label = RANKING_LABELS_BY_KEY[key] ?? `${key.charAt(0).toUpperCase() + key.slice(1)} \xD7 Churn`;
      lines.push(`${label} \u2014 skipped (${info.reason})`);
      if (info.suggestion) {
        lines.push(`  ${info.suggestion}`);
      }
    }
  }
  lines.push("");
  lines.push(
    pc2.dim(
      "Score=metric\xD7churn | Tiers are relative to THIS codebase, not absolute quality grades."
    )
  );
  lines.push(
    pc2.dim(
      zeroComplexityCorpus ? "High scores flag files that change often and are sizable \u2014 neither is bad in itself." : "High scores flag review candidates, not bad code \u2014 stable complex files (parsers, engines) score high naturally."
    )
  );
  lines.push(pc2.dim("Docs: https://github.com/wbern/obscene#metrics"));
  return lines.join("\n");
}
|
|
893
|
-
/**
 * Render the `coupling` command output as a fixed-width table with
 * footnotes explaining the deleted-file (\u2020) and lockstep (\u21C4) markers.
 * @param {object} output - coupling payload ({ tierCounts, totalScore, churnWindow, couplings, ... })
 * @returns {string} full table text, newline-joined
 */
function formatCouplingTable(output) {
  const { tierCounts, totalScore, churnWindow, couplings } = output;
  const lines = [
    `Coupling \u2014 ${churnWindow} churn window | Min shared: ${output.minCochanges} | Total score: ${totalScore.toLocaleString()}`,
    ...tierSummary(tierCounts, output.showing, output.totalCouplings),
    padRight("File 1", 35) + padRight("File 2", 35) + padLeft("Shared", 7) + padLeft("Degree", 8) + padLeft("Cmplx", 7) + padLeft("Tier", 12),
    "\u2500".repeat(104)
  ];
  // Deleted files get a dagger prefix and a slightly shorter truncation.
  const nameCell = (file, deleted) =>
    deleted ? `\u2020 ${truncate(file, 31)}` : truncate(file, 33);
  let anyDeleted = false;
  let anyLockstep = false;
  for (const c of couplings) {
    if (c.file1Deleted || c.file2Deleted) anyDeleted = true;
    if (c.lockstep) anyLockstep = true;
    const degreeText = c.lockstep ? `${c.degree.toFixed(1)}\u21C4` : `${c.degree.toFixed(1)}%`;
    const rawRow =
      padRight(nameCell(c.file1, c.file1Deleted), 35) +
      padRight(nameCell(c.file2, c.file2Deleted), 35) +
      padLeft(String(c.cochanges), 7) +
      padLeft(degreeText, 8) +
      padLeft(String(c.totalComplexity), 7) +
      padLeft(tierLabel(c.tier), 12);
    lines.push(colorRow(c.tier, rawRow));
  }
  lines.push("");
  lines.push(
    pc2.dim(
      "Shared=co-changed commits | Degree=shared/min(churn)\xD7100 | Cmplx=sum of both files"
    )
  );
  // Marker legends only appear when the marker was actually used.
  if (anyDeleted) {
    lines.push(
      pc2.dim("\u2020 = file no longer present at HEAD (deleted or renamed)")
    );
  }
  if (anyLockstep) {
    lines.push(
      pc2.dim(
        "\u21C4 = lockstep pair (both files only ever changed together \u2014 signal is real but uninformative)"
      )
    );
  }
  lines.push(
    pc2.dim(
      "Tiers are relative to THIS codebase, not absolute quality grades. High coupling may be intentional and fine."
    )
  );
  lines.push(
    pc2.dim(
      "Same-directory pairs excluded. Commits touching >20 files skipped. Only cross-directory dependencies shown."
    )
  );
  lines.push(pc2.dim("Docs: https://github.com/wbern/obscene#metrics"));
  return lines.join("\n");
}
|
|
946
|
-
/**
 * Render the composite (rank-fused) ranking as its own boxed table.
 * @param {object} output - composite payload ({ label, totalScore, entries, totalDimensions, ... })
 * @returns {string} table text, newline-joined
 */
function formatCompositeTable(output) {
  const lines = [
    "\u2550".repeat(84),
    `\u2605 ${output.label.toUpperCase()} \u2014 Total score: ${output.totalScore.toLocaleString()}`,
    ...tierSummary(output.tierCounts, output.showing, output.totalEntries),
    "",
    padRight("File", 50) + padLeft("Score", 9) + padLeft("Churn", 7) + padLeft("Dims", 6) + padLeft("Tier", 12),
    "\u2500".repeat(84)
  ];
  for (const entry of output.entries) {
    const row =
      padRight(truncate(entry.file, 48), 50) +
      padLeft(entry.score.toFixed(4), 9) +
      padLeft(String(entry.churn), 7) +
      padLeft(`${entry.dimensionCount}/${output.totalDimensions}`, 6) +
      padLeft(tierLabel(entry.tier), 12);
    lines.push(colorRow(entry.tier, row));
  }
  return lines.join("\n");
}
|
|
966
|
-
|
|
967
|
-
// src/cli.ts

// Top-level commander program; subcommands are registered below.
var program = new Command();
program.name("obscene").description("Identify hotspot files \u2014 complex code that changes frequently").version("2.1.1");

// Guide objects are embedded verbatim in the JSON output so consumers can
// interpret each field without consulting external documentation.
// One entry per JSON field the `report` command emits.
var REPORT_GUIDE = {
  complexity: "Cyclomatic complexity (branch/loop count). NOT a quality judgment \u2014 a 500-line parser will naturally score high. Compare density, not raw values.",
  complexityDensity: "Complexity per line of code. Normalizes for file size. >0.25 suggests dense logic worth reviewing; <0.10 is typical for straightforward code.",
  comments: "Comment line count. Low comments in high-density files may indicate under-documented logic. High comments alone is not a problem."
};
// One entry per ranking dimension the `hotspots` command emits, including
// the research citation each metric is based on.
var HOTSPOTS_GUIDE = {
  rankings: "Four independent ranking tables, each scoring files by a different metric \xD7 churn. A file may rank high in one dimension but not others.",
  complexity: "complexity \xD7 churn. Complex code that changes often poses maintenance risk.\nSource: McCabe cyclomatic complexity (1976) via scc \xB7 Strength: objective, language-agnostic \xB7 Limit: parsers and state machines score high naturally",
  nesting: "maxNesting \xD7 churn. Deeply nested code that changes often is harder to reason about.\nSource: cognitive complexity research (SonarSource, G. Ann Campbell 2018) \xB7 Strength: catches hard-to-follow control flow \xB7 Limit: some patterns (error chains, config) legitimately nest deep",
  defects: "fixes \xD7 churn. Count of fix: commits touching the file \xD7 churn. High values can mean latent fragility, but they also flag features that got debugged thoroughly \u2014 read the fix-commit history before concluding which.\nSource: change-history metrics (Moser, Pedrycz & Succi 2008) via conventional commits (fix: prefix) \xB7 Strength: direct fix-history signal \xB7 Limit: counts fix activity, not defects per se; requires consistent fix: convention",
  authors: "authors \xD7 churn. Files touched by many authors and changing often may lack clear ownership.\nSource: code ownership research (Bird et al. 2011, Microsoft) \xB7 Strength: flags diffuse ownership risk \xB7 Limit: doesn't measure expertise depth, bot authors filtered automatically",
  composite: "Combined ranking using Reciprocal Rank Fusion (RRF) across all dimensions. Files appearing near the top of multiple rankings score highest.\nSource: RRF (Cormack et al. 2009) \xB7 Strength: robust to outliers, no normalization needed \xB7 Limit: equal weight across all dimensions",
  tier: "Relative ranking within THIS codebase (top 50% = hot, next 30% = warm, bottom 20% = cool). NOT an absolute quality grade \u2014 a hot file is under heavy load, not necessarily broken.",
  corpus: "Aggregate stats for the analyzed file set (post-exclude \u2014 files filtered by .obsignore or --exclude are not counted). When totalComplexity is 0, the rankings reflect size and churn only; HOT/WARM/COOL become relative groupings rather than risk labels."
};
// One entry per field of a coupling pair emitted by the `coupling` command.
var COUPLING_GUIDE = {
  cochanges: "Times both files appeared in the same commit. Higher values suggest a dependency between the files. Same-directory pairs are excluded \u2014 only cross-directory pairs are shown.",
  degree: "Percentage: shared commits / min(churn of file1, file2) \xD7 100. Shows how tightly coupled the pair is relative to their individual change rates. 100% means every change to the less-active file also touched the other.",
  totalComplexity: "Sum of both files' cyclomatic complexity. Highlights coupled pairs where the involved code is also complex \u2014 hidden dependency + high complexity compounds maintenance risk.",
  tier: "Relative ranking within THIS codebase's coupling pairs (top 50% = hot, next 30% = warm, bottom 20% = cool). NOT an absolute quality grade. 'hot' means this pair co-changes more than most \u2014 it may be intentional and fine.",
  deleted: "file1Deleted / file2Deleted are set when the file is no longer present at HEAD (deleted or renamed away). The coupling signal is historical \u2014 the pair is not actionable in the current tree.",
  lockstep: "Set when shared commits / max(churn) \u2265 0.9 \u2014 both files almost always change together over the window. Typical of generator/mirror pairs (README \u2194 src/README, *.pb.go \u2194 *.proto). The coupling signal is real but uninformative; treat the pair as a single unit from git's perspective."
};
|
|
993
|
-
/**
 * Attach the options shared by report/hotspots/coupling to a commander
 * command: --top, --format, and --exclude.
 * @param {object} cmd - commander Command instance
 * @returns {object} the same command, for chaining
 */
function addSharedOptions(cmd) {
  cmd.option("--top <n>", "limit to top N entries (0 = all)", "20");
  cmd.option("--format <type>", "output format: json | table", "json");
  return cmd.option(
    "--exclude <patterns...>",
    "additional file patterns to exclude (also reads .obsignore / .obsceneignore)"
  );
}
|
|
999
|
-
// `report`: static per-file complexity snapshot (no git history required).
addSharedOptions(
  program.command("report").description("per-file complexity data")
).action((opts) => {
  try {
    runReport(opts);
  } catch (err) {
    exitWithError(err);
  }
});
// `hotspots`: the default subcommand (runs when no command is given).
addSharedOptions(
  program.command("hotspots", { isDefault: true }).description("churn \xD7 complexity hotspot analysis (default)")
).option("--months <n>", "churn window in months", "3").action((opts) => {
  try {
    runHotspots(opts);
  } catch (err) {
    exitWithError(err);
  }
});
// `coupling`: temporal co-change analysis across directories.
addSharedOptions(
  program.command("coupling").description(
    "temporal coupling \u2014 files that change together across directories"
  )
).option("--months <n>", "churn window in months", "3").option("--min-cochanges <n>", "minimum shared commits to include", "2").action((opts) => {
  try {
    runCoupling(opts);
  } catch (err) {
    exitWithError(err);
  }
});
// `init`: scaffold a .obsignore; takes no shared options.
program.command("init").description("generate a starter .obsignore based on project structure").action(() => {
  try {
    runInit();
  } catch (err) {
    exitWithError(err);
  }
});
|
|
1035
|
-
/**
 * Merge ignore-file patterns (.obsignore / .obsceneignore) with any
 * --exclude patterns passed on the command line.
 * @param {string[] | undefined} cliExcludes - patterns from --exclude, if any
 * @returns {string[]} combined pattern list (file patterns first)
 */
function resolveExcludes(cliExcludes) {
  const fromFile = readIgnoreFile();
  const fromCli = cliExcludes ?? [];
  return fromFile.concat(fromCli);
}
|
|
1038
|
-
/**
 * Print a one-time stderr hint when neither supported ignore file exists
 * in the working directory. Stderr keeps the hint out of piped JSON output.
 */
function warnIfNoIgnoreFile() {
  const hasIgnoreFile = existsSync(".obsignore") || existsSync(".obsceneignore");
  if (hasIgnoreFile) {
    return;
  }
  process.stderr.write(
    "hint: no .obsignore found \u2014 run `obscene init` to generate one with recommended exclusions\n"
  );
}
|
|
1045
|
-
/**
 * `report` command: per-file complexity data from scc, totaled and
 * optionally truncated by --top. Emits JSON (default) or a text table.
 * @param {object} opts - parsed commander options (top, format, exclude)
 */
function runReport(opts) {
  warnIfNoIgnoreFile();
  const top = parseInt(opts.top, 10);
  const allExcludes = resolveExcludes(opts.exclude);
  const files = runScc(allExcludes);
  let totalComplexity = 0;
  let totalCode = 0;
  let totalLines = 0;
  for (const f of files) {
    totalComplexity += f.complexity;
    totalCode += f.code;
    totalLines += f.lines;
  }
  // top <= 0 means "show everything".
  const limited = top > 0 ? files.slice(0, top) : files;
  // Rounded to one decimal place.
  const avgComplexityPerFile =
    files.length > 0 ? Math.round(totalComplexity / files.length * 10) / 10 : 0;
  const output = {
    generated: new Date().toISOString(),
    guide: REPORT_GUIDE,
    summary: {
      totalComplexity,
      totalCode,
      totalLines,
      fileCount: files.length,
      avgComplexityPerFile,
      showing: limited.length
    },
    files: limited
  };
  const text = opts.format === "table" ? formatReportTable(output) : JSON.stringify(output, null, 2);
  process.stdout.write(`${text}\n`);
}
|
|
1078
|
-
/**
 * `hotspots` command: combine scc metrics with git-history signals
 * (churn, fix commits, authors, nesting) into per-metric rankings plus
 * a composite ranking. Emits JSON (default) or text tables.
 * @param {object} opts - parsed commander options (top, months, format, exclude)
 */
function runHotspots(opts) {
  warnIfNoIgnoreFile();
  const top = parseInt(opts.top, 10);
  const months = parseInt(opts.months, 10);
  const allExcludes = resolveExcludes(opts.exclude);
  const files = runScc(allExcludes);
  const churn = getChurn(months);
  const defects = getDefects(months);
  const authors = getAuthors(months);
  const nestingDepths = getNestingDepths(files.map((f) => f.file));
  const { rankings, skipped } = computeAllRankings(
    files,
    churn,
    defects,
    nestingDepths,
    authors,
    top
  );
  const composite = computeComposite(rankings, churn, top);
  const corpusTotalComplexity = files.reduce((sum, f) => sum + f.complexity, 0);
  const output = {
    generated: new Date().toISOString(),
    guide: HOTSPOTS_GUIDE,
    churnWindow: `${months} months`,
    rankings,
    // Omit the skipped key entirely from JSON when nothing was skipped.
    skipped: Object.keys(skipped).length > 0 ? skipped : void 0,
    composite,
    corpus: {
      fileCount: files.length,
      totalComplexity: corpusTotalComplexity
    }
  };
  if (opts.format !== "table") {
    process.stdout.write(`${JSON.stringify(output, null, 2)}\n`);
    return;
  }
  process.stdout.write(`${formatHotspotsTable(output)}\n`);
  if (composite.entries.length > 0) {
    process.stdout.write(`\n${formatCompositeTable(composite)}\n`);
  }
}
|
|
1124
|
-
/**
 * `coupling` command: find cross-directory file pairs that change in the
 * same commits, scored against each file's complexity. Emits JSON
 * (default) or a text table.
 * @param {object} opts - parsed commander options (top, months, minCochanges, format, exclude)
 */
function runCoupling(opts) {
  warnIfNoIgnoreFile();
  const top = parseInt(opts.top, 10);
  const months = parseInt(opts.months, 10);
  const minCochanges = parseInt(opts.minCochanges, 10);
  const allExcludes = resolveExcludes(opts.exclude);
  const files = runScc(allExcludes);
  const churn = getChurn(months);
  const cochanges = getCoChanges(months, allExcludes);
  const complexityMap = new Map(files.map((f) => [f.file, f.complexity]));
  const trackedFiles = getTrackedFiles();
  const couplings = computeCoupling(
    cochanges,
    churn,
    complexityMap,
    minCochanges,
    trackedFiles
  );
  // top <= 0 means "show everything".
  const limited = top > 0 ? couplings.slice(0, top) : couplings;
  // Tier counts and total score cover ALL couplings, not just the shown ones.
  const tierCounts = { hot: 0, warm: 0, cool: 0 };
  let totalScore = 0;
  for (const c of couplings) {
    tierCounts[c.tier] += 1;
    totalScore += c.couplingScore;
  }
  const output = {
    generated: new Date().toISOString(),
    guide: COUPLING_GUIDE,
    churnWindow: `${months} months`,
    minCochanges,
    totalScore,
    tierCounts,
    totalCouplings: couplings.length,
    showing: limited.length,
    couplings: limited
  };
  const text = opts.format === "table" ? formatCouplingTable(output) : JSON.stringify(output, null, 2);
  process.stdout.write(`${text}\n`);
}
|
|
1170
|
-
/**
 * `init` command: generate a starter .obsignore with universal exclusions
 * plus any project-specific patterns detected from the tree. Refuses to
 * overwrite an existing ignore file. Progress goes to stderr.
 * @throws {Error} when .obsignore or .obsceneignore already exists
 */
function runInit() {
  for (const name of [".obsignore", ".obsceneignore"]) {
    if (existsSync(name)) {
      throw new Error(`${name} already exists. Remove it first to regenerate.`);
    }
  }
  const detected = detectIgnorePatterns();
  writeFileSync(".obsignore", formatIgnoreFile(detected));
  const universalCount = UNIVERSAL_IGNORE_GROUPS.reduce(
    (sum, g) => sum + g.patterns.length,
    0
  );
  // Note: deliberately no trailing newline — the detected-pattern summary
  // continues on the same line.
  process.stderr.write(
    `Created .obsignore with ${universalCount} universal exclusions`
  );
  if (detected.length === 0) {
    process.stderr.write(" (no project-specific patterns detected)\n");
    return;
  }
  process.stderr.write(` + ${detected.length} detected patterns:\n`);
  for (const p of detected) {
    process.stderr.write(`  ${p.pattern.padEnd(20)} ${p.comment}\n`);
  }
}
|
|
1202
|
-
/**
 * Print `Error: <message>` to stderr and terminate with exit code 1.
 * @param {unknown} err - anything thrown; non-Error values are stringified
 */
function exitWithError(err) {
  let message;
  if (err instanceof Error) {
    message = err.message;
  } else {
    message = String(err);
  }
  process.stderr.write(`Error: ${message}\n`);
  process.exit(1);
}
|
|
1208
|
-
program.parse();
|
|
2
|
+
import{existsSync as L,writeFileSync as Ne}from"fs";import{Command as Ae}from"commander";import{execSync as E}from"child_process";import{readFileSync as Y}from"fs";var Ce=[".obsignore",".obsceneignore"];function Q(){for(let e of Ce)try{return Y(e,"utf-8").split(`
|
|
3
|
+
`).map(n=>n.trim()).filter(n=>n!==""&&!n.startsWith("#"))}catch(t){if(t&&typeof t=="object"&&"code"in t&&t.code==="ENOENT")continue;throw t}return[]}var H=[{title:"Test files and test infrastructure",patterns:[{pattern:"*.test.*",comment:"Unit test files"},{pattern:"*.spec.*",comment:"Spec test files"},{pattern:"*.integration.test.*",comment:"Integration tests"},{pattern:"test-setup.*",comment:"Test setup files"},{pattern:"test-utils.*",comment:"Test utility files"},{pattern:"test-helpers.*",comment:"Test helper files"},{pattern:"__tests__/**",comment:"Test directories"},{pattern:"__mocks__/**",comment:"Mock directories"},{pattern:"*.stories.*",comment:"Storybook stories"},{pattern:"*.d.ts",comment:"TypeScript declaration files"}]},{title:"Lock files and package manifests",patterns:[{pattern:"package.json",comment:"npm package manifest"},{pattern:"package-lock.json",comment:"npm lock file"},{pattern:"pnpm-lock.yaml",comment:"pnpm lock file"},{pattern:"yarn.lock",comment:"Yarn lock file"},{pattern:"bun.lock",comment:"Bun lock file"}]}],xe=.5,we=.8,_=5,j=3,R={complexity:{weak:3,plausible:10,acceptable:30},nesting:{weak:3,plausible:10,acceptable:30},defects:{weak:5,plausible:15,acceptable:50},authors:{weak:2,plausible:4,acceptable:8},coupling:{weak:5,plausible:30,acceptable:100}},v={complexity:"Engineering judgment: any rank ordering needs \u2265 3 items to be meaningful; higher tiers scale from there. No paper prescribes these exact cutoffs.",nesting:"Engineering judgment, informed by Campbell (SonarSource 2018) Cognitive Complexity which assigns a compounding penalty per nesting level. The 3/10/30 sample-count tiers are not from the paper.",defects:"code-maat's --min-revs default of 5 (Adam Tornhill); higher tiers are engineering judgment. Gall et al. (IWPSE 2003) and Hassan (ICSE 2009) study co-change and change-entropy but do not prescribe a specific commit-count floor.",authors:"Engineering judgment. Bird et al. (FSE 2011) Don't Touch My Code! 
shows minor contributors (< 5% of commits) correlate with elevated defects, motivating attention to contributor count \u2014 but the 2/4/8 tiers here are not from the paper.",coupling:"code-maat defaults (--min-revs 5, --max-changeset-size 30, Adam Tornhill). CodeScene's documented temporal-coupling default filters files with fewer than 10 commits. The 30/100 upper tiers are engineering judgment.",composite:"Reciprocal Rank Fusion (Cormack et al., SIGIR 2009) fuses multiple independent rankings; min-of-inputs is a strict monotone aggregator \u2014 when every input ranking is at confidence level L, the composite cannot exceed L."};function I(e,t,n,o,i){let c;return t<n.weak?c="inconclusive":t<n.plausible?c="weak":t<n.acceptable?c="plausible":c="acceptable",{level:c,reason:i(c),inputs:{metric:e,value:t,thresholds:n},source:o}}function Z(e,t){return t.some(n=>n.test(e))}function ee(e){let t=e.replace(/[.+^${}()|[\]\\]/g,"\\$&").replace(/\*\*/g,"\u27E8GLOBSTAR\u27E9").replace(/\*/g,"[^/]*").replace(/⟨GLOBSTAR⟩/g,".*").replace(/\?/g,".");return new RegExp(t)}function T(e){let t=e.replaceAll("\\","/");return t.startsWith("./")?t.slice(2):t}function $(e=[]){let t=e.map(ee),n;try{n=E("scc --by-file --format json --no-cocomo --no-gen",{maxBuffer:50*1024*1024,stdio:["pipe","pipe","pipe"]})}catch(c){throw c&&typeof c=="object"&&"code"in c&&c.code==="ENOENT"?new Error("scc not found. Install it: https://github.com/boyter/scc#install"):c}let o=JSON.parse(n.toString()),i=[];for(let c of o)for(let a of c.Files){let l=T(a.Location);Z(l,t)||i.push({file:l,code:a.Code,lines:a.Lines,complexity:a.Complexity,comments:a.Comment,complexityDensity:a.Code>0?Math.round(a.Complexity/a.Code*100)/100:0})}return i.sort((c,a)=>a.complexity-c.complexity)}function te(e,t){let n;try{n=E(e,{maxBuffer:50*1024*1024,stdio:["pipe","pipe","pipe"]})}catch{throw new Error(t)}let o=new Map;for(let i of n.toString().split(`
|
|
4
|
+
`)){let c=T(i.trim());c&&o.set(c,(o.get(c)??0)+1)}return o}function P(e){return te(`git log --since="${e} months ago" --format="" --name-only`,"Not a git repository or git is not installed.")}function ne(e){return te(`git log --since="${e} months ago" --grep="^fix" --format="" --name-only`,"Not a git repository or git is not installed.")}function oe(e){let t;try{t=E(`git log --since="${e} months ago" --format="COMMIT_SEP%n%aN" --name-only`,{maxBuffer:50*1024*1024,stdio:["pipe","pipe","pipe"]})}catch{throw new Error("Not a git repository or git is not installed.")}let n=new Map,o=t.toString().split(`COMMIT_SEP
|
|
5
|
+
`);for(let c of o){if(!c.trim())continue;let a=c.split(`
|
|
6
|
+
`),l=a[0].trim();if(!(!l||l.endsWith("[bot]")))for(let r=1;r<a.length;r++){let m=T(a[r].trim());if(!m)continue;let s=n.get(m);s||(s=new Set,n.set(m,s)),s.add(l)}}let i=new Map;for(let[c,a]of n)i.set(c,a.size);return i}var Se=20;function ie(e,t=[]){let n=t.map(ee),o;try{o=E(`git log --since="${e} months ago" --format="COMMIT_SEP%n" --name-only`,{maxBuffer:50*1024*1024,stdio:["pipe","pipe","pipe"]})}catch{throw new Error("Not a git repository or git is not installed.")}let i=new Map,c=o.toString().split(`COMMIT_SEP
|
|
7
|
+
`);for(let a of c){if(!a.trim())continue;let l=new Set;for(let m of a.split(`
|
|
8
|
+
`)){let s=T(m.trim());s&&(Z(s,n)||l.add(s))}let r=[...l];if(!(r.length<2||r.length>Se))for(let m=0;m<r.length;m++)for(let s=m+1;s<r.length;s++){let[u,f]=r[m]<r[s]?[r[m],r[s]]:[r[s],r[m]],h=u.includes("/")?u.slice(0,u.lastIndexOf("/")):"",g=f.includes("/")?f.slice(0,f.lastIndexOf("/")):"";if(h===g)continue;let b=`${u}\0${f}`;i.set(b,(i.get(b)??0)+1)}}return i}function U(e,t){let n=0;for(let o of e){o.percentOfTotal=Math.round(o.score/t*1e3)/10,n+=o.score;let i=n/t;i<=xe?o.tier="hot":i<=we?o.tier="warm":o.tier="cool"}}var W=[{key:"complexity",label:"Complexity \xD7 Churn",scoreFormula:"complexity \xD7 churn"},{key:"nesting",label:"Nesting \xD7 Churn",scoreFormula:"maxNesting \xD7 churn"},{key:"defects",label:"Fix Activity \xD7 Churn",scoreFormula:"fixes \xD7 churn"},{key:"authors",label:"Authors \xD7 Churn",scoreFormula:"authors \xD7 churn"}];function ke(e,t,n,o){let i=e.map(a=>{let l=t.get(a.file)??0,r=n(a);return{file:a.file,score:r*l,percentOfTotal:0,tier:"cool",churn:l,metricValue:r,metricDensity:o?o(a):void 0}}).filter(a=>a.score>0).sort((a,l)=>l.score-a.score),c=i.reduce((a,l)=>a+l.score,0);return c===0?[]:(U(i,c),i)}function re(e,t,n,o,i,c){let a={complexity:{extract:p=>p.complexity,density:p=>p.complexityDensity},nesting:{extract:p=>o.get(p.file)??0},defects:{extract:p=>n.get(p.file)??0,density:p=>{let C=n.get(p.file)??0;return p.code>0?Math.round(C/p.code*1e4)/1e4:0}},authors:{extract:p=>i.get(p.file)??0}},l={},r={},m=0;for(let p of e)p.complexity>0&&m++;r.complexity=I("filesWithComplexity",m,R.complexity,v.complexity,p=>p==="inconclusive"?`${m} files with measurable complexity \u2014 not enough to rank.`:`${m} files with measurable complexity (${p.toUpperCase()} sample size).`);let s=0;for(let p of o.values())p>=3&&s++;r.nesting=I("filesWithNesting>=3",s,R.nesting,v.nesting,p=>p==="inconclusive"?`${s} files with nesting depth \u2265 3 \u2014 not enough to rank.`:`${s} files with nesting depth \u2265 3 (${p.toUpperCase()} sample size).`);let 
u=[...n.values()].reduce((p,C)=>p+C,0),f=n.size,h=u<_||f<j;r.defects=I("fixCommits",u,R.defects,v.defects,p=>p==="inconclusive"||h?`${u} fix: commits across ${f} files \u2014 need \u2265 ${_} commits across \u2265 ${j} files (matches code-maat's --min-revs default).`:`${u} fix: commits across ${f} files (${p.toUpperCase()} sample size).`),h&&(r.defects={...r.defects,level:"inconclusive"},l.defects={reason:`insufficient data (${u} fix: commits across ${f} files, need ${_}+ commits across ${j}+ files)`,suggestion:"Adopt conventional commits with fix: prefix. See conventionalcommits.org",confidence:r.defects});let g=0;for(let p of i.values())p>g&&(g=p);r.authors=I("maxAuthors",g,R.authors,v.authors,p=>p==="inconclusive"?`${g} distinct authors on the most-touched file \u2014 not enough to rank ownership.`:`${g} distinct authors on the most-touched file (${p.toUpperCase()} sample size).`),g<=1&&(r.authors={...r.authors,level:"inconclusive"},l.authors={reason:"all files have the same author count \u2014 no variance to rank",confidence:r.authors});let b={};for(let p of W){if(l[p.key])continue;if(r[p.key].level==="inconclusive"){l[p.key]={reason:r[p.key].reason,confidence:r[p.key]};continue}let C=a[p.key],w=ke(e,t,C.extract,C.density);if(w.length===0)continue;let K=c>0?w.slice(0,c):w,J={hot:0,warm:0,cool:0};for(let A of w)J[A.tier]++;b[p.key]={label:p.label,scoreFormula:p.scoreFormula,totalScore:w.reduce((A,be)=>A+be.score,0),tierCounts:J,totalEntries:w.length,showing:K.length,entries:K,confidence:r[p.key]}}return{rankings:b,skipped:l}}function B(){let e;try{e=E("git ls-files",{maxBuffer:50*1024*1024,stdio:["pipe","pipe","pipe"]})}catch{throw new Error("Not a git repository or git is not installed.")}let t=new Set;for(let n of e.toString().split(`
|
|
9
|
+
`)){let o=T(n.trim());o&&t.add(o)}return t}function se(e,t,n,o,i){let c=[];for(let[r,m]of e){if(m<o)continue;let[s,u]=r.split("\0"),f=t.get(s)??0,h=t.get(u)??0,g=Math.min(f,h),b=g>0?Math.round(m/g*1e3)/10:0,p=(n.get(s)??0)+(n.get(u)??0),C={file1:s,file2:u,cochanges:m,degree:b,totalComplexity:p,couplingScore:m,percentOfTotal:0,tier:"cool"},w=Math.max(f,h);m>0&&w>0&&m/w>=.9&&(C.lockstep=!0),i&&(i.has(s)||(C.file1Deleted=!0),i.has(u)||(C.file2Deleted=!0)),c.push(C)}c.sort((r,m)=>m.couplingScore-r.couplingScore);let a=c.reduce((r,m)=>r+m.couplingScore,0);if(a===0)return[];let l=c.map(r=>({...r,score:r.couplingScore}));U(l,a);for(let r=0;r<c.length;r++)c[r].percentOfTotal=l[r].percentOfTotal,c[r].tier=l[r].tier;return c}function ce(e){let t=new Map;for(let n of e){let o;try{o=Y(n,"utf-8")}catch{t.set(n,0);continue}let i=[],c=new Map,a=0;for(let s of o.split(`
|
|
10
|
+
`)){if(!s.trim())continue;let u=s.match(/^(\s+)/);if(!u){a=0;continue}let f=u[1];if(i.push(f),f.includes(" "))continue;let h=f.length,g=h-a;g>0&&c.set(g,(c.get(g)??0)+1),a=h}let l=4,r=0;for(let[s,u]of c)(u>r||u===r&&s<l)&&(r=u,l=s);let m=0;for(let s of i){let u=0;for(let f of s)f===" "?u+=1:f===" "&&(u+=1/l);u=Math.floor(u),u>m&&(m=u)}t.set(n,m)}return t}var ve=[{dir:".github",pattern:".github/**",comment:"GitHub Actions and workflows"},{dir:".circleci",pattern:".circleci/**",comment:"CircleCI configuration"},{dir:".husky",pattern:".husky/**",comment:"Git hooks"},{dir:".vscode",pattern:".vscode/**",comment:"VS Code settings"},{dir:".idea",pattern:".idea/**",comment:"JetBrains settings"},{dir:"scripts",pattern:"scripts/**",comment:"Build and utility scripts"},{dir:"docs",pattern:"docs/**",comment:"Documentation"},{dir:"docker",pattern:"docker/**",comment:"Docker configuration"},{dir:"fixtures",pattern:"fixtures/**",comment:"Test fixtures"},{dir:"vendor",pattern:"vendor/**",comment:"Vendored dependencies"}],Re=[{test:/\.generated\./,pattern:"*.generated.*",comment:"Generated code"},{test:/\.gen\.[^.]+$/,pattern:"*.gen.*",comment:"Generated code"},{test:/\.config\.\w/,pattern:"*.config.*",comment:"Configuration files"},{test:/(?:^|\/)\.gitlab-ci/,pattern:".gitlab-ci*",comment:"GitLab CI configuration"},{test:/^\.claude\/commands\//,pattern:".claude/commands/**",comment:"Claude Code slash commands (often generated from sources)"},{test:/^\.opencode\/commands\//,pattern:".opencode/commands/**",comment:"OpenCode slash commands (often generated from sources)"},{test:/^\.cursor\/rules\//,pattern:".cursor/rules/**",comment:"Cursor rules (often generated from sources)"}];function ae(){let e=B(),t=[],n=new Set;for(let o of e){let i=o.indexOf("/");i>0&&n.add(o.slice(0,i))}for(let o of ve)n.has(o.dir)&&t.push({pattern:o.pattern,comment:o.comment});for(let o of Re)for(let i of e)if(o.test.test(i)){t.push({pattern:o.pattern,comment:o.comment});break}return t}function 
le(e,t=H){let n=["# Generated by obscene init","# Edit this file to customize which files are excluded from analysis.","# Patterns use glob syntax (same as .gitignore).","# See: https://github.com/wbern/obscene#ignore-files",""];for(let o of t){n.push(`# ${o.title}`);for(let i of o.patterns)n.push(i.pattern);n.push("")}if(e.length>0){n.push("# Project-specific patterns");for(let o of e)n.push(`# ${o.comment}`),n.push(o.pattern);n.push("")}return n.join(`
|
|
11
|
+
`)}var Ee=10,X={inconclusive:0,weak:1,plausible:2,acceptable:3};function Oe(e){let t=Object.values(e).map(i=>i.confidence),n=t.length;if(n<2)return{level:"inconclusive",reason:`${n} input ranking \u2014 RRF requires \u2265 2 independent rankings.`,inputs:{metric:"inputRankings",value:n,thresholds:{weak:2,plausible:3,acceptable:4}},source:v.composite};let o="acceptable";for(let i of t)X[i.level]<X[o]&&(o=i.level);return{level:o,reason:`Composite inherits min-of-inputs across ${n} rankings (weakest: ${o.toUpperCase()}).`,inputs:{metric:"inputRankings",value:n,thresholds:{weak:2,plausible:3,acceptable:4}},source:v.composite}}function ue(e,t,n){let o=Object.keys(e).length,i=Oe(e),c=new Map;for(let s of Object.values(e))for(let u=0;u<s.entries.length;u++){let f=s.entries[u].file,h=1/(Ee+u+1),g=c.get(f);g?(g.score+=h,g.dims+=1):c.set(f,{score:h,dims:1})}let a=[];for(let[s,u]of c)a.push({file:s,score:Math.round(u.score*1e4)/1e4,percentOfTotal:0,tier:"cool",churn:t.get(s)??0,dimensionCount:u.dims});a.sort((s,u)=>u.score-s.score);let l=a.reduce((s,u)=>s+u.score,0);if(l===0)return{label:"Combined",scoreFormula:"reciprocal rank fusion across all dimensions",totalScore:0,tierCounts:{hot:0,warm:0,cool:0},totalDimensions:o,totalEntries:0,showing:0,entries:[],confidence:i};U(a,l);let r=n>0?a.slice(0,n):a,m={hot:0,warm:0,cool:0};for(let s of a)m[s.tier]++;return{label:"Combined",scoreFormula:"reciprocal rank fusion across all dimensions",totalScore:Math.round(l*1e4)/1e4,tierCounts:m,totalDimensions:o,totalEntries:a.length,showing:r.length,entries:r,confidence:i}}function pe(e){return I("commitsInWindow",e,R.coupling,v.coupling,t=>t==="inconclusive"?`${e} commits in window \u2014 need \u2265 ${R.coupling.weak} (matches code-maat's --min-revs default).`:`${e} commits in window (${t.toUpperCase()} sample size).`)}function me(e){try{let t=E(`git rev-list --count --since="${e} months ago" HEAD`,{stdio:["pipe","pipe","pipe"]});return parseInt(t.toString().trim(),10)||0}catch{throw new 
Error("Not a git repository or git is not installed.")}}import y from"picocolors";import S from"picocolors";var Ie=/\x1b\[[0-9;]*m/g;function Te(e){return e>=11904&&e<=12543||e>=12800&&e<=13311||e>=13312&&e<=40959||e>=44032&&e<=55215||e>=63744&&e<=64255||e>=65281&&e<=65376||e>=65504&&e<=65510||e>=9728&&e<=9983||e>=127744&&e<=129791||e>=131072&&e<=195103}function fe(e){let t=e.replace(Ie,""),n=0;for(let o of t){let i=o.codePointAt(0);i===65038||i===65039||(n+=Te(i)?2:1)}return n}function x(e,t){let n=fe(e);return n>=t?e:e+" ".repeat(t-n)}function d(e,t){let n=fe(e);return n>=t?e:" ".repeat(t-n)+e}function k(e,t){if(t<=0)return"";if(e.length<=t)return e;if(t===1)return"\u2026";let n=t-1,o=Math.ceil(n*.6),i=n-o;return`${e.slice(0,i)}\u2026${e.slice(e.length-o)}`}function D(e){return e==="hot"?S.red("\u{1F525} HOT "):e==="warm"?S.yellow("\u2600\uFE0F WARM"):S.blue("\u{1F9CA} COOL")}function M(e,t){return e==="hot"?S.red(t):e==="warm"?S.yellow(t):S.blue(t)}function F(e,t,n){let o=[];return o.push(`Tiers: ${S.red(`${e.hot} HOT`)}, ${S.yellow(`${e.warm} WARM`)}, ${S.blue(`${e.cool} COOL`)}`),o.push(`Showing: ${t} of ${n}`),o}var $e={inconclusive:y.gray,weak:y.yellow,plausible:y.cyan,acceptable:y.green};function G(e){let t=$e[e.level];return[t(`Confidence: ${e.level.toUpperCase()} \u2014 ${e.reason}`)]}var De=Object.fromEntries(W.map(e=>[e.key,e.label]));function ge(e){let t=[],{summary:n,files:o}=e;t.push(`Complexity Report \u2014 ${n.fileCount} files, ${n.totalComplexity} total complexity`),t.push(`Showing: ${n.showing} | Avg complexity/file: ${n.avgComplexityPerFile}`),t.push(""),t.push(x("File",60)+d("Code",8)+d("Complexity",12)+d("Density",9)+d("Comments",10)),t.push("\u2500".repeat(99));for(let i of o)t.push(x(k(i.file,58),60)+d(String(i.code),8)+d(String(i.complexity),12)+d(i.complexityDensity.toFixed(2),9)+d(String(i.comments),10));return t.push(""),t.push(y.dim("Complexity=cyclomatic branch/loop count | Density=complexity/code | Comments=comment 
lines")),t.push(y.dim("High complexity is expected for parsers, state machines, and business logic. Compare density across files, not raw values.")),t.push(y.dim("Docs: https://github.com/wbern/obscene#metrics")),t.join(`
|
|
12
|
+
`)}function Me(e){let t=[{header:"File",width:50,align:"left",value:i=>k(i.file,48)},{header:"Score",width:8,align:"right",value:i=>i.score.toLocaleString()},{header:"%",width:7,align:"right",value:i=>i.percentOfTotal.toFixed(1)},{header:"Churn",width:7,align:"right",value:i=>String(i.churn)}],n={complexity:[{header:"Cmplx",width:7,align:"right",value:i=>String(i.metricValue)},{header:"Dens",width:7,align:"right",value:i=>(i.metricDensity??0).toFixed(2)}],nesting:[{header:"Nest",width:6,align:"right",value:i=>String(i.metricValue)}],defects:[{header:"Fixes",width:6,align:"right",value:i=>String(i.metricValue)},{header:"FxDns",width:7,align:"right",value:i=>(i.metricDensity??0).toFixed(4)}],authors:[{header:"Auth",width:6,align:"right",value:i=>String(i.metricValue)}]},o={header:"Tier",width:12,align:"right",value:i=>D(i.tier)};return[...t,...n[e]??[],o]}var Fe={complexity:"\u{1F9EC}",nesting:"\u{1F4CF}",defects:"\u{1F527}",authors:"\u{1F465}"};function Le(e,t,n){let o=[],i=Me(e),c=Fe[e],a=c?`${c} `:"",l=t.label.toUpperCase().replace("CHURN","\u{1F504} CHURN");if(o.push(`${a}${l} \u2014 Total score: ${t.totalScore.toLocaleString()}`),o.push(...G(t.confidence)),n)for(let s of n.split(`
|
|
13
|
+
`))o.push(y.dim(s));o.push(...F(t.tierCounts,t.showing,t.totalEntries)),o.push("");let r=i.map(s=>s.align==="left"?x(s.header,s.width):d(s.header,s.width)).join("");o.push(r);let m=i.reduce((s,u)=>s+u.width,0);o.push("\u2500".repeat(m));for(let s of t.entries){let f=i.map(h=>{let g=h.value(s);return h.align==="left"?x(g,h.width):d(g,h.width)}).join("");o.push(M(s.tier,f))}return o}function de(e){let t=[],{churnWindow:n,rankings:o,corpus:i}=e;t.push(`Hotspots \u2014 ${n} churn window`),i&&i.fileCount>0&&i.totalComplexity===0&&(t.push(""),t.push(y.yellow("Note: no measurable code complexity detected across this corpus (cyclomatic = 0).")),t.push(y.yellow("Rankings reflect size and churn only \u2014 HOT/WARM/COOL are relative groupings, not risk labels."))),t.push("");let c=Object.keys(o);for(let l=0;l<c.length;l++){let r=c[l];t.push(...Le(r,o[r],e.guide[r])),l<c.length-1&&(t.push(""),t.push("\xB7 \xB7 \xB7"),t.push(""))}if(e.skipped)for(let[l,r]of Object.entries(e.skipped)){t.push("");let m=De[l]??`${l.charAt(0).toUpperCase()+l.slice(1)} \xD7 Churn`;t.push(`${m} \u2014 skipped (${r.reason})`),r.suggestion&&t.push(` ${r.suggestion}`)}t.push(""),t.push(y.dim("Score=metric\xD7churn | Tiers are relative to THIS codebase, not absolute quality grades."));let a=i!==void 0&&i.fileCount>0&&i.totalComplexity===0;return t.push(y.dim(a?"High scores flag files that change often and are sizable \u2014 neither is bad in itself.":"High scores flag review candidates, not bad code \u2014 stable complex files (parsers, engines) score high naturally.")),t.push(y.dim("Docs: https://github.com/wbern/obscene#metrics")),t.join(`
|
|
14
|
+
`)}function he(e){let t=[],{tierCounts:n,totalScore:o,churnWindow:i,couplings:c}=e;t.push(`Coupling \u2014 ${i} churn window | Min shared: ${e.minCochanges} | Total score: ${o.toLocaleString()}`),t.push(...G(e.confidence)),t.push(...F(n,e.showing,e.totalCouplings)),t.push(x("File 1",35)+x("File 2",35)+d("Shared",7)+d("Degree",8)+d("Cmplx",7)+d("Tier",12)),t.push("\u2500".repeat(104));let a=!1,l=!1;for(let r of c){(r.file1Deleted||r.file2Deleted)&&(a=!0),r.lockstep&&(l=!0);let m=r.file1Deleted?`\u2020 ${k(r.file1,31)}`:k(r.file1,33),s=r.file2Deleted?`\u2020 ${k(r.file2,31)}`:k(r.file2,33),u=r.lockstep?`${r.degree.toFixed(1)}\u21C4`:`${r.degree.toFixed(1)}%`,f=x(m,35)+x(s,35)+d(String(r.cochanges),7)+d(u,8)+d(String(r.totalComplexity),7)+d(D(r.tier),12);t.push(M(r.tier,f))}return t.push(""),t.push(y.dim("Shared=co-changed commits | Degree=shared/min(churn)\xD7100 | Cmplx=sum of both files")),a&&t.push(y.dim("\u2020 = file no longer present at HEAD (deleted or renamed)")),l&&t.push(y.dim("\u21C4 = lockstep pair (both files only ever changed together \u2014 signal is real but uninformative)")),t.push(y.dim("Tiers are relative to THIS codebase, not absolute quality grades. High coupling may be intentional and fine.")),t.push(y.dim("Same-directory pairs excluded. Commits touching >20 files skipped. Only cross-directory dependencies shown.")),t.push(y.dim("Docs: https://github.com/wbern/obscene#metrics")),t.join(`
|
|
15
|
+
`)}function ye(e){let t=[];t.push("\u2550".repeat(84)),t.push(`\u2605 ${e.label.toUpperCase()} \u2014 Total score: ${e.totalScore.toLocaleString()}`),t.push(...G(e.confidence)),t.push(...F(e.tierCounts,e.showing,e.totalEntries)),t.push(""),t.push(x("File",50)+d("Score",9)+d("Churn",7)+d("Dims",6)+d("Tier",12)),t.push("\u2500".repeat(84));for(let n of e.entries){let o=x(k(n.file,48),50)+d(n.score.toFixed(4),9)+d(String(n.churn),7)+d(`${n.dimensionCount}/${e.totalDimensions}`,6)+d(D(n.tier),12);t.push(M(n.tier,o))}return t.join(`
|
|
16
|
+
`)}var O=new Ae;O.name("obscene").description("Identify hotspot files \u2014 complex code that changes frequently").version("2.2.1");var _e={complexity:"Cyclomatic complexity (branch/loop count). NOT a quality judgment \u2014 a 500-line parser will naturally score high. Compare density, not raw values.",complexityDensity:"Complexity per line of code. Normalizes for file size. >0.25 suggests dense logic worth reviewing; <0.10 is typical for straightforward code.",comments:"Comment line count. Low comments in high-density files may indicate under-documented logic. High comments alone is not a problem."},je={rankings:"Four independent ranking tables, each scoring files by a different metric \xD7 churn. A file may rank high in one dimension but not others.",complexity:`complexity \xD7 churn. Complex code that changes often poses maintenance risk.
|
|
17
|
+
Source: McCabe cyclomatic complexity (1976) via scc \xB7 Strength: objective, language-agnostic \xB7 Limit: parsers and state machines score high naturally`,nesting:`maxNesting \xD7 churn. Deeply nested code that changes often is harder to reason about.
|
|
18
|
+
Source: cognitive complexity research (SonarSource, G. Ann Campbell 2018) \xB7 Strength: catches hard-to-follow control flow \xB7 Limit: some patterns (error chains, config) legitimately nest deep`,defects:`fixes \xD7 churn. Count of fix: commits touching the file \xD7 churn. High values can mean latent fragility, but they also flag features that got debugged thoroughly \u2014 read the fix-commit history before concluding which.
|
|
19
|
+
Source: change-history metrics (Moser, Pedrycz & Succi 2008) via conventional commits (fix: prefix) \xB7 Strength: direct fix-history signal \xB7 Limit: counts fix activity, not defects per se; requires consistent fix: convention`,authors:`authors \xD7 churn. Files touched by many authors and changing often may lack clear ownership.
|
|
20
|
+
Source: code ownership research (Bird et al. 2011, Microsoft) \xB7 Strength: flags diffuse ownership risk \xB7 Limit: doesn't measure expertise depth, bot authors filtered automatically`,composite:`Combined ranking using Reciprocal Rank Fusion (RRF) across all dimensions. Files appearing near the top of multiple rankings score highest.
|
|
21
|
+
Source: RRF (Cormack et al. 2009) \xB7 Strength: robust to outliers, no normalization needed \xB7 Limit: equal weight across all dimensions`,tier:"Relative ranking within THIS codebase (top 50% = hot, next 30% = warm, bottom 20% = cool). NOT an absolute quality grade \u2014 a hot file is under heavy load, not necessarily broken.",corpus:"Aggregate stats for the analyzed file set (post-exclude \u2014 files filtered by .obsignore or --exclude are not counted). When totalComplexity is 0, the rankings reflect size and churn only; HOT/WARM/COOL become relative groupings rather than risk labels.",confidence:"Epistemic stamp on each ranking \u2014 INCONCLUSIVE / WEAK / PLAUSIBLE / ACCEPTABLE. These are engineering-judgment sample-size tiers, with the weak floor for defects matching code-maat's --min-revs default of 5. ACCEPTABLE is the ceiling \u2014 the tool never claims certainty about code quality, only that the sample supports the ranking. INCONCLUSIVE rankings are surfaced under skipped rather than ranked."},He={cochanges:"Times both files appeared in the same commit. Higher values suggest a dependency between the files. Same-directory pairs are excluded \u2014 only cross-directory pairs are shown.",degree:"Percentage: shared commits / min(churn of file1, file2) \xD7 100. Shows how tightly coupled the pair is relative to their individual change rates. 100% means every change to the less-active file also touched the other.",totalComplexity:"Sum of both files' cyclomatic complexity. Highlights coupled pairs where the involved code is also complex \u2014 hidden dependency + high complexity compounds maintenance risk.",tier:"Relative ranking within THIS codebase's coupling pairs (top 50% = hot, next 30% = warm, bottom 20% = cool). NOT an absolute quality grade. 'hot' means this pair co-changes more than most \u2014 it may be intentional and fine.",deleted:"file1Deleted / file2Deleted are set when the file is no longer present at HEAD (deleted or renamed away). 
The coupling signal is historical \u2014 the pair is not actionable in the current tree.",lockstep:"Set when shared commits / max(churn) \u2265 0.9 \u2014 both files almost always change together over the window. Typical of generator/mirror pairs (README \u2194 src/README, *.pb.go \u2194 *.proto). The coupling signal is real but uninformative; treat the pair as a single unit from git's perspective.",confidence:"Epistemic stamp on the coupling table \u2014 INCONCLUSIVE / WEAK / PLAUSIBLE / ACCEPTABLE. Tied to the number of commits in the analysis window. The weak floor of 5 matches code-maat's --min-revs default (Adam Tornhill); higher tiers are engineering judgment. ACCEPTABLE means the sample supports the ranking; it never asserts the couplings themselves are bad."};function z(e){return e.option("--top <n>","limit to top N entries (0 = all)","20").option("--format <type>","output format: json | table","json").option("--exclude <patterns...>","additional file patterns to exclude (also reads .obsignore / .obsceneignore)")}z(O.command("report").description("per-file complexity data")).action(e=>{try{Pe(e)}catch(t){N(t)}});z(O.command("hotspots",{isDefault:!0}).description("churn \xD7 complexity hotspot analysis (default)")).option("--months <n>","churn window in months","3").action(e=>{try{Ue(e)}catch(t){N(t)}});z(O.command("coupling").description("temporal coupling \u2014 files that change together across directories")).option("--months <n>","churn window in months","3").option("--min-cochanges <n>","minimum shared commits to include","2").action(e=>{try{We(e)}catch(t){N(t)}});O.command("init").description("generate a starter .obsignore based on project structure").action(()=>{try{Be()}catch(e){N(e)}});function V(e){return[...Q(),...e??[]]}function q(){!L(".obsignore")&&!L(".obsceneignore")&&process.stderr.write("hint: no .obsignore found \u2014 run `obscene init` to generate one with recommended exclusions\n")}function Pe(e){q();let 
t=parseInt(e.top,10),n=V(e.exclude),o=$(n),i=o.reduce((l,r)=>({totalComplexity:l.totalComplexity+r.complexity,totalCode:l.totalCode+r.code,totalLines:l.totalLines+r.lines}),{totalComplexity:0,totalCode:0,totalLines:0}),c=t>0?o.slice(0,t):o,a={generated:new Date().toISOString(),guide:_e,summary:{...i,fileCount:o.length,avgComplexityPerFile:o.length>0?Math.round(i.totalComplexity/o.length*10)/10:0,showing:c.length},files:c};e.format==="table"?process.stdout.write(`${ge(a)}
|
|
22
|
+
`):process.stdout.write(`${JSON.stringify(a,null,2)}
|
|
23
|
+
`)}function Ue(e){q();let t=parseInt(e.top,10),n=parseInt(e.months,10),o=V(e.exclude),i=$(o),c=P(n),a=ne(n),l=oe(n),r=ce(i.map(g=>g.file)),{rankings:m,skipped:s}=re(i,c,a,r,l,t),u=ue(m,c,t),f=0;for(let g of i)f+=g.complexity;let h={generated:new Date().toISOString(),guide:je,churnWindow:`${n} months`,rankings:m,skipped:Object.keys(s).length>0?s:void 0,composite:u,corpus:{fileCount:i.length,totalComplexity:f}};e.format==="table"?(process.stdout.write(`${de(h)}
|
|
24
|
+
`),u.entries.length>0&&process.stdout.write(`
|
|
25
|
+
${ye(u)}
|
|
26
|
+
`)):process.stdout.write(`${JSON.stringify(h,null,2)}
|
|
27
|
+
`)}function We(e){q();let t=parseInt(e.top,10),n=parseInt(e.months,10),o=parseInt(e.minCochanges,10),i=V(e.exclude),c=$(i),a=P(n),l=ie(n,i),r=new Map;for(let b of c)r.set(b.file,b.complexity);let m=B(),s=se(l,a,r,o,m),u=t>0?s.slice(0,t):s,f={hot:0,warm:0,cool:0};for(let b of s)f[b.tier]++;let h=s.reduce((b,p)=>b+p.couplingScore,0),g={generated:new Date().toISOString(),guide:He,churnWindow:`${n} months`,minCochanges:o,totalScore:h,tierCounts:f,totalCouplings:s.length,showing:u.length,couplings:u,confidence:pe(me(n))};e.format==="table"?process.stdout.write(`${he(g)}
|
|
28
|
+
`):process.stdout.write(`${JSON.stringify(g,null,2)}
|
|
29
|
+
`)}function Be(){if(L(".obsignore"))throw new Error(".obsignore already exists. Remove it first to regenerate.");if(L(".obsceneignore"))throw new Error(".obsceneignore already exists. Remove it first to regenerate.");let e=ae(),t=le(e);Ne(".obsignore",t);let n=H.reduce((o,i)=>o+i.patterns.length,0);if(process.stderr.write(`Created .obsignore with ${n} universal exclusions`),e.length>0){process.stderr.write(` + ${e.length} detected patterns:
|
|
30
|
+
`);for(let o of e)process.stderr.write(` ${o.pattern.padEnd(20)} ${o.comment}
|
|
31
|
+
`)}else process.stderr.write(` (no project-specific patterns detected)
|
|
32
|
+
`)}function N(e){let t=e instanceof Error?e.message:String(e);process.stderr.write(`Error: ${t}
|
|
33
|
+
`),process.exit(1)}O.parse();
|