@wbern/obscene 2.3.0 → 2.4.0
This diff compares the publicly released contents of the two package versions as they appear in their respective public registries. It is provided for informational purposes only.
- package/README.md +20 -20
- package/dist/cli.js +31 -29
- package/package.json +1 -1
package/README.md
CHANGED
@@ -1,27 +1,19 @@
-
+<p align="center">
+<picture>
+<source media="(prefers-color-scheme: dark)" srcset="./logo-dark.svg">
+<img src="./logo.svg" width="160" alt="obscene logo">
+</picture>
+</p>
 
-
-_==/ i i \==_
-/XX/ |\___/| \XX\
-/XXXX\ |XXXXX| /XXXX\
-|XXXXXX\_ _XXXXXXX_ _/XXXXXX|
-XXXXXXXXXXXxxxxxxxXXXXXXXXXXXxxxxxxxXXXXXXXXXXX
-|XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX|
-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-|XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX|
-XXXXXX/^^^^"\XXXXXXXXXXXXXXXXXXXXX/^^^^^\XXXXXX
-|XXX| \XXX/^^\XXXXX/^^\XXX/ |XXX|
-\XX\ \X/ \XXX/ \X/ /XX/
-"\ " \X/ " /"
-```
+<h1 align="center">@wbern/obscene</h1>
 
-
+<p align="center">Find hotspot files — complex code that changes frequently.</p>
 
 Combines [scc](https://github.com/boyter/scc) cyclomatic complexity with git churn to surface files that are both complex AND actively modified. Based on Adam Tornhill's *Your Code as a Crime Scene*.
 
 Works on any language scc supports. No configuration needed.
 
-[![npm version](https://img.shields.io/npm/v/@wbern/obscene)](https://www.npmjs.com/package/@wbern/obscene)
 
 > 💬 **Tried it on your codebase?** Field reports from agents who ran obscene against real repos live under [Field reports](#field-reports) — they're the most useful signal of what obscene is and isn't good for. After you've run it, please add yours: [CONTRIBUTING.md](./CONTRIBUTING.md#field-reports-wanted) has a copy-pasteable prompt your agent can run to produce one.
 
@@ -79,7 +71,7 @@ Produces **four independent ranking tables**, each scoring files by a different
 | Complexity × Churn | `complexity × churn` | Cmplx, Dens |
 | Nesting × Churn | `maxNesting × churn` | Nest |
 | Fix Activity × Churn | `fixes × churn` | Fixes, FxDns |
-| Authors × Churn | `authors × churn` | Auth |
+| Authors × Churn | `authors × churn` | Auth, MinAuth |
 
 Plus a **Combined** ranking using [Reciprocal Rank Fusion](https://doi.org/10.1145/1571941.1572114) (RRF) across all dimensions — files appearing near the top of multiple rankings score highest.
 
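The arithmetic behind those tables is compact enough to sketch. The following is an illustrative TypeScript reading of the published bundle, not the package's public API (the names `rankByMetric` and `fuseRRF` are invented here); the constants do match the shipped code: tier cuts at 50% and 80% of cumulative score, and an RRF constant of k = 10.

```ts
// Illustrative sketch of obscene's ranking math (invented names, not the
// package's API; constants mirror the published bundle).

type Entry = { file: string; score: number; tier: "hot" | "warm" | "cool" };

// One ranking table: score = metric × churn; tiers are relative shares of
// the table's total score, not absolute quality grades.
function rankByMetric(metric: Map<string, number>, churn: Map<string, number>): Entry[] {
  const entries: Entry[] = [];
  for (const [file, m] of metric) {
    const score = m * (churn.get(file) ?? 0);
    if (score > 0) entries.push({ file, score, tier: "cool" });
  }
  entries.sort((a, b) => b.score - a.score);
  const total = entries.reduce((sum, e) => sum + e.score, 0);
  let cumulative = 0;
  for (const e of entries) {
    cumulative += e.score;
    const share = cumulative / total; // cumulative share including this entry
    e.tier = share <= 0.5 ? "hot" : share <= 0.8 ? "warm" : "cool";
  }
  return entries;
}

// Combined table: Reciprocal Rank Fusion. Each input ranking contributes
// 1 / (k + rank) per file it ranks (rank is 1-based).
function fuseRRF(rankings: Entry[][], k = 10): Map<string, number> {
  const fused = new Map<string, number>();
  for (const table of rankings) {
    table.forEach((e, i) => {
      fused.set(e.file, (fused.get(e.file) ?? 0) + 1 / (k + i + 1));
    });
  }
  return fused;
}
```

Because every table contributes 1/(k + rank), a file that sits mid-table in several dimensions can outrank one that tops a single table, which is the point of the fusion.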
@@ -153,11 +145,17 @@ The literature in [Why churn × complexity?](#why-churn-x-complexity) talks abou
 
 #### Nesting depth (`Nest`)
 
-Maximum indentation level (tab stops) in the file. Deep nesting correlates with high cognitive load and defect likelihood. Harrison & Magel (1981) identified nesting depth as a significant complexity contributor. The indent unit is detected from the most common positive delta between consecutive non-blank line indents, which keeps single-space outlier lines (multiline strings, continuation alignment) from inflating the score. The metric measures whitespace depth, not AST control-flow depth — they usually agree, but a file with deep alignment and shallow logic can read higher than its true nesting.
+Maximum indentation level (tab stops) in the file. Deep nesting correlates with high cognitive load and defect likelihood. Harrison & Magel (1981) identified nesting depth as a significant complexity contributor. The indent unit is detected from the most common positive delta between consecutive non-blank line indents, which keeps single-space outlier lines (multiline strings, continuation alignment) from inflating the score. The metric measures whitespace depth, not AST control-flow depth — they usually agree, but a file with deep alignment and shallow logic can read higher than its true nesting. Files where scc reports zero cyclomatic complexity are excluded from the Nesting × Churn ranking: their indentation is structural (YAML, JSON, templates) rather than control flow, so a deep `Nest` reading isn't evidence of branching difficulty.
 
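The indent-unit heuristic described in that paragraph is easier to trust once seen concretely. A minimal sketch, assuming invented names (`maxNesting` is not an exported API) and the bundle's conventions: tabs count as one level each, and the space unit is the most common positive indent delta, with ties going to the smaller candidate.

```ts
// Sketch of the Nest heuristic described above (illustrative; the real
// implementation ships minified in dist/cli.js).
function maxNesting(source: string, fallbackUnit = 4): number {
  const indents: string[] = [];
  const deltas = new Map<number, number>();
  let prev = 0;
  for (const line of source.split("\n")) {
    if (!line.trim()) continue;          // blank lines carry no indent signal
    const m = line.match(/^(\s+)/);
    if (!m) { prev = 0; continue; }      // a column-0 line resets the baseline
    indents.push(m[1]);
    const width = m[1].length;
    const delta = width - prev;
    if (delta > 0) deltas.set(delta, (deltas.get(delta) ?? 0) + 1);
    prev = width;
  }
  // Most common positive delta wins; ties prefer the smaller unit.
  let unit = fallbackUnit, best = 0;
  for (const [d, count] of deltas) {
    if (count > best || (count === best && d < unit)) { best = count; unit = d; }
  }
  let deepest = 0;
  for (const indent of indents) {
    let depth = 0;
    for (const ch of indent) depth += ch === "\t" ? 1 : 1 / unit;
    deepest = Math.max(deepest, Math.floor(depth));
  }
  return deepest;
}
```

A stray one-column delta from a continuation line is outvoted by the dominant two- or four-space delta, which is what keeps single-space outliers from inflating the score.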
 #### Unique authors (`Auth`)
 
-Number of distinct git authors who committed to the file within the churn window. Bot authors (names ending in `[bot]`, e.g. `dependabot[bot]`) are excluded automatically. Files touched by many authors may lack clear ownership and accumulate inconsistent patterns. Kamei et al. (2013) found developer count to be a significant predictor of defect-introducing changes.
+Number of distinct git authors who committed to the file within the churn window. Bot authors (names ending in `[bot]`, e.g. `dependabot[bot]`) are excluded automatically. Files touched by many authors may lack clear ownership and accumulate inconsistent patterns. Kamei et al. (2013) found developer count to be a significant predictor of defect-introducing changes. `Co-authored-by:` trailers are folded into the author set so squash-merge workflows aren't undercounted.
+
+#### Minor authors (`MinAuth`)
+
+Number of contributors with strictly less than 5% of a file's commits within the churn window. Bird et al. (FSE 2011) found that a high minor-author count correlates with elevated post-release defects after controlling for size, churn, and complexity — the intuition being that drive-by contributors are less likely to internalize the file's invariants. The 5% cutoff is the canonical value from the original paper; a recent OSS replication (arXiv:2312.10861, 2023) found 10% to be more stable, so treat the absolute number as directional rather than definitive. Files with fewer than 2 commits in the window render as `—` rather than 0: there are too few commits to call any contributor *minor* vs *the only one*, a floor borrowed from Greiler et al. (MSR 2015).
+
+**Limitation.** Greiler et al.'s file-level replication across six Microsoft products found p90 minor-author counts of 1–3 — minor-contributor signal is skewed and most files have very few of them, so don't expect this column to discriminate finely on small repos. Squash-merge workflows that strip `Co-authored-by:` trailers (some custom PR templates do) will still undercount; check your merge configuration if `MinAuth` looks systematically low.
 
 ### Coupling metrics
 
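The MinAuth rule itself reduces to a few lines. A sketch with an invented name; the constants (5% cutoff, 2-commit floor) match the published bundle, which derives the per-file author counts from `git log --format="COMMIT_SEP%n%aN%x09%(trailers:key=Co-authored-by,valueonly,separator=%x09)" --name-only` and drops author names ending in `[bot]`.

```ts
// Sketch of the MinAuth computation (invented name; constants match the
// bundle: 5% cutoff, minimum 2 commits before anyone can be "minor").
function minorAuthors(commitsByAuthor: Map<string, number>): number | null {
  let total = 0;
  for (const n of commitsByAuthor.values()) total += n;
  if (total < 2) return null;        // rendered as "—": too few commits to judge
  const cutoff = total * 0.05;       // Bird et al.'s canonical 5% threshold
  let minor = 0;
  for (const n of commitsByAuthor.values()) {
    if (n < cutoff) minor++;         // strictly below 5% of the file's commits
  }
  return minor;
}
```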
@@ -225,6 +223,8 @@ The thresholds are engineering judgment, not paper-prescribed. The defect/coupli
 
 I want to be transparent: an earlier release of this section over-attributed thresholds to specific papers. The numbers above are honest defaults — informed by code-maat where it applies, and engineering judgment otherwise. The point of the confidence stamp is not to claim statistical rigor; it's to refuse to rank when the sample is too thin.
 
+When the git history is shorter than the requested `--months` window, obscene prints a one-line stderr banner (`warning: git history covers ~Xd, but --months window is Yd ...`) and exposes a `historyCoverage` block in JSON output. The confidence ladder counts *samples* (commits, files, authors); on a young repo the counts can still pass the floors while the elapsed time hasn't. Treat ACCEPTABLE under this banner as count-based, not time-based, trust.
+
 Every confidence stamp in JSON exposes its inputs so the rating is auditable:
 
 ```json
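The hunk ends at the opening fence of that JSON example, so the block itself is not part of this diff. For orientation, the `historyCoverage` object added in 2.4.0 has this shape; the field names are read from the published bundle, the values here are invented:

```json
"historyCoverage": { "windowDays": 90, "spanDays": 41, "underCovered": true }
```

`windowDays` is the requested window in days (`--months` × 30), `spanDays` is the time elapsed since the repository's first commit, and `underCovered` is the predicate `spanDays < windowDays` that also fires the stderr banner.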
package/dist/cli.js
CHANGED
@@ -1,33 +1,35 @@
Context (old line 1 = new line 1):
#!/usr/bin/env node
Removed (old lines 2-16; the viewer truncated these long minified lines):
import{existsSync as L,writeFileSync as
`).map(n=>n.trim()).filter(n=>n!==""&&!n.startsWith("#"))}catch(t){if(t&&typeof t=="object"&&"code"in t&&t.code==="ENOENT")continue;throw t}return[]}var
`)){let
`);for(let
`),
`);for(let
`)){let
`)){let
`)){if(!
`)}var

`))o
`)
`)}function
`)}
Added (new lines 2-17, full lines; a break inside a line is a literal newline embedded in a template string):
import{existsSync as L,writeFileSync as We}from"fs";import{Command as Ge}from"commander";import{execSync as E}from"child_process";import{readFileSync as Q}from"fs";var Se=[".obsignore",".obsceneignore"];function Z(){for(let e of Se)try{return Q(e,"utf-8").split(`
`).map(n=>n.trim()).filter(n=>n!==""&&!n.startsWith("#"))}catch(t){if(t&&typeof t=="object"&&"code"in t&&t.code==="ENOENT")continue;throw t}return[]}var U=[{title:"Test files and test infrastructure",patterns:[{pattern:"*.test.*",comment:"Unit test files"},{pattern:"*.spec.*",comment:"Spec test files"},{pattern:"*.integration.test.*",comment:"Integration tests"},{pattern:"test-setup.*",comment:"Test setup files"},{pattern:"test-utils.*",comment:"Test utility files"},{pattern:"test-helpers.*",comment:"Test helper files"},{pattern:"__tests__/**",comment:"Test directories"},{pattern:"__mocks__/**",comment:"Mock directories"},{pattern:"*.stories.*",comment:"Storybook stories"},{pattern:"*.d.ts",comment:"TypeScript declaration files"}]},{title:"Lock files and package manifests",patterns:[{pattern:"package.json",comment:"npm package manifest"},{pattern:"package-lock.json",comment:"npm lock file"},{pattern:"pnpm-lock.yaml",comment:"pnpm lock file"},{pattern:"yarn.lock",comment:"Yarn lock file"},{pattern:"bun.lock",comment:"Bun lock file"}]}],ke=.5,ve=.8,H=5,j=3,I={complexity:{weak:3,plausible:10,acceptable:30},nesting:{weak:3,plausible:10,acceptable:30},defects:{weak:5,plausible:15,acceptable:50},authors:{weak:2,plausible:4,acceptable:8},coupling:{weak:5,plausible:30,acceptable:100}},R={complexity:"Engineering judgment: any rank ordering needs \u2265 3 items to be meaningful; higher tiers scale from there. No paper prescribes these exact cutoffs.",nesting:"Engineering judgment, informed by Campbell (SonarSource 2018) Cognitive Complexity which assigns a compounding penalty per nesting level. The 3/10/30 sample-count tiers are not from the paper.",defects:"code-maat's --min-revs default of 5 (Adam Tornhill); higher tiers are engineering judgment. Gall et al. (IWPSE 2003) and Hassan (ICSE 2009) study co-change and change-entropy but do not prescribe a specific commit-count floor.",authors:"Engineering judgment. Bird et al. (FSE 2011) Don't Touch My Code! shows minor contributors (< 5% of commits) correlate with elevated defects, motivating attention to contributor count \u2014 but the 2/4/8 tiers here are not from the paper.",coupling:"code-maat defaults (--min-revs 5, --max-changeset-size 30, Adam Tornhill). CodeScene's documented temporal-coupling default filters files with fewer than 10 commits. The 30/100 upper tiers are engineering judgment.",composite:"Reciprocal Rank Fusion (Cormack et al., SIGIR 2009) fuses multiple independent rankings; min-of-inputs is a strict monotone aggregator \u2014 when every input ranking is at confidence level L, the composite cannot exceed L."};function M(e,t,n,i,o){let s;return t<n.weak?s="inconclusive":t<n.plausible?s="weak":t<n.acceptable?s="plausible":s="acceptable",{level:s,reason:o(s),inputs:{metric:e,value:t,thresholds:n},source:i}}function ee(e,t){return t.some(n=>n.test(e))}function te(e){let t=e.replace(/[.+^${}()|[\]\\]/g,"\\$&").replace(/\*\*/g,"\u27E8GLOBSTAR\u27E9").replace(/\*/g,"[^/]*").replace(/⟨GLOBSTAR⟩/g,".*").replace(/\?/g,".");return new RegExp(t)}function $(e){let t=e.replaceAll("\\","/");return t.startsWith("./")?t.slice(2):t}function D(e=[]){let t=e.map(te),n;try{n=E("scc --by-file --format json --no-cocomo --no-gen",{maxBuffer:50*1024*1024,stdio:["pipe","pipe","pipe"]})}catch(s){throw s&&typeof s=="object"&&"code"in s&&s.code==="ENOENT"?new Error("scc not found. 
Install it: https://github.com/boyter/scc#install"):s}let i=JSON.parse(n.toString()),o=[];for(let s of i)for(let c of s.Files){let m=$(c.Location);ee(m,t)||o.push({file:m,code:c.Code,lines:c.Lines,complexity:c.Complexity,comments:c.Comment,complexityDensity:c.Code>0?Math.round(c.Complexity/c.Code*100)/100:0})}return o.sort((s,c)=>c.complexity-s.complexity)}function ne(e,t){let n;try{n=E(e,{maxBuffer:50*1024*1024,stdio:["pipe","pipe","pipe"]})}catch{throw new Error(t)}let i=new Map;for(let o of n.toString().split(`
`)){let s=$(o.trim());s&&i.set(s,(i.get(s)??0)+1)}return i}function P(e){return ne(`git log --since="${e} months ago" --format="" --name-only`,"Not a git repository or git is not installed.")}function oe(e){return ne(`git log --since="${e} months ago" --grep="^fix" --format="" --name-only`,"Not a git repository or git is not installed.")}function Re(e){let t=new Set,n=[];for(let i of e.split(" ")){let o=i.trim();if(!o)continue;let s=o.match(/^(.+?)\s*<[^>]+>\s*$/),c=(s?s[1]:o).trim();!c||c.endsWith("[bot]")||t.has(c)||(t.add(c),n.push(c))}return n}function ie(e){let t;try{t=E(`git log --since="${e} months ago" --format="COMMIT_SEP%n%aN%x09%(trailers:key=Co-authored-by,valueonly,separator=%x09)" --name-only`,{maxBuffer:50*1024*1024,stdio:["pipe","pipe","pipe"]})}catch{throw new Error("Not a git repository or git is not installed.")}let n=new Map,i=t.toString().split(`COMMIT_SEP
`);for(let o of i){if(!o.trim())continue;let s=o.split(`
`),c=Re(s[0]);if(c.length!==0)for(let m=1;m<s.length;m++){let r=$(s[m].trim());if(!r)continue;let l=n.get(r);l||(l=new Map,n.set(r,l));for(let a of c)l.set(a,(l.get(a)??0)+1)}}return n}var Ee=20;function re(e,t=[]){let n=t.map(te),i;try{i=E(`git log --since="${e} months ago" --format="COMMIT_SEP%n" --name-only`,{maxBuffer:50*1024*1024,stdio:["pipe","pipe","pipe"]})}catch{throw new Error("Not a git repository or git is not installed.")}let o=new Map,s=i.toString().split(`COMMIT_SEP
`);for(let c of s){if(!c.trim())continue;let m=new Set;for(let l of c.split(`
`)){let a=$(l.trim());a&&(ee(a,n)||m.add(a))}let r=[...m];if(!(r.length<2||r.length>Ee))for(let l=0;l<r.length;l++)for(let a=l+1;a<r.length;a++){let[p,f]=r[l]<r[a]?[r[l],r[a]]:[r[a],r[l]],g=p.includes("/")?p.slice(0,p.lastIndexOf("/")):"",h=f.includes("/")?f.slice(0,f.lastIndexOf("/")):"";if(g===h)continue;let b=`${p}\0${f}`;o.set(b,(o.get(b)??0)+1)}}return o}function B(e,t){let n=0;for(let i of e){i.percentOfTotal=Math.round(i.score/t*1e3)/10,n+=i.score;let o=n/t;o<=ke?i.tier="hot":o<=ve?i.tier="warm":i.tier="cool"}}var W=[{key:"complexity",label:"Complexity \xD7 Churn",scoreFormula:"complexity \xD7 churn"},{key:"nesting",label:"Nesting \xD7 Churn",scoreFormula:"maxNesting \xD7 churn"},{key:"defects",label:"Fix Activity \xD7 Churn",scoreFormula:"fixes \xD7 churn"},{key:"authors",label:"Authors \xD7 Churn",scoreFormula:"authors \xD7 churn"}];function Oe(e,t,n,i){let o=e.map(c=>{let m=t.get(c.file)??0,r=n(c);return{file:c.file,score:r*m,percentOfTotal:0,tier:"cool",churn:m,metricValue:r,metricDensity:i?i(c):void 0}}).filter(c=>c.score>0).sort((c,m)=>m.score-c.score),s=o.reduce((c,m)=>c+m.score,0);return s===0?[]:(B(o,s),o)}var Ie=.05,Te=2;function Me(e){if(!e||e.size===0)return null;let t=0;for(let o of e.values())t+=o;if(t<Te)return null;let n=t*Ie,i=0;for(let o of e.values())o<n&&i++;return i}function se(e,t,n,i,o,s,c){let m={complexity:{extract:u=>u.complexity,density:u=>u.complexityDensity},nesting:{extract:u=>u.complexity===0?0:i.get(u.file)??0},defects:{extract:u=>n.get(u.file)??0,density:u=>{let x=n.get(u.file)??0;return u.code>0?Math.round(x/u.code*1e4)/1e4:0}},authors:{extract:u=>o.get(u.file)??0}},r={},l={},a=0;for(let u of e)u.complexity>0&&a++;l.complexity=M("filesWithComplexity",a,I.complexity,R.complexity,u=>u==="inconclusive"?`${a} files with measurable complexity \u2014 not enough to rank.`:`${a} files with measurable complexity (${u.toUpperCase()} sample size).`);let p=0;for(let u of e)u.complexity>0&&(i.get(u.file)??0)>=3&&p++;l.nesting=M("filesWithNesting>=3",p,I.nesting,R.nesting,u=>u==="inconclusive"?`${p} files with nesting depth \u2265 3 \u2014 not enough to rank.`:`${p} files with nesting depth \u2265 3 (${u.toUpperCase()} sample size).`);let f=[...n.values()].reduce((u,x)=>u+x,0),g=n.size,h=f<H||g<j;l.defects=M("fixCommits",f,I.defects,R.defects,u=>u==="inconclusive"||h?`${f} fix: commits across ${g} files \u2014 need \u2265 ${H} commits across \u2265 ${j} files (matches code-maat's --min-revs default).`:`${f} fix: commits across ${g} files (${u.toUpperCase()} sample size).`),h&&(l.defects={...l.defects,level:"inconclusive"},r.defects={reason:`insufficient data (${f} fix: commits across ${g} files, need ${H}+ commits across ${j}+ files)`,suggestion:"Adopt conventional commits with fix: prefix. 
See conventionalcommits.org",confidence:l.defects});let b=0;for(let u of o.values())u>b&&(b=u);l.authors=M("maxAuthors",b,I.authors,R.authors,u=>u==="inconclusive"?`${b} distinct authors on the most-touched file \u2014 not enough to rank ownership.`:`${b} distinct authors on the most-touched file (${u.toUpperCase()} sample size).`),b<=1&&(l.authors={...l.authors,level:"inconclusive"},r.authors={reason:"all files have the same author count \u2014 no variance to rank",confidence:l.authors});let C={};for(let u of W){if(r[u.key])continue;if(l[u.key].level==="inconclusive"){r[u.key]={reason:l[u.key].reason,confidence:l[u.key]};continue}let x=m[u.key],v=Oe(e,t,x.extract,x.density);if(v.length===0)continue;if(u.key==="authors"&&c)for(let O of v)O.minorAuthors=Me(c.get(O.file));let J=s>0?v.slice(0,s):v,Y={hot:0,warm:0,cool:0};for(let O of v)Y[O.tier]++;C[u.key]={label:u.label,scoreFormula:u.scoreFormula,totalScore:v.reduce((O,xe)=>O+xe.score,0),tierCounts:Y,totalEntries:v.length,showing:J.length,entries:J,confidence:l[u.key]}}return{rankings:C,skipped:r}}function G(){let e;try{e=E("git ls-files",{maxBuffer:50*1024*1024,stdio:["pipe","pipe","pipe"]})}catch{throw new Error("Not a git repository or git is not installed.")}let t=new Set;for(let n of e.toString().split(`
`)){let i=$(n.trim());i&&t.add(i)}return t}function ce(e,t,n,i,o){let s=[];for(let[r,l]of e){if(l<i)continue;let[a,p]=r.split("\0"),f=t.get(a)??0,g=t.get(p)??0,h=Math.min(f,g),b=h>0?Math.round(l/h*1e3)/10:0,C=(n.get(a)??0)+(n.get(p)??0),u={file1:a,file2:p,cochanges:l,degree:b,totalComplexity:C,couplingScore:l,percentOfTotal:0,tier:"cool"},x=Math.max(f,g);l>0&&x>0&&l/x>=.9&&(u.lockstep=!0),o&&(o.has(a)||(u.file1Deleted=!0),o.has(p)||(u.file2Deleted=!0)),s.push(u)}s.sort((r,l)=>l.couplingScore-r.couplingScore);let c=s.reduce((r,l)=>r+l.couplingScore,0);if(c===0)return[];let m=s.map(r=>({...r,score:r.couplingScore}));B(m,c);for(let r=0;r<s.length;r++)s[r].percentOfTotal=m[r].percentOfTotal,s[r].tier=m[r].tier;return s}function ae(e){let t=new Map;for(let n of e){let i;try{i=Q(n,"utf-8")}catch{t.set(n,0);continue}let o=[],s=new Map,c=0;for(let a of i.split(`
`)){if(!a.trim())continue;let p=a.match(/^(\s+)/);if(!p){c=0;continue}let f=p[1];if(o.push(f),f.includes(" "))continue;let g=f.length,h=g-c;h>0&&s.set(h,(s.get(h)??0)+1),c=g}let m=4,r=0;for(let[a,p]of s)(p>r||p===r&&a<m)&&(r=p,m=a);let l=0;for(let a of o){let p=0;for(let f of a)f===" "?p+=1:f===" "&&(p+=1/m);p=Math.floor(p),p>l&&(l=p)}t.set(n,l)}return t}var $e=[{dir:".github",pattern:".github/**",comment:"GitHub Actions and workflows"},{dir:".circleci",pattern:".circleci/**",comment:"CircleCI configuration"},{dir:".husky",pattern:".husky/**",comment:"Git hooks"},{dir:".vscode",pattern:".vscode/**",comment:"VS Code settings"},{dir:".idea",pattern:".idea/**",comment:"JetBrains settings"},{dir:"scripts",pattern:"scripts/**",comment:"Build and utility scripts"},{dir:"docs",pattern:"docs/**",comment:"Documentation"},{dir:"docker",pattern:"docker/**",comment:"Docker configuration"},{dir:"fixtures",pattern:"fixtures/**",comment:"Test fixtures"},{dir:"vendor",pattern:"vendor/**",comment:"Vendored dependencies"}],De=[{test:/\.generated\./,pattern:"*.generated.*",comment:"Generated code"},{test:/\.gen\.[^.]+$/,pattern:"*.gen.*",comment:"Generated code"},{test:/\.config\.\w/,pattern:"*.config.*",comment:"Configuration files"},{test:/(?:^|\/)\.gitlab-ci/,pattern:".gitlab-ci*",comment:"GitLab CI configuration"},{test:/^\.claude\/commands\//,pattern:".claude/commands/**",comment:"Claude Code slash commands (often generated from sources)"},{test:/^\.opencode\/commands\//,pattern:".opencode/commands/**",comment:"OpenCode slash commands (often generated from sources)"},{test:/^\.cursor\/rules\//,pattern:".cursor/rules/**",comment:"Cursor rules (often generated from sources)"}];function le(){let e=G(),t=[],n=new Set;for(let i of e){let o=i.indexOf("/");o>0&&n.add(i.slice(0,o))}for(let i of $e)n.has(i.dir)&&t.push({pattern:i.pattern,comment:i.comment});for(let i of De)for(let o of e)if(i.test.test(o)){t.push({pattern:i.pattern,comment:i.comment});break}return t}function ue(e,t=U){let n=["# Generated by obscene init","# Edit this file to customize which files are excluded from analysis.","# Patterns use glob syntax (same as .gitignore).","# See: https://github.com/wbern/obscene#ignore-files",""];for(let i of t){n.push(`# ${i.title}`);for(let o of i.patterns)n.push(o.pattern);n.push("")}if(e.length>0){n.push("# Project-specific patterns");for(let i of e)n.push(`# ${i.comment}`),n.push(i.pattern);n.push("")}return n.join(`
`)}var Fe=10,X={inconclusive:0,weak:1,plausible:2,acceptable:3};function Ne(e){let t=Object.values(e).map(o=>o.confidence),n=t.length;if(n<2)return{level:"inconclusive",reason:`${n} input ranking \u2014 RRF requires \u2265 2 independent rankings.`,inputs:{metric:"inputRankings",value:n,thresholds:{weak:2,plausible:3,acceptable:4}},source:R.composite};let i="acceptable";for(let o of t)X[o.level]<X[i]&&(i=o.level);return{level:i,reason:`Composite inherits min-of-inputs across ${n} rankings (weakest: ${i.toUpperCase()}).`,inputs:{metric:"inputRankings",value:n,thresholds:{weak:2,plausible:3,acceptable:4}},source:R.composite}}function pe(e,t,n){let i=Object.keys(e).length,o=Ne(e),s=new Map;for(let a of Object.values(e))for(let p=0;p<a.entries.length;p++){let f=a.entries[p].file,g=1/(Fe+p+1),h=s.get(f);h?(h.score+=g,h.dims+=1):s.set(f,{score:g,dims:1})}let c=[];for(let[a,p]of s)c.push({file:a,score:Math.round(p.score*1e4)/1e4,percentOfTotal:0,tier:"cool",churn:t.get(a)??0,dimensionCount:p.dims});c.sort((a,p)=>p.score-a.score);let m=c.reduce((a,p)=>a+p.score,0);if(m===0)return{label:"Combined",scoreFormula:"reciprocal rank fusion across all dimensions",totalScore:0,tierCounts:{hot:0,warm:0,cool:0},totalDimensions:i,totalEntries:0,showing:0,entries:[],confidence:o};B(c,m);let r=n>0?c.slice(0,n):c,l={hot:0,warm:0,cool:0};for(let a of c)l[a.tier]++;return{label:"Combined",scoreFormula:"reciprocal rank fusion across all dimensions",totalScore:Math.round(m*1e4)/1e4,tierCounts:l,totalDimensions:i,totalEntries:c.length,showing:r.length,entries:r,confidence:o}}function me(e){return M("commitsInWindow",e,I.coupling,R.coupling,t=>t==="inconclusive"?`${e} commits in window \u2014 need \u2265 ${I.coupling.weak} (matches code-maat's --min-revs default).`:`${e} commits in window (${t.toUpperCase()} sample size).`)}function fe(e){try{let t=E(`git rev-list --count --since="${e} months ago" HEAD`,{stdio:["pipe","pipe","pipe"]});return parseInt(t.toString().trim(),10)||0}catch{throw new Error("Not a git repository or git is not installed.")}}var Ae=30;function ge(e){let t=e*Ae,n;try{let c=E("git log --format=%ct --reverse HEAD",{maxBuffer:52428800,stdio:["pipe","pipe","pipe"]}).toString().split(`
`,1)[0].trim();if(n=parseInt(c,10),!Number.isFinite(n)||n<=0)return{windowDays:t,spanDays:0,underCovered:!0}}catch{throw new Error("Not a git repository or git is not installed.")}let i=Math.floor(Date.now()/1e3),o=Math.max(0,Math.floor((i-n)/86400));return{windowDays:t,spanDays:o,underCovered:o<t}}import y from"picocolors";import S from"picocolors";var Le=/\x1b\[[0-9;]*m/g;function _e(e){return e>=11904&&e<=12543||e>=12800&&e<=13311||e>=13312&&e<=40959||e>=44032&&e<=55215||e>=63744&&e<=64255||e>=65281&&e<=65376||e>=65504&&e<=65510||e>=9728&&e<=9983||e>=127744&&e<=129791||e>=131072&&e<=195103}function de(e){let t=e.replace(Le,""),n=0;for(let i of t){let o=i.codePointAt(0);o===65038||o===65039||(n+=_e(o)?2:1)}return n}function w(e,t){let n=de(e);return n>=t?e:e+" ".repeat(t-n)}function d(e,t){let n=de(e);return n>=t?e:" ".repeat(t-n)+e}function k(e,t){if(t<=0)return"";if(e.length<=t)return e;if(t===1)return"\u2026";let n=t-1,i=Math.ceil(n*.6),o=n-i;return`${e.slice(0,o)}\u2026${e.slice(e.length-i)}`}function F(e){return e==="hot"?S.red("\u{1F525} HOT "):e==="warm"?S.yellow("\u2600\uFE0F WARM"):S.blue("\u{1F9CA} COOL")}function N(e,t){return e==="hot"?S.red(t):e==="warm"?S.yellow(t):S.blue(t)}function A(e,t,n){let i=[];return i.push(`Tiers: ${S.red(`${e.hot} HOT`)}, ${S.yellow(`${e.warm} WARM`)}, ${S.blue(`${e.cool} COOL`)}`),i.push(`Showing: ${t} of ${n}`),i}var He={inconclusive:y.gray,weak:y.yellow,plausible:y.cyan,acceptable:y.green};function z(e){let t=He[e.level];return[t(`Confidence: ${e.level.toUpperCase()} \u2014 ${e.reason}`)]}var je=Object.fromEntries(W.map(e=>[e.key,e.label]));function he(e){let t=[],{summary:n,files:i}=e;t.push(`Complexity Report \u2014 ${n.fileCount} files, ${n.totalComplexity} total complexity`),t.push(`Showing: ${n.showing} | Avg complexity/file: ${n.avgComplexityPerFile}`),t.push(""),t.push(w("File",60)+d("Code",8)+d("Complexity",12)+d("Density",9)+d("Comments",10)),t.push("\u2500".repeat(99));for(let o of i)t.push(w(k(o.file,58),60)+d(String(o.code),8)+d(String(o.complexity),12)+d(o.complexityDensity.toFixed(2),9)+d(String(o.comments),10));return t.push(""),t.push(y.dim("Complexity=cyclomatic branch/loop count | Density=complexity/code | Comments=comment lines")),t.push(y.dim("High complexity is expected for parsers, state machines, and business logic. Compare density across files, not raw values.")),t.push(y.dim("Docs: https://github.com/wbern/obscene#metrics")),t.join(`
`)}function Ue(e){let t=[{header:"File",width:50,align:"left",value:o=>k(o.file,48)},{header:"Score",width:8,align:"right",value:o=>o.score.toLocaleString()},{header:"%",width:7,align:"right",value:o=>o.percentOfTotal.toFixed(1)},{header:"Churn",width:7,align:"right",value:o=>String(o.churn)}],n={complexity:[{header:"Cmplx",width:7,align:"right",value:o=>String(o.metricValue)},{header:"Dens",width:7,align:"right",value:o=>(o.metricDensity??0).toFixed(2)}],nesting:[{header:"Nest",width:6,align:"right",value:o=>String(o.metricValue)}],defects:[{header:"Fixes",width:6,align:"right",value:o=>String(o.metricValue)},{header:"FxDns",width:7,align:"right",value:o=>(o.metricDensity??0).toFixed(4)}],authors:[{header:"Auth",width:6,align:"right",value:o=>String(o.metricValue)},{header:"MinAuth",width:9,align:"right",value:o=>o.minorAuthors===null||o.minorAuthors===void 0?"\u2014":String(o.minorAuthors)}]},i={header:"Tier",width:12,align:"right",value:o=>F(o.tier)};return[...t,...n[e]??[],i]}var Pe={complexity:"\u{1F9EC}",nesting:"\u{1F4CF}",defects:"\u{1F527}",authors:"\u{1F465}"};function Be(e,t,n){let i=[],o=Ue(e),s=Pe[e],c=s?`${s} `:"",m=t.label.toUpperCase().replace("CHURN","\u{1F504} CHURN");if(i.push(`${c}${m} \u2014 Total score: ${t.totalScore.toLocaleString()}`),i.push(...z(t.confidence)),n)for(let a of n.split(`
`))i.push(y.dim(a));i.push(...A(t.tierCounts,t.showing,t.totalEntries)),i.push("");let r=o.map(a=>a.align==="left"?w(a.header,a.width):d(a.header,a.width)).join("");i.push(r);let l=o.reduce((a,p)=>a+p.width,0);i.push("\u2500".repeat(l));for(let a of t.entries){let f=o.map(g=>{let h=g.value(a);return g.align==="left"?w(h,g.width):d(h,g.width)}).join("");i.push(N(a.tier,f))}return i}function ye(e){let t=[],{churnWindow:n,rankings:i,corpus:o}=e;t.push(`Hotspots \u2014 ${n} churn window`),o&&o.fileCount>0&&o.totalComplexity===0&&(t.push(""),t.push(y.yellow("Note: no measurable code complexity detected across this corpus (cyclomatic = 0).")),t.push(y.yellow("Rankings reflect size and churn only \u2014 HOT/WARM/COOL are relative groupings, not risk labels."))),t.push("");let s=Object.keys(i);for(let m=0;m<s.length;m++){let r=s[m];t.push(...Be(r,i[r],e.guide[r])),m<s.length-1&&(t.push(""),t.push("\xB7 \xB7 \xB7"),t.push(""))}if(e.skipped)for(let[m,r]of Object.entries(e.skipped)){t.push("");let l=je[m]??`${m.charAt(0).toUpperCase()+m.slice(1)} \xD7 Churn`;t.push(`${l} \u2014 skipped (${r.reason})`),r.suggestion&&t.push(` ${r.suggestion}`)}t.push(""),t.push(y.dim("Score=metric\xD7churn | Tiers are relative to THIS codebase, not absolute quality grades."));let c=o!==void 0&&o.fileCount>0&&o.totalComplexity===0;return t.push(y.dim(c?"High scores flag files that change often and are sizable \u2014 neither is bad in itself.":"High scores flag review candidates, not bad code \u2014 stable complex files (parsers, engines) score high naturally.")),t.push(y.dim("Docs: https://github.com/wbern/obscene#metrics")),t.join(`
`)}function be(e){let t=[],{tierCounts:n,totalScore:i,churnWindow:o,couplings:s}=e;t.push(`Coupling \u2014 ${o} churn window | Min shared: ${e.minCochanges} | Total score: ${i.toLocaleString()}`),t.push(...z(e.confidence)),t.push(...A(n,e.showing,e.totalCouplings)),t.push(w("File 1",35)+w("File 2",35)+d("Shared",7)+d("Degree",8)+d("Cmplx",7)+d("Tier",12)),t.push("\u2500".repeat(104));let c=!1,m=!1;for(let r of s){(r.file1Deleted||r.file2Deleted)&&(c=!0),r.lockstep&&(m=!0);let l=r.file1Deleted?`\u2020 ${k(r.file1,31)}`:k(r.file1,33),a=r.file2Deleted?`\u2020 ${k(r.file2,31)}`:k(r.file2,33),p=r.lockstep?`${r.degree.toFixed(1)}\u21C4`:`${r.degree.toFixed(1)}%`,f=w(l,35)+w(a,35)+d(String(r.cochanges),7)+d(p,8)+d(String(r.totalComplexity),7)+d(F(r.tier),12);t.push(N(r.tier,f))}return t.push(""),t.push(y.dim("Shared=co-changed commits | Degree=shared/min(churn)\xD7100 | Cmplx=sum of both files")),c&&t.push(y.dim("\u2020 = file no longer present at HEAD (deleted or renamed)")),m&&t.push(y.dim("\u21C4 = lockstep pair (both files only ever changed together \u2014 signal is real but uninformative)")),t.push(y.dim("Tiers are relative to THIS codebase, not absolute quality grades. High coupling may be intentional and fine.")),t.push(y.dim("Same-directory pairs excluded. Commits touching >20 files skipped. Only cross-directory dependencies shown.")),t.push(y.dim("Docs: https://github.com/wbern/obscene#metrics")),t.join(`
`)}function Ce(e){let t=[];t.push("\u2550".repeat(84)),t.push(`\u2605 ${e.label.toUpperCase()} \u2014 Total score: ${e.totalScore.toLocaleString()}`),t.push(...z(e.confidence)),t.push(...A(e.tierCounts,e.showing,e.totalEntries)),t.push(""),t.push(w("File",50)+d("Score",9)+d("Churn",7)+d("Dims",6)+d("Tier",12)),t.push("\u2500".repeat(84));for(let n of e.entries){let i=w(k(n.file,48),50)+d(n.score.toFixed(4),9)+d(String(n.churn),7)+d(`${n.dimensionCount}/${e.totalDimensions}`,6)+d(F(n.tier),12);t.push(N(n.tier,i))}return t.join(`
`)}var T=new Ge;T.name("obscene").description("Identify hotspot files \u2014 complex code that changes frequently").version("2.4.0");var ze={complexity:"Cyclomatic complexity (branch/loop count). NOT a quality judgment \u2014 a 500-line parser will naturally score high. Compare density, not raw values.",complexityDensity:"Complexity per line of code. Normalizes for file size. >0.25 suggests dense logic worth reviewing; <0.10 is typical for straightforward code.",comments:"Comment line count. Low comments in high-density files may indicate under-documented logic. High comments alone is not a problem."},Ve={rankings:"Four independent ranking tables, each scoring files by a different metric \xD7 churn. A file may rank high in one dimension but not others.",complexity:`complexity \xD7 churn. Complex code that changes often poses maintenance risk.
Context (old lines 17-18 = new lines 18-19):
Metric concept: McCabe cyclomatic complexity (1976) via scc \xB7 Strength: objective, language-agnostic \xB7 Limit: parsers and state machines score high naturally`,nesting:`maxNesting \xD7 churn. Deeply nested code that changes often is harder to reason about.
Metric concept: cognitive complexity research (SonarSource, G. Ann Campbell 2018) \xB7 Strength: catches hard-to-follow control flow \xB7 Limit: some patterns (error chains, config) legitimately nest deep`,defects:`fixes \xD7 churn. Count of fix: commits touching the file \xD7 churn. High values can mean latent fragility, but they also flag features that got debugged thoroughly \u2014 read the fix-commit history before concluding which.
Removed (old lines 19-30; the shorter entries are viewer-truncated, and old lines 25-26 were captured empty):
Metric concept: change-history metrics (Moser, Pedrycz & Succi 2008) via conventional commits (fix: prefix) \xB7 Strength: direct fix-history signal \xB7 Limit: counts fix activity, not defects per se; requires consistent fix: convention`,authors:`authors \xD7 churn. Files touched by many authors and changing often may lack clear ownership.
Metric concept: code ownership research (Bird et al. 2011, Microsoft) \xB7 Strength: flags diffuse ownership risk \xB7 Limit: doesn't measure expertise depth, bot authors filtered automatically`,composite:`Combined ranking using Reciprocal Rank Fusion (RRF) across all dimensions. Files appearing near the top of multiple rankings score highest.
Metric concept: RRF (Cormack et al. 2009) \xB7 Strength: robust to outliers, no normalization needed \xB7 Limit: equal weight across all dimensions`,tier:"Relative ranking within THIS codebase (top 50% = hot, next 30% = warm, bottom 20% = cool). NOT an absolute quality grade \u2014 a hot file is under heavy load, not necessarily broken.",corpus:"Aggregate stats for the analyzed file set (post-exclude \u2014 files filtered by .obsignore or --exclude are not counted). When totalComplexity is 0, the rankings reflect size and churn only; HOT/WARM/COOL become relative groupings rather than risk labels.",confidence:"Epistemic stamp on each ranking \u2014 INCONCLUSIVE / WEAK / PLAUSIBLE / ACCEPTABLE. These are engineering-judgment sample-size tiers, with the weak floor for defects matching code-maat's --min-revs default of 5. ACCEPTABLE is the ceiling \u2014 the tool never claims certainty about code quality, only that the sample supports the ranking. INCONCLUSIVE rankings are surfaced under skipped rather than ranked."},
`):process.stdout.write(`${
`)
`),u.
`)
`):
`)
`);
Added (new lines 20-32, full lines):
Metric concept: change-history metrics (Moser, Pedrycz & Succi 2008) via conventional commits (fix: prefix) \xB7 Strength: direct fix-history signal \xB7 Limit: counts fix activity, not defects per se; requires consistent fix: convention`,authors:`authors \xD7 churn. Files touched by many authors and changing often may lack clear ownership. MinAuth side-column counts contributors with <5% of file commits (Bird et al. FSE 2011) \u2014 '\u2014' means the file has fewer than 2 commits, too few to call anyone *minor*.
Metric concept: code ownership research (Bird et al. 2011, Microsoft); Co-authored-by trailers folded into author set to close the squash-merge gap \xB7 Strength: flags diffuse ownership risk \xB7 Limit: doesn't measure expertise depth, bot authors filtered automatically`,composite:`Combined ranking using Reciprocal Rank Fusion (RRF) across all dimensions. Files appearing near the top of multiple rankings score highest.
Metric concept: RRF (Cormack et al. 2009) \xB7 Strength: robust to outliers, no normalization needed \xB7 Limit: equal weight across all dimensions`,tier:"Relative ranking within THIS codebase (top 50% = hot, next 30% = warm, bottom 20% = cool). NOT an absolute quality grade \u2014 a hot file is under heavy load, not necessarily broken.",corpus:"Aggregate stats for the analyzed file set (post-exclude \u2014 files filtered by .obsignore or --exclude are not counted). When totalComplexity is 0, the rankings reflect size and churn only; HOT/WARM/COOL become relative groupings rather than risk labels.",confidence:"Epistemic stamp on each ranking \u2014 INCONCLUSIVE / WEAK / PLAUSIBLE / ACCEPTABLE. These are engineering-judgment sample-size tiers, with the weak floor for defects matching code-maat's --min-revs default of 5. ACCEPTABLE is the ceiling \u2014 the tool never claims certainty about code quality, only that the sample supports the ranking. INCONCLUSIVE rankings are surfaced under skipped rather than ranked."},qe={cochanges:"Times both files appeared in the same commit. Higher values suggest a dependency between the files. Same-directory pairs are excluded \u2014 only cross-directory pairs are shown.",degree:"Percentage: shared commits / min(churn of file1, file2) \xD7 100. Shows how tightly coupled the pair is relative to their individual change rates. 100% means every change to the less-active file also touched the other.",totalComplexity:"Sum of both files' cyclomatic complexity. Highlights coupled pairs where the involved code is also complex \u2014 hidden dependency + high complexity compounds maintenance risk.",tier:"Relative ranking within THIS codebase's coupling pairs (top 50% = hot, next 30% = warm, bottom 20% = cool). NOT an absolute quality grade. 'hot' means this pair co-changes more than most \u2014 it may be intentional and fine.",deleted:"file1Deleted / file2Deleted are set when the file is no longer present at HEAD (deleted or renamed away). The coupling signal is historical \u2014 the pair is not actionable in the current tree.",lockstep:"Set when shared commits / max(churn) \u2265 0.9 \u2014 both files almost always change together over the window. Typical of generator/mirror pairs (README \u2194 src/README, *.pb.go \u2194 *.proto). The coupling signal is real but uninformative; treat the pair as a single unit from git's perspective.",confidence:"Epistemic stamp on the coupling table \u2014 INCONCLUSIVE / WEAK / PLAUSIBLE / ACCEPTABLE. Tied to the number of commits in the analysis window. The weak floor of 5 matches code-maat's --min-revs default (Adam Tornhill); higher tiers are engineering judgment. 
ACCEPTABLE means the sample supports the ranking; it never asserts the couplings themselves are bad."};function V(e){return e.option("--top <n>","limit to top N entries (0 = all)","20").option("--format <type>","output format: json | table","json").option("--exclude <patterns...>","additional file patterns to exclude (also reads .obsignore / .obsceneignore)")}V(T.command("report").description("per-file complexity data")).action(e=>{try{Ke(e)}catch(t){_(t)}});V(T.command("hotspots",{isDefault:!0}).description("churn \xD7 complexity hotspot analysis (default)")).option("--months <n>","churn window in months","3").action(e=>{try{Je(e)}catch(t){_(t)}});V(T.command("coupling").description("temporal coupling \u2014 files that change together across directories")).option("--months <n>","churn window in months","3").option("--min-cochanges <n>","minimum shared commits to include","2").action(e=>{try{Ye(e)}catch(t){_(t)}});T.command("init").description("generate a starter .obsignore based on project structure").action(()=>{try{Xe()}catch(e){_(e)}});function q(e){return[...Z(),...e??[]]}function K(){!L(".obsignore")&&!L(".obsceneignore")&&process.stderr.write("hint: no .obsignore found \u2014 run `obscene init` to generate one with recommended exclusions\n")}function we(e){let t=ge(e);return t.underCovered&&process.stderr.write(`warning: git history covers ~${t.spanDays}d, but --months window is ${t.windowDays}d \u2014 count-based confidence won't reflect time-based trust on a young repo
`),t}function Ke(e){K();let t=parseInt(e.top,10),n=q(e.exclude),i=D(n),o=i.reduce((m,r)=>({totalComplexity:m.totalComplexity+r.complexity,totalCode:m.totalCode+r.code,totalLines:m.totalLines+r.lines}),{totalComplexity:0,totalCode:0,totalLines:0}),s=t>0?i.slice(0,t):i,c={generated:new Date().toISOString(),guide:ze,summary:{...o,fileCount:i.length,avgComplexityPerFile:i.length>0?Math.round(o.totalComplexity/i.length*10)/10:0,showing:s.length},files:s};e.format==="table"?process.stdout.write(`${he(c)}
`):process.stdout.write(`${JSON.stringify(c,null,2)}
`)}function Je(e){K();let t=parseInt(e.top,10),n=parseInt(e.months,10),i=we(n),o=q(e.exclude),s=D(o),c=P(n),m=oe(n),r=ie(n),l=new Map;for(let[C,u]of r)l.set(C,u.size);let a=ae(s.map(C=>C.file)),{rankings:p,skipped:f}=se(s,c,m,a,l,t,r),g=pe(p,c,t),h=0;for(let C of s)h+=C.complexity;let b={generated:new Date().toISOString(),guide:Ve,churnWindow:`${n} months`,historyCoverage:i,rankings:p,skipped:Object.keys(f).length>0?f:void 0,composite:g,corpus:{fileCount:s.length,totalComplexity:h}};e.format==="table"?(process.stdout.write(`${ye(b)}
`),g.entries.length>0&&process.stdout.write(`
${Ce(g)}
`)):process.stdout.write(`${JSON.stringify(b,null,2)}
`)}function Ye(e){K();let t=parseInt(e.top,10),n=parseInt(e.months,10),i=parseInt(e.minCochanges,10),o=we(n),s=q(e.exclude),c=D(s),m=P(n),r=re(n,s),l=new Map;for(let C of c)l.set(C.file,C.complexity);let a=G(),p=ce(r,m,l,i,a),f=t>0?p.slice(0,t):p,g={hot:0,warm:0,cool:0};for(let C of p)g[C.tier]++;let h=p.reduce((C,u)=>C+u.couplingScore,0),b={generated:new Date().toISOString(),guide:qe,churnWindow:`${n} months`,historyCoverage:o,minCochanges:i,totalScore:h,tierCounts:g,totalCouplings:p.length,showing:f.length,couplings:f,confidence:me(fe(n))};e.format==="table"?process.stdout.write(`${be(b)}
`):process.stdout.write(`${JSON.stringify(b,null,2)}
`)}function Xe(){if(L(".obsignore"))throw new Error(".obsignore already exists. Remove it first to regenerate.");if(L(".obsceneignore"))throw new Error(".obsceneignore already exists. Remove it first to regenerate.");let e=le(),t=ue(e);We(".obsignore",t);let n=U.reduce((i,o)=>i+o.patterns.length,0);if(process.stderr.write(`Created .obsignore with ${n} universal exclusions`),e.length>0){process.stderr.write(` + ${e.length} detected patterns:
`);for(let i of e)process.stderr.write(` ${i.pattern.padEnd(20)} ${i.comment}
Context (old line 31 = new line 33):
`)}else process.stderr.write(` (no project-specific patterns detected)
Removed (old lines 32-33, truncated):
`)}function
`),process.exit(1)}
Added (new lines 34-35):
`)}function _(e){let t=e instanceof Error?e.message:String(e);process.stderr.write(`Error: ${t}
`),process.exit(1)}T.parse();
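The coupling math embedded in the new bundle's guide strings can be restated briefly. A sketch with invented names; the formulas match the shipped strings: degree is shared commits relative to the less-active file's churn, and a pair is flagged lockstep when even the more active file co-changes at least 90% of the time. Same-directory pairs are excluded and commits touching more than 20 files are skipped before any pair is counted.

```ts
// Illustrative restatement of the coupling metrics in dist/cli.js
// (invented names; thresholds match the published bundle).
type Pair = { file1: string; file2: string; shared: number };

function couplingDegree(p: Pair, churn: Map<string, number>): number {
  const c1 = churn.get(p.file1) ?? 0;
  const c2 = churn.get(p.file2) ?? 0;
  const lesser = Math.min(c1, c2);
  // Degree: shared commits / min(churn) × 100, rounded to one decimal.
  return lesser > 0 ? Math.round((p.shared / lesser) * 1000) / 10 : 0;
}

function isLockstep(p: Pair, churn: Map<string, number>): boolean {
  const greater = Math.max(churn.get(p.file1) ?? 0, churn.get(p.file2) ?? 0);
  // Lockstep: shared / max(churn) ≥ 0.9 — both files almost always change
  // together, typical of generator/mirror pairs.
  return p.shared > 0 && greater > 0 && p.shared / greater >= 0.9;
}
```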