tech-debt-visualizer 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/cli.js ADDED
@@ -0,0 +1,241 @@
1
+ #!/usr/bin/env node
2
+ /**
3
+ * CLI entry: colorful terminal output, progress bars, actionable insights.
4
+ */
5
import { readFile } from "node:fs/promises";
import { join, resolve } from "node:path";
import chalk from "chalk";
import cliProgress from "cli-progress";
import { Command } from "commander";
import { getCleanlinessTier } from "./cleanliness-score.js";
import { runAnalysis } from "./engine.js";
import { assessFileCleanliness, assessOverallCleanliness, enrichDebtWithInsights, } from "./llm.js";
import { generateHtmlReport } from "./reports/html.js";
import { generateJsonReport } from "./reports/json.js";
import { generateMarkdownReport } from "./reports/markdown.js";
16
/**
 * CLI wiring: program metadata plus the `analyze` command, which runs the
 * static analysis, optionally layers on LLM insights, and emits the report
 * in the requested format (cli | html | json | markdown).
 */
const program = new Command();
program
    .name("tech-debt")
    .description("Analyze repositories and visualize technical debt with AI-powered insights")
    .version("0.1.0");
program
    .command("analyze")
    .description("Analyze a repository and output report")
    .argument("[path]", "Repository path", ".")
    .option("-o, --output <path>", "Output file path (default: report.html or stdout for CLI)")
    .option("-f, --format <type>", "Output format: cli | html | json | markdown", "cli")
    .option("--no-llm", "Skip LLM-powered insights")
    .option("--ci", "CI mode: minimal output, exit with non-zero if debt score is high")
    .action(async (path, opts) => {
    // FIX: join(cwd, path) mangles absolute arguments (join("/cwd", "/repo")
    // -> "/cwd/repo"); resolve() handles relative and absolute paths alike.
    const repoPath = resolve(process.cwd(), path);
    const format = (opts.format ?? "cli");
    // commander maps --no-llm onto opts.llm === false.
    const useLlm = opts.llm !== false;
    const outputPath = opts.output ?? (format === "html" ? "tech-debt-report.html" : undefined);
    const totalSteps = useLlm ? 6 : 4;
    const progress = new cliProgress.SingleBar({
        format: chalk.cyan(" {bar} ") + "| {task} | {value}/{total}",
        barCompleteChar: "█",
        barIncompleteChar: "░",
    }, cliProgress.Presets.shades_classic);
    try {
        process.stderr.write(chalk.bold.blue("\n Technical Debt Visualizer\n\n"));
        progress.start(totalSteps, 0, { task: "Discovering files..." });
        progress.update(1, { task: "Discovering files..." });
        const run = await runAnalysis({
            repoPath,
            maxFiles: 1500,
            gitDays: 90,
        });
        progress.update(2, { task: "Analyzing..." });
        // Read each analyzed file once; the LLM passes below reuse this cache.
        const fileContents = new Map();
        for (const f of run.fileMetrics.map((m) => m.file)) {
            try {
                fileContents.set(f, await readFile(join(repoPath, f), "utf-8"));
            }
            catch {
                // File vanished or is unreadable; LLM steps simply skip it.
            }
        }
        if (useLlm) {
            progress.update(3, { task: "LLM: per-file cleanliness..." });
            // FIX: sort a copy — Array.prototype.sort mutates in place, so the
            // old code silently reordered run.fileMetrics for every report.
            const filesToAssess = [...run.fileMetrics]
                .sort((a, b) => (b.hotspotScore ?? 0) - (a.hotspotScore ?? 0))
                .slice(0, 15);
            for (const m of filesToAssess) {
                const content = fileContents.get(m.file);
                if (!content)
                    continue;
                const result = await assessFileCleanliness(m.file, content, m);
                if (result) {
                    const idx = run.fileMetrics.findIndex((x) => x.file === m.file);
                    if (idx >= 0)
                        run.fileMetrics[idx] = {
                            ...run.fileMetrics[idx],
                            llmAssessment: result.assessment,
                            llmSuggestedCode: result.suggestedCode,
                        };
                }
            }
            progress.update(4, { task: "LLM: debt item insights..." });
            let debtItems = run.debtItems;
            if (debtItems.length > 0) {
                // Enrich the first 25 items only, then merge them back by id.
                debtItems = await enrichDebtWithInsights(debtItems.slice(0, 25), fileContents);
                const byId = new Map(debtItems.map((d) => [d.id, d]));
                run.debtItems = run.debtItems.map((d) => byId.get(d.id) ?? d);
            }
            progress.update(5, { task: "LLM: overall assessment..." });
            const overall = await assessOverallCleanliness(run);
            if (overall)
                run.llmOverallAssessment = overall;
        }
        progress.update(totalSteps, { task: "Done" });
        progress.stop();
        if (format === "html" && outputPath) {
            await generateHtmlReport(run, { outputPath, title: "Technical Debt Report", darkMode: true });
            process.stdout.write(chalk.green(`\n Report written to ${outputPath}\n\n`));
        }
        else if (format === "json") {
            const out = outputPath ?? undefined;
            const json = generateJsonReport(run);
            if (out) {
                const { writeFile } = await import("node:fs/promises");
                await writeFile(out, json, "utf-8");
                process.stdout.write(chalk.green(`\n JSON written to ${out}\n\n`));
            }
            else {
                process.stdout.write(json + "\n");
            }
        }
        else if (format === "markdown") {
            const out = outputPath ?? undefined;
            const md = generateMarkdownReport(run);
            if (out) {
                const { writeFile } = await import("node:fs/promises");
                await writeFile(out, md, "utf-8");
                process.stdout.write(chalk.green(`\n Markdown written to ${out}\n\n`));
            }
            else {
                process.stdout.write(md + "\n");
            }
        }
        else {
            printCliReport(run, opts.ci ?? false);
            // CI gate: fail the build when the aggregate debt score is high.
            if (opts.ci && getDebtScore(run) > 60)
                process.exit(1);
        }
    }
    catch (e) {
        progress.stop();
        process.stderr.write(chalk.red("\n Error: " + (e instanceof Error ? e.message : String(e)) + "\n\n"));
        process.exit(1);
    }
});
133
function getDebtScore(run) {
    // Aggregate debt score in [0, 100]: mean of severity-weighted,
    // confidence-scaled item weights, scaled by 25 and capped at 100.
    const { debtItems } = run;
    if (debtItems.length === 0) {
        return 0;
    }
    const WEIGHTS = { low: 1, medium: 2, high: 3, critical: 4 };
    let total = 0;
    for (const item of debtItems) {
        total += (WEIGHTS[item.severity] ?? 0) * item.confidence;
    }
    const average = total / debtItems.length;
    return Math.min(100, Math.round(average * 25));
}
141
/**
 * Render the human-readable terminal report: cleanliness tier, summary,
 * severity histogram, LLM assessment, hotspots, and top debt items.
 * `ci` suppresses the parse-error section for quieter CI logs.
 */
function printCliReport(run, ci) {
    const { debtItems, fileMetrics, errors } = run;
    const score = getDebtScore(run);
    const cleanliness = getCleanlinessTier(score);
    process.stdout.write("\n");
    process.stdout.write(chalk.bold.dim(" Technical Debt Cleanliness Score\n"));
    process.stdout.write(" " + "—".repeat(52) + "\n");
    const tierColor = cleanlinessTierColor(cleanliness.tier);
    process.stdout.write(tierColor(` ${cleanliness.label} (${cleanliness.tier}/5)\n`));
    process.stdout.write(tierColor(` ${cleanliness.description}\n`));
    process.stdout.write(" " + "—".repeat(52) + "\n\n");
    process.stdout.write(chalk.bold(" Summary\n"));
    process.stdout.write(chalk.dim(" " + "—".repeat(50) + "\n"));
    process.stdout.write(` Files analyzed: ${chalk.cyan(String(fileMetrics.length))}\n`);
    process.stdout.write(` Debt items: ${chalk.yellow(String(debtItems.length))}\n`);
    if (run.debtTrend && run.debtTrend.length > 0) {
        process.stdout.write(` Recent commits: ${chalk.cyan(String(run.debtTrend.length))}\n`);
    }
    process.stdout.write(` Debt score: ${severityColor(score)} / 100\n\n`);
    // Severity histogram. FIX: guard the key so an item with an unexpected
    // severity can't create a NaN counter via `undefined++`.
    const bySeverity = { critical: 0, high: 0, medium: 0, low: 0 };
    for (const d of debtItems) {
        if (d.severity in bySeverity)
            bySeverity[d.severity]++;
    }
    process.stdout.write(chalk.bold(" By severity\n"));
    process.stdout.write(` Critical: ${chalk.red(String(bySeverity.critical))} High: ${chalk.yellow(String(bySeverity.high))} Medium: ${chalk.hex("#b8860b")(String(bySeverity.medium))} Low: ${chalk.gray(String(bySeverity.low))}\n\n`);
    if (run.llmOverallAssessment) {
        process.stdout.write(chalk.bold(" LLM overall assessment\n"));
        process.stdout.write(chalk.dim(" " + "—".repeat(50) + "\n"));
        process.stdout.write(chalk.cyan(" " + run.llmOverallAssessment.replace(/\n/g, "\n ") + "\n\n"));
    }
    // Hotspots: top 5 files by combined churn/complexity score above 0.3.
    // (filter() already returns a fresh array, so sorting it is safe.)
    const hotspots = fileMetrics
        .filter((m) => (m.hotspotScore ?? 0) > 0.3)
        .sort((a, b) => (b.hotspotScore ?? 0) - (a.hotspotScore ?? 0))
        .slice(0, 5);
    if (hotspots.length > 0) {
        process.stdout.write(chalk.bold(" Hotspot files (high churn + complexity)\n"));
        for (const h of hotspots) {
            process.stdout.write(` ${chalk.red("●")} ${h.file} ${chalk.dim(`(score ${(h.hotspotScore ?? 0).toFixed(2)})`)}\n`);
            if (h.llmAssessment)
                process.stdout.write(chalk.dim(` ${h.llmAssessment.replace(/\n/g, "\n ")}\n`));
            if (h.llmSuggestedCode) {
                process.stdout.write(chalk.cyan(" Suggested refactor:\n"));
                process.stdout.write(chalk.dim(h.llmSuggestedCode.split("\n").map((l) => " " + l).join("\n") + "\n"));
            }
        }
        process.stdout.write("\n");
    }
    process.stdout.write(chalk.bold(" Top debt items\n"));
    // FIX: sort a copy — `debtItems` aliases run.debtItems, and the original
    // in-place sort reordered the caller's data as a side effect of printing.
    const top = [...debtItems]
        .sort((a, b) => severityOrder(b.severity) - severityOrder(a.severity))
        .slice(0, 10);
    for (const d of top) {
        const sev = chalkSeverity(d.severity);
        process.stdout.write(` ${sev} ${d.title}\n`);
        process.stdout.write(chalk.dim(` ${d.file}${d.line ? `:${d.line}` : ""}\n`));
        if (d.insight)
            process.stdout.write(chalk.dim(` ${d.insight.slice(0, 120)}${d.insight.length > 120 ? "…" : ""}\n`));
        if (d.suggestedCode) {
            process.stdout.write(chalk.cyan(" Suggested refactor:\n"));
            process.stdout.write(chalk.dim(d.suggestedCode.split("\n").map((l) => " " + l).join("\n") + "\n"));
        }
        process.stdout.write("\n");
    }
    if (errors.length > 0 && !ci) {
        process.stdout.write(chalk.bold.yellow(" Parse errors\n"));
        for (const e of errors.slice(0, 5)) {
            process.stdout.write(chalk.dim(` ${e.file}: ${e.message}\n`));
        }
    }
    process.stdout.write(chalk.dim(" Run with --format html -o report.html for the interactive dashboard.\n\n"));
}
212
function severityOrder(s) {
    // Rank severities for sorting; anything unrecognized sinks to the bottom.
    switch (s) {
        case "critical": return 4;
        case "high": return 3;
        case "medium": return 2;
        case "low": return 1;
        default: return 0;
    }
}
215
function chalkSeverity(s) {
    // Render a severity as an icon + colored label; unknown values pass through.
    switch (s) {
        case "critical":
            return chalk.red("◆ Critical");
        case "high":
            return chalk.yellow("◇ High");
        case "medium":
            return chalk.hex("#b8860b")("▸ Medium");
        case "low":
            return chalk.gray("▹ Low");
        default:
            return s;
    }
}
224
function severityColor(score) {
    // Colorize the numeric score: red >= 70, yellow >= 40, green otherwise.
    const paint = score >= 70 ? chalk.red : score >= 40 ? chalk.yellow : chalk.green;
    return paint(String(score));
}
231
function cleanlinessTierColor(tier) {
    // Map a cleanliness tier (1..5) to a bold chalk style; white for anything else.
    const styles = {
        5: chalk.green.bold,
        4: chalk.cyan.bold,
        3: chalk.yellow.bold,
        2: chalk.hex("#f97316").bold,
        1: chalk.red.bold,
    };
    return styles[tier] ?? chalk.white.bold;
}
241
+ program.parse();
@@ -0,0 +1,7 @@
1
/**
 * Discover source files in a repository by extension and optional globs.
 */
import type { IAnalyzer } from "./types.js";
/**
 * Walk `repoPath` and collect the contents of analyzable source files.
 *
 * @param repoPath - Path of the repository root to scan.
 * @param analyzers - Analyzers whose `languages` determine which extensions are kept.
 * @param options - `maxFiles` caps how many files are collected.
 * @returns Map of repo-relative path -> file content.
 */
export declare function discoverFiles(repoPath: string, analyzers: IAnalyzer[], options?: {
    maxFiles?: number;
}): Promise<Map<string, string>>;
@@ -0,0 +1,70 @@
1
+ /**
2
+ * Discover source files in a repository by extension and optional globs.
3
+ */
4
+ import { readdir, readFile } from "node:fs/promises";
5
+ import { join } from "node:path";
6
// Directory names skipped during traversal: dependency trees, VCS metadata,
// build outputs, virtualenvs, vendored code, and tool caches.
const DEFAULT_IGNORE = new Set([
    "node_modules",
    ".git",
    "dist",
    "build",
    "out",
    "__pycache__",
    ".venv",
    "venv",
    "vendor",
    "coverage",
    ".next",
    ".nuxt",
    ".cache",
]);
21
export async function discoverFiles(repoPath, analyzers, options = {}) {
    // Translate the analyzers' declared languages into a set of file
    // extensions, then recursively collect matching files (capped at maxFiles).
    const { maxFiles = 2000 } = options;
    const extensions = new Set();
    for (const analyzer of analyzers) {
        for (const language of analyzer.languages) {
            if (language === "javascript" || language === "typescript") {
                for (const ext of [".js", ".jsx", ".ts", ".tsx", ".mjs", ".cjs"]) {
                    extensions.add(ext);
                }
            }
            else if (language === "python") {
                extensions.add(".py");
            }
        }
    }
    const collected = new Map();
    await walk(repoPath, repoPath, extensions, collected, maxFiles);
    return collected;
}
38
async function walk(root, dir, extensions, result, maxFiles) {
    // Depth-first traversal; bails out as soon as the file budget is spent.
    if (result.size >= maxFiles) {
        return;
    }
    let entries;
    try {
        entries = await readdir(dir, { withFileTypes: true });
    }
    catch {
        return; // unreadable directory — skip quietly
    }
    for (const entry of entries) {
        if (result.size >= maxFiles) {
            break;
        }
        const absolute = join(dir, entry.name);
        // Repo-relative key: strip the root prefix and one leading separator.
        const relative = absolute.slice(root.length).replace(/^[/\\]/, "");
        if (entry.isDirectory()) {
            // Recurse unless the directory is ignored or hidden (dot-prefixed).
            if (!DEFAULT_IGNORE.has(entry.name) && !entry.name.startsWith(".")) {
                await walk(root, absolute, extensions, result, maxFiles);
            }
            continue;
        }
        // Lowercased final extension; names with no dot never match.
        const ext = entry.name.includes(".") ? "." + entry.name.split(".").pop().toLowerCase() : "";
        if (!extensions.has(ext)) {
            continue;
        }
        try {
            result.set(relative, await readFile(absolute, "utf-8"));
        }
        catch {
            // binary or unreadable file — skip
        }
    }
}
@@ -0,0 +1,12 @@
1
/**
 * Analysis engine: discover files, run pluggable analyzers, merge git stats,
 * compute hotspots and confidence, and produce a single AnalysisRun.
 */
import type { AnalysisRun } from "./types.js";
/** Options controlling a single analysis run. */
export interface EngineOptions {
    /** Path of the repository to analyze. */
    repoPath: string;
    /** Restrict to analyzers with these names; all registered analyzers when omitted. */
    analyzerNames?: string[];
    /** Maximum number of source files to read (default 1500). */
    maxFiles?: number;
    /** How many days of git history to consider (default 90). */
    gitDays?: number;
}
/**
 * Run discovery, analyzers, and git enrichment. Does not throw when no files
 * match — the returned run carries an explanatory entry in `errors` instead.
 */
export declare function runAnalysis(options: EngineOptions): Promise<AnalysisRun>;
package/dist/engine.js ADDED
@@ -0,0 +1,76 @@
1
+ /**
2
+ * Analysis engine: discover files, run pluggable analyzers, merge git stats,
3
+ * compute hotspots and confidence, and produce a single AnalysisRun.
4
+ */
5
+ import { computeHotspotScore, getDebtTrend, getGitStats, } from "./git-analyzer.js";
6
+ import { discoverFiles } from "./discover.js";
7
+ import { analyzers } from "./analyzers/index.js";
8
/**
 * Orchestrate one analysis run: discover files, fan them out to the selected
 * analyzers, then enrich the collected metrics with git history.
 */
export async function runAnalysis(options) {
    const { repoPath, analyzerNames, maxFiles = 1500, gitDays = 90, } = options;
    // Narrow the analyzer set when explicit names were requested.
    const selectedAnalyzers = analyzerNames?.length
        ? analyzers.filter((a) => analyzerNames.includes(a.name))
        : analyzers;
    const files = await discoverFiles(repoPath, selectedAnalyzers, { maxFiles });
    if (files.size === 0) {
        // Surface "nothing to analyze" as a run-level error instead of throwing.
        return {
            repoPath,
            startedAt: new Date().toISOString(),
            analyzers: selectedAnalyzers.map((a) => a.name),
            fileMetrics: [],
            debtItems: [],
            errors: [{ file: "", message: "No matching source files found." }],
        };
    }
    // Git stats and the trend are independent; fetch them concurrently.
    const [gitStats, debtTrend] = await Promise.all([
        getGitStats(repoPath, { days: gitDays }),
        getDebtTrend(repoPath, 15),
    ]);
    const allMetrics = [];
    const allDebt = [];
    const allErrors = [];
    for (const analyzer of selectedAnalyzers) {
        // Hand each analyzer only the files it claims to understand.
        const subset = new Map();
        for (const [path, content] of files) {
            if (analyzer.canAnalyze(path)) {
                subset.set(path, content);
            }
        }
        if (subset.size === 0) {
            continue;
        }
        const result = await analyzer.analyze(subset, { repoPath });
        allMetrics.push(...result.metrics);
        allDebt.push(...result.debtItems);
        allErrors.push(...result.errors);
    }
    // Attach change counts, churn, and hotspot scores in place.
    enrichWithGit(gitStats, allMetrics);
    return {
        repoPath,
        startedAt: new Date().toISOString(),
        completedAt: new Date().toISOString(),
        analyzers: selectedAnalyzers.map((a) => a.name),
        fileMetrics: allMetrics,
        debtItems: allDebt,
        debtTrend,
        errors: allErrors,
    };
}
+ function enrichWithGit(gitStats, metrics) {
58
+ let maxChange = 0, maxChurn = 0, maxComplexity = 0;
59
+ for (const m of metrics) {
60
+ const g = gitStats.byFile.get(m.file);
61
+ if (g) {
62
+ m.changeCount = g.commits;
63
+ m.churn = g.churn;
64
+ maxChange = Math.max(maxChange, g.commits);
65
+ maxChurn = Math.max(maxChurn, g.churn);
66
+ }
67
+ const c = m.cyclomaticComplexity ?? 0;
68
+ maxComplexity = Math.max(maxComplexity, c);
69
+ }
70
+ for (const m of metrics) {
71
+ const change = m.changeCount ?? 0;
72
+ const churn = m.churn ?? 0;
73
+ const complexity = m.cyclomaticComplexity ?? 0;
74
+ m.hotspotScore = computeHotspotScore(change, churn, complexity, maxChange, maxChurn, maxComplexity);
75
+ }
76
+ }
@@ -0,0 +1,27 @@
1
/**
 * Git history analyzer: churn, hotspots, and change patterns.
 * Used to enrich file metrics with change frequency and debt trends.
 */
export interface GitStats {
    /** Per-file stats keyed by repo-relative path. */
    byFile: Map<string, {
        /** Commits touching the file inside the analysis window. */
        commits: number;
        /** Total lines added + deleted inside the window. */
        churn: number;
        /** Date of the file's most recent change ("" when unknown). */
        lastChange: string;
    }>;
    /** Total commits inside the analysis window. */
    recentCommits: number;
    /** Currently checked-out branch, when detectable. */
    defaultBranch?: string;
}
export declare function getGitStats(repoPath: string, options?: {
    days?: number;
    maxFiles?: number;
}): Promise<GitStats>;
/**
 * Compute hotspot score: files that change often AND are complex are riskier.
 * Normalize so we can combine with static metrics.
 */
export declare function computeHotspotScore(changeCount: number, churn: number, complexity: number, maxChange: number, maxChurn: number, maxComplexity: number): number;
/** Heuristic per-commit debt score series, ordered oldest-first for charting. */
export declare function getDebtTrend(repoPath: string, sampleCommits?: number): Promise<Array<{
    commit: string;
    date: string;
    score: number;
}>>;
@@ -0,0 +1,133 @@
1
+ /**
2
+ * Git history analyzer: churn, hotspots, and change patterns.
3
+ * Used to enrich file metrics with change frequency and debt trends.
4
+ */
5
+ import { simpleGit } from "simple-git";
6
// Defaults: history window (days) and cap on files kept in the stats map.
const DEFAULT_DAYS = 90;
const DEFAULT_MAX_FILES = 5000;
8
/**
 * Collect per-file git statistics for the last `days` days: commit counts,
 * churn (added + deleted lines), and each file's most recent change date.
 * Returns empty stats (not an error) when the path is not a git repository.
 */
export async function getGitStats(repoPath, options = {}) {
    const { days = DEFAULT_DAYS, maxFiles = DEFAULT_MAX_FILES } = options;
    const git = simpleGit(repoPath);
    const since = new Date();
    since.setDate(since.getDate() - days);
    const byFile = new Map();
    try {
        // Commit count per file: --name-only repeats each path once per commit.
        const nameOnly = await git.raw([
            "log",
            `--since=${since.toISOString()}`,
            "--name-only",
            "--format=",
        ]);
        const fileCommits = new Map();
        for (const line of nameOnly.split("\n")) {
            const f = line.trim().replace(/^\.\//, "");
            if (!f || f.includes("=>"))
                continue; // skip blanks and rename markers ("old => new")
            fileCommits.set(f, (fileCommits.get(f) ?? 0) + 1);
        }
        // Churn (additions + deletions) per file via --numstat.
        const numstat = await git.raw([
            "log",
            `--since=${since.toISOString()}`,
            "--numstat",
            "--format=",
        ]);
        const fileChurn = new Map();
        const fileLastChange = new Map();
        // NOTE: this issues one `git show` per commit (N+1 subprocesses) — slow
        // on large histories, but kept to preserve the existing data flow.
        const logWithDate = await git.log({ "--since": since.toISOString() });
        for (const entry of logWithDate.all) {
            const names = await getFilesInCommit(git, entry.hash);
            for (const f of names) {
                const norm = f.replace(/^\.\//, "");
                // FIX: git log returns commits newest-first, and the old code
                // unconditionally overwrote the entry, so older commits clobbered
                // newer dates and `lastChange` ended up holding the OLDEST change.
                // First sighting wins = most recent change.
                if (!fileLastChange.has(norm))
                    fileLastChange.set(norm, entry.date);
            }
        }
        for (const line of numstat.split("\n")) {
            const parts = line.trim().split(/\s+/);
            if (parts.length >= 3) {
                // numstat columns: additions, deletions, path ("-" for binary -> 0).
                const add = parseInt(parts[0], 10) || 0;
                const del = parseInt(parts[1], 10) || 0;
                const path = parts.slice(2).join(" ").replace(/^\.\//, "");
                if (path && !path.includes("=>"))
                    fileChurn.set(path, (fileChurn.get(path) ?? 0) + add + del);
            }
        }
        // Keep only the most frequently changed files, bounded by maxFiles.
        const sorted = [...fileCommits.entries()]
            .sort((a, b) => b[1] - a[1])
            .slice(0, maxFiles);
        for (const [file, commits] of sorted) {
            byFile.set(file, {
                commits,
                // Fall back to the commit count when numstat yielded no churn.
                churn: fileChurn.get(file) ?? commits,
                lastChange: fileLastChange.get(file) ?? "",
            });
        }
    }
    catch {
        // Not a git repo or no history — return empty stats below.
    }
    let defaultBranch;
    try {
        defaultBranch = (await git.branch()).current;
    }
    catch {
        // ignore: branch listing unavailable (e.g. not a repo)
    }
    let recentCommits = 0;
    try {
        recentCommits = (await git.log({ "--since": since.toISOString() })).total;
    }
    catch {
        // ignore
    }
    return {
        byFile,
        recentCommits,
        defaultBranch,
    };
}
90
async function getFilesInCommit(git, hash) {
    // List the paths touched by a single commit; empty list on any git failure.
    try {
        const output = await git.show([hash, "--name-only", "--format="]);
        const lines = output.split("\n");
        return lines.filter((line) => line !== "").map((line) => line.trim());
    }
    catch {
        return [];
    }
}
99
/**
 * Compute hotspot score: files that change often AND are complex are riskier.
 * Inputs are normalized against the repo-wide maxima so the result combines
 * cleanly with other static metrics (range [0, 1]).
 */
export function computeHotspotScore(changeCount, churn, complexity, maxChange, maxChurn, maxComplexity) {
    // Degenerate repo (no history and no complexity data): nothing to rank.
    if (maxChange === 0 && maxChurn === 0 && maxComplexity === 0) {
        return 0;
    }
    const safeRatio = (value, max) => (max > 0 ? value / max : 0);
    const changeNorm = safeRatio(changeCount, maxChange);
    const churnNorm = safeRatio(churn, maxChurn);
    const complexityNorm = safeRatio(complexity, maxComplexity);
    // Complexity carries half the weight; change frequency and churn share the rest.
    return complexityNorm * 0.5 + (changeNorm + churnNorm) * 0.25;
}
112
export async function getDebtTrend(repoPath, sampleCommits = 20) {
    // Heuristic trend over the last N commits. A real trend would re-run the
    // full analysis at each commit (expensive); instead we proxy "debt churn"
    // with how many files each commit touched: min(100, files * 2 + 10).
    const git = simpleGit(repoPath);
    const points = [];
    try {
        const log = await git.log({ max: sampleCommits });
        for (const entry of log.all) {
            const touched = await getFilesInCommit(git, entry.hash);
            points.push({
                commit: entry.hash.slice(0, 7),
                date: entry.date,
                score: Math.min(100, touched.length * 2 + 10),
            });
        }
    }
    catch {
        // Not a git repository (or git unavailable): return an empty trend.
    }
    // git log is newest-first; reverse so charts read chronologically.
    return points.reverse();
}
@@ -0,0 +1,6 @@
1
+ export { runAnalysis } from "./engine.js";
2
+ export { analyzers } from "./analyzers/index.js";
3
+ export type { AnalysisRun, DebtItem, FileMetrics, IAnalyzer, CliOptions, ReportOptions, } from "./types.js";
4
+ export { generateHtmlReport } from "./reports/html.js";
5
+ export { generateJsonReport } from "./reports/json.js";
6
+ export { generateMarkdownReport } from "./reports/markdown.js";
package/dist/index.js ADDED
@@ -0,0 +1,5 @@
1
+ export { runAnalysis } from "./engine.js";
2
+ export { analyzers } from "./analyzers/index.js";
3
+ export { generateHtmlReport } from "./reports/html.js";
4
+ export { generateJsonReport } from "./reports/json.js";
5
+ export { generateMarkdownReport } from "./reports/markdown.js";
package/dist/llm.d.ts ADDED
@@ -0,0 +1,28 @@
1
/**
 * LLM integration: debt explanations, per-file cleanliness, and overall assessment.
 * Supports OpenAI, OpenRouter (OpenAI-compatible), and Google Gemini.
 */
import type { DebtItem, FileMetrics } from "./types.js";
import type { AnalysisRun } from "./types.js";
/** Connection settings; fields left unset are resolved from the environment (see resolveLLMConfig). */
export interface LLMConfig {
    apiKey?: string;
    baseURL?: string;
    model?: string;
    maxTokens?: number;
}
export type LLMProvider = "openai" | "openrouter" | "gemini";
/** Resolve provider and auth from config + env. OpenRouter and Gemini take precedence when their keys are set. Returns null when resolution fails — presumably no API key is available; confirm against llm.js. */
export declare function resolveLLMConfig(config?: LLMConfig): {
    provider: LLMProvider;
    apiKey: string;
    baseURL: string;
    model: string;
} | null;
/** Enrich debt items with LLM-generated text, using `fileContents` (path -> source) as context. */
export declare function enrichDebtWithInsights(items: DebtItem[], fileContents: Map<string, string>, config?: LLMConfig): Promise<DebtItem[]>;
/** Per-file: LLM assesses cleanliness and optionally suggests a concrete code simplification. Resolves to null when no assessment is produced. */
export declare function assessFileCleanliness(filePath: string, content: string, metrics: FileMetrics, config?: LLMConfig): Promise<{
    assessment: string;
    suggestedCode?: string;
} | null>;
/** Overall: LLM assesses the whole codebase cleanliness in a short paragraph. */
export declare function assessOverallCleanliness(run: AnalysisRun, config?: LLMConfig): Promise<string | null>;