tech-debt-visualizer 0.1.5 → 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/cli.js CHANGED
@@ -10,11 +10,13 @@ import { Command } from "commander";
10
10
  import chalk from "chalk";
11
11
  import cliProgress from "cli-progress";
12
12
  import { getCleanlinessTier } from "./cleanliness-score.js";
13
+ import { getDebtScore } from "./debt-score.js";
13
14
  import { runAnalysis } from "./engine.js";
14
- import { assessFileCleanliness, assessOverallCleanliness, enrichDebtWithInsights, resolveLLMConfig, suggestNextSteps, } from "./llm.js";
15
+ import { assessFileCleanliness, assessOverallCleanliness, resolveLLMConfig, } from "./llm.js";
15
16
  import { generateHtmlReport } from "./reports/html.js";
16
17
  import { generateJsonReport } from "./reports/json.js";
17
18
  import { generateMarkdownReport } from "./reports/markdown.js";
19
+ import { SEVERITY_ORDER } from "./types.js";
18
20
  const program = new Command();
19
21
  program
20
22
  .name("tech-debt")
@@ -30,29 +32,32 @@ program
30
32
  .option("--llm", "Enable LLM (default). Use with --llm-key and/or --llm-model")
31
33
  .option("--llm-key <key>", "API key for LLM (overrides GEMINI_API_KEY / OPENAI_API_KEY / OPENROUTER_API_KEY)")
32
34
  .option("--llm-model <model>", "Model name (e.g. gemini-1.5-flash, gpt-4o-mini)")
35
+ .option("--llm-max-tokens <n>", "Max tokens for LLM responses (default: 2048 for overall, 1024 per-file)", (v) => parseInt(v, 10))
33
36
  .option("--ci", "CI mode: minimal output, exit with non-zero if debt score is high")
34
37
  .action(async (path, opts) => {
35
38
  const repoPath = join(process.cwd(), path);
36
39
  const format = (opts.format ?? "cli");
37
40
  const useLlm = opts.llm !== false;
38
41
  const outputPath = opts.output ?? (format === "html" ? "tech-debt-report.html" : undefined);
39
- const totalSteps = useLlm ? 6 : 4;
42
+ const llmConfigOverrides = {
43
+ apiKey: opts.llmKey,
44
+ model: opts.llmModel,
45
+ ...(opts.llmMaxTokens != null && opts.llmMaxTokens > 0 ? { maxTokens: opts.llmMaxTokens } : {}),
46
+ };
40
47
  const progress = new cliProgress.SingleBar({
41
- format: chalk.cyan(" {bar} ") + "| {task} | {value}/{total}",
48
+ format: chalk.cyan(" {bar} ") + "| {percentage}% | {value}/{total} | {task}",
42
49
  barCompleteChar: "█",
43
50
  barIncompleteChar: "░",
44
51
  }, cliProgress.Presets.shades_classic);
52
+ let run;
53
+ const fileContents = new Map();
45
54
  try {
46
55
  process.stderr.write(chalk.bold.blue("\n Technical Debt Visualizer\n\n"));
47
- progress.start(totalSteps, 0, { task: "Discovering files..." });
56
+ const discoverySteps = useLlm ? 2 : 4;
57
+ progress.start(discoverySteps, 0, { task: "Discovering files..." });
58
+ run = await runAnalysis({ repoPath, maxFiles: 1500, gitDays: 90 });
48
59
  progress.update(1, { task: "Discovering files..." });
49
- const run = await runAnalysis({
50
- repoPath,
51
- maxFiles: 1500,
52
- gitDays: 90,
53
- });
54
60
  progress.update(2, { task: "Analyzing..." });
55
- const fileContents = new Map();
56
61
  for (const f of run.fileMetrics.map((m) => m.file)) {
57
62
  try {
58
63
  fileContents.set(f, await readFile(join(repoPath, f), "utf-8"));
@@ -61,54 +66,77 @@ program
61
66
  // ignore
62
67
  }
63
68
  }
64
- const llmConfigOverrides = { apiKey: opts.llmKey, model: opts.llmModel };
65
- if (useLlm) {
69
+ if (!useLlm) {
70
+ progress.update(4, { task: "Done" });
71
+ progress.stop();
72
+ }
73
+ else {
74
+ progress.stop();
75
+ const maxFiles = 80;
76
+ const filesToAssess = run.fileMetrics.slice(0, maxFiles);
77
+ const totalSteps = 2 + filesToAssess.length + 1;
78
+ progress.start(totalSteps, 2, {
79
+ task: filesToAssess.length > 0 ? `LLM: file 0/${filesToAssess.length}` : "LLM: overall...",
80
+ });
66
81
  const llmConfig = resolveLLMConfig(llmConfigOverrides);
67
82
  if (!llmConfig) {
83
+ progress.update(totalSteps, { task: "Skipping LLM (no key)" });
84
+ progress.stop();
68
85
  process.stderr.write(chalk.yellow(" No LLM API key found. Set GEMINI_API_KEY or OPENAI_API_KEY (or use --llm-key <key>).\n" +
69
86
  " Example: export GEMINI_API_KEY=your_key or --llm-key your_key\n" +
70
87
  " Or add GEMINI_API_KEY=your_key to a .env file in the current directory.\n" +
71
88
  " Skipping AI insights for this run.\n\n"));
72
89
  }
73
90
  else {
74
- progress.update(3, { task: "LLM: per-file cleanliness..." });
75
- const allFilePaths = run.fileMetrics.map((m) => m.file);
76
- const maxFiles = 80;
77
- const filesToAssess = run.fileMetrics.slice(0, maxFiles);
91
+ run.llmAttempted = true;
78
92
  const config = { ...llmConfigOverrides };
79
- for (const m of filesToAssess) {
80
- const content = fileContents.get(m.file);
81
- if (!content)
82
- continue;
83
- const result = await assessFileCleanliness(m.file, content, m, config, { filePaths: allFilePaths });
84
- if (result) {
85
- const idx = run.fileMetrics.findIndex((x) => x.file === m.file);
86
- if (idx >= 0)
87
- run.fileMetrics[idx] = {
88
- ...run.fileMetrics[idx],
89
- llmAssessment: result.assessment,
90
- llmSuggestedCode: result.suggestedCode,
91
- };
93
+ const allFilePaths = run.fileMetrics.map((m) => m.file);
94
+ const FILE_BATCH_SIZE = 10;
95
+ for (let i = 0; i < filesToAssess.length; i += FILE_BATCH_SIZE) {
96
+ const batch = filesToAssess.slice(i, i + FILE_BATCH_SIZE);
97
+ const completedBefore = i;
98
+ const results = await Promise.allSettled(batch.map((m) => {
99
+ const content = fileContents.get(m.file);
100
+ if (!content)
101
+ return Promise.resolve(null);
102
+ return assessFileCleanliness(m.file, content, m, config, { filePaths: allFilePaths });
103
+ }));
104
+ for (let j = 0; j < batch.length; j++) {
105
+ const result = results[j];
106
+ if (result?.status === "fulfilled" && result.value) {
107
+ const m = batch[j];
108
+ const idx = run.fileMetrics.findIndex((x) => x.file === m.file);
109
+ if (idx >= 0)
110
+ run.fileMetrics[idx] = {
111
+ ...run.fileMetrics[idx],
112
+ llmAssessment: result.value.assessment,
113
+ llmSuggestedCode: result.value.suggestedCode,
114
+ llmFileScore: result.value.fileScore,
115
+ llmSeverity: result.value.severity,
116
+ llmRawAssessment: result.value.raw,
117
+ };
118
+ }
92
119
  }
120
+ const completedFiles = Math.min(completedBefore + batch.length, filesToAssess.length);
121
+ progress.update(2 + completedFiles, {
122
+ task: `LLM: file ${completedFiles}/${filesToAssess.length}`,
123
+ });
93
124
  }
94
- progress.update(4, { task: "LLM: debt item insights..." });
95
- let debtItems = run.debtItems;
96
- if (debtItems.length > 0) {
97
- debtItems = await enrichDebtWithInsights(debtItems.slice(0, 25), fileContents, config);
98
- const byId = new Map(debtItems.map((d) => [d.id, d]));
99
- run.debtItems = run.debtItems.map((d) => byId.get(d.id) ?? d);
100
- }
101
- progress.update(5, { task: "LLM: overall assessment..." });
125
+ const overallStep = 2 + filesToAssess.length;
126
+ progress.update(overallStep, { task: "LLM: overall assessment..." });
102
127
  const overall = await assessOverallCleanliness(run, config);
103
- if (overall)
104
- run.llmOverallAssessment = overall;
105
- const nextSteps = await suggestNextSteps(run, config);
106
- if (nextSteps?.length)
107
- run.llmNextSteps = nextSteps;
128
+ if (overall) {
129
+ run.llmOverallAssessment = overall.assessment;
130
+ if (overall.score != null)
131
+ run.llmOverallScore = overall.score;
132
+ if (overall.severity)
133
+ run.llmOverallSeverity = overall.severity;
134
+ run.llmOverallRaw = overall.raw;
135
+ }
136
+ progress.update(totalSteps, { task: "Done" });
137
+ progress.stop();
108
138
  }
109
139
  }
110
- progress.update(totalSteps, { task: "Done" });
111
- progress.stop();
112
140
  if (format === "html" && outputPath) {
113
141
  await generateHtmlReport(run, { outputPath, title: "Technical Debt Report", darkMode: true });
114
142
  process.stdout.write(chalk.green(`\n Report written to ${outputPath}\n\n`));
@@ -140,7 +168,12 @@ program
140
168
  else {
141
169
  printCliReport(run, opts.ci ?? false);
142
170
  if (!run.llmOverallAssessment) {
143
- process.stdout.write(chalk.dim(" To get AI insights: set GEMINI_API_KEY (or OPENAI_API_KEY) or use --llm-key <key>. Run without --no-llm.\n\n"));
171
+ if (run.llmAttempted) {
172
+ process.stdout.write(chalk.dim(" LLM was used but returned no insights. Check [LLM] errors above or verify your API key.\n\n"));
173
+ }
174
+ else {
175
+ process.stdout.write(chalk.dim(" To get AI insights: set GEMINI_API_KEY (or OPENAI_API_KEY) or use --llm-key <key>. Run without --no-llm.\n\n"));
176
+ }
144
177
  }
145
178
  if (opts.ci && getDebtScore(run) > 60)
146
179
  process.exit(1);
@@ -152,14 +185,6 @@ program
152
185
  process.exit(1);
153
186
  }
154
187
  });
155
- function getDebtScore(run) {
156
- const items = run.debtItems;
157
- if (items.length === 0)
158
- return 0;
159
- const severityWeight = { low: 1, medium: 2, high: 3, critical: 4 };
160
- const sum = items.reduce((a, b) => a + (severityWeight[b.severity] ?? 0) * b.confidence, 0);
161
- return Math.min(100, Math.round((sum / items.length) * 25));
162
- }
163
188
  function printCliReport(run, ci) {
164
189
  const { debtItems, fileMetrics, errors } = run;
165
190
  const score = getDebtScore(run);
@@ -198,7 +223,10 @@ function printCliReport(run, ci) {
198
223
  if (hotspots.length > 0) {
199
224
  process.stdout.write(chalk.bold(" Hotspot files (high churn + complexity)\n"));
200
225
  for (const h of hotspots) {
201
- process.stdout.write(` ${chalk.red("●")} ${h.file} ${chalk.dim(`(score ${(h.hotspotScore ?? 0).toFixed(2)})`)}\n`);
226
+ const hotspotInfo = `(score ${(h.hotspotScore ?? 0).toFixed(2)})`;
227
+ const llmInfo = h.llmFileScore != null ? ` LLM debt ${h.llmFileScore}/100` : "";
228
+ const llmSev = h.llmSeverity ? ` LLM severity ${h.llmSeverity}` : "";
229
+ process.stdout.write(` ${chalk.red("●")} ${h.file} ${chalk.dim(hotspotInfo + llmInfo + llmSev)}\n`);
202
230
  if (h.llmAssessment)
203
231
  process.stdout.write(chalk.dim(` ${h.llmAssessment.replace(/\n/g, "\n ")}\n`));
204
232
  if (h.llmSuggestedCode) {
@@ -217,7 +245,7 @@ function printCliReport(run, ci) {
217
245
  process.stdout.write(` ${sev} ${d.title}\n`);
218
246
  process.stdout.write(chalk.dim(` ${d.file}${d.line ? `:${d.line}` : ""}\n`));
219
247
  if (d.insight)
220
- process.stdout.write(chalk.dim(` ${d.insight.slice(0, 120)}${d.insight.length > 120 ? "…" : ""}\n`));
248
+ process.stdout.write(chalk.dim(` ${d.insight.replace(/\n/g, "\n ")}\n`));
221
249
  if (d.suggestedCode) {
222
250
  process.stdout.write(chalk.cyan(" Suggested refactor:\n"));
223
251
  process.stdout.write(chalk.dim(d.suggestedCode.split("\n").map((l) => " " + l).join("\n") + "\n"));
@@ -245,18 +273,10 @@ function printCliReport(run, ci) {
245
273
  process.stdout.write(chalk.dim(" No debt items. Keep it up.\n"));
246
274
  }
247
275
  process.stdout.write("\n");
248
- if (run.llmNextSteps && run.llmNextSteps.length > 0) {
249
- process.stdout.write(chalk.bold.cyan(" Recommended next steps (AI)\n"));
250
- process.stdout.write(chalk.dim(" " + "—".repeat(50) + "\n"));
251
- for (const step of run.llmNextSteps) {
252
- process.stdout.write(chalk.cyan(" • ") + step + "\n");
253
- }
254
- process.stdout.write("\n");
255
- }
256
276
  process.stdout.write(chalk.dim(" Run with --format html -o report.html for the interactive dashboard.\n\n"));
257
277
  }
258
278
  function severityOrder(s) {
259
- return { critical: 4, high: 3, medium: 2, low: 1 }[s] ?? 0;
279
+ return SEVERITY_ORDER[s] ?? 0;
260
280
  }
261
281
  function chalkSeverity(s) {
262
282
  const map = {
@@ -0,0 +1,12 @@
1
+ /**
2
+ * Technical debt score 0–100 (higher = more debt).
3
+ * When LLM is used, the overall score is taken from LLM only so it matches per-file and overall LLM scores.
4
+ */
5
+ import type { AnalysisRun } from "./types.js";
6
+ /**
7
+ * Debt score 0–100. Uses a single consistent source when LLM is available so overall and file scores match:
8
+ * - If LLM overall score is set: use it as-is.
9
+ * - Else if any file has LLM file score: use average of those.
10
+ * - Else: static score from debt items.
11
+ */
12
+ export declare function getDebtScore(run: AnalysisRun): number;
@@ -0,0 +1,32 @@
1
+ /**
2
+ * Technical debt score 0–100 (higher = more debt).
3
+ * When LLM is used, the overall score is taken from LLM only so it matches per-file and overall LLM scores.
4
+ */
5
+ /** Compute static score from debt items (severity × confidence). Used when no LLM scores exist. */
6
+ function getStaticDebtScore(run) {
7
+ const items = run.debtItems;
8
+ if (items.length === 0)
9
+ return 0;
10
+ const severityWeight = { low: 1, medium: 2, high: 3, critical: 4 };
11
+ const sum = items.reduce((a, b) => a + (severityWeight[b.severity] ?? 0) * b.confidence, 0);
12
+ return Math.min(100, Math.round((sum / items.length) * 25));
13
+ }
14
+ /**
15
+ * Debt score 0–100. Uses a single consistent source when LLM is available so overall and file scores match:
16
+ * - If LLM overall score is set: use it as-is.
17
+ * - Else if any file has LLM file score: use average of those.
18
+ * - Else: static score from debt items.
19
+ */
20
+ export function getDebtScore(run) {
21
+ if (run.llmOverallScore != null) {
22
+ return Math.min(100, Math.max(0, Math.round(run.llmOverallScore)));
23
+ }
24
+ const fileScores = run.fileMetrics
25
+ .map((m) => m.llmFileScore)
26
+ .filter((s) => typeof s === "number");
27
+ if (fileScores.length > 0) {
28
+ const avg = fileScores.reduce((a, b) => a + b, 0) / fileScores.length;
29
+ return Math.min(100, Math.max(0, Math.round(avg)));
30
+ }
31
+ return getStaticDebtScore(run);
32
+ }
package/dist/llm.d.ts CHANGED
@@ -1,35 +1,49 @@
1
1
  /**
2
2
  * LLM integration: debt explanations, per-file cleanliness, and overall assessment.
3
3
  * Supports OpenAI, OpenRouter (OpenAI-compatible), and Google Gemini.
4
+ *
5
+ * No time limits: requests run until the API returns. Truncation is only from token limits.
6
+ * Override with LLMConfig.maxTokens or --llm-max-tokens. Defaults are generous to avoid cut-off:
7
+ * - Debt item insights (explainDebtItem): config.maxTokens ?? DEFAULT_MAX_TOKENS (2048)
8
+ * - Per-file assessment (assessFileCleanliness): config.maxTokens ?? DEFAULT_MAX_TOKENS_FILE (8192)
9
+ * - Overall assessment (assessOverallCleanliness): config.maxTokens ?? DEFAULT_MAX_TOKENS_OVERALL (8192)
10
+ * - enrichDebtWithInsights: passes config.maxTokens ?? DEFAULT_MAX_TOKENS to each item
4
11
  */
5
- import type { DebtItem, FileMetrics } from "./types.js";
6
- import type { AnalysisRun } from "./types.js";
12
+ import type { AnalysisRun, DebtItem, FileMetrics, LlmFileSeverity } from "./types.js";
7
13
  export interface LLMConfig {
8
14
  apiKey?: string;
9
15
  baseURL?: string;
10
16
  model?: string;
17
+ /** Overrides default token limits for LLM responses (used where applicable). */
11
18
  maxTokens?: number;
12
19
  }
13
20
  export type LLMProvider = "openai" | "openrouter" | "gemini";
14
- /** Resolve provider and auth from config + env. OpenRouter and Gemini take precedence when their keys are set. */
21
+ /** Resolve provider and auth from config + env. When --llm-key is used, provider is inferred from key format so a Gemini key is not sent to OpenRouter. */
15
22
  export declare function resolveLLMConfig(config?: LLMConfig): {
16
23
  provider: LLMProvider;
17
24
  apiKey: string;
18
25
  baseURL: string;
19
26
  model: string;
20
27
  } | null;
21
- export declare function enrichDebtWithInsights(items: DebtItem[], fileContents: Map<string, string>, config?: LLMConfig): Promise<DebtItem[]>;
28
+ /** Optional progress callback: (completedBatches, totalBatches) after each batch. */
29
+ export declare function enrichDebtWithInsights(items: DebtItem[], fileContents: Map<string, string>, config?: LLMConfig, onProgress?: (completed: number, total: number) => void): Promise<DebtItem[]>;
22
30
  /** Context about the rest of the repo for cross-file optimization suggestions. */
23
31
  export interface RepoContext {
24
32
  /** All analyzed file paths in this run (including the current file). */
25
33
  filePaths: string[];
26
34
  }
27
- /** Per-file: LLM assesses cleanliness and suggests optimizations with cross-file context. */
35
+ /** Per-file: LLM gives a short summary, a 0–100 debt score, and optionally one refactor. One request per file; call in parallel from CLI. */
28
36
  export declare function assessFileCleanliness(filePath: string, content: string, metrics: FileMetrics, config?: LLMConfig, repoContext?: RepoContext): Promise<{
29
37
  assessment: string;
30
38
  suggestedCode?: string;
39
+ fileScore?: number;
40
+ severity?: LlmFileSeverity;
41
+ raw: string;
42
+ } | null>;
43
+ /** Overall: LLM assesses the whole codebase and optionally a 0–100 debt score. */
44
+ export declare function assessOverallCleanliness(run: AnalysisRun, config?: LLMConfig): Promise<{
45
+ assessment: string;
46
+ score?: number;
47
+ severity?: LlmFileSeverity;
48
+ raw: string;
31
49
  } | null>;
32
- /** Overall: LLM assesses the whole codebase cleanliness in a short paragraph. */
33
- export declare function assessOverallCleanliness(run: AnalysisRun, config?: LLMConfig): Promise<string | null>;
34
- /** LLM suggests 3–5 prioritized next steps (actionable bullets). */
35
- export declare function suggestNextSteps(run: AnalysisRun, config?: LLMConfig): Promise<string[] | null>;