tech-debt-visualizer 0.1.0 → 0.1.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -35,7 +35,7 @@ The tool runs a fixed pipeline so you know exactly what you’re getting.
35
35
  Pluggable analyzers (today: JavaScript/TypeScript and Python) parse each file with **tree-sitter**, then:
36
36
  - Count **cyclomatic complexity** (if/else, loops, switch, ternaries, `&&`/`||`).
37
37
  - Count effective lines and check for **module-level docs** (JSDoc, docstrings).
38
- - Emit **debt items** (e.g. “High cyclomatic complexity”, “Missing documentation”, “Large file”) with severity and confidence.
38
+ - Emit **debt items** (e.g. “High cyclomatic complexity”, “Missing documentation”, “Large file”, “TODO/FIXME/HACK/XXX markers”) with severity and confidence.
39
39
 
40
40
  3. **Enrich with git**
41
41
  Uses `git log` (e.g. last 90 days) to compute per-file **churn** and **commit count**. Combines that with complexity into a **hotspot score**: files that change often and are complex are treated as higher risk. A simple **debt trend** over recent commits is also derived (heuristic, not full historical analysis).
@@ -43,11 +43,13 @@ The tool runs a fixed pipeline so you know exactly what you’re getting.
43
43
  4. **Score and tier**
44
44
  A single **debt score** (0–100) is computed from severity and confidence of all debt items. That score is mapped to a **Cleanliness tier** (1–5), e.g. “Thoughtful Prompter (3/5)” or “Pure Coder (5/5)”, shown at the top of the CLI and report.
45
45
 
46
+ **How the score is calculated:** Each debt item has a severity (low=1, medium=2, high=3, critical=4) and a confidence (0–1). The score is the weighted average of (severity × confidence) across all items, scaled by 25 and capped at 100: worse items and higher-confidence findings → higher score (worse cleanliness).
47
+
46
48
  5. **Optional LLM pass**
47
49
  If an API key is set and you don’t use `--no-llm`, the tool:
48
- - Asks the LLM to assess **per-file cleanliness** for the top ~15 hotspot files (and optionally suggest a **concrete code refactor** in a code block).
50
+ - Asks the LLM to assess **per-file cleanliness** for each analyzed file (up to 80) with repo context for cross-file suggestions (and optionally suggest a **concrete code refactor** in a code block).
49
51
  - Asks the LLM to explain **each debt item** (why it matters, what to do) and optionally suggest a **simplified/refactored code snippet**.
50
- - Asks the LLM for one **overall codebase assessment** (a short paragraph).
52
+ - Asks the LLM for one **overall codebase assessment** (a short paragraph) and optional **prioritized next steps** (3–5 bullets).
51
53
 
52
54
  Responses are parsed: prose goes into insights/assessments; any markdown code block is stored as **suggested refactor** and shown in CLI and HTML.
53
55
 
@@ -8,6 +8,11 @@ import type { DebtCategory, DebtItem, Severity } from "../types.js";
8
8
  export declare function countComplexity(node: Parser.SyntaxNode): number;
9
9
  /** Count lines in source (excluding empty and comment-only). */
10
10
  export declare function effectiveLines(source: string): number;
11
+ /** Find TODO/FIXME/HACK/XXX markers (e.g. in comments). Returns 1-based line numbers and tag. */
12
+ export declare function findTodoFixmeHack(source: string): {
13
+ line: number;
14
+ tag: string;
15
+ }[];
11
16
  export declare function createDebtItem(file: string, category: DebtCategory, title: string, description: string, opts?: {
12
17
  line?: number;
13
18
  endLine?: number;
@@ -15,4 +20,11 @@ export declare function createDebtItem(file: string, category: DebtCategory, tit
15
20
  confidence?: number;
16
21
  metrics?: Record<string, number | string>;
17
22
  }): DebtItem;
23
+ /** Cyclomatic complexity → severity: low 15–24, medium 25–39, high 40–59, critical ≥ 60. */
24
+ export declare const CYCLOMATIC_THRESHOLDS: {
25
+ readonly low: 15;
26
+ readonly medium: 25;
27
+ readonly high: 40;
28
+ readonly critical: 60;
29
+ };
18
30
  export declare function inferSeverity(complexity: number): Severity;
@@ -42,6 +42,17 @@ export function effectiveLines(source) {
42
42
  return t.length > 0 && !t.startsWith("//") && !t.startsWith("#") && !t.startsWith("/*") && !t.startsWith("*");
43
43
  }).length;
44
44
  }
45
+ const TODO_FIXME_RE = /\b(TODO|FIXME|HACK|XXX)\b/i;
46
+ /** Find TODO/FIXME/HACK/XXX markers (e.g. in comments). Returns 1-based line numbers and tag. */
47
+ export function findTodoFixmeHack(source) {
48
+ const out = [];
49
+ source.split("\n").forEach((line, i) => {
50
+ const m = line.match(TODO_FIXME_RE);
51
+ if (m)
52
+ out.push({ line: i + 1, tag: m[1].toUpperCase() });
53
+ });
54
+ return out;
55
+ }
45
56
  export function createDebtItem(file, category, title, description, opts = {}) {
46
57
  const id = `${file}:${opts.line ?? 0}:${category}:${title.slice(0, 30)}`.replace(/\s/g, "_");
47
58
  return {
@@ -57,12 +68,16 @@ export function createDebtItem(file, category, title, description, opts = {}) {
57
68
  metrics: opts.metrics,
58
69
  };
59
70
  }
71
+ /** Cyclomatic complexity → severity: low 15–24, medium 25–39, high 40–59, critical ≥ 60. */
72
+ export const CYCLOMATIC_THRESHOLDS = { low: 15, medium: 25, high: 40, critical: 60 };
60
73
  export function inferSeverity(complexity) {
61
- if (complexity >= 20)
74
+ if (complexity >= CYCLOMATIC_THRESHOLDS.critical)
62
75
  return "critical";
63
- if (complexity >= 10)
76
+ if (complexity >= CYCLOMATIC_THRESHOLDS.high)
64
77
  return "high";
65
- if (complexity >= 5)
78
+ if (complexity >= CYCLOMATIC_THRESHOLDS.medium)
66
79
  return "medium";
80
+ if (complexity >= CYCLOMATIC_THRESHOLDS.low)
81
+ return "low";
67
82
  return "low";
68
83
  }
@@ -5,7 +5,7 @@
5
5
  import Parser from "tree-sitter";
6
6
  import JavaScript from "tree-sitter-javascript";
7
7
  import TypeScript from "tree-sitter-typescript";
8
- import { countComplexity, createDebtItem, effectiveLines, inferSeverity, } from "./base.js";
8
+ import { countComplexity, createDebtItem, effectiveLines, findTodoFixmeHack, inferSeverity, } from "./base.js";
9
9
  const LANG_JS = "javascript";
10
10
  const LANG_TS = "typescript";
11
11
  const JS_EXT = /\.(?:js|jsx|mjs|cjs)$/i;
@@ -56,7 +56,7 @@ export const javascriptAnalyzer = {
56
56
  hasDocumentation,
57
57
  };
58
58
  metrics.push(fileMetric);
59
- if (complexity >= 5) {
59
+ if (complexity >= 15) {
60
60
  debtItems.push(createDebtItem(path, "complexity", `High cyclomatic complexity (${complexity})`, `This file has ${complexity} decision points, which makes it harder to test and maintain.`, {
61
61
  severity: inferSeverity(complexity),
62
62
  confidence: 0.85,
@@ -73,6 +73,17 @@ export const javascriptAnalyzer = {
73
73
  if (!hasDocumentation && lineCount > 50) {
74
74
  debtItems.push(createDebtItem(path, "documentation", "Missing module-level documentation", "No JSDoc or file-level comment found. Document purpose and usage for maintainability.", { confidence: 0.7 }));
75
75
  }
76
+ const todoMarkers = findTodoFixmeHack(content);
77
+ if (todoMarkers.length > 0) {
78
+ const severity = todoMarkers.length >= 6 ? "high" : todoMarkers.length >= 3 ? "medium" : "low";
79
+ const first = todoMarkers[0];
80
+ debtItems.push(createDebtItem(path, "other", `${todoMarkers.length} TODO/FIXME/HACK/XXX marker(s)`, `Address or track these in issues: ${todoMarkers.map((m) => `${m.tag} L${m.line}`).join(", ")}.`, {
81
+ line: first.line,
82
+ severity,
83
+ confidence: 0.75,
84
+ metrics: { count: todoMarkers.length },
85
+ }));
86
+ }
76
87
  }
77
88
  catch (e) {
78
89
  errors.push({ file: path, message: e instanceof Error ? e.message : String(e) });
@@ -4,7 +4,7 @@
4
4
  */
5
5
  import Parser from "tree-sitter";
6
6
  import Python from "tree-sitter-python";
7
- import { countComplexity, createDebtItem, effectiveLines, inferSeverity, } from "./base.js";
7
+ import { countComplexity, createDebtItem, effectiveLines, findTodoFixmeHack, inferSeverity, } from "./base.js";
8
8
  const PY_EXT = /\.py$/i;
9
9
  function getParser() {
10
10
  const P = new Parser();
@@ -44,7 +44,7 @@ export const pythonAnalyzer = {
44
44
  hasDocumentation,
45
45
  };
46
46
  metrics.push(fileMetric);
47
- if (complexity >= 5) {
47
+ if (complexity >= 15) {
48
48
  debtItems.push(createDebtItem(path, "complexity", `High cyclomatic complexity (${complexity})`, `This module has ${complexity} decision points. Consider simplifying conditionals or extracting functions.`, {
49
49
  severity: inferSeverity(complexity),
50
50
  confidence: 0.85,
@@ -61,6 +61,17 @@ export const pythonAnalyzer = {
61
61
  if (!hasDocumentation && lineCount > 30) {
62
62
  debtItems.push(createDebtItem(path, "documentation", "Missing module docstring", "No module-level docstring found. Add a docstring describing the module's purpose.", { confidence: 0.7 }));
63
63
  }
64
+ const todoMarkers = findTodoFixmeHack(content);
65
+ if (todoMarkers.length > 0) {
66
+ const severity = todoMarkers.length >= 6 ? "high" : todoMarkers.length >= 3 ? "medium" : "low";
67
+ const first = todoMarkers[0];
68
+ debtItems.push(createDebtItem(path, "other", `${todoMarkers.length} TODO/FIXME/HACK/XXX marker(s)`, `Address or track these in issues: ${todoMarkers.map((m) => `${m.tag} L${m.line}`).join(", ")}.`, {
69
+ line: first.line,
70
+ severity,
71
+ confidence: 0.75,
72
+ metrics: { count: todoMarkers.length },
73
+ }));
74
+ }
64
75
  }
65
76
  catch (e) {
66
77
  errors.push({ file: path, message: e instanceof Error ? e.message : String(e) });
package/dist/cli.d.ts CHANGED
@@ -2,4 +2,4 @@
2
2
  /**
3
3
  * CLI entry: colorful terminal output, progress bars, actionable insights.
4
4
  */
5
- export {};
5
+ import "dotenv/config";
package/dist/cli.js CHANGED
@@ -2,6 +2,7 @@
2
2
  /**
3
3
  * CLI entry: colorful terminal output, progress bars, actionable insights.
4
4
  */
5
+ import "dotenv/config";
5
6
  import { Command } from "commander";
6
7
  import chalk from "chalk";
7
8
  import cliProgress from "cli-progress";
@@ -9,7 +10,7 @@ import { readFile } from "node:fs/promises";
9
10
  import { join } from "node:path";
10
11
  import { getCleanlinessTier } from "./cleanliness-score.js";
11
12
  import { runAnalysis } from "./engine.js";
12
- import { assessFileCleanliness, assessOverallCleanliness, enrichDebtWithInsights, } from "./llm.js";
13
+ import { assessFileCleanliness, assessOverallCleanliness, enrichDebtWithInsights, resolveLLMConfig, suggestNextSteps, } from "./llm.js";
13
14
  import { generateHtmlReport } from "./reports/html.js";
14
15
  import { generateJsonReport } from "./reports/json.js";
15
16
  import { generateMarkdownReport } from "./reports/markdown.js";
@@ -57,36 +58,47 @@ program
57
58
  }
58
59
  }
59
60
  if (useLlm) {
60
- progress.update(3, { task: "LLM: per-file cleanliness..." });
61
- const filesToAssess = run.fileMetrics
62
- .sort((a, b) => (b.hotspotScore ?? 0) - (a.hotspotScore ?? 0))
63
- .slice(0, 15);
64
- for (const m of filesToAssess) {
65
- const content = fileContents.get(m.file);
66
- if (!content)
67
- continue;
68
- const result = await assessFileCleanliness(m.file, content, m);
69
- if (result) {
70
- const idx = run.fileMetrics.findIndex((x) => x.file === m.file);
71
- if (idx >= 0)
72
- run.fileMetrics[idx] = {
73
- ...run.fileMetrics[idx],
74
- llmAssessment: result.assessment,
75
- llmSuggestedCode: result.suggestedCode,
76
- };
77
- }
61
+ const llmConfig = resolveLLMConfig();
62
+ if (!llmConfig) {
63
+ process.stderr.write(chalk.yellow(" No LLM API key found. Set one of: GEMINI_API_KEY, OPENAI_API_KEY, or OPENROUTER_API_KEY.\n" +
64
+ " Example: export GEMINI_API_KEY=your_key_here\n" +
65
+ " Skipping AI insights for this run.\n\n"));
78
66
  }
79
- progress.update(4, { task: "LLM: debt item insights..." });
80
- let debtItems = run.debtItems;
81
- if (debtItems.length > 0) {
82
- debtItems = await enrichDebtWithInsights(debtItems.slice(0, 25), fileContents);
83
- const byId = new Map(debtItems.map((d) => [d.id, d]));
84
- run.debtItems = run.debtItems.map((d) => byId.get(d.id) ?? d);
67
+ else {
68
+ progress.update(3, { task: "LLM: per-file cleanliness..." });
69
+ const allFilePaths = run.fileMetrics.map((m) => m.file);
70
+ const maxFiles = 80;
71
+ const filesToAssess = run.fileMetrics.slice(0, maxFiles);
72
+ for (const m of filesToAssess) {
73
+ const content = fileContents.get(m.file);
74
+ if (!content)
75
+ continue;
76
+ const result = await assessFileCleanliness(m.file, content, m, {}, { filePaths: allFilePaths });
77
+ if (result) {
78
+ const idx = run.fileMetrics.findIndex((x) => x.file === m.file);
79
+ if (idx >= 0)
80
+ run.fileMetrics[idx] = {
81
+ ...run.fileMetrics[idx],
82
+ llmAssessment: result.assessment,
83
+ llmSuggestedCode: result.suggestedCode,
84
+ };
85
+ }
86
+ }
87
+ progress.update(4, { task: "LLM: debt item insights..." });
88
+ let debtItems = run.debtItems;
89
+ if (debtItems.length > 0) {
90
+ debtItems = await enrichDebtWithInsights(debtItems.slice(0, 25), fileContents);
91
+ const byId = new Map(debtItems.map((d) => [d.id, d]));
92
+ run.debtItems = run.debtItems.map((d) => byId.get(d.id) ?? d);
93
+ }
94
+ progress.update(5, { task: "LLM: overall assessment..." });
95
+ const overall = await assessOverallCleanliness(run);
96
+ if (overall)
97
+ run.llmOverallAssessment = overall;
98
+ const nextSteps = await suggestNextSteps(run);
99
+ if (nextSteps?.length)
100
+ run.llmNextSteps = nextSteps;
85
101
  }
86
- progress.update(5, { task: "LLM: overall assessment..." });
87
- const overall = await assessOverallCleanliness(run);
88
- if (overall)
89
- run.llmOverallAssessment = overall;
90
102
  }
91
103
  progress.update(totalSteps, { task: "Done" });
92
104
  progress.stop();
@@ -120,6 +132,10 @@ program
120
132
  }
121
133
  else {
122
134
  printCliReport(run, opts.ci ?? false);
135
+ if (!run.llmOverallAssessment) {
136
+ process.stdout.write(chalk.dim(" To get AI insights, per-file optimization suggestions, and refactor recommendations:\n" +
137
+ " set GEMINI_API_KEY or OPENAI_API_KEY and run without --no-llm.\n\n"));
138
+ }
123
139
  if (opts.ci && getDebtScore(run) > 60)
124
140
  process.exit(1);
125
141
  }
@@ -156,7 +172,8 @@ function printCliReport(run, ci) {
156
172
  if (run.debtTrend && run.debtTrend.length > 0) {
157
173
  process.stdout.write(` Recent commits: ${chalk.cyan(String(run.debtTrend.length))}\n`);
158
174
  }
159
- process.stdout.write(` Debt score: ${severityColor(score)} / 100\n\n`);
175
+ process.stdout.write(` Debt score: ${severityColor(score)} / 100\n`);
176
+ process.stdout.write(chalk.dim(" (weighted average of debt item severity × confidence, 0–100)\n\n"));
160
177
  const bySeverity = { critical: 0, high: 0, medium: 0, low: 0 };
161
178
  for (const d of debtItems) {
162
179
  bySeverity[d.severity]++;
@@ -206,6 +223,29 @@ function printCliReport(run, ci) {
206
223
  for (const e of errors.slice(0, 5)) {
207
224
  process.stdout.write(chalk.dim(` ${e.file}: ${e.message}\n`));
208
225
  }
226
+ process.stdout.write("\n");
227
+ }
228
+ process.stdout.write(chalk.bold(" What to fix\n"));
229
+ process.stdout.write(chalk.dim(" " + "—".repeat(50) + "\n"));
230
+ const severityLabel = (s) => s.charAt(0).toUpperCase() + s.slice(1);
231
+ const fixList = debtItems
232
+ .sort((a, b) => severityOrder(b.severity) - severityOrder(a.severity))
233
+ .slice(0, 12)
234
+ .map((d) => ` • [${severityLabel(d.severity)}] ${d.title} — ${d.file}${d.line != null ? `:${d.line}` : ""}`);
235
+ if (fixList.length > 0) {
236
+ fixList.forEach((line) => process.stdout.write(line + "\n"));
237
+ }
238
+ else {
239
+ process.stdout.write(chalk.dim(" No debt items. Keep it up.\n"));
240
+ }
241
+ process.stdout.write("\n");
242
+ if (run.llmNextSteps && run.llmNextSteps.length > 0) {
243
+ process.stdout.write(chalk.bold.cyan(" Recommended next steps (AI)\n"));
244
+ process.stdout.write(chalk.dim(" " + "—".repeat(50) + "\n"));
245
+ for (const step of run.llmNextSteps) {
246
+ process.stdout.write(chalk.cyan(" • ") + step + "\n");
247
+ }
248
+ process.stdout.write("\n");
209
249
  }
210
250
  process.stdout.write(chalk.dim(" Run with --format html -o report.html for the interactive dashboard.\n\n"));
211
251
  }
package/dist/llm.d.ts CHANGED
@@ -19,10 +19,17 @@ export declare function resolveLLMConfig(config?: LLMConfig): {
19
19
  model: string;
20
20
  } | null;
21
21
  export declare function enrichDebtWithInsights(items: DebtItem[], fileContents: Map<string, string>, config?: LLMConfig): Promise<DebtItem[]>;
22
- /** Per-file: LLM assesses cleanliness and optionally suggests a concrete code simplification. */
23
- export declare function assessFileCleanliness(filePath: string, content: string, metrics: FileMetrics, config?: LLMConfig): Promise<{
22
+ /** Context about the rest of the repo for cross-file optimization suggestions. */
23
+ export interface RepoContext {
24
+ /** All analyzed file paths in this run (including the current file). */
25
+ filePaths: string[];
26
+ }
27
+ /** Per-file: LLM assesses cleanliness and suggests optimizations with cross-file context. */
28
+ export declare function assessFileCleanliness(filePath: string, content: string, metrics: FileMetrics, config?: LLMConfig, repoContext?: RepoContext): Promise<{
24
29
  assessment: string;
25
30
  suggestedCode?: string;
26
31
  } | null>;
27
32
  /** Overall: LLM assesses the whole codebase cleanliness in a short paragraph. */
28
33
  export declare function assessOverallCleanliness(run: AnalysisRun, config?: LLMConfig): Promise<string | null>;
34
+ /** LLM suggests 3–5 prioritized next steps (actionable bullets). */
35
+ export declare function suggestNextSteps(run: AnalysisRun, config?: LLMConfig): Promise<string[] | null>;
package/dist/llm.js CHANGED
@@ -169,17 +169,21 @@ Reply format: Short explanation first, then optionally a code block with the sug
169
169
  const { prose, code } = parseCodeBlockAndProse(raw);
170
170
  return { insight: prose || item.description, suggestedCode: code };
171
171
  }
172
- /** Per-file: LLM assesses cleanliness and optionally suggests a concrete code simplification. */
173
- export async function assessFileCleanliness(filePath, content, metrics, config = {}) {
172
+ /** Per-file: LLM assesses cleanliness and suggests optimizations with cross-file context. */
173
+ export async function assessFileCleanliness(filePath, content, metrics, config = {}, repoContext) {
174
174
  const resolved = resolveLLMConfig(config);
175
175
  if (!resolved)
176
176
  return null;
177
177
  const snippet = content.length > 4000 ? content.slice(0, 4000) + "\n\n[... truncated]" : content;
178
- const prompt = `You are a senior engineer assessing code cleanliness. For this file:
179
-
180
- 1) In 1-2 sentences: how clean and maintainable is it, and one concrete improvement (or "Looks good" if fine).
181
- 2) If one specific code simplification or streamlining is possible (e.g. simplify a function, reduce nesting, extract a helper), provide ONLY that refactored snippet in a markdown code block. Same language as the file. If no clear code change applies, omit the code block.
178
+ const otherFiles = repoContext?.filePaths?.filter((p) => p !== filePath).slice(0, 80) ?? [];
179
+ const repoContextBlock = otherFiles.length > 0
180
+ ? `\nRepository context (other files in this run): ${otherFiles.join(", ")}.\nWhen suggesting optimizations, you may reference other files (e.g. extract to a shared module, reuse from another file, or move code between files). Explain why each suggestion helps.\n`
181
+ : "";
182
+ const prompt = `You are a senior engineer assessing code cleanliness and possible optimizations for this file.
182
183
 
184
+ 1) In 1-3 sentences: how clean and maintainable is it, and one or two concrete improvements (or "Looks good" if fine). Explain why each improvement matters.
185
+ 2) If one specific optimization is possible (e.g. simplify a function, reduce nesting, extract a helper, or a cross-file refactor like moving code to a shared module), provide ONLY that refactored snippet in a markdown code block. Same language as the file. Briefly say why it helps. If no clear code change applies, omit the code block.
186
+ ${repoContextBlock}
183
187
  File: ${filePath}
184
188
  Metrics: complexity ${metrics.cyclomaticComplexity ?? "?"}, lines ${metrics.lineCount}, ${metrics.hasDocumentation ? "has docs" : "no module docs"}${metrics.hotspotScore != null ? `, hotspot ${metrics.hotspotScore.toFixed(2)}` : ""}.
185
189
 
@@ -188,10 +192,10 @@ Code:
188
192
  ${snippet}
189
193
  \`\`\`
190
194
 
191
- Reply: short assessment first, then optionally a code block with the suggested refactor. No preamble.`;
195
+ Reply: short assessment first (with brief "why"), then optionally a code block with the suggested refactor. No preamble.`;
192
196
  const raw = await chat(prompt, {
193
197
  ...resolved,
194
- maxTokens: config.maxTokens ?? 400,
198
+ maxTokens: config.maxTokens ?? 500,
195
199
  });
196
200
  if (!raw)
197
201
  return null;
@@ -225,3 +229,38 @@ In one short paragraph (3-5 sentences), assess overall cleanliness: main strengt
225
229
  maxTokens: config.maxTokens ?? DEFAULT_MAX_TOKENS_OVERALL,
226
230
  });
227
231
  }
232
+ /** LLM suggests 3–5 prioritized next steps (actionable bullets). */
233
+ export async function suggestNextSteps(run, config = {}) {
234
+ const resolved = resolveLLMConfig(config);
235
+ if (!resolved)
236
+ return null;
237
+ const fileCount = run.fileMetrics.length;
238
+ const debtCount = run.debtItems.length;
239
+ const bySeverity = run.debtItems.reduce((acc, d) => {
240
+ acc[d.severity] = (acc[d.severity] ?? 0) + 1;
241
+ return acc;
242
+ }, {});
243
+ const severityOrd = (s) => ({ critical: 4, high: 3, medium: 2, low: 1 }[s] ?? 0);
244
+ const topItems = run.debtItems
245
+ .sort((a, b) => severityOrd(b.severity) - severityOrd(a.severity))
246
+ .slice(0, 15)
247
+ .map((d) => `${d.severity}: ${d.title} (${d.file}${d.line ? `:${d.line}` : ""})`);
248
+ const prompt = `You are a senior engineer. Given this technical debt summary, suggest 3–5 concrete, prioritized next steps the team should take to reduce debt. Be specific (files, types of fixes). Output ONLY a short bullet list: one action per line, starting each line with "- " or "• ". No preamble or explanation.
249
+
250
+ Summary: ${fileCount} files, ${debtCount} debt items. By severity: ${JSON.stringify(bySeverity)}.
251
+ Sample items: ${topItems.join("; ")}
252
+
253
+ List 3–5 next steps:`;
254
+ const raw = await chat(prompt, {
255
+ ...resolved,
256
+ maxTokens: 300,
257
+ });
258
+ if (!raw)
259
+ return null;
260
+ const bullets = raw
261
+ .split(/\n/)
262
+ .map((s) => s.replace(/^[\s\-•*]+\s*/, "").trim())
263
+ .filter((s) => s.length > 0)
264
+ .slice(0, 5);
265
+ return bullets.length > 0 ? bullets : null;
266
+ }
@@ -22,6 +22,7 @@ function buildHtml(run, title, darkMode) {
22
22
  debtItems: run.debtItems,
23
23
  debtTrend: run.debtTrend ?? [],
24
24
  llmOverallAssessment: run.llmOverallAssessment ?? null,
25
+ llmNextSteps: run.llmNextSteps ?? null,
25
26
  summary: {
26
27
  filesAnalyzed: run.fileMetrics.length,
27
28
  debtCount: run.debtItems.length,
@@ -261,6 +262,14 @@ function buildHtml(run, title, darkMode) {
261
262
  <p class="llm-overall-text">${escapeHtml(run.llmOverallAssessment)}</p>
262
263
  </div>
263
264
  ` : ""}
265
+ ${run.llmNextSteps?.length ? `
266
+ <div class="section llm-next-steps">
267
+ <h2>Recommended next steps (AI)</h2>
268
+ <ul>
269
+ ${run.llmNextSteps.map((s) => `<li>${escapeHtml(s)}</li>`).join("\n ")}
270
+ </ul>
271
+ </div>
272
+ ` : ""}
264
273
 
265
274
  <div class="glossary">
266
275
  <h2>Understanding this report</h2>
@@ -14,6 +14,7 @@ export function generateJsonReport(run) {
14
14
  debtTrend: run.debtTrend,
15
15
  errors: run.errors,
16
16
  llmOverallAssessment: run.llmOverallAssessment ?? undefined,
17
+ llmNextSteps: run.llmNextSteps ?? undefined,
17
18
  };
18
19
  return JSON.stringify(payload, null, 2);
19
20
  }
@@ -60,5 +60,24 @@ export function generateMarkdownReport(run) {
60
60
  }
61
61
  lines.push("");
62
62
  }
63
+ if (run.debtItems.length > 0) {
64
+ lines.push("## What to fix");
65
+ lines.push("");
66
+ const top = run.debtItems
67
+ .sort((a, b) => ({ critical: 4, high: 3, medium: 2, low: 1 }[b.severity] ?? 0) - ({ critical: 4, high: 3, medium: 2, low: 1 }[a.severity] ?? 0))
68
+ .slice(0, 12);
69
+ for (const d of top) {
70
+ lines.push(`- **[${d.severity}]** ${d.title} — \`${d.file}${d.line != null ? `:${d.line}` : ""}\``);
71
+ }
72
+ lines.push("");
73
+ }
74
+ if (run.llmNextSteps?.length) {
75
+ lines.push("## Recommended next steps (AI)");
76
+ lines.push("");
77
+ for (const step of run.llmNextSteps) {
78
+ lines.push(`- ${step}`);
79
+ }
80
+ lines.push("");
81
+ }
63
82
  return lines.join("\n");
64
83
  }
package/dist/types.d.ts CHANGED
@@ -88,6 +88,8 @@ export interface AnalysisRun {
88
88
  }>;
89
89
  /** LLM-generated overall codebase cleanliness assessment (when LLM attached) */
90
90
  llmOverallAssessment?: string;
91
+ /** LLM-generated prioritized next steps (when LLM attached) */
92
+ llmNextSteps?: string[];
91
93
  }
92
94
  /** Pluggable analyzer: given file paths and content, returns metrics + debt items */
93
95
  export interface IAnalyzer {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "tech-debt-visualizer",
3
- "version": "0.1.0",
3
+ "version": "0.1.2",
4
4
  "description": "Language-agnostic CLI that analyzes repos and generates interactive technical debt visualizations with AI-powered insights",
5
5
  "type": "module",
6
6
  "main": "dist/index.js",
@@ -30,6 +30,7 @@
30
30
  "README.md"
31
31
  ],
32
32
  "dependencies": {
33
+ "dotenv": "^16.4.5",
33
34
  "chalk": "^5.3.0",
34
35
  "cli-progress": "^3.12.0",
35
36
  "commander": "^12.1.0",