tech-debt-visualizer 0.1.2 → 0.1.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -79,6 +79,9 @@ Requires **Node 18+**.
79
79
  | `-f, --format` | `cli` (default), `html`, `json`, or `markdown` |
80
80
  | `-o, --output` | Output path (e.g. `report.html` for HTML) |
81
81
  | `--no-llm` | Skip all LLM calls (no API key needed) |
82
+ | `--llm` | Enable LLM (default). Use with `--llm-key` and/or `--llm-model` |
83
+ | `--llm-key <key>` | API key (overrides env / `.env`) |
84
+ | `--llm-model <model>` | Model name (e.g. `gemini-1.5-flash`, `gpt-4o-mini`) |
82
85
  | `--ci` | Terse output; exit code 1 if debt score > 60 |
83
86
 
84
87
  Examples:
@@ -87,6 +90,10 @@ Examples:
87
90
  node dist/cli.js analyze . -f html -o report.html
88
91
  node dist/cli.js analyze ./src -f json -o debt.json
89
92
  node dist/cli.js analyze . --ci
93
+ # With LLM key and model on the command line:
94
+ node dist/cli.js analyze . --llm-key YOUR_GEMINI_KEY --llm-model gemini-1.5-flash
95
+ # Or use a .env file in the current directory (GEMINI_API_KEY=...)
96
+ node dist/cli.js analyze .
90
97
  ```
91
98
 
92
99
  ---
@@ -95,6 +102,8 @@ node dist/cli.js analyze . --ci
95
102
 
96
103
  The tool can call an LLM to get **explanations** and **concrete code refactor suggestions**. You only need one provider; the first one with a key wins.
97
104
 
105
+ **Ways to pass the API key:** (1) Put `GEMINI_API_KEY=your_key` (or `OPENAI_API_KEY`, etc.) in a **`.env` file** in the directory you run the command from (current working directory). (2) **Export** in the same shell: `export GEMINI_API_KEY=your_key`. (3) **CLI flag**: `--llm-key your_key`. You can also set **model** via `--llm-model gemini-1.5-flash` (or another model name).
106
+
98
107
  | Provider | Env var(s) | Optional env |
99
108
  |----------|------------|---------------|
100
109
  | **OpenRouter** | `OPENROUTER_API_KEY` | `OPENROUTER_MODEL` (default: `google/gemini-2.0-flash-001`) |
package/dist/cli.d.ts CHANGED
@@ -1,5 +1,2 @@
1
1
  #!/usr/bin/env node
2
- /**
3
- * CLI entry: colorful terminal output, progress bars, actionable insights.
4
- */
5
- import "dotenv/config";
2
+ export {};
package/dist/cli.js CHANGED
@@ -1,13 +1,14 @@
1
1
  #!/usr/bin/env node
2
2
  /**
3
3
  * CLI entry: colorful terminal output, progress bars, actionable insights.
4
+ * Loads .env from cwd first; supports --llm-key and --llm-model.
4
5
  */
5
- import "dotenv/config";
6
+ import { readFile } from "node:fs/promises";
7
+ import { join } from "node:path";
8
+ import { loadEnv } from "./load-env.js";
6
9
  import { Command } from "commander";
7
10
  import chalk from "chalk";
8
11
  import cliProgress from "cli-progress";
9
- import { readFile } from "node:fs/promises";
10
- import { join } from "node:path";
11
12
  import { getCleanlinessTier } from "./cleanliness-score.js";
12
13
  import { runAnalysis } from "./engine.js";
13
14
  import { assessFileCleanliness, assessOverallCleanliness, enrichDebtWithInsights, resolveLLMConfig, suggestNextSteps, } from "./llm.js";
@@ -18,7 +19,7 @@ const program = new Command();
18
19
  program
19
20
  .name("tech-debt")
20
21
  .description("Analyze repositories and visualize technical debt with AI-powered insights")
21
- .version("0.1.0");
22
+ .version("0.1.2");
22
23
  program
23
24
  .command("analyze")
24
25
  .description("Analyze a repository and output report")
@@ -26,6 +27,9 @@ program
26
27
  .option("-o, --output <path>", "Output file path (default: report.html or stdout for CLI)")
27
28
  .option("-f, --format <type>", "Output format: cli | html | json | markdown", "cli")
28
29
  .option("--no-llm", "Skip LLM-powered insights")
30
+ .option("--llm", "Enable LLM (default). Use with --llm-key and/or --llm-model")
31
+ .option("--llm-key <key>", "API key for LLM (overrides GEMINI_API_KEY / OPENAI_API_KEY / OPENROUTER_API_KEY)")
32
+ .option("--llm-model <model>", "Model name (e.g. gemini-1.5-flash, gpt-4o-mini)")
29
33
  .option("--ci", "CI mode: minimal output, exit with non-zero if debt score is high")
30
34
  .action(async (path, opts) => {
31
35
  const repoPath = join(process.cwd(), path);
@@ -57,11 +61,13 @@ program
57
61
  // ignore
58
62
  }
59
63
  }
64
+ const llmConfigOverrides = { apiKey: opts.llmKey, model: opts.llmModel };
60
65
  if (useLlm) {
61
- const llmConfig = resolveLLMConfig();
66
+ const llmConfig = resolveLLMConfig(llmConfigOverrides);
62
67
  if (!llmConfig) {
63
- process.stderr.write(chalk.yellow(" No LLM API key found. Set one of: GEMINI_API_KEY, OPENAI_API_KEY, or OPENROUTER_API_KEY.\n" +
64
- " Example: export GEMINI_API_KEY=your_key_here\n" +
68
+ process.stderr.write(chalk.yellow(" No LLM API key found. Set GEMINI_API_KEY or OPENAI_API_KEY (or use --llm-key <key>).\n" +
69
+ " Example: export GEMINI_API_KEY=your_key or --llm-key your_key\n" +
70
+ " Or add GEMINI_API_KEY=your_key to a .env file in the current directory.\n" +
65
71
  " Skipping AI insights for this run.\n\n"));
66
72
  }
67
73
  else {
@@ -69,11 +75,12 @@ program
69
75
  const allFilePaths = run.fileMetrics.map((m) => m.file);
70
76
  const maxFiles = 80;
71
77
  const filesToAssess = run.fileMetrics.slice(0, maxFiles);
78
+ const config = { ...llmConfigOverrides };
72
79
  for (const m of filesToAssess) {
73
80
  const content = fileContents.get(m.file);
74
81
  if (!content)
75
82
  continue;
76
- const result = await assessFileCleanliness(m.file, content, m, {}, { filePaths: allFilePaths });
83
+ const result = await assessFileCleanliness(m.file, content, m, config, { filePaths: allFilePaths });
77
84
  if (result) {
78
85
  const idx = run.fileMetrics.findIndex((x) => x.file === m.file);
79
86
  if (idx >= 0)
@@ -87,15 +94,15 @@ program
87
94
  progress.update(4, { task: "LLM: debt item insights..." });
88
95
  let debtItems = run.debtItems;
89
96
  if (debtItems.length > 0) {
90
- debtItems = await enrichDebtWithInsights(debtItems.slice(0, 25), fileContents);
97
+ debtItems = await enrichDebtWithInsights(debtItems.slice(0, 25), fileContents, config);
91
98
  const byId = new Map(debtItems.map((d) => [d.id, d]));
92
99
  run.debtItems = run.debtItems.map((d) => byId.get(d.id) ?? d);
93
100
  }
94
101
  progress.update(5, { task: "LLM: overall assessment..." });
95
- const overall = await assessOverallCleanliness(run);
102
+ const overall = await assessOverallCleanliness(run, config);
96
103
  if (overall)
97
104
  run.llmOverallAssessment = overall;
98
- const nextSteps = await suggestNextSteps(run);
105
+ const nextSteps = await suggestNextSteps(run, config);
99
106
  if (nextSteps?.length)
100
107
  run.llmNextSteps = nextSteps;
101
108
  }
@@ -133,8 +140,7 @@ program
133
140
  else {
134
141
  printCliReport(run, opts.ci ?? false);
135
142
  if (!run.llmOverallAssessment) {
136
- process.stdout.write(chalk.dim(" To get AI insights, per-file optimization suggestions, and refactor recommendations:\n" +
137
- " set GEMINI_API_KEY or OPENAI_API_KEY and run without --no-llm.\n\n"));
143
+ process.stdout.write(chalk.dim(" To get AI insights: set GEMINI_API_KEY (or OPENAI_API_KEY) or use --llm-key <key>. Run without --no-llm.\n\n"));
138
144
  }
139
145
  if (opts.ci && getDebtScore(run) > 60)
140
146
  process.exit(1);
@@ -270,12 +276,19 @@ function severityColor(score) {
270
276
  }
271
277
  function cleanlinessTierColor(tier) {
272
278
  switch (tier) {
273
- case 5: return chalk.green.bold;
274
- case 4: return chalk.cyan.bold;
275
- case 3: return chalk.yellow.bold;
276
- case 2: return chalk.hex("#f97316").bold;
277
- case 1: return chalk.red.bold;
278
- default: return chalk.white.bold;
279
+ case 5:
280
+ return chalk.green.bold;
281
+ case 4:
282
+ return chalk.cyan.bold;
283
+ case 3:
284
+ return chalk.yellow.bold;
285
+ case 2:
286
+ return chalk.hex("#f97316").bold;
287
+ case 1:
288
+ return chalk.red.bold;
289
+ default:
290
+ return chalk.white.bold;
279
291
  }
280
292
  }
281
- program.parse();
293
+ // Load .env from cwd first, then run the CLI
294
+ loadEnv().then(() => program.parse());
@@ -0,0 +1 @@
1
+ export declare function loadEnv(): Promise<void>;
@@ -0,0 +1,32 @@
1
+ /**
2
+ * Load .env from current working directory. Uses dotenv if installed, else parses manually.
3
+ */
4
+ import { existsSync, readFileSync } from "node:fs";
5
+ import { join } from "node:path";
6
+ export async function loadEnv() {
7
+ const envPath = join(process.cwd(), ".env");
8
+ try {
9
+ const dotenv = await import("dotenv");
10
+ dotenv.config({ path: envPath });
11
+ return;
12
+ }
13
+ catch {
14
+ // dotenv not installed
15
+ }
16
+ if (!existsSync(envPath))
17
+ return;
18
+ const content = readFileSync(envPath, "utf-8");
19
+ for (const line of content.split("\n")) {
20
+ const trimmed = line.trim();
21
+ if (!trimmed || trimmed.startsWith("#"))
22
+ continue;
23
+ const eq = trimmed.indexOf("=");
24
+ if (eq <= 0)
25
+ continue;
26
+ const key = trimmed.slice(0, eq).trim();
27
+ let value = trimmed.slice(eq + 1).trim();
28
+ if ((value.startsWith('"') && value.endsWith('"')) || (value.startsWith("'") && value.endsWith("'")))
29
+ value = value.slice(1, -1);
30
+ process.env[key] = value;
31
+ }
32
+ }
package/package.json CHANGED
@@ -1,11 +1,12 @@
1
1
  {
2
2
  "name": "tech-debt-visualizer",
3
- "version": "0.1.2",
3
+ "version": "0.1.3",
4
4
  "description": "Language-agnostic CLI that analyzes repos and generates interactive technical debt visualizations with AI-powered insights",
5
5
  "type": "module",
6
6
  "main": "dist/index.js",
7
7
  "bin": {
8
- "tech-debt": "dist/cli.js"
8
+ "tech-debt": "dist/cli.js",
9
+ "tech-debt-visualizer": "dist/cli.js"
9
10
  },
10
11
  "scripts": {
11
12
  "build": "tsc",