@vibecheckai/cli 3.2.6 → 3.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (84) hide show
  1. package/bin/registry.js +192 -5
  2. package/bin/runners/lib/agent-firewall/change-packet/builder.js +280 -6
  3. package/bin/runners/lib/agent-firewall/critic/index.js +151 -0
  4. package/bin/runners/lib/agent-firewall/critic/judge.js +432 -0
  5. package/bin/runners/lib/agent-firewall/critic/prompts.js +305 -0
  6. package/bin/runners/lib/agent-firewall/lawbook/distributor.js +465 -0
  7. package/bin/runners/lib/agent-firewall/lawbook/evaluator.js +604 -0
  8. package/bin/runners/lib/agent-firewall/lawbook/index.js +304 -0
  9. package/bin/runners/lib/agent-firewall/lawbook/registry.js +514 -0
  10. package/bin/runners/lib/agent-firewall/lawbook/schema.js +420 -0
  11. package/bin/runners/lib/agent-firewall/logger.js +141 -0
  12. package/bin/runners/lib/agent-firewall/policy/loader.js +312 -4
  13. package/bin/runners/lib/agent-firewall/policy/rules/ghost-env.js +113 -1
  14. package/bin/runners/lib/agent-firewall/policy/rules/ghost-route.js +133 -6
  15. package/bin/runners/lib/agent-firewall/proposal/extractor.js +394 -0
  16. package/bin/runners/lib/agent-firewall/proposal/index.js +212 -0
  17. package/bin/runners/lib/agent-firewall/proposal/schema.js +251 -0
  18. package/bin/runners/lib/agent-firewall/proposal/validator.js +386 -0
  19. package/bin/runners/lib/agent-firewall/reality/index.js +332 -0
  20. package/bin/runners/lib/agent-firewall/reality/state.js +625 -0
  21. package/bin/runners/lib/agent-firewall/reality/watcher.js +322 -0
  22. package/bin/runners/lib/agent-firewall/risk/index.js +173 -0
  23. package/bin/runners/lib/agent-firewall/risk/scorer.js +328 -0
  24. package/bin/runners/lib/agent-firewall/risk/thresholds.js +321 -0
  25. package/bin/runners/lib/agent-firewall/risk/vectors.js +421 -0
  26. package/bin/runners/lib/agent-firewall/simulator/diff-simulator.js +472 -0
  27. package/bin/runners/lib/agent-firewall/simulator/import-resolver.js +346 -0
  28. package/bin/runners/lib/agent-firewall/simulator/index.js +181 -0
  29. package/bin/runners/lib/agent-firewall/simulator/route-validator.js +380 -0
  30. package/bin/runners/lib/agent-firewall/time-machine/incident-correlator.js +661 -0
  31. package/bin/runners/lib/agent-firewall/time-machine/index.js +267 -0
  32. package/bin/runners/lib/agent-firewall/time-machine/replay-engine.js +436 -0
  33. package/bin/runners/lib/agent-firewall/time-machine/state-reconstructor.js +490 -0
  34. package/bin/runners/lib/agent-firewall/time-machine/timeline-builder.js +530 -0
  35. package/bin/runners/lib/analyzers.js +81 -18
  36. package/bin/runners/lib/authority-badge.js +425 -0
  37. package/bin/runners/lib/cli-output.js +7 -1
  38. package/bin/runners/lib/error-handler.js +16 -9
  39. package/bin/runners/lib/exit-codes.js +275 -0
  40. package/bin/runners/lib/global-flags.js +37 -0
  41. package/bin/runners/lib/help-formatter.js +413 -0
  42. package/bin/runners/lib/logger.js +38 -0
  43. package/bin/runners/lib/unified-cli-output.js +604 -0
  44. package/bin/runners/lib/upsell.js +148 -0
  45. package/bin/runners/runApprove.js +1200 -0
  46. package/bin/runners/runAuth.js +324 -95
  47. package/bin/runners/runCheckpoint.js +39 -21
  48. package/bin/runners/runClassify.js +859 -0
  49. package/bin/runners/runContext.js +136 -24
  50. package/bin/runners/runDoctor.js +108 -68
  51. package/bin/runners/runFix.js +6 -5
  52. package/bin/runners/runGuard.js +212 -118
  53. package/bin/runners/runInit.js +3 -2
  54. package/bin/runners/runMcp.js +130 -52
  55. package/bin/runners/runPolish.js +43 -20
  56. package/bin/runners/runProve.js +1 -2
  57. package/bin/runners/runReport.js +3 -2
  58. package/bin/runners/runScan.js +63 -44
  59. package/bin/runners/runShip.js +3 -4
  60. package/bin/runners/runValidate.js +19 -2
  61. package/bin/runners/runWatch.js +104 -53
  62. package/bin/vibecheck.js +106 -19
  63. package/mcp-server/HARDENING_SUMMARY.md +299 -0
  64. package/mcp-server/agent-firewall-interceptor.js +367 -31
  65. package/mcp-server/authority-tools.js +569 -0
  66. package/mcp-server/conductor/conflict-resolver.js +588 -0
  67. package/mcp-server/conductor/execution-planner.js +544 -0
  68. package/mcp-server/conductor/index.js +377 -0
  69. package/mcp-server/conductor/lock-manager.js +615 -0
  70. package/mcp-server/conductor/request-queue.js +550 -0
  71. package/mcp-server/conductor/session-manager.js +500 -0
  72. package/mcp-server/conductor/tools.js +510 -0
  73. package/mcp-server/index.js +1149 -243
  74. package/mcp-server/lib/{api-client.js → api-client.cjs} +40 -4
  75. package/mcp-server/lib/logger.cjs +30 -0
  76. package/mcp-server/logger.js +173 -0
  77. package/mcp-server/package.json +2 -2
  78. package/mcp-server/premium-tools.js +2 -2
  79. package/mcp-server/tier-auth.js +245 -35
  80. package/mcp-server/truth-firewall-tools.js +145 -15
  81. package/mcp-server/vibecheck-tools.js +2 -2
  82. package/package.json +2 -3
  83. package/mcp-server/index.old.js +0 -4137
  84. package/mcp-server/package-lock.json +0 -165
@@ -0,0 +1,859 @@
1
+ /**
2
+ * vibecheck classify - Inventory Authority Command
3
+ *
4
+ * Produces duplication maps, legacy code maps, and risk classifications.
5
+ * Available on FREE tier - read-only analysis with no enforcement.
6
+ *
7
+ * Output:
8
+ * - Duplication map (exact, near, semantic duplicates)
9
+ * - Legacy map (deprecated, backup, obsolete, dead code)
10
+ * - Risk classifications (LOW, MEDIUM, HIGH, CRITICAL)
11
+ *
12
+ * Part of the Authority System - "The AI That Says No"
13
+ */
14
+
15
+ const path = require("path");
16
+ const fs = require("fs");
17
+ const crypto = require("crypto");
18
+ const { withErrorHandling, createUserError } = require("./lib/error-handler");
19
+ const { parseGlobalFlags, shouldShowBanner } = require("./lib/global-flags");
20
+ const { EXIT } = require("./lib/exit-codes");
21
+
22
+ // ═══════════════════════════════════════════════════════════════════════════════
23
+ // TERMINAL UI
24
+ // ═══════════════════════════════════════════════════════════════════════════════
25
+
26
+ const {
27
+ ansi,
28
+ colors,
29
+ Spinner,
30
+ } = require("./lib/terminal-ui");
31
+
32
// Gradient ASCII-art banner rendered with 24-bit ANSI RGB colors
// (blue→purple fade, top to bottom). `ansi`/`colors` come from
// ./lib/terminal-ui, required above.
const BANNER = `
${ansi.rgb(0, 200, 255)} ██╗ ██╗██╗██████╗ ███████╗ ██████╗██╗ ██╗███████╗ ██████╗██╗ ██╗${ansi.reset}
${ansi.rgb(30, 180, 255)} ██║ ██║██║██╔══██╗██╔════╝██╔════╝██║ ██║██╔════╝██╔════╝██║ ██╔╝${ansi.reset}
${ansi.rgb(60, 160, 255)} ██║ ██║██║██████╔╝█████╗ ██║ ███████║█████╗ ██║ █████╔╝ ${ansi.reset}
${ansi.rgb(90, 140, 255)} ╚██╗ ██╔╝██║██╔══██╗██╔══╝ ██║ ██╔══██║██╔══╝ ██║ ██╔═██╗ ${ansi.reset}
${ansi.rgb(120, 120, 255)} ╚████╔╝ ██║██████╔╝███████╗╚██████╗██║ ██║███████╗╚██████╗██║ ██╗${ansi.reset}
${ansi.rgb(150, 100, 255)} ╚═══╝ ╚═╝╚═════╝ ╚══════╝ ╚═════╝╚═╝ ╚═╝╚══════╝ ╚═════╝╚═╝ ╚═╝${ansi.reset}

${ansi.dim} ┌─────────────────────────────────────────────────────────────────────┐${ansi.reset}
${ansi.dim} │${ansi.reset} ${ansi.rgb(255, 255, 255)}${ansi.bold}Authority System${ansi.reset} ${ansi.dim}•${ansi.reset} ${ansi.rgb(200, 200, 200)}Inventory${ansi.reset} ${ansi.dim}•${ansi.reset} ${ansi.rgb(150, 150, 150)}Read-Only Classification${ansi.reset} ${ansi.dim}│${ansi.reset}
${ansi.dim} └─────────────────────────────────────────────────────────────────────┘${ansi.reset}
`;

// Print the banner to stdout (callers gate this on --no-banner / quiet modes).
function printBanner() {
  console.log(BANNER);
}
48
+
49
+ // ═══════════════════════════════════════════════════════════════════════════════
50
+ // ARGS PARSER
51
+ // ═══════════════════════════════════════════════════════════════════════════════
52
+
53
/**
 * Parse CLI arguments for `vibecheck classify`.
 *
 * Global flags (--json, --verbose, --path, ...) are extracted first by
 * parseGlobalFlags; the leftover args are scanned for classify-specific
 * options. A bare positional argument is treated as the target path.
 *
 * @param {string[]} args - Raw CLI arguments (after the command name).
 * @returns {object} Parsed options with defaults applied.
 */
function parseArgs(args) {
  const { flags: globalFlags, cleanArgs } = parseGlobalFlags(args);

  const opts = {
    path: globalFlags.path || process.cwd(),
    json: globalFlags.json || false,
    verbose: globalFlags.verbose || false,
    help: globalFlags.help || false,
    noBanner: globalFlags.noBanner || false,
    ci: globalFlags.ci || false,
    quiet: globalFlags.quiet || false,
    // Classification options
    includeNear: true, // Include near-duplicates
    includeSemantic: false, // Include semantic duplicates (slower)
    threshold: 0.8, // Similarity threshold (0-1)
    maxFiles: 5000, // Max files to analyze
    // Output options
    output: null, // Output file path
    format: 'table', // table, json, markdown
  };

  for (let i = 0; i < cleanArgs.length; i++) {
    const arg = cleanArgs[i];

    if (arg === '--include-semantic' || arg === '-s') {
      opts.includeSemantic = true;
    } else if (arg === '--no-near') {
      opts.includeNear = false;
    } else if (arg === '--threshold' || arg === '-t') {
      // BUG FIX: `parseFloat(x) || 0.8` silently discarded a legitimate
      // `--threshold 0`; use an explicit NaN check so 0 is accepted.
      const value = parseFloat(cleanArgs[++i]);
      opts.threshold = Number.isFinite(value) ? value : 0.8;
    } else if (arg === '--max-files') {
      // Same falsy-default fix; a non-positive count is meaningless, so it
      // falls back to the default exactly as the `||` form did for 0.
      const value = parseInt(cleanArgs[++i], 10);
      opts.maxFiles = Number.isFinite(value) && value > 0 ? value : 5000;
    } else if (arg === '--output' || arg === '-o') {
      opts.output = cleanArgs[++i];
    } else if (arg === '--format' || arg === '-f') {
      opts.format = cleanArgs[++i] || 'table';
    } else if (arg === '--path' || arg === '-p') {
      opts.path = cleanArgs[++i] || process.cwd();
    } else if (arg.startsWith('--path=')) {
      opts.path = arg.split('=')[1];
    } else if (!arg.startsWith('-')) {
      // Bare positional argument: treat as target path.
      opts.path = path.resolve(arg);
    }
  }

  return opts;
}
90
+
91
/**
 * Print usage/help text for `vibecheck classify`.
 *
 * @param {boolean} [showBanner=true] - When true (and shouldShowBanner
 *   allows it), the ASCII banner is printed before the help text.
 */
function printHelp(showBanner = true) {
  if (showBanner && shouldShowBanner({})) {
    printBanner();
  }
  // Single template literal so the help renders as one console.log call.
  console.log(`
${ansi.bold}USAGE${ansi.reset}
${colors.accent}vibecheck classify${ansi.reset} [path] [options]

${ansi.dim}Authority: inventory (FREE tier)${ansi.reset}

Produces a read-only inventory of your codebase:
- Duplication map (exact, near, semantic duplicates)
- Legacy code map (deprecated, backup, obsolete, dead)
- Risk classifications (LOW, MEDIUM, HIGH, CRITICAL)

${ansi.bold}OPTIONS${ansi.reset}
${colors.accent}--include-semantic, -s${ansi.reset} Include semantic duplicates ${ansi.dim}(slower)${ansi.reset}
${colors.accent}--no-near${ansi.reset} Skip near-duplicate detection
${colors.accent}--threshold, -t <n>${ansi.reset} Similarity threshold 0-1 ${ansi.dim}(default: 0.8)${ansi.reset}
${colors.accent}--max-files <n>${ansi.reset} Max files to analyze ${ansi.dim}(default: 5000)${ansi.reset}

${ansi.bold}OUTPUT OPTIONS${ansi.reset}
${colors.accent}--json${ansi.reset} Output as JSON
${colors.accent}--output, -o <file>${ansi.reset} Save output to file
${colors.accent}--format, -f <fmt>${ansi.reset} Format: table, json, markdown

${ansi.bold}GLOBAL OPTIONS${ansi.reset}
${colors.accent}--path, -p <dir>${ansi.reset} Run in specified directory
${colors.accent}--verbose, -v${ansi.reset} Show detailed progress
${colors.accent}--quiet, -q${ansi.reset} Suppress non-essential output
${colors.accent}--ci${ansi.reset} CI mode
${colors.accent}--help, -h${ansi.reset} Show this help

${ansi.bold}💡 EXAMPLES${ansi.reset}

${ansi.dim}# Quick inventory of current directory${ansi.reset}
vibecheck classify

${ansi.dim}# JSON output for processing${ansi.reset}
vibecheck classify --json > inventory.json

${ansi.dim}# Include semantic duplicates (deeper analysis)${ansi.reset}
vibecheck classify --include-semantic

${ansi.dim}# Classify specific directory${ansi.reset}
vibecheck classify ./packages/core

${ansi.bold}📊 OUTPUT${ansi.reset}
Results include:
- duplicationMap: Files with duplicated code
- legacyMap: Deprecated/obsolete code locations
- riskClassifications: Per-file risk levels
- summary: Statistics overview

${ansi.bold}🔗 RELATED COMMANDS${ansi.reset}
${colors.accent}vibecheck approve${ansi.reset} Get authority verdicts ${ansi.cyan}[STARTER]${ansi.reset}
${colors.accent}vibecheck scan${ansi.reset} Full code analysis

${ansi.dim}─────────────────────────────────────────────────────────────${ansi.reset}
${ansi.dim}Documentation: https://docs.vibecheckai.dev/cli/classify${ansi.reset}
`);
}
153
+
154
+ // ═══════════════════════════════════════════════════════════════════════════════
155
+ // INVENTORY ANALYSIS ENGINE
156
+ // ═══════════════════════════════════════════════════════════════════════════════
157
+
158
/**
 * Legacy code indicators — file/directory naming conventions that suggest
 * backup, deprecated, or obsolete code. `confidence` is a 0-1 heuristic
 * weight; detectLegacyIndicators keeps the highest-confidence match.
 */
const LEGACY_INDICATORS = [
  { pattern: /\.old\.(js|ts|tsx|jsx)$/i, type: 'backup', confidence: 0.9 },
  { pattern: /\.bak\.(js|ts|tsx|jsx)$/i, type: 'backup', confidence: 0.95 },
  { pattern: /\.backup\.(js|ts|tsx|jsx)$/i, type: 'backup', confidence: 0.95 },
  { pattern: /\.deprecated\.(js|ts|tsx|jsx)$/i, type: 'deprecated', confidence: 0.9 },
  { pattern: /\.legacy\.(js|ts|tsx|jsx)$/i, type: 'obsolete', confidence: 0.85 },
  { pattern: /_old\.(js|ts|tsx|jsx)$/i, type: 'backup', confidence: 0.8 },
  { pattern: /-old\.(js|ts|tsx|jsx)$/i, type: 'backup', confidence: 0.8 },
  { pattern: /\/deprecated\//i, type: 'deprecated', confidence: 0.85 },
  { pattern: /\/legacy\//i, type: 'obsolete', confidence: 0.8 },
  { pattern: /\/old\//i, type: 'backup', confidence: 0.7 },
];

/**
 * Content patterns indicating deprecated code. Matched with String.match;
 * the /g flag yields all occurrences so evidence strings can report counts.
 */
const DEPRECATED_CONTENT_PATTERNS = [
  { regex: /@deprecated/gi, type: 'deprecated', confidence: 0.95 },
  { regex: /TODO:\s*remove/gi, type: 'obsolete', confidence: 0.7 },
  { regex: /FIXME:\s*delete/gi, type: 'obsolete', confidence: 0.7 },
  { regex: /\/\/\s*LEGACY/gi, type: 'obsolete', confidence: 0.75 },
  { regex: /\/\/\s*DEPRECATED/gi, type: 'deprecated', confidence: 0.9 },
  { regex: /\/\*\*?\s*@deprecated/gi, type: 'deprecated', confidence: 0.95 },
];

/**
 * High-risk path patterns mapped to risk levels; first match wins in
 * classifyRisk, so order matters.
 * NOTE(review): /\.env/i matches ANY path containing ".env" (e.g.
 * "my.environment.js"), not just dotenv files — confirm intended breadth.
 */
const HIGH_RISK_PATHS = [
  { pattern: /\/auth\//i, reason: 'Authentication code', level: 'HIGH' },
  { pattern: /\/security\//i, reason: 'Security-related code', level: 'HIGH' },
  { pattern: /\/migrations?\//i, reason: 'Database migrations', level: 'CRITICAL' },
  { pattern: /\/billing\//i, reason: 'Billing/payment code', level: 'CRITICAL' },
  { pattern: /\/stripe\//i, reason: 'Payment processing', level: 'CRITICAL' },
  { pattern: /\.env/i, reason: 'Environment configuration', level: 'CRITICAL' },
  { pattern: /secrets?\.(js|ts|json)/i, reason: 'Secrets configuration', level: 'CRITICAL' },
  { pattern: /\/prisma\//i, reason: 'Database schema', level: 'HIGH' },
  { pattern: /\/middleware\//i, reason: 'Request middleware', level: 'MEDIUM' },
];

/**
 * Directory names excluded from the file walk. Dot-directories are skipped
 * separately inside findSourceFiles, so only non-dot names are listed here.
 */
const EXCLUDED_DIRS = new Set([
  'node_modules',
  '.git',
  'dist',
  'build',
  '.next',
  'coverage',
  '.vibecheck',
  '__pycache__',
  '.cache',
  'vendor',
]);
216
+
217
/**
 * Recursively collect source files (.ts/.tsx/.js/.jsx/.mjs/.cjs) under
 * rootPath, up to maxFiles entries and 20 directory levels deep.
 * Directories listed in EXCLUDED_DIRS and all dot-directories are skipped;
 * unreadable directories are silently ignored (best-effort scan).
 *
 * @param {string} rootPath - Directory to scan.
 * @param {number} maxFiles - Hard cap on the number of files collected.
 * @returns {Promise<Array<{path: string, relativePath: string, name: string, ext: string}>>}
 */
async function findSourceFiles(rootPath, maxFiles) {
  const SOURCE_EXTENSIONS = new Set(['.ts', '.tsx', '.js', '.jsx', '.mjs', '.cjs']);
  const collected = [];

  async function scan(dir, depth = 0) {
    // Depth guard protects against pathological nesting / symlink cycles.
    if (depth > 20 || collected.length >= maxFiles) return;

    let entries;
    try {
      entries = await fs.promises.readdir(dir, { withFileTypes: true });
    } catch (err) {
      return; // Inaccessible directory — skip it (best-effort).
    }

    for (const entry of entries) {
      if (collected.length >= maxFiles) break;

      const fullPath = path.join(dir, entry.name);

      if (entry.isDirectory()) {
        const isHidden = entry.name.startsWith('.');
        if (!isHidden && !EXCLUDED_DIRS.has(entry.name)) {
          await scan(fullPath, depth + 1);
        }
        continue;
      }

      if (!entry.isFile()) continue;

      const ext = path.extname(entry.name).toLowerCase();
      if (SOURCE_EXTENSIONS.has(ext)) {
        collected.push({
          path: fullPath,
          relativePath: path.relative(rootPath, fullPath),
          name: entry.name,
          ext,
        });
      }
    }
  }

  await scan(rootPath, 0);
  return collected;
}
260
+
261
/**
 * Produce a short content fingerprint for duplicate detection:
 * the first 16 hex characters of the SHA-256 digest of `content`.
 *
 * @param {string} content - Text to fingerprint.
 * @returns {string} 16-character lowercase hex prefix of the digest.
 */
function hashContent(content) {
  const fullDigest = crypto.createHash('sha256').update(content).digest('hex');
  return fullDigest.slice(0, 16);
}
267
+
268
/**
 * Normalize source text so near-identical files hash identically:
 * strip // and block comments, collapse whitespace runs to single spaces,
 * unify quote characters to ", and trim the ends.
 * NOTE: the comment-stripping regexes are heuristic and can also affect
 * string literals that contain comment-like sequences.
 *
 * @param {string} content - Raw source text.
 * @returns {string} Normalized text.
 */
function normalizeContent(content) {
  const withoutLineComments = content.replace(/\/\/.*$/gm, '');
  const withoutBlockComments = withoutLineComments.replace(/\/\*[\s\S]*?\*\//g, '');
  const collapsed = withoutBlockComments.replace(/\s+/g, ' ');
  return collapsed.replace(/['"`]/g, '"').trim();
}
279
+
280
/**
 * Jaccard similarity |A ∩ B| / |A ∪ B| between two Sets.
 * Returns 0 when both sets are empty (avoids 0/0).
 *
 * @param {Set<*>} set1
 * @param {Set<*>} set2
 * @returns {number} Similarity in [0, 1].
 */
function jaccardSimilarity(set1, set2) {
  let shared = 0;
  for (const item of set1) {
    if (set2.has(item)) shared += 1;
  }
  // |A ∪ B| = |A| + |B| − |A ∩ B|, so no temporary Sets are needed.
  const unionSize = set1.size + set2.size - shared;
  return unionSize === 0 ? 0 : shared / unionSize;
}
288
+
289
/**
 * Split source text into a Set of lowercase identifier-like tokens for
 * Jaccard comparison. Splits on whitespace and common punctuation/operator
 * characters; tokens of length <= 2 are dropped as noise.
 *
 * @param {string} content - Source text.
 * @returns {Set<string>} Unique lowercase tokens.
 */
function tokenize(content) {
  const fragments = content.split(/[\s\(\)\{\}\[\];,.:=<>!&|?]+/);
  const tokens = new Set();
  for (const fragment of fragments) {
    if (fragment.length > 2) {
      tokens.add(fragment.toLowerCase());
    }
  }
  return tokens;
}
300
+
301
/**
 * Collect legacy-code signals for a file from both its path (backup /
 * deprecated / legacy naming conventions in LEGACY_INDICATORS) and its
 * contents (@deprecated tags, removal TODOs in DEPRECATED_CONTENT_PATTERNS).
 * Each indicator carries a confidence score and an evidence string; content
 * indicators also carry the match count.
 *
 * @param {string} filePath - File path used for name-based heuristics.
 * @param {string} content - File contents.
 * @returns {Array<{type: string, confidence: number, evidence: string, count?: number}>}
 */
function detectLegacyIndicators(filePath, content) {
  const found = [];

  // Path-based heuristics: file/directory naming conventions.
  for (const indicator of LEGACY_INDICATORS) {
    if (indicator.pattern.test(filePath)) {
      found.push({
        type: indicator.type,
        confidence: indicator.confidence,
        evidence: `File path matches: ${indicator.pattern}`,
      });
    }
  }

  // Content-based heuristics: deprecation markers inside the file.
  // String.match with a /g regex returns all occurrences (no lastIndex issues).
  for (const { regex, type, confidence } of DEPRECATED_CONTENT_PATTERNS) {
    const hits = content.match(regex);
    if (hits) {
      found.push({
        type,
        confidence,
        evidence: `Found ${hits.length}x: ${regex.source}`,
        count: hits.length,
      });
    }
  }

  return found;
}
329
+
330
/**
 * Assign a risk level (LOW / MEDIUM / HIGH / CRITICAL) to a file.
 *
 * Path-based rules (HIGH_RISK_PATHS, first match wins) take precedence;
 * otherwise the contents are scanned for risk signals (env-var usage,
 * crypto imports, SQL-like operations, auth tokens) and the strongest
 * signal decides the level.
 *
 * @param {string} filePath - Path checked against HIGH_RISK_PATHS.
 * @param {string} content - Contents scanned for risk indicators.
 * @returns {{level: string, reason: string, tags: string[]}}
 */
function classifyRisk(filePath, content) {
  // Path rules win outright over content heuristics.
  for (const rule of HIGH_RISK_PATHS) {
    if (rule.pattern.test(filePath)) {
      return {
        level: rule.level,
        reason: rule.reason,
        tags: [rule.reason.toLowerCase().replace(/\s+/g, '-')],
      };
    }
  }

  const tags = [];
  if (/process\.env\./i.test(content)) {
    tags.push('uses-env-vars');
  }
  // NOTE(review): only matches ESM `import` syntax, not require() — confirm intended.
  if (/import.*['"](crypto|bcrypt|argon2|jose)['"]/i.test(content)) {
    tags.push('cryptography');
  }
  if (/sql|query|execute/i.test(content) && /where|select|insert|update|delete/i.test(content)) {
    tags.push('database-operations');
  }
  if (/(Bearer|Authorization|api[_-]?key|secret|token)/i.test(content)) {
    tags.push('auth-related');
  }

  // Strongest signal wins; ordering mirrors decreasing severity.
  if (tags.includes('cryptography') || tags.includes('auth-related')) {
    return { level: 'HIGH', reason: 'Security-sensitive operations', tags };
  }
  if (tags.includes('database-operations')) {
    return { level: 'MEDIUM', reason: 'Database operations', tags };
  }
  if (tags.includes('uses-env-vars')) {
    return { level: 'MEDIUM', reason: 'Environment-dependent', tags };
  }
  return { level: 'LOW', reason: 'Standard code', tags: [] };
}
369
+
370
/**
 * Run inventory analysis.
 *
 * Pipeline: discover files → read + hash contents → detect exact / near /
 * semantic duplicates → flag legacy code → classify per-file risk → build
 * a summary. Read-only: nothing on disk is modified.
 *
 * @param {string} projectPath - Root directory to analyze.
 * @param {object} opts - Parsed CLI options; reads includeNear,
 *   includeSemantic, threshold, maxFiles, verbose.
 * @param {{update: function(string): void}} spinner - Progress reporter.
 * @returns {Promise<{duplicationMap: Array, legacyMap: Array,
 *   riskClassifications: Array, summary: object}>}
 */
async function runInventoryAnalysis(projectPath, opts, spinner) {
  const startTime = Date.now();

  // Find source files
  spinner.update('Discovering source files...');
  const files = await findSourceFiles(projectPath, opts.maxFiles);

  // Empty project: short-circuit with a zeroed result shape.
  if (files.length === 0) {
    return {
      duplicationMap: [],
      legacyMap: [],
      riskClassifications: [],
      summary: {
        totalFiles: 0,
        duplicatedFiles: 0,
        legacyFiles: 0,
        highRiskFiles: 0,
        totalDuplicateLines: 0,
      },
    };
  }

  spinner.update(`Analyzing ${files.length} files...`);

  // Read all file contents. All three maps are keyed by relativePath.
  const fileContents = new Map();    // relativePath -> { content, lines }
  const fileHashes = new Map();      // relativePath -> exact content hash
  const normalizedHashes = new Map(); // relativePath -> comment/whitespace-insensitive hash

  for (const file of files) {
    try {
      const content = await fs.promises.readFile(file.path, 'utf-8');
      const lines = content.split('\n').length;
      fileContents.set(file.relativePath, { content, lines });

      // Calculate hashes
      const hash = hashContent(content);
      const normalizedHash = hashContent(normalizeContent(content));

      fileHashes.set(file.relativePath, hash);
      normalizedHashes.set(file.relativePath, normalizedHash);
    } catch (err) {
      // Skip unreadable files (best-effort; they simply drop out of analysis)
    }
  }

  // ═══════════════════════════════════════════════════════════════════════════
  // DUPLICATE DETECTION
  // ═══════════════════════════════════════════════════════════════════════════

  spinner.update('Detecting duplicates...');

  const duplicationMap = [];
  const exactDuplicates = new Map(); // hash -> [files]
  const nearDuplicates = new Map(); // normalizedHash -> [files]

  // Group by exact hash
  for (const [filePath, hash] of fileHashes.entries()) {
    if (!exactDuplicates.has(hash)) {
      exactDuplicates.set(hash, []);
    }
    exactDuplicates.get(hash).push(filePath);
  }

  // Find exact duplicates: first file in each group is the "primary".
  // Note: `files` here intentionally shadows the outer file list.
  for (const [hash, files] of exactDuplicates.entries()) {
    if (files.length > 1) {
      const primary = files[0];
      const duplicates = files.slice(1);
      const { lines } = fileContents.get(primary) || { lines: 0 };

      duplicationMap.push({
        primary,
        duplicates,
        similarity: 1.0,
        type: 'exact',
        lineCount: lines,
      });
    }
  }

  // Group by normalized hash for near-duplicates
  if (opts.includeNear) {
    for (const [filePath, hash] of normalizedHashes.entries()) {
      if (!nearDuplicates.has(hash)) {
        nearDuplicates.set(hash, []);
      }
      nearDuplicates.get(hash).push(filePath);
    }

    // Find near-duplicates (not already exact): record both directions of
    // each exact pair so the filter below catches either ordering.
    const exactPairs = new Set();
    for (const entry of duplicationMap) {
      for (const dup of entry.duplicates) {
        exactPairs.add(`${entry.primary}:${dup}`);
        exactPairs.add(`${dup}:${entry.primary}`);
      }
    }

    for (const [hash, files] of nearDuplicates.entries()) {
      if (files.length > 1) {
        const primary = files[0];
        const potentialDups = files.slice(1).filter(f => !exactPairs.has(`${primary}:${f}`));

        if (potentialDups.length > 0) {
          const { lines } = fileContents.get(primary) || { lines: 0 };
          duplicationMap.push({
            primary,
            duplicates: potentialDups,
            similarity: 0.95, // High similarity due to normalized match
            type: 'near',
            lineCount: lines,
          });
        }
      }
    }
  }

  // Semantic duplicates (token-based similarity)
  if (opts.includeSemantic) {
    spinner.update('Computing semantic similarity...');

    const tokenSets = new Map();
    for (const [filePath, { content }] of fileContents.entries()) {
      tokenSets.set(filePath, tokenize(content));
    }

    const processedPairs = new Set();
    const filePaths = Array.from(fileContents.keys());

    // O(n²) pairwise comparison, hard-capped at the first 500 files to
    // bound runtime (at most ~125k Jaccard computations).
    for (let i = 0; i < filePaths.length && i < 500; i++) {
      for (let j = i + 1; j < filePaths.length && j < 500; j++) {
        const file1 = filePaths[i];
        const file2 = filePaths[j];
        const pairKey = `${file1}:${file2}`;

        if (processedPairs.has(pairKey)) continue;
        processedPairs.add(pairKey);

        const tokens1 = tokenSets.get(file1);
        const tokens2 = tokenSets.get(file2);

        const similarity = jaccardSimilarity(tokens1, tokens2);

        if (similarity >= opts.threshold) {
          // Check not already in duplicationMap (as exact or near entry)
          const existing = duplicationMap.find(d =>
            (d.primary === file1 && d.duplicates.includes(file2)) ||
            (d.primary === file2 && d.duplicates.includes(file1))
          );

          if (!existing) {
            const { lines } = fileContents.get(file1) || { lines: 0 };
            duplicationMap.push({
              primary: file1,
              duplicates: [file2],
              similarity: Math.round(similarity * 100) / 100,
              type: 'semantic',
              lineCount: lines,
            });
          }
        }
      }
    }
  }

  // ═══════════════════════════════════════════════════════════════════════════
  // LEGACY CODE DETECTION
  // ═══════════════════════════════════════════════════════════════════════════

  spinner.update('Detecting legacy code...');

  const legacyMap = [];

  for (const [filePath, { content }] of fileContents.entries()) {
    const indicators = detectLegacyIndicators(filePath, content);

    if (indicators.length > 0) {
      // Use highest confidence indicator for type/confidence, but report
      // the evidence strings from ALL indicators.
      const bestIndicator = indicators.reduce((a, b) => a.confidence > b.confidence ? a : b);

      legacyMap.push({
        file: filePath,
        type: bestIndicator.type,
        evidence: indicators.map(i => i.evidence),
        confidence: bestIndicator.confidence,
      });
    }
  }

  // ═══════════════════════════════════════════════════════════════════════════
  // RISK CLASSIFICATION
  // ═══════════════════════════════════════════════════════════════════════════

  spinner.update('Classifying risk levels...');

  const riskClassifications = [];

  for (const [filePath, { content }] of fileContents.entries()) {
    const risk = classifyRisk(filePath, content);

    // LOW-risk files are only reported in verbose mode to keep output small.
    if (risk.level !== 'LOW' || opts.verbose) {
      riskClassifications.push({
        file: filePath,
        level: risk.level,
        reason: risk.reason,
        tags: risk.tags,
      });
    }
  }

  // ═══════════════════════════════════════════════════════════════════════════
  // SUMMARY
  // ═══════════════════════════════════════════════════════════════════════════

  const duplicatedFiles = new Set();
  let totalDuplicateLines = 0;

  for (const entry of duplicationMap) {
    duplicatedFiles.add(entry.primary);
    for (const dup of entry.duplicates) {
      duplicatedFiles.add(dup);
    }
    // Duplicate lines = primary's line count times number of copies.
    totalDuplicateLines += entry.lineCount * entry.duplicates.length;
  }

  const summary = {
    totalFiles: files.length,
    duplicatedFiles: duplicatedFiles.size,
    legacyFiles: legacyMap.length,
    highRiskFiles: riskClassifications.filter(r => r.level === 'HIGH' || r.level === 'CRITICAL').length,
    totalDuplicateLines,
    analysisTimeMs: Date.now() - startTime,
  };

  return {
    duplicationMap,
    legacyMap,
    riskClassifications,
    summary,
  };
}
615
+
616
+ // ═══════════════════════════════════════════════════════════════════════════════
617
+ // OUTPUT FORMATTERS
618
+ // ═══════════════════════════════════════════════════════════════════════════════
619
+
620
/**
 * Render the analysis result as an ANSI table for terminal display:
 * verdict header, summary box, then up to 15 entries each for duplications,
 * legacy files, and high-risk files (with "... and N more" overflow lines).
 *
 * NOTE(review): the box borders assume fixed content widths via
 * padEnd/padStart; very long project names or large counts can misalign
 * the right border — confirm acceptable.
 *
 * @param {object} result - Output of runInventoryAnalysis.
 * @param {string} projectPath - Analyzed root (basename shown in header).
 * @returns {string} Multi-line string ready for console.log.
 */
function formatTableOutput(result, projectPath) {
  const lines = [];
  const { duplicationMap, legacyMap, riskClassifications, summary } = result;

  // Verdict header box
  lines.push('');
  lines.push('┌────────────────────────────────────────────────────────────────────┐');
  lines.push('│ Authority Verdict: INVENTORY (Read-Only) │');
  lines.push('├────────────────────────────────────────────────────────────────────┤');
  lines.push(`│ Project: ${path.basename(projectPath).padEnd(55)}│`);
  lines.push(`│ Files analyzed: ${String(summary.totalFiles).padEnd(48)}│`);
  lines.push(`│ Analysis time: ${String(summary.analysisTimeMs + 'ms').padEnd(49)}│`);
  lines.push('└────────────────────────────────────────────────────────────────────┘');
  lines.push('');

  // Summary stats
  lines.push('┌─────────────────────────────────────────┐');
  lines.push('│ Summary │');
  lines.push('├─────────────────────────────────────────┤');
  lines.push(`│ Duplicated files: ${String(summary.duplicatedFiles).padStart(5)} │`);
  lines.push(`│ Legacy files: ${String(summary.legacyFiles).padStart(5)} │`);
  lines.push(`│ High-risk files: ${String(summary.highRiskFiles).padStart(5)} │`);
  lines.push(`│ Duplicate lines: ${String(summary.totalDuplicateLines).padStart(5)} │`);
  lines.push('└─────────────────────────────────────────┘');
  lines.push('');

  // Duplications (icon per match type: = exact, ≈ near, ~ semantic)
  if (duplicationMap.length > 0) {
    lines.push(`${ansi.bold}Duplications (${duplicationMap.length})${ansi.reset}`);
    lines.push('');

    for (const entry of duplicationMap.slice(0, 15)) {
      const typeIcon = entry.type === 'exact' ? '=' : entry.type === 'near' ? '≈' : '~';
      const similarity = Math.round(entry.similarity * 100);
      lines.push(` ${typeIcon} ${ansi.dim}[${similarity}%]${ansi.reset} ${colors.accent}${entry.primary}${ansi.reset}`);
      for (const dup of entry.duplicates.slice(0, 3)) {
        lines.push(` └─ ${dup}`);
      }
      if (entry.duplicates.length > 3) {
        lines.push(` ${ansi.dim}... and ${entry.duplicates.length - 3} more${ansi.reset}`);
      }
    }

    if (duplicationMap.length > 15) {
      lines.push(` ${ansi.dim}... and ${duplicationMap.length - 15} more duplicate groups${ansi.reset}`);
    }
    lines.push('');
  }

  // Legacy code (icon per type: ⚠ deprecated, 📦 backup, 👻 other/obsolete)
  if (legacyMap.length > 0) {
    lines.push(`${ansi.bold}Legacy Code (${legacyMap.length})${ansi.reset}`);
    lines.push('');

    for (const entry of legacyMap.slice(0, 15)) {
      const typeIcon = entry.type === 'deprecated' ? '⚠' : entry.type === 'backup' ? '📦' : '👻';
      const conf = Math.round(entry.confidence * 100);
      lines.push(` ${typeIcon} ${ansi.dim}[${conf}%]${ansi.reset} ${colors.warning}${entry.file}${ansi.reset}`);
      lines.push(` ${ansi.dim}Type: ${entry.type}${ansi.reset}`);
    }

    if (legacyMap.length > 15) {
      lines.push(` ${ansi.dim}... and ${legacyMap.length - 15} more legacy files${ansi.reset}`);
    }
    lines.push('');
  }

  // High-risk files (HIGH and CRITICAL only)
  const highRisk = riskClassifications.filter(r => r.level === 'HIGH' || r.level === 'CRITICAL');
  if (highRisk.length > 0) {
    lines.push(`${ansi.bold}High-Risk Files (${highRisk.length})${ansi.reset}`);
    lines.push('');

    for (const entry of highRisk.slice(0, 15)) {
      const levelIcon = entry.level === 'CRITICAL' ? '🔴' : '🟠';
      lines.push(` ${levelIcon} ${colors.error}${entry.file}${ansi.reset}`);
      lines.push(` ${ansi.dim}${entry.reason}${ansi.reset}`);
    }

    if (highRisk.length > 15) {
      lines.push(` ${ansi.dim}... and ${highRisk.length - 15} more high-risk files${ansi.reset}`);
    }
    lines.push('');
  }

  // Footer
  lines.push('─────────────────────────────────────────────────────────────────────');
  lines.push(`${ansi.dim}This is a read-only inventory. Use 'vibecheck approve' to get verdicts.${ansi.reset}`);
  lines.push('');

  return lines.join('\n');
}
711
+
712
/**
 * Render the inventory result as a Markdown report.
 *
 * Sections: header (project / authority / timestamp), summary table,
 * then one section each for duplications, legacy code, and high-risk
 * files — a section is omitted when it has no entries.
 *
 * @param {object} result - Inventory analysis result with
 *   `duplicationMap`, `legacyMap`, `riskClassifications`, `summary`.
 * @param {string} projectPath - Absolute project path (basename is shown).
 * @returns {string} Markdown document.
 */
function formatMarkdownOutput(result, projectPath) {
  const { duplicationMap, legacyMap, riskClassifications, summary } = result;
  // Render a 0..1 ratio as a whole-number percentage.
  const pct = (ratio) => Math.round(ratio * 100);
  const out = [];

  // Report header
  out.push(
    `# Inventory Report`,
    '',
    `**Project:** ${path.basename(projectPath)}`,
    `**Authority:** inventory (read-only)`,
    `**Generated:** ${new Date().toISOString()}`,
    '',
  );

  // Summary metrics table
  out.push(
    '## Summary',
    '',
    '| Metric | Count |',
    '|--------|-------|',
    `| Total files analyzed | ${summary.totalFiles} |`,
    `| Duplicated files | ${summary.duplicatedFiles} |`,
    `| Legacy files | ${summary.legacyFiles} |`,
    `| High-risk files | ${summary.highRiskFiles} |`,
    `| Duplicate lines | ${summary.totalDuplicateLines} |`,
    '',
  );

  // One subsection per duplicate group, listing its members.
  if (duplicationMap.length > 0) {
    out.push('## Duplications', '');
    for (const group of duplicationMap) {
      out.push(
        `### ${group.primary}`,
        `- Type: ${group.type}`,
        `- Similarity: ${pct(group.similarity)}%`,
        `- Lines: ${group.lineCount}`,
        `- Duplicates:`,
      );
      for (const twin of group.duplicates) {
        out.push(`  - \`${twin}\``);
      }
      out.push('');
    }
  }

  // Legacy files as a compact table.
  if (legacyMap.length > 0) {
    out.push(
      '## Legacy Code',
      '',
      '| File | Type | Confidence |',
      '|------|------|------------|',
    );
    for (const item of legacyMap) {
      out.push(`| \`${item.file}\` | ${item.type} | ${pct(item.confidence)}% |`);
    }
    out.push('');
  }

  // Only HIGH/CRITICAL classifications are surfaced here.
  const highRisk = riskClassifications.filter(
    (r) => r.level === 'HIGH' || r.level === 'CRITICAL',
  );
  if (highRisk.length > 0) {
    out.push(
      '## High-Risk Files',
      '',
      '| File | Level | Reason |',
      '|------|-------|--------|',
    );
    for (const item of highRisk) {
      out.push(`| \`${item.file}\` | ${item.level} | ${item.reason} |`);
    }
    out.push('');
  }

  return out.join('\n');
}
775
+
776
+ // ═══════════════════════════════════════════════════════════════════════════════
777
+ // MAIN COMMAND
778
+ // ═══════════════════════════════════════════════════════════════════════════════
779
+
780
/**
 * Entry point for the classify (inventory) command.
 *
 * Parses CLI args, validates the project path, runs the read-only
 * inventory analysis, prints the result in the requested format
 * (json / markdown / table), and optionally saves it to a file.
 *
 * @param {string[]} args - Raw CLI arguments for this subcommand.
 * @returns {Promise<number>} Exit code (EXIT.SUCCESS on success).
 * @throws {Error} "ValidationError" (via createUserError) when the
 *   project path does not exist; rethrows analysis failures after
 *   stopping the spinner.
 */
async function runClassify(args) {
  const opts = parseArgs(args);

  // Show help
  if (opts.help) {
    printHelp(shouldShowBanner(opts));
    // Fix: use the EXIT constant consistently instead of a bare `0`
    // (the success path below already returns EXIT.SUCCESS).
    return EXIT.SUCCESS;
  }

  // Print banner
  if (shouldShowBanner(opts)) {
    printBanner();
  }

  const projectPath = path.resolve(opts.path);

  // Validate project path before doing any work.
  if (!fs.existsSync(projectPath)) {
    throw createUserError(`Project path does not exist: ${projectPath}`, "ValidationError");
  }

  if (!opts.quiet) {
    console.log(` ${ansi.dim}Project:${ansi.reset} ${ansi.bold}${path.basename(projectPath)}${ansi.reset}`);
    console.log(` ${ansi.dim}Authority:${ansi.reset} ${colors.accent}inventory${ansi.reset} (FREE tier)`);
    console.log();
  }

  // Run analysis with progress feedback; failures stop the spinner
  // before propagating.
  const spinner = new Spinner({ color: colors.primary });
  spinner.start('Starting inventory analysis...');

  try {
    const result = await runInventoryAnalysis(projectPath, opts, spinner);

    spinner.succeed(`Analysis complete (${result.summary.analysisTimeMs}ms)`);

    // Construct full output envelope around the raw analysis result.
    const output = {
      authority: 'inventory',
      version: '1.0.0',
      timestamp: new Date().toISOString(),
      action: 'PROCEED', // Inventory is read-only, always PROCEED
      projectPath: projectPath,
      ...result,
    };

    // Render markdown at most once, even when both printing and saving.
    const markdown = (opts.format === 'markdown' || opts.output)
      ? formatMarkdownOutput(result, projectPath)
      : null;

    // Output based on format
    if (opts.json) {
      console.log(JSON.stringify(output, null, 2));
    } else if (opts.format === 'markdown') {
      console.log(markdown);
    } else {
      console.log(formatTableOutput(result, projectPath));
    }

    // Save to file if requested (markdown keeps its format; everything
    // else is saved as the full JSON envelope).
    if (opts.output) {
      const outputPath = path.resolve(opts.output);
      const content = opts.format === 'markdown'
        ? markdown
        : JSON.stringify(output, null, 2);

      await fs.promises.writeFile(outputPath, content);

      if (!opts.quiet && !opts.json) {
        console.log(` ${colors.success}✓${ansi.reset} Output saved to: ${outputPath}`);
      }
    }

    return EXIT.SUCCESS;

  } catch (error) {
    spinner.fail(`Analysis failed: ${error.message}`);
    throw error;
  }
}
856
+
857
+ module.exports = {
858
+ runClassify: withErrorHandling(runClassify, "Classify failed"),
859
+ };