reposentry 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,2276 @@
1
+ #!/usr/bin/env node
2
+ import {
3
+ askCopilot,
4
+ getCopilotBackendName,
5
+ isCopilotAvailable,
6
+ logger,
7
+ setCopilotModel,
8
+ setVerbose
9
+ } from "./chunk-ZW3DYU5Y.js";
10
+ import {
11
+ createProgress
12
+ } from "./chunk-RNKCNCJW.js";
13
+ import {
14
+ buildDirectoryTree,
15
+ detectConfigs,
16
+ detectLanguages,
17
+ detectRoutes,
18
+ getContributors,
19
+ getGitTags,
20
+ getRecentCommits,
21
+ getRepoName,
22
+ gitCommand,
23
+ isGitRepo,
24
+ scanFiles
25
+ } from "./chunk-NYYK2IZI.js";
26
+ import {
27
+ ensureDir,
28
+ readFileContentSync,
29
+ readFileTruncated,
30
+ writeOutput
31
+ } from "./chunk-JZB6VB4T.js";
32
+
33
+ // src/cli.ts
34
+ import { Command } from "commander";
35
+
36
+ // src/core/orchestrator.ts
37
+ import { resolve as resolve2 } from "path";
38
+ import chalk from "chalk";
39
+ import boxen from "boxen";
40
+
41
+ // src/core/output-manager.ts
42
+ import { join, extname } from "path";
43
+ import { existsSync, readdirSync, rmSync, readFileSync as readFileSync2, writeFileSync, mkdirSync } from "fs";
44
+ import { marked } from "marked";
45
+
46
+ // src/utils/version.ts
47
+ import { readFileSync } from "fs";
48
+ import { dirname, resolve } from "path";
49
+ import { fileURLToPath } from "url";
50
// Memoized CLI version; the filesystem is consulted at most once per process.
var cachedVersion = null;

/**
 * Resolve the CLI's own version from its package.json (two directories above
 * this bundled module). Falls back to "0.0.0" when the manifest cannot be
 * read or parsed; the result is cached for subsequent calls.
 * @returns {string} a semver-like version string, never empty
 */
function getCliVersion() {
  if (cachedVersion !== null) {
    return cachedVersion;
  }
  let version = "0.0.0";
  try {
    const moduleDir = dirname(fileURLToPath(import.meta.url));
    const manifestPath = resolve(moduleDir, "../../package.json");
    const manifest = JSON.parse(readFileSync(manifestPath, "utf-8"));
    version = manifest.version || "0.0.0";
  } catch {
    // Missing or invalid manifest: keep the "0.0.0" fallback.
  }
  cachedVersion = version;
  return cachedVersion;
}
63
+
64
+ // src/core/output-manager.ts
65
// Extensions whose files are "raw" (non-markdown) output: AI responses for
// these get markdown code fences stripped before being written to disk.
// Consulted by stripCodeFences below.
var RAW_EXTENSIONS = /* @__PURE__ */ new Set([
  ".yml",
  ".yaml",
  ".json",
  ".toml",
  ".ini",
  ".cfg",
  ".conf",
  ".env",
  ".sh",
  ".bash",
  ".ps1",
  ".bat",
  ".cmd",
  ".tf",
  ".hcl",
  ".dockerfile",
  ".mmd"
]);
84
/**
 * Strip markdown code fences from AI output destined for raw (non-markdown)
 * files: RAW_EXTENSIONS members, Dockerfile variants, .env.example, or
 * "*.suggested" files. Markdown-like files are returned untouched.
 * When fenced blocks exist, the longest block (ties go to the earliest) is
 * taken as the real payload; leftover fence markers are deleted, and for
 * config-style files a leading prose heading comment is dropped. The result
 * always ends with exactly one trailing newline.
 */
function stripCodeFences(content, filename) {
  const ext = extname(filename).toLowerCase();
  const baseName = filename.toLowerCase();
  // NOTE: baseName is the whole relative path lowercased, so the
  // "dockerfile" prefix check only fires for top-level Dockerfile variants.
  const treatAsRaw =
    RAW_EXTENSIONS.has(ext) ||
    baseName.startsWith("dockerfile") ||
    baseName === ".env.example" ||
    baseName.endsWith(".suggested");
  if (!treatAsRaw) {
    return content;
  }
  let cleaned = content;
  // Find every ```lang ... ``` block and keep the longest one, if any.
  const fencePattern = /^```[\w.-]*\s*\n([\s\S]*?)^```\s*$/gm;
  let longest = null;
  for (let m = fencePattern.exec(cleaned); m !== null; m = fencePattern.exec(cleaned)) {
    if (longest === null || m[1].length > longest.length) {
      longest = m[1];
    }
  }
  if (longest !== null) {
    cleaned = longest;
  }
  // Remove any stray fence marker lines that survived.
  cleaned = cleaned.replace(/^```[\w.-]*\s*$/gm, "").replace(/^```\s*$/gm, "");
  const isConfigLike =
    [".yml", ".yaml"].includes(ext) ||
    baseName.startsWith("dockerfile") ||
    baseName === ".env.example";
  if (isConfigLike) {
    // Drop a leading "# Title"-style prose heading (only when nothing but
    // whitespace precedes it), which AI output often prepends to configs.
    const before = cleaned;
    cleaned = before.replace(/^#\s+[A-Z].*\n+/gm, (heading, offset) =>
      offset === 0 || before.slice(0, offset).trim() === "" ? "" : heading
    );
  }
  return cleaned.trim() + "\n";
}
109
/**
 * OutputManager owns the output directory for a run: it prepares (and, with
 * --force, wipes) the directory, writes generated files after stripping stray
 * markdown fences from raw formats, and — depending on options.format —
 * mirrors everything into a bundle.json or a browsable HTML export.
 */
var OutputManager = class _OutputManager {
  /**
   * @param options expects at least { baseDir, force, format }; format is
   *   compared against "json" and "html" below, anything else = plain files.
   */
  constructor(options) {
    this.options = options;
    this.baseDir = options.baseDir;
  }
  // Root directory all writes are joined against.
  baseDir;
  // Relative paths written this run, in write order.
  filesWritten = [];
  // format === "json" only: relative path -> cleaned content for bundle.json.
  contentByPath = /* @__PURE__ */ new Map();
  // format === "html" only: metadata rows for the generated index page.
  htmlExports = [];
  /** Files that must survive a --force wipe (persistent across runs) */
  static PRESERVED_FILES = ["history.json"];
  /**
   * Prepare the output directory:
   *  - refuses a non-empty directory unless --force (.DS_Store alone does
   *    not count as "non-empty");
   *  - with --force, wipes the directory but restores PRESERVED_FILES from
   *    in-memory backups taken before the wipe.
   * @throws Error when the directory is non-empty and force is off
   */
  async init() {
    const exists = existsSync(this.baseDir);
    if (exists) {
      const entries = readdirSync(this.baseDir, { withFileTypes: true });
      const nonEmpty = entries.some((e) => e.name !== ".DS_Store");
      if (nonEmpty && !this.options.force) {
        throw new Error(
          `Output directory is not empty: ${this.baseDir}. Use --force or choose a different --output directory.`
        );
      }
      if (nonEmpty && this.options.force) {
        // Snapshot preserved files into memory before deleting anything.
        const backups = /* @__PURE__ */ new Map();
        for (const name of _OutputManager.PRESERVED_FILES) {
          const fp = join(this.baseDir, name);
          if (existsSync(fp)) {
            try {
              backups.set(name, readFileSync2(fp, "utf-8"));
            } catch {
              // Unreadable preserved file: best-effort, simply not restored.
            }
          }
        }
        rmSync(this.baseDir, { recursive: true, force: true });
        if (backups.size > 0) {
          mkdirSync(this.baseDir, { recursive: true });
          for (const [name, content] of backups) {
            writeFileSync(join(this.baseDir, name), content, "utf-8");
          }
        }
      }
    }
    await ensureDir(this.baseDir);
  }
  /**
   * Write one generated file under baseDir. Content is passed through
   * stripCodeFences first, then recorded for the JSON bundle and/or mirrored
   * to the HTML export depending on options.format.
   */
  async write(relativePath, content) {
    const fullPath = join(this.baseDir, relativePath);
    const cleaned = stripCodeFences(content, relativePath);
    await writeOutput(fullPath, cleaned);
    this.filesWritten.push(relativePath);
    logger.debug(`Written: ${relativePath}`);
    if (this.options.format === "json") {
      this.contentByPath.set(relativePath, cleaned);
    }
    if (this.options.format === "html") {
      await this.writeHtmlExport(relativePath, cleaned);
    }
  }
  // Convenience wrapper: write into a subdirectory of baseDir.
  async writeToSubdir(subdir, filename, content) {
    await this.write(join(subdir, filename), content);
  }
  // Defensive copy so callers cannot mutate internal bookkeeping.
  getFilesWritten() {
    return [...this.filesWritten];
  }
  getFileCount() {
    return this.filesWritten.length;
  }
  getBaseDir() {
    return this.baseDir;
  }
  /**
   * Post-run step: emit bundle.json (json format) or the HTML index page
   * (html format). No-op for plain file output.
   */
  async finalize() {
    if (this.options.format === "json") {
      const bundle = {
        tool: "reposentry",
        version: getCliVersion(),
        generatedAt: (/* @__PURE__ */ new Date()).toISOString(),
        files: [...this.contentByPath.entries()].map(([path, content]) => ({ path, content }))
      };
      await writeOutput(join(this.baseDir, "bundle.json"), JSON.stringify(bundle, null, 2));
    }
    if (this.options.format === "html") {
      await this.writeHtmlIndex();
    }
  }
  // Build html/index.html linking every exported file.
  async writeHtmlIndex() {
    const items = this.htmlExports.map((e) => `<li><a href="./${escapeAttr(e.htmlPath)}">${escapeHtml(e.title)}</a> <span class="muted">(${escapeHtml(e.sourcePath)})</span></li>`).join("\n");
    const html = `<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1" />
<title>RepoSentry HTML Export</title>
<style>
body{font-family:system-ui,-apple-system,Segoe UI,Roboto,Ubuntu,Cantarell,Noto Sans,sans-serif;margin:32px;line-height:1.5}
h1{margin:0 0 8px 0}
.muted{color:#666;font-size:12px}
ul{padding-left:18px}
li{margin:6px 0}
</style>
</head>
<body>
<h1>RepoSentry HTML Export</h1>
<div class="muted">Generated by reposentry v${escapeHtml(getCliVersion())} on ${escapeHtml((/* @__PURE__ */ new Date()).toLocaleString())}</div>
<h2>Files</h2>
<ul>
${items || '<li class="muted">No HTML exports generated.</li>'}
</ul>
</body>
</html>`;
    await writeOutput(join(this.baseDir, "html", "index.html"), html);
  }
  /**
   * Mirror one written file into ./html:
   *  - .md is rendered through marked,
   *  - .mmd is embedded as a mermaid <div> (content HTML-escaped; see the
   *    NOTE on renderMarkdownToHtml about escaped entities inside mermaid),
   *  - anything else becomes an escaped <pre> dump named "<path>.html".
   */
  async writeHtmlExport(relativePath, content) {
    const ext = extname(relativePath).toLowerCase();
    const outBase = join(this.baseDir, "html");
    if (ext === ".md") {
      const htmlPath2 = replaceExt(relativePath, ".html");
      const fullOut2 = join(outBase, htmlPath2);
      const rendered2 = renderMarkdownToHtml(content);
      const page2 = wrapHtmlPage(relativePath, rendered2);
      await writeOutput(fullOut2, page2);
      this.htmlExports.push({ sourcePath: relativePath, htmlPath: htmlPath2.replace(/\\/g, "/"), title: relativePath });
      return;
    }
    if (ext === ".mmd") {
      const htmlPath2 = replaceExt(relativePath, ".html");
      const fullOut2 = join(outBase, htmlPath2);
      const rendered2 = `<div class="mermaid">${escapeHtml(content)}</div>`;
      const page2 = wrapHtmlPage(relativePath, rendered2);
      await writeOutput(fullOut2, page2);
      this.htmlExports.push({ sourcePath: relativePath, htmlPath: htmlPath2.replace(/\\/g, "/"), title: relativePath });
      return;
    }
    const htmlPath = `${relativePath}.html`;
    const fullOut = join(outBase, htmlPath);
    const rendered = `<pre><code>${escapeHtml(content)}</code></pre>`;
    const page = wrapHtmlPage(relativePath, rendered);
    await writeOutput(fullOut, page);
    this.htmlExports.push({ sourcePath: relativePath, htmlPath: htmlPath.replace(/\\/g, "/"), title: relativePath });
  }
};
247
/**
 * Swap the final extension of `path` for `newExt` (which includes its dot).
 * Paths without an extension are returned unchanged.
 */
function replaceExt(path, newExt) {
  const lastExtension = /\.[^.]+$/;
  return path.replace(lastExtension, newExt);
}
250
/** Escape the five HTML-special characters so text is safe to interpolate. */
function escapeHtml(input) {
  const entities = {
    "&": "&amp;",
    "<": "&lt;",
    ">": "&gt;",
    '"': "&quot;",
    "'": "&#39;"
  };
  return input.replace(/[&<>"']/g, (ch) => entities[ch]);
}
253
/**
 * Escape a string for use inside an HTML attribute value: HTML-escape it,
 * then strip raw newlines, which must never appear in attributes.
 */
function escapeAttr(input) {
  const escaped = escapeHtml(input);
  return escaped.replace(/\n/g, "");
}
256
/**
 * Render a Markdown string to HTML via `marked`, with two adjustments:
 *  - raw HTML embedded in the markdown is escaped instead of passed through
 *    (defensive: the generated docs can contain arbitrary AI output);
 *  - fenced ```mermaid blocks are rewritten from <pre><code> into
 *    <div class="mermaid"> so the client-side Mermaid runtime renders them.
 *
 * NOTE(review): marked HTML-escapes code-block contents, so the text moved
 * into the mermaid <div> may still carry entities like &gt; — confirm the
 * Mermaid runtime tolerates those for the diagram syntax used here.
 * NOTE(review): marked.use() mutates the shared marked instance on every
 * call; fine only as long as all callers want this same renderer.
 */
function renderMarkdownToHtml(markdown) {
  const renderer = new marked.Renderer();
  // Escape raw HTML tokens rather than emitting them verbatim.
  renderer.html = (token) => {
    const raw = token?.text ?? token?.raw ?? "";
    return escapeHtml(String(raw));
  };
  marked.use({ renderer });
  const html = marked.parse(markdown);
  // Promote fenced mermaid code blocks to divs for client-side rendering.
  return html.replace(
    /<pre><code class="language-mermaid">([\s\S]*?)<\/code><\/pre>/g,
    '<div class="mermaid">$1</div>'
  );
}
269
/**
 * Wrap rendered body HTML in a standalone page shell: basic typography and
 * code-block CSS, plus the Mermaid CDN runtime initialized on load so any
 * `.mermaid` divs in the body are rendered client-side. The title is
 * HTML-escaped before interpolation; bodyHtml is trusted as-is.
 */
function wrapHtmlPage(title, bodyHtml) {
  return `<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1" />
<title>${escapeHtml(title)} \u2014 RepoSentry Export</title>
<script src="https://cdn.jsdelivr.net/npm/mermaid@11/dist/mermaid.min.js"></script>
<style>
body{font-family:system-ui,-apple-system,Segoe UI,Roboto,Ubuntu,Cantarell,Noto Sans,sans-serif;margin:24px;line-height:1.55}
pre{background:#0b1020;color:#e6edf3;padding:14px;border-radius:10px;overflow:auto}
code{font-family:ui-monospace,SFMono-Regular,Menlo,Consolas,monospace;font-size:12px}
.mermaid{background:#fff;border:1px solid #e5e7eb;border-radius:12px;padding:14px;overflow:auto}
h1,h2,h3{margin-top:18px}
</style>
</head>
<body>
${bodyHtml}
<script>
try { mermaid.initialize({ startOnLoad: true, theme: 'default' }); } catch {}
</script>
</body>
</html>`;
}
293
+
294
+ // src/scanners/import-parser.ts
295
+ import { join as join2 } from "path";
296
// src/scanners/import-parser.ts
/**
 * Extract import/require specifiers from source files under `rootDir`.
 * Supports ES imports and CommonJS require for JS/TS, `import`/`from` for
 * Python, and single or grouped imports for Go. At most the first 100
 * candidate files are scanned; unreadable files are skipped silently.
 *
 * FIX: the original filtered candidates to .ts/.tsx/.js/.jsx only, which made
 * its Python and Go branches unreachable dead code. Candidates now include
 * .py and .go, and the JS/TS regexes only run on JS-like files.
 *
 * @param {string} rootDir root directory the file paths are joined against
 * @param {string[]} files repo-relative file paths to consider
 * @returns {{file: string, imports: string[]}[]} one entry per file that
 *   yielded at least one import specifier, in input order
 */
function parseImports(rootDir, files) {
  const sourceExtensions = [".ts", ".tsx", ".js", ".jsx", ".py", ".go"];
  const candidates = files.filter((f) => sourceExtensions.some((ext) => f.endsWith(ext)));
  const results = [];
  for (const file of candidates.slice(0, 100)) {
    try {
      const content = readFileContentSync(join2(rootDir, file));
      const imports = [];
      if (/\.(?:ts|tsx|js|jsx)$/.test(file)) {
        // ES module imports: `import x from "mod"`, bare `import "mod"`.
        for (const match of content.matchAll(/import\s+(?:.*?\s+from\s+)?['"]([^'"]+)['"]/g)) {
          imports.push(match[1]);
        }
        // CommonJS: `require("mod")`.
        for (const match of content.matchAll(/require\s*\(\s*['"]([^'"]+)['"]\s*\)/g)) {
          imports.push(match[1]);
        }
      }
      if (file.endsWith(".py")) {
        // `from pkg import x` captures group 1; bare `import pkg` group 2.
        for (const match of content.matchAll(/(?:from\s+(\S+)\s+import|import\s+(\S+))/g)) {
          imports.push(match[1] || match[2]);
        }
      }
      if (file.endsWith(".go")) {
        // Single `import "pkg"` (group 2) or grouped `import ( ... )` (group 1).
        for (const match of content.matchAll(/import\s+(?:\(\s*([\s\S]*?)\s*\)|"([^"]+)")/g)) {
          if (match[2]) {
            imports.push(match[2]);
          } else if (match[1]) {
            for (const line of match[1].matchAll(/"([^"]+)"/g)) {
              imports.push(line[1]);
            }
          }
        }
      }
      if (imports.length > 0) {
        results.push({ file, imports });
      }
    } catch {
      // Unreadable/binary file: best-effort scan, skip it.
    }
  }
  return results;
}
338
+
339
+ // src/scanners/model-detector.ts
340
+ import { join as join3 } from "path";
341
/**
 * Per-ORM regex recipes used by detectModels. `filePattern` selects candidate
 * files; each /g regex in `modelPatterns` is re-instantiated before use so no
 * lastIndex state leaks. Where present, capture group 1 is the model name and
 * group 2 a raw fields blob. Purely heuristic: regexes over source text.
 */
var ORM_PATTERNS = [
  // Prisma schema files: `model Name { ...fields... }`.
  {
    name: "Prisma",
    filePattern: /\.prisma$/,
    modelPatterns: [
      /model\s+(\w+)\s*\{([^}]+)\}/g
    ]
  },
  // Mongoose: schema bodies (fields only) and model registrations (name only).
  {
    name: "Mongoose",
    filePattern: /\.(ts|js)$/,
    modelPatterns: [
      /new\s+(?:mongoose\.)?Schema\s*\(\s*\{([^}]+)\}/g,
      /mongoose\.model\s*\(\s*['"`](\w+)['"`]/g
    ]
  },
  // Sequelize: define()/Model.init() with name and attribute object.
  {
    name: "Sequelize",
    filePattern: /\.(ts|js)$/,
    modelPatterns: [
      /(?:sequelize\.define|Model\.init)\s*\(\s*['"`](\w+)['"`]\s*,\s*\{([^}]+)\}/g
    ]
  },
  // TypeORM decorators; the bare @Column pattern has no capture groups.
  {
    name: "TypeORM",
    filePattern: /\.(ts|js)$/,
    modelPatterns: [
      /@Entity\s*\(\s*(?:['"`](\w+)['"`])?\s*\)/g,
      /@Column\s*\(/g
    ]
  },
  // Django: `class Name(models.Model):`.
  {
    name: "Django ORM",
    filePattern: /\.py$/,
    modelPatterns: [
      /class\s+(\w+)\s*\(\s*(?:models\.)?Model\s*\)\s*:/g
    ]
  },
  // SQLAlchemy: declarative Base or Flask-SQLAlchemy db.Model subclasses.
  {
    name: "SQLAlchemy",
    filePattern: /\.py$/,
    modelPatterns: [
      /class\s+(\w+)\s*\(\s*(?:Base|db\.Model)\s*\)\s*:/g
    ]
  },
  // GORM: structs whose body mentions "gorm" (tags or embedded types).
  {
    name: "GORM",
    filePattern: /\.go$/,
    modelPatterns: [
      /type\s+(\w+)\s+struct\s*\{[^}]*gorm/g
    ]
  },
  // Rails ActiveRecord subclasses.
  {
    name: "ActiveRecord",
    filePattern: /\.rb$/,
    modelPatterns: [
      /class\s+(\w+)\s*<\s*(?:ApplicationRecord|ActiveRecord::Base)/g
    ]
  }
];
401
/**
 * Heuristically detect ORM models in up to the first 200 files by running
 * each ORM's regex recipes over files matching its filePattern. Returns one
 * entry per regex hit: { name, file, orm, fields }, fields capped at 20.
 * Unreadable files are skipped; names fall back to the file's basename.
 */
function detectModels(rootDir, files) {
  const detected = [];
  for (const file of files.slice(0, 200)) {
    for (const orm of ORM_PATTERNS) {
      if (!orm.filePattern.test(file)) {
        continue;
      }
      let content;
      try {
        content = readFileContentSync(join3(rootDir, file));
      } catch {
        continue; // unreadable file: skip this ORM/file pair silently
      }
      for (const pattern of orm.modelPatterns) {
        // Fresh RegExp so the shared /g pattern carries no lastIndex state.
        const freshPattern = new RegExp(pattern.source, pattern.flags);
        for (const hit of content.matchAll(freshPattern)) {
          const fallbackName = file.split(/[/\\]/).pop()?.replace(/\.\w+$/, "") || "Unknown";
          const fieldEntries = (hit[2] || "")
            .split(/[,\n]/)
            .map((field) => field.trim())
            .filter((field) => field && !field.startsWith("//") && !field.startsWith("#"));
          detected.push({
            name: hit[1] || fallbackName,
            file,
            orm: orm.name,
            fields: fieldEntries.slice(0, 20)
          });
        }
      }
    }
  }
  return detected;
}
429
+
430
+ // src/scanners/git-analyzer.ts
431
// src/scanners/git-analyzer.ts
/**
 * Collect repository history signals by shelling out to git (via the shared
 * gitCommand helper, run in `cwd`): contributors, the last 30 commits, tags,
 * total commit count, up to 20 unmerged remote branches, first/last commit
 * dates, and a per-top-level-directory "owner" (top shortlog author) for up
 * to 15 directories.
 *
 * NOTE(review): assumes gitCommand returns stdout as a string (presumably ""
 * on failure, given the `|| 0` / filter(Boolean) guards) — confirm against
 * its definition in the scanners chunk.
 */
function analyzeGitHistory(cwd) {
  const contributors = getContributors(cwd);
  const recentCommits = getRecentCommits(cwd, 30);
  const tags = getGitTags(cwd);
  // Total history size; non-numeric output (e.g. empty repo) degrades to 0.
  const totalCommitsStr = gitCommand("rev-list --count HEAD", cwd);
  const totalCommits = parseInt(totalCommitsStr, 10) || 0;
  // Unmerged remote branches, minus the symbolic HEAD entry, capped at 20.
  const branches = gitCommand("branch -r --no-merged", cwd).split("\n").map((b) => b.trim()).filter((b) => b && !b.includes("HEAD")).slice(0, 20);
  const firstCommitDate = gitCommand("log --reverse --format=%ci --max-count=1", cwd);
  const lastCommitDate = gitCommand("log -1 --format=%ci", cwd);
  // Map: top-level directory -> most prolific author for that directory.
  const directoryOwnership = /* @__PURE__ */ new Map();
  const topDirs = gitCommand("ls-tree -d --name-only HEAD", cwd).split("\n").filter(Boolean).slice(0, 15);
  for (const dir of topDirs) {
    const ownerOutput = gitCommand(`shortlog -sn --no-merges HEAD -- "${dir}"`, cwd);
    const firstLine = ownerOutput.split("\n")[0] || "";
    // shortlog -sn lines look like "  42  Author Name"; capture the name.
    const match = firstLine.match(/^\s*\d+\s+(.+)$/);
    if (match) {
      directoryOwnership.set(dir, match[1].trim());
    }
  }
  return {
    contributors,
    recentCommits,
    totalCommits,
    tags,
    activeBranches: branches,
    firstCommitDate,
    lastCommitDate,
    directoryOwnership
  };
}
461
+
462
+ // src/core/prompt-builder.ts
463
// src/core/prompt-builder.ts
/**
 * Assemble a Copilot prompt from project context plus a task description.
 * Sections are emitted in a fixed order (overview, frameworks, package
 * manager, file tree, code, extra context, task, format instruction) and
 * joined with blank lines. `outputFormat` selects the trailing instruction:
 * "mermaid" and "json" demand raw output, anything else asks for Markdown.
 */
function buildPrompt(task, context, outputFormat = "markdown") {
  const sections = [
    `You are analyzing a ${context.languages.join("/")} project called "${context.projectName}".`
  ];
  if (context.frameworks.length > 0) {
    sections.push(`Frameworks/libraries: ${context.frameworks.join(", ")}.`);
  }
  if (context.packageManager) {
    sections.push(`Package manager: ${context.packageManager}.`);
  }
  if (context.fileTree) {
    sections.push(`Project structure:\n${context.fileTree}`);
  }
  if (context.codeContext) {
    sections.push(`Relevant code:\n${context.codeContext}`);
  }
  if (context.additionalContext) {
    sections.push(context.additionalContext);
  }
  sections.push(`Task: ${task}`);
  let formatRule = "Output in well-formatted Markdown.";
  if (outputFormat === "mermaid") {
    formatRule = "Output ONLY valid Mermaid diagram syntax. No markdown fences, no explanation.";
  } else if (outputFormat === "json") {
    formatRule = "Output ONLY valid JSON. No markdown fences, no explanation.";
  }
  sections.push(formatRule);
  return sections.join("\n\n");
}
493
+
494
+ // src/prompts/docs.prompts.ts
495
// Thin wrappers around buildPrompt: each pairs a fixed task description for
// one documentation artifact with the shared project context `ctx`.

/** Prompt for a full project README.md. */
function readmePrompt(ctx) {
  return buildPrompt(
    "Generate a complete README.md for this project. Include: project description, features, installation instructions, usage examples, configuration options, and contributing section. Make it professional and comprehensive.",
    ctx
  );
}
/** Prompt for endpoint/function reference docs (API.md). */
function apiDocsPrompt(ctx) {
  return buildPrompt(
    "Generate API documentation (API.md). For each detected endpoint/function, document: method, path/signature, parameters, request body schema, response format, and example usage with curl commands or code snippets.",
    ctx
  );
}
/** Prompt for a cross-platform dev environment guide (SETUP.md). */
function setupPrompt(ctx) {
  return buildPrompt(
    "Generate a SETUP.md development environment setup guide. Include step-by-step instructions for macOS, Linux, and Windows. Cover: prerequisites, dependency installation, environment variables, database setup (if applicable), and running the project locally.",
    ctx
  );
}
/** Prompt for a contributor workflow guide (CONTRIBUTING.md). */
function contributingPrompt(ctx) {
  return buildPrompt(
    "Generate a CONTRIBUTING.md guide. Include: how to fork and clone, branch naming conventions, commit message format, code style guidelines, PR process, testing requirements, and code review expectations.",
    ctx
  );
}
/** Prompt for a Keep-a-Changelog-style CHANGELOG.md built from git history. */
function changelogPrompt(ctx) {
  return buildPrompt(
    "Generate a CHANGELOG.md from the git commit history provided. Group changes by version tags (if any) or by date. Categorize entries as: Features, Bug Fixes, Documentation, Refactoring, and Other. Use Keep a Changelog format.",
    ctx
  );
}
/** Prompt for an FAQ.md inferred from code smells and setup complexity. */
function faqPrompt(ctx) {
  return buildPrompt(
    "Generate an FAQ.md based on the codebase analysis. Infer common questions from: TODO/FIXME/HACK comments found in code, complex setup requirements, common pitfalls, and configuration options. Provide clear answers for each.",
    ctx
  );
}
531
+
532
+ // src/engines/docs-engine.ts
533
// src/engines/docs-engine.ts
/**
 * Generate the six core documentation files (README, API, SETUP,
 * CONTRIBUTING, CHANGELOG, FAQ) by prompting Copilot once per file, then
 * compute a heuristic documentation-health score.
 *
 * FIX: removed the `filesGenerated` counter, which was incremented but never
 * read (dead local).
 *
 * @param input scan results: { context, routes, recentCommits, tags, hasReadme }
 * @param output sink exposing async write(file, content) (OutputManager)
 * @param progress reporter exposing start/increment/succeed
 * @returns {{score: number, details: string}} score clamped to [0, 100]
 */
async function runDocsEngine(input, output, progress) {
  const steps = [
    { key: "readme", label: "README.md", promptFn: readmePrompt, file: "README.md" },
    { key: "api", label: "API.md", promptFn: apiDocsPrompt, file: "API.md" },
    { key: "setup", label: "SETUP.md", promptFn: setupPrompt, file: "SETUP.md" },
    { key: "contributing", label: "CONTRIBUTING.md", promptFn: contributingPrompt, file: "CONTRIBUTING.md" },
    { key: "changelog", label: "CHANGELOG.md", promptFn: changelogPrompt, file: "CHANGELOG.md" },
    { key: "faq", label: "FAQ.md", promptFn: faqPrompt, file: "FAQ.md" }
  ];
  progress.start("Documentation Engine", steps.length);
  // Clone the shared context so the appended hints stay engine-local.
  const ctx = { ...input.context };
  if (input.routes.length > 0) {
    ctx.additionalContext = (ctx.additionalContext || "") + `
Detected API routes:
${input.routes.map((r) => `${r.method} ${r.path} (${r.file})`).join("\n")}`;
  }
  if (input.recentCommits.length > 0) {
    ctx.additionalContext = (ctx.additionalContext || "") + `
Recent commits:
${input.recentCommits.join("\n")}`;
  }
  if (input.tags.length > 0) {
    ctx.additionalContext = (ctx.additionalContext || "") + `
Version tags: ${input.tags.join(", ")}`;
  }
  // Sequential on purpose: one Copilot request at a time, in steps order.
  for (const step of steps) {
    progress.increment(step.label);
    const prompt = step.promptFn(ctx);
    const result = await askCopilot(prompt);
    await output.write(step.file, result);
  }
  progress.succeed("Documentation Engine");
  // Heuristic score: start at 20, reward existing docs signals, clamp to [0, 100].
  let score = 20;
  if (input.hasReadme) score += 25;
  if (input.routes.length > 0) score += 10;
  if (input.recentCommits.length > 3) score += 10;
  if (input.tags.length > 0) score += 15;
  if (!input.hasReadme) score -= 10;
  if (input.recentCommits.length === 0) score -= 5;
  score = Math.max(0, Math.min(100, score));
  return {
    score,
    details: `README: ${input.hasReadme ? "yes" : "missing"}, ${input.routes.length} routes, ${input.tags.length} tags, ${input.recentCommits.length} recent commits`
  };
}
580
+
581
+ // src/utils/mermaid.ts
582
/**
 * Embed a Mermaid diagram in Markdown: an H2 heading for `title`, a blank
 * line, then the diagram inside a fenced ```mermaid block, with a trailing
 * newline.
 */
function embedMermaidInMarkdown(title, diagram) {
  const lines = [`## ${title}`, "", "```mermaid", diagram, "```", ""];
  return lines.join("\n");
}
590
+
591
+ // src/prompts/architecture.prompts.ts
592
// Diagram prompt wrappers: each demands raw Mermaid output ("mermaid" format
// rule) except architectureDocPrompt, which produces a Markdown document.

/** Mermaid flowchart TD of the high-level system components. */
function systemArchitecturePrompt(ctx) {
  return buildPrompt(
    'Generate ONLY a Mermaid flowchart diagram (flowchart TD syntax) showing the high-level system architecture. Show major components (frontend, backend services, databases, external APIs) and their connections with labeled arrows. Output the raw mermaid code starting with "flowchart TD". No text before or after the diagram.',
    ctx,
    "mermaid"
  );
}
/** Mermaid sequence diagram of the request/data lifecycle. */
function dataFlowPrompt(ctx) {
  return buildPrompt(
    'Generate ONLY a Mermaid sequence diagram (sequenceDiagram syntax) showing how data flows through the system. Show the request lifecycle: Client, Frontend, API/Backend, Auth, Database. Output the raw mermaid code starting with "sequenceDiagram". No text before or after the diagram.',
    ctx,
    "mermaid"
  );
}
/** Mermaid flowchart LR of module-level import dependencies. */
function dependencyGraphPrompt(ctx) {
  return buildPrompt(
    'Generate ONLY a Mermaid flowchart (flowchart LR syntax) showing the module/package dependency graph based on the import structure. Show which modules import from which other modules. Group related modules. Output the raw mermaid code starting with "flowchart LR". No text before or after the diagram.',
    ctx,
    "mermaid"
  );
}
/** Mermaid erDiagram of database models and relationships. */
function erDiagramPrompt(ctx) {
  return buildPrompt(
    'Generate ONLY a Mermaid ER diagram (erDiagram syntax) showing database tables/models, their columns with types, and relationships (one-to-many, many-to-many). Output the raw mermaid code starting with "erDiagram". No text before or after the diagram.',
    ctx,
    "mermaid"
  );
}
/** Mermaid sequence diagram of the API request pipeline incl. error paths. */
function apiFlowPrompt(ctx) {
  return buildPrompt(
    'Generate ONLY a Mermaid sequence diagram (sequenceDiagram syntax) showing the API request flow: Client ->> API Gateway ->> Auth Middleware ->> Route Handler ->> Service Layer ->> Database. Show success and error paths. Output the raw mermaid code starting with "sequenceDiagram". No text before or after the diagram.',
    ctx,
    "mermaid"
  );
}
/** Markdown ARCHITECTURE.md narrative (default markdown format rule). */
function architectureDocPrompt(ctx) {
  return buildPrompt(
    "Generate an ARCHITECTURE.md document explaining the system architecture. Include: overview, component descriptions, data flow explanation, key design decisions, and where to find each component in the codebase.",
    ctx
  );
}
633
+
634
+ // src/engines/architecture-engine.ts
635
// src/engines/architecture-engine.ts
/**
 * Generate five Mermaid diagrams plus ARCHITECTURE.md. Import/model/route
 * summaries are appended to the prompt context first; each diagram is written
 * both as a standalone .mmd file and embedded into ARCHITECTURE.md. Returns
 * a heuristic architecture-health score clamped to [0, 100].
 */
async function runArchitectureEngine(input, output, progress) {
  progress.start("Architecture Engine", 6);
  // Clone the shared context; the hints below are engine-local.
  const ctx = { ...input.context };
  if (input.imports.length > 0) {
    // "file → specifiers" lines, capped at 30 files to bound prompt size.
    const depSummary = input.imports.slice(0, 30).map((i) => `${i.file} \u2192 ${i.imports.join(", ")}`).join("\n");
    ctx.additionalContext = (ctx.additionalContext || "") + `
Import graph:
${depSummary}`;
  }
  if (input.models.length > 0) {
    ctx.additionalContext = (ctx.additionalContext || "") + `
Database models:
${input.models.map((m) => `${m.name} (${m.orm}) in ${m.file}: ${m.fields.slice(0, 5).join(", ")}`).join("\n")}`;
  }
  if (input.routes.length > 0) {
    ctx.additionalContext = (ctx.additionalContext || "") + `
API routes:
${input.routes.map((r) => `${r.method} ${r.path}`).join("\n")}`;
  }
  // One entry per diagram: prompt builder, .mmd output path, embed title.
  const diagrams = [
    { key: "arch", label: "System Architecture", promptFn: systemArchitecturePrompt, file: "diagrams/architecture.mmd", title: "System Architecture" },
    { key: "dataflow", label: "Data Flow", promptFn: dataFlowPrompt, file: "diagrams/data-flow.mmd", title: "Data Flow" },
    { key: "deps", label: "Dependency Graph", promptFn: dependencyGraphPrompt, file: "diagrams/dependency-graph.mmd", title: "Dependency Graph" },
    { key: "er", label: "Database Schema", promptFn: erDiagramPrompt, file: "diagrams/database-schema.mmd", title: "Database Schema" },
    { key: "apiflow", label: "API Flow", promptFn: apiFlowPrompt, file: "diagrams/api-flow.mmd", title: "API Request Flow" }
  ];
  const mermaidSections = [];
  // Sequential: one Copilot request at a time, in diagram order.
  for (const diag of diagrams) {
    progress.increment(diag.label);
    const prompt = diag.promptFn(ctx);
    const result = await askCopilot(prompt);
    await output.write(diag.file, result);
    mermaidSections.push(embedMermaidInMarkdown(diag.title, result));
  }
  progress.increment("ARCHITECTURE.md");
  const archDocPrompt = architectureDocPrompt(ctx);
  const archDoc = await askCopilot(archDocPrompt);
  // Narrative doc first, then every diagram appended, separated by rules.
  const fullArchDoc = archDoc + "\n\n---\n\n" + mermaidSections.join("\n---\n\n");
  await output.write("ARCHITECTURE.md", fullArchDoc);
  progress.succeed("Architecture Engine");
  // Heuristic score: base 30, bonuses for each analysis signal present.
  let score = 30;
  if (input.imports.length > 0) score += 15;
  if (input.models.length > 0) score += 15;
  if (input.routes.length > 0) score += 15;
  const uniqueDirs = new Set(input.imports.map((i) => i.file.split("/")[0]));
  if (uniqueDirs.size >= 3) score += 10;
  if (uniqueDirs.size >= 5) score += 5;
  if (input.imports.length === 0 && input.routes.length === 0) score -= 10;
  score = Math.max(0, Math.min(100, score));
  return {
    score,
    details: `${input.imports.length} imports, ${input.models.length} models, ${input.routes.length} routes, ${uniqueDirs.size} top-level dirs`
  };
}
689
+
690
+ // src/engines/security-engine.ts
691
+ import { join as join4 } from "path";
692
+
693
+ // src/prompts/security.prompts.ts
694
// src/prompts/security.prompts.ts
// Security prompt wrappers around buildPrompt; note secretsScanPrompt asks
// for raw JSON and threatModelPrompt for raw Mermaid.

/** Full OWASP-mapped audit with severities and a letter grade. */
function securityAuditPrompt(ctx) {
  return buildPrompt(
    "Perform a comprehensive security audit of this codebase. For each finding, provide: severity (Critical/High/Medium/Low), category (OWASP Top 10 mapping), description, exact location, and recommended fix. Include an overall risk grade (A-F). Check for: hardcoded secrets, SQL injection, XSS, path traversal, command injection, insecure crypto, authentication issues, authorization gaps, input validation, CORS misconfiguration, sensitive data exposure.",
    ctx
  );
}
/** Dependency + code-level vulnerability report with remediation steps. */
function vulnerabilityReportPrompt(ctx) {
  return buildPrompt(
    "Generate a vulnerability report analyzing: 1) Known vulnerabilities in dependencies (based on package versions). 2) Code-level security issues found through pattern analysis. For each vulnerability, provide: CVE (if applicable), severity, affected component, description, and remediation steps.",
    ctx
  );
}
/** Hardcoded-secrets scan; result is requested as raw JSON. */
function secretsScanPrompt(ctx) {
  return buildPrompt(
    "Scan for hardcoded secrets in the codebase. Look for: API keys, passwords, tokens, private keys, connection strings, and other sensitive data in source code and config files. For each finding, provide the file, line pattern, type of secret, and risk level.",
    ctx,
    "json"
  );
}
/** Trust-boundary threat model; result is requested as raw Mermaid. */
function threatModelPrompt(ctx) {
  return buildPrompt(
    "Generate a Mermaid threat model diagram. Show: trust boundaries, data flows across boundaries, potential attack surfaces, threat actors, and data stores. Use flowchart TD syntax with subgraphs for trust boundaries.",
    ctx,
    "mermaid"
  );
}
/** Step-by-step, severity-prioritized fix guide for reported issues. */
function remediationPrompt(ctx) {
  return buildPrompt(
    "Generate a step-by-step remediation guide for the security issues found. For each issue, provide: the problem, why it matters, the exact fix (with code examples), and how to verify the fix works. Prioritize by severity.",
    ctx
  );
}
727
+ // src/engines/security-engine.ts
728
/**
 * Fast, purely heuristic regex scan for security smells over up to 100
 * source files. Produces human-readable findings like
 * "[High] Hardcoded password in src/a.ts" — at most one finding per
 * (pattern, file) pair, no line numbers. Unreadable files are skipped.
 */
function quickSecurityScan(rootDir, files) {
  // Smell patterns with a coarse severity label each.
  const patterns = [
    { name: "Hardcoded password", pattern: /(?:password|passwd|pwd)\s*[:=]\s*['"][^'"]{3,}['"]/gi, severity: "High" },
    { name: "Hardcoded API key", pattern: /(?:api[_-]?key|apikey)\s*[:=]\s*['"][^'"]{8,}['"]/gi, severity: "High" },
    { name: "Hardcoded token", pattern: /(?:token|secret)\s*[:=]\s*['"][^'"]{8,}['"]/gi, severity: "High" },
    { name: "SQL injection risk", pattern: /(?:query|execute)\s*\(\s*['"`].*?\$\{/gi, severity: "High" },
    { name: "eval() usage", pattern: /\beval\s*\(/g, severity: "Medium" },
    { name: "exec() with string", pattern: /\bexec(?:Sync)?\s*\(\s*['"`].*?\$\{/gi, severity: "High" },
    { name: "MD5 usage", pattern: /\bmd5\b/gi, severity: "Medium" },
    { name: "console.log in production", pattern: /console\.log\s*\(/g, severity: "Low" },
    { name: "CORS wildcard", pattern: /cors\s*\(\s*\{[^}]*origin\s*:\s*['"]\*['"]/gi, severity: "Medium" },
    { name: "Disabled SSL verification", pattern: /rejectUnauthorized\s*:\s*false/g, severity: "High" }
  ];
  const scannableExts = [".ts", ".js", ".py", ".go", ".java", ".rb"];
  const scannable = files
    .filter((f) => scannableExts.some((ext) => f.endsWith(ext)))
    .slice(0, 100);
  const findings = [];
  for (const file of scannable) {
    let content;
    try {
      content = readFileContentSync(join4(rootDir, file));
    } catch {
      continue; // unreadable/binary file: skip silently
    }
    for (const { name, pattern, severity } of patterns) {
      // Fresh RegExp per use so the /g patterns never carry lastIndex state.
      const freshPattern = new RegExp(pattern.source, pattern.flags);
      if (freshPattern.test(content)) {
        findings.push(`[${severity}] ${name} in ${file}`);
      }
    }
  }
  return findings;
}
759
/**
 * Run the security engine: a fast local pattern scan feeds five Copilot
 * analyses (audit, vulnerability report, secrets scan, threat model,
 * remediation guide) written under security/, then a heuristic score is
 * derived from pattern findings, repo hygiene flags, and severity keywords
 * in the AI vulnerability report.
 *
 * FIX: removed the `totalFindings` local, which was computed but never used
 * (dead variable).
 *
 * @param input { rootDir, files, context, hasGitignore, hasEnvFile }
 * @param output sink exposing async writeToSubdir(dir, file, content)
 * @param progress reporter exposing start/increment/succeed
 * @returns {{score: number, details: string}} score clamped to [0, 100]
 */
async function runSecurityEngine(input, output, progress) {
  progress.start("Security Engine", 6);
  progress.increment("Pattern scanning");
  const quickFindings = quickSecurityScan(input.rootDir, input.files);
  // Clone the shared context; local findings are fed into every prompt.
  const ctx = { ...input.context };
  if (quickFindings.length > 0) {
    ctx.additionalContext = (ctx.additionalContext || "") + `
Quick security scan findings:
${quickFindings.join("\n")}`;
  }
  progress.increment("Security audit");
  const auditResult = await askCopilot(securityAuditPrompt(ctx));
  await output.writeToSubdir("security", "SECURITY_AUDIT.md", auditResult);
  progress.increment("Vulnerability analysis");
  const vulnResult = await askCopilot(vulnerabilityReportPrompt(ctx));
  await output.writeToSubdir("security", "VULNERABILITY_REPORT.md", vulnResult);
  progress.increment("Secrets scan");
  const secretsResult = await askCopilot(secretsScanPrompt(ctx));
  await output.writeToSubdir("security", "secrets-scan.json", secretsResult);
  progress.increment("Threat model");
  const threatResult = await askCopilot(threatModelPrompt(ctx));
  await output.writeToSubdir("security", "threat-model.mmd", threatResult);
  progress.increment("Remediation guide");
  const remResult = await askCopilot(remediationPrompt(ctx));
  await output.writeToSubdir("security", "REMEDIATION.md", remResult);
  progress.succeed("Security Engine");
  // Score: start at 100, subtract per local finding by severity prefix.
  const highCount = quickFindings.filter((f) => f.startsWith("[High]")).length;
  const medCount = quickFindings.filter((f) => f.startsWith("[Medium]")).length;
  const lowCount = quickFindings.filter((f) => f.startsWith("[Low]")).length;
  let score = 100;
  score -= highCount * 20;
  score -= medCount * 10;
  score -= lowCount * 3;
  if (!input.hasGitignore) score -= 15;
  if (input.hasEnvFile) score -= 10;
  // Crude keyword counts over the AI report approximate AI-found issues.
  const vulnLower = vulnResult.toLowerCase();
  const aiHighMatches = (vulnLower.match(/\bcritical\b|\bhigh\s*severity\b|\bhigh\s*risk\b/g) || []).length;
  const aiMedMatches = (vulnLower.match(/\bmedium\s*severity\b|\bmedium\s*risk\b|\bmoderate\b/g) || []).length;
  score -= aiHighMatches * 5;
  score -= aiMedMatches * 2;
  score = Math.max(0, Math.min(100, score));
  return {
    score,
    details: `${quickFindings.length} pattern findings (${highCount}H/${medCount}M/${lowCount}L) + ${aiHighMatches + aiMedMatches} AI findings | .gitignore: ${input.hasGitignore ? "yes" : "NO"} | .env committed: ${input.hasEnvFile ? "YES" : "no"}`
  };
}
806
+
807
+ // src/engines/ci-engine.ts
808
+ import { join as join5 } from "path";
809
+
810
+ // src/prompts/ci.prompts.ts
811
/** Builds the prompt that asks Copilot for a production-ready GitHub Actions workflow. */
function ciPipelinePrompt(ctx) {
  const instruction =
    "Generate a complete GitHub Actions CI/CD workflow (ci.yml). Include: trigger on push to main and PRs, install dependencies with correct package manager, run linting, run tests, build the project, cache dependencies, and use appropriate Node.js/Python/Go version matrix. Make it production-ready.";
  return buildPrompt(instruction, ctx);
}
817
/** Builds the prompt that asks Copilot to audit an existing Dockerfile. */
function dockerfileAuditPrompt(ctx) {
  const instruction =
    "Audit this Dockerfile against best practices. Check for: running as root, missing multi-stage build, large base image, missing .dockerignore, no health check, exposed unnecessary ports, secrets in build args, layer optimization issues. For each finding, provide the issue, risk level, and corrected Dockerfile line.";
  return buildPrompt(instruction, ctx);
}
823
/** Builds the prompt that asks Copilot to generate a Dockerfile from scratch. */
function dockerfileGeneratePrompt(ctx) {
  const instruction =
    "Generate an optimized, production-ready Dockerfile for this project. Use multi-stage build, run as non-root user, use alpine base where possible, include health check, optimize layer caching, and add appropriate labels.";
  return buildPrompt(instruction, ctx);
}
829
/** Builds the prompt that asks Copilot for a local-development docker-compose.yml. */
function dockerComposePrompt(ctx) {
  const instruction =
    "Generate a docker-compose.yml for local development. Include: the application service, database service (if detected), any required infrastructure (Redis, RabbitMQ, etc.), proper networking, volume mounts for live reload, environment variables, and health checks.";
  return buildPrompt(instruction, ctx);
}
835
/** Builds the prompt that asks Copilot for an annotated .env.example file. */
function envExamplePrompt(ctx) {
  const instruction =
    "Generate a .env.example file based on environment variables used in the codebase. For each variable, include a comment explaining its purpose and a placeholder value. Group by category (database, auth, external services, etc.).";
  return buildPrompt(instruction, ctx);
}
841
/** Builds the prompt that asks Copilot for the "Take It To Production" deployment guide. */
function takeItToProdPrompt(ctx) {
  const instruction = `Generate a comprehensive production deployment guide called "Take It To Production". Include: 1) Pre-deployment checklist (environment variables, secrets, database migrations, build verification), 2) Deployment options with step-by-step instructions (cloud platforms like AWS/GCP/Azure, VPS, containerized with Docker/Kubernetes, static hosting if applicable), 3) DNS and SSL/TLS setup, 4) Monitoring and logging setup (health checks, APM, error tracking), 5) Scaling considerations (horizontal/vertical, load balancing, CDN), 6) Rollback strategy, 7) Post-deployment verification steps. Tailor specifically to this project's tech stack.`;
  return buildPrompt(instruction, ctx);
}
847
+
848
+ // src/engines/ci-engine.ts
849
/**
 * Runs the CI/CD engine: generates or audits CI config, Dockerfile,
 * docker-compose, .env.example and a production guide, then scores the
 * project's existing CI/CD infrastructure (generated files do not add points).
 * @param input - engine input (rootDir, context, hasCIConfig, hasDockerfile, hasDockerCompose, hasEnvExample)
 * @param output - output manager used to write files under "infrastructure/"
 * @param progress - progress reporter (start/increment/succeed)
 * @returns {Promise<{score: number, details: string}>} category score + summary
 */
async function runCIEngine(input, output, progress) {
  // Exactly one increment fires in each of the four unconditional branches
  // (CI, Dockerfile, .env.example, production guide); docker-compose adds an
  // optional fifth step. The old formula `4 + (hasDockerfile ? 1 : 1)` always
  // added 1 and overcounted the total by one step.
  const totalSteps = 4 + (input.hasDockerCompose ? 0 : 1);
  progress.start("CI/CD Engine", totalSteps);
  const ctx = { ...input.context };
  const generated = [];
  if (!input.hasCIConfig) {
    progress.increment("Generating CI pipeline");
    const ciResult = await askCopilot(ciPipelinePrompt(ctx));
    await output.writeToSubdir("infrastructure", "ci.yml", ciResult);
    generated.push("ci.yml");
  } else {
    progress.increment("CI config exists");
    generated.push("CI config already present");
  }
  if (input.hasDockerfile) {
    progress.increment("Auditing Dockerfile");
    try {
      const dockerContent = await readFileTruncated(join5(input.rootDir, "Dockerfile"));
      ctx.codeContext = dockerContent;
      const auditResult = await askCopilot(dockerfileAuditPrompt(ctx));
      await output.writeToSubdir("infrastructure", "DOCKER_AUDIT.md", auditResult);
      generated.push("DOCKER_AUDIT.md");
    } catch {
      // Best-effort: this step was already counted by the increment above, so
      // a failed Dockerfile read must not advance the progress bar again.
    }
  } else {
    progress.increment("Generating Dockerfile");
    const dockerResult = await askCopilot(dockerfileGeneratePrompt(ctx));
    await output.writeToSubdir("infrastructure", "Dockerfile.suggested", dockerResult);
    generated.push("Dockerfile.suggested");
  }
  if (!input.hasDockerCompose) {
    progress.increment("Generating Docker Compose");
    const composeResult = await askCopilot(dockerComposePrompt(ctx));
    await output.writeToSubdir("infrastructure", "docker-compose.suggested.yml", composeResult);
    generated.push("docker-compose.suggested.yml");
  }
  if (!input.hasEnvExample) {
    progress.increment("Generating .env.example");
    const envResult = await askCopilot(envExamplePrompt(ctx));
    await output.writeToSubdir("infrastructure", ".env.example", envResult);
    generated.push(".env.example");
  } else {
    progress.increment(".env.example exists");
  }
  progress.increment("Generating production guide");
  const prodGuide = await askCopilot(takeItToProdPrompt(ctx));
  await output.writeToSubdir("infrastructure", "take-it-to-prod.md", prodGuide);
  generated.push("take-it-to-prod.md");
  progress.succeed("CI/CD Engine");
  // Score only what already existed before this run.
  let score = 15;
  if (input.hasCIConfig) score += 35;
  if (input.hasDockerfile) score += 20;
  if (input.hasEnvExample) score += 15;
  if (input.hasDockerCompose) score += 15;
  score = Math.max(0, Math.min(100, score));
  const missing = [];
  if (!input.hasCIConfig) missing.push("CI pipeline");
  if (!input.hasDockerfile) missing.push("Dockerfile");
  if (!input.hasEnvExample) missing.push(".env.example");
  if (!input.hasDockerCompose) missing.push("docker-compose");
  return {
    score,
    details: missing.length > 0 ? `Missing: ${missing.join(", ")}` : "All CI/CD infrastructure present"
  };
}
915
+
916
+ // src/prompts/api-test.prompts.ts
917
/** Builds the prompt that asks Copilot for per-endpoint API test documentation. */
function apiTestsMarkdownPrompt(ctx) {
  const instruction =
    "Generate API test documentation (API_TESTS.md). For each detected endpoint, provide: method, path, description, example request (curl command), expected response, and edge case tests (missing fields, invalid types, unauthorized access).";
  return buildPrompt(instruction, ctx);
}
923
/** Builds the prompt that asks Copilot for a Postman v2.1 collection (JSON output mode). */
function postmanCollectionPrompt(ctx) {
  const instruction =
    "Generate a Postman collection JSON (v2.1 format) for all detected API endpoints. Include: request method, URL, headers, request body examples, and test scripts for response validation. Use {{baseUrl}} variable.";
  return buildPrompt(instruction, ctx, "json");
}
930
/** Builds the prompt that asks Copilot to estimate existing test coverage. */
function testCoveragePrompt(ctx) {
  const instruction =
    "Analyze the existing test coverage of this project. Report: which files/functions have tests, which ones are missing tests, overall estimated coverage percentage, and the test-to-code ratio. Identify the most critical untested areas.";
  return buildPrompt(instruction, ctx);
}
936
/** Builds the prompt that asks Copilot to list missing tests with skeletons. */
function missingTestsPrompt(ctx) {
  const instruction =
    "Generate a list of missing tests for this project. For each untested function/route/component, provide: the file and function name, why it should be tested, a suggested test case description, and a code skeleton for the test.";
  return buildPrompt(instruction, ctx);
}
942
/** Builds the prompt that asks Copilot for a curl-based endpoint smoke-test script. */
function shellTestsPrompt(ctx) {
  const instruction =
    "Generate a shell script (api-tests.sh) that tests all detected API endpoints sequentially. Use curl commands with proper headers. Include success/failure checking, colored output, and a summary at the end.";
  return buildPrompt(instruction, ctx);
}
948
+
949
+ // src/engines/api-test-engine.ts
950
/**
 * Runs the API testing engine: enriches the prompt context with detected
 * routes and existing test files, asks Copilot for five testing artifacts
 * (docs, Postman collection, shell script, coverage report, missing tests),
 * and derives a deterministic 0-100 testing score.
 * @param input - engine input (routes, files, context)
 * @param output - output manager used to write files under "testing/"
 * @param progress - progress reporter (start/increment/succeed)
 * @returns {Promise<{score: number, details: string}>} category score + summary
 */
async function runAPITestEngine(input, output, progress) {
  progress.start("API Testing Engine", 5);
  const ctx = { ...input.context };
  if (input.routes.length > 0) {
    // Give the AI the detected route table so generated tests match real endpoints.
    ctx.additionalContext = (ctx.additionalContext || "") + `
Detected API routes:
${input.routes.map((r) => `${r.method} ${r.path} (${r.file})`).join("\n")}`;
  }
  // Heuristic test-file detection by common naming conventions (JS/TS and Python).
  const testFiles = input.files.filter(
    (f) => f.includes(".test.") || f.includes(".spec.") || f.includes("__tests__") || f.includes("test_")
  );
  // Cap the listing at 20 files to keep the prompt size bounded.
  ctx.additionalContext = (ctx.additionalContext || "") + `
Existing test files (${testFiles.length}): ${testFiles.slice(0, 20).join(", ")}`;
  progress.increment("API test documentation");
  const testsDoc = await askCopilot(apiTestsMarkdownPrompt(ctx));
  await output.writeToSubdir("testing", "API_TESTS.md", testsDoc);
  progress.increment("Postman collection");
  const postmanResult = await askCopilot(postmanCollectionPrompt(ctx));
  await output.writeToSubdir("testing", "api-collection.json", postmanResult);
  progress.increment("Shell test script");
  const shellResult = await askCopilot(shellTestsPrompt(ctx));
  await output.writeToSubdir("testing", "api-tests.sh", shellResult);
  progress.increment("Test coverage analysis");
  const coverageResult = await askCopilot(testCoveragePrompt(ctx));
  await output.writeToSubdir("testing", "TEST_COVERAGE_REPORT.md", coverageResult);
  progress.increment("Missing tests analysis");
  const missingResult = await askCopilot(missingTestsPrompt(ctx));
  await output.writeToSubdir("testing", "MISSING_TESTS.md", missingResult);
  progress.succeed("API Testing Engine");
  // Deterministic scoring: base 10, bonuses for test-file count thresholds,
  // then a route-coverage ratio bonus (or a flat count bonus when no routes).
  let score = 10;
  if (testFiles.length > 0) score += 20;
  if (testFiles.length > 5) score += 10;
  if (testFiles.length > 10) score += 10;
  if (testFiles.length > 20) score += 10;
  if (input.routes.length > 0) {
    // ratio is clamped to 1 so more tests than routes cannot overshoot.
    const ratio = Math.min(testFiles.length / input.routes.length, 1);
    score += Math.round(ratio * 30);
    if (ratio < 0.3) score -= 10;
  } else {
    score += Math.min(testFiles.length * 3, 20);
  }
  if (testFiles.length === 0) score -= 5;
  score = Math.max(0, Math.min(100, score));
  return {
    score,
    details: `${testFiles.length} test files, ${input.routes.length} routes, coverage ratio: ${input.routes.length > 0 ? Math.round(testFiles.length / input.routes.length * 100) + "%" : "N/A"}`
  };
}
998
+
999
+ // src/engines/performance-engine.ts
1000
+ import { join as join6 } from "path";
1001
+
1002
+ // src/prompts/performance.prompts.ts
1003
/** Builds the prompt that asks Copilot for a performance anti-pattern audit. */
function performanceAuditPrompt(ctx) {
  const instruction =
    "Perform a performance audit of this codebase. Detect anti-patterns: N+1 queries, missing database indexes, blocking I/O in async code, memory leak patterns, unbounded queries, missing caching, inefficient algorithms, large payload responses, missing compression, bundle size issues. For each finding: describe the issue, its performance impact, location, and provide an optimized solution.";
  return buildPrompt(instruction, ctx);
}
1009
/** Builds the prompt that asks Copilot for a JSON performance scorecard. */
function performanceScorePrompt(ctx) {
  const instruction =
    "Generate a performance score (0-100) for this codebase as JSON. Include scores for: backend performance, frontend performance (if applicable), database efficiency, caching strategy, and infrastructure optimization. Provide an overall weighted score.";
  return buildPrompt(instruction, ctx, "json");
}
1016
+
1017
+ // src/engines/performance-engine.ts
1018
/**
 * Regex-based local performance scan. Reads up to 80 code files under rootDir
 * and reports each anti-pattern with its occurrence count and impact note.
 * Unreadable files are skipped silently (best-effort scan).
 */
function quickPerformanceScan(rootDir, files) {
  const CHECKS = [
    { name: "Sync file I/O in async context", pattern: /readFileSync|writeFileSync/g, impact: "Blocks event loop" },
    { name: "SELECT * query", pattern: /SELECT\s+\*/gi, impact: "Fetches unnecessary data" },
    { name: "Missing LIMIT clause", pattern: /\.find\(\s*\{[^}]*\}\s*\)(?!.*\.limit)/g, impact: "Unbounded query results" },
    { name: "Nested await in loop", pattern: /for\s*\([^)]*\)\s*\{[^}]*await\s/g, impact: "Sequential async operations" },
    { name: "console.log in hot path", pattern: /console\.log/g, impact: "I/O overhead in production" },
    { name: "Large JSON.stringify", pattern: /JSON\.stringify\s*\([^)]*\)/g, impact: "CPU-intensive for large objects" },
    { name: "No compression middleware", pattern: /compression/g, impact: "Check: compression middleware" }
  ];
  const isCodeFile = (f) =>
    f.endsWith(".ts") || f.endsWith(".js") || f.endsWith(".py") || f.endsWith(".go");
  const findings = [];
  for (const file of files.filter(isCodeFile).slice(0, 80)) {
    let content;
    try {
      content = readFileContentSync(join6(rootDir, file));
    } catch {
      continue; // unreadable file — skip, best effort
    }
    for (const { name, pattern, impact } of CHECKS) {
      // Fresh RegExp per file: /g regexes carry stateful lastIndex.
      const hits = content.match(new RegExp(pattern.source, pattern.flags)) || [];
      if (hits.length > 0) {
        findings.push(`${name} in ${file} (${hits.length} occurrence(s)) \u2014 ${impact}`);
      }
    }
  }
  return findings;
}
1047
/**
 * Runs the performance engine: local anti-pattern scan, then two
 * Copilot-generated artifacts (audit + JSON scorecard), then a deterministic
 * local score independent of the AI output.
 * @param input - engine input (rootDir, files, context)
 * @param output - output manager used to write files under "performance/"
 * @param progress - progress reporter (start/increment/succeed)
 * @returns {Promise<{score: number, details: string}>} category score + summary
 */
async function runPerformanceEngine(input, output, progress) {
  progress.start("Performance Engine", 3);
  progress.increment("Pattern scanning");
  const quickFindings = quickPerformanceScan(input.rootDir, input.files);
  const ctx = { ...input.context };
  if (quickFindings.length > 0) {
    // Feed local findings into the AI prompts as extra context.
    ctx.additionalContext = (ctx.additionalContext || "") + `
Performance scan findings:
${quickFindings.join("\n")}`;
  }
  progress.increment("Performance audit");
  const auditResult = await askCopilot(performanceAuditPrompt(ctx));
  await output.writeToSubdir("performance", "PERFORMANCE_AUDIT.md", auditResult);
  progress.increment("Performance scoring");
  const scoreResult = await askCopilot(performanceScorePrompt(ctx));
  await output.writeToSubdir("performance", "performance-score.json", scoreResult);
  progress.succeed("Performance Engine");
  // Deterministic score: 70 base minus 5 per finding; a clean scan is worth 95.
  let score = 70;
  score -= quickFindings.length * 5;
  if (quickFindings.length === 0) score = 95;
  // Floor of 10 (not 0) — many findings still leave a nonzero score.
  score = Math.max(10, Math.min(100, score));
  return {
    score,
    details: `${quickFindings.length} performance anti-patterns detected`
  };
}
1073
+
1074
+ // src/prompts/team.prompts.ts
1075
/** Builds the prompt that asks Copilot for a stack-specific PR template. */
function prTemplatePrompt(ctx) {
  const instruction =
    'Generate a pull request template (PULL_REQUEST_TEMPLATE.md) tailored to this project. Include: description section, type of change checkboxes, checklist specific to the tech stack (e.g., "Did you run tests?", "Did you update types?"), screenshots section for UI changes, and related issues section.';
  return buildPrompt(instruction, ctx);
}
1081
/** Builds the prompt that asks Copilot for a GitHub bug-report issue template. */
function bugReportPrompt(ctx) {
  const instruction =
    "Generate a GitHub issue template for bug reports (bug_report.md) in YAML frontmatter format. Include fields: description, steps to reproduce, expected behavior, actual behavior, environment (with options detected from the project), screenshots, and additional context.";
  return buildPrompt(instruction, ctx);
}
1087
/** Builds the prompt that asks Copilot for a feature-request issue template. */
function featureRequestPrompt(ctx) {
  const instruction =
    "Generate a GitHub issue template for feature requests (feature_request.md) in YAML frontmatter format. Include fields: feature description, problem it solves, proposed solution, alternatives considered, and additional context.";
  return buildPrompt(instruction, ctx);
}
1093
/** Builds the prompt that asks Copilot for a stack-specific code review checklist. */
function codeReviewChecklistPrompt(ctx) {
  const instruction =
    "Generate a code review checklist (CODE_REVIEW_CHECKLIST.md) specific to this project stack. Include checks for: code quality, type safety, error handling, testing, security, performance, accessibility (if frontend), database (if applicable), and documentation.";
  return buildPrompt(instruction, ctx);
}
1099
/** Builds the prompt that asks Copilot for a new-contributor onboarding guide. */
function onboardingPrompt(ctx) {
  const instruction =
    'Generate an ONBOARDING.md guide for new contributors. Include: architecture overview, key files and what they do, development workflow, how to run/test locally, common tasks walkthrough, coding conventions, and a "start here" path for first contributions.';
  return buildPrompt(instruction, ctx);
}
1105
/** Builds the prompt that asks Copilot for a Git workflow document. */
function developmentWorkflowPrompt(ctx) {
  const instruction =
    "Generate a DEVELOPMENT_WORKFLOW.md describing the recommended Git workflow. Include: branching strategy (feature branches, release branches), commit conventions, PR process, code review expectations, release process, and hotfix procedure.";
  return buildPrompt(instruction, ctx);
}
1111
+
1112
+ // src/engines/team-engine.ts
1113
/**
 * Runs the team/collaboration engine: generates PR and issue templates,
 * CODEOWNERS, review checklist, onboarding and workflow guides, then scores
 * the project's pre-existing collaboration files.
 * @param input - engine input (context, gitAnalysis, hasPRTemplate, hasIssueTemplates, hasCodeowners)
 * @param output - output manager used to write files under "team/"
 * @param progress - progress reporter (start/increment/succeed)
 * @returns {Promise<{score: number, details: string}>} category score + summary
 */
async function runTeamEngine(input, output, progress) {
  // 8 steps: the workflow-guide generation previously ran without its own
  // increment, leaving the progress bar one step short of the work done.
  progress.start("Team Engine", 8);
  const ctx = { ...input.context };
  if (input.gitAnalysis.contributors.length > 0) {
    ctx.additionalContext = (ctx.additionalContext || "") + `
Contributors:
${input.gitAnalysis.contributors.map((c) => `${c.name} (${c.commits} commits)`).join("\n")}`;
  }
  progress.increment("PR template");
  const prResult = await askCopilot(prTemplatePrompt(ctx));
  await output.writeToSubdir("team", "PULL_REQUEST_TEMPLATE.md", prResult);
  progress.increment("Bug report template");
  const bugResult = await askCopilot(bugReportPrompt(ctx));
  await output.writeToSubdir("team/ISSUE_TEMPLATE", "bug_report.md", bugResult);
  progress.increment("Feature request template");
  const featureResult = await askCopilot(featureRequestPrompt(ctx));
  await output.writeToSubdir("team/ISSUE_TEMPLATE", "feature_request.md", featureResult);
  progress.increment("Issue template config");
  const configYml = `blank_issues_enabled: true
contact_links:
  - name: Documentation
    url: ./README.md
    about: Check the documentation before filing an issue
`;
  await output.writeToSubdir("team/ISSUE_TEMPLATE", "config.yml", configYml);
  progress.increment("CODEOWNERS");
  const codeownersLines = ["# Auto-generated by RepoSentry", ""];
  for (const [dir, owner] of input.gitAnalysis.directoryOwnership) {
    codeownersLines.push(`/${dir}/ @${owner.replace(/\s+/g, "-").toLowerCase()}`);
  }
  if (codeownersLines.length <= 2) {
    // No per-directory ownership detected: fall back to the top contributor.
    codeownersLines.push("* @" + (input.gitAnalysis.contributors[0]?.name.replace(/\s+/g, "-").toLowerCase() || "owner"));
  }
  await output.writeToSubdir("team", "CODEOWNERS", codeownersLines.join("\n"));
  progress.increment("Code review checklist");
  const reviewResult = await askCopilot(codeReviewChecklistPrompt(ctx));
  await output.writeToSubdir("team", "CODE_REVIEW_CHECKLIST.md", reviewResult);
  progress.increment("Onboarding guide");
  const onboardResult = await askCopilot(onboardingPrompt(ctx));
  await output.writeToSubdir("team", "ONBOARDING.md", onboardResult);
  progress.increment("Development workflow");
  const workflowResult = await askCopilot(developmentWorkflowPrompt(ctx));
  await output.writeToSubdir("team", "DEVELOPMENT_WORKFLOW.md", workflowResult);
  progress.succeed("Team Engine");
  // Score only what already existed before this run.
  let score = 15;
  if (input.hasPRTemplate) score += 25;
  if (input.hasIssueTemplates) score += 20;
  if (input.hasCodeowners) score += 20;
  if (input.gitAnalysis.contributors.length > 1) score += 10;
  if (input.gitAnalysis.contributors.length > 3) score += 10;
  const missing = [];
  if (!input.hasPRTemplate) missing.push("PR template");
  if (!input.hasIssueTemplates) missing.push("issue templates");
  if (!input.hasCodeowners) missing.push("CODEOWNERS");
  score = Math.max(0, Math.min(100, score));
  return {
    score,
    details: missing.length > 0 ? `Missing: ${missing.join(", ")} | ${input.gitAnalysis.contributors.length} contributors` : `All collaboration files present | ${input.gitAnalysis.contributors.length} contributors`
  };
}
1172
+
1173
+ // src/utils/scoring.ts
1174
/**
 * Maps a 0-100 numeric score to a letter grade on the standard +/- scale.
 * Anything below 60 is an F.
 */
function calculateGrade(score) {
  const scale = [
    [97, "A+"], [93, "A"], [90, "A-"],
    [87, "B+"], [83, "B"], [80, "B-"],
    [77, "C+"], [73, "C"], [70, "C-"],
    [67, "D+"], [63, "D"], [60, "D-"]
  ];
  for (const [cutoff, grade] of scale) {
    if (score >= cutoff) return grade;
  }
  return "F";
}
1189
/**
 * Computes the weighted-average overall score across category results.
 * Security and Testing carry extra weight; unknown categories default to 1.
 * Returns 0 for an empty category list.
 */
function calculateOverallScore(categories) {
  if (categories.length === 0) return 0;
  const WEIGHTS = {
    "Security": 2,
    "Testing": 1.5,
    "CI/CD": 1.2,
    "Documentation": 1,
    "Architecture": 1,
    "Performance": 1.2,
    "Collaboration": 0.8
  };
  const [weightedSum, totalWeight] = categories.reduce(
    ([sum, total], { name, score }) => {
      const w = WEIGHTS[name] ?? 1;
      return [sum + score * w, total + w];
    },
    [0, 0]
  );
  return Math.round(weightedSum / totalWeight);
}
1209
+
1210
+ // src/prompts/health.prompts.ts
1211
/** Builds the prompt that asks Copilot for the markdown health dashboard summary. */
function healthSummaryPrompt(ctx) {
  const instruction =
    "Generate a concise health report summary for this project. Based on the analysis results provided in the additional context, create a markdown health dashboard showing: overall grade (A-F), per-category grades and scores, top 3 priority actions, and key statistics (files scanned, languages, contributors).";
  return buildPrompt(instruction, ctx);
}
1217
+
1218
+ // src/engines/health-engine.ts
1219
+ import { readFileSync as readFileSync3, writeFileSync as writeFileSync2, existsSync as existsSync2 } from "fs";
1220
+ import { join as join7 } from "path";
1221
/**
 * Loads prior analysis entries from <outputDir>/history.json.
 * Returns an empty array when the file is absent, unparseable, or not an array.
 */
function loadHistory(outputDir) {
  const historyPath = join7(outputDir, "history.json");
  if (!existsSync2(historyPath)) return [];
  try {
    const parsed = JSON.parse(readFileSync3(historyPath, "utf-8"));
    if (Array.isArray(parsed)) return parsed;
  } catch {
    // corrupt history is treated as no history
  }
  return [];
}
1232
/** Persists the analysis history as pretty-printed JSON at <outputDir>/history.json. */
function saveHistory(outputDir, history) {
  const payload = JSON.stringify(history, null, 2);
  writeFileSync2(join7(outputDir, "history.json"), payload, "utf-8");
}
1236
/**
 * Builds the static SCORING_METHODOLOGY.md content describing grade scale,
 * category weights and per-category scoring rules.
 * The text is fully static; the `categories` parameter is unused but kept for
 * call-site compatibility. (Removed the dead local `weights` object, which was
 * declared but never referenced anywhere in the body.)
 * @param categories - unused; retained for interface compatibility
 * @returns {string} the markdown document
 */
function buildMethodologyDoc(categories) {
  const lines = [
    "# Scoring Methodology",
    "",
    "> **Transparency note:** RepoSentry scores are based on what your project *already has*, not on what RepoSentry generates. Generated files do not inflate your score.",
    "",
    "---",
    "",
    "## Grade Scale",
    "",
    "| Grade | Score Range |",
    "|-------|------------|",
    "| A+ | 97 \u2013 100 |",
    "| A | 93 \u2013 96 |",
    "| A- | 90 \u2013 92 |",
    "| B+ | 87 \u2013 89 |",
    "| B | 83 \u2013 86 |",
    "| B- | 80 \u2013 82 |",
    "| C+ | 77 \u2013 79 |",
    "| C | 73 \u2013 76 |",
    "| C- | 70 \u2013 72 |",
    "| D+ | 67 \u2013 69 |",
    "| D | 63 \u2013 66 |",
    "| D- | 60 \u2013 62 |",
    "| F | 0 \u2013 59 |",
    "",
    "---",
    "",
    "## Overall Score = Weighted Average",
    "",
    "Not all categories weigh equally. Security and Testing carry more weight because they directly affect production readiness.",
    "",
    "| Category | Weight | Reason |",
    "|----------|--------|--------|",
    "| Security | 2.0\xD7 | Vulnerabilities directly impact production safety |",
    "| Testing | 1.5\xD7 | Test coverage is critical for reliability |",
    "| CI/CD | 1.2\xD7 | Automation reduces human error |",
    "| Performance | 1.2\xD7 | Anti-patterns affect user experience |",
    "| Documentation | 1.0\xD7 | Standard weight |",
    "| Architecture | 1.0\xD7 | Standard weight |",
    "| Collaboration | 0.8\xD7 | Important but less urgent than code quality |",
    "",
    "**Formula:** `Overall = \u03A3(category_score \xD7 weight) / \u03A3(weight)`",
    "",
    "---",
    "",
    "## Per-Category Scoring",
    "",
    "### Documentation (weight: 1.0\xD7)",
    "- Base: 20 points",
    "- Has existing README: +25",
    "- Has API routes documented: +10",
    "- Active development (>3 recent commits): +10",
    "- Has version tags: +15",
    "- No README: \u221210",
    "- No commits: \u22125",
    "",
    "### Architecture (weight: 1.0\xD7)",
    "- Base: 30 points",
    "- Has module imports (structured codebase): +15",
    "- Has data models: +15",
    "- Has API routes: +15",
    "- 3+ top-level directories (separation of concerns): +10",
    "- 5+ top-level directories: +5",
    "- No imports and no routes (monolithic): \u221210",
    "",
    "### Security (weight: 2.0\xD7)",
    "- Starts at 100 (clean baseline)",
    "- Per High-severity finding (hardcoded secrets, SQL injection, etc.): \u221220",
    "- Per Medium-severity finding (eval, CORS, MD5): \u221210",
    "- Per Low-severity finding (console.log, etc.): \u22123",
    "- No .gitignore: \u221215",
    "- .env file committed: \u221210",
    "- AI-identified critical/high-risk findings: \u22125 each",
    "- AI-identified medium-risk findings: \u22122 each",
    "",
    "### CI/CD (weight: 1.2\xD7)",
    "- Base: 15 points",
    "- Has CI/CD pipeline config: +35",
    "- Has Dockerfile: +20",
    "- Has .env.example: +15",
    "- Has docker-compose: +15",
    "",
    "### Testing (weight: 1.5\xD7)",
    "- Base: 10 points",
    "- Has any test files: +20",
    "- Has >5 test files: +10",
    "- Has >10 test files: +10",
    "- Has >20 test files: +10",
    "- Route coverage ratio \xD7 30 (if routes exist)",
    "- Very low test-to-route ratio (<30%): \u221210",
    "- Zero test files: \u22125",
    "",
    "### Performance (weight: 1.2\xD7)",
    "- Base: 70 points (if anti-patterns found) / 95 (if clean)",
    "- Per anti-pattern detected: \u22125",
    "- Scans for: sync I/O, SELECT *, unbounded queries, nested awaits, console.log in hot paths, uncompressed payloads",
    "",
    "### Collaboration (weight: 0.8\xD7)",
    "- Base: 15 points",
    "- Has PR template: +25",
    "- Has issue templates: +20",
    "- Has CODEOWNERS: +20",
    "- Multi-contributor (>1): +10",
    "- Active team (>3 contributors): +10",
    "",
    "---",
    "",
    "*Scoring is deterministic and reproducible. Run the same analysis twice on an unchanged codebase and you will get the same score.*",
    ""
  ];
  return lines.join("\n");
}
1358
/**
 * Renders the "Score History" markdown section from past analysis entries:
 * a table of the 10 most recent runs (newest first) plus a trend line.
 * Returns an empty string unless there are at least two entries to compare.
 */
function buildHistorySection(history) {
  if (history.length <= 1) return "";
  const CATEGORY_ORDER = ["Documentation", "Architecture", "Security", "CI/CD", "Testing", "Performance", "Collaboration"];
  const out = [
    "",
    "---",
    "",
    "## Score History",
    "",
    "| # | Date | Overall | Grade | Documentation | Architecture | Security | CI/CD | Testing | Performance | Collaboration |",
    "|---|------|---------|-------|---------------|--------------|----------|-------|---------|-------------|---------------|"
  ];
  history.slice(-10).reverse().forEach((entry, i) => {
    const date = new Date(entry.analyzedAt).toLocaleDateString("en-US", { year: "numeric", month: "short", day: "numeric" });
    const byName = new Map(entry.categories.map((c) => [c.name, c]));
    // Fixed column order; missing categories render as an em dash.
    const scores = CATEGORY_ORDER.map((name) => {
      const cat = byName.get(name);
      return cat ? `${cat.score}` : "\u2014";
    });
    out.push(`| ${i + 1} | ${date} | **${entry.overallScore}** | ${entry.overallGrade} | ${scores.join(" | ")} |`);
  });
  // length >= 2 is guaranteed here, so the trend is always rendered.
  const [prev, curr] = history.slice(-2);
  const diff = curr.overallScore - prev.overallScore;
  const arrow = diff > 0 ? "\u{1F4C8}" : diff < 0 ? "\u{1F4C9}" : "\u27A1\uFE0F";
  const sign = diff > 0 ? "+" : "";
  out.push("", `**Trend:** ${arrow} ${sign}${diff} points since last analysis`);
  out.push("");
  return out.join("\n");
}
1392
/**
 * Runs the health-report engine: aggregates per-category results into an
 * overall weighted score/grade, asks Copilot for a dashboard summary, records
 * the run in history.json, and writes HEALTH_REPORT.md, analysis.json,
 * SCORING_METHODOLOGY.md and a shields.io badge snippet.
 * @param input - engine input (context, categories, filesScanned, totalFiles)
 * @param output - output manager (write/getBaseDir)
 * @param progress - progress reporter (start/increment/succeed)
 * @returns {Promise<{overallScore: number, overallGrade: string}>}
 */
async function runHealthEngine(input, output, progress) {
  progress.start("Health Report", 5);
  const overallScore = calculateOverallScore(input.categories);
  const overallGrade = calculateGrade(overallScore);
  const ctx = { ...input.context };
  // Pass the computed results to the AI so the summary matches the real scores.
  ctx.additionalContext = (ctx.additionalContext || "") + `
Analysis Results:
Overall Score: ${overallScore}/100 (${overallGrade})
` + input.categories.map((c) => `${c.name}: ${c.score}/100 (${c.grade}) \u2014 ${c.details}`).join("\n");
  progress.increment("Health report");
  const reportResult = await askCopilot(healthSummaryPrompt(ctx));
  progress.increment("Score history");
  const outputDir = output.getBaseDir();
  const history = loadHistory(outputDir);
  const now = (/* @__PURE__ */ new Date()).toUTCString();
  const currentEntry = {
    analyzedAt: now,
    overallScore,
    overallGrade,
    categories: input.categories.map((c) => ({ name: c.name, score: c.score, grade: c.grade, details: c.details }))
  };
  // The current run is appended BEFORE rendering, so the history section
  // includes this run and the trend compares it against the previous one.
  history.push(currentEntry);
  saveHistory(outputDir, history);
  const historySection = buildHistorySection(history);
  const header = `# RepoSentry Health Report \u2014 ${input.context.projectName}

**Overall Grade: ${overallGrade}** (${overallScore}/100)
**Analyzed:** ${now}
**Files Scanned:** ${input.filesScanned} | **Languages:** ${input.context.languages.join(", ")}

| Category | Grade | Score | Details |
|----------|-------|-------|---------|
${input.categories.map((c) => `| ${c.name} | ${c.grade} | ${c.score} | ${c.details} |`).join("\n")}

> \u{1F4CA} See [SCORING_METHODOLOGY.md](./SCORING_METHODOLOGY.md) for how these scores are calculated.
${historySection}
---

`;
  await output.write("HEALTH_REPORT.md", header + reportResult);
  progress.increment("analysis.json");
  // Machine-readable mirror of the report for tooling/CI consumption.
  const analysisJson = JSON.stringify({
    project: input.context.projectName,
    analyzedAt: now,
    overallScore,
    overallGrade,
    languages: input.context.languages,
    frameworks: input.context.frameworks,
    filesScanned: input.filesScanned,
    totalFiles: input.totalFiles,
    categories: input.categories.map((c) => ({
      name: c.name,
      score: c.score,
      grade: c.grade,
      details: c.details
    })),
    historyEntries: history.length
  }, null, 2);
  await output.write("analysis.json", analysisJson);
  progress.increment("Scoring methodology");
  const methodology = buildMethodologyDoc(input.categories);
  await output.write("SCORING_METHODOLOGY.md", methodology);
  progress.increment("Badge");
  // shields.io static badge: green >= 80, yellow >= 60, red otherwise.
  const badgeColor = overallScore >= 80 ? "brightgreen" : overallScore >= 60 ? "yellow" : "red";
  const badgeUrl = `https://img.shields.io/badge/RepoSentry-${overallGrade}%20(${overallScore}%25)-${badgeColor}`;
  const badgeMd = `# RepoSentry Badge

[![RepoSentry Score: ${overallGrade}](${badgeUrl})](./HEALTH_REPORT.md)

Add this to your README:
\`\`\`markdown
[![RepoSentry Score: ${overallGrade}](${badgeUrl})](./HEALTH_REPORT.md)
\`\`\`
`;
  await output.write("badge.md", badgeMd);
  progress.succeed("Health Report");
  return { overallScore, overallGrade };
}
1470
+
1471
+ // src/core/orchestrator.ts
1472
/**
 * True when no individual engine flag was requested, meaning the full
 * analysis (every engine) should run.
 */
function shouldRunAll(opts) {
  const engineFlags = [
    "docs",
    "architecture",
    "security",
    "ci",
    "apiTests",
    "performance",
    "team",
    "health"
  ];
  return engineFlags.every((flag) => !opts[flag]);
}
1475
// Top-level analysis driver for `reposentry analyze`.
// Scans the repo, runs the selected engines (or all of them when no engine
// flag is set), collects per-category scores, and finalizes output files.
// NOTE(review): heavy console/side-effect orchestration — statement order
// (progress step counting, logger output, engine sequence) is significant.
async function runAnalysis(opts) {
  const cwd = process.cwd();
  const runAll = shouldRunAll(opts);
  if (opts.verbose) setVerbose(true);
  const banner = boxen(
    `${chalk.bold.cyan("RepoSentry")} ${chalk.dim(`v${getCliVersion()}`)}
${chalk.dim("AI-Powered Codebase Intelligence Platform")}`,
    { padding: 1, borderColor: "cyan", borderStyle: "round" }
  );
  console.log(banner);
  logger.blank();
  // Git availability gates the Collaboration engine and git-history analysis.
  const isGit = isGitRepo(cwd);
  if (!isGit) {
    logger.warn("Not a Git repository \u2014 git-related analysis will be limited.");
  }
  if (opts.model) {
    setCopilotModel(opts.model);
    logger.info(`\u{1F9E0} Model: ${opts.model}`);
  }
  // Analysis still proceeds without Copilot; AI steps degrade to placeholders.
  if (!isCopilotAvailable()) {
    logger.warn("No Copilot CLI detected. Install: npm i -g @github/copilot OR winget install GitHub.Copilot");
    logger.warn("Continuing \u2014 Copilot-powered analysis will return placeholder results.");
  } else {
    logger.info(`\u{1F916} Copilot backend: ${getCopilotBackendName()}`);
  }
  // Phase 1: file scan + language/config/project detection.
  logger.info("\u{1F4C2} Scanning repository...");
  const scanResult = await scanFiles(cwd, opts.ignore);
  const langInfo = detectLanguages(cwd, scanResult.files);
  const configInfo = detectConfigs(cwd, scanResult.files);
  const projectName = getRepoName(cwd);
  logger.info(` Detected: ${langInfo.runtime || langInfo.languages[0] || "Unknown"} (${langInfo.languages.join(", ")}) | ${langInfo.frameworks.join(", ") || "No frameworks detected"}`);
  logger.info(` Files: ${scanResult.totalFiles} | Package Manager: ${langInfo.packageManager || "unknown"}`);
  logger.blank();
  // Phase 2: static deep analysis (imports, routes, models, git history).
  logger.info("\u{1F50D} Running deep analysis...");
  const imports = parseImports(cwd, scanResult.files);
  const routes = detectRoutes(cwd, scanResult.files);
  const models = detectModels(cwd, scanResult.files);
  const gitAnalysis = analyzeGitHistory(cwd);
  if (routes.length > 0) logger.dim(` ${routes.length} API routes detected`);
  if (models.length > 0) logger.dim(` ${models.length} database models detected`);
  if (gitAnalysis.contributors.length > 0) logger.dim(` ${gitAnalysis.contributors.length} contributors found`);
  logger.blank();
  // Shared context object passed to every engine / AI prompt.
  const fileTree = buildDirectoryTree(scanResult.files, 3);
  const context = {
    projectName,
    languages: langInfo.languages,
    frameworks: langInfo.frameworks,
    packageManager: langInfo.packageManager,
    fileTree
  };
  const outputDir = resolve2(cwd, opts.output);
  const outputManager = new OutputManager({
    baseDir: outputDir,
    format: opts.format,
    force: opts.force
  });
  await outputManager.init();
  const categories = [];
  // Collaboration engine needs git history, so it is skipped outside a repo;
  // this must be reflected in the progress step total below.
  const skipTeam = (runAll || opts.team) && !isGit;
  // Count the engines that will actually run so the progress bar is accurate.
  const engines = [
    runAll || opts.docs,
    runAll || opts.architecture,
    runAll || opts.security,
    runAll || opts.ci,
    runAll || opts.apiTests,
    runAll || opts.performance,
    (runAll || opts.team) && !skipTeam,
    runAll || opts.health
  ].filter(Boolean).length;
  const progress = createProgress();
  progress.setTotalSteps(engines);
  console.log(chalk.dim(" \u23F3 Each AI analysis step may take 30-60s \u2014 this is normal.\n"));
  // Phase 3: run each selected engine sequentially; every engine contributes
  // one scored category used by the health report.
  if (runAll || opts.docs) {
    const result = await runDocsEngine(
      { context, routes, recentCommits: gitAnalysis.recentCommits, tags: gitAnalysis.tags, hasReadme: configInfo.hasReadme },
      outputManager,
      progress
    );
    categories.push({ name: "Documentation", score: result.score, grade: calculateGrade(result.score), details: result.details });
  }
  if (runAll || opts.architecture) {
    const result = await runArchitectureEngine(
      { context, imports, models, routes },
      outputManager,
      progress
    );
    categories.push({ name: "Architecture", score: result.score, grade: calculateGrade(result.score), details: result.details });
  }
  if (runAll || opts.security) {
    const result = await runSecurityEngine(
      { context, rootDir: cwd, files: scanResult.files, hasEnvFile: configInfo.hasEnvFile, hasGitignore: configInfo.hasGitignore, hasDockerfile: configInfo.hasDockerfile },
      outputManager,
      progress
    );
    categories.push({ name: "Security", score: result.score, grade: calculateGrade(result.score), details: result.details });
  }
  if (runAll || opts.ci) {
    const result = await runCIEngine(
      { context, rootDir: cwd, hasDockerfile: configInfo.hasDockerfile, hasDockerCompose: configInfo.hasDockerCompose, hasCIConfig: configInfo.hasCIConfig, hasEnvExample: configInfo.hasEnvExample },
      outputManager,
      progress
    );
    categories.push({ name: "CI/CD", score: result.score, grade: calculateGrade(result.score), details: result.details });
  }
  if (runAll || opts.apiTests) {
    const result = await runAPITestEngine(
      { context, routes, files: scanResult.files },
      outputManager,
      progress
    );
    categories.push({ name: "Testing", score: result.score, grade: calculateGrade(result.score), details: result.details });
  }
  if (runAll || opts.performance) {
    const result = await runPerformanceEngine(
      { context, rootDir: cwd, files: scanResult.files },
      outputManager,
      progress
    );
    categories.push({ name: "Performance", score: result.score, grade: calculateGrade(result.score), details: result.details });
  }
  if (runAll || opts.team) {
    if (!isGit) {
      logger.warn("Skipping Collaboration engine \u2014 not a Git repository.");
    } else {
      const result = await runTeamEngine(
        { context, gitAnalysis, hasPRTemplate: configInfo.hasPRTemplate, hasIssueTemplates: configInfo.hasIssueTemplates, hasCodeowners: configInfo.hasCodeowners },
        outputManager,
        progress
      );
      categories.push({ name: "Collaboration", score: result.score, grade: calculateGrade(result.score), details: result.details });
    }
  }
  // Phase 4: health engine aggregates categories; afterwards print a summary
  // box with per-category grades.
  if (runAll || opts.health) {
    const healthResult = await runHealthEngine(
      { context, categories, filesScanned: scanResult.files.length, totalFiles: scanResult.totalFiles },
      outputManager,
      progress
    );
    logger.blank();
    // Decorative emoji per category name; unknown categories fall back below.
    const emojiMap = {
      "Documentation": "\u{1F4DD}",
      "Architecture": "\u{1F3D7}\uFE0F",
      "Security": "\u{1F512}",
      "CI/CD": "\u{1F504}",
      "Testing": "\u{1F9EA}",
      "Performance": "\u26A1",
      "Infrastructure": "\u{1F3E2}",
      "Collaboration": "\u{1F91D}"
    };
    const summaryLines = categories.map((c) => {
      const emoji = emojiMap[c.name] || "\u{1F4CA}";
      return ` ${emoji} ${c.name.padEnd(16)} ${c.grade.padEnd(4)} (${c.score}/100)`;
    });
    const summaryBox = boxen(
      [
        `${chalk.bold("Analysis Complete!")}`,
        "",
        `${chalk.bold("Overall Grade:")} ${chalk.bold.cyan(healthResult.overallGrade)} (${healthResult.overallScore}/100)`,
        "",
        ...summaryLines,
        "",
        `${chalk.dim(`Output: ${opts.output}/ (${outputManager.getFileCount()} files generated)`)}`,
        `${chalk.dim("Run \`reposentry serve\` to preview")}`
      ].join("\n"),
      { padding: 1, borderColor: "green", borderStyle: "round" }
    );
    console.log(summaryBox);
  }
  await outputManager.finalize();
}
1645
+
1646
+ // src/cli.ts
1647
+ import chalk2 from "chalk";
1648
+
1649
+ // src/config.ts
1650
+ import { cosmiconfig } from "cosmiconfig";
1651
// Baseline configuration. Used verbatim when no user config file is found,
// and as the fallback layer for any keys a user config omits (see mergeConfig).
var DEFAULT_CONFIG = {
  // Output directory for generated reports, relative to the working dir.
  output: ".reposentry",
  // Report format: "markdown" | "html" | "json".
  format: "markdown",
  // Analysis depth: "quick" | "standard" | "deep".
  depth: "standard",
  // Engines enabled by default when no per-engine CLI flag is passed.
  engines: {
    docs: true,
    architecture: true,
    security: true,
    ci: true,
    apiTests: true,
    performance: true,
    team: true,
    health: true
  },
  // Glob patterns excluded from the file scan.
  ignore: ["node_modules", "dist", "*.test.ts"],
  // Security-engine tuning.
  security: {
    severityThreshold: "medium",
    ignorePatterns: ["*.test.*"]
  },
  // CI-engine defaults (pipeline provider + Node versions in the matrix).
  ci: {
    provider: "github-actions",
    nodeVersions: ["18", "20"]
  }
};
1675
/**
 * Merge a user-supplied config on top of DEFAULT_CONFIG.
 * Top-level keys are shallow-merged; the nested `engines`, `security` and
 * `ci` sections are each merged key-by-key so a partial section does not
 * discard the remaining defaults. `ignore` is only honored when it is an
 * actual array, otherwise the default ignore list is kept.
 */
function mergeConfig(userConfig) {
  const merged = { ...DEFAULT_CONFIG, ...userConfig };
  // Re-merge nested sections individually — the plain spread above would
  // have replaced them wholesale with the user's (possibly partial) objects.
  merged.engines = { ...DEFAULT_CONFIG.engines, ...(userConfig.engines || {}) };
  merged.security = { ...DEFAULT_CONFIG.security, ...(userConfig.security || {}) };
  merged.ci = { ...DEFAULT_CONFIG.ci, ...(userConfig.ci || {}) };
  merged.ignore = Array.isArray(userConfig.ignore) ? userConfig.ignore : DEFAULT_CONFIG.ignore;
  return merged;
}
1694
/**
 * Load the user's reposentry configuration from the standard cosmiconfig
 * search locations. Returns the user config merged over DEFAULT_CONFIG, or
 * DEFAULT_CONFIG alone when no config exists or it cannot be read/parsed.
 */
async function loadConfig() {
  const explorer = cosmiconfig("reposentry");
  let userConfig;
  try {
    const found = await explorer.search();
    userConfig = found?.config;
  } catch {
    // A malformed or unreadable config file is treated as "no config":
    // analysis should still run with the defaults.
  }
  return userConfig ? mergeConfig(userConfig) : DEFAULT_CONFIG;
}
1705
+
1706
+ // src/cli.ts
1707
+ function createCLI() {
1708
+ const program = new Command();
1709
+ program.name("reposentry").description("RepoSentry \u2014 AI-powered codebase intelligence platform").version(getCliVersion());
1710
+ const analyze = program.command("analyze").description("Analyze the current repository and generate reports").option("--all", "Run all engines (overrides config engine selection)", false).option("-o, --output <dir>", "Output directory (default: from config or .reposentry)").option("-f, --format <type>", "Output format: markdown | html | json (default: from config or markdown)").option("--depth <level>", "Analysis depth: quick | standard | deep (default: from config or standard)").option("--ignore <patterns...>", "Glob patterns to ignore").option("--force", "Overwrite existing output", false).option("-v, --verbose", "Show detailed analysis progress", false).option("-m, --model <model>", "AI model to use (backend-specific)").option("--docs", "Generate documentation suite only").option("--architecture", "Generate architecture diagrams only").option("--security", "Run security audit + threat model only").option("--ci", "Generate CI/CD pipeline + infrastructure only").option("--api-tests", "Generate API test collection only").option("--performance", "Run performance anti-pattern detection only").option("--team", "Generate collaboration templates only").option("--health", "Generate health report + grade only").option("--no-color", "Disable colored output").addHelpText("after", `
1711
+ ${chalk2.bold("Engine Flags:")}
1712
+ Run specific engines instead of the full analysis:
1713
+ --docs README, API docs, SETUP, CONTRIBUTING, CHANGELOG, FAQ
1714
+ --architecture Mermaid diagrams + ARCHITECTURE.md
1715
+ --security Vulnerability scan, threat model, security report
1716
+ --ci GitHub Actions, Dockerfile, Docker Compose, .env
1717
+ --api-tests API test docs, Postman collection, test scripts
1718
+ --performance Anti-pattern detection + performance audit
1719
+ --team PR templates, issue templates, CODEOWNERS, onboarding
1720
+ --health Aggregate health score + grade badge
1721
+
1722
+ ${chalk2.bold("Examples:")}
1723
+ $ reposentry analyze Full analysis with all engines
1724
+ $ reposentry analyze --docs --ci Only docs + CI/CD
1725
+ $ reposentry analyze --model <name> Use a specific model (if supported)
1726
+ $ reposentry analyze --security -v Security audit with verbose output
1727
+ `).action(async (options) => {
1728
+ const config = await loadConfig();
1729
+ const hasAnyEngineFlag = Boolean(
1730
+ options.docs || options.architecture || options.security || options.ci || options.apiTests || options.performance || options.team || options.health
1731
+ );
1732
+ const useConfigEngines = !options.all && !hasAnyEngineFlag;
1733
+ const opts = {
1734
+ output: options.output ?? config.output,
1735
+ format: options.format ?? config.format,
1736
+ depth: options.depth ?? config.depth,
1737
+ ignore: options.ignore && options.ignore.length > 0 ? options.ignore : config.ignore,
1738
+ force: options.force,
1739
+ verbose: options.verbose,
1740
+ model: options.model,
1741
+ ...useConfigEngines ? {
1742
+ docs: config.engines.docs,
1743
+ architecture: config.engines.architecture,
1744
+ security: config.engines.security,
1745
+ ci: config.engines.ci,
1746
+ apiTests: config.engines.apiTests,
1747
+ performance: config.engines.performance,
1748
+ team: config.engines.team,
1749
+ health: config.engines.health
1750
+ } : {
1751
+ docs: options.docs,
1752
+ architecture: options.architecture,
1753
+ security: options.security,
1754
+ ci: options.ci,
1755
+ apiTests: options.apiTests,
1756
+ performance: options.performance,
1757
+ team: options.team,
1758
+ health: options.health
1759
+ }
1760
+ };
1761
+ try {
1762
+ await runAnalysis(opts);
1763
+ } catch (err) {
1764
+ console.error(`
1765
+ \u274C Analysis failed: ${err.message}`);
1766
+ if (options.verbose) console.error(err.stack);
1767
+ process.exit(1);
1768
+ }
1769
+ });
1770
+ program.command("serve").description("Preview generated reports in browser").option("--port <port>", "Port to listen on", "3000").option("-o, --output <dir>", "Output directory to serve (default: from config or .reposentry)").action(async (options) => {
1771
+ const { startServer } = await import("./server-UJNM6VUZ.js");
1772
+ const config = await loadConfig();
1773
+ await startServer({
1774
+ port: parseInt(options.port, 10),
1775
+ outputDir: options.output ?? config.output
1776
+ });
1777
+ });
1778
+ program.command("badge").description("Generate health/security badge for README").option("-o, --output <dir>", "Output directory (default: from config or .reposentry)").action(async (options) => {
1779
+ const { resolve: resolve3 } = await import("path");
1780
+ const { readFileContent } = await import("./fs-GEDK6OCC.js");
1781
+ try {
1782
+ const config = await loadConfig();
1783
+ const outputDir = options.output ?? config.output;
1784
+ const analysisPath = resolve3(process.cwd(), outputDir, "analysis.json");
1785
+ const data = JSON.parse(await readFileContent(analysisPath));
1786
+ console.log(`
1787
+ \u{1F4DB} RepoSentry Badge for ${data.project}:`);
1788
+ console.log(` Grade: ${data.overallGrade} (${data.overallScore}/100)`);
1789
+ const color = data.overallScore >= 80 ? "brightgreen" : data.overallScore >= 60 ? "yellow" : "red";
1790
+ const url = `https://img.shields.io/badge/RepoSentry-${data.overallGrade}%20(${data.overallScore}%25)-${color}`;
1791
+ console.log(`
1792
+ Markdown:
1793
+ [![RepoSentry Score: ${data.overallGrade}](${url})](./HEALTH_REPORT.md)
1794
+ `);
1795
+ } catch {
1796
+ console.error("\u274C No analysis found. Run `reposentry analyze` first.");
1797
+ }
1798
+ });
1799
+ program.command("compare").description("Compare current score against a previous analysis run").option("-o, --output <dir>", "Output directory (default: from config or .reposentry)").action(async (options) => {
1800
+ const { resolve: resolve3 } = await import("path");
1801
+ const { readFileSync: readFS, existsSync: existsFS } = await import("fs");
1802
+ const readline = await import("readline");
1803
+ const config = await loadConfig();
1804
+ const outputDir = options.output ?? config.output;
1805
+ const historyPath = resolve3(process.cwd(), outputDir, "history.json");
1806
+ if (!existsFS(historyPath)) {
1807
+ console.error("\u274C No scoring history found. Run `reposentry analyze` first.");
1808
+ process.exit(1);
1809
+ }
1810
+ let history;
1811
+ try {
1812
+ history = JSON.parse(readFS(historyPath, "utf-8"));
1813
+ } catch {
1814
+ console.error("\u274C Could not parse history.json.");
1815
+ process.exit(1);
1816
+ }
1817
+ if (history.length < 2) {
1818
+ console.error("\u274C Need at least 2 analysis runs to compare. Run `reposentry analyze` again after making changes.");
1819
+ process.exit(1);
1820
+ }
1821
+ const latest = history[history.length - 1];
1822
+ const latestId = history.length;
1823
+ console.log(`
1824
+ ${chalk2.bold.cyan("\u{1F6E1}\uFE0F RepoSentry Score History")}
1825
+ `);
1826
+ console.log(chalk2.dim(" \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500"));
1827
+ console.log(` ${chalk2.bold("ID".padEnd(5))}${chalk2.bold("Date".padEnd(28))}${chalk2.bold("Grade".padEnd(8))}${chalk2.bold("Score".padEnd(8))}${chalk2.bold("Categories")}`);
1828
+ console.log(chalk2.dim(" \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500"));
1829
+ for (let i = 0; i < history.length; i++) {
1830
+ const entry = history[i];
1831
+ const id = (i + 1).toString();
1832
+ const date = new Date(entry.analyzedAt).toLocaleString();
1833
+ const isLatest = i === history.length - 1;
1834
+ const label = isLatest ? chalk2.dim(" (latest)") : "";
1835
+ const catSummary = entry.categories.map((c) => `${c.name.substring(0, 3)}:${c.score}`).join(" ");
1836
+ const gradeColor = entry.overallScore >= 80 ? chalk2.green : entry.overallScore >= 60 ? chalk2.yellow : chalk2.red;
1837
+ console.log(
1838
+ ` ${chalk2.cyan(id.padEnd(5))}${date.padEnd(28)}${gradeColor(entry.overallGrade.padEnd(8))}${gradeColor(entry.overallScore.toString().padEnd(8))}${chalk2.dim(catSummary)}${label}`
1839
+ );
1840
+ }
1841
+ console.log(chalk2.dim(" \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500"));
1842
+ console.log(chalk2.dim(`
1843
+ Latest run: #${latestId}. Select an older run to compare against.
1844
+ `));
1845
+ const rl = readline.createInterface({ input: process.stdin, output: process.stdout });
1846
+ const ask = (q) => new Promise((resolve4) => rl.question(q, resolve4));
1847
+ const input = (await ask(chalk2.cyan(` Enter ID to compare (1-${latestId - 1}): `))).trim();
1848
+ rl.close();
1849
+ const selectedId = parseInt(input, 10);
1850
+ if (isNaN(selectedId) || selectedId < 1 || selectedId >= latestId) {
1851
+ console.error(chalk2.red(`
1852
+ \u274C Invalid ID. Must be between 1 and ${latestId - 1}.
1853
+ `));
1854
+ process.exit(1);
1855
+ }
1856
+ const older = history[selectedId - 1];
1857
+ console.log(`
1858
+ ${chalk2.bold(" Comparing Run #" + selectedId + " \u2192 Run #" + latestId + " (latest)")}
1859
+ `);
1860
+ console.log(chalk2.dim(" \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500"));
1861
+ console.log(` ${chalk2.bold("Category".padEnd(20))}${chalk2.bold("Before".padEnd(14))}${chalk2.bold("After".padEnd(14))}${chalk2.bold("Change")}`);
1862
+ console.log(chalk2.dim(" \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500"));
1863
+ const olderMap = new Map(older.categories.map((c) => [c.name, c]));
1864
+ const latestMap = new Map(latest.categories.map((c) => [c.name, c]));
1865
+ const allCats = [.../* @__PURE__ */ new Set([...olderMap.keys(), ...latestMap.keys()])];
1866
+ for (const cat of allCats) {
1867
+ const o = olderMap.get(cat);
1868
+ const l = latestMap.get(cat);
1869
+ const before = o ? `${o.grade} (${o.score})` : "\u2014";
1870
+ const after = l ? `${l.grade} (${l.score})` : "\u2014";
1871
+ const diff = (l?.score ?? 0) - (o?.score ?? 0);
1872
+ const arrow = diff > 0 ? chalk2.green(`+${diff}`) : diff < 0 ? chalk2.red(`${diff}`) : chalk2.dim(" 0");
1873
+ console.log(` ${cat.padEnd(20)}${before.padEnd(14)}${after.padEnd(14)}${arrow}`);
1874
+ }
1875
+ console.log(chalk2.dim(" \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500"));
1876
+ const overallDiff = latest.overallScore - older.overallScore;
1877
+ const overallArrow = overallDiff > 0 ? chalk2.green(`+${overallDiff}`) : overallDiff < 0 ? chalk2.red(`${overallDiff}`) : chalk2.dim(" 0");
1878
+ const overallBefore = `${older.overallGrade} (${older.overallScore})`;
1879
+ const overallAfter = `${latest.overallGrade} (${latest.overallScore})`;
1880
+ console.log(` ${chalk2.bold("OVERALL".padEnd(20))}${overallBefore.padEnd(14)}${overallAfter.padEnd(14)}${overallArrow}`);
1881
+ console.log(chalk2.dim(" \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\n"));
1882
+ if (overallDiff > 0) {
1883
+ console.log(chalk2.green.bold(" \u{1F4C8} Great progress! Your codebase health improved.\n"));
1884
+ } else if (overallDiff < 0) {
1885
+ console.log(chalk2.red.bold(" \u{1F4C9} Score decreased. Check the category breakdown above.\n"));
1886
+ } else {
1887
+ console.log(chalk2.dim(" \u27A1\uFE0F No change in overall score.\n"));
1888
+ }
1889
+ });
1890
+ program.command("fix").description("Auto-fix detected issues in your project (creates missing files)").option("-o, --output <dir>", "RepoSentry output directory", ".reposentry").option("-m, --model <model>", "AI model to use").option("--all", "Fix all issues without prompting", false).option("--dry-run", "Show what would be fixed without making changes", false).action(async (options) => {
1891
+ const { resolve: resolvePath } = await import("path");
1892
+ const readline = await import("readline");
1893
+ const { scanForFixableIssues, CI_PROVIDERS, buildDeployGuidePrompt } = await import("./fix-engine-W5U2J3IJ.js");
1894
+ const { askCopilotWithWrite, isCopilotAvailable: isCopilotAvailable2, setCopilotModel: setCopilotModel2 } = await import("./copilot-OO6B2LU3.js");
1895
+ const { createProgress: createProgress2 } = await import("./progress-EQXHBUOU.js");
1896
+ const cwd = process.cwd();
1897
+ console.log(`
1898
+ ${chalk2.bold.cyan("\u{1F6E1}\uFE0F RepoSentry Fix")} ${chalk2.dim("\u2014 Auto-fix detected issues")}
1899
+ `);
1900
+ if (!isCopilotAvailable2()) {
1901
+ console.error(chalk2.red(" \u274C Copilot CLI not found. Install: npm i -g @github/copilot"));
1902
+ process.exit(1);
1903
+ }
1904
+ if (options.model) setCopilotModel2(options.model);
1905
+ console.log(chalk2.dim(" Scanning project for fixable issues...\n"));
1906
+ const { issues, context } = await scanForFixableIssues(cwd, ["node_modules", "dist", ".git"]);
1907
+ if (issues.length === 0) {
1908
+ console.log(chalk2.green(" \u2705 No fixable issues found! Your project looks great.\n"));
1909
+ return;
1910
+ }
1911
+ const priorityColors = {
1912
+ P0: chalk2.red.bold,
1913
+ P1: chalk2.yellow.bold,
1914
+ P2: chalk2.dim
1915
+ };
1916
+ const priorityLabels = {
1917
+ P0: "\u{1F534} Critical",
1918
+ P1: "\u{1F7E1} Important",
1919
+ P2: "\u{1F535} Nice-to-have"
1920
+ };
1921
+ console.log(chalk2.bold(` Found ${issues.length} fixable issue${issues.length > 1 ? "s" : ""}:
1922
+ `));
1923
+ console.log(chalk2.dim(" \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500"));
1924
+ console.log(` ${chalk2.bold("#".padEnd(4))}${chalk2.bold("Pri".padEnd(6))}${chalk2.bold("Category".padEnd(18))}${chalk2.bold("Issue")}`);
1925
+ console.log(chalk2.dim(" \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500"));
1926
+ for (let i = 0; i < issues.length; i++) {
1927
+ const issue = issues[i];
1928
+ const color = priorityColors[issue.priority] || chalk2.dim;
1929
+ const num = (i + 1).toString().padEnd(4);
1930
+ console.log(` ${chalk2.cyan(num)}${color(issue.priority.padEnd(6))}${issue.category.padEnd(18)}${issue.title}`);
1931
+ console.log(chalk2.dim(` ${issue.description}`));
1932
+ }
1933
+ console.log(chalk2.dim(" \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500"));
1934
+ const p0Count = issues.filter((i) => i.priority === "P0").length;
1935
+ const p1Count = issues.filter((i) => i.priority === "P1").length;
1936
+ const p2Count = issues.filter((i) => i.priority === "P2").length;
1937
+ console.log(chalk2.dim(`
1938
+ ${priorityLabels.P0}: ${p0Count} ${priorityLabels.P1}: ${p1Count} ${priorityLabels.P2}: ${p2Count}
1939
+ `));
1940
+ if (options.dryRun) {
1941
+ console.log(chalk2.dim(" --dry-run: No changes made.\n"));
1942
+ return;
1943
+ }
1944
+ const rl = readline.createInterface({ input: process.stdin, output: process.stdout });
1945
+ const ask = (q) => new Promise((r) => rl.question(q, r));
1946
+ let mode = "step";
1947
+ if (options.all) {
1948
+ mode = "all";
1949
+ } else {
1950
+ console.log(chalk2.bold(" How would you like to proceed?\n"));
1951
+ console.log(" 1. \u{1F680} Fix all issues automatically");
1952
+ console.log(" 2. \u{1F504} Step-by-step (confirm each fix)");
1953
+ console.log(" 3. \u{1F3AF} Select specific issues to fix");
1954
+ console.log(" 0. \u274C Cancel\n");
1955
+ const choice = (await ask(chalk2.cyan(" Select mode (0-3): "))).trim();
1956
+ if (choice === "0") {
1957
+ console.log(chalk2.dim("\n Cancelled.\n"));
1958
+ rl.close();
1959
+ return;
1960
+ }
1961
+ mode = choice === "1" ? "all" : choice === "3" ? "select" : "step";
1962
+ }
1963
+ let toFix = [...issues];
1964
+ if (mode === "select") {
1965
+ const input = (await ask(chalk2.cyan(` Enter issue numbers to fix (e.g., 1,3,5): `))).trim();
1966
+ const nums = input.split(",").map((s) => parseInt(s.trim(), 10)).filter((n) => !isNaN(n) && n >= 1 && n <= issues.length);
1967
+ if (nums.length === 0) {
1968
+ console.log(chalk2.red("\n No valid issues selected.\n"));
1969
+ rl.close();
1970
+ return;
1971
+ }
1972
+ toFix = nums.map((n) => issues[n - 1]);
1973
+ }
1974
+ const ciIssue = toFix.find((i) => i.id === "ci-pipeline");
1975
+ if (ciIssue) {
1976
+ console.log(chalk2.bold("\n \u{1F527} CI/CD Provider Selection\n"));
1977
+ for (let i = 0; i < CI_PROVIDERS.length; i++) {
1978
+ const rec = i === 0 ? chalk2.dim(" (Recommended)") : "";
1979
+ console.log(` ${chalk2.cyan((i + 1).toString().padStart(2))}. ${CI_PROVIDERS[i].name}${rec}`);
1980
+ }
1981
+ const ciChoice = (await ask(chalk2.cyan(`
1982
+ Select CI provider (1-${CI_PROVIDERS.length}): `))).trim();
1983
+ const ciIdx = parseInt(ciChoice, 10) - 1;
1984
+ const provider = CI_PROVIDERS[ciIdx >= 0 && ciIdx < CI_PROVIDERS.length ? ciIdx : 0];
1985
+ context.ciProvider = provider.name;
1986
+ ciIssue.files = [provider.file];
1987
+ console.log(chalk2.green(` \u2713 Selected: ${provider.name}
1988
+ `));
1989
+ }
1990
+ rl.close();
1991
+ console.log(chalk2.bold(`
1992
+ Fixing ${toFix.length} issue${toFix.length > 1 ? "s" : ""}...
1993
+ `));
1994
+ console.log(chalk2.dim(" \u23F3 Each fix may take 30-60s \u2014 Copilot is generating project-aware files.\n"));
1995
+ const progress = createProgress2();
1996
+ progress.setTotalSteps(toFix.length);
1997
+ let fixed = 0;
1998
+ let failed = 0;
1999
+ const fixResults = [];
2000
+ for (const issue of toFix) {
2001
+ if (mode === "step") {
2002
+ const rl2 = readline.createInterface({ input: process.stdin, output: process.stdout });
2003
+ const answer = await new Promise((r) => rl2.question(
2004
+ chalk2.cyan(` Fix "${issue.title}"? (y/n/skip): `),
2005
+ r
2006
+ ));
2007
+ rl2.close();
2008
+ if (answer.trim().toLowerCase() !== "y") {
2009
+ progress.increment(`Skipped: ${issue.title}`);
2010
+ fixResults.push({ issue, success: false, output: "Skipped by user", filesCreated: [] });
2011
+ continue;
2012
+ }
2013
+ }
2014
+ progress.increment(issue.title);
2015
+ const prompt = issue.promptBuilder(context);
2016
+ console.log(chalk2.dim(`
2017
+ \u{1F916} Asking Copilot to fix: ${issue.title}`));
2018
+ console.log(chalk2.dim(` \u{1F4C1} Expected files: ${issue.files.length > 0 ? issue.files.join(", ") : "(Copilot decides)"}`));
2019
+ try {
2020
+ const { existsSync: checkExists } = await import("fs");
2021
+ const { join: joinPath } = await import("path");
2022
+ const result = await askCopilotWithWrite(prompt, { projectDir: cwd, timeoutMs: 24e4 });
2023
+ const isFailed = result.startsWith("[Fix failed");
2024
+ const filesCreated = [];
2025
+ for (const f of issue.files) {
2026
+ const fullPath = joinPath(cwd, f);
2027
+ if (checkExists(fullPath)) {
2028
+ filesCreated.push(f);
2029
+ }
2030
+ }
2031
+ const mentionedPaths = result.match(/(?:created|wrote|written|writing)\s+[`"']?([^\s`"']+\.\w+)/gi) || [];
2032
+ for (const match of mentionedPaths) {
2033
+ const pathMatch = match.match(/[`"']?([^\s`"']+\.\w+)/);
2034
+ if (pathMatch) {
2035
+ const mentioned = pathMatch[1];
2036
+ const fullPath = joinPath(cwd, mentioned);
2037
+ if (checkExists(fullPath) && !filesCreated.includes(mentioned)) {
2038
+ filesCreated.push(mentioned);
2039
+ }
2040
+ }
2041
+ }
2042
+ if (isFailed) {
2043
+ failed++;
2044
+ console.log(chalk2.red(` \u274C Failed: ${result.slice(0, 120)}`));
2045
+ fixResults.push({ issue, success: false, output: result.slice(0, 200), filesCreated });
2046
+ } else if (filesCreated.length > 0) {
2047
+ fixed++;
2048
+ for (const f of filesCreated) {
2049
+ console.log(chalk2.green(` \u2705 Created: ${f}`));
2050
+ }
2051
+ fixResults.push({ issue, success: true, output: result.slice(0, 200), filesCreated });
2052
+ } else if (issue.files.length === 0) {
2053
+ fixed++;
2054
+ console.log(chalk2.green(` \u2705 Copilot applied fix`));
2055
+ console.log(chalk2.dim(` \u{1F4DD} ${result.slice(0, 150)}`));
2056
+ fixResults.push({ issue, success: true, output: result.slice(0, 200), filesCreated });
2057
+ } else {
2058
+ failed++;
2059
+ console.log(chalk2.yellow(` \u26A0\uFE0F Copilot responded but files not found on disk`));
2060
+ console.log(chalk2.dim(` \u{1F4DD} ${result.slice(0, 200)}`));
2061
+ fixResults.push({ issue, success: false, output: "Files not created on disk", filesCreated });
2062
+ }
2063
+ } catch (err) {
2064
+ failed++;
2065
+ console.log(chalk2.red(` \u274C Error: ${err?.message?.slice(0, 120) || "Unknown error"}`));
2066
+ fixResults.push({ issue, success: false, output: err?.message?.slice(0, 200) || "Unknown error", filesCreated: [] });
2067
+ }
2068
+ }
2069
+ progress.succeed("All fixes");
2070
+ console.log(`
2071
+ ${chalk2.bold(" Fix Summary")}
2072
+ `);
2073
+ console.log(chalk2.dim(" \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500"));
2074
+ for (const r of fixResults) {
2075
+ const icon = r.success ? chalk2.green("\u2705") : r.output === "Skipped by user" ? chalk2.dim("\u23ED\uFE0F") : chalk2.red("\u274C");
2076
+ const fileInfo = r.filesCreated.length > 0 ? chalk2.dim(` \u2192 ${r.filesCreated.join(", ")}`) : "";
2077
+ const failReason = !r.success && r.output !== "Skipped by user" ? chalk2.dim(` (${r.output.slice(0, 60)})`) : "";
2078
+ console.log(` ${icon} ${r.issue.title}${fileInfo}${failReason}`);
2079
+ }
2080
+ console.log(chalk2.dim(" \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500"));
2081
+ console.log(` ${chalk2.green.bold(`${fixed} fixed`)} | ${chalk2.red(`${failed} failed`)} | ${chalk2.dim(`${toFix.length - fixed - failed} skipped`)}
2082
+ `);
2083
+ if (ciIssue && fixResults.find((r) => r.issue.id === "ci-pipeline" && r.success)) {
2084
+ const rl3 = readline.createInterface({ input: process.stdin, output: process.stdout });
2085
+ const wantDeploy = await new Promise(
2086
+ (r) => rl3.question(chalk2.cyan(" Generate production deployment guide? (y/n): "), r)
2087
+ );
2088
+ rl3.close();
2089
+ if (wantDeploy.trim().toLowerCase() === "y") {
2090
+ console.log(chalk2.dim("\n Generating take-it-to-prod.md...\n"));
2091
+ const deployPrompt = buildDeployGuidePrompt(context);
2092
+ await askCopilotWithWrite(deployPrompt, { projectDir: cwd, timeoutMs: 24e4 });
2093
+ console.log(chalk2.green(" \u2705 take-it-to-prod.md created!\n"));
2094
+ }
2095
+ }
2096
+ if (ciIssue && context.ciProvider) {
2097
+ console.log(chalk2.bold.yellow(" \u26A0\uFE0F CI/CD Setup Reminders:\n"));
2098
+ const envHints = {
2099
+ "GitHub Actions": [
2100
+ "Go to Settings \u2192 Secrets and variables \u2192 Actions",
2101
+ "Add required secrets (API keys, deploy tokens, etc.)",
2102
+ "Review the generated workflow file before pushing"
2103
+ ],
2104
+ "GitLab CI": [
2105
+ "Go to Settings \u2192 CI/CD \u2192 Variables",
2106
+ "Add required CI/CD variables",
2107
+ "Review .gitlab-ci.yml before committing"
2108
+ ],
2109
+ "CircleCI": [
2110
+ "Go to Project Settings \u2192 Environment Variables",
2111
+ "Add required environment variables",
2112
+ "Review .circleci/config.yml before pushing"
2113
+ ],
2114
+ "Jenkins": [
2115
+ "Configure credentials in Jenkins \u2192 Manage Credentials",
2116
+ "Set up the pipeline in Jenkins dashboard",
2117
+ "Review Jenkinsfile before committing"
2118
+ ],
2119
+ "Travis CI": [
2120
+ "Go to Travis CI settings for this repo",
2121
+ "Add required environment variables",
2122
+ "Review .travis.yml before pushing"
2123
+ ],
2124
+ "Azure Pipelines": [
2125
+ "Go to Azure DevOps \u2192 Pipelines \u2192 Library",
2126
+ "Add required variable groups and secrets",
2127
+ "Review azure-pipelines.yml before committing"
2128
+ ]
2129
+ };
2130
+ const hints = envHints[context.ciProvider] || envHints["GitHub Actions"];
2131
+ for (const hint of hints) {
2132
+ console.log(chalk2.dim(` \u2192 ${hint}`));
2133
+ }
2134
+ console.log("");
2135
+ }
2136
+ if (fixed > 0) {
2137
+ console.log(chalk2.green.bold(" \u{1F4C8} Run `reposentry analyze` again to see your improved scores!\n"));
2138
+ }
2139
+ });
2140
+ program.command("init").description("Interactive setup \u2014 choose what to generate").action(async () => {
2141
+ await runInteractiveMode();
2142
+ });
2143
+ program.action(async () => {
2144
+ await runInteractiveMode();
2145
+ });
2146
+ return program;
2147
+ }
2148
/**
 * Interactive top-level menu shown when the CLI is invoked with no
 * subcommand (or via `reposentry init`). Prompts on stdin, then
 * dispatches to the selected analysis engine(s), the Copilot model
 * picker, the preview server, or the fix flow.
 *
 * Side effects: reads stdin, writes stdout, and may start a
 * long-running preview server or a full analysis run.
 *
 * @returns {Promise<void>} resolves when the chosen action completes.
 */
async function runInteractiveMode() {
  // Lazy-load readline so non-interactive code paths never pay for it.
  const readline = await import("readline");
  const rl = readline.createInterface({ input: process.stdin, output: process.stdout });
  // Promise wrapper around rl.question so prompts can be awaited inline.
  const ask = (q) => new Promise((resolve3) => rl.question(q, resolve3));
  console.log(`
${chalk2.bold.cyan("\u{1F6E1}\uFE0F RepoSentry")} ${chalk2.dim("\u2014 AI-Powered Codebase Intelligence")}
`);
  console.log(chalk2.bold(" What would you like to do?\n"));
  console.log(" 1. \u{1F680} Full Analysis Run all engines");
  console.log(" 2. \u{1F4DD} Documentation README, API, Setup, Changelog");
  console.log(" 3. \u{1F3D7}\uFE0F Architecture Mermaid diagrams + docs");
  console.log(" 4. \u{1F512} Security Audit Vulnerability scan + threat model");
  console.log(" 5. \u{1F527} CI/CD & Infrastructure Pipelines, Docker, deploy guide");
  console.log(" 6. \u{1F9EA} API Testing Test docs + Postman collection");
  console.log(" 7. \u26A1 Performance Anti-pattern detection");
  console.log(" 8. \u{1F91D} Team & Collaboration Templates + onboarding");
  console.log(" 9. \u2764\uFE0F Health Report Score + grade badge");
  console.log(" 10. \u{1F9E0} Change AI Model Select model for analysis");
  console.log(" 11. \u{1F4C1} Preview Reports Start preview server");
  console.log(" 12. \u{1F528} Fix Issues Auto-fix detected project issues");
  console.log(" 0. \u274C Exit\n");
  const choice = (await ask(chalk2.cyan(" Select option (0-12): "))).trim();
  // Choices 1-9 map to orchestrator engine flags; 0 and 10-12 are handled explicitly below.
  const engineMap = {
    "1": {},
    "2": { docs: true },
    "3": { architecture: true },
    "4": { security: true },
    "5": { ci: true },
    "6": { apiTests: true },
    "7": { performance: true },
    "8": { team: true },
    "9": { health: true }
  };
  if (choice === "0") {
    console.log(chalk2.dim("\n Goodbye! \u{1F44B}\n"));
    rl.close();
    return;
  }
  if (choice === "10") {
    // Model picker: query the Copilot CLI backend for its model list.
    const { getAvailableModels, isCopilotAvailable: isCopilotAvailable2 } = await import("./copilot-OO6B2LU3.js");
    if (!isCopilotAvailable2()) {
      console.log(chalk2.red("\n \u274C No Copilot CLI detected. Install via: npm i -g @github/copilot\n"));
      rl.close();
      return;
    }
    console.log(chalk2.bold("\n \u{1F9E0} Fetching available models from Copilot CLI...\n"));
    const models = getAvailableModels();
    if (models.length === 0) {
      // Fallback: let the user type a model name when the list can't be fetched.
      console.log(chalk2.yellow(" Could not fetch models. Enter a model name manually."));
      const selectedModel2 = (await ask(chalk2.cyan("\n Enter model name (or blank for default): "))).trim();
      if (selectedModel2) {
        console.log(chalk2.green(`
\u2713 Model set to: ${selectedModel2}`));
        console.log(chalk2.dim(` Run: reposentry analyze --model ${selectedModel2}
`));
      } else {
        console.log(chalk2.dim("\n Using backend default model.\n"));
      }
      rl.close();
      return;
    }
    console.log(chalk2.dim(" \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500"));
    for (let i = 0; i < models.length; i++) {
      const num = (i + 1).toString().padStart(2, " ");
      // NOTE(review): "claude-haiku-4.5" appears hard-coded as the assumed default -- confirm against the backend's actual default.
      const current = models[i] === "claude-haiku-4.5" ? chalk2.dim(" (current default)") : "";
      console.log(` ${chalk2.cyan(num)}. ${models[i]}${current}`);
    }
    console.log(chalk2.dim(" \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500"));
    console.log(chalk2.dim(`
${models.length} models available.`));
    const input = (await ask(chalk2.cyan(`
Select model (1-${models.length}) or name: `))).trim();
    const idx = parseInt(input, 10);
    let selectedModel;
    // Accept either a 1-based list index or a free-form model name.
    // Number.isNaN avoids the coercing global isNaN (idx is already a number here).
    if (!Number.isNaN(idx) && idx >= 1 && idx <= models.length) {
      selectedModel = models[idx - 1];
    } else if (input) {
      selectedModel = input;
    } else {
      console.log(chalk2.dim("\n Using backend default model.\n"));
      rl.close();
      return;
    }
    console.log(chalk2.green(`
\u2713 Model set to: ${selectedModel}`));
    console.log(chalk2.dim(` Run: reposentry analyze --model ${selectedModel}
`));
    rl.close();
    return;
  }
  if (choice === "11") {
    // Preview server: hand off to the server module (long-running; no return to menu).
    rl.close();
    const { startServer } = await import("./server-UJNM6VUZ.js");
    const config2 = await loadConfig();
    await startServer({ port: 3e3, outputDir: config2.output });
    return;
  }
  if (choice === "12") {
    // Fix flow: re-enter the CLI programmatically as `reposentry fix`.
    rl.close();
    const { createCLI: createCLI2 } = await Promise.resolve().then(() => require_cli());
    const fixProgram = createCLI2();
    await fixProgram.parseAsync(["node", "reposentry", "fix"]);
    return;
  }
  const engineOpts = engineMap[choice];
  if (!engineOpts) {
    console.log(chalk2.red("\n Invalid selection.\n"));
    rl.close();
    return;
  }
  rl.close();
  const config = await loadConfig();
  const opts = {
    output: config.output,
    format: config.format,
    depth: config.depth,
    ignore: config.ignore,
    force: false,
    verbose: false,
    // Choice #1 is full analysis: omit engine flags so orchestrator runs everything
    ...choice === "1" ? {} : engineOpts
  };
  await runAnalysis(opts);
}
2272
+
2273
+ export {
2274
+ createCLI
2275
+ };
2276
+ //# sourceMappingURL=chunk-I2CN2HU4.js.map