codebrief 1.1.8 → 1.1.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (3) hide show
  1. package/package.json +1 -1
  2. package/src/ai.js +224 -68
  3. package/src/index.js +5 -59
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "codebrief",
3
- "version": "1.1.8",
3
+ "version": "1.1.10",
4
4
  "description": "Generate AI context files for your project in seconds",
5
5
  "main": "src/index.js",
6
6
  "bin": {
package/src/ai.js CHANGED
@@ -11,31 +11,57 @@ const IMPORTANT_PATTERNS = [
11
11
  "package.json",
12
12
  "tsconfig.json",
13
13
  "next.config.*",
14
+ "nuxt.config.*",
14
15
  "vite.config.*",
16
+ "webpack.config.*",
15
17
  "tailwind.config.*",
18
+ "postcss.config.*",
16
19
  "prisma/schema.prisma",
17
20
  "drizzle.config.*",
18
- "README.md",
21
+ ".env.example",
22
+ "docker-compose.*",
23
+ "Dockerfile",
19
24
  "src/index.*",
20
25
  "src/main.*",
21
26
  "src/app.*",
22
27
  "src/server.*",
28
+ "src/config.*",
29
+ "src/routes.*",
30
+ "src/middleware.*",
23
31
  "lib/db.*",
24
32
  "lib/auth.*",
33
+ "lib/utils.*",
25
34
  "app/layout.*",
26
35
  "app/page.*",
36
+ "app/api/**/route.*",
37
+ "pages/_app.*",
38
+ "pages/index.*",
39
+ "server/index.*",
40
+ "server/api/**",
41
+ "controllers/*",
42
+ "models/*",
43
+ "services/*",
27
44
  ];
28
45
 
29
- function sampleSourceFiles(rootDir, fileTree, charBudget = 18000) {
46
+ function sampleSourceFiles(rootDir, fileTree, charBudget = 32000) {
30
47
  const samples = [];
31
48
  let budget = charBudget;
32
49
 
33
- // First pass: prioritised files
50
+ // Smart read: for large files, take head + tail to capture imports AND exports/env vars
51
+ function smartRead(filePath, maxChars) {
52
+ const raw = fs.readFileSync(filePath, "utf-8");
53
+ if (raw.length <= maxChars) return raw;
54
+ const head = Math.floor(maxChars * 0.6);
55
+ const tail = maxChars - head - 20; // 20 for separator
56
+ return raw.slice(0, head) + "\n// ... (truncated) ...\n" + raw.slice(-tail);
57
+ }
58
+
59
+ // First pass: prioritised files (generous budget — these are the most important)
34
60
  for (const pattern of IMPORTANT_PATTERNS) {
35
61
  const full = path.join(rootDir, pattern);
36
62
  if (fs.existsSync(full)) {
37
63
  try {
38
- const content = fs.readFileSync(full, "utf-8").slice(0, 3000);
64
+ const content = smartRead(full, 6000);
39
65
  samples.push({ file: pattern, content });
40
66
  budget -= content.length;
41
67
  if (budget <= 0) break;
@@ -68,18 +94,17 @@ function sampleSourceFiles(rootDir, fileTree, charBudget = 18000) {
68
94
  const ext = path.extname(entry.name).toLowerCase();
69
95
  if (!sourceExts.has(ext)) continue;
70
96
 
71
- const full = path.join(rootDir, entry.relativePath || entry.name);
97
+ const full = path.join(rootDir, entry.path || entry.name);
72
98
  // Skip already-read files
73
99
  if (samples.some((s) => full.endsWith(s.file))) continue;
74
100
 
75
101
  try {
76
- const raw = fs.readFileSync(full, "utf-8");
77
- const snippet = raw.slice(0, 1500);
102
+ const content = smartRead(full, 4000);
78
103
  samples.push({
79
- file: entry.relativePath || entry.name,
80
- content: snippet,
104
+ file: entry.path || entry.name,
105
+ content,
81
106
  });
82
- budget -= snippet.length;
107
+ budget -= content.length;
83
108
  } catch {
84
109
  /* skip */
85
110
  }
@@ -88,14 +113,80 @@ function sampleSourceFiles(rootDir, fileTree, charBudget = 18000) {
88
113
  return samples;
89
114
  }
90
115
 
116
+ // ── Env var extractor ────────────────────────────────────────
117
+ // Scans all source files for process.env.XXX references
118
+ function extractEnvVars(rootDir, fileTree) {
119
+ const envVars = new Map(); // name → [files]
120
+ const sourceExts = new Set([".js", ".ts", ".jsx", ".tsx", ".vue", ".svelte", ".py", ".go", ".rs", ".rb", ".php"]);
121
+ const envRegex = /process\.env\.([A-Z_][A-Z0-9_]*)/g;
122
+ const dotenvRegex = /^([A-Z_][A-Z0-9_]*)=/gm;
123
+
124
+ for (const entry of fileTree) {
125
+ if (entry.type !== "file") continue;
126
+ const ext = path.extname(entry.name).toLowerCase();
127
+ const name = entry.name.toLowerCase();
128
+ if (!sourceExts.has(ext) && !name.startsWith(".env")) continue;
129
+
130
+ const full = path.join(rootDir, entry.path || entry.name);
131
+ try {
132
+ const raw = fs.readFileSync(full, "utf-8");
133
+ const regex = name.startsWith(".env") ? dotenvRegex : envRegex;
134
+ let match;
135
+ while ((match = regex.exec(raw)) !== null) {
136
+ const varName = match[1];
137
+ // Skip generic placeholder names
138
+ if (varName === "XXX" || varName.length < 3) continue;
139
+ if (!envVars.has(varName)) envVars.set(varName, []);
140
+ const file = entry.path || entry.name;
141
+ if (!envVars.get(varName).includes(file)) envVars.get(varName).push(file);
142
+ }
143
+ } catch { /* skip */ }
144
+ }
145
+
146
+ return envVars;
147
+ }
148
+
149
+ // ── Export extractor ─────────────────────────────────────────
150
+ // Scans all source files for key function/class exports
151
+ function extractExports(rootDir, fileTree) {
152
+ const exports = [];
153
+ const sourceExts = new Set([".js", ".ts", ".jsx", ".tsx"]);
154
+ const patterns = [
155
+ /(?:module\.exports\s*=\s*\{([^}]+)\})/g,
156
+ /(?:exports\.(\w+)\s*=)/g,
157
+ /(?:export\s+(?:default\s+)?(?:function|class|const|let|var)\s+(\w+))/g,
158
+ ];
159
+
160
+ for (const entry of fileTree) {
161
+ if (entry.type !== "file") continue;
162
+ const ext = path.extname(entry.name).toLowerCase();
163
+ if (!sourceExts.has(ext)) continue;
164
+
165
+ const full = path.join(rootDir, entry.path || entry.name);
166
+ try {
167
+ const raw = fs.readFileSync(full, "utf-8");
168
+ const file = entry.path || entry.name;
169
+ for (const regex of patterns) {
170
+ let match;
171
+ regex.lastIndex = 0;
172
+ while ((match = regex.exec(raw)) !== null) {
173
+ exports.push({ file, exports: match[1] || match[0] });
174
+ }
175
+ }
176
+ } catch { /* skip */ }
177
+ }
178
+
179
+ return exports;
180
+ }
181
+
91
182
  // ── Prompt builder ───────────────────────────────────────────
92
- function buildPrompt(analysis, fileTree, fileSamples) {
183
+ function buildPrompt(analysis, fileTree, fileSamples, envVars, fileExports) {
93
184
  const fileList = fileTree
94
- .slice(0, 80)
185
+ .slice(0, 120)
95
186
  .map((e) =>
96
187
  e.type === "dir"
97
- ? ` ${e.relativePath || e.name}/`
98
- : ` ${e.relativePath || e.name}`,
188
+ ? ` ${e.path || e.name}/`
189
+ : ` ${e.path || e.name}`,
99
190
  )
100
191
  .join("\n");
101
192
 
@@ -108,77 +199,112 @@ function buildPrompt(analysis, fileTree, fileSamples) {
108
199
  .map(([k, v]) => ` ${k}: ${v}`)
109
200
  .join("\n") || " (none)";
110
201
 
111
- return `You are a senior software architect. A developer ran \`codebrief\` on their project and you must write a comprehensive, deeply detailed CONTEXT.md file that will help AI assistants (like GitHub Copilot or Cursor) understand this project perfectly.
202
+ const envVarsText = envVars.size > 0
203
+ ? Array.from(envVars.entries())
204
+ .map(([name, files]) => `- \`${name}\` — used in ${files.map((f) => "`" + f + "`").join(", ")}`)
205
+ .join("\n")
206
+ : "(none found)";
207
+
208
+ const exportsText = fileExports.length > 0
209
+ ? fileExports.slice(0, 30).map((e) => `- \`${e.file}\`: ${e.exports}`).join("\n")
210
+ : "(none found)";
211
+
212
+ const systemMessage = `You are a world-class software architect. You read source code and produce extremely precise, file-grounded documentation. You NEVER write generic advice. Every sentence you write must cite a real file path, function name, or pattern visible in the code you are given. If you cannot ground a claim in the actual source, you omit it entirely.`;
112
213
 
113
- ## Auto-detected project info
214
+ const userMessage = `I ran \`codebrief\` and need you to write a CONTEXT.md that lets an AI code assistant (Cursor, Copilot) understand this project so well it can write production code immediately.
215
+
216
+ ---
217
+ ## HARD RULES (violating these = failure)
218
+
219
+ 1. **File-path grounding**: Every bullet in Architecture Notes, Rules for AI, and Never Do MUST reference at least one real file path or function/export name from the code samples. No exceptions.
220
+ 2. **No negatives**: NEVER write "X is not used", "the project does not have Y", "no database detected". If something doesn't exist, simply don't mention it.
221
+ 3. **No generic advice**: NEVER write vague statements like "follow best practices", "maintain code quality", "adhere to coding standards", "ensure security". These are worthless.
222
+ 4. **Omit, don't guess**: If you can't infer something from the actual code samples, omit that section/bullet entirely. Empty sections should be removed.
223
+ 5. **Specific > exhaustive**: 5 deeply specific bullets beat 15 vague ones.
224
+
225
+ ## BAD (never write like this)
226
+ - "Authentication and session logic are not explicitly handled within the project"
227
+ - "Adhere to the project's coding standards and best practices"
228
+ - "Regular security audits are essential"
229
+ - "Error handling mechanisms are crucial for a robust application"
230
+ - "The project follows a modular structure, enhancing maintainability"
231
+
232
+ ## GOOD (write like this)
233
+ - "CLI entry point is \`src/index.js:main()\` — parses flags via \`hasFlag()\`/\`getFlagValue()\`, calls \`scanDirectory()\` → \`analyzeProject()\` → \`generateContextFile()\` in sequence"
234
+ - "AI enhancement in \`src/ai.js:enhanceWithAI()\` samples up to 32k chars of source via \`sampleSourceFiles()\`, builds a structured prompt, dispatches to the selected provider (Groq/OpenAI/Anthropic/Gemini/Grok/Ollama)"
235
+ - "Never add npm dependencies — this project uses zero deps (native \`https\`, \`fs\`, \`path\` only). See \`package.json\` dependencies field is empty."
236
+ - "All color output uses the \`c\` object from \`src/index.js\` (ANSI escape codes) — never use chalk or other color libraries"
237
+
238
+ ---
239
+ ## Project metadata
114
240
  - Name: ${analysis.name}
115
- - Type / Framework: ${analysis.type}
241
+ - Framework / Type: ${analysis.type}
116
242
  - Language: ${analysis.language}
117
243
  - Package manager: ${analysis.packageManager}
118
244
  - Stack: ${analysis.stack.join(", ") || "unknown"}
119
- - CSS framework: ${analysis.cssFramework || "none detected"}
120
- - UI library: ${analysis.uiLibrary || "none detected"}
121
- - State management: ${analysis.stateManagement || "none detected"}
122
- - Database: ${analysis.database || "none detected"}
123
- - Test framework: ${analysis.testFramework || "none detected"}
124
- - Deployment: ${analysis.deployment || "unknown"}
245
+ - CSS: ${analysis.cssFramework || "none"} · UI: ${analysis.uiLibrary || "none"} · State: ${analysis.stateManagement || "none"}
246
+ - DB: ${analysis.database || "none"} · Tests: ${analysis.testFramework || "none"} · Deploy: ${analysis.deployment || "unknown"}
125
247
  - Monorepo: ${analysis.isMonorepo}
126
248
 
127
249
  ## Scripts
128
250
  ${scripts}
129
251
 
130
- ## File tree (first 80 entries)
252
+ ## File tree
131
253
  ${fileList}
132
254
 
133
- ## Key source file samples
255
+ ## Source code samples (READ CAREFULLY — this is your evidence)
134
256
  ${samplesText}
135
257
 
258
+ ## Environment variables found in code
259
+ ${envVarsText}
260
+
261
+ ## Module exports detected
262
+ ${exportsText}
263
+
136
264
  ---
137
265
 
138
- Now produce a CONTEXT.md file in this exact Markdown structure. Be as specific and detailed as possible — infer architecture from the actual code, don't be generic:
266
+ Now produce the CONTEXT.md in EXACTLY this structure. Remove any section where you have nothing concrete to say. Keep the emoji in every section header EXACTLY as shown.
139
267
 
140
268
  # Project Context: ${analysis.name}
141
- > Auto-generated and AI-enhanced by **codebrief**
142
- > Generated: ${new Date().toISOString().split("T")[0]}
269
+ > AI-enhanced by **codebrief** · ${new Date().toISOString().split("T")[0]}
143
270
 
144
271
  ---
145
272
 
146
273
  ## 🧱 Tech Stack
147
- (bullet list of every detected technology with brief description of its role in THIS project)
274
+ Bullet list. Each bullet: technology name + its specific role citing where it's used.
275
+ Example: "Node.js — runtime; entry point at \`src/index.js\`, all code is CommonJS with \`require()\`"
148
276
 
149
- ## 📁 Folder Conventions
150
- (bullet list explaining what each main folder/directory is responsible for, inferred from the file tree and source)
277
+ ## 🚀 Key Files
278
+ The 5–8 most important files to read first. Exact paths. One sentence each explaining what the file does and its key exports/functions.
151
279
 
152
- ## 🔧 Available Scripts
153
- (bullet list of scripts and what they actually do in context)
280
+ ## 📁 Folder Structure
281
+ One bullet per top-level directory, explaining its responsibility based on the actual files inside.
154
282
 
155
- ## 🗂️ Project Structure
156
- \`\`\`
157
- (the file tree, neatly formatted)
158
- \`\`\`
283
+ ## 🔧 Scripts
284
+ One bullet per script. Say what it actually does, not just its command.
159
285
 
160
286
  ## 🏗️ Architecture Notes
161
- (this is the most valuable section — write 8-15 detailed bullet points explaining:
162
- - how data flows through the app
163
- - where auth/session logic lives
164
- - how the database/ORM is used
165
- - how routing works
166
- - key patterns or abstractions in the codebase
167
- - anything a new developer MUST know before coding)
287
+ 8–15 bullets. Each MUST:
288
+ - Name specific file(s), function(s), or export(s)
289
+ - Describe a concrete data flow, dependency, or design decision
290
+ - Be something useful for an AI about to write code in this project
168
291
 
169
292
  ## 🤖 Rules for AI
170
- (bullet list of 8-12 concrete rules the AI assistant must follow when generating code for THIS specific project — based on the actual patterns observed in the source)
293
+ 8–12 rules extracted from the actual code patterns. Format:
294
+ - "Always/Never [specific action] — [file or pattern reference]"
171
295
 
172
296
  ## 🚫 Never Do
173
- (bullet list of 6-10 things an AI must NEVER do in this project inferred from the stack and code style)
297
+ 6–10 prohibitions grounded in the codebase. Each must cite WHY (a file, pattern, or convention).
174
298
 
175
- ## 🔐 Security & Environment
176
- (notes about environment variables, secrets, auth patterns seen in the code)
299
+ ## 🔐 Environment & Secrets
300
+ List actual env var names found in the code (e.g. \`GROQ_API_KEY\`, \`OPENAI_API_KEY\`). Describe how they're loaded and used. If none found, omit this section.
177
301
 
178
302
  ---
179
- *Re-run \`codebrief\` after major refactors to keep this file current.*
303
+ *Re-run \`codebrief --ai\` after major refactors to keep this file current.*
180
304
 
181
- Respond with ONLY the Markdown content. No explanations, no code fences around the whole output.`;
305
+ Respond with ONLY the Markdown. No preamble, no wrapping code fences.`;
306
+
307
+ return { systemMessage, userMessage };
182
308
  }
183
309
 
184
310
  // ── HTTP helper (native, no deps) ────────────────────────────
@@ -224,6 +350,10 @@ async function callGroq(prompt, model) {
224
350
  " Get a free key in ~30s at https://console.groq.com",
225
351
  );
226
352
 
353
+ const messages = typeof prompt === "string"
354
+ ? [{ role: "user", content: prompt }]
355
+ : [{ role: "system", content: prompt.systemMessage }, { role: "user", content: prompt.userMessage }];
356
+
227
357
  const res = await httpsPost(
228
358
  "api.groq.com",
229
359
  "/openai/v1/chat/completions",
@@ -233,9 +363,9 @@ async function callGroq(prompt, model) {
233
363
  },
234
364
  {
235
365
  model,
236
- messages: [{ role: "user", content: prompt }],
237
- temperature: 0.3,
238
- max_tokens: 4096,
366
+ messages,
367
+ temperature: 0.2,
368
+ max_tokens: 8192,
239
369
  },
240
370
  );
241
371
  return res.choices?.[0]?.message?.content || "";
@@ -247,6 +377,10 @@ async function callOpenAI(prompt, model) {
247
377
  if (!apiKey)
248
378
  throw new Error("OPENAI_API_KEY environment variable is not set.");
249
379
 
380
+ const messages = typeof prompt === "string"
381
+ ? [{ role: "user", content: prompt }]
382
+ : [{ role: "system", content: prompt.systemMessage }, { role: "user", content: prompt.userMessage }];
383
+
250
384
  const res = await httpsPost(
251
385
  "api.openai.com",
252
386
  "/v1/chat/completions",
@@ -256,9 +390,9 @@ async function callOpenAI(prompt, model) {
256
390
  },
257
391
  {
258
392
  model,
259
- messages: [{ role: "user", content: prompt }],
260
- temperature: 0.3,
261
- max_tokens: 4096,
393
+ messages,
394
+ temperature: 0.2,
395
+ max_tokens: 8192,
262
396
  },
263
397
  );
264
398
  return res.choices?.[0]?.message?.content || "";
@@ -270,6 +404,19 @@ async function callAnthropic(prompt, model) {
270
404
  if (!apiKey)
271
405
  throw new Error("ANTHROPIC_API_KEY environment variable is not set.");
272
406
 
407
+ const messages = typeof prompt === "string"
408
+ ? [{ role: "user", content: prompt }]
409
+ : [{ role: "user", content: prompt.userMessage }];
410
+
411
+ const system = typeof prompt === "string" ? undefined : prompt.systemMessage;
412
+
413
+ const body = {
414
+ model,
415
+ max_tokens: 8192,
416
+ messages,
417
+ };
418
+ if (system) body.system = system;
419
+
273
420
  const res = await httpsPost(
274
421
  "api.anthropic.com",
275
422
  "/v1/messages",
@@ -278,11 +425,7 @@ async function callAnthropic(prompt, model) {
278
425
  "x-api-key": apiKey,
279
426
  "anthropic-version": "2023-06-01",
280
427
  },
281
- {
282
- model,
283
- max_tokens: 4096,
284
- messages: [{ role: "user", content: prompt }],
285
- },
428
+ body,
286
429
  );
287
430
  return res.content?.[0]?.text || "";
288
431
  }
@@ -296,13 +439,17 @@ async function callGemini(prompt, model) {
296
439
  " Get a free key at https://aistudio.google.com/app/apikey",
297
440
  );
298
441
 
442
+ const fullText = typeof prompt === "string"
443
+ ? prompt
444
+ : `${prompt.systemMessage}\n\n${prompt.userMessage}`;
445
+
299
446
  const res = await httpsPost(
300
447
  "generativelanguage.googleapis.com",
301
448
  `/v1beta/models/${model}:generateContent?key=${apiKey}`,
302
449
  { "Content-Type": "application/json" },
303
450
  {
304
- contents: [{ parts: [{ text: prompt }] }],
305
- generationConfig: { temperature: 0.3, maxOutputTokens: 4096 },
451
+ contents: [{ parts: [{ text: fullText }] }],
452
+ generationConfig: { temperature: 0.2, maxOutputTokens: 8192 },
306
453
  },
307
454
  );
308
455
  return res.candidates?.[0]?.content?.parts?.[0]?.text || "";
@@ -317,6 +464,10 @@ async function callGrok(prompt, model) {
317
464
  " Get a key at https://console.x.ai",
318
465
  );
319
466
 
467
+ const messages = typeof prompt === "string"
468
+ ? [{ role: "user", content: prompt }]
469
+ : [{ role: "system", content: prompt.systemMessage }, { role: "user", content: prompt.userMessage }];
470
+
320
471
  const res = await httpsPost(
321
472
  "api.x.ai",
322
473
  "/v1/chat/completions",
@@ -326,9 +477,9 @@ async function callGrok(prompt, model) {
326
477
  },
327
478
  {
328
479
  model,
329
- messages: [{ role: "user", content: prompt }],
330
- temperature: 0.3,
331
- max_tokens: 4096,
480
+ messages,
481
+ temperature: 0.2,
482
+ max_tokens: 8192,
332
483
  },
333
484
  );
334
485
  return res.choices?.[0]?.message?.content || "";
@@ -338,7 +489,10 @@ async function callOllama(prompt, model) {
338
489
  model = model || getDefaultModel("ollama");
339
490
  // Ollama runs locally on port 11434 — use http
340
491
  const http = require("http");
341
- const body = JSON.stringify({ model, prompt, stream: false });
492
+ const fullText = typeof prompt === "string"
493
+ ? prompt
494
+ : `${prompt.systemMessage}\n\n${prompt.userMessage}`;
495
+ const body = JSON.stringify({ model, prompt: fullText, stream: false });
342
496
 
343
497
  return new Promise((resolve, reject) => {
344
498
  const req = http.request(
@@ -387,7 +541,9 @@ async function enhanceWithAI(analysis, fileTree, rootDir, options = {}) {
387
541
  const { provider = "openai", model } = options;
388
542
 
389
543
  const fileSamples = sampleSourceFiles(rootDir, fileTree);
390
- const prompt = buildPrompt(analysis, fileTree, fileSamples);
544
+ const envVars = extractEnvVars(rootDir, fileTree);
545
+ const fileExports = extractExports(rootDir, fileTree);
546
+ const prompt = buildPrompt(analysis, fileTree, fileSamples, envVars, fileExports);
391
547
 
392
548
  switch (provider.toLowerCase()) {
393
549
  case "groq":
package/src/index.js CHANGED
@@ -278,19 +278,16 @@ async function runInitPrompts(contextPath) {
278
278
  async function main() {
279
279
  print("");
280
280
  print(bold(cyan("⚡ codebrief") + " — AI Context Generator"));
281
- print(dim(" Scanning: " + rootDir));
282
281
  print("");
283
282
 
284
283
  // Step 1: Scan
285
- startSpinner("Scanning project files...");
284
+ startSpinner("Scanning project...");
286
285
  const fileTree = scanDirectory(rootDir, maxDepth);
287
- stopSpinner(`Found ${fileTree.length} files and folders`);
288
286
 
289
287
  // Step 2: Analyze
290
- startSpinner("Analyzing stack and conventions...");
291
288
  const analysis = analyzeProject(rootDir);
292
289
  stopSpinner(
293
- `Detected: ${analysis.stack.join(", ") || "Unknown project type"}`,
290
+ `${fileTree.length} files · ${analysis.stack.join(", ") || "unknown stack"}`,
294
291
  );
295
292
 
296
293
  // Step 3: Generate files
@@ -432,63 +429,12 @@ async function main() {
432
429
 
433
430
  // ── Summary ───────────────────────────────────────────────
434
431
  print("");
435
- print(bold(" 📄 Files Created:"));
436
- filesCreated.forEach((f) => {
437
- print(` ${green("✅")} ${bold(f.label)}`);
438
- });
439
-
440
- print("");
441
- print(bold(" 🧱 Detected Stack:"));
442
- if (analysis.stack.length > 0) {
443
- analysis.stack.forEach((s) => print(` ${cyan("→")} ${s}`));
444
- } else {
445
- print(
446
- ` ${c.yellow}Could not auto-detect stack — check CONTEXT.md${c.reset}`,
447
- );
448
- }
449
-
450
- if (analysis.conventions.length > 0) {
451
- print("");
452
- print(bold(" 📁 Detected Conventions:"));
453
- analysis.conventions.forEach((s) => print(` ${cyan("→")} ${s}`));
454
- }
432
+ filesCreated.forEach((f) => print(` ${green("✔")} ${bold(f.label)}`));
455
433
 
456
- if (analysis.isMonorepo && analysis.packages.length > 0) {
434
+ if (!aiMode) {
457
435
  print("");
458
- print(bold(" 📦 Monorepo Packages:"));
459
- analysis.packages.forEach((p) =>
460
- print(` ${cyan("→")} ${bold(p.name)} ${dim("(" + p.path + ")")}`),
461
- );
462
- }
463
-
464
- print("");
465
- print(bold(" ✏️ Next Steps:"));
466
- if (!initMode) {
467
- print(
468
- ` 1. Open ${cyan("CONTEXT.md")} and fill in the ${bold("Architecture Notes")} section`,
469
- );
470
- print(` 2. Add your own rules to the ${bold("Never Do")} section`);
436
+ print(dim(` Tip: use ${cyan("--ai")} for an AI-enhanced CONTEXT.md.`));
471
437
  }
472
- if (!skipCursor) {
473
- print(
474
- ` 3. In Cursor, open the Notepads panel and reference ${cyan("CONTEXT.md")}`,
475
- );
476
- print(
477
- ` 4. Your ${cyan(".cursor/rules/project.mdc")} is already active automatically`,
478
- );
479
- }
480
- print(
481
- dim(
482
- ` Tip: use ${cyan("--update")} next time to preserve your notes on re-run.`,
483
- ),
484
- );
485
- print(
486
- dim(
487
- ` Tip: use ${cyan("--ai")} for a deeply detailed AI-enhanced CONTEXT.md.`,
488
- ),
489
- );
490
- print("");
491
- print(dim(" Re-run codebrief after major refactors to keep context fresh."));
492
438
  print("");
493
439
 
494
440
  // ── Interactive --init prompts ───────────────────────────────