codebrief 1.1.3 → 1.1.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -54,19 +54,20 @@ codebrief [options]
 
  ### All Options
 
- | Flag | Description | Default |
- | ------------------ | -------------------------------------------------------------------- | ---------------- |
- | `--depth <n>` | Max folder depth to scan | `4` |
- | `--no-cursor` | Skip `.cursor/rules/project.mdc` generation | cursor on |
- | `--vscode` | Also generate `.github/copilot-instructions.md` | off |
- | `--output <dir>` | Write output files to a different directory | cwd |
- | `--update` | Re-generate but **preserve** your Architecture Notes & Never Do | — |
- | `--init` | Interactively fill in Architecture Notes & Never Do after generation | — |
- | `--ai` | Use AI to generate a deeply detailed `CONTEXT.md` | off |
- | `--provider <p>` | AI provider: `groq` (default), `openai`, `anthropic`, `ollama` | `groq` |
- | `--model <m>` | Override the default model for the chosen provider | provider default |
- | `--version` / `-v` | Print version | — |
- | `--help` / `-h` | Show help | — |
+ | Flag | Description | Default |
+ | ------------------ | -------------------------------------------------------------------------------- | ---------------- |
+ | `--depth <n>` | Max folder depth to scan | `4` |
+ | `--no-cursor` | Skip `.cursor/rules/project.mdc` generation | cursor on |
+ | `--vscode` | Also generate `.github/copilot-instructions.md` | off |
+ | `--output <dir>` | Write output files to a different directory | cwd |
+ | `--update` | Re-generate but **preserve** your Architecture Notes & Never Do | — |
+ | `--init` | Interactively fill in Architecture Notes & Never Do after generation | — |
+ | `--ai` | Use AI to generate a deeply detailed `CONTEXT.md` | off |
+ | `--provider <p>` | AI provider: `groq` (default), `openai`, `anthropic`, `gemini`, `grok`, `ollama` | `groq` |
+ | `--model <m>` | Override the default model for the chosen provider | provider default |
+ | `--models` | List all available models for the chosen `--provider` | — |
+ | `--version` / `-v` | Print version | — |
+ | `--help` / `-h` | Show help | — |
 
  ---
 
@@ -95,14 +96,45 @@ codebrief --ai
 
  ### AI Providers
 
- | Provider | Flag | Env Variable | Cost | Default Model |
- | ----------- | ---------------------- | ------------------- | ---------- | ---------------------------- |
- | **Groq** | `--provider groq` | `GROQ_API_KEY` | Free tier | `llama-3.3-70b-versatile` |
- | OpenAI | `--provider openai` | `OPENAI_API_KEY` | Paid | `gpt-4o` |
- | Anthropic | `--provider anthropic` | `ANTHROPIC_API_KEY` | Paid | `claude-3-5-sonnet-20241022` |
- | Ollama | `--provider ollama` | *(none)* | Free/local | `llama3` |
+ | Provider | Flag | Env Variable | Cost | Default Model |
+ | ---------- | ---------------------- | ------------------- | ---------- | ------------------------- |
+ | **Groq** | `--provider groq` | `GROQ_API_KEY` | Free tier | `llama-3.3-70b-versatile` |
+ | **Gemini** | `--provider gemini` | `GEMINI_API_KEY` | Free tier | `gemini-2.5-flash` |
+ | OpenAI | `--provider openai` | `OPENAI_API_KEY` | Paid | `gpt-4o` |
+ | Anthropic | `--provider anthropic` | `ANTHROPIC_API_KEY` | Paid | `claude-sonnet-4-5` |
+ | Grok (xAI) | `--provider grok` | `XAI_API_KEY` | Paid | `grok-4-fast` |
+ | Ollama | `--provider ollama` | *(none)* | Free/local | `llama3.3` |
 
- **Groq is the recommended default** — it's free, requires no credit card, and is the fastest inference available (~2–3s response time).
+ **Groq and Gemini are both free** — no credit card required. Groq is fastest (~2–3s), Gemini offers Google's latest models.
+
+ ### Browsing Available Models
+
+ Use `--models` to see all available models for any provider before running `--ai`:
+
+ ```bash
+ # List all models for a provider
+ codebrief --models --provider groq
+ codebrief --models --provider gemini
+ codebrief --models --provider openai
+
+ # List all providers (no --provider given)
+ codebrief --models
+ ```
+
+ Example output:
+
+ ```
+ Models for groq:
+
+ meta-llama/llama-4-maverick-17b-128e-instruct
+ llama-3.3-70b-versatile (default)
+ llama-3.1-8b-instant
+ compound-beta
+
+ Usage: codebrief --ai --provider groq --model <model>
+ ```
+
+ All available models are maintained in `src/models.js` — update that file when new models release or old ones are deprecated.
 
  ### Examples
 
@@ -110,13 +142,21 @@ codebrief --ai
  # Free (Groq, default)
  codebrief --ai
 
- # Specific provider
- codebrief --ai --provider anthropic
+ # Free (Google Gemini)
+ codebrief --ai --provider gemini
+
+ # Paid providers
  codebrief --ai --provider openai
+ codebrief --ai --provider anthropic
+ codebrief --ai --provider grok
+
+ # Use a specific model (see codebrief --models --provider <name>)
+ codebrief --ai --provider groq --model llama-3.1-8b-instant
+ codebrief --ai --provider gemini --model gemini-1.5-pro
 
  # Fully local, no API key (needs Ollama running)
  codebrief --ai --provider ollama
- codebrief --ai --provider ollama --model mistral
+ codebrief --ai --provider ollama --model codellama
 
  # AI + preserve existing notes
  codebrief --ai --update
@@ -285,19 +325,19 @@ Now help me with: ...
 
  ## Detected Stacks
 
- | Category | Detected |
- | ------------------- | ------------------------------------------------------------------------------------------------------------- |
+ | Category | Detected |
+ | ------------------- | ------------------------------------------------------------------------------------------------------------ |
  | **Frameworks** | Next.js, Remix, SvelteKit, Astro, Nuxt.js, React, Vue.js, Svelte, Angular, NestJS, Express.js, Fastify, Hono |
- | **Language** | TypeScript (via `tsconfig.json` or dep), Python |
- | **CSS** | Tailwind CSS, styled-components, Emotion, SASS |
- | **UI Libraries** | shadcn/ui, Material UI, Ant Design, Chakra UI, Mantine |
- | **State** | TanStack Query, Zustand, Jotai, Redux Toolkit, MobX |
- | **Database/ORM** | Prisma, Drizzle, Mongoose, Supabase, Firebase |
- | **Testing** | Vitest, Jest, Playwright, Cypress |
- | **Deployment** | Vercel, Netlify, Railway, Docker |
- | **Package Manager** | npm, pnpm, yarn, bun |
- | **Python** | Django, FastAPI, Flask |
- | **Monorepo** | pnpm workspaces, Turborepo, Lerna, npm workspaces |
+ | **Language** | TypeScript (via `tsconfig.json` or dep), Python |
+ | **CSS** | Tailwind CSS, styled-components, Emotion, SASS |
+ | **UI Libraries** | shadcn/ui, Material UI, Ant Design, Chakra UI, Mantine |
+ | **State** | TanStack Query, Zustand, Jotai, Redux Toolkit, MobX |
+ | **Database/ORM** | Prisma, Drizzle, Mongoose, Supabase, Firebase |
+ | **Testing** | Vitest, Jest, Playwright, Cypress |
+ | **Deployment** | Vercel, Netlify, Railway, Docker |
+ | **Package Manager** | npm, pnpm, yarn, bun |
+ | **Python** | Django, FastAPI, Flask |
+ | **Monorepo** | pnpm workspaces, Turborepo, Lerna, npm workspaces |
 
  ---
 
@@ -322,6 +362,7 @@ codebrief/
  analyzer.js ← Stack detection from package.json, lock files, config files
  generator.js ← Markdown/MDC generators + section parser for --update
  ai.js ← AI enhancement: file sampling, prompt builder, provider calls
+ models.js ← All provider model lists — edit here to update/add models
  package.json
  README.md
  ROADMAP.md
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "codebrief",
-   "version": "1.1.3",
+   "version": "1.1.7",
    "description": "Generate AI context files for your project in seconds",
    "main": "src/index.js",
    "bin": {
package/src/ai.js CHANGED
@@ -3,6 +3,7 @@
  const fs = require("fs");
  const path = require("path");
  const https = require("https");
+ const { getDefaultModel, getAllProviders } = require("./models");
 
  // ── File sampler ─────────────────────────────────────────────
  // Reads the most informative source files up to a character budget
@@ -214,7 +215,8 @@ function httpsPost(hostname, path, headers, body) {
  }
 
  // ── Provider implementations ─────────────────────────────────
- async function callGroq(prompt, model = "llama-3.3-70b-versatile") {
+ async function callGroq(prompt, model) {
+   model = model || getDefaultModel("groq");
    const apiKey = process.env.GROQ_API_KEY;
    if (!apiKey)
      throw new Error(
@@ -239,7 +241,8 @@ async function callGroq(prompt, model = "llama-3.3-70b-versatile") {
    return res.choices?.[0]?.message?.content || "";
  }
 
- async function callOpenAI(prompt, model = "gpt-4o") {
+ async function callOpenAI(prompt, model) {
+   model = model || getDefaultModel("openai");
    const apiKey = process.env.OPENAI_API_KEY;
    if (!apiKey)
      throw new Error("OPENAI_API_KEY environment variable is not set.");
@@ -261,7 +264,8 @@ async function callOpenAI(prompt, model = "gpt-4o") {
    return res.choices?.[0]?.message?.content || "";
  }
 
- async function callAnthropic(prompt, model = "claude-3-5-sonnet-20241022") {
+ async function callAnthropic(prompt, model) {
+   model = model || getDefaultModel("anthropic");
    const apiKey = process.env.ANTHROPIC_API_KEY;
    if (!apiKey)
      throw new Error("ANTHROPIC_API_KEY environment variable is not set.");
@@ -283,7 +287,55 @@ async function callAnthropic(prompt, model = "claude-3-5-sonnet-20241022") {
    return res.content?.[0]?.text || "";
  }
 
- async function callOllama(prompt, model = "llama3") {
+ async function callGemini(prompt, model) {
+   model = model || getDefaultModel("gemini");
+   const apiKey = process.env.GEMINI_API_KEY;
+   if (!apiKey)
+     throw new Error(
+       "GEMINI_API_KEY environment variable is not set.\n" +
+         " Get a free key at https://aistudio.google.com/app/apikey",
+     );
+
+   const res = await httpsPost(
+     "generativelanguage.googleapis.com",
+     `/v1beta/models/${model}:generateContent?key=${apiKey}`,
+     { "Content-Type": "application/json" },
+     {
+       contents: [{ parts: [{ text: prompt }] }],
+       generationConfig: { temperature: 0.3, maxOutputTokens: 4096 },
+     },
+   );
+   return res.candidates?.[0]?.content?.parts?.[0]?.text || "";
+ }
+
+ async function callGrok(prompt, model) {
+   model = model || getDefaultModel("grok");
+   const apiKey = process.env.XAI_API_KEY;
+   if (!apiKey)
+     throw new Error(
+       "XAI_API_KEY environment variable is not set.\n" +
+         " Get a key at https://console.x.ai",
+     );
+
+   const res = await httpsPost(
+     "api.x.ai",
+     "/v1/chat/completions",
+     {
+       "Content-Type": "application/json",
+       Authorization: `Bearer ${apiKey}`,
+     },
+     {
+       model,
+       messages: [{ role: "user", content: prompt }],
+       temperature: 0.3,
+       max_tokens: 4096,
+     },
+   );
+   return res.choices?.[0]?.message?.content || "";
+ }
+
+ async function callOllama(prompt, model) {
+   model = model || getDefaultModel("ollama");
    // Ollama runs locally on port 11434 — use http
    const http = require("http");
    const body = JSON.stringify({ model, prompt, stream: false });
@@ -339,16 +391,20 @@ async function enhanceWithAI(analysis, fileTree, rootDir, options = {}) {
 
    switch (provider.toLowerCase()) {
      case "groq":
-       return await callGroq(prompt, model || "llama-3.3-70b-versatile");
+       return await callGroq(prompt, model);
      case "openai":
-       return await callOpenAI(prompt, model || "gpt-4o");
+       return await callOpenAI(prompt, model);
      case "anthropic":
-       return await callAnthropic(prompt, model || "claude-3-5-sonnet-20241022");
+       return await callAnthropic(prompt, model);
+     case "gemini":
+       return await callGemini(prompt, model);
+     case "grok":
+       return await callGrok(prompt, model);
      case "ollama":
-       return await callOllama(prompt, model || "llama3");
+       return await callOllama(prompt, model);
      default:
        throw new Error(
-         `Unknown provider "${provider}". Use: groq (default), openai, anthropic, or ollama`,
+         `Unknown provider "${provider}". Available: ${getAllProviders().join(", ")}`,
        );
    }
  }
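With the hard-coded default-model parameters removed, each `call*()` helper now falls back to `getDefaultModel(provider)` from `src/models.js`, and `enhanceWithAI` simply forwards whatever `model` it was (or was not) given. A minimal usage sketch, assuming `analysis`, `fileTree` and `rootDir` values produced elsewhere in the package and an options object of the `{ provider, model }` shape suggested by the call in `src/index.js`:

```js
// Sketch only: mirrors how src/index.js calls the refactored dispatcher.
const { enhanceWithAI } = require("./ai");

async function generateAiContext(analysis, fileTree, rootDir) {
  // Omitting `model` lets the provider helper resolve its default via
  // getDefaultModel(); for "gemini" that is "gemini-2.5-flash".
  return enhanceWithAI(analysis, fileTree, rootDir, {
    provider: "gemini",
    // model: "gemini-2.5-pro", // optional explicit override
  });
}
```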
package/src/index.js CHANGED
@@ -13,6 +13,32 @@ const {
    parseExistingContext,
  } = require("./generator");
  const { enhanceWithAI } = require("./ai");
+ const {
+   getAvailableModels,
+   getDefaultModel,
+   getAllProviders,
+ } = require("./models");
+
+ // ── Load env files (no external deps) ────────────────────────
+ // Reads KEY=VALUE lines from a file and sets missing process.env vars
+ function loadEnvFile(filePath) {
+   if (!fs.existsSync(filePath)) return;
+   const lines = fs.readFileSync(filePath, "utf-8").split("\n");
+   for (const raw of lines) {
+     const line = raw.trim();
+     if (!line || line.startsWith("#")) continue;
+     const eq = line.indexOf("=");
+     if (eq === -1) continue;
+     const key = line.slice(0, eq).trim();
+     const val = line.slice(eq + 1).trim().replace(/^['"]|['"]$/g, "");
+     if (key && !(key in process.env)) process.env[key] = val;
+   }
+ }
+
+ // Priority (lowest → highest): ~/.codebrief → .env → .env.local
+ loadEnvFile(path.join(require("os").homedir(), ".codebrief"));
+ loadEnvFile(path.join(process.cwd(), ".env"));
+ loadEnvFile(path.join(process.cwd(), ".env.local"));
 
  // ── Simple CLI argument parser (no dependencies needed) ───────
  const args = process.argv.slice(2);
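As written, `loadEnvFile` only assigns keys that are not already present in `process.env`, so variables exported in the shell always win and, among the three files, the first one loaded that defines a key takes effect. A minimal sketch (not part of the package) that replays the same parsing rules on an in-memory sample:

```js
// Sketch: applies loadEnvFile's parsing rules to a sample string instead of a file.
const sample = [
  "# comments and blank lines are skipped",
  "GROQ_API_KEY=gsk_example",
  'GEMINI_API_KEY="surrounding quotes are stripped"',
  "MALFORMED LINE WITHOUT AN EQUALS SIGN", // ignored: no "=" separator
].join("\n");

const parsed = {};
for (const raw of sample.split("\n")) {
  const line = raw.trim();
  if (!line || line.startsWith("#")) continue;
  const eq = line.indexOf("=");
  if (eq === -1) continue;
  const key = line.slice(0, eq).trim();
  const val = line.slice(eq + 1).trim().replace(/^['"]|['"]$/g, "");
  if (key && !(key in parsed)) parsed[key] = val;
}

console.log(parsed);
// { GROQ_API_KEY: 'gsk_example', GEMINI_API_KEY: 'surrounding quotes are stripped' }
```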
@@ -42,15 +68,18 @@ if (hasFlag("--help") || hasFlag("-h")) {
  --update Re-generate but preserve your Architecture Notes & Never Do sections
  --init Interactively fill in Architecture Notes & Never Do after generation
  --ai Use AI to generate a deeply detailed CONTEXT.md (requires API key)
- --provider <p> AI provider: groq (default, free), openai, anthropic, or ollama
+ --provider <p> AI provider: groq (default, free), openai, anthropic, gemini, grok, ollama
  --model <m> Override the default model for the chosen provider
+ --models List all available models for the chosen provider (use with --provider)
  --help Show this help message
  --version Show version
 
  AI environment variables:
  GROQ_API_KEY Required for --provider groq (FREE at console.groq.com)
+ GEMINI_API_KEY Required for --provider gemini (FREE at aistudio.google.com)
  OPENAI_API_KEY Required for --provider openai
  ANTHROPIC_API_KEY Required for --provider anthropic
+ XAI_API_KEY Required for --provider grok (console.x.ai)
  (Ollama needs no key — just run ollama locally)
 
  Examples:
@@ -73,6 +102,34 @@ if (hasFlag("--version") || hasFlag("-v")) {
    process.exit(0);
  }
 
+ if (hasFlag("--models")) {
+   const provider = getFlagValue("--provider", null);
+   if (!provider) {
+     console.log("\n Usage: codebrief --models --provider <name>\n");
+     console.log(
+       " Available providers: " + getAllProviders().join(", ") + "\n",
+     );
+   } else {
+     const list = getAvailableModels(provider);
+     if (list.length === 0) {
+       console.log(
+         `\n Unknown provider "${provider}". Available: ${getAllProviders().join(", ")}\n`,
+       );
+     } else {
+       const def = getDefaultModel(provider);
+       console.log(`\n Models for ${provider}:\n`);
+       list.forEach((m) => {
+         const tag = m === def ? " (default)" : "";
+         console.log(` ${m}${tag}`);
+       });
+       console.log(
+         `\n Usage: codebrief --ai --provider ${provider} --model <model>\n`,
+       );
+     }
+   }
+   process.exit(0);
+ }
+
  const maxDepth = parseInt(getFlagValue("--depth", "4"), 10);
  const skipCursor = hasFlag("--no-cursor");
  const includeVSCode = hasFlag("--vscode");
@@ -278,7 +335,23 @@ async function main() {
    print("");
    const providerLabel =
      aiProvider.charAt(0).toUpperCase() + aiProvider.slice(1);
-   startSpinner(`Enhancing CONTEXT.md with AI (${providerLabel})...`);
+
+   // Validate --model against known list and warn if unrecognised
+   if (aiModel) {
+     const known = getAvailableModels(aiProvider);
+     if (known.length > 0 && !known.includes(aiModel)) {
+       print(
+         `${c.yellow}⚠${c.reset} Unknown model "${aiModel}" for ${providerLabel}.`,
+       );
+       print(` Known models: ${known.join(", ")}`);
+       print(` ${dim("Proceeding anyway — the provider may accept it.")}`);
+       print("");
+     }
+   }
+
+   startSpinner(
+     `Enhancing CONTEXT.md with AI (${providerLabel} / ${aiModel || getDefaultModel(aiProvider)})...`,
+   );
    try {
      const enhanced = await enhanceWithAI(analysis, fileTree, rootDir, {
        provider: aiProvider,
@@ -307,14 +380,22 @@ async function main() {
          ? "ANTHROPIC_API_KEY"
          : aiProvider === "openai"
            ? "OPENAI_API_KEY"
-           : "GROQ_API_KEY";
+           : aiProvider === "gemini"
+             ? "GEMINI_API_KEY"
+             : aiProvider === "grok"
+               ? "XAI_API_KEY"
+               : "GROQ_API_KEY";
    const keyUrl =
      aiProvider === "anthropic"
        ? "https://console.anthropic.com"
        : aiProvider === "openai"
          ? "https://platform.openai.com/api-keys"
-         : "https://console.groq.com";
-   const isFree = aiProvider === "groq";
+         : aiProvider === "gemini"
+           ? "https://aistudio.google.com/app/apikey"
+           : aiProvider === "grok"
+             ? "https://console.x.ai"
+             : "https://console.groq.com";
+   const isFree = aiProvider === "groq" || aiProvider === "gemini";
 
    process.stdout.write(
      `\r${c.yellow}⚠${c.reset} No API key found for ${bold(aiProvider)}.\n\n`,
package/src/models.js ADDED
@@ -0,0 +1,74 @@
+ "use strict";
+
+ const MODELS = {
+   groq: {
+     default: "llama-3.3-70b-versatile",
+     available: [
+       "meta-llama/llama-4-maverick-17b-128e-instruct",
+       "meta-llama/llama-4-scout-17b-16e-instruct",
+       "llama-3.3-70b-versatile",
+       "llama-3.1-8b-instant",
+       "compound-beta",
+     ],
+   },
+
+   gemini: {
+     default: "gemini-2.5-flash",
+     available: [
+       "gemini-2.5-pro",
+       "gemini-2.5-flash",
+       "gemini-2.0-flash",
+       "gemini-2.0-flash-lite",
+     ],
+   },
+
+   openai: {
+     default: "gpt-4o",
+     available: ["gpt-4o", "gpt-4o-mini", "o3", "o4-mini"],
+   },
+
+   anthropic: {
+     default: "claude-sonnet-4-5",
+     available: [
+       "claude-opus-4-5",
+       "claude-sonnet-4-5",
+       "claude-haiku-4-5-20251001",
+     ],
+   },
+
+   grok: {
+     default: "grok-4-fast",
+     available: ["grok-4", "grok-4-fast", "grok-3", "grok-3-mini"],
+   },
+
+   ollama: {
+     default: "llama3.3",
+     available: [
+       "llama4",
+       "llama3.3",
+       "gemma3",
+       "qwen3",
+       "mistral",
+       "codellama",
+     ],
+   },
+ };
+
+ function getDefaultModel(provider) {
+   return MODELS[provider.toLowerCase()]?.default || null;
+ }
+
+ function getAvailableModels(provider) {
+   return MODELS[provider.toLowerCase()]?.available || [];
+ }
+
+ function getAllProviders() {
+   return Object.keys(MODELS);
+ }
+
+ module.exports = {
+   MODELS,
+   getDefaultModel,
+   getAvailableModels,
+   getAllProviders,
+ };
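For reference, a small usage sketch of the helpers this new module exports; lookups are case-insensitive, and unknown providers yield `null` or an empty list. Supporting a newly released model is just a matter of appending it to the relevant `available` array (and updating `default` if it should become the fallback):

```js
// Usage sketch for src/models.js; expected values are taken from the MODELS table above.
const {
  getAllProviders,
  getDefaultModel,
  getAvailableModels,
} = require("./models");

console.log(getAllProviders());
// [ 'groq', 'gemini', 'openai', 'anthropic', 'grok', 'ollama' ]

console.log(getDefaultModel("Gemini"));  // 'gemini-2.5-flash' (case-insensitive lookup)
console.log(getDefaultModel("mistral")); // null (not a registered provider)

console.log(getAvailableModels("grok"));
// [ 'grok-4', 'grok-4-fast', 'grok-3', 'grok-3-mini' ]
console.log(getAvailableModels("mistral"));
// []
```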