@chappibunny/repolens 1.7.0 → 1.8.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -2,6 +2,38 @@
2
2
 
3
3
  All notable changes to RepoLens will be documented in this file.
4
4
 
5
+ ## 1.8.0
6
+
7
+ ### ✨ GitHub Models — First-Class AI Provider
8
+
9
+ - **GitHub Models provider**: Added `github` as a native AI provider. Uses the OpenAI-compatible endpoint at `models.inference.ai.github.com` with automatic `GITHUB_TOKEN` authentication — no extra API keys needed in GitHub Actions.
10
+ - **Zero-config AI in CI**: When `ai.provider: github` is set in `.repolens.yml`, RepoLens uses the default `GITHUB_TOKEN` injected by GitHub Actions. No secrets to create or manage.
11
+ - **Config-driven AI settings**: `ai.enabled`, `ai.provider`, `ai.model`, `ai.temperature`, and `ai.base_url` in `.repolens.yml` are now fully respected at runtime (env vars still take precedence). Previously these config values were ignored.
12
+ - **Init wizard fixes**: Provider selection now uses correct runtime values (`github`, `openai_compatible`, `anthropic`, `google`) instead of mismatched labels. The wizard now emits `ai.provider` to the generated YAML. Added `github_wiki` to publisher choices.
13
+ - **Demo AI upsell**: `repolens demo` now shows a hint about GitHub Models (free) when AI is not enabled, guiding users to `repolens init --interactive`.
14
+ - **Doctor validation**: `repolens doctor` now checks for `GITHUB_TOKEN` when provider is `github`, and `REPOLENS_AI_API_KEY` for other providers.
15
+
16
+ ### 📖 Documentation
17
+
18
+ - Updated AI.md, ENVIRONMENT.md, ONBOARDING.md, README.md with GitHub Models as recommended provider
19
+ - Init templates (.env.example, workflow, README) include GitHub Models setup instructions
20
+ - Onboarding shows dual-track: Option A (GitHub Models, free) and Option B (OpenAI / other)
21
+
22
+ ### 🧪 Tests
23
+
24
+ - 24 new tests: provider config fallbacks, `isAIEnabled` with config, `getAIConfig` with config, GitHub Models defaults, init wizard content, doctor env validation
25
+ - **374 tests passing** across 22 test files
26
+
27
+ ## 1.7.1
28
+
29
+ ### 🛡️ AI Output Guardrails
30
+
31
+ - **System prompt hardening**: Added anti-conversational rules — AI now instructed to never offer additional work, never ask questions, never use second-person address (except onboarding), and to back every claim with context evidence.
32
+ - **Evidence-only constraints**: Architecture weaknesses, exec summary risks, and onboarding complexity hotspots now require concrete evidence from context data (cycle counts, coupling metrics, orphan files). No speculation.
33
+ - **Output sanitizer**: New `sanitizeAIOutput()` strips conversational patterns (`"If you want"`, `"I can produce"`, `"Shall I"`, `"Let me know"`) from both structured JSON and plain-text AI responses before they reach documents.
34
+ - **Structured renderer sanitization**: `renderArchitectureOverviewJSON` now sanitizes weakness bullet items, removing conversational lines even if they survive prompt-level constraints.
35
+ - **Dual-path coverage**: Sanitization applies to both structured JSON mode (Path A) and plain-text fallback (Path B), closing all AI output paths.
36
+
5
37
  ## 1.7.0
6
38
 
7
39
  ### ✨ Features
package/README.md CHANGED
@@ -83,7 +83,7 @@ Run `npx @chappibunny/repolens migrate` to automatically update your workflow fi
83
83
  | **Everyone** | System Overview · Developer Onboarding · Change Impact |
84
84
  | **Engineers** | Architecture Overview · Module Catalog · API Surface · Route Map · System Map |
85
85
 
86
- **Two modes:** Deterministic (free, fast, always works) or AI-Enhanced (optional — OpenAI, Anthropic, Azure, Ollama).
86
+ **Two modes:** Deterministic (free, fast, always works) or AI-Enhanced (optional — GitHub Models, OpenAI, Anthropic, Google, Azure, Ollama).
87
87
 
88
88
  ---
89
89
 
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@chappibunny/repolens",
3
- "version": "1.7.0",
3
+ "version": "1.8.0",
4
4
  "description": "AI-assisted documentation intelligence system for technical and non-technical audiences",
5
5
  "license": "MIT",
6
6
  "type": "module",
@@ -18,11 +18,32 @@ import {
18
18
  import { identifyFlowDependencies } from "../analyzers/flow-inference.js";
19
19
  import { info, warn } from "../utils/logger.js";
20
20
 
21
+ // Strip conversational patterns that LLMs sometimes inject into documentation
22
+ const CONVERSATIONAL_PATTERNS = [
23
+ /^(?:[-*]\s*)?if you (?:want|need|would like|prefer)[^.\n]*[.\n]/gmi,
24
+ /^(?:[-*]\s*)?(?:shall|should) I [^.\n]*[.\n]/gmi,
25
+ /^(?:[-*]\s*)?(?:let me know|feel free)[^.\n]*[.\n]/gmi,
26
+ /^(?:[-*]\s*)?I can (?:also |additionally )?(?:produce|create|generate|help|provide|suggest|recommend)[^.\n]*[.\n]/gmi,
27
+ /^(?:[-*]\s*)?(?:would you like|do you want)[^.\n]*[.\n]/gmi,
28
+ /^(?:[-*]\s*)?(?:here is|here's) (?:a |the )?(?:summary|overview|breakdown)[^.\n]*:\s*$/gmi,
29
+ ];
30
+
31
+ function sanitizeAIOutput(text) {
32
+ if (!text || typeof text !== "string") return text;
33
+ let cleaned = text;
34
+ for (const pattern of CONVERSATIONAL_PATTERNS) {
35
+ cleaned = cleaned.replace(pattern, "");
36
+ }
37
+ // Collapse multiple blank lines left by removals
38
+ cleaned = cleaned.replace(/\n{3,}/g, "\n\n").trim();
39
+ return cleaned;
40
+ }
41
+
21
42
  /**
22
43
  * Try structured JSON mode first, fall back to plain-text AI, then deterministic.
23
44
  */
24
- async function generateWithStructuredFallback(key, promptText, maxTokens, fallbackFn) {
25
- if (!isAIEnabled()) return fallbackFn();
45
+ async function generateWithStructuredFallback(key, promptText, maxTokens, fallbackFn, config) {
46
+ if (!isAIEnabled(config)) return fallbackFn();
26
47
 
27
48
  const schema = AI_SCHEMAS[key];
28
49
 
@@ -37,11 +58,12 @@ async function generateWithStructuredFallback(key, promptText, maxTokens, fallba
37
58
  maxTokens,
38
59
  jsonMode: true,
39
60
  jsonSchema: schema,
61
+ config,
40
62
  });
41
63
 
42
64
  if (result.success && result.parsed) {
43
65
  const md = renderStructuredToMarkdown(key, result.parsed);
44
- if (md) return md;
66
+ if (md) return sanitizeAIOutput(md);
45
67
  }
46
68
  // If structured mode failed, fall through to plain-text
47
69
  warn(`Structured AI failed for ${key}, trying plain-text mode...`);
@@ -53,6 +75,7 @@ async function generateWithStructuredFallback(key, promptText, maxTokens, fallba
53
75
  system: SYSTEM_PROMPT,
54
76
  user: promptText,
55
77
  maxTokens,
78
+ config,
56
79
  });
57
80
 
58
81
  if (!result.success) {
@@ -60,60 +83,66 @@ async function generateWithStructuredFallback(key, promptText, maxTokens, fallba
60
83
  return fallbackFn();
61
84
  }
62
85
 
63
- return result.text;
86
+ return sanitizeAIOutput(result.text);
64
87
  }
65
88
 
66
- export async function generateExecutiveSummary(context, enrichment = {}) {
89
+ export async function generateExecutiveSummary(context, enrichment = {}, config) {
67
90
  return generateWithStructuredFallback(
68
91
  "executive_summary",
69
92
  createExecutiveSummaryPrompt(context),
70
93
  1500,
71
94
  () => getFallbackExecutiveSummary(context, enrichment),
95
+ config,
72
96
  );
73
97
  }
74
98
 
75
- export async function generateSystemOverview(context, enrichment = {}) {
99
+ export async function generateSystemOverview(context, enrichment = {}, config) {
76
100
  return generateWithStructuredFallback(
77
101
  "system_overview",
78
102
  createSystemOverviewPrompt(context),
79
103
  1200,
80
104
  () => getFallbackSystemOverview(context, enrichment),
105
+ config,
81
106
  );
82
107
  }
83
108
 
84
- export async function generateBusinessDomains(context, enrichment = {}) {
109
+ export async function generateBusinessDomains(context, enrichment = {}, config) {
85
110
  return generateWithStructuredFallback(
86
111
  "business_domains",
87
112
  createBusinessDomainsPrompt(context),
88
113
  2000,
89
114
  () => getFallbackBusinessDomains(context, enrichment),
115
+ config,
90
116
  );
91
117
  }
92
118
 
93
- export async function generateArchitectureOverview(context, enrichment = {}) {
119
+ export async function generateArchitectureOverview(context, enrichment = {}, config) {
94
120
  return generateWithStructuredFallback(
95
121
  "architecture_overview",
96
122
  createArchitectureOverviewPrompt(context),
97
123
  1800,
98
124
  () => getFallbackArchitectureOverview(context, enrichment),
125
+ config,
99
126
  );
100
127
  }
101
128
 
102
- export async function generateDataFlows(flows, context, enrichment = {}) {
129
+ export async function generateDataFlows(flows, context, enrichment = {}, config) {
103
130
  return generateWithStructuredFallback(
104
131
  "data_flows",
105
132
  createDataFlowsPrompt(flows, context),
106
133
  1800,
107
134
  () => getFallbackDataFlows(flows, context, enrichment),
135
+ config,
108
136
  );
109
137
  }
110
138
 
111
- export async function generateDeveloperOnboarding(context, enrichment = {}) {
139
+ export async function generateDeveloperOnboarding(context, enrichment = {}, config) {
112
140
  return generateWithStructuredFallback(
113
141
  "developer_onboarding",
114
142
  createDeveloperOnboardingPrompt(context),
115
143
  2200,
116
144
  () => getFallbackDeveloperOnboarding(context, enrichment),
145
+ config,
117
146
  );
118
147
  }
119
148
 
package/src/ai/prompts.js CHANGED
@@ -57,7 +57,12 @@ Rules:
57
57
  - Do not mention AI, LLMs, or that you are an assistant.
58
58
  - No markdown tables unless specifically requested.
59
59
  - Use simple formatting: headings, paragraphs, lists.
60
- - Maximum 2 heading levels deep within sections.`;
60
+ - Maximum 2 heading levels deep within sections.
61
+ - You are producing a static document, not participating in a conversation.
62
+ - Never offer to do additional work (no "If you want", "I can also", "Let me know", "Shall I").
63
+ - Never ask the reader questions or invite follow-up.
64
+ - Never address the reader in second person ("you") unless the document type requires it (e.g. onboarding).
65
+ - Every claim must be supported by concrete evidence from the supplied context data.`;
61
66
 
62
67
  export function createExecutiveSummaryPrompt(context) {
63
68
  return `Write an executive summary for a mixed audience of technical and non-technical readers.
@@ -70,7 +75,7 @@ Requirements:
70
75
  - Explain the main system areas using the domain information.
71
76
  - Explain the business capabilities implied by the codebase structure.
72
77
  - Mention key external dependencies only if they are present in the context.
73
- - Mention architectural or operational risks if they are strongly supported by the context.
78
+ - Mention architectural or operational risks only if they are directly supported by concrete data in the context (e.g. cycle counts, orphan files, coupling metrics).
74
79
  - Do not mention file counts more than once.
75
80
  - Maximum 500 words.
76
81
  - Use this structure:
@@ -89,7 +94,9 @@ Requirements:
89
94
 
90
95
  ## Operational and architectural risks
91
96
 
92
- ## Recommended focus areas`;
97
+ ## Recommended focus areas
98
+
99
+ IMPORTANT: Only list risks and focus areas that are directly evidenced by the context data. Do not speculate.`;
93
100
  }
94
101
 
95
102
  export function createSystemOverviewPrompt(context) {
@@ -183,7 +190,9 @@ Requirements:
183
190
 
184
191
  ## Architectural strengths
185
192
 
186
- ## Architectural weaknesses`;
193
+ ## Architectural weaknesses
194
+
195
+ IMPORTANT: Only list weaknesses that are directly evidenced by the context data (e.g. cycle counts, orphan files, high coupling metrics, missing layers). Do not speculate about what the system lacks.`;
187
196
  }
188
197
 
189
198
  export function createDataFlowsPrompt(flows, context) {
@@ -249,7 +258,9 @@ Requirements:
249
258
 
250
259
  ## What to understand first
251
260
 
252
- ## Known complexity hotspots`;
261
+ ## Known complexity hotspots
262
+
263
+ IMPORTANT: Only cite complexity hotspots that are supported by concrete evidence in the context (e.g. high import counts, circular dependencies, large file counts). Do not speculate about what might be complex.`;
253
264
  }
254
265
 
255
266
  export function createModuleSummaryPrompt(module, context) {
@@ -520,12 +531,22 @@ function renderBusinessDomainsJSON(d) {
520
531
  return md;
521
532
  }
522
533
 
534
+ function sanitizeBulletList(val) {
535
+ const raw = toBulletList(val);
536
+ if (!raw) return raw;
537
+ // Strip conversational lines from bullet lists
538
+ return raw.split("\n").filter(line => {
539
+ const lower = line.toLowerCase();
540
+ return !/(^|\s)(if you (?:want|need|would)|shall i |let me know|i can (?:also )?(?:produce|create|generate|help)|would you like|do you want|feel free)/i.test(lower);
541
+ }).join("\n");
542
+ }
543
+
523
544
  function renderArchitectureOverviewJSON(d) {
524
545
  let md = `# Architecture Overview\n\n`;
525
546
  md += `## Architecture Style\n\n${safeStr(d.style)}\n\n`;
526
547
  md += `## Layers\n\n${toHeadingSections(d.layers)}\n\n`;
527
- md += `## Architectural Strengths\n\n${toBulletList(d.strengths)}\n\n`;
528
- md += `## Architectural Weaknesses\n\n${toBulletList(d.weaknesses)}\n`;
548
+ md += `## Architectural Strengths\n\n${sanitizeBulletList(d.strengths)}\n\n`;
549
+ md += `## Architectural Weaknesses\n\n${sanitizeBulletList(d.weaknesses)}\n`;
529
550
  return md;
530
551
  }
531
552
 
@@ -20,11 +20,13 @@ export async function generateText({ system, user, temperature, maxTokens, confi
20
20
  }
21
21
 
22
22
  // Get provider configuration (env vars take precedence, then config, then defaults)
23
- const provider = process.env.REPOLENS_AI_PROVIDER || "openai_compatible";
24
- const baseUrl = process.env.REPOLENS_AI_BASE_URL;
25
- const apiKey = process.env.REPOLENS_AI_API_KEY;
26
- const model = process.env.REPOLENS_AI_MODEL || "gpt-5-mini";
27
- const timeoutMs = parseInt(process.env.REPOLENS_AI_TIMEOUT_MS || DEFAULT_TIMEOUT_MS);
23
+ const provider = process.env.REPOLENS_AI_PROVIDER || aiConfig.provider || "openai_compatible";
24
+ const baseUrl = process.env.REPOLENS_AI_BASE_URL || aiConfig.base_url;
25
+ // For "github" provider, fall back to GITHUB_TOKEN when no explicit AI key is set
26
+ const apiKey = process.env.REPOLENS_AI_API_KEY
27
+ || (provider === "github" ? process.env.GITHUB_TOKEN : undefined);
28
+ const model = process.env.REPOLENS_AI_MODEL || aiConfig.model || getDefaultModel(provider);
29
+ const timeoutMs = parseInt(process.env.REPOLENS_AI_TIMEOUT_MS || aiConfig.timeout_ms || DEFAULT_TIMEOUT_MS);
28
30
 
29
31
  // Use config values as fallback for maxTokens; temperature only when explicitly set
30
32
  const resolvedTemp = temperature ?? aiConfig.temperature ?? undefined;
@@ -140,6 +142,18 @@ function validateSchema(obj, schema) {
140
142
  return null;
141
143
  }
142
144
 
145
+ /**
146
+ * Get default model for a provider.
147
+ */
148
+ function getDefaultModel(provider) {
149
+ switch (provider) {
150
+ case "anthropic": return "claude-sonnet-4-20250514";
151
+ case "google": return "gemini-pro";
152
+ case "github": return "gpt-4o-mini";
153
+ default: return "gpt-5-mini";
154
+ }
155
+ }
156
+
143
157
  /**
144
158
  * Get default base URL for a provider.
145
159
  */
@@ -148,6 +162,7 @@ function getDefaultBaseUrl(provider) {
148
162
  case "anthropic": return "https://api.anthropic.com";
149
163
  case "azure": return process.env.REPOLENS_AI_BASE_URL || "https://api.openai.com/v1";
150
164
  case "google": return "https://generativelanguage.googleapis.com";
165
+ case "github": return "https://models.inference.ai.github.com/v1";
151
166
  default: return "https://api.openai.com/v1";
152
167
  }
153
168
  }
@@ -159,7 +174,7 @@ function getProviderAdapter(provider) {
159
174
  switch (provider) {
160
175
  case "anthropic": return callAnthropicAPI;
161
176
  case "google": return callGoogleAPI;
162
- // "openai_compatible" and "azure" both use the OpenAI format
177
+ // "openai_compatible", "azure", and "github" all use the OpenAI chat/completions format
163
178
  default: return callOpenAICompatibleAPI;
164
179
  }
165
180
  }
@@ -334,21 +349,21 @@ async function callGoogleAPI({ baseUrl, apiKey, model, system, user, temperature
334
349
  });
335
350
  }
336
351
 
337
- export function isAIEnabled() {
338
- return process.env.REPOLENS_AI_ENABLED === "true";
352
+ export function isAIEnabled(config) {
353
+ return process.env.REPOLENS_AI_ENABLED === "true" || config?.ai?.enabled === true;
339
354
  }
340
355
 
341
- export function getAIConfig() {
342
- const provider = process.env.REPOLENS_AI_PROVIDER || "openai_compatible";
343
- const defaultModel = provider === "anthropic" ? "claude-sonnet-4-20250514"
344
- : provider === "google" ? "gemini-pro"
345
- : "gpt-5-mini";
356
+ export function getAIConfig(config) {
357
+ const aiConfig = config?.ai || {};
358
+ const provider = process.env.REPOLENS_AI_PROVIDER || aiConfig.provider || "openai_compatible";
359
+ const hasApiKey = !!(process.env.REPOLENS_AI_API_KEY
360
+ || (provider === "github" ? process.env.GITHUB_TOKEN : undefined));
346
361
  return {
347
- enabled: isAIEnabled(),
362
+ enabled: isAIEnabled(config),
348
363
  provider,
349
- model: process.env.REPOLENS_AI_MODEL || defaultModel,
350
- hasApiKey: !!process.env.REPOLENS_AI_API_KEY,
351
- temperature: process.env.REPOLENS_AI_TEMPERATURE ? parseFloat(process.env.REPOLENS_AI_TEMPERATURE) : undefined,
352
- maxTokens: parseInt(process.env.REPOLENS_AI_MAX_TOKENS || DEFAULT_MAX_TOKENS)
364
+ model: process.env.REPOLENS_AI_MODEL || aiConfig.model || getDefaultModel(provider),
365
+ hasApiKey,
366
+ temperature: process.env.REPOLENS_AI_TEMPERATURE ? parseFloat(process.env.REPOLENS_AI_TEMPERATURE) : (aiConfig.temperature != null ? aiConfig.temperature : undefined),
367
+ maxTokens: parseInt(process.env.REPOLENS_AI_MAX_TOKENS || aiConfig.max_tokens || DEFAULT_MAX_TOKENS)
353
368
  };
354
369
  }
package/src/cli.js CHANGED
@@ -492,6 +492,12 @@ async function main() {
492
492
  info("Browse your docs: open the .repolens/ directory");
493
493
  info("\nTo publish to Notion, Confluence, or GitHub Wiki, run: repolens publish");
494
494
 
495
+ // Upsell AI enhancement when not already enabled
496
+ if (!cfg.ai?.enabled && process.env.REPOLENS_AI_ENABLED !== "true") {
497
+ info("\n💡 Want richer, AI-enhanced docs? Run: repolens init --interactive");
498
+ info(" Select GitHub Models (free) — uses your existing GITHUB_TOKEN, no extra keys needed.");
499
+ }
500
+
495
501
  printPerformanceSummary();
496
502
 
497
503
  trackUsage("demo", "success", {
@@ -189,16 +189,16 @@ async function generateDocument(docPlan, context) {
189
189
 
190
190
  switch (key) {
191
191
  case "executive_summary":
192
- return await generateExecutiveSummary(aiContext, { depGraph, flows });
192
+ return await generateExecutiveSummary(aiContext, { depGraph, flows }, config);
193
193
 
194
194
  case "system_overview":
195
- return await generateSystemOverview(aiContext, { depGraph });
195
+ return await generateSystemOverview(aiContext, { depGraph }, config);
196
196
 
197
197
  case "business_domains":
198
- return await generateBusinessDomains(aiContext, { depGraph });
198
+ return await generateBusinessDomains(aiContext, { depGraph }, config);
199
199
 
200
200
  case "architecture_overview":
201
- return await generateArchitectureOverview(aiContext, { depGraph, driftResult });
201
+ return await generateArchitectureOverview(aiContext, { depGraph, driftResult }, config);
202
202
 
203
203
  case "module_catalog":
204
204
  // Hybrid: deterministic skeleton + ownership info + dep-graph roles
@@ -213,7 +213,7 @@ async function generateDocument(docPlan, context) {
213
213
  return renderApiSurfaceOriginal(config, scanResult);
214
214
 
215
215
  case "data_flows":
216
- return await generateDataFlows(flows, aiContext, { depGraph, scanResult, moduleContext });
216
+ return await generateDataFlows(flows, aiContext, { depGraph, scanResult, moduleContext }, config);
217
217
 
218
218
  case "arch_diff":
219
219
  if (!diffData) {
@@ -226,7 +226,7 @@ async function generateDocument(docPlan, context) {
226
226
  return renderSystemMap(scanResult, config, depGraph);
227
227
 
228
228
  case "developer_onboarding":
229
- return await generateDeveloperOnboarding(aiContext, { flows, depGraph });
229
+ return await generateDeveloperOnboarding(aiContext, { flows, depGraph }, config);
230
230
 
231
231
  case "graphql_schema":
232
232
  return renderGraphQLSchema(graphqlResult);
package/src/doctor.js CHANGED
@@ -182,9 +182,16 @@ export async function runDoctor(targetDir = process.cwd()) {
182
182
  }
183
183
 
184
184
  if (cfg.ai?.enabled || process.env.REPOLENS_AI_ENABLED === "true") {
185
- envChecks.push(
186
- { key: "REPOLENS_AI_API_KEY", required: true, publisher: "AI" },
187
- );
185
+ const aiProvider = process.env.REPOLENS_AI_PROVIDER || "openai_compatible";
186
+ if (aiProvider === "github") {
187
+ envChecks.push(
188
+ { key: "GITHUB_TOKEN", required: true, publisher: "AI (GitHub Models)" },
189
+ );
190
+ } else {
191
+ envChecks.push(
192
+ { key: "REPOLENS_AI_API_KEY", required: true, publisher: "AI" },
193
+ );
194
+ }
188
195
  }
189
196
 
190
197
  if (envChecks.length === 0) {
package/src/init.js CHANGED
@@ -3,8 +3,13 @@ import path from "node:path";
3
3
  import { createInterface } from "node:readline/promises";
4
4
  import { info, warn } from "./utils/logger.js";
5
5
 
6
- const PUBLISHER_CHOICES = ["markdown", "notion", "confluence"];
7
- const AI_PROVIDERS = ["openai", "anthropic", "azure", "ollama"];
6
+ const PUBLISHER_CHOICES = ["markdown", "notion", "confluence", "github_wiki"];
7
+ const AI_PROVIDERS = [
8
+ { value: "github", label: "GitHub Models (free in GitHub Actions)" },
9
+ { value: "openai_compatible", label: "OpenAI / Compatible (GPT-5, GPT-4o, etc.)" },
10
+ { value: "anthropic", label: "Anthropic (Claude)" },
11
+ { value: "google", label: "Google (Gemini)" },
12
+ ];
8
13
  const SCAN_PRESETS = {
9
14
  nextjs: {
10
15
  include: [
@@ -84,6 +89,10 @@ jobs:
84
89
  CONFLUENCE_API_TOKEN: \${{ secrets.CONFLUENCE_API_TOKEN }}
85
90
  CONFLUENCE_SPACE_KEY: \${{ secrets.CONFLUENCE_SPACE_KEY }}
86
91
  CONFLUENCE_PARENT_PAGE_ID: \${{ secrets.CONFLUENCE_PARENT_PAGE_ID }}
92
+ # Uncomment to enable free AI-enhanced docs via GitHub Models:
93
+ # REPOLENS_AI_ENABLED: true
94
+ # REPOLENS_AI_PROVIDER: github
95
+ # GITHUB_TOKEN: \${{ secrets.GITHUB_TOKEN }}
87
96
  run: npx @chappibunny/repolens@latest publish
88
97
  `;
89
98
 
@@ -106,6 +115,11 @@ CONFLUENCE_PARENT_PAGE_ID=
106
115
  # REPOLENS_AI_BASE_URL=https://api.openai.com/v1
107
116
  # REPOLENS_AI_MODEL=gpt-5-mini
108
117
  # REPOLENS_AI_MAX_TOKENS=2000
118
+
119
+ # GitHub Models (free tier — zero-config in GitHub Actions)
120
+ # REPOLENS_AI_PROVIDER=github
121
+ # Uses GITHUB_TOKEN automatically — no separate API key needed
122
+ # REPOLENS_AI_MODEL=gpt-4o-mini
109
123
  `;
110
124
 
111
125
  const DEFAULT_REPOLENS_README = `# RepoLens Documentation
@@ -194,7 +208,20 @@ Adds 5 natural language documents readable by non-technical audiences:
194
208
 
195
209
  AI features add natural language explanations for non-technical stakeholders.
196
210
 
197
- 1. Get an OpenAI API key from https://platform.openai.com/api-keys
211
+ ### Option A: GitHub Models (Free — Recommended for GitHub Actions)
212
+
213
+ Every GitHub repo gets free access to AI models. In your workflow:
214
+ \`\`\`yaml
215
+ env:
216
+ REPOLENS_AI_ENABLED: true
217
+ REPOLENS_AI_PROVIDER: github
218
+ GITHUB_TOKEN: \${{ secrets.GITHUB_TOKEN }}
219
+ \`\`\`
220
+ No API key signup needed. Uses \`gpt-4o-mini\` by default.
221
+
222
+ ### Option B: OpenAI / Other Providers
223
+
224
+ 1. Get an API key from your chosen provider
198
225
  2. Add to your \`.env\` file:
199
226
  \`\`\`bash
200
227
  REPOLENS_AI_ENABLED=true
@@ -214,7 +241,7 @@ AI features add natural language explanations for non-technical stakeholders.
214
241
  developer_onboarding: true
215
242
  \`\`\`
216
243
 
217
- **Cost estimate**: $0.10-$0.40 per run for typical projects
244
+ **Cost estimate**: $0.10-$0.40 per run for typical projects (or free with GitHub Models)
218
245
 
219
246
  See [AI.md](https://github.com/CHAPIBUNNY/repolens/blob/main/AI.md) for full documentation
220
247
  - **Module Catalog** — Detected code modules
@@ -532,10 +559,15 @@ async function runInteractiveWizard(repoRoot) {
532
559
  let aiProvider = null;
533
560
  if (enableAi) {
534
561
  info("Select AI provider:");
535
- AI_PROVIDERS.forEach((p, i) => info(` ${i + 1}. ${p}`));
536
- const aiInput = (await ask(`Provider [1] (default: 1 openai): `)).trim() || "1";
562
+ AI_PROVIDERS.forEach((p, i) => info(` ${i + 1}. ${p.label}`));
563
+ const aiInput = (await ask(`Provider [1] (default: 1 GitHub Models — free): `)).trim() || "1";
537
564
  const idx = parseInt(aiInput, 10);
538
- aiProvider = AI_PROVIDERS[(idx >= 1 && idx <= AI_PROVIDERS.length) ? idx - 1 : 0];
565
+ const chosen = AI_PROVIDERS[(idx >= 1 && idx <= AI_PROVIDERS.length) ? idx - 1 : 0];
566
+ aiProvider = chosen.value;
567
+ if (aiProvider === "github") {
568
+ info("\n ✨ Great choice! GitHub Models uses your existing GITHUB_TOKEN — no extra API key needed.");
569
+ info(" Works automatically in GitHub Actions with the free tier.");
570
+ }
539
571
  }
540
572
 
541
573
  // 4. Scan preset
@@ -605,6 +637,9 @@ function buildWizardConfig(answers) {
605
637
  lines.push(`ai:`);
606
638
  lines.push(` enabled: true`);
607
639
  lines.push(` mode: hybrid`);
640
+ if (answers.aiProvider) {
641
+ lines.push(` provider: ${answers.aiProvider}`);
642
+ }
608
643
  lines.push(``);
609
644
  lines.push(`features:`);
610
645
  lines.push(` executive_summary: true`);
@@ -758,14 +793,18 @@ NOTION_VERSION=2022-06-28
758
793
  info("Next steps:");
759
794
  info(" 1. Review .repolens.yml to customize your documentation");
760
795
  info(" 2. Run 'npx @chappibunny/repolens publish' to generate your first docs (deterministic mode)");
761
- info(" 3. (Optional) Enable AI features by adding to .env:");
796
+ info(" 3. (Optional) Enable AI features:");
797
+ info(" ── FREE: GitHub Models (recommended for GitHub Actions) ──");
798
+ info(" REPOLENS_AI_ENABLED=true");
799
+ info(" REPOLENS_AI_PROVIDER=github");
800
+ info(" (Uses your GITHUB_TOKEN automatically — no API key signup needed)");
801
+ info(" ── Or: OpenAI / Anthropic / Google ──");
762
802
  info(" REPOLENS_AI_ENABLED=true");
763
803
  info(" REPOLENS_AI_API_KEY=sk-...");
764
804
  info(" See AI.md for full guide: https://github.com/CHAPIBUNNY/repolens/blob/main/AI.md");
765
805
  info(" 4. For GitHub Actions, add these repository secrets:");
766
806
  info(" - NOTION_TOKEN");
767
807
  info(" - NOTION_PARENT_PAGE_ID");
768
- info(" - REPOLENS_AI_API_KEY (if using AI features)");
769
808
  info(" 5. Commit the generated files (workflow will run automatically)");
770
809
  } else {
771
810
  info("Next steps:");
@@ -773,7 +812,12 @@ NOTION_VERSION=2022-06-28
773
812
  info(" 2. To enable Notion publishing:");
774
813
  info(" - Copy .env.example to .env and add your credentials, OR");
775
814
  info(" - Add GitHub secrets: NOTION_TOKEN, NOTION_PARENT_PAGE_ID");
776
- info(" 3. (Optional) Enable AI features by adding to .env:");
815
+ info(" 3. (Optional) Enable AI features:");
816
+ info(" ── FREE: GitHub Models (recommended for GitHub Actions) ──");
817
+ info(" REPOLENS_AI_ENABLED=true");
818
+ info(" REPOLENS_AI_PROVIDER=github");
819
+ info(" (Uses your GITHUB_TOKEN automatically — no API key signup needed)");
820
+ info(" ── Or: OpenAI / Anthropic / Google ──");
777
821
  info(" REPOLENS_AI_ENABLED=true");
778
822
  info(" REPOLENS_AI_API_KEY=sk-...");
779
823
  info(" See: https://github.com/CHAPIBUNNY/repolens/blob/main/AI.md");