@chappibunny/repolens 1.7.1 → 1.8.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +22 -0
- package/README.md +1 -1
- package/package.json +1 -1
- package/src/ai/generate-sections.js +16 -8
- package/src/ai/provider.js +33 -18
- package/src/cli.js +6 -0
- package/src/docs/generate-doc-set.js +6 -6
- package/src/doctor.js +10 -3
- package/src/init.js +54 -10
package/CHANGELOG.md
CHANGED
|
@@ -2,6 +2,28 @@
|
|
|
2
2
|
|
|
3
3
|
All notable changes to RepoLens will be documented in this file.
|
|
4
4
|
|
|
5
|
+
## 1.8.0
|
|
6
|
+
|
|
7
|
+
### ✨ GitHub Models — First-Class AI Provider
|
|
8
|
+
|
|
9
|
+
- **GitHub Models provider**: Added `github` as a native AI provider. Uses the OpenAI-compatible endpoint at `models.inference.ai.github.com` with automatic `GITHUB_TOKEN` authentication — no extra API keys needed in GitHub Actions.
|
|
10
|
+
- **Zero-config AI in CI**: When `ai.provider: github` is set in `.repolens.yml`, RepoLens uses the default `GITHUB_TOKEN` injected by GitHub Actions. No secrets to create or manage.
|
|
11
|
+
- **Config-driven AI settings**: `ai.enabled`, `ai.provider`, `ai.model`, `ai.temperature`, and `ai.base_url` in `.repolens.yml` are now fully respected at runtime (env vars still take precedence). Previously these config values were ignored.
|
|
12
|
+
- **Init wizard fixes**: Provider selection now uses correct runtime values (`github`, `openai_compatible`, `anthropic`, `google`) instead of mismatched labels. The wizard now emits `ai.provider` to the generated YAML. Added `github_wiki` to publisher choices.
|
|
13
|
+
- **Demo AI upsell**: `repolens demo` now shows a hint about GitHub Models (free) when AI is not enabled, guiding users to `repolens init --interactive`.
|
|
14
|
+
- **Doctor validation**: `repolens doctor` now checks for `GITHUB_TOKEN` when provider is `github`, and `REPOLENS_AI_API_KEY` for other providers.
|
|
15
|
+
|
|
16
|
+
### 📖 Documentation
|
|
17
|
+
|
|
18
|
+
- Updated AI.md, ENVIRONMENT.md, ONBOARDING.md, README.md with GitHub Models as recommended provider
|
|
19
|
+
- Init templates (.env.example, workflow, README) include GitHub Models setup instructions
|
|
20
|
+
- Onboarding shows dual-track: Option A (GitHub Models, free) and Option B (OpenAI / other)
|
|
21
|
+
|
|
22
|
+
### 🧪 Tests
|
|
23
|
+
|
|
24
|
+
- 24 new tests: provider config fallbacks, `isAIEnabled` with config, `getAIConfig` with config, GitHub Models defaults, init wizard content, doctor env validation
|
|
25
|
+
- **374 tests passing** across 22 test files
|
|
26
|
+
|
|
5
27
|
## 1.7.1
|
|
6
28
|
|
|
7
29
|
### 🛡️ AI Output Guardrails
|
package/README.md
CHANGED
|
@@ -83,7 +83,7 @@ Run `npx @chappibunny/repolens migrate` to automatically update your workflow fi
|
|
|
83
83
|
| **Everyone** | System Overview · Developer Onboarding · Change Impact |
|
|
84
84
|
| **Engineers** | Architecture Overview · Module Catalog · API Surface · Route Map · System Map |
|
|
85
85
|
|
|
86
|
-
**Two modes:** Deterministic (free, fast, always works) or AI-Enhanced (optional — OpenAI, Anthropic, Azure, Ollama).
|
|
86
|
+
**Two modes:** Deterministic (free, fast, always works) or AI-Enhanced (optional — GitHub Models, OpenAI, Anthropic, Google, Azure, Ollama).
|
|
87
87
|
|
|
88
88
|
---
|
|
89
89
|
|
package/package.json
CHANGED (version field 1.7.1 → 1.8.0; +1 −1 diff body omitted by the renderer)

package/src/ai/generate-sections.js
CHANGED
|
@@ -42,8 +42,8 @@ function sanitizeAIOutput(text) {
|
|
|
42
42
|
/**
|
|
43
43
|
* Try structured JSON mode first, fall back to plain-text AI, then deterministic.
|
|
44
44
|
*/
|
|
45
|
-
async function generateWithStructuredFallback(key, promptText, maxTokens, fallbackFn) {
|
|
46
|
-
if (!isAIEnabled()) return fallbackFn();
|
|
45
|
+
async function generateWithStructuredFallback(key, promptText, maxTokens, fallbackFn, config) {
|
|
46
|
+
if (!isAIEnabled(config)) return fallbackFn();
|
|
47
47
|
|
|
48
48
|
const schema = AI_SCHEMAS[key];
|
|
49
49
|
|
|
@@ -58,6 +58,7 @@ async function generateWithStructuredFallback(key, promptText, maxTokens, fallba
|
|
|
58
58
|
maxTokens,
|
|
59
59
|
jsonMode: true,
|
|
60
60
|
jsonSchema: schema,
|
|
61
|
+
config,
|
|
61
62
|
});
|
|
62
63
|
|
|
63
64
|
if (result.success && result.parsed) {
|
|
@@ -74,6 +75,7 @@ async function generateWithStructuredFallback(key, promptText, maxTokens, fallba
|
|
|
74
75
|
system: SYSTEM_PROMPT,
|
|
75
76
|
user: promptText,
|
|
76
77
|
maxTokens,
|
|
78
|
+
config,
|
|
77
79
|
});
|
|
78
80
|
|
|
79
81
|
if (!result.success) {
|
|
@@ -84,57 +86,63 @@ async function generateWithStructuredFallback(key, promptText, maxTokens, fallba
|
|
|
84
86
|
return sanitizeAIOutput(result.text);
|
|
85
87
|
}
|
|
86
88
|
|
|
87
|
-
export async function generateExecutiveSummary(context, enrichment = {}) {
|
|
89
|
+
export async function generateExecutiveSummary(context, enrichment = {}, config) {
|
|
88
90
|
return generateWithStructuredFallback(
|
|
89
91
|
"executive_summary",
|
|
90
92
|
createExecutiveSummaryPrompt(context),
|
|
91
93
|
1500,
|
|
92
94
|
() => getFallbackExecutiveSummary(context, enrichment),
|
|
95
|
+
config,
|
|
93
96
|
);
|
|
94
97
|
}
|
|
95
98
|
|
|
96
|
-
export async function generateSystemOverview(context, enrichment = {}) {
|
|
99
|
+
export async function generateSystemOverview(context, enrichment = {}, config) {
|
|
97
100
|
return generateWithStructuredFallback(
|
|
98
101
|
"system_overview",
|
|
99
102
|
createSystemOverviewPrompt(context),
|
|
100
103
|
1200,
|
|
101
104
|
() => getFallbackSystemOverview(context, enrichment),
|
|
105
|
+
config,
|
|
102
106
|
);
|
|
103
107
|
}
|
|
104
108
|
|
|
105
|
-
export async function generateBusinessDomains(context, enrichment = {}) {
|
|
109
|
+
export async function generateBusinessDomains(context, enrichment = {}, config) {
|
|
106
110
|
return generateWithStructuredFallback(
|
|
107
111
|
"business_domains",
|
|
108
112
|
createBusinessDomainsPrompt(context),
|
|
109
113
|
2000,
|
|
110
114
|
() => getFallbackBusinessDomains(context, enrichment),
|
|
115
|
+
config,
|
|
111
116
|
);
|
|
112
117
|
}
|
|
113
118
|
|
|
114
|
-
export async function generateArchitectureOverview(context, enrichment = {}) {
|
|
119
|
+
export async function generateArchitectureOverview(context, enrichment = {}, config) {
|
|
115
120
|
return generateWithStructuredFallback(
|
|
116
121
|
"architecture_overview",
|
|
117
122
|
createArchitectureOverviewPrompt(context),
|
|
118
123
|
1800,
|
|
119
124
|
() => getFallbackArchitectureOverview(context, enrichment),
|
|
125
|
+
config,
|
|
120
126
|
);
|
|
121
127
|
}
|
|
122
128
|
|
|
123
|
-
export async function generateDataFlows(flows, context, enrichment = {}) {
|
|
129
|
+
export async function generateDataFlows(flows, context, enrichment = {}, config) {
|
|
124
130
|
return generateWithStructuredFallback(
|
|
125
131
|
"data_flows",
|
|
126
132
|
createDataFlowsPrompt(flows, context),
|
|
127
133
|
1800,
|
|
128
134
|
() => getFallbackDataFlows(flows, context, enrichment),
|
|
135
|
+
config,
|
|
129
136
|
);
|
|
130
137
|
}
|
|
131
138
|
|
|
132
|
-
export async function generateDeveloperOnboarding(context, enrichment = {}) {
|
|
139
|
+
export async function generateDeveloperOnboarding(context, enrichment = {}, config) {
|
|
133
140
|
return generateWithStructuredFallback(
|
|
134
141
|
"developer_onboarding",
|
|
135
142
|
createDeveloperOnboardingPrompt(context),
|
|
136
143
|
2200,
|
|
137
144
|
() => getFallbackDeveloperOnboarding(context, enrichment),
|
|
145
|
+
config,
|
|
138
146
|
);
|
|
139
147
|
}
|
|
140
148
|
|
package/src/ai/provider.js
CHANGED
|
@@ -20,11 +20,13 @@ export async function generateText({ system, user, temperature, maxTokens, confi
|
|
|
20
20
|
}
|
|
21
21
|
|
|
22
22
|
// Get provider configuration (env vars take precedence, then config, then defaults)
|
|
23
|
-
const provider = process.env.REPOLENS_AI_PROVIDER || "openai_compatible";
|
|
24
|
-
const baseUrl = process.env.REPOLENS_AI_BASE_URL;
|
|
25
|
-
|
|
26
|
-
const
|
|
27
|
-
|
|
23
|
+
const provider = process.env.REPOLENS_AI_PROVIDER || aiConfig.provider || "openai_compatible";
|
|
24
|
+
const baseUrl = process.env.REPOLENS_AI_BASE_URL || aiConfig.base_url;
|
|
25
|
+
// For "github" provider, fall back to GITHUB_TOKEN when no explicit AI key is set
|
|
26
|
+
const apiKey = process.env.REPOLENS_AI_API_KEY
|
|
27
|
+
|| (provider === "github" ? process.env.GITHUB_TOKEN : undefined);
|
|
28
|
+
const model = process.env.REPOLENS_AI_MODEL || aiConfig.model || getDefaultModel(provider);
|
|
29
|
+
const timeoutMs = parseInt(process.env.REPOLENS_AI_TIMEOUT_MS || aiConfig.timeout_ms || DEFAULT_TIMEOUT_MS);
|
|
28
30
|
|
|
29
31
|
// Use config values as fallback for maxTokens; temperature only when explicitly set
|
|
30
32
|
const resolvedTemp = temperature ?? aiConfig.temperature ?? undefined;
|
|
@@ -140,6 +142,18 @@ function validateSchema(obj, schema) {
|
|
|
140
142
|
return null;
|
|
141
143
|
}
|
|
142
144
|
|
|
145
|
+
/**
|
|
146
|
+
* Get default model for a provider.
|
|
147
|
+
*/
|
|
148
|
+
function getDefaultModel(provider) {
|
|
149
|
+
switch (provider) {
|
|
150
|
+
case "anthropic": return "claude-sonnet-4-20250514";
|
|
151
|
+
case "google": return "gemini-pro";
|
|
152
|
+
case "github": return "gpt-4o-mini";
|
|
153
|
+
default: return "gpt-5-mini";
|
|
154
|
+
}
|
|
155
|
+
}
|
|
156
|
+
|
|
143
157
|
/**
|
|
144
158
|
* Get default base URL for a provider.
|
|
145
159
|
*/
|
|
@@ -148,6 +162,7 @@ function getDefaultBaseUrl(provider) {
|
|
|
148
162
|
case "anthropic": return "https://api.anthropic.com";
|
|
149
163
|
case "azure": return process.env.REPOLENS_AI_BASE_URL || "https://api.openai.com/v1";
|
|
150
164
|
case "google": return "https://generativelanguage.googleapis.com";
|
|
165
|
+
case "github": return "https://models.inference.ai.github.com/v1";
|
|
151
166
|
default: return "https://api.openai.com/v1";
|
|
152
167
|
}
|
|
153
168
|
}
|
|
@@ -159,7 +174,7 @@ function getProviderAdapter(provider) {
|
|
|
159
174
|
switch (provider) {
|
|
160
175
|
case "anthropic": return callAnthropicAPI;
|
|
161
176
|
case "google": return callGoogleAPI;
|
|
162
|
-
// "openai_compatible" and "
|
|
177
|
+
// "openai_compatible", "azure", and "github" all use the OpenAI chat/completions format
|
|
163
178
|
default: return callOpenAICompatibleAPI;
|
|
164
179
|
}
|
|
165
180
|
}
|
|
@@ -334,21 +349,21 @@ async function callGoogleAPI({ baseUrl, apiKey, model, system, user, temperature
|
|
|
334
349
|
});
|
|
335
350
|
}
|
|
336
351
|
|
|
337
|
-
export function isAIEnabled() {
|
|
338
|
-
return process.env.REPOLENS_AI_ENABLED === "true";
|
|
352
|
+
export function isAIEnabled(config) {
|
|
353
|
+
return process.env.REPOLENS_AI_ENABLED === "true" || config?.ai?.enabled === true;
|
|
339
354
|
}
|
|
340
355
|
|
|
341
|
-
export function getAIConfig() {
|
|
342
|
-
const
|
|
343
|
-
const
|
|
344
|
-
|
|
345
|
-
|
|
356
|
+
export function getAIConfig(config) {
|
|
357
|
+
const aiConfig = config?.ai || {};
|
|
358
|
+
const provider = process.env.REPOLENS_AI_PROVIDER || aiConfig.provider || "openai_compatible";
|
|
359
|
+
const hasApiKey = !!(process.env.REPOLENS_AI_API_KEY
|
|
360
|
+
|| (provider === "github" ? process.env.GITHUB_TOKEN : undefined));
|
|
346
361
|
return {
|
|
347
|
-
enabled: isAIEnabled(),
|
|
362
|
+
enabled: isAIEnabled(config),
|
|
348
363
|
provider,
|
|
349
|
-
model: process.env.REPOLENS_AI_MODEL ||
|
|
350
|
-
hasApiKey
|
|
351
|
-
temperature: process.env.REPOLENS_AI_TEMPERATURE ? parseFloat(process.env.REPOLENS_AI_TEMPERATURE) : undefined,
|
|
352
|
-
maxTokens: parseInt(process.env.REPOLENS_AI_MAX_TOKENS || DEFAULT_MAX_TOKENS)
|
|
364
|
+
model: process.env.REPOLENS_AI_MODEL || aiConfig.model || getDefaultModel(provider),
|
|
365
|
+
hasApiKey,
|
|
366
|
+
temperature: process.env.REPOLENS_AI_TEMPERATURE ? parseFloat(process.env.REPOLENS_AI_TEMPERATURE) : (aiConfig.temperature != null ? aiConfig.temperature : undefined),
|
|
367
|
+
maxTokens: parseInt(process.env.REPOLENS_AI_MAX_TOKENS || aiConfig.max_tokens || DEFAULT_MAX_TOKENS)
|
|
353
368
|
};
|
|
354
369
|
}
|
package/src/cli.js
CHANGED
|
@@ -492,6 +492,12 @@ async function main() {
|
|
|
492
492
|
info("Browse your docs: open the .repolens/ directory");
|
|
493
493
|
info("\nTo publish to Notion, Confluence, or GitHub Wiki, run: repolens publish");
|
|
494
494
|
|
|
495
|
+
// Upsell AI enhancement when not already enabled
|
|
496
|
+
if (!cfg.ai?.enabled && process.env.REPOLENS_AI_ENABLED !== "true") {
|
|
497
|
+
info("\n💡 Want richer, AI-enhanced docs? Run: repolens init --interactive");
|
|
498
|
+
info(" Select GitHub Models (free) — uses your existing GITHUB_TOKEN, no extra keys needed.");
|
|
499
|
+
}
|
|
500
|
+
|
|
495
501
|
printPerformanceSummary();
|
|
496
502
|
|
|
497
503
|
trackUsage("demo", "success", {
|
|
package/src/docs/generate-doc-set.js
CHANGED
|
@@ -189,16 +189,16 @@ async function generateDocument(docPlan, context) {
|
|
|
189
189
|
|
|
190
190
|
switch (key) {
|
|
191
191
|
case "executive_summary":
|
|
192
|
-
return await generateExecutiveSummary(aiContext, { depGraph, flows });
|
|
192
|
+
return await generateExecutiveSummary(aiContext, { depGraph, flows }, config);
|
|
193
193
|
|
|
194
194
|
case "system_overview":
|
|
195
|
-
return await generateSystemOverview(aiContext, { depGraph });
|
|
195
|
+
return await generateSystemOverview(aiContext, { depGraph }, config);
|
|
196
196
|
|
|
197
197
|
case "business_domains":
|
|
198
|
-
return await generateBusinessDomains(aiContext, { depGraph });
|
|
198
|
+
return await generateBusinessDomains(aiContext, { depGraph }, config);
|
|
199
199
|
|
|
200
200
|
case "architecture_overview":
|
|
201
|
-
return await generateArchitectureOverview(aiContext, { depGraph, driftResult });
|
|
201
|
+
return await generateArchitectureOverview(aiContext, { depGraph, driftResult }, config);
|
|
202
202
|
|
|
203
203
|
case "module_catalog":
|
|
204
204
|
// Hybrid: deterministic skeleton + ownership info + dep-graph roles
|
|
@@ -213,7 +213,7 @@ async function generateDocument(docPlan, context) {
|
|
|
213
213
|
return renderApiSurfaceOriginal(config, scanResult);
|
|
214
214
|
|
|
215
215
|
case "data_flows":
|
|
216
|
-
return await generateDataFlows(flows, aiContext, { depGraph, scanResult, moduleContext });
|
|
216
|
+
return await generateDataFlows(flows, aiContext, { depGraph, scanResult, moduleContext }, config);
|
|
217
217
|
|
|
218
218
|
case "arch_diff":
|
|
219
219
|
if (!diffData) {
|
|
@@ -226,7 +226,7 @@ async function generateDocument(docPlan, context) {
|
|
|
226
226
|
return renderSystemMap(scanResult, config, depGraph);
|
|
227
227
|
|
|
228
228
|
case "developer_onboarding":
|
|
229
|
-
return await generateDeveloperOnboarding(aiContext, { flows, depGraph });
|
|
229
|
+
return await generateDeveloperOnboarding(aiContext, { flows, depGraph }, config);
|
|
230
230
|
|
|
231
231
|
case "graphql_schema":
|
|
232
232
|
return renderGraphQLSchema(graphqlResult);
|
package/src/doctor.js
CHANGED
|
@@ -182,9 +182,16 @@ export async function runDoctor(targetDir = process.cwd()) {
|
|
|
182
182
|
}
|
|
183
183
|
|
|
184
184
|
if (cfg.ai?.enabled || process.env.REPOLENS_AI_ENABLED === "true") {
|
|
185
|
-
|
|
186
|
-
|
|
187
|
-
|
|
185
|
+
const aiProvider = process.env.REPOLENS_AI_PROVIDER || "openai_compatible";
|
|
186
|
+
if (aiProvider === "github") {
|
|
187
|
+
envChecks.push(
|
|
188
|
+
{ key: "GITHUB_TOKEN", required: true, publisher: "AI (GitHub Models)" },
|
|
189
|
+
);
|
|
190
|
+
} else {
|
|
191
|
+
envChecks.push(
|
|
192
|
+
{ key: "REPOLENS_AI_API_KEY", required: true, publisher: "AI" },
|
|
193
|
+
);
|
|
194
|
+
}
|
|
188
195
|
}
|
|
189
196
|
|
|
190
197
|
if (envChecks.length === 0) {
|
package/src/init.js
CHANGED
|
@@ -3,8 +3,13 @@ import path from "node:path";
|
|
|
3
3
|
import { createInterface } from "node:readline/promises";
|
|
4
4
|
import { info, warn } from "./utils/logger.js";
|
|
5
5
|
|
|
6
|
-
const PUBLISHER_CHOICES = ["markdown", "notion", "confluence"];
|
|
7
|
-
const AI_PROVIDERS = [
|
|
6
|
+
const PUBLISHER_CHOICES = ["markdown", "notion", "confluence", "github_wiki"];
|
|
7
|
+
const AI_PROVIDERS = [
|
|
8
|
+
{ value: "github", label: "GitHub Models (free in GitHub Actions)" },
|
|
9
|
+
{ value: "openai_compatible", label: "OpenAI / Compatible (GPT-5, GPT-4o, etc.)" },
|
|
10
|
+
{ value: "anthropic", label: "Anthropic (Claude)" },
|
|
11
|
+
{ value: "google", label: "Google (Gemini)" },
|
|
12
|
+
];
|
|
8
13
|
const SCAN_PRESETS = {
|
|
9
14
|
nextjs: {
|
|
10
15
|
include: [
|
|
@@ -84,6 +89,10 @@ jobs:
|
|
|
84
89
|
CONFLUENCE_API_TOKEN: \${{ secrets.CONFLUENCE_API_TOKEN }}
|
|
85
90
|
CONFLUENCE_SPACE_KEY: \${{ secrets.CONFLUENCE_SPACE_KEY }}
|
|
86
91
|
CONFLUENCE_PARENT_PAGE_ID: \${{ secrets.CONFLUENCE_PARENT_PAGE_ID }}
|
|
92
|
+
# Uncomment to enable free AI-enhanced docs via GitHub Models:
|
|
93
|
+
# REPOLENS_AI_ENABLED: true
|
|
94
|
+
# REPOLENS_AI_PROVIDER: github
|
|
95
|
+
# GITHUB_TOKEN: \${{ secrets.GITHUB_TOKEN }}
|
|
87
96
|
run: npx @chappibunny/repolens@latest publish
|
|
88
97
|
`;
|
|
89
98
|
|
|
@@ -106,6 +115,11 @@ CONFLUENCE_PARENT_PAGE_ID=
|
|
|
106
115
|
# REPOLENS_AI_BASE_URL=https://api.openai.com/v1
|
|
107
116
|
# REPOLENS_AI_MODEL=gpt-5-mini
|
|
108
117
|
# REPOLENS_AI_MAX_TOKENS=2000
|
|
118
|
+
|
|
119
|
+
# GitHub Models (free tier — zero-config in GitHub Actions)
|
|
120
|
+
# REPOLENS_AI_PROVIDER=github
|
|
121
|
+
# Uses GITHUB_TOKEN automatically — no separate API key needed
|
|
122
|
+
# REPOLENS_AI_MODEL=gpt-4o-mini
|
|
109
123
|
`;
|
|
110
124
|
|
|
111
125
|
const DEFAULT_REPOLENS_README = `# RepoLens Documentation
|
|
@@ -194,7 +208,20 @@ Adds 5 natural language documents readable by non-technical audiences:
|
|
|
194
208
|
|
|
195
209
|
AI features add natural language explanations for non-technical stakeholders.
|
|
196
210
|
|
|
197
|
-
|
|
211
|
+
### Option A: GitHub Models (Free — Recommended for GitHub Actions)
|
|
212
|
+
|
|
213
|
+
Every GitHub repo gets free access to AI models. In your workflow:
|
|
214
|
+
\`\`\`yaml
|
|
215
|
+
env:
|
|
216
|
+
REPOLENS_AI_ENABLED: true
|
|
217
|
+
REPOLENS_AI_PROVIDER: github
|
|
218
|
+
GITHUB_TOKEN: \${{ secrets.GITHUB_TOKEN }}
|
|
219
|
+
\`\`\`
|
|
220
|
+
No API key signup needed. Uses \`gpt-4o-mini\` by default.
|
|
221
|
+
|
|
222
|
+
### Option B: OpenAI / Other Providers
|
|
223
|
+
|
|
224
|
+
1. Get an API key from your chosen provider
|
|
198
225
|
2. Add to your \`.env\` file:
|
|
199
226
|
\`\`\`bash
|
|
200
227
|
REPOLENS_AI_ENABLED=true
|
|
@@ -214,7 +241,7 @@ AI features add natural language explanations for non-technical stakeholders.
|
|
|
214
241
|
developer_onboarding: true
|
|
215
242
|
\`\`\`
|
|
216
243
|
|
|
217
|
-
**Cost estimate**: $0.10-$0.40 per run for typical projects
|
|
244
|
+
**Cost estimate**: $0.10-$0.40 per run for typical projects (or free with GitHub Models)
|
|
218
245
|
|
|
219
246
|
See [AI.md](https://github.com/CHAPIBUNNY/repolens/blob/main/AI.md) for full documentation
|
|
220
247
|
- **Module Catalog** — Detected code modules
|
|
@@ -532,10 +559,15 @@ async function runInteractiveWizard(repoRoot) {
|
|
|
532
559
|
let aiProvider = null;
|
|
533
560
|
if (enableAi) {
|
|
534
561
|
info("Select AI provider:");
|
|
535
|
-
AI_PROVIDERS.forEach((p, i) => info(` ${i + 1}. ${p}`));
|
|
536
|
-
const aiInput = (await ask(`Provider [1] (default: 1
|
|
562
|
+
AI_PROVIDERS.forEach((p, i) => info(` ${i + 1}. ${p.label}`));
|
|
563
|
+
const aiInput = (await ask(`Provider [1] (default: 1 GitHub Models — free): `)).trim() || "1";
|
|
537
564
|
const idx = parseInt(aiInput, 10);
|
|
538
|
-
|
|
565
|
+
const chosen = AI_PROVIDERS[(idx >= 1 && idx <= AI_PROVIDERS.length) ? idx - 1 : 0];
|
|
566
|
+
aiProvider = chosen.value;
|
|
567
|
+
if (aiProvider === "github") {
|
|
568
|
+
info("\n ✨ Great choice! GitHub Models uses your existing GITHUB_TOKEN — no extra API key needed.");
|
|
569
|
+
info(" Works automatically in GitHub Actions with the free tier.");
|
|
570
|
+
}
|
|
539
571
|
}
|
|
540
572
|
|
|
541
573
|
// 4. Scan preset
|
|
@@ -605,6 +637,9 @@ function buildWizardConfig(answers) {
|
|
|
605
637
|
lines.push(`ai:`);
|
|
606
638
|
lines.push(` enabled: true`);
|
|
607
639
|
lines.push(` mode: hybrid`);
|
|
640
|
+
if (answers.aiProvider) {
|
|
641
|
+
lines.push(` provider: ${answers.aiProvider}`);
|
|
642
|
+
}
|
|
608
643
|
lines.push(``);
|
|
609
644
|
lines.push(`features:`);
|
|
610
645
|
lines.push(` executive_summary: true`);
|
|
@@ -758,14 +793,18 @@ NOTION_VERSION=2022-06-28
|
|
|
758
793
|
info("Next steps:");
|
|
759
794
|
info(" 1. Review .repolens.yml to customize your documentation");
|
|
760
795
|
info(" 2. Run 'npx @chappibunny/repolens publish' to generate your first docs (deterministic mode)");
|
|
761
|
-
info(" 3. (Optional) Enable AI features
|
|
796
|
+
info(" 3. (Optional) Enable AI features:");
|
|
797
|
+
info(" ── FREE: GitHub Models (recommended for GitHub Actions) ──");
|
|
798
|
+
info(" REPOLENS_AI_ENABLED=true");
|
|
799
|
+
info(" REPOLENS_AI_PROVIDER=github");
|
|
800
|
+
info(" (Uses your GITHUB_TOKEN automatically — no API key signup needed)");
|
|
801
|
+
info(" ── Or: OpenAI / Anthropic / Google ──");
|
|
762
802
|
info(" REPOLENS_AI_ENABLED=true");
|
|
763
803
|
info(" REPOLENS_AI_API_KEY=sk-...");
|
|
764
804
|
info(" See AI.md for full guide: https://github.com/CHAPIBUNNY/repolens/blob/main/AI.md");
|
|
765
805
|
info(" 4. For GitHub Actions, add these repository secrets:");
|
|
766
806
|
info(" - NOTION_TOKEN");
|
|
767
807
|
info(" - NOTION_PARENT_PAGE_ID");
|
|
768
|
-
info(" - REPOLENS_AI_API_KEY (if using AI features)");
|
|
769
808
|
info(" 5. Commit the generated files (workflow will run automatically)");
|
|
770
809
|
} else {
|
|
771
810
|
info("Next steps:");
|
|
@@ -773,7 +812,12 @@ NOTION_VERSION=2022-06-28
|
|
|
773
812
|
info(" 2. To enable Notion publishing:");
|
|
774
813
|
info(" - Copy .env.example to .env and add your credentials, OR");
|
|
775
814
|
info(" - Add GitHub secrets: NOTION_TOKEN, NOTION_PARENT_PAGE_ID");
|
|
776
|
-
info(" 3. (Optional) Enable AI features
|
|
815
|
+
info(" 3. (Optional) Enable AI features:");
|
|
816
|
+
info(" ── FREE: GitHub Models (recommended for GitHub Actions) ──");
|
|
817
|
+
info(" REPOLENS_AI_ENABLED=true");
|
|
818
|
+
info(" REPOLENS_AI_PROVIDER=github");
|
|
819
|
+
info(" (Uses your GITHUB_TOKEN automatically — no API key signup needed)");
|
|
820
|
+
info(" ── Or: OpenAI / Anthropic / Google ──");
|
|
777
821
|
info(" REPOLENS_AI_ENABLED=true");
|
|
778
822
|
info(" REPOLENS_AI_API_KEY=sk-...");
|
|
779
823
|
info(" See: https://github.com/CHAPIBUNNY/repolens/blob/main/AI.md");
|