wiggum-cli 0.2.6 → 0.3.1
This diff shows the contents of publicly released package versions as they appear in their public registries. It is provided for informational purposes only and reflects the changes between the two versions.
- package/README.md +73 -60
- package/dist/ai/agents/codebase-analyst.d.ts +11 -0
- package/dist/ai/agents/codebase-analyst.d.ts.map +1 -0
- package/dist/ai/agents/codebase-analyst.js +146 -0
- package/dist/ai/agents/codebase-analyst.js.map +1 -0
- package/dist/ai/agents/index.d.ts +16 -0
- package/dist/ai/agents/index.d.ts.map +1 -0
- package/dist/ai/agents/index.js +85 -0
- package/dist/ai/agents/index.js.map +1 -0
- package/dist/ai/agents/orchestrator.d.ts +15 -0
- package/dist/ai/agents/orchestrator.d.ts.map +1 -0
- package/dist/ai/agents/orchestrator.js +181 -0
- package/dist/ai/agents/orchestrator.js.map +1 -0
- package/dist/ai/agents/stack-researcher.d.ts +15 -0
- package/dist/ai/agents/stack-researcher.d.ts.map +1 -0
- package/dist/ai/agents/stack-researcher.js +269 -0
- package/dist/ai/agents/stack-researcher.js.map +1 -0
- package/dist/ai/agents/types.d.ts +123 -0
- package/dist/ai/agents/types.d.ts.map +1 -0
- package/dist/ai/agents/types.js +6 -0
- package/dist/ai/agents/types.js.map +1 -0
- package/dist/ai/enhancer.d.ts +39 -1
- package/dist/ai/enhancer.d.ts.map +1 -1
- package/dist/ai/enhancer.js +78 -36
- package/dist/ai/enhancer.js.map +1 -1
- package/dist/ai/index.d.ts +4 -2
- package/dist/ai/index.d.ts.map +1 -1
- package/dist/ai/index.js +5 -1
- package/dist/ai/index.js.map +1 -1
- package/dist/ai/prompts.d.ts +2 -2
- package/dist/ai/prompts.d.ts.map +1 -1
- package/dist/ai/prompts.js +66 -4
- package/dist/ai/prompts.js.map +1 -1
- package/dist/ai/providers.d.ts +28 -0
- package/dist/ai/providers.d.ts.map +1 -1
- package/dist/ai/providers.js +40 -0
- package/dist/ai/providers.js.map +1 -1
- package/dist/ai/tools/context7.d.ts +34 -0
- package/dist/ai/tools/context7.d.ts.map +1 -0
- package/dist/ai/tools/context7.js +135 -0
- package/dist/ai/tools/context7.js.map +1 -0
- package/dist/ai/tools/index.d.ts +7 -0
- package/dist/ai/tools/index.d.ts.map +1 -0
- package/dist/ai/tools/index.js +7 -0
- package/dist/ai/tools/index.js.map +1 -0
- package/dist/ai/tools/tavily.d.ts +27 -0
- package/dist/ai/tools/tavily.d.ts.map +1 -0
- package/dist/ai/tools/tavily.js +75 -0
- package/dist/ai/tools/tavily.js.map +1 -0
- package/dist/cli.d.ts.map +1 -1
- package/dist/cli.js +14 -12
- package/dist/cli.js.map +1 -1
- package/dist/commands/init.d.ts +2 -5
- package/dist/commands/init.d.ts.map +1 -1
- package/dist/commands/init.js +233 -154
- package/dist/commands/init.js.map +1 -1
- package/dist/utils/colors.d.ts.map +1 -1
- package/dist/utils/colors.js +10 -3
- package/dist/utils/colors.js.map +1 -1
- package/dist/utils/header.d.ts +1 -1
- package/dist/utils/header.js +3 -3
- package/dist/utils/header.js.map +1 -1
- package/dist/utils/json-repair.d.ts +14 -0
- package/dist/utils/json-repair.d.ts.map +1 -0
- package/dist/utils/json-repair.js +103 -0
- package/dist/utils/json-repair.js.map +1 -0
- package/dist/utils/tracing.d.ts +25 -0
- package/dist/utils/tracing.d.ts.map +1 -0
- package/dist/utils/tracing.js +64 -0
- package/dist/utils/tracing.js.map +1 -0
- package/package.json +5 -3
- package/src/ai/agents/codebase-analyst.ts +169 -0
- package/src/ai/agents/index.ts +147 -0
- package/src/ai/agents/orchestrator.ts +218 -0
- package/src/ai/agents/stack-researcher.ts +294 -0
- package/src/ai/agents/types.ts +132 -0
- package/src/ai/enhancer.ts +128 -38
- package/src/ai/index.ts +31 -1
- package/src/ai/prompts.ts +67 -4
- package/src/ai/providers.ts +48 -0
- package/src/ai/tools/context7.ts +167 -0
- package/src/ai/tools/index.ts +17 -0
- package/src/ai/tools/tavily.ts +101 -0
- package/src/cli.ts +14 -12
- package/src/commands/init.ts +278 -173
- package/src/utils/colors.ts +11 -3
- package/src/utils/header.ts +3 -3
- package/src/utils/json-repair.ts +113 -0
- package/src/utils/tracing.ts +76 -0

package/src/ai/tools/tavily.ts
ADDED
@@ -0,0 +1,101 @@
+/**
+ * Tavily Web Search Tool
+ * Enables web search for current best practices and documentation
+ */
+
+import { tool, zodSchema } from 'ai';
+import { z } from 'zod';
+
+/**
+ * Tavily search result
+ */
+export interface TavilySearchResult {
+  title: string;
+  url: string;
+  content: string;
+  score: number;
+}
+
+/**
+ * Tavily API response
+ */
+interface TavilyApiResponse {
+  results: TavilySearchResult[];
+  query: string;
+  answer?: string;
+}
+
+/**
+ * Create a Tavily web search tool
+ * @param apiKey - Tavily API key
+ */
+export function createTavilySearchTool(apiKey: string) {
+  return tool({
+    description: `Search the web for current best practices, documentation, and recent information.
+Use this to find:
+- Current best practices for technologies
+- Testing patterns and tools
+- Library documentation and examples
+- Recent updates and changes`,
+    inputSchema: zodSchema(z.object({
+      query: z.string().describe('Search query - be specific about what you want to find'),
+      searchDepth: z.enum(['basic', 'advanced']).optional()
+        .describe('Search depth - use "advanced" for comprehensive results'),
+      maxResults: z.number().min(1).max(10).optional()
+        .describe('Maximum number of results (default 5)'),
+    })),
+    execute: async ({ query, searchDepth, maxResults }) => {
+      try {
+        const response = await fetch('https://api.tavily.com/search', {
+          method: 'POST',
+          headers: {
+            'Content-Type': 'application/json',
+          },
+          body: JSON.stringify({
+            api_key: apiKey,
+            query,
+            search_depth: searchDepth || 'basic',
+            max_results: maxResults || 5,
+            include_answer: true,
+            include_raw_content: false,
+          }),
+        });
+
+        if (!response.ok) {
+          const errorText = await response.text();
+          return `Search failed: ${response.status} - ${errorText}`;
+        }
+
+        const data = await response.json() as TavilyApiResponse;
+
+        // Format results for the AI
+        const results: string[] = [];
+
+        if (data.answer) {
+          results.push(`Summary: ${data.answer}`);
+          results.push('');
+        }
+
+        results.push('Sources:');
+        for (const result of data.results) {
+          results.push(`- ${result.title}`);
+          results.push(`  URL: ${result.url}`);
+          results.push(`  ${result.content.substring(0, 300)}...`);
+          results.push('');
+        }
+
+        return results.join('\n');
+      } catch (error) {
+        const errMsg = error instanceof Error ? error.message : String(error);
+        return `Search error: ${errMsg}`;
+      }
+    },
+  });
+}
+
+/**
+ * Create a function that checks if Tavily search can be performed
+ */
+export function canUseTavily(apiKey?: string): boolean {
+  return !!apiKey && apiKey.length > 0;
+}
package/src/cli.ts
CHANGED
@@ -27,8 +27,7 @@ export function createCli(): Command {
       'after',
       `
 Examples:
-  $ ralph init                      Initialize Ralph
-  $ ralph init --ai                 Initialize with AI-enhanced analysis
+  $ ralph init                      Initialize Ralph with AI analysis
   $ ralph new my-feature            Create a new feature specification
   $ ralph run my-feature            Run the feature development loop
   $ ralph monitor my-feature        Monitor progress in real-time
@@ -43,10 +42,9 @@ Documentation:
     .command('init')
     .description(
       'Initialize Ralph in the current project.\n\n' +
-      '
-      '
+      'Uses AI to analyze your codebase, detect the tech stack, and generate\n' +
+      'intelligent configuration files in .ralph/'
     )
-    .option('--ai', 'Enable AI-enhanced analysis for deeper project insights')
     .option(
       '--provider <name>',
       'AI provider to use (anthropic, openai, openrouter)',
@@ -57,15 +55,19 @@ Documentation:
       'after',
       `
 Examples:
-  $ ralph init
-  $ ralph init --
-  $ ralph init --ai --provider openai    With OpenAI provider
+  $ ralph init                      Initialize with AI analysis
+  $ ralph init --provider openai    Use OpenAI provider
   $ ralph init --yes                Non-interactive mode
 
-
-
-
-
+API Keys (BYOK - Bring Your Own Keys):
+  Required (one of):
+    ANTHROPIC_API_KEY    For Anthropic (Claude) provider
+    OPENAI_API_KEY       For OpenAI provider
+    OPENROUTER_API_KEY   For OpenRouter provider
+
+  Optional (for enhanced research):
+    TAVILY_API_KEY       Enable web search for best practices
+    CONTEXT7_API_KEY     Enable documentation lookup
 `
     )
     .action(async (options) => {
package/src/commands/init.ts
CHANGED
@@ -1,10 +1,10 @@
 /**
  * Init Command
- * Initialize Ralph in the current project -
+ * Initialize Ralph in the current project - BYOK multi-agent AI analysis
  */
 
 import { logger } from '../utils/logger.js';
-import { Scanner,
+import { Scanner, type ScanResult } from '../scanner/index.js';
 import { Generator, formatGenerationResult } from '../generator/index.js';
 import {
   AIEnhancer,
@@ -12,7 +12,15 @@ import {
   type AIProvider,
   type EnhancedScanResult,
 } from '../ai/index.js';
-import {
+import {
+  hasApiKey,
+  getApiKeyEnvVar,
+  getAvailableProvider,
+  AVAILABLE_MODELS,
+  OPTIONAL_SERVICE_ENV_VARS,
+  hasTavilyKey,
+  hasContext7Key,
+} from '../ai/providers.js';
 import * as prompts from '@clack/prompts';
 import pc from 'picocolors';
 import fs from 'fs';
@@ -20,16 +28,225 @@ import path from 'path';
 import { simpson, sectionHeader, drawLine } from '../utils/colors.js';
 
 export interface InitOptions {
-  ai?: boolean;
   provider?: AIProvider;
   yes?: boolean;
-
-
+}
+
+/**
+ * Save API keys to .env.local file
+ */
+function saveKeysToEnvLocal(
+  projectRoot: string,
+  keys: Record<string, string>
+): void {
+  const envLocalPath = path.join(projectRoot, '.env.local');
+  let envContent = '';
+
+  // Read existing content if file exists
+  if (fs.existsSync(envLocalPath)) {
+    envContent = fs.readFileSync(envLocalPath, 'utf-8');
+  }
+
+  // Update or add each key
+  for (const [envVar, value] of Object.entries(keys)) {
+    if (!value) continue;
+
+    const keyRegex = new RegExp(`^${envVar}=.*$`, 'm');
+    if (keyRegex.test(envContent)) {
+      // Replace existing key
+      envContent = envContent.replace(keyRegex, `${envVar}=${value}`);
+    } else {
+      // Append new key
+      envContent = envContent.trimEnd() + (envContent ? '\n' : '') + `${envVar}=${value}\n`;
+    }
+  }
+
+  fs.writeFileSync(envLocalPath, envContent);
+}
+
+/**
+ * Get the default model for a provider (the one marked as 'recommended' or first)
+ */
+function getDefaultModel(provider: AIProvider): string {
+  const models = AVAILABLE_MODELS[provider];
+  const recommended = models.find(m => m.hint?.includes('recommended'));
+  return recommended?.value || models[0].value;
+}
+
+/**
+ * BYOK Flow: Collect API keys from user
+ */
+async function collectApiKeys(
+  projectRoot: string,
+  options: InitOptions
+): Promise<{
+  provider: AIProvider;
+  model: string;
+  tavilyKey?: string;
+  context7Key?: string;
+} | null> {
+  // Check if we already have an LLM key
+  let provider: AIProvider = options.provider || 'anthropic';
+  const existingProvider = getAvailableProvider();
+  let hadLlmKeyBefore = options.provider ? hasApiKey(options.provider) : !!existingProvider;
+  let llmKeyEnteredThisSession = false;
+
+  if (!hadLlmKeyBefore) {
+    // In --yes mode, fail if no API key is available
+    if (options.yes) {
+      logger.error('No API key found. Set ANTHROPIC_API_KEY, OPENAI_API_KEY, or OPENROUTER_API_KEY.');
+      return null;
+    }
+
+    // Need to collect LLM API key interactively
+    console.log('');
+    console.log(simpson.yellow('─── API Key Setup ───'));
+    console.log('');
+    console.log('Ralph uses AI to analyze your codebase and generate configuration.');
+    console.log('');
+
+    // Select provider
+    const providerChoice = await prompts.select({
+      message: 'Select your AI provider:',
+      options: [
+        { value: 'anthropic', label: 'Anthropic (Claude)', hint: 'recommended' },
+        { value: 'openai', label: 'OpenAI (GPT-4/5)' },
+        { value: 'openrouter', label: 'OpenRouter', hint: 'multiple providers' },
+      ],
+    });
+
+    if (prompts.isCancel(providerChoice)) {
+      return null;
+    }
+
+    provider = providerChoice as AIProvider;
+    const envVar = getApiKeyEnvVar(provider);
+
+    // Get API key
+    const apiKeyInput = await prompts.password({
+      message: `Enter your ${envVar}:`,
+    });
+
+    if (prompts.isCancel(apiKeyInput) || !apiKeyInput) {
+      logger.error('LLM API key is required to use Ralph.');
+      return null;
+    }
+
+    // Set in process.env for this session
+    process.env[envVar] = apiKeyInput;
+    llmKeyEnteredThisSession = true;
+  } else if (!options.provider) {
+    // Use the available provider
+    provider = existingProvider || 'anthropic';
+  }
+
+  // Select model (skip in --yes mode, use default)
+  let selectedModel: string;
+
+  if (options.yes) {
+    selectedModel = getDefaultModel(provider);
+  } else {
+    const modelOptions = AVAILABLE_MODELS[provider];
+    const modelChoice = await prompts.select({
+      message: 'Select model:',
+      options: modelOptions.map(m => ({
+        value: m.value,
+        label: m.label,
+        hint: m.hint,
+      })),
+    });
+
+    if (prompts.isCancel(modelChoice)) {
+      return null;
+    }
+
+    selectedModel = modelChoice as string;
+  }
+
+  // Collect optional Tavily API key (skip in --yes mode)
+  let tavilyKey: string | undefined;
+  let tavilyKeyEnteredThisSession = false;
+
+  if (hasTavilyKey()) {
+    tavilyKey = process.env[OPTIONAL_SERVICE_ENV_VARS.tavily];
+  } else if (!options.yes) {
+    console.log('');
+    console.log(pc.dim('Tavily enables web search for current best practices (optional)'));
+
+    const tavilyInput = await prompts.password({
+      message: `Enter ${OPTIONAL_SERVICE_ENV_VARS.tavily} (press Enter to skip):`,
+    });
+
+    if (!prompts.isCancel(tavilyInput) && tavilyInput) {
+      tavilyKey = tavilyInput;
+      tavilyKeyEnteredThisSession = true;
+      process.env[OPTIONAL_SERVICE_ENV_VARS.tavily] = tavilyInput;
+    }
+  }
+
+  // Collect optional Context7 API key (skip in --yes mode)
+  let context7Key: string | undefined;
+  let context7KeyEnteredThisSession = false;
+
+  if (hasContext7Key()) {
+    context7Key = process.env[OPTIONAL_SERVICE_ENV_VARS.context7];
+  } else if (!options.yes) {
+    console.log(pc.dim('Context7 enables documentation lookup for your stack (optional)'));
+
+    const context7Input = await prompts.password({
+      message: `Enter ${OPTIONAL_SERVICE_ENV_VARS.context7} (press Enter to skip):`,
+    });
+
+    if (!prompts.isCancel(context7Input) && context7Input) {
+      context7Key = context7Input;
+      context7KeyEnteredThisSession = true;
+      process.env[OPTIONAL_SERVICE_ENV_VARS.context7] = context7Input;
+    }
+  }
+
+  // Save keys entered this session to .env.local
+  const keysToSave: Record<string, string> = {};
+
+  if (llmKeyEnteredThisSession) {
+    const llmEnvVar = getApiKeyEnvVar(provider);
+    keysToSave[llmEnvVar] = process.env[llmEnvVar]!;
+  }
+  if (tavilyKeyEnteredThisSession && tavilyKey) {
+    keysToSave[OPTIONAL_SERVICE_ENV_VARS.tavily] = tavilyKey;
+  }
+  if (context7KeyEnteredThisSession && context7Key) {
+    keysToSave[OPTIONAL_SERVICE_ENV_VARS.context7] = context7Key;
+  }
+
+  if (Object.keys(keysToSave).length > 0) {
+    // In --yes mode, auto-save keys
+    if (options.yes) {
+      saveKeysToEnvLocal(projectRoot, keysToSave);
+      logger.success('API keys saved to .env.local');
+    } else {
+      const saveKeys = await prompts.confirm({
+        message: 'Save API keys to .env.local?',
+        initialValue: true,
+      });
+
+      if (!prompts.isCancel(saveKeys) && saveKeys) {
+        saveKeysToEnvLocal(projectRoot, keysToSave);
+        logger.success('API keys saved to .env.local');
+      }
+    }
+  }
+
+  return {
+    provider,
+    model: selectedModel,
+    tavilyKey,
+    context7Key,
+  };
 }
 
 /**
  * Initialize Ralph in the current project
- *
+ * Uses BYOK (Bring Your Own Keys) model with multi-agent AI analysis
  */
 export async function initCommand(options: InitOptions): Promise<void> {
   const projectRoot = process.cwd();
@@ -38,7 +255,20 @@ export async function initCommand(options: InitOptions): Promise<void> {
   logger.info(`Project: ${projectRoot}`);
   console.log('');
 
-  // Step 1:
+  // Step 1: Collect API keys (BYOK)
+  const apiKeys = await collectApiKeys(projectRoot, options);
+
+  if (!apiKeys) {
+    // In --yes mode, null means missing API key (hard failure)
+    // In interactive mode, null means user cancelled
+    if (options.yes) {
+      process.exit(1);
+    }
+    logger.info('Initialization cancelled');
+    return;
+  }
+
+  // Step 2: Scan the project (background)
   const spinner = prompts.spinner();
   spinner.start('Scanning project...');
 
@@ -47,189 +277,64 @@ export async function initCommand(options: InitOptions): Promise<void> {
 
   try {
     scanResult = await scanner.scan(projectRoot);
-    spinner.stop('Project scanned
+    spinner.stop('Project scanned');
   } catch (error) {
     spinner.stop('Scan failed');
     logger.error(`Failed to scan project: ${error instanceof Error ? error.message : String(error)}`);
     process.exit(1);
   }
 
-  //
-  console.log('');
-  console.log(simpson.yellow('─── Scan Results ───'));
-  console.log(formatScanResult(scanResult));
+  // Step 3: Run multi-agent AI analysis
   console.log('');
+  const modelLabel = AVAILABLE_MODELS[apiKeys.provider].find(m => m.value === apiKeys.model)?.label || apiKeys.model;
 
-  //
-
-
-
+  // Show capabilities status
+  const capabilities: string[] = ['Codebase Analysis'];
+  if (apiKeys.tavilyKey) capabilities.push('Web Research');
+  if (apiKeys.context7Key) capabilities.push('Doc Lookup');
 
-
-
-
-      message: 'Enable AI-enhanced analysis?',
-      initialValue: true,
-    });
+  console.log(simpson.yellow(`─── AI Analysis (${apiKeys.provider} / ${modelLabel}) ───`));
+  console.log(pc.dim(`Capabilities: ${capabilities.join(' • ')}`));
+  console.log('');
 
-
-      logger.info('Initialization cancelled');
-      return;
-    }
+  spinner.start('Running AI analysis...');
 
-
-
+  const aiEnhancer = new AIEnhancer({
+    provider: apiKeys.provider,
+    model: apiKeys.model,
+    verbose: true,
+    agentic: true, // Always use agentic mode for deeper analysis
+    tavilyApiKey: apiKeys.tavilyKey,
+    context7ApiKey: apiKeys.context7Key,
+  });
+
+  let enhancedResult: EnhancedScanResult;
 
-
-
-    let hasKey = options.provider ? hasApiKey(options.provider) : !!getAvailableProvider();
+  try {
+    enhancedResult = await aiEnhancer.enhance(scanResult);
 
-
-
+    if (enhancedResult.aiEnhanced && enhancedResult.aiAnalysis) {
+      spinner.stop('AI analysis complete');
       console.log('');
-      console.log(
+      console.log(formatAIAnalysis(enhancedResult.aiAnalysis));
+    } else if (enhancedResult.aiError) {
+      spinner.stop('AI analysis failed');
+      logger.warn(`AI error: ${enhancedResult.aiError}`);
       console.log('');
 
-      //
-
-        message: 'Which AI provider would you like to use?',
-        options: [
-          { value: 'anthropic', label: 'Anthropic (Claude)', hint: 'recommended' },
-          { value: 'openai', label: 'OpenAI (GPT-4)' },
-          { value: 'openrouter', label: 'OpenRouter' },
-        ],
-      });
-
-      if (prompts.isCancel(providerChoice)) {
-        logger.info('Initialization cancelled');
-        return;
-      }
-
-      provider = providerChoice as AIProvider;
-      const envVar = getApiKeyEnvVar(provider);
-
-      // Prompt for API key
-      const apiKeyInput = await prompts.password({
-        message: `Enter your ${envVar}:`,
-      });
-
-      if (prompts.isCancel(apiKeyInput) || !apiKeyInput) {
-        logger.warn('No API key provided, skipping AI enhancement');
-        useAI = false;
-      } else {
-        // Set the API key in process.env for current session
-        process.env[envVar] = apiKeyInput;
-        hasKey = true;
-
-        // Offer to save to .env.local
-        const saveKey = await prompts.confirm({
-          message: 'Save API key to .env.local?',
-          initialValue: true,
-        });
-
-        if (!prompts.isCancel(saveKey) && saveKey) {
-          const envLocalPath = path.join(projectRoot, '.env.local');
-          let envContent = '';
-
-          // Read existing content if file exists
-          if (fs.existsSync(envLocalPath)) {
-            envContent = fs.readFileSync(envLocalPath, 'utf-8');
-            // Check if key already exists
-            const keyRegex = new RegExp(`^${envVar}=.*$`, 'm');
-            if (keyRegex.test(envContent)) {
-              // Replace existing key
-              envContent = envContent.replace(keyRegex, `${envVar}=${apiKeyInput}`);
-            } else {
-              // Append new key
-              envContent = envContent.trimEnd() + '\n' + `${envVar}=${apiKeyInput}\n`;
-            }
-          } else {
-            envContent = `${envVar}=${apiKeyInput}\n`;
-          }
-
-          fs.writeFileSync(envLocalPath, envContent);
-          logger.success(`API key saved to .env.local`);
-        }
-      }
-    } else if (!options.provider) {
-      // If we have a key but no provider specified, use the available one
-      provider = getAvailableProvider() || 'anthropic';
+      // Fall back to basic scan result
+      enhancedResult = { ...scanResult, aiEnhanced: false };
     }
+  } catch (error) {
+    spinner.stop('AI analysis failed');
+    logger.warn(`AI error: ${error instanceof Error ? error.message : String(error)}`);
+    console.log('');
 
-    //
-
-    // Ask which model to use
-    const modelOptions = AVAILABLE_MODELS[provider];
-    const modelChoice = await prompts.select({
-      message: 'Which model would you like to use?',
-      options: modelOptions.map(m => ({
-        value: m.value,
-        label: m.label,
-        hint: m.hint,
-      })),
-    });
-
-    if (prompts.isCancel(modelChoice)) {
-      logger.info('Initialization cancelled');
-      return;
-    }
-
-    const selectedModel = modelChoice as string;
-    const modelLabel = modelOptions.find(m => m.value === selectedModel)?.label || selectedModel;
-
-    // Ask about agentic mode (deep exploration)
-    let useAgentic = options.agentic;
-    if (useAgentic === undefined && !options.yes) {
-      const wantAgentic = await prompts.confirm({
-        message: 'Enable deep codebase exploration? (AI will search files and directories)',
-        initialValue: true,
-      });
-
-      if (prompts.isCancel(wantAgentic)) {
-        logger.info('Initialization cancelled');
-        return;
-      }
-
-      useAgentic = wantAgentic;
-    }
-
-    console.log('');
-    const modeLabel = useAgentic ? 'agentic' : 'simple';
-    console.log(simpson.yellow(`─── AI Enhancement (${provider} / ${modelLabel} / ${modeLabel}) ───`));
-
-    const aiEnhancer = new AIEnhancer({
-      provider,
-      model: selectedModel,
-      verbose: true,
-      agentic: useAgentic,
-    });
-
-    spinner.start('Running AI analysis...');
-
-    try {
-      enhancedResult = await aiEnhancer.enhance(scanResult);
-
-      if (enhancedResult.aiEnhanced && enhancedResult.aiAnalysis) {
-        spinner.stop('AI analysis complete');
-        console.log('');
-        console.log(formatAIAnalysis(enhancedResult.aiAnalysis));
-
-        // Use enhanced result for generation
-        scanResult = enhancedResult;
-      } else if (enhancedResult.aiError) {
-        spinner.stop('AI analysis failed');
-        logger.warn(`AI enhancement error: ${enhancedResult.aiError}`);
-        console.log('');
-      }
-    } catch (error) {
-      spinner.stop('AI analysis failed');
-      logger.warn(`AI enhancement error: ${error instanceof Error ? error.message : String(error)}`);
-      console.log('');
-    }
-  }
+    // Fall back to basic scan result
+    enhancedResult = { ...scanResult, aiEnhanced: false };
   }
 
-  // Step
+  // Step 4: Confirm with user (unless --yes)
   if (!options.yes) {
     const shouldContinue = await prompts.confirm({
       message: 'Generate Ralph configuration files?',
@@ -242,7 +347,7 @@ export async function initCommand(options: InitOptions): Promise<void> {
     }
   }
 
-  // Step
+  // Step 5: Generate configuration files
  console.log('');
   spinner.start('Generating configuration files...');
 
@@ -253,7 +358,7 @@ export async function initCommand(options: InitOptions): Promise<void> {
   });
 
   try {
-    const generationResult = await generator.generate(
+    const generationResult = await generator.generate(enhancedResult);
     spinner.stop('Configuration files generated');
 
     console.log('');