@iservu-inc/adf-cli 0.3.0 → 0.4.12
This diff shows the changes between publicly released versions of the package, as they appear in their respective public registries, and is provided for informational purposes only.
- package/.project/chats/{current → complete}/2025-10-03_AGENTS-MD-AND-TOOL-GENERATORS.md +82 -17
- package/.project/chats/complete/2025-10-03_AI-PROVIDER-INTEGRATION.md +568 -0
- package/.project/chats/complete/2025-10-03_FRAMEWORK-UPDATE-SYSTEM.md +497 -0
- package/.project/chats/complete/2025-10-04_CONFIG-COMMAND.md +503 -0
- package/.project/chats/current/2025-10-04_PHASE-4-1-SMART-FILTERING.md +381 -0
- package/.project/chats/current/SESSION-STATUS.md +168 -0
- package/.project/docs/AI-PROVIDER-INTEGRATION.md +600 -0
- package/.project/docs/FRAMEWORK-UPDATE-INTEGRATION.md +421 -0
- package/.project/docs/FRAMEWORK-UPDATE-SYSTEM.md +832 -0
- package/.project/docs/PHASE-4-2-LEARNING-SYSTEM.md +881 -0
- package/.project/docs/PROJECT-STRUCTURE-EXPLANATION.md +500 -0
- package/.project/docs/SMART-FILTERING-SYSTEM.md +385 -0
- package/.project/docs/architecture/SYSTEM-DESIGN.md +122 -1
- package/.project/docs/goals/PROJECT-VISION.md +61 -34
- package/CHANGELOG.md +257 -1
- package/README.md +476 -292
- package/bin/adf.js +7 -0
- package/lib/ai/ai-client.js +328 -0
- package/lib/ai/ai-config.js +398 -0
- package/lib/analyzers/project-analyzer.js +380 -0
- package/lib/commands/config.js +221 -0
- package/lib/commands/init.js +56 -10
- package/lib/filters/question-filter.js +480 -0
- package/lib/frameworks/interviewer.js +271 -12
- package/lib/frameworks/progress-tracker.js +8 -1
- package/lib/learning/learning-manager.js +447 -0
- package/lib/learning/pattern-detector.js +376 -0
- package/lib/learning/rule-generator.js +304 -0
- package/lib/learning/skip-tracker.js +260 -0
- package/lib/learning/storage.js +296 -0
- package/package.json +70 -57
- package/tests/learning-storage.test.js +184 -0
- package/tests/pattern-detector.test.js +297 -0
- package/tests/project-analyzer.test.js +221 -0
- package/tests/question-filter.test.js +297 -0
- package/tests/skip-tracker.test.js +198 -0
package/bin/adf.js
CHANGED
@@ -7,6 +7,7 @@ const packageJson = require('../package.json');
 const initCommand = require('../lib/commands/init');
 const deployCommand = require('../lib/commands/deploy');
 const updateCommand = require('../lib/commands/update');
+const configCommand = require('../lib/commands/config');
 
 const program = new Command();
 
@@ -39,6 +40,12 @@ program
   .option('--check', 'Only check for updates, don\'t install')
   .action(updateCommand);
 
+// adf config
+program
+  .command('config')
+  .description('Configure ADF settings (AI provider, etc.)')
+  .action(configCommand);
+
 // Handle unknown commands
 program.on('command:*', () => {
   console.error(chalk.red(`\nInvalid command: ${program.args.join(' ')}`));
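Commander passes the parsed options (and the `Command` instance) to the `.action()` handler, so the new `lib/commands/config.js` (+221 lines in this diff) presumably exports a single function of that shape. A minimal sketch, with a purely hypothetical body:

```js
// lib/commands/config.js -- hypothetical shape; the real implementation
// is the +221-line file added in this release.
module.exports = async function configCommand(options, command) {
  // e.g. prompt for an AI provider, model, and API key, then persist them
  console.log('Configuring ADF settings...');
};
```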
package/lib/ai/ai-client.js
ADDED

@@ -0,0 +1,328 @@
+/**
+ * Unified AI Client
+ * Provides a consistent interface across multiple AI providers
+ * Supports: Anthropic, OpenAI, Google Gemini, OpenRouter
+ */
+
+class AIClient {
+  constructor(config) {
+    this.config = config;
+    this.provider = config.provider;
+    this.model = config.model;
+    this.apiKey = config.apiKey;
+    this.client = null;
+
+    this.initializeClient();
+  }
+
+  /**
+   * Initialize provider-specific client
+   */
+  initializeClient() {
+    switch (this.provider) {
+      case 'anthropic':
+        const Anthropic = require('@anthropic-ai/sdk');
+        this.client = new Anthropic({
+          apiKey: this.apiKey
+        });
+        break;
+
+      case 'openai':
+        const OpenAI = require('openai');
+        this.client = new OpenAI({
+          apiKey: this.apiKey
+        });
+        break;
+
+      case 'google':
+        const { GoogleGenerativeAI } = require('@google/generative-ai');
+        this.client = new GoogleGenerativeAI(this.apiKey);
+        break;
+
+      case 'openrouter':
+        const OpenAIForRouter = require('openai');
+        this.client = new OpenAIForRouter({
+          apiKey: this.apiKey,
+          baseURL: 'https://openrouter.ai/api/v1'
+        });
+        break;
+
+      default:
+        throw new Error(`Unsupported provider: ${this.provider}`);
+    }
+  }
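Note that each SDK is required lazily inside its `case`, so only the chosen provider's package has to be installed. Constructing a client then looks something like this (provider, model, and key are illustrative; in practice the new `lib/ai/ai-config.js` would supply them):

```js
const AIClient = require('./lib/ai/ai-client');

// Illustrative values -- any model id the chosen provider accepts will do.
const client = new AIClient({
  provider: 'openai',   // 'anthropic' | 'openai' | 'google' | 'openrouter'
  model: 'gpt-4o-mini',
  apiKey: process.env.OPENAI_API_KEY
});
```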
+
+  /**
+   * Send a message and get a response (unified interface)
+   */
+  async sendMessage(prompt, options = {}) {
+    const maxTokens = options.maxTokens ?? 2048;
+    const temperature = options.temperature ?? 0.7;
+
+    try {
+      switch (this.provider) {
+        case 'anthropic':
+          return await this.anthropicRequest(prompt, maxTokens, temperature);
+
+        case 'openai':
+          return await this.openaiRequest(prompt, maxTokens, temperature);
+
+        case 'google':
+          return await this.googleRequest(prompt, maxTokens, temperature);
+
+        case 'openrouter':
+          return await this.openrouterRequest(prompt, maxTokens, temperature);
+
+        default:
+          throw new Error(`Provider ${this.provider} not implemented`);
+      }
+    } catch (error) {
+      throw new Error(`AI request failed: ${error.message}`);
+    }
+  }
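Whatever the provider, `sendMessage()` resolves to the same envelope: `{ content, model, provider, usage }`, with `usage` normalized to `promptTokens`/`completionTokens`/`totalTokens`. A hedged usage sketch:

```js
// Illustrative prompt and options; run inside an async function.
const reply = await client.sendMessage('Summarize the project goals in one sentence.', {
  maxTokens: 256,
  temperature: 0.2
});
console.log(reply.content);
console.log(`${reply.provider}/${reply.model}: ${reply.usage.totalTokens} tokens`);
```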
+
+  /**
+   * Anthropic Claude request
+   */
+  async anthropicRequest(prompt, maxTokens, temperature) {
+    const response = await this.client.messages.create({
+      model: this.model,
+      max_tokens: maxTokens,
+      temperature,
+      messages: [
+        {
+          role: 'user',
+          content: prompt
+        }
+      ]
+    });
+
+    return {
+      content: response.content[0].text,
+      model: this.model,
+      provider: 'anthropic',
+      usage: {
+        promptTokens: response.usage.input_tokens,
+        completionTokens: response.usage.output_tokens,
+        totalTokens: response.usage.input_tokens + response.usage.output_tokens
+      }
+    };
+  }
+
+  /**
+   * OpenAI GPT request
+   */
+  async openaiRequest(prompt, maxTokens, temperature) {
+    const response = await this.client.chat.completions.create({
+      model: this.model,
+      max_tokens: maxTokens,
+      temperature,
+      messages: [
+        {
+          role: 'user',
+          content: prompt
+        }
+      ]
+    });
+
+    return {
+      content: response.choices[0].message.content,
+      model: this.model,
+      provider: 'openai',
+      usage: {
+        promptTokens: response.usage.prompt_tokens,
+        completionTokens: response.usage.completion_tokens,
+        totalTokens: response.usage.total_tokens
+      }
+    };
+  }
+
+  /**
+   * Google Gemini request
+   */
+  async googleRequest(prompt, maxTokens, temperature) {
+    const model = this.client.getGenerativeModel({
+      model: this.model,
+      generationConfig: {
+        maxOutputTokens: maxTokens,
+        temperature
+      }
+    });
+
+    const result = await model.generateContent(prompt);
+    const response = result.response;
+
+    return {
+      content: response.text(),
+      model: this.model,
+      provider: 'google',
+      usage: {
+        promptTokens: response.usageMetadata?.promptTokenCount || 0,
+        completionTokens: response.usageMetadata?.candidatesTokenCount || 0,
+        totalTokens: response.usageMetadata?.totalTokenCount || 0
+      }
+    };
+  }
+
+  /**
+   * OpenRouter request (uses OpenAI-compatible API)
+   */
+  async openrouterRequest(prompt, maxTokens, temperature) {
+    const response = await this.client.chat.completions.create({
+      model: this.model,
+      max_tokens: maxTokens,
+      temperature,
+      messages: [
+        {
+          role: 'user',
+          content: prompt
+        }
+      ]
+    });
+
+    return {
+      content: response.choices[0].message.content,
+      model: this.model,
+      provider: 'openrouter',
+      usage: {
+        promptTokens: response.usage?.prompt_tokens || 0,
+        completionTokens: response.usage?.completion_tokens || 0,
+        totalTokens: response.usage?.total_tokens || 0
+      }
+    };
+  }
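The four request methods differ only in SDK surface and field names; each maps its provider's usage block onto the same camelCase keys. That normalization is what lets downstream code do cross-provider accounting without branching, for example (sketch; `prompts` is a hypothetical array of strings):

```js
// Total token usage across several calls, relying only on the normalized envelope.
async function totalTokens(client, prompts) {
  let total = 0;
  for (const prompt of prompts) {
    const { usage } = await client.sendMessage(prompt, { maxTokens: 512 });
    total += usage.totalTokens;
  }
  return total;
}
```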
+
+  /**
+   * Test connection with a simple prompt
+   */
+  async test() {
+    const testPrompt = 'Respond with exactly: "Connection successful"';
+
+    const response = await this.sendMessage(testPrompt, {
+      maxTokens: 50,
+      temperature: 0
+    });
+
+    if (!response.content) {
+      throw new Error('No response from AI provider');
+    }
+
+    return {
+      success: true,
+      provider: this.provider,
+      model: this.model,
+      response: response.content
+    };
+  }
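`test()` is a cheap smoke test: 50 tokens, temperature 0, and a fixed expected reply. The `adf config` flow could plausibly call it after a key is entered; a hedged sketch:

```js
// Verify a freshly configured provider before saving the settings
// (inside an async function).
try {
  const result = await client.test();
  console.log(`OK: ${result.provider}/${result.model} -> ${result.response}`);
} catch (error) {
  console.error(`Connection check failed: ${error.message}`);
}
```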
+
+  /**
+   * Analyze answer quality (used by interviewer)
+   */
+  async analyzeAnswerQuality(question, answer) {
+    const prompt = `Analyze this answer to a software requirements question.
+
+Question: ${question}
+
+Answer: ${answer}
+
+Evaluate the answer based on:
+1. Specificity (is it concrete and detailed?)
+2. Completeness (does it address all aspects of the question?)
+3. Clarity (is it clear and unambiguous?)
+4. Technical depth (does it include relevant technical details?)
+
+Respond with ONLY a JSON object in this exact format:
+{
+  "score": <number 0-100>,
+  "issues": ["issue 1", "issue 2"],
+  "suggestions": ["suggestion 1", "suggestion 2"],
+  "missingElements": ["element 1", "element 2"]
+}`;
+
+    const response = await this.sendMessage(prompt, {
+      maxTokens: 500,
+      temperature: 0.3
+    });
+
+    try {
+      // Extract JSON from response (handle markdown code blocks)
+      const jsonMatch = response.content.match(/```json\n([\s\S]*?)\n```/) ||
+                        response.content.match(/{[\s\S]*}/);
+
+      if (!jsonMatch) {
+        throw new Error('No JSON found in response');
+      }
+
+      const analysis = JSON.parse(jsonMatch[1] || jsonMatch[0]);
+
+      return {
+        score: analysis.score || 0,
+        issues: analysis.issues || [],
+        suggestions: analysis.suggestions || [],
+        missingElements: analysis.missingElements || []
+      };
+    } catch (error) {
+      // Fallback if JSON parsing fails
+      return {
+        score: 50,
+        issues: ['Unable to parse AI analysis'],
+        suggestions: ['Please provide more detail'],
+        missingElements: []
+      };
+    }
+  }
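The two regexes cover the common failure mode where the model wraps its JSON in a markdown fence despite the "ONLY a JSON object" instruction: the first captures the fenced body, the fallback grabs the first-brace-to-last-brace span. A self-contained illustration of both paths:

```js
const fenced = 'Sure!\n```json\n{"score": 80, "issues": []}\n```';
const bare = '{"score": 80, "issues": []}';

for (const text of [fenced, bare]) {
  const m = text.match(/```json\n([\s\S]*?)\n```/) || text.match(/{[\s\S]*}/);
  console.log(JSON.parse(m[1] || m[0]).score); // 80 both times
}
```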
+
+  /**
+   * Generate follow-up question based on answer quality
+   */
+  async generateFollowUp(originalQuestion, answer, issues) {
+    const prompt = `You are an expert software requirements analyst. Based on the user's answer, generate a helpful follow-up question.
+
+Original Question: ${originalQuestion}
+
+User's Answer: ${answer}
+
+Issues Identified: ${issues.join(', ')}
+
+Generate ONE concise follow-up question (max 20 words) to help the user provide more specific information. Focus on the most critical missing element.
+
+Respond with ONLY the follow-up question, no explanation.`;
+
+    const response = await this.sendMessage(prompt, {
+      maxTokens: 100,
+      temperature: 0.7
+    });
+
+    return response.content.trim().replace(/^["']|["']$/g, ''); // Remove quotes if present
+  }
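Taken together, `analyzeAnswerQuality()` and `generateFollowUp()` enable a score-then-probe loop; the expanded `lib/frameworks/interviewer.js` (+271 lines in this diff) presumably wires them up along these lines. A sketch with a hypothetical quality threshold:

```js
// Hypothetical interview step: ask a follow-up only when the answer scores low.
async function reviewAnswer(client, question, answer) {
  const analysis = await client.analyzeAnswerQuality(question, answer);
  if (analysis.score >= 70) {
    return { accepted: true, analysis };
  }
  const followUp = await client.generateFollowUp(question, answer, analysis.issues);
  return { accepted: false, analysis, followUp };
}
```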
+
+  /**
+   * Extract insights from all answers (used for final analysis)
+   */
+  async extractInsights(framework, answers) {
+    const answersText = Object.entries(answers)
+      .map(([id, answer]) => `${id}: ${answer}`)
+      .join('\n');
+
+    const prompt = `Analyze these software requirements gathered using the ${framework.toUpperCase()} framework:
+
+${answersText}
+
+Provide insights on:
+1. Key technical challenges
+2. Critical success factors
+3. Potential risks
+4. Architecture recommendations
+
+Respond with a structured analysis in markdown format.`;
+
+    const response = await this.sendMessage(prompt, {
+      maxTokens: 1500,
+      temperature: 0.5
+    });
+
+    return response.content;
+  }
+}
+
+module.exports = AIClient;