@iservu-inc/adf-cli 0.12.0 → 0.12.10
This diff reflects the content of publicly available package versions as released to a supported public registry, and is provided for informational purposes only.
- package/.claude/settings.local.json +12 -6
- package/CHANGELOG.md +109 -0
- package/README.md +33 -14
- package/bin/adf.js +339 -1
- package/lib/ai/ai-client.js +161 -44
- package/lib/ai/ai-config.js +276 -104
- package/lib/commands/deploy.js +36 -8
- package/lib/generators/deepagent-generator.js +144 -0
- package/lib/generators/gemini-cli-generator.js +241 -0
- package/lib/generators/index.js +33 -0
- package/lib/generators/opencode-generator.js +153 -0
- package/package.json +1 -1
package/lib/ai/ai-config.js
CHANGED

@@ -43,19 +43,18 @@ const AI_PROVIDERS = {
       website: 'https://ai.google.dev/',
       setup: 'Get your API key from https://aistudio.google.com/app/apikey',
       defaultModels: [
-        // Gemini 2.
-        'gemini-2.
-        'gemini-2.
-
-        'gemini-
-        // Gemini 2.0 Series (Stable)
-        'gemini-2.0-flash',
-        'gemini-2.0-flash-lite',
-        // Gemini 1.5 Series (Legacy - still supported)
+        // Gemini 2.0 Series (Latest - Experimental)
+        'gemini-2.0-flash-exp',
+        'gemini-2.0-flash-thinking-exp',
+        // Gemini 1.5 Series (Stable - Recommended)
+        'gemini-1.5-pro',
         'gemini-1.5-pro-latest',
+        'gemini-1.5-flash',
         'gemini-1.5-flash-latest',
-        'gemini-1.5-
-
+        'gemini-1.5-flash-8b',
+        // Gemini 1.0 Series (Legacy)
+        'gemini-pro',
+        'gemini-pro-vision'
       ]
     },
     OPENROUTER: {
@@ -192,6 +191,67 @@ function loadEnvIntoProcess(envPath) {
   require('dotenv').config({ path: envPath });
 }
 
+/**
+ * Validate API key with provider by making a simple API call
+ */
+async function validateAPIKeyWithProvider(provider, apiKey) {
+  switch (provider.id) {
+    case 'anthropic':
+      const Anthropic = require('@anthropic-ai/sdk');
+      const anthropicClient = new Anthropic({ apiKey });
+      // Make a minimal request to validate the key
+      await anthropicClient.messages.create({
+        model: 'claude-3-haiku-20240307',
+        max_tokens: 10,
+        messages: [{ role: 'user', content: 'test' }]
+      });
+      break;
+
+    case 'openai':
+      const OpenAI = require('openai');
+      const openaiClient = new OpenAI({ apiKey });
+      // List models is a simple GET request that validates the key
+      await openaiClient.models.list();
+      break;
+
+    case 'google':
+      const fetchGoogle = require('node-fetch');
+      // Validate by fetching models list (validates key and shows available models)
+      const googleResponse = await fetchGoogle(
+        `https://generativelanguage.googleapis.com/v1beta/models?key=${apiKey}`
+      );
+
+      if (!googleResponse.ok) {
+        const errorText = await googleResponse.text();
+        throw new Error(`HTTP ${googleResponse.status}: ${errorText || googleResponse.statusText}`);
+      }
+
+      const googleData = await googleResponse.json();
+      if (!googleData.models || !Array.isArray(googleData.models) || googleData.models.length === 0) {
+        throw new Error('No models available for this API key');
+      }
+      break;
+
+    case 'openrouter':
+      const fetchOR = require('node-fetch');
+      const response = await fetchOR('https://openrouter.ai/api/v1/models', {
+        headers: {
+          'Authorization': `Bearer ${apiKey}`,
+          'Content-Type': 'application/json'
+        }
+      });
+
+      if (!response.ok) {
+        const errorText = await response.text();
+        throw new Error(`HTTP ${response.status}: ${errorText || response.statusText}`);
+      }
+      break;
+
+    default:
+      throw new Error(`Validation not implemented for provider: ${provider.id}`);
+  }
+}
+
 /**
  * Fetch available models from provider API
  */
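For reference, a minimal sketch of calling this validator directly. It is illustrative only: the provider object below is a stand-in carrying just the `id` field the switch inspects (the real entries live in AI_PROVIDERS above), and the environment variable name is a placeholder.

    // Illustrative usage sketch, not part of the package.
    (async () => {
      const provider = { id: 'openrouter' };   // stand-in; real entries live in AI_PROVIDERS
      try {
        await validateAPIKeyWithProvider(provider, process.env.OPENROUTER_API_KEY);
        console.log('API key accepted');
      } catch (err) {
        console.error(`Validation failed: ${err.message}`);
      }
    })();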
@@ -202,9 +262,53 @@ async function fetchAvailableModels(provider, apiKey) {
   try {
     switch (provider.id) {
       case 'anthropic':
-
-
-
+        try {
+          const Anthropic = require('@anthropic-ai/sdk');
+          const anthropic = new Anthropic({ apiKey });
+
+          // Try to fetch models from Anthropic API
+          // Note: As of SDK v0.65.0, models endpoint may not be available
+          // If it exists, it would be similar to OpenAI's API
+          try {
+            const response = await anthropic.models.list();
+            const models = response.data
+              .map(m => m.id)
+              .sort();
+
+            if (models.length > 0) {
+              spinner.succeed(`Found ${models.length} Anthropic models`);
+              return models;
+            }
+          } catch (listError) {
+            // Models endpoint not available, use comprehensive default list
+            spinner.warn('Model listing not available, using curated list');
+          }
+
+          // Comprehensive list of known Anthropic models
+          const knownModels = [
+            // Claude 4.5 (Latest)
+            'claude-sonnet-4-5-20250929',
+            'claude-opus-4-5-20251101',
+            // Claude 3.5
+            'claude-3-5-sonnet-20241022',
+            'claude-3-5-sonnet-20240620',
+            'claude-3-5-haiku-20241022',
+            // Claude 3
+            'claude-3-opus-20240229',
+            'claude-3-sonnet-20240229',
+            'claude-3-haiku-20240307',
+            // Claude 2 (Legacy)
+            'claude-2.1',
+            'claude-2.0',
+            'claude-instant-1.2'
+          ];
+
+          return knownModels;
+        } catch (error) {
+          spinner.fail(`Failed to load Anthropic models: ${error.message}`);
+          console.log(chalk.yellow(' Using default model list\n'));
+          return provider.defaultModels;
+        }
 
       case 'openai':
         const OpenAI = require('openai');
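The Anthropic branch above uses a "try the live listing, fall back to a curated list" pattern. A generalized sketch of that pattern, shown only to make the control flow explicit (not part of the package):

    // Generalized sketch of the fallback pattern used above.
    async function listOrFallback(listFn, fallbackModels) {
      try {
        const models = await listFn();
        if (models.length > 0) return models;
      } catch (listError) {
        // Listing endpoint unavailable or failed; fall through to curated defaults.
      }
      return fallbackModels;
    }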
@@ -224,33 +328,81 @@ async function fetchAvailableModels(provider, apiKey) {
 
       case 'google':
         try {
-          const
-
+          const fetch = require('node-fetch');
+
+          // Use Google's REST API to list available models
+          const response = await fetch(
+            `https://generativelanguage.googleapis.com/v1beta/models?key=${apiKey}`
+          );
 
-
-
-
+          if (!response.ok) {
+            throw new Error(`HTTP ${response.status}: ${response.statusText}`);
+          }
 
-
-
-
+          const data = await response.json();
+
+          if (data.models && Array.isArray(data.models)) {
+            // Filter for models that support generateContent
+            const availableModels = data.models
+              .filter(m =>
+                m.supportedGenerationMethods &&
+                m.supportedGenerationMethods.includes('generateContent')
+              )
+              .map(m => m.name.replace('models/', ''))
+              .sort();
+
+            if (availableModels.length > 0) {
+              spinner.succeed(`Found ${availableModels.length} Google Gemini models`);
+              return availableModels;
+            }
+          }
+
+          // Fallback to default list
+          spinner.warn('No models found via API, using curated list');
           return provider.defaultModels;
         } catch (error) {
-          spinner.
+          spinner.fail(`Failed to fetch Google models: ${error.message}`);
+          console.log(chalk.yellow(' Using default model list\n'));
           return provider.defaultModels;
         }
 
       case 'openrouter':
-
-
-
-
+        try {
+          const fetch = require('node-fetch');
+          const orResponse = await fetch('https://openrouter.ai/api/v1/models', {
+            headers: {
+              'Authorization': `Bearer ${apiKey}`,
+              'Content-Type': 'application/json'
+            }
+          });
+
+          if (!orResponse.ok) {
+            throw new Error(`HTTP ${orResponse.status}: ${orResponse.statusText}`);
           }
-
-
-
-
-
+
+          const orData = await orResponse.json();
+
+          if (!orData.data || !Array.isArray(orData.data)) {
+            throw new Error('Invalid response format from OpenRouter API');
+          }
+
+          const orModels = orData.data
+            .map(m => m.id)
+            .filter(id => id) // Remove any null/undefined IDs
+            .sort();
+
+          if (orModels.length > 0) {
+            spinner.succeed(`Found ${orModels.length} OpenRouter models`);
+            return orModels;
+          } else {
+            spinner.warn('No models found, using defaults');
+            return provider.defaultModels;
+          }
+        } catch (error) {
+          spinner.fail(`Failed to fetch OpenRouter models: ${error.message}`);
+          console.log(chalk.yellow(' Using default model list\n'));
+          return provider.defaultModels;
+        }
 
       default:
         spinner.warn('Model fetching not supported, using defaults');
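The same Google Generative Language REST endpoint is used both for key validation and for model discovery. A standalone sketch of that call, assuming Node 18+ where fetch is global (the shipped code requires node-fetch instead):

    // Standalone sketch mirroring the generateContent filter used above.
    async function listGeminiModels(apiKey) {
      const res = await fetch(
        `https://generativelanguage.googleapis.com/v1beta/models?key=${apiKey}`
      );
      if (!res.ok) throw new Error(`HTTP ${res.status}: ${res.statusText}`);
      const { models = [] } = await res.json();
      return models
        .filter(m => (m.supportedGenerationMethods || []).includes('generateContent'))
        .map(m => m.name.replace('models/', ''))
        .sort();
    }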
@@ -416,16 +568,51 @@ async function configureAIProvider(projectPath = process.cwd()) {
     console.log(chalk.green(`\n✓ Using API key from ${keySource}`));
   }
 
-  //
+  // Validate API key by testing basic connectivity
+  const ora = require('ora');
+  const validationSpinner = ora('Validating API key...').start();
+
+  try {
+    await validateAPIKeyWithProvider(selectedProvider, apiKey);
+    validationSpinner.succeed(chalk.green('API key validated successfully'));
+  } catch (error) {
+    validationSpinner.fail(chalk.red('API key validation failed'));
+    console.log(chalk.red(`\nError: ${error.message}\n`));
+    console.log(chalk.yellow('The API key appears to be invalid or has connectivity issues.'));
+    console.log(chalk.gray('Please check your API key and try again.\n'));
+
+    const { retry } = await inquirer.prompt([
+      {
+        type: 'confirm',
+        name: 'retry',
+        message: 'Try again with a different API key?',
+        default: true
+      }
+    ]);
+
+    if (retry) {
+      return configureAIProvider(projectPath);
+    } else {
+      process.exit(1);
+    }
+  }
+
+  // Fetch available models (only after successful validation)
   const availableModels = await fetchAvailableModels(selectedProvider, apiKey);
 
-  // Show helpful
+  // Show helpful tips about model selection
   if (selectedProvider.id === 'openrouter' && availableModels.length > 50) {
     console.log(chalk.gray('\n💡 Tip: Recommended models for best compatibility:'));
     console.log(chalk.gray(' • anthropic/claude-sonnet-4-5'));
     console.log(chalk.gray(' • openai/gpt-4-turbo'));
     console.log(chalk.gray(' • google/gemini-pro-1.5'));
     console.log(chalk.yellow(' ⚠️ Free models may require specific privacy settings\n'));
+  } else if (selectedProvider.id === 'google') {
+    console.log(chalk.gray('\n💡 Tip: Recommended models for free tier:'));
+    console.log(chalk.gray(' • gemini-1.5-flash (fastest, lowest quota usage)'));
+    console.log(chalk.gray(' • gemini-1.5-flash-8b (ultra-fast, minimal quota)'));
+    console.log(chalk.gray(' • gemini-2.0-flash-exp (newer, experimental)'));
+    console.log(chalk.yellow(' ⚠️ Pro models (gemini-pro, gemini-2.5-pro) may exceed free tier quota\n'));
   }
 
   // Model selection with autocomplete
@@ -454,81 +641,66 @@ async function configureAIProvider(projectPath = process.cwd()) {
     envPath
   };
 
-  //
-
-
-      type: 'confirm',
-      name: 'testConnection',
-      message: 'Test AI connection before starting?',
-      default: true
-    }
-  ]);
+  // Verify the selected model is operational
+  console.log('');
+  const modelTestSpinner = ora(`Testing model ${model}...`).start();
 
-
-  const
-  const
-
-
-
-
-
-
-    }
-
+  try {
+    const AIClient = require('./ai-client');
+    const testClient = new AIClient({
+      provider: selectedProvider.id,
+      model,
+      apiKey
+    });
+
+    await testClient.test();
+    modelTestSpinner.succeed(chalk.green(`Model ${model} verified successfully`));
+  } catch (error) {
+    modelTestSpinner.fail(chalk.red(`Model ${model} test failed`));
+
+    // Check if this is a rate limit error
+    const isRateLimit = error.message.includes('429') ||
+                        error.message.includes('Too Many Requests') ||
+                        error.message.includes('quota') ||
+                        error.message.includes('rate limit');
+
+    if (isRateLimit) {
+      console.log(chalk.yellow('\n⚠️ Rate Limit / Quota Exceeded'));
+      console.log(chalk.gray('\nThis model is operational but you\'ve exceeded your API quota.'));
+      console.log(chalk.gray('Common causes:'));
+      console.log(chalk.gray(' • Free tier daily/hourly limits reached'));
+      console.log(chalk.gray(' • Too many recent requests'));
+      console.log(chalk.gray(' • Model requires paid API tier\n'));
+
+      console.log(chalk.cyan('Solutions:'));
+      console.log(chalk.gray(' 1. Select a different model with available quota'));
+      console.log(chalk.gray(' 2. Wait for quota reset (usually hourly or daily)'));
+      console.log(chalk.gray(' 3. Upgrade to paid tier at: https://ai.google.dev/pricing'));
+      console.log(chalk.gray(' 4. Use a different AI provider (OpenAI, Anthropic, etc.)\n'));
+    } else {
       console.log(chalk.red(`\nError: ${error.message}\n`));
+      console.log(chalk.yellow('⚠️ This model appears to be listed but not operational.'));
+      console.log(chalk.gray('Possible reasons:'));
+      console.log(chalk.gray(' • Model requires higher API tier or special access'));
+      console.log(chalk.gray(' • Model is experimental or in preview'));
+      console.log(chalk.gray(' • Model has specific parameter requirements'));
+      console.log(chalk.gray(' • Provider-side issue\n'));
+    }
 
-
-
-
-
-
-
-        console.log(chalk.gray('   OpenRouter: Free models require specific privacy settings.'));
-        console.log(chalk.cyan('   Solutions:'));
-        console.log(chalk.gray('   1. Configure privacy: https://openrouter.ai/settings/privacy'));
-        console.log(chalk.gray('   2. Select a paid model instead (recommended)'));
-        console.log(chalk.gray('   3. Enable "Free model publication" in settings\n'));
-      } else {
-        console.log(chalk.gray('   The selected model may require specific account settings.'));
-        console.log(chalk.gray('   Try selecting a different model or check your provider settings.\n'));
-      }
-    } else if (errorMsg.includes('404') || errorMsg.includes('not found') || errorMsg.includes('no endpoints')) {
-      console.log(chalk.yellow('📋 Model Not Available:\n'));
-      console.log(chalk.gray('   The selected model may no longer be available or accessible.'));
-      console.log(chalk.cyan('   Solutions:'));
-      console.log(chalk.gray('   1. Select a different, more widely supported model'));
-      console.log(chalk.gray('   2. Check the provider\'s model availability'));
-      console.log(chalk.gray('   3. Some models require specific API endpoints or settings\n'));
-    } else if (errorMsg.includes('400') || errorMsg.includes('unsupported parameter')) {
-      console.log(chalk.yellow('📋 Model Parameter Incompatibility:\n'));
-      console.log(chalk.gray('   The selected model may use different API parameters.'));
-      console.log(chalk.cyan('   Solutions:'));
-      console.log(chalk.gray('   1. Try a more standard model (e.g., gpt-4-turbo, claude-3-5-sonnet)'));
-      console.log(chalk.gray('   2. Some newer/experimental models may not be fully supported yet\n'));
-    } else if (errorMsg.includes('401') || errorMsg.includes('invalid') || errorMsg.includes('unauthorized')) {
-      console.log(chalk.yellow('📋 Invalid API Key:\n'));
-      console.log(chalk.gray('   Please check that your API key is correct and active.\n'));
-    } else if (errorMsg.includes('429') || errorMsg.includes('rate limit')) {
-      console.log(chalk.yellow('📋 Rate Limit:\n'));
-      console.log(chalk.gray('   You\'ve hit the API rate limit. Please wait a moment and try again.\n'));
-    } else {
-      console.log(chalk.yellow('💡 Tip: Try selecting a more widely supported model\n'));
+    const { retry } = await inquirer.prompt([
+      {
+        type: 'confirm',
+        name: 'retry',
+        message: isRateLimit ? 'Try selecting a different model?' : 'Try selecting a different model?',
+        default: true
       }
+    ]);
 
-
-
-
-
-
-      default: true
-    }
-  ]);
-
-    if (retry) {
-      return configureAIProvider(projectPath); // Recursive retry
-    } else {
-      process.exit(1);
-    }
+    if (retry) {
+      // Recursively retry model selection (keep same provider)
+      return configureAIProvider(projectPath);
+    } else {
+      process.exit(1);
     }
   }
 
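The rate-limit check above is inlined as a chain of substring matches on the error message (note that both branches of the ternary building the retry prompt message are identical in this release). Written as a standalone helper for clarity, the same check reads:

    // Equivalent standalone form of the inline check above (illustration only).
    function isRateLimitError(error) {
      const msg = String((error && error.message) || '');
      return msg.includes('429') ||
             msg.includes('Too Many Requests') ||
             msg.includes('quota') ||
             msg.includes('rate limit');
    }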
package/lib/commands/deploy.js
CHANGED

@@ -8,22 +8,30 @@ const {
   generateCursor,
   generateVSCode,
   generateZed,
-  generateAntigravity
+  generateAntigravity,
+  generateOpenCode,
+  generateGeminiCLI,
+  generateDeepAgent
 } = require('../generators');
 const ContextManager = require('../utils/context-manager');
 
 const TOOLS = {
+  // IDEs
   windsurf: { name: 'Windsurf', configFile: '.windsurfrules' },
   cursor: { name: 'Cursor', configFile: '.cursorrules' },
   vscode: { name: 'VS Code', configFile: '.vscode/settings.json' },
   'vscode-insider': { name: 'VS Code Insider', configFile: '.vscode-insider/settings.json' },
   zed: { name: 'Zed Editor', configFile: '.zed/settings.json' },
-
-
-
-  '
-  '
-
+  antigravity: { name: 'Google Antigravity', configFile: '.antigravity/agents.yaml' },
+
+  // CLI Tools
+  'claude-code': { name: 'Claude Code', configFile: '.framework/agents/' },
+  'opencode': { name: 'OpenCode CLI', configFile: '.opencode.json' },
+  'gemini-cli': { name: 'Gemini CLI', configFile: 'GEMINI.md' },
+  'deepagent': { name: 'DeepAgent (Abacus.ai)', configFile: '.deepagent/agents/' },
+
+  // Generic
+  generic: { name: 'Generic AI Tools', configFile: '.framework/agents/' }
 };
 
 /**
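The configFile values encode a convention used further down in this file: entries ending in a slash point at directories owned by a dedicated generator, and the generic single-file copy step skips them. A sketch of the check:

    // A trailing slash marks a directory-based tool handled entirely by its generator.
    const toolConfig = TOOLS['deepagent'];                        // { name: 'DeepAgent (Abacus.ai)', configFile: '.deepagent/agents/' }
    const isDirectoryTool = toolConfig.configFile.endsWith('/');  // true, so the copy step below is skipped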
@@ -190,6 +198,25 @@ async function deployToTool(tool, options = {}) {
         console.log(chalk.green('✓ Generated Antigravity configurations'));
         console.log(chalk.gray(` - .antigravity/agents.yaml (AGENTS.md mount, .context access)`));
       }
+    } else if (tool === 'opencode') {
+      generatedFiles = await generateOpenCode(sessionPath, cwd, framework);
+      if (!options.silent && !spinner) {
+        console.log(chalk.green('✓ Generated OpenCode CLI configurations'));
+        console.log(chalk.gray(` - .opencode.json (project config, agents, MCP servers)`));
+      }
+    } else if (tool === 'gemini-cli') {
+      generatedFiles = await generateGeminiCLI(sessionPath, cwd, framework);
+      if (!options.silent && !spinner) {
+        console.log(chalk.green('✓ Generated Gemini CLI configurations'));
+        console.log(chalk.gray(` - GEMINI.md (project context and instructions)`));
+      }
+    } else if (tool === 'deepagent') {
+      generatedFiles = await generateDeepAgent(sessionPath, cwd, framework);
+      if (!options.silent && !spinner) {
+        console.log(chalk.green('✓ Generated DeepAgent configurations'));
+        console.log(chalk.gray(` - .deepagent/agents/ (agent markdown files)`));
+        console.log(chalk.gray(` - .deepagent/README.md (project overview)`));
+      }
     }
   } catch (error) {
     console.warn(chalk.yellow(`\n⚠️ Warning: Could not generate ${TOOLS[tool]?.name || tool} configurations: ${error.message}`));

@@ -214,8 +241,9 @@ async function deployToTool(tool, options = {}) {
   }
 
   // Copy tool-specific config (simplified)
+  // Skip for directory-based tools (claude-code, deepagent, generic) - they use dedicated generators
   const toolConfig = TOOLS[tool];
-  if (toolConfig) {
+  if (toolConfig && !toolConfig.configFile.endsWith('/')) {
     const configPath = path.join(cwd, toolConfig.configFile);
     const configDir = path.dirname(configPath);
 
package/lib/generators/deepagent-generator.js
ADDED

@@ -0,0 +1,144 @@
+const fs = require('fs-extra');
+const path = require('path');
+const ToolConfigGenerator = require('./tool-config-generator');
+
+/**
+ * Generator for Abacus.ai DeepAgent configurations
+ * Creates agent markdown files in .deepagent/agents/ directory
+ * Similar to Claude Code but for DeepAgent CLI
+ */
+class DeepAgentGenerator extends ToolConfigGenerator {
+  /**
+   * Generate DeepAgent configuration
+   * @returns {Object} Generated file paths
+   */
+  async generate() {
+    await this.initialize();
+
+    const deepagentDir = path.join(this.projectPath, '.deepagent');
+    const agentsDir = path.join(deepagentDir, 'agents');
+
+    await fs.ensureDir(agentsDir);
+
+    // Copy agent files from session
+    const agentFiles = await this.deployAgentFiles(agentsDir);
+
+    // Create README with project context
+    const readmePath = await this.generateReadme(deepagentDir);
+
+    return {
+      agents: agentFiles,
+      readme: readmePath
+    };
+  }
+
+  /**
+   * Generate README.md with project context
+   */
+  async generateReadme(deepagentDir) {
+    const readmePath = path.join(deepagentDir, 'README.md');
+    const projectContext = await this.getProjectContext();
+
+    const content = `# DeepAgent Configuration
+
+> Generated by ADF CLI v${this.getADFVersion()}
+> Framework: ${this.getFrameworkName()}
+
+## Project Overview
+
+**${projectContext.name || 'Project'}**
+
+${projectContext.overview || 'AI-assisted development project'}
+
+## Agent Files
+
+This directory contains agent definitions for DeepAgent CLI:
+
+${this.getAgentList()}
+
+## Workflow Level
+
+**${this.framework.toUpperCase()}**: ${this.getWorkflowDescription()}
+
+## Quick Commands
+
+\`\`\`bash
+# Build
+${projectContext.buildCommand || 'npm run build'}
+
+# Test
+${projectContext.testCommand || 'npm test'}
+
+# Lint
+${projectContext.lintCommand || 'npm run lint'}
+\`\`\`
+
+## Requirements Location
+
+Complete requirements and documentation:
+- \`.adf/sessions/${this.getSessionId()}/outputs/\`
+
+## Usage
+
+DeepAgent will automatically load agents from this directory.
+Each agent file contains role-specific instructions and context.
+
+---
+
+*Generated by AgentDevFramework (ADF CLI)*
+`;
+
+    await fs.writeFile(readmePath, content, 'utf-8');
+    return readmePath;
+  }
+
+  /**
+   * Get list of agents based on framework
+   */
+  getAgentList() {
+    const agentLists = {
+      'rapid': '- `dev.md` - Developer agent\n- `qa.md` - QA engineer agent',
+      'balanced': '- `analyst.md` - Business analyst agent\n- `pm.md` - Product manager agent\n- `dev.md` - Developer agent\n- `qa.md` - QA engineer agent',
+      'comprehensive': '- `analyst.md` - Business analyst agent\n- `pm.md` - Product manager agent\n- `architect.md` - Solutions architect agent\n- `sm.md` - Scrum master agent\n- `dev.md` - Developer agent\n- `qa.md` - QA engineer agent'
+    };
+    return agentLists[this.framework] || agentLists['balanced'];
+  }
+
+  /**
+   * Get workflow description
+   */
+  getWorkflowDescription() {
+    const descriptions = {
+      'rapid': 'Fast iteration with core features (PRP)',
+      'balanced': 'Specification-driven development (Spec-Kit)',
+      'comprehensive': 'Enterprise-grade with full BMAD documentation'
+    };
+    return descriptions[this.framework] || 'Standard development workflow';
+  }
+
+  /**
+   * Get framework display name
+   */
+  getFrameworkName() {
+    const names = {
+      'rapid': 'Rapid Development (PRP)',
+      'balanced': 'Balanced (Specification-Driven)',
+      'comprehensive': 'BMAD Comprehensive (Enterprise)'
+    };
+    return names[this.framework] || this.framework;
+  }
+
+  /**
+   * Get ADF CLI version
+   */
+  getADFVersion() {
+    try {
+      const packageJson = require('../../package.json');
+      return packageJson.version;
+    } catch (error) {
+      return '0.12.0';
+    }
+  }
+}
+
+module.exports = DeepAgentGenerator;