@iservu-inc/adf-cli 0.4.13 → 0.4.15
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +55 -0
- package/lib/ai/ai-client.js +16 -4
- package/lib/ai/ai-config.js +1 -1
- package/lib/frameworks/interviewer.js +67 -14
- package/lib/frameworks/session-manager.js +5 -2
- package/package.json +1 -1
package/CHANGELOG.md
CHANGED
@@ -5,6 +5,61 @@ All notable changes to `@iservu-inc/adf-cli` will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
+## [0.4.15] - 2025-10-04
+
+### 🐛 Bug Fixes & Enhancements
+
+**Fixed: Block Numbering Confusion (Issue #3)**
+- **Problem:** AI Configuration showed as "Block 1", then the interview questions also started with "Block 1 of 5" - creating two "Block 1"s
+- **Root Cause:** AI config was labeled as a numbered block when it is actually pre-interview setup
+- **Fix:** Changed label from "📋 Block 1: AI Configuration" to "🤖 AI Provider Configuration (Pre-Interview)"
+- **Impact:** Clearer distinction between pre-interview setup and the actual interview blocks
+
+**Added: OpenAI o1 Model Support**
+- **Added:** Support for OpenAI's new o1 reasoning models: `o1`, `o1-mini`, `o1-preview`
+- **Problem Solved:** o1 models were failing with parameter errors
+  - Error 1: "Unsupported parameter: 'max_tokens' is not supported with this model"
+  - Error 2: o1 models don't support the temperature parameter
+- **Solution:** Added conditional API parameter handling in `openaiRequest()`
+  - o1 models use `max_completion_tokens` instead of `max_tokens`
+  - o1 models skip the `temperature` parameter entirely
+  - Standard GPT models continue to use `max_tokens` and `temperature`
+- **Impact:** o1 models now work correctly for AI-assisted interviews
+
+**Technical Details:**
+- `interviewer.js:365`: Updated the AI config label to clarify its pre-interview status
+- `ai-config.js:36`: Added o1 models to the OpenAI defaultModels array
+- `ai-client.js:115-150`: Rewrote `openaiRequest()` with o1 model detection
+  - Detects o1 models with `this.model.startsWith('o1')`
+  - Uses conditional logic for parameter selection
+- All 120 tests passing
+
+## [0.4.14] - 2025-10-04
+
+### 🐛 Critical Bug Fixes
+
+**Fixed: False .adf Directory Detection (Issue #1)**
+- **Problem:** `adf init` in empty projects incorrectly detected a `.adf` directory and asked to overwrite it
+- **Root Cause:** `SessionManager.listSessions()` was creating the `.adf/sessions/` directory during the resume check
+- **Fix:** Modified `listSessions()` to return an empty array if the directory doesn't exist instead of creating it
+- **Impact:** Clean projects no longer show false "overwrite" prompts
+
+**Fixed: Skip AI Config if Already Configured (Issue #2)**
+- **Problem:** Users who configured AI via `adf config` were still prompted during `adf init`
+- **Solution:** The interview now detects an existing `.adf/.env` configuration
+- **Behavior:**
+  - **If configured:** Shows "✅ AI Provider already configured: [Provider Name]" and uses the existing config
+  - **If not configured:** Shows "📋 Block 1: AI Configuration" with a prompt to configure
+- **Impact:** Better UX - respects the user's configuration choices
+
+**Technical Details:**
+- `session-manager.js`: Added an existence check before creating the directory
+- `interviewer.js`: Added AI config detection logic
+  - Checks for existing API keys in `.adf/.env`
+  - Auto-loads the first available provider (Anthropic > OpenAI > Google > OpenRouter)
+  - Shows a helpful reminder about `adf config` for managing providers
+- All 120 tests passing
+
 ## [0.4.13] - 2025-10-04
 
 ### 🐛 Bug Fixes & UX Improvements

package/lib/ai/ai-client.js
CHANGED
@@ -113,17 +113,29 @@ class AIClient {
    * OpenAI GPT request
    */
   async openaiRequest(prompt, maxTokens, temperature) {
+    // o1 models use max_completion_tokens instead of max_tokens
+    const isO1Model = this.model.startsWith('o1');
+
+    const requestParams = {
       model: this.model,
-      max_tokens: maxTokens,
-      temperature,
       messages: [
         {
           role: 'user',
           content: prompt
         }
       ]
-    }
+    };
+
+    // o1 models use different parameter names
+    if (isO1Model) {
+      requestParams.max_completion_tokens = maxTokens;
+      // o1 models don't support temperature parameter
+    } else {
+      requestParams.max_tokens = maxTokens;
+      requestParams.temperature = temperature;
+    }
+
+    const response = await this.client.chat.completions.create(requestParams);
 
     return {
       content: response.choices[0].message.content,

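The parameter selection above is the heart of the 0.4.15 o1 fix, so it may help to see it isolated from the class. The sketch below is illustrative only: `buildOpenAIRequestParams` is a hypothetical helper name, not an export of this package, and it simply mirrors the `startsWith('o1')` branch shown in the diff.

```js
// Hypothetical helper mirroring the o1 handling in openaiRequest() above.
// Not part of @iservu-inc/adf-cli; shown only to illustrate the behavior.
function buildOpenAIRequestParams(model, prompt, maxTokens, temperature) {
  const isO1Model = model.startsWith('o1'); // same detection as the diff
  const params = {
    model,
    messages: [{ role: 'user', content: prompt }]
  };

  if (isO1Model) {
    params.max_completion_tokens = maxTokens; // o1: renamed token limit
    // temperature intentionally omitted: o1 models reject it
  } else {
    params.max_tokens = maxTokens; // standard GPT models keep the old names
    params.temperature = temperature;
  }
  return params;
}

// Example shapes:
console.log(buildOpenAIRequestParams('o1-mini', 'Hi', 256, 0.7));
// -> { model: 'o1-mini', messages: [...], max_completion_tokens: 256 }
console.log(buildOpenAIRequestParams('gpt-4o', 'Hi', 256, 0.7));
// -> { model: 'gpt-4o', messages: [...], max_tokens: 256, temperature: 0.7 }
```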
package/lib/ai/ai-config.js
CHANGED
@@ -33,7 +33,7 @@ const AI_PROVIDERS = {
     requiredFormat: 'sk-',
     website: 'https://platform.openai.com/',
     setup: 'Get your API key from https://platform.openai.com/api-keys',
-    defaultModels: ['gpt-4-turbo', 'gpt-
+    defaultModels: ['gpt-4o', 'gpt-4-turbo', 'gpt-4', 'o1', 'o1-mini', 'o1-preview', 'gpt-3.5-turbo']
   },
   GOOGLE: {
     id: 'google',

package/lib/frameworks/interviewer.js
CHANGED
@@ -63,24 +63,55 @@ class Interviewer {
 
     // Configure AI if not already configured (new sessions only)
     if (!this.aiConfig && !this.isResuming) {
+      // Check if user already configured AI via 'adf config'
+      const { getEnvFilePath, loadEnvFile, loadEnvIntoProcess } = require('../ai/ai-config');
+      const envPath = getEnvFilePath(this.projectPath);
+
+      if (await fs.pathExists(envPath)) {
+        const envVars = await loadEnvFile(envPath);
+
+        // Check which provider is configured
+        const providerMap = {
+          'ANTHROPIC_API_KEY': { id: 'anthropic', name: 'Anthropic Claude', models: ['claude-sonnet-4-5-20250929', 'claude-3-5-sonnet-20241022'] },
+          'OPENAI_API_KEY': { id: 'openai', name: 'OpenAI GPT', models: ['gpt-4-turbo', 'gpt-4o', 'gpt-4'] },
+          'GOOGLE_API_KEY': { id: 'google', name: 'Google Gemini', models: ['gemini-2.0-flash-exp', 'gemini-1.5-pro'] },
+          'OPENROUTER_API_KEY': { id: 'openrouter', name: 'OpenRouter', models: ['anthropic/claude-sonnet-4-5', 'openai/gpt-4-turbo'] }
+        };
 
+        let configuredProvider = null;
+        for (const [envVar, provider] of Object.entries(providerMap)) {
+          if (envVars[envVar] && envVars[envVar].length > 0) {
+            configuredProvider = { ...provider, envVar, apiKey: envVars[envVar] };
+            break;
+          }
         }
 
+        if (configuredProvider) {
+          // User already configured AI via 'adf config' - just show reminder and use it
+          console.log(chalk.cyan.bold('━'.repeat(60)));
+          console.log(chalk.green(`\n✅ AI Provider already configured: ${configuredProvider.name}`));
+          console.log(chalk.gray('   (configured via: adf config)'));
+          console.log(chalk.gray('   💡 To manage AI providers: adf config\n'));
+
+          // Load env into process
+          loadEnvIntoProcess(envPath);
+
+          // Create config object for this session
+          this.aiConfig = {
+            provider: configuredProvider.id,
+            providerName: configuredProvider.name,
+            model: configuredProvider.models[0], // Use first available model
+            apiKey: configuredProvider.apiKey,
+            envVar: configuredProvider.envVar,
+            envPath
+          };
+        } else {
+          // .env exists but no AI keys - prompt to configure
+          await this.promptAIConfiguration();
+        }
       } else {
+        // No .env file - prompt to configure
+        await this.promptAIConfiguration();
       }
     }
   }
 
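The "Anthropic > OpenAI > Google > OpenRouter" priority described in the changelog falls out of the insertion order of `providerMap` above: the first key with a non-empty value wins. A minimal standalone sketch of that selection step, using the hypothetical names `PROVIDER_PRIORITY` and `detectConfiguredProvider` (not exports of this package) and a plain object in place of the parsed `.adf/.env` contents:

```js
// Illustrative only: mirrors the first-match-wins loop added to interviewer.js.
const PROVIDER_PRIORITY = [
  ['ANTHROPIC_API_KEY', 'anthropic'],
  ['OPENAI_API_KEY', 'openai'],
  ['GOOGLE_API_KEY', 'google'],
  ['OPENROUTER_API_KEY', 'openrouter']
];

function detectConfiguredProvider(envVars) {
  for (const [envVar, id] of PROVIDER_PRIORITY) {
    if (envVars[envVar] && envVars[envVar].length > 0) {
      return { id, envVar, apiKey: envVars[envVar] }; // first configured provider wins
    }
  }
  return null; // nothing configured -> the interview prompts the user instead
}

// Example: both keys present, Anthropic wins because it is checked first.
const detected = detectConfiguredProvider({
  OPENAI_API_KEY: 'sk-test',
  ANTHROPIC_API_KEY: 'sk-ant-test'
});
console.log(detected.id); // -> 'anthropic'
```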
@@ -329,6 +360,28 @@ class Interviewer {
     return this.sessionPath;
   }
 
+  async promptAIConfiguration() {
+    console.log(chalk.cyan.bold('━'.repeat(60)));
+    console.log(chalk.cyan.bold('\n🤖 AI Provider Configuration (Pre-Interview)\n'));
+    console.log(chalk.gray('Configure your AI provider for intelligent follow-up questions\n'));
+
+    const { configureAI } = await inquirer.prompt([
+      {
+        type: 'confirm',
+        name: 'configureAI',
+        message: 'Configure AI provider now? (Recommended for best experience)',
+        default: true
+      }
+    ]);
+
+    if (configureAI) {
+      const { configureAIProvider } = require('../ai/ai-config');
+      this.aiConfig = await configureAIProvider(this.projectPath);
+    } else {
+      console.log(chalk.yellow('\n💡 You can configure AI later with: adf config\n'));
+    }
+  }
+
   getFrameworkName() {
     const names = {
       rapid: 'Product Requirement Prompt (PRP)',

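For readers skimming the diff, the new prompt flow reduces to a single inquirer confirm question followed by a branch. A standalone sketch of that pattern, assuming the CommonJS `inquirer` API the package already uses; the script and its log messages are illustrative and not part of the package:

```js
// Standalone illustration of the confirm-and-branch pattern in promptAIConfiguration().
// Hypothetical script, e.g. node ask-ai-config.js
const inquirer = require('inquirer');

async function main() {
  const { configureAI } = await inquirer.prompt([
    {
      type: 'confirm',
      name: 'configureAI',
      message: 'Configure AI provider now? (Recommended for best experience)',
      default: true
    }
  ]);

  if (configureAI) {
    console.log('Would call configureAIProvider(projectPath) here.');
  } else {
    console.log('You can configure AI later with: adf config');
  }
}

main();
```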

package/lib/frameworks/session-manager.js
CHANGED
@@ -15,9 +15,12 @@ class SessionManager {
   }
 
   async listSessions() {
+    // Don't create directory if it doesn't exist - just return empty array
+    if (!await fs.pathExists(this.sessionsDir)) {
+      return [];
+    }
 
+    const sessions = await fs.readdir(this.sessionsDir);
     const sessionDetails = [];
 
     for (const sessionId of sessions) {