@iservu-inc/adf-cli 0.4.14 → 0.4.15
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +29 -0
- package/lib/ai/ai-client.js +16 -4
- package/lib/ai/ai-config.js +1 -1
- package/lib/frameworks/interviewer.js +3 -3
- package/package.json +1 -1
package/CHANGELOG.md
CHANGED
|
@@ -5,6 +5,35 @@ All notable changes to `@iservu-inc/adf-cli` will be documented in this file.
|
|
|
5
5
|
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
|
|
6
6
|
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
|
|
7
7
|
|
|
8
|
+
## [0.4.15] - 2025-10-04
|
|
9
|
+
|
|
10
|
+
### š Bug Fixes & Enhancements
|
|
11
|
+
|
|
12
|
+
**Fixed: Block Numbering Confusion (Issue #3)**
|
|
13
|
+
- **Problem:** AI Configuration showed as "Block 1" then interview questions also started with "Block 1 of 5" - creating TWO "Block 1"s
|
|
14
|
+
- **Root Cause:** AI config was labeled as a numbered block when it's actually pre-interview setup
|
|
15
|
+
- **Fix:** Changed label from "š Block 1: AI Configuration" to "š¤ AI Provider Configuration (Pre-Interview)"
|
|
16
|
+
- **Impact:** Clearer distinction between pre-interview setup and actual interview blocks
|
|
17
|
+
|
|
18
|
+
**Added: OpenAI o1 Model Support**
|
|
19
|
+
- **Added:** Support for OpenAI's new o1 reasoning models: `o1`, `o1-mini`, `o1-preview`
|
|
20
|
+
- **Problem Solved:** o1 models were failing with parameter errors
|
|
21
|
+
- Error 1: "Unsupported parameter: 'max_tokens' is not supported with this model"
|
|
22
|
+
- Error 2: o1 models don't support temperature parameter
|
|
23
|
+
- **Solution:** Added conditional API parameter handling in `openaiRequest()`
|
|
24
|
+
- o1 models use `max_completion_tokens` instead of `max_tokens`
|
|
25
|
+
- o1 models skip `temperature` parameter entirely
|
|
26
|
+
- Standard GPT models continue to use `max_tokens` and `temperature`
|
|
27
|
+
- **Impact:** o1 models now work correctly for AI-assisted interviews
|
|
28
|
+
|
|
29
|
+
**Technical Details:**
|
|
30
|
+
- `interviewer.js:365`: Updated AI config label to clarify pre-interview status
|
|
31
|
+
- `ai-config.js:36`: Added o1 models to OpenAI defaultModels array
|
|
32
|
+
- `ai-client.js:115-150`: Rewrote `openaiRequest()` with o1 model detection
|
|
33
|
+
- Detects o1 models with `this.model.startsWith('o1')`
|
|
34
|
+
- Uses conditional logic for parameter selection
|
|
35
|
+
- All 120 tests passing
|
|
36
|
+
|
|
8
37
|
## [0.4.14] - 2025-10-04
|
|
9
38
|
|
|
10
39
|
### š Critical Bug Fixes
|
package/lib/ai/ai-client.js
CHANGED
|
@@ -113,17 +113,29 @@ class AIClient {
|
|
|
113
113
|
* OpenAI GPT request
|
|
114
114
|
*/
|
|
115
115
|
async openaiRequest(prompt, maxTokens, temperature) {
|
|
116
|
-
|
|
116
|
+
// o1 models use max_completion_tokens instead of max_tokens
|
|
117
|
+
const isO1Model = this.model.startsWith('o1');
|
|
118
|
+
|
|
119
|
+
const requestParams = {
|
|
117
120
|
model: this.model,
|
|
118
|
-
max_tokens: maxTokens,
|
|
119
|
-
temperature,
|
|
120
121
|
messages: [
|
|
121
122
|
{
|
|
122
123
|
role: 'user',
|
|
123
124
|
content: prompt
|
|
124
125
|
}
|
|
125
126
|
]
|
|
126
|
-
}
|
|
127
|
+
};
|
|
128
|
+
|
|
129
|
+
// o1 models use different parameter names
|
|
130
|
+
if (isO1Model) {
|
|
131
|
+
requestParams.max_completion_tokens = maxTokens;
|
|
132
|
+
// o1 models don't support temperature parameter
|
|
133
|
+
} else {
|
|
134
|
+
requestParams.max_tokens = maxTokens;
|
|
135
|
+
requestParams.temperature = temperature;
|
|
136
|
+
}
|
|
137
|
+
|
|
138
|
+
const response = await this.client.chat.completions.create(requestParams);
|
|
127
139
|
|
|
128
140
|
return {
|
|
129
141
|
content: response.choices[0].message.content,
|
package/lib/ai/ai-config.js
CHANGED
|
@@ -33,7 +33,7 @@ const AI_PROVIDERS = {
|
|
|
33
33
|
requiredFormat: 'sk-',
|
|
34
34
|
website: 'https://platform.openai.com/',
|
|
35
35
|
setup: 'Get your API key from https://platform.openai.com/api-keys',
|
|
36
|
-
defaultModels: ['gpt-4-turbo', 'gpt-
|
|
36
|
+
defaultModels: ['gpt-4o', 'gpt-4-turbo', 'gpt-4', 'o1', 'o1-mini', 'o1-preview', 'gpt-3.5-turbo']
|
|
37
37
|
},
|
|
38
38
|
GOOGLE: {
|
|
39
39
|
id: 'google',
|
|
package/lib/frameworks/interviewer.js
CHANGED
|
@@ -90,8 +90,8 @@ class Interviewer
|
|
|
90
90
|
// User already configured AI via 'adf config' - just show reminder and use it
|
|
91
91
|
console.log(chalk.cyan.bold('ā'.repeat(60)));
|
|
92
92
|
console.log(chalk.green(`\nā AI Provider already configured: ${configuredProvider.name}`));
|
|
93
|
-
console.log(chalk.gray(' (configured via: adf config)
|
|
94
|
-
console.log(chalk.gray('š” To manage AI providers: adf config\n'));
|
|
93
|
+
console.log(chalk.gray(' (configured via: adf config)'));
|
|
94
|
+
console.log(chalk.gray(' š” To manage AI providers: adf config\n'));
|
|
95
95
|
|
|
96
96
|
// Load env into process
|
|
97
97
|
loadEnvIntoProcess(envPath);
|
|
@@ -362,7 +362,7 @@ class Interviewer {
|
|
|
362
362
|
|
|
363
363
|
async promptAIConfiguration() {
|
|
364
364
|
console.log(chalk.cyan.bold('ā'.repeat(60)));
|
|
365
|
-
console.log(chalk.cyan.bold('\n
|
|
365
|
+
console.log(chalk.cyan.bold('\nš¤ AI Provider Configuration (Pre-Interview)\n'));
|
|
366
366
|
console.log(chalk.gray('Configure your AI provider for intelligent follow-up questions\n'));
|
|
367
367
|
|
|
368
368
|
const { configureAI } = await inquirer.prompt([
|