@iservu-inc/adf-cli 0.11.0 → 0.12.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. package/.adf/feature-audit.md +208 -0
  2. package/.adf/final-summary.md +347 -0
  3. package/.adf/implementation-plan.md +244 -0
  4. package/.adf/implementation-progress.md +203 -0
  5. package/.adf/learning/answer-history.json +995 -0
  6. package/.adf/learning/config.json +25 -0
  7. package/.adf/learning/learned-rules.json +59 -0
  8. package/.adf/learning/patterns.json +277 -0
  9. package/.adf/learning/skip-history.json +1451 -0
  10. package/.adf/learning/stats.json +9 -0
  11. package/.claude/settings.local.json +12 -5
  12. package/CHANGELOG.md +110 -0
  13. package/CLAUDE.md +479 -0
  14. package/bin/adf.js +339 -1
  15. package/lib/ai/ai-client.js +161 -44
  16. package/lib/ai/ai-config.js +249 -105
  17. package/lib/commands/deploy.js +73 -6
  18. package/lib/generators/agents-md-generator.js +431 -161
  19. package/lib/generators/antigravity-generator.js +140 -0
  20. package/lib/generators/deepagent-generator.js +144 -0
  21. package/lib/generators/gemini-cli-generator.js +241 -0
  22. package/lib/generators/index.js +55 -0
  23. package/lib/generators/opencode-generator.js +153 -0
  24. package/lib/generators/zed-generator.js +252 -0
  25. package/lib/templates/shared/agents/architect.md +24 -24
  26. package/lib/templates/shared/agents/dev.md +25 -20
  27. package/lib/templates/shared/agents/pm.md +14 -4
  28. package/lib/templates/shared/agents/sm.md +18 -14
  29. package/lib/templates/shared/templates/openspec-delta.md +16 -0
  30. package/lib/templates/shared/templates/openspec-proposal.md +18 -0
  31. package/lib/templates/shared/templates/openspec-tasks.md +21 -0
  32. package/lib/utils/context-manager.js +484 -0
  33. package/package.json +6 -1
  34. package/tests/agents-md-generator.test.js +47 -10
package/bin/adf.js CHANGED
@@ -29,7 +29,68 @@ program
29
29
  .addHelpText('before', () => {
30
30
  const adfInstallPath = path.resolve(__dirname, '..');
31
31
  return `\n${chalk.cyan.bold('ADF CLI')} ${chalk.gray(`v${packageJson.version}`)}\n${chalk.gray('Install Path:')} ${chalk.white(adfInstallPath)}\n`;
32
- });
32
+ })
33
+ .addHelpText('after', `
34
+ ${chalk.cyan.bold('Quick Start:')}
35
+ ${chalk.gray('1. Configure your AI provider')}
36
+ $ adf config
37
+
38
+ ${chalk.gray('2. Initialize a project with AI-guided interview')}
39
+ $ adf init
40
+
41
+ ${chalk.gray('3. Deploy to your AI coding assistant')}
42
+ $ adf deploy
43
+
44
+ ${chalk.cyan.bold('What is ADF?')}
45
+ AgentDevFramework (ADF) is an AI-assisted development framework that helps
46
+ you gather requirements through intelligent AI-guided interviews. It supports
47
+ multiple AI providers and workflow levels, from rapid prototyping to
48
+ comprehensive enterprise documentation.
49
+
50
+ ${chalk.cyan.bold('Key Features:')}
51
+ • ${chalk.yellow('Multi-Provider AI')} - Anthropic, OpenAI, Google, OpenRouter
52
+ • ${chalk.yellow('3 Workflow Levels')} - Rapid (PRP), Balanced (Spec-Kit), Comprehensive (BMAD)
53
+ • ${chalk.yellow('Smart Filtering')} - AI-powered question filtering based on context
54
+ • ${chalk.yellow('Learning System')} - Adapts to your preferences over time
55
+ • ${chalk.yellow('Session Resume')} - Pause and resume interviews anytime
56
+ • ${chalk.yellow('Multi-Tool Deploy')} - 10+ IDEs and CLI tools supported
57
+ ${chalk.gray('IDEs:')} Windsurf, Cursor, VS Code, Zed, Antigravity
58
+ ${chalk.gray('CLI:')} Claude Code, OpenCode, Gemini CLI, DeepAgent
59
+
60
+ ${chalk.cyan.bold('Getting Help:')}
61
+ ${chalk.gray('# Detailed help for specific commands')}
62
+ $ adf init --help
63
+ $ adf deploy --help
64
+ $ adf config --help
65
+ $ adf update --help
66
+
67
+ ${chalk.gray('# Check version and install path')}
68
+ $ adf --version
69
+
70
+ ${chalk.cyan.bold('Common Workflows:')}
71
+ ${chalk.yellow('Quick Prototype:')}
72
+ $ adf init --rapid --tool windsurf
73
+
74
+ ${chalk.yellow('Standard Project:')}
75
+ $ adf config ${chalk.gray('# Configure AI provider once')}
76
+ $ adf init --balanced ${chalk.gray('# Run interview')}
77
+ $ adf deploy cursor ${chalk.gray('# Deploy to Cursor')}
78
+
79
+ ${chalk.yellow('Enterprise Project:')}
80
+ $ adf config ${chalk.gray('# Configure with comprehensive mode')}
81
+ $ adf init --comprehensive
82
+ $ adf deploy vscode ${chalk.gray('# Deploy to multiple tools')}
83
+ $ adf deploy claude-code
84
+
85
+ ${chalk.cyan.bold('Documentation & Support:')}
86
+ • GitHub: https://github.com/iservu/adf-cli
87
+ • Issues: https://github.com/iservu/adf-cli/issues
88
+ • Docs: https://github.com/iservu/adf-cli#readme
89
+
90
+ ${chalk.cyan.bold('Learn More:')}
91
+ Run any command with ${chalk.white('--help')} for detailed information:
92
+ ${chalk.gray('$ adf <command> --help')}
93
+ `);
33
94
 
34
95
  // adf init
35
96
  program
@@ -39,6 +100,51 @@ program
39
100
  .option('--balanced', 'Skip questions, use Level 2 (Balanced)')
40
101
  .option('--comprehensive', 'Skip questions, use Level 3 (Comprehensive)')
41
102
  .option('--tool <tool>', 'Specify deployment tool (windsurf, cursor, etc.)')
103
+ .addHelpText('after', `
104
+ ${chalk.cyan.bold('Description:')}
105
+ Start an interactive AI-guided interview to gather project requirements.
106
+ The interview uses AI to analyze your answers and generate comprehensive
107
+ documentation based on your chosen workflow level.
108
+
109
+ ${chalk.cyan.bold('Workflow Levels:')}
110
+ ${chalk.yellow('Rapid (PRP)')} - 20 core questions, fast setup
111
+ Agents: dev, qa
112
+ Output: prp.md
113
+ Best for: Quick prototypes, MVPs
114
+
115
+ ${chalk.yellow('Balanced (Spec-Kit)')} - 30+ questions, detailed specs
116
+ Agents: analyst, pm, dev, qa
117
+ Output: constitution.md, specification.md, plan.md, tasks.md
118
+ Best for: Standard projects with clear requirements
119
+
120
+ ${chalk.yellow('Comprehensive (BMAD)')} - 40+ questions, full documentation
121
+ Agents: analyst, pm, architect, sm, dev, qa
122
+ Output: prd.md, architecture.md, stories.md
123
+ Best for: Enterprise projects, complex systems
124
+
125
+ ${chalk.cyan.bold('Examples:')}
126
+ ${chalk.gray('# Interactive mode - choose workflow interactively')}
127
+ $ adf init
128
+
129
+ ${chalk.gray('# Quick start with Rapid workflow')}
130
+ $ adf init --rapid
131
+
132
+ ${chalk.gray('# Balanced workflow deployed to Windsurf')}
133
+ $ adf init --balanced --tool windsurf
134
+
135
+ ${chalk.gray('# Comprehensive workflow for enterprise project')}
136
+ $ adf init --comprehensive
137
+
138
+ ${chalk.cyan.bold('Features:')}
139
+ • AI-powered question filtering based on project context
140
+ • Answer quality analysis and suggestions
141
+ • Session resume capability (Ctrl+C to pause)
142
+ • Learning system that adapts to your preferences
143
+ • Multi-provider AI support (Anthropic, OpenAI, Google, OpenRouter)
144
+
145
+ ${chalk.cyan.bold('Output Directory:')}
146
+ All outputs saved to: ${chalk.white('.adf/sessions/{timestamp}_{workflow}/')}
147
+ `)
42
148
  .action(initCommand);
43
149
 
44
150
  // adf deploy
@@ -46,6 +152,96 @@ program
46
152
  .command('deploy [tool]')
47
153
  .description('Deploy framework to specified tool')
48
154
  .option('-l, --list', 'List available deployment tools')
155
+ .addHelpText('after', `
156
+ ${chalk.cyan.bold('Description:')}
157
+ Deploy ADF framework outputs to your AI coding assistant or IDE.
158
+ Converts session outputs (PRD, specs, stories) into tool-specific
159
+ configuration files and agent definitions.
160
+
161
+ ${chalk.cyan.bold('Command Syntax:')}
162
+ ${chalk.white('adf deploy [tool]')}
163
+
164
+ ${chalk.yellow('[tool]')} - (Optional) Target tool name
165
+ If omitted, you'll be prompted to select interactively
166
+ Supported: windsurf, cursor, vscode, claude-code, zed, generic
167
+
168
+ ${chalk.cyan.bold('Supported Tools:')}
169
+ ${chalk.white.bold('IDEs:')}
170
+ ${chalk.yellow('windsurf')} - Codeium Windsurf IDE
171
+ Output: .windsurfrules
172
+
173
+ ${chalk.yellow('cursor')} - Cursor AI IDE
174
+ Output: .cursorrules
175
+
176
+ ${chalk.yellow('vscode')} - Visual Studio Code
177
+ Output: .vscode/settings.json, .github/copilot-instructions.md
178
+
179
+ ${chalk.yellow('zed')} - Zed Editor
180
+ Output: .zed/settings.json, .zed/rules (symlink to AGENTS.md)
181
+
182
+ ${chalk.yellow('antigravity')} - Google Antigravity IDE
183
+ Output: .antigravity/agents.yaml
184
+
185
+ ${chalk.white.bold('CLI Tools:')}
186
+ ${chalk.yellow('claude-code')} - Anthropic Claude Code CLI
187
+ Output: .framework/agents/*.md
188
+
189
+ ${chalk.yellow('opencode')} - OpenCode CLI
190
+ Output: .opencode.json (JSON config with agents & MCP)
191
+
192
+ ${chalk.yellow('gemini-cli')} - Google Gemini CLI
193
+ Output: GEMINI.md (project context markdown)
194
+
195
+ ${chalk.yellow('deepagent')} - Abacus.ai DeepAgent CLI
196
+ Output: .deepagent/agents/*.md, .deepagent/README.md
197
+
198
+ ${chalk.white.bold('Generic:')}
199
+ ${chalk.yellow('generic')} - Generic AI tools (agent markdown files)
200
+ Output: .framework/agents/*.md
201
+
202
+ ${chalk.cyan.bold('Examples:')}
203
+ ${chalk.gray('# List available tools')}
204
+ $ adf deploy --list
205
+
206
+ ${chalk.gray('# Interactive tool selection')}
207
+ $ adf deploy
208
+
209
+ ${chalk.gray('# Deploy to specific tool')}
210
+ $ adf deploy windsurf
211
+
212
+ ${chalk.gray('# Deploy to multiple tools (run multiple times)')}
213
+ $ adf deploy cursor
214
+ $ adf deploy vscode
215
+
216
+ ${chalk.cyan.bold('How It Works:')}
217
+ 1. Finds latest completed interview session
218
+ 2. Loads session outputs (PRD, architecture, stories, etc.)
219
+ 3. Transforms outputs into tool-specific format
220
+ 4. Writes configuration files to project root
221
+ 5. Deploys agent definitions (if applicable)
222
+
223
+ ${chalk.cyan.bold('Requirements:')}
224
+ • Must have completed at least one 'adf init' session
225
+ • Session must have generated outputs
226
+ • Outputs located in: .adf/sessions/{timestamp}_{workflow}/outputs/
227
+
228
+ ${chalk.cyan.bold('Output Files by Tool:')}
229
+ ${chalk.white.bold('IDEs:')}
230
+ Windsurf: ${chalk.white('.windsurfrules')}
231
+ Cursor: ${chalk.white('.cursorrules')}
232
+ VS Code: ${chalk.white('.vscode/settings.json + .github/copilot-instructions.md')}
233
+ Zed: ${chalk.white('.zed/settings.json + .zed/rules')}
234
+ Antigravity: ${chalk.white('.antigravity/agents.yaml')}
235
+
236
+ ${chalk.white.bold('CLI Tools:')}
237
+ Claude Code: ${chalk.white('.framework/agents/*.md')}
238
+ OpenCode: ${chalk.white('.opencode.json')}
239
+ Gemini CLI: ${chalk.white('GEMINI.md')}
240
+ DeepAgent: ${chalk.white('.deepagent/agents/*.md + .deepagent/README.md')}
241
+
242
+ ${chalk.white.bold('Generic:')}
243
+ Generic: ${chalk.white('.framework/agents/*.md')}
244
+ `)
49
245
  .action(deployCommand);
50
246
 
51
247
  // adf update
@@ -53,12 +249,154 @@ program
53
249
  .command('update')
54
250
  .description('Check for CLI updates and update to latest version')
55
251
  .option('--check', 'Only check for updates, don\'t install')
252
+ .addHelpText('after', `
253
+ ${chalk.cyan.bold('Description:')}
254
+ Check for new versions of ADF CLI and update to the latest release.
255
+ Compares your current version with the latest published on npm registry.
256
+
257
+ ${chalk.cyan.bold('Command Syntax:')}
258
+ ${chalk.white('adf update [options]')}
259
+
260
+ ${chalk.cyan.bold('Options:')}
261
+ ${chalk.yellow('--check')} - Check for updates without installing
262
+ Shows version comparison and changelog preview
263
+ No modifications made to your installation
264
+
265
+ ${chalk.cyan.bold('Examples:')}
266
+ ${chalk.gray('# Check if updates are available')}
267
+ $ adf update --check
268
+
269
+ ${chalk.gray('# Update to latest version')}
270
+ $ adf update
271
+
272
+ ${chalk.gray('# Check current version')}
273
+ $ adf --version
274
+
275
+ ${chalk.cyan.bold('Update Process:')}
276
+ 1. Fetches latest version info from npm registry
277
+ 2. Compares with your installed version
278
+ 3. Shows changelog and new features
279
+ 4. Prompts for confirmation (if installing)
280
+ 5. Downloads and installs latest version
281
+ 6. Verifies successful installation
282
+
283
+ ${chalk.cyan.bold('Installation Methods:')}
284
+ ${chalk.yellow('Global Install')} (recommended)
285
+ $ npm install -g @iservu-inc/adf-cli
286
+ Updates system-wide installation
287
+
288
+ ${chalk.yellow('Local Development')} (npm link)
289
+ $ cd adf-cli && git pull && npm link
290
+ Updates linked development version
291
+
292
+ ${chalk.cyan.bold('What Gets Updated:')}
293
+ • Core CLI functionality
294
+ • AI provider integrations (Anthropic, OpenAI, Google, OpenRouter)
295
+ • Interview question sets
296
+ • Output generators and templates
297
+ • Bug fixes and performance improvements
298
+
299
+ ${chalk.cyan.bold('Backwards Compatibility:')}
300
+ • Existing .adf directories are preserved
301
+ • Sessions and learning data remain intact
302
+ • API keys and configurations are not affected
303
+ • May require re-running 'adf deploy' for new features
304
+ `)
56
305
  .action(updateCommand);
57
306
 
58
307
  // adf config
59
308
  program
60
309
  .command('config')
61
310
  .description('Configure ADF settings (AI provider, etc.)')
311
+ .addHelpText('after', `
312
+ ${chalk.cyan.bold('Description:')}
313
+ Interactive configuration wizard for ADF settings.
314
+ Configure AI providers, analysis settings, and learning system preferences.
315
+
316
+ ${chalk.cyan.bold('Configuration Categories:')}
317
+ ${chalk.yellow('1. AI Provider Setup')}
318
+ Configure which AI provider to use for interviews
319
+ • Select provider (Anthropic, OpenAI, Google, OpenRouter)
320
+ • Enter/validate API key
321
+ • Choose model from dynamically fetched list
322
+ • Test connection before saving
323
+
324
+ ${chalk.yellow('2. Analysis Settings')}
325
+ Control AI analysis features and performance
326
+ • Performance Mode: Fast / Balanced / Comprehensive
327
+ • Answer Quality Analysis: ON / OFF
328
+ • Smart Question Filtering: ON / OFF
329
+ • Question Reordering: ON / OFF
330
+ • Context Extraction: ON / OFF
331
+
332
+ ${chalk.yellow('3. Learning System')}
333
+ Manage skip pattern detection and auto-filtering
334
+ • Enable/Disable learning system
335
+ • View detected patterns and rules
336
+ • Manage learned preferences
337
+ • Export analytics to JSON/CSV
338
+ • Configure decay thresholds
339
+
340
+ ${chalk.cyan.bold('AI Providers Supported:')}
341
+ ${chalk.yellow('Anthropic Claude')}
342
+ Models: claude-sonnet-4-5, claude-opus-4-5, claude-3-5-sonnet, etc.
343
+ API Key: Get from https://console.anthropic.com/
344
+ Format: sk-ant-...
345
+
346
+ ${chalk.yellow('OpenAI GPT')}
347
+ Models: gpt-5.2-chat, gpt-4o, o1, o3, etc. (116+ models)
348
+ API Key: Get from https://platform.openai.com/api-keys
349
+ Format: sk-...
350
+
351
+ ${chalk.yellow('Google Gemini')}
352
+ Models: gemini-2.0-flash, gemini-1.5-pro, gemini-1.5-flash, etc.
353
+ API Key: Get from https://aistudio.google.com/app/apikey
354
+ Format: (varies)
355
+
356
+ ${chalk.yellow('OpenRouter')}
357
+ Models: 100+ models from multiple providers
358
+ API Key: Get from https://openrouter.ai/keys
359
+ Format: sk-or-...
360
+
361
+ ${chalk.cyan.bold('Examples:')}
362
+ ${chalk.gray('# Open configuration menu')}
363
+ $ adf config
364
+
365
+ ${chalk.gray('# After configuration, verify with:')}
366
+ $ cat .adf/.env
367
+ $ adf init
368
+
369
+ ${chalk.cyan.bold('Performance Modes:')}
370
+ ${chalk.yellow('Fast')} - Minimal AI calls (~0.5s per answer)
371
+ Answer quality: OFF
372
+ Smart filtering: OFF
373
+ Best for: Quick interviews, testing
374
+
375
+ ${chalk.yellow('Balanced')} - Moderate AI usage (~2-3s per answer)
376
+ Answer quality: ON
377
+ Smart filtering: ON
378
+ Best for: Most projects (recommended)
379
+
380
+ ${chalk.yellow('Comprehensive')} - Full AI analysis (~4-6s per answer)
381
+ All features: ON
382
+ Context extraction: ON
383
+ Question reordering: ON
384
+ Best for: Critical projects, detailed requirements
385
+
386
+ ${chalk.cyan.bold('Storage Locations:')}
387
+ API Keys: ${chalk.white('.adf/.env')} ${chalk.gray('(gitignored)')}
388
+ Analysis Config: ${chalk.white('.adf/analysis-config.json')}
389
+ Learning Data: ${chalk.white('.adf/learning/')}
390
+ Active Provider: ${chalk.white('.adf/.env')} ${chalk.gray('(ADF_CURRENT_PROVIDER)')}
391
+ Active Model: ${chalk.white('.adf/.env')} ${chalk.gray('(ADF_CURRENT_MODEL)')}
392
+
393
+ ${chalk.cyan.bold('Security Notes:')}
394
+ • API keys stored in .adf/.env (automatically gitignored)
395
+ • Never committed to version control
396
+ • Keys validated before saving
397
+ • Models tested for operational status
398
+ • All data stored locally on your machine
399
+ `)
62
400
  .action(configCommand);
63
401
 
64
402
  // Handle unknown commands
@@ -97,8 +97,19 @@ class AIClient {
97
97
  ]
98
98
  });
99
99
 
100
+ // Validate response structure
101
+ if (!response.content || response.content.length === 0) {
102
+ throw new Error(`Anthropic model '${this.model}' returned no content.`);
103
+ }
104
+
105
+ const content = response.content[0].text;
106
+
107
+ if (!content || content.trim().length === 0) {
108
+ throw new Error(`Anthropic model '${this.model}' returned empty text.`);
109
+ }
110
+
100
111
  return {
101
- content: response.content[0].text,
112
+ content,
102
113
  model: this.model,
103
114
  provider: 'anthropic',
104
115
  usage: {
@@ -113,9 +124,9 @@ class AIClient {
113
124
  * OpenAI GPT request
114
125
  */
115
126
  async openaiRequest(prompt, maxTokens, temperature) {
116
- // o-series models (o1, o3, etc.) use max_completion_tokens instead of max_tokens
117
- // Check for any model starting with 'o' followed by a digit
118
- const isOSeriesModel = /^o\d/.test(this.model);
127
+ // Newer OpenAI models use max_completion_tokens instead of max_tokens
128
+ // This includes: o-series (o1, o3), gpt-5 series, and potentially others
129
+ const usesNewParameters = /^(o1|o3|gpt-5)/i.test(this.model);
119
130
 
120
131
  const requestParams = {
121
132
  model: this.model,
@@ -127,27 +138,56 @@ class AIClient {
127
138
  ]
128
139
  };
129
140
 
130
- // o-series models use different parameter names
131
- if (isOSeriesModel) {
141
+ // Newer models use different parameter names
142
+ if (usesNewParameters) {
132
143
  requestParams.max_completion_tokens = maxTokens;
133
- // o-series models don't support temperature parameter
144
+ // Newer reasoning models don't support temperature parameter
134
145
  } else {
135
146
  requestParams.max_tokens = maxTokens;
136
147
  requestParams.temperature = temperature;
137
148
  }
138
149
 
139
- const response = await this.client.chat.completions.create(requestParams);
150
+ try {
151
+ const response = await this.client.chat.completions.create(requestParams);
140
152
 
141
- return {
142
- content: response.choices[0].message.content,
143
- model: this.model,
144
- provider: 'openai',
145
- usage: {
146
- promptTokens: response.usage.prompt_tokens,
147
- completionTokens: response.usage.completion_tokens,
148
- totalTokens: response.usage.total_tokens
153
+ return {
154
+ content: response.choices[0].message.content,
155
+ model: this.model,
156
+ provider: 'openai',
157
+ usage: {
158
+ promptTokens: response.usage.prompt_tokens,
159
+ completionTokens: response.usage.completion_tokens,
160
+ totalTokens: response.usage.total_tokens
161
+ }
162
+ };
163
+ } catch (error) {
164
+ // If we get a 400 error about max_tokens not being supported,
165
+ // automatically retry with max_completion_tokens
166
+ if (error.status === 400 && error.message.includes('max_tokens')) {
167
+ const retryParams = {
168
+ model: this.model,
169
+ messages: requestParams.messages,
170
+ max_completion_tokens: maxTokens
171
+ // Don't include temperature for new parameter format models
172
+ };
173
+
174
+ const response = await this.client.chat.completions.create(retryParams);
175
+
176
+ return {
177
+ content: response.choices[0].message.content,
178
+ model: this.model,
179
+ provider: 'openai',
180
+ usage: {
181
+ promptTokens: response.usage.prompt_tokens,
182
+ completionTokens: response.usage.completion_tokens,
183
+ totalTokens: response.usage.total_tokens
184
+ }
185
+ };
149
186
  }
150
- };
187
+
188
+ // Re-throw other errors
189
+ throw error;
190
+ }
151
191
  }
152
192
 
153
193
  /**
@@ -165,8 +205,20 @@ class AIClient {
165
205
  const result = await model.generateContent(prompt);
166
206
  const response = result.response;
167
207
 
208
+ // Validate response
209
+ let content;
210
+ try {
211
+ content = response.text();
212
+ } catch (error) {
213
+ throw new Error(`Google Gemini model '${this.model}' failed to generate text: ${error.message}`);
214
+ }
215
+
216
+ if (!content || content.trim().length === 0) {
217
+ throw new Error(`Google Gemini model '${this.model}' returned empty content.`);
218
+ }
219
+
168
220
  return {
169
- content: response.text(),
221
+ content,
170
222
  model: this.model,
171
223
  provider: 'google',
172
224
  usage: {
@@ -181,28 +233,85 @@ class AIClient {
181
233
  * OpenRouter request (uses OpenAI-compatible API)
182
234
  */
183
235
  async openrouterRequest(prompt, maxTokens, temperature) {
184
- const response = await this.client.chat.completions.create({
236
+ // Newer OpenAI models on OpenRouter may use max_completion_tokens
237
+ // This includes: o-series (o1, o3), gpt-5 series
238
+ const usesNewParameters = /^openai\/(o1|o3|gpt-5)/i.test(this.model);
239
+
240
+ const requestParams = {
185
241
  model: this.model,
186
- max_tokens: maxTokens,
187
- temperature,
188
242
  messages: [
189
243
  {
190
244
  role: 'user',
191
245
  content: prompt
192
246
  }
193
247
  ]
194
- });
248
+ };
195
249
 
196
- return {
197
- content: response.choices[0].message.content,
198
- model: this.model,
199
- provider: 'openrouter',
200
- usage: {
201
- promptTokens: response.usage?.prompt_tokens || 0,
202
- completionTokens: response.usage?.completion_tokens || 0,
203
- totalTokens: response.usage?.total_tokens || 0
250
+ // Newer models use different parameter names
251
+ if (usesNewParameters) {
252
+ requestParams.max_completion_tokens = maxTokens;
253
+ // Newer reasoning models don't support temperature parameter
254
+ } else {
255
+ requestParams.max_tokens = maxTokens;
256
+ requestParams.temperature = temperature;
257
+ }
258
+
259
+ try {
260
+ const response = await this.client.chat.completions.create(requestParams);
261
+
262
+ // Validate response structure
263
+ if (!response.choices || response.choices.length === 0) {
264
+ throw new Error(`OpenRouter model '${this.model}' returned no choices. The model may not exist or be unavailable.`);
204
265
  }
205
- };
266
+
267
+ if (!response.choices[0].message) {
268
+ throw new Error(`OpenRouter model '${this.model}' returned invalid response structure.`);
269
+ }
270
+
271
+ const content = response.choices[0].message.content;
272
+
273
+ if (!content || content.trim().length === 0) {
274
+ throw new Error(`OpenRouter model '${this.model}' returned empty content. The model may not support this request format.`);
275
+ }
276
+
277
+ return {
278
+ content,
279
+ model: this.model,
280
+ provider: 'openrouter',
281
+ usage: {
282
+ promptTokens: response.usage?.prompt_tokens || 0,
283
+ completionTokens: response.usage?.completion_tokens || 0,
284
+ totalTokens: response.usage?.total_tokens || 0
285
+ }
286
+ };
287
+ } catch (error) {
288
+ // If we get a 400 error about max_tokens not being supported,
289
+ // automatically retry with max_completion_tokens
290
+ if (error.status === 400 && error.message.includes('max_tokens')) {
291
+ const retryParams = {
292
+ model: this.model,
293
+ messages: requestParams.messages,
294
+ max_completion_tokens: maxTokens
295
+ // Don't include temperature for new parameter format models
296
+ };
297
+
298
+ const response = await this.client.chat.completions.create(retryParams);
299
+
300
+ return {
301
+ content: response.choices[0].message.content,
302
+ model: this.model,
303
+ provider: 'openrouter',
304
+ usage: {
305
+ promptTokens: response.usage?.prompt_tokens || 0,
306
+ completionTokens: response.usage?.completion_tokens || 0,
307
+ totalTokens: response.usage?.total_tokens || 0
308
+ }
309
+ };
310
+ }
311
+
312
+ // Re-throw other errors
313
+ throw error;
314
+ }
206
315
  }
207
316
 
208
317
  /**
@@ -211,21 +320,29 @@ class AIClient {
211
320
  async test() {
212
321
  const testPrompt = 'Respond with exactly: "Connection successful"';
213
322
 
214
- const response = await this.sendMessage(testPrompt, {
215
- maxTokens: 50,
216
- temperature: 0
217
- });
323
+ try {
324
+ const response = await this.sendMessage(testPrompt, {
325
+ maxTokens: 50,
326
+ temperature: 0
327
+ });
218
328
 
219
- if (!response.content) {
220
- throw new Error('No response from AI provider');
221
- }
329
+ if (!response.content) {
330
+ throw new Error(`Model '${this.model}' on ${this.provider} returned no content. The model may not exist or be unavailable.`);
331
+ }
222
332
 
223
- return {
224
- success: true,
225
- provider: this.provider,
226
- model: this.model,
227
- response: response.content
228
- };
333
+ return {
334
+ success: true,
335
+ provider: this.provider,
336
+ model: this.model,
337
+ response: response.content
338
+ };
339
+ } catch (error) {
340
+ // Add context to the error message
341
+ if (error.message.includes('returned no content') || error.message.includes('returned no choices')) {
342
+ throw error; // Already has good context
343
+ }
344
+ throw new Error(`Test failed for ${this.provider} model '${this.model}': ${error.message}`);
345
+ }
229
346
  }
230
347
 
231
348
  /**