jbai-cli 1.5.0 → 1.5.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -35,9 +35,10 @@ Expected output:
35
35
  ```
36
36
  Testing JetBrains AI Platform (staging)
37
37
 
38
- 1. OpenAI Proxy (GPT): ✅ Working
39
- 2. Anthropic Proxy (Claude): ✅ Working
40
- 3. Google Proxy (Gemini): ✅ Working
38
+ 1. OpenAI Proxy (Chat): ✅ Working
39
+ 2. OpenAI Proxy (Codex /responses): ✅ Working
40
+ 3. Anthropic Proxy (Claude): ✅ Working
41
+ 4. Google Proxy (Gemini): ✅ Working
41
42
  ```
42
43
 
43
44
  ## Usage
@@ -103,7 +104,7 @@ Each tool has a sensible default, but you can specify any available model:
103
104
  jbai-claude --model claude-opus-4-1-20250805
104
105
 
105
106
  # Codex with GPT-5
106
- jbai-codex --model gpt-5-2025-08-07
107
+ jbai-codex --model gpt-5.2-codex
107
108
 
108
109
  # Aider with Gemini Pro
109
110
  jbai-aider --model gemini/gemini-2.5-pro
@@ -126,10 +127,19 @@ jbai-aider --model gemini/gemini-2.5-pro
126
127
  | `gpt-4o-2024-11-20` | Default |
127
128
  | `gpt-5-2025-08-07` | Latest |
128
129
  | `gpt-5.1-2025-11-13` | |
130
+ | `gpt-5.2-2025-12-11` | |
129
131
  | `gpt-5-mini-2025-08-07` | Fast |
130
132
  | `o3-2025-04-16` | Reasoning |
131
133
  | `o3-mini-2025-01-31` | |
132
134
 
135
+ **Codex (OpenAI Responses)** - Use with Codex CLI: `jbai-codex --model <model>`
136
+ | Model | Notes |
137
+ |-------|-------|
138
+ | `gpt-5.2-codex` | Default, coding-optimized |
139
+ | `gpt-5.1-codex` | |
140
+ | `gpt-5.1-codex-mini` | Faster |
141
+ | `gpt-5.1-codex-max` | |
142
+
133
143
  **Gemini (Google)** - Use with Aider: `jbai-aider --model gemini/<model>`
134
144
  | Model | Notes |
135
145
  |-------|-------|
package/bin/jbai-codex.js CHANGED
@@ -59,7 +59,7 @@ const hasModel = args.includes('--model');
59
59
  const finalArgs = ['-c', `model_provider=${providerName}`];
60
60
 
61
61
  if (!hasModel) {
62
- finalArgs.push('--model', config.MODELS.openai.default);
62
+ finalArgs.push('--model', config.MODELS.codex?.default || config.MODELS.openai.default);
63
63
  }
64
64
 
65
65
  // Add super mode flags (full-auto)
@@ -73,10 +73,15 @@ opencodeConfig.provider[providerName] = {
73
73
  };
74
74
 
75
75
  // Add OpenAI models
76
+ // O-series models (o1, o3, o4) don't support max_tokens; they use max_completion_tokens
77
+ // For these models, we omit the output limit to prevent the SDK from sending max_tokens
76
78
  config.MODELS.openai.available.forEach(model => {
79
+ const isOSeries = /^o[1-9]/.test(model);
77
80
  opencodeConfig.provider[providerName].models[model] = {
78
81
  name: model,
79
- limit: { context: 128000, output: 8192 }
82
+ limit: isOSeries
83
+ ? { context: 200000 } // O-series have a larger context window; omit the output limit
84
+ : { context: 128000, output: 8192 }
80
85
  };
81
86
  });
82
87
 
package/bin/jbai.js CHANGED
@@ -41,7 +41,7 @@ COMMANDS:
41
41
  jbai token Show token status
42
42
  jbai token set Set token interactively
43
43
  jbai token refresh Refresh expired token
44
- jbai test Test all API endpoints
44
+ jbai test Test API endpoints (incl. Codex /responses)
45
45
  jbai env [staging|production] Switch environment
46
46
  jbai models List available models
47
47
  jbai install Install all AI tools (claude, codex, gemini, opencode)
@@ -139,40 +139,61 @@ async function testEndpoints() {
139
139
  console.log(`Testing JetBrains AI Platform (${config.getEnvironment()})\n`);
140
140
 
141
141
  // Test OpenAI
142
- process.stdout.write('1. OpenAI Proxy (GPT): ');
142
+ process.stdout.write('1. OpenAI Proxy (Chat): ');
143
143
  try {
144
144
  const result = await httpPost(
145
145
  `${endpoints.openai}/chat/completions`,
146
- { model: 'gpt-4o-2024-11-20', messages: [{ role: 'user', content: 'Say OK' }], max_tokens: 5 },
146
+ { model: config.MODELS.openai.default, messages: [{ role: 'user', content: 'Say OK' }], max_tokens: 5 },
147
147
  { 'Grazie-Authenticate-JWT': token }
148
148
  );
149
- console.log(result.choices ? '✅ Working' : '❌ Failed');
149
+ const ok = result.statusCode === 200 && Array.isArray(result.json?.choices);
150
+ console.log(ok ? '✅ Working' : `❌ Failed (${result.statusCode})`);
151
+ } catch (e) {
152
+ console.log(`❌ ${e.message}`);
153
+ }
154
+
155
+ // Test Codex (Responses)
156
+ process.stdout.write('2. OpenAI Proxy (Codex /responses): ');
157
+ try {
158
+ const result = await httpPost(
159
+ `${endpoints.openai}/responses`,
160
+ { model: config.MODELS.codex.default, input: 'Say OK', max_output_tokens: 64 },
161
+ { 'Grazie-Authenticate-JWT': token }
162
+ );
163
+ const outputText = result.json?.output
164
+ ?.find(o => o.type === 'message')
165
+ ?.content?.find(c => c.type === 'output_text')
166
+ ?.text;
167
+ const ok = result.statusCode === 200 && typeof outputText === 'string';
168
+ console.log(ok ? '✅ Working' : `❌ Failed (${result.statusCode})`);
150
169
  } catch (e) {
151
170
  console.log(`❌ ${e.message}`);
152
171
  }
153
172
 
154
173
  // Test Anthropic
155
- process.stdout.write('2. Anthropic Proxy (Claude): ');
174
+ process.stdout.write('3. Anthropic Proxy (Claude): ');
156
175
  try {
157
176
  const result = await httpPost(
158
177
  `${endpoints.anthropic}/messages`,
159
178
  { model: 'claude-sonnet-4-5-20250929', messages: [{ role: 'user', content: 'Say OK' }], max_tokens: 10 },
160
179
  { 'Grazie-Authenticate-JWT': token, 'anthropic-version': '2023-06-01' }
161
180
  );
162
- console.log(result.content ? '✅ Working' : '❌ Failed');
181
+ const ok = result.statusCode === 200 && Array.isArray(result.json?.content);
182
+ console.log(ok ? '✅ Working' : `❌ Failed (${result.statusCode})`);
163
183
  } catch (e) {
164
184
  console.log(`❌ ${e.message}`);
165
185
  }
166
186
 
167
187
  // Test Google
168
- process.stdout.write('3. Google Proxy (Gemini): ');
188
+ process.stdout.write('4. Google Proxy (Gemini): ');
169
189
  try {
170
190
  const result = await httpPost(
171
191
  `${endpoints.google}/v1/projects/default/locations/default/publishers/google/models/gemini-2.5-flash:generateContent`,
172
192
  { contents: [{ role: 'user', parts: [{ text: 'Say OK' }] }] },
173
193
  { 'Grazie-Authenticate-JWT': token }
174
194
  );
175
- console.log(result.candidates ? '✅ Working' : '❌ Failed');
195
+ const ok = result.statusCode === 200 && Array.isArray(result.json?.candidates);
196
+ console.log(ok ? '✅ Working' : `❌ Failed (${result.statusCode})`);
176
197
  } catch (e) {
177
198
  console.log(`❌ ${e.message}`);
178
199
  }
@@ -194,14 +215,16 @@ function httpPost(url, body, headers) {
194
215
  ...headers
195
216
  }
196
217
  }, (res) => {
197
- let body = '';
198
- res.on('data', chunk => body += chunk);
218
+ let rawBody = '';
219
+ res.on('data', chunk => rawBody += chunk);
199
220
  res.on('end', () => {
221
+ let json = null;
200
222
  try {
201
- resolve(JSON.parse(body));
223
+ json = JSON.parse(rawBody);
202
224
  } catch {
203
- reject(new Error('Invalid response'));
225
+ // Some proxy errors arrive as plain text, so a failed JSON parse is expected; leave json null.
204
226
  }
227
+ resolve({ statusCode: res.statusCode, json, rawBody });
205
228
  });
206
229
  });
207
230
 
@@ -220,12 +243,18 @@ function showModels() {
220
243
  console.log(` - ${m}${def}`);
221
244
  });
222
245
 
223
- console.log('\nGPT (OpenAI) - jbai-codex, jbai-opencode:');
246
+ console.log('\nGPT (OpenAI Chat) - jbai-aider, jbai-opencode:');
224
247
  config.MODELS.openai.available.forEach((m) => {
225
248
  const def = m === config.MODELS.openai.default ? ' (default)' : '';
226
249
  console.log(` - ${m}${def}`);
227
250
  });
228
251
 
252
+ console.log('\nCodex (OpenAI Responses) - jbai-codex:');
253
+ config.MODELS.codex.available.forEach((m) => {
254
+ const def = m === config.MODELS.codex.default ? ' (default)' : '';
255
+ console.log(` - ${m}${def}`);
256
+ });
257
+
229
258
  console.log('\nGemini (Google) - jbai-gemini:');
230
259
  config.MODELS.gemini.available.forEach((m) => {
231
260
  const def = m === config.MODELS.gemini.default ? ' (default)' : '';
@@ -235,6 +264,7 @@ function showModels() {
235
264
  // Count total
236
265
  const total = config.MODELS.claude.available.length +
237
266
  config.MODELS.openai.available.length +
267
+ config.MODELS.codex.available.length +
238
268
  config.MODELS.gemini.available.length;
239
269
  console.log(`\nTotal: ${total} models`);
240
270
  console.log('\nNote: Other providers (DeepSeek, Mistral, Qwen, XAI, Meta) are available');
package/lib/config.js CHANGED
@@ -46,6 +46,8 @@ const MODELS = {
46
46
  ]
47
47
  },
48
48
  openai: {
49
+ // Chat/Completions models (used by Aider/OpenCode)
50
+ // Keep in sync with the OpenAI proxy's advertised list.
49
51
  default: 'gpt-4o-2024-11-20',
50
52
  available: [
51
53
  // GPT-5.x series (latest) - require date-versioned names
@@ -55,20 +57,33 @@ const MODELS = {
55
57
  'gpt-5-2025-08-07',
56
58
  'gpt-5-mini-2025-08-07',
57
59
  'gpt-5-nano-2025-08-07',
58
- // GPT-4.x series
60
+ // GPT-4.1 series
59
61
  'gpt-4.1-2025-04-14',
60
62
  'gpt-4.1-mini-2025-04-14',
61
63
  'gpt-4.1-nano-2025-04-14',
64
+ // GPT-4o/4-turbo
62
65
  'gpt-4o-2024-11-20',
63
66
  'gpt-4o-mini-2024-07-18',
64
67
  'gpt-4-turbo-2024-04-09',
65
68
  'gpt-4-0613',
66
- 'gpt-3.5-turbo-0125',
67
69
  // O-series (reasoning) - use max_completion_tokens instead of max_tokens
68
70
  'o4-mini-2025-04-16',
69
71
  'o3-2025-04-16',
70
72
  'o3-mini-2025-01-31',
71
- 'o1-2024-12-17'
73
+ 'o1-2024-12-17',
74
+ // Legacy
75
+ 'gpt-3.5-turbo-0125'
76
+ ]
77
+ },
78
+ // Codex CLI uses the OpenAI "responses" API (wire_api = "responses")
79
+ codex: {
80
+ default: 'gpt-5.2-codex',
81
+ available: [
82
+ 'gpt-5-codex',
83
+ 'gpt-5.1-codex',
84
+ 'gpt-5.1-codex-mini',
85
+ 'gpt-5.1-codex-max',
86
+ 'gpt-5.2-codex'
72
87
  ]
73
88
  },
74
89
  gemini: {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "jbai-cli",
3
- "version": "1.5.0",
3
+ "version": "1.5.2",
4
4
  "description": "CLI wrappers to use AI coding tools (Claude Code, Codex, Gemini CLI, OpenCode) with JetBrains AI Platform",
5
5
  "keywords": [
6
6
  "jetbrains",