jbai-cli 1.1.1 → 1.2.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -2,7 +2,7 @@
 
  **Use AI coding tools with your JetBrains AI subscription** — no separate API keys needed.
 
- One token, all tools: Claude Code, Codex, Aider, Gemini, OpenCode.
+ One token, all tools: Claude Code, Codex, Aider, OpenCode.
 
  ## Install
 
@@ -59,15 +59,9 @@ jbai-codex exec "explain this codebase"
  ### Aider
  ```bash
  jbai-aider
- ```
-
- ### Gemini
- ```bash
- # Interactive chat
- jbai-gemini
 
- # One-shot question
- jbai-gemini "What is Kubernetes?"
+ # Use Gemini models with Aider
+ jbai-aider --model gemini/gemini-2.5-pro
  ```
 
  ### OpenCode
@@ -110,8 +104,8 @@ jbai-claude --model claude-opus-4-1-20250805
  # Codex with GPT-5
  jbai-codex --model gpt-5-2025-08-07
 
- # Gemini with Pro
- jbai-gemini --model gemini-2.5-pro "Your question"
+ # Aider with Gemini Pro
+ jbai-aider --model gemini/gemini-2.5-pro
  ```
 
  ### Available Models
@@ -135,10 +129,10 @@ jbai-gemini --model gemini-2.5-pro "Your question"
  | `o3-2025-04-16` | Reasoning |
  | `o3-mini-2025-01-31` | |
 
- **Gemini (Google)**
+ **Gemini (Google)** - Use with Aider: `jbai-aider --model gemini/<model>`
  | Model | Notes |
  |-------|-------|
- | `gemini-2.5-flash` | Default, fast |
+ | `gemini-2.5-flash` | Fast |
  | `gemini-2.5-pro` | More capable |
  | `gemini-3-pro-preview` | Preview |
  | `gemini-3-flash-preview` | Preview |
@@ -181,9 +175,8 @@ jbai doctor
  |------|-----------------|
  | Claude Code | `npm i -g @anthropic-ai/claude-code` |
  | Codex | `npm i -g @openai/codex` |
- | Aider | `pip install aider-chat` |
+ | Aider | `python3 -m pip install aider-chat` |
  | OpenCode | `go install github.com/opencode-ai/opencode@latest` |
- | Gemini | Built-in, no install needed |
 
  ## Token Management
 
package/bin/jbai-claude.js CHANGED
@@ -33,13 +33,16 @@ if (superMode) {
  }
 
  // Set environment for Claude Code
+ // Only set API_KEY (not AUTH_TOKEN) to avoid conflicts
  const env = {
    ...process.env,
    ANTHROPIC_BASE_URL: endpoints.anthropic,
-   ANTHROPIC_API_KEY: token,
-   ANTHROPIC_AUTH_TOKEN: token
+   ANTHROPIC_API_KEY: token
  };
 
+ // Remove any existing auth token that might conflict
+ delete env.ANTHROPIC_AUTH_TOKEN;
+
  const child = spawn('claude', finalArgs, {
    stdio: 'inherit',
    env
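
For reference, the resolved environment handling in bin/jbai-claude.js after this hunk reads roughly as sketched below. The sketch is assembled only from the lines above; the surrounding token and argument handling is elided, and the inline comments are editorial.

```js
// Sketch: patched env setup in bin/jbai-claude.js (context elided)
const env = {
  ...process.env,
  ANTHROPIC_BASE_URL: endpoints.anthropic, // JetBrains AI gateway endpoint
  ANTHROPIC_API_KEY: token                 // JetBrains AI token passed as the API key
};

// Drop any inherited ANTHROPIC_AUTH_TOKEN so it cannot conflict with the key above
delete env.ANTHROPIC_AUTH_TOKEN;

const child = spawn('claude', finalArgs, { stdio: 'inherit', env });
```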
package/bin/jbai-opencode.js CHANGED
@@ -1,6 +1,9 @@
  #!/usr/bin/env node
 
  const { spawn } = require('child_process');
+ const fs = require('fs');
+ const path = require('path');
+ const os = require('os');
  const config = require('../lib/config');
 
  const token = config.getToken();
@@ -15,6 +18,7 @@ if (config.isTokenExpired(token)) {
  }
 
  const endpoints = config.getEndpoints();
+ const environment = config.getEnvironment();
  let args = process.argv.slice(2);
 
  // Check for super mode (--super, --yolo, -s)
@@ -22,28 +26,84 @@ const superFlags = ['--super', '--yolo', '-s'];
  const superMode = args.some(a => superFlags.includes(a));
  args = args.filter(a => !superFlags.includes(a));
 
+ // Setup OpenCode config with JetBrains provider
+ const configDir = path.join(os.homedir(), '.config', 'opencode');
+ const configFile = path.join(configDir, 'opencode.json');
+
+ if (!fs.existsSync(configDir)) {
+   fs.mkdirSync(configDir, { recursive: true });
+ }
+
+ // Create or update OpenCode config with JetBrains provider
+ const providerName = environment === 'staging' ? 'jbai-staging' : 'jbai';
+ let opencodeConfig = {};
+
+ if (fs.existsSync(configFile)) {
+   try {
+     opencodeConfig = JSON.parse(fs.readFileSync(configFile, 'utf-8'));
+   } catch {
+     opencodeConfig = {};
+   }
+ }
+
+ // Ensure provider section exists
+ if (!opencodeConfig.provider) {
+   opencodeConfig.provider = {};
+ }
+
+ // Environment variable name for the token
+ const envVarName = environment === 'staging' ? 'GRAZIE_STAGING_TOKEN' : 'GRAZIE_API_TOKEN';
+
+ // Add/update JetBrains provider with custom header (using env var reference)
+ opencodeConfig.provider[providerName] = {
+   npm: '@ai-sdk/anthropic',
+   name: `JetBrains AI (${environment})`,
+   options: {
+     baseURL: endpoints.anthropic,
+     apiKey: `{env:${envVarName}}`,
+     headers: {
+       'Grazie-Authenticate-JWT': `{env:${envVarName}}`
+     }
+   },
+   models: {}
+ };
+
+ // Add Claude models
+ config.MODELS.claude.available.forEach(model => {
+   opencodeConfig.provider[providerName].models[model] = {
+     name: model
+   };
+ });
+
+ // Write config
+ fs.writeFileSync(configFile, JSON.stringify(opencodeConfig, null, 2));
+
  // Check if model specified
  const hasModel = args.includes('--model') || args.includes('-m');
- let finalArgs = hasModel ? args : ['--model', config.MODELS.claude.default, ...args];
+ let finalArgs = [];
+
+ if (!hasModel) {
+   // Use provider/model format for OpenCode
+   finalArgs.push('--model', `${providerName}/${config.MODELS.claude.default}`);
+ }
 
  // Add super mode flags
  if (superMode) {
-   finalArgs = ['--yes', ...finalArgs];
+   finalArgs.push('--yes');
    console.log('🚀 Super mode: --yes (auto-confirm) enabled');
  }
 
- // Set environment for OpenCode
- const env = {
+ finalArgs.push(...args);
+
+ // Set the token in environment variable for OpenCode config to reference
+ const childEnv = {
    ...process.env,
-   ANTHROPIC_BASE_URL: endpoints.anthropic,
-   ANTHROPIC_API_KEY: token,
-   OPENAI_API_BASE: endpoints.openai,
-   OPENAI_API_KEY: token
+   [envVarName]: token
  };
 
  const child = spawn('opencode', finalArgs, {
    stdio: 'inherit',
-   env
+   env: childEnv
  });
 
  child.on('error', (err) => {
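
To illustrate what the added block produces, a first run in the default (non-staging) environment would leave a ~/.config/opencode/opencode.json roughly like the sample below. This is a sketch: the baseURL placeholder, the environment label, and the single model entry stand in for the real values taken from lib/config (endpoints.anthropic and config.MODELS.claude.available).

```json
{
  "provider": {
    "jbai": {
      "npm": "@ai-sdk/anthropic",
      "name": "JetBrains AI (<environment>)",
      "options": {
        "baseURL": "<endpoints.anthropic>",
        "apiKey": "{env:GRAZIE_API_TOKEN}",
        "headers": {
          "Grazie-Authenticate-JWT": "{env:GRAZIE_API_TOKEN}"
        }
      },
      "models": {
        "claude-opus-4-1-20250805": { "name": "claude-opus-4-1-20250805" }
      }
    }
  }
}
```

Per the comments in the hunk, the `{env:GRAZIE_API_TOKEN}` strings are references that OpenCode resolves from the environment the wrapper sets via `childEnv`, which is presumably why the raw token is never written to disk.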
package/bin/jbai.js CHANGED
@@ -21,7 +21,7 @@ const TOOLS = {
    aider: {
      name: 'Aider',
      command: 'aider',
-     install: 'pip install aider-chat',
+     install: 'python3 -m pip install aider-chat',
      check: 'aider --version'
    },
    opencode: {
@@ -53,7 +53,6 @@ TOOL WRAPPERS:
  jbai-claude     Launch Claude Code with JetBrains AI
  jbai-codex      Launch Codex CLI with JetBrains AI
  jbai-aider      Launch Aider with JetBrains AI
- jbai-gemini     Launch Gemini with JetBrains AI
  jbai-opencode   Launch OpenCode with JetBrains AI
 
  SUPER MODE:
@@ -279,7 +278,7 @@ function doctor() {
      }
    }
 
-   console.log('\n✅ Gemini Built-in (no install needed)');
+   console.log('\nTip: For Gemini models, use Aider: jbai-aider --model gemini/gemini-2.5-pro');
  }
 
  async function installTools(toolKey) {
package/lib/config.js CHANGED
@@ -136,7 +136,7 @@ const TOOLS = {
    aider: {
      name: 'Aider',
      command: 'aider',
-     install: 'pip install aider-chat'
+     install: 'python3 -m pip install aider-chat'
    },
    opencode: {
      name: 'OpenCode',
package/package.json CHANGED
@@ -1,14 +1,14 @@
  {
    "name": "jbai-cli",
-   "version": "1.1.1",
-   "description": "CLI wrappers to use AI coding tools (Claude Code, Codex, Aider, Gemini) with JetBrains AI Platform",
+   "version": "1.2.3",
+   "description": "CLI wrappers to use AI coding tools (Claude Code, Codex, Aider, OpenCode) with JetBrains AI Platform",
    "keywords": [
      "jetbrains",
      "ai",
      "claude",
      "codex",
      "aider",
-     "gemini",
+     "opencode",
      "cli",
      "openai",
      "anthropic"
@@ -28,7 +28,6 @@
      "jbai-claude": "./bin/jbai-claude.js",
      "jbai-codex": "./bin/jbai-codex.js",
      "jbai-aider": "./bin/jbai-aider.js",
-     "jbai-gemini": "./bin/jbai-gemini.js",
      "jbai-opencode": "./bin/jbai-opencode.js"
    },
    "files": [
@@ -1,136 +0,0 @@
- #!/usr/bin/env node
-
- const https = require('https');
- const readline = require('readline');
- const config = require('../lib/config');
-
- const token = config.getToken();
- if (!token) {
-   console.error('❌ No token found. Run: jbai token set');
-   process.exit(1);
- }
-
- if (config.isTokenExpired(token)) {
-   console.error('⚠️ Token expired. Run: jbai token refresh');
-   process.exit(1);
- }
-
- const endpoints = config.getEndpoints();
- const args = process.argv.slice(2);
-
- // Get model from args or use default
- let model = config.MODELS.gemini.default;
- const modelIdx = args.indexOf('--model');
- if (modelIdx !== -1 && args[modelIdx + 1]) {
-   model = args[modelIdx + 1];
- }
-
- // If prompt provided as argument, run one-shot
- const prompt = args.filter((a, i) =>
-   a !== '--model' && (modelIdx === -1 || i !== modelIdx + 1)
- ).join(' ');
-
- if (prompt) {
-   runPrompt(prompt, model);
- } else {
-   runInteractive(model);
- }
-
- async function runPrompt(prompt, model) {
-   const url = `${endpoints.google}/v1/projects/default/locations/default/publishers/google/models/${model}:generateContent`;
-
-   try {
-     const result = await httpPost(url, {
-       contents: [{ role: 'user', parts: [{ text: prompt }] }]
-     }, { 'Grazie-Authenticate-JWT': token });
-
-     if (result.candidates && result.candidates[0]) {
-       console.log(result.candidates[0].content.parts[0].text);
-     } else if (result.error) {
-       console.error(`Error: ${result.error.message}`);
-     }
-   } catch (e) {
-     console.error(`Error: ${e.message}`);
-   }
- }
-
- async function runInteractive(model) {
-   console.log(`Gemini Interactive (${model})`);
-   console.log('Type your message, press Enter to send. Ctrl+C to exit.\n');
-
-   const rl = readline.createInterface({
-     input: process.stdin,
-     output: process.stdout
-   });
-
-   const history = [];
-
-   const askQuestion = () => {
-     rl.question('You: ', async (input) => {
-       if (!input.trim()) {
-         askQuestion();
-         return;
-       }
-
-       history.push({ role: 'user', parts: [{ text: input }] });
-
-       const url = `${endpoints.google}/v1/projects/default/locations/default/publishers/google/models/${model}:generateContent`;
-
-       try {
-         const result = await httpPost(url, { contents: history }, { 'Grazie-Authenticate-JWT': token });
-
-         if (result.candidates && result.candidates[0]) {
-           const response = result.candidates[0].content.parts[0].text;
-           history.push({ role: 'model', parts: [{ text: response }] });
-           console.log(`\nGemini: ${response}\n`);
-         } else if (result.error) {
-           console.error(`Error: ${result.error.message}\n`);
-         }
-       } catch (e) {
-         console.error(`Error: ${e.message}\n`);
-       }
-
-       askQuestion();
-     });
-   };
-
-   rl.on('close', () => {
-     console.log('\nGoodbye!');
-     process.exit(0);
-   });
-
-   askQuestion();
- }
-
- function httpPost(url, body, headers) {
-   return new Promise((resolve, reject) => {
-     const urlObj = new URL(url);
-     const data = JSON.stringify(body);
-
-     const req = https.request({
-       hostname: urlObj.hostname,
-       port: 443,
-       path: urlObj.pathname,
-       method: 'POST',
-       headers: {
-         'Content-Type': 'application/json',
-         'Content-Length': Buffer.byteLength(data),
-         ...headers
-       }
-     }, (res) => {
-       let body = '';
-       res.on('data', chunk => body += chunk);
-       res.on('end', () => {
-         try {
-           resolve(JSON.parse(body));
-         } catch {
-           reject(new Error('Invalid response'));
-         }
-       });
-     });
-
-     req.on('error', reject);
-     req.write(data);
-     req.end();
-   });
- }