jbai-cli 1.9.2 → 2.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -8,15 +8,22 @@
8
8
 
9
9
  const { runWithHandoff, stripHandoffFlag } = require('../lib/interactive-handoff');
10
10
  const config = require('../lib/config');
11
+ const { isModelsCommand, showModelsForTool } = require('../lib/model-list');
11
12
  const { ensureToken } = require('../lib/ensure-token');
13
+ const { PROXY_PORT, ensureProxy } = require('../lib/proxy');
12
14
 
13
15
  (async () => {
14
- const token = await ensureToken();
15
- const endpoints = config.getEndpoints();
16
16
  let args = process.argv.slice(2);
17
17
  const handoffConfig = stripHandoffFlag(args);
18
18
  args = handoffConfig.args;
19
19
 
20
+ if (isModelsCommand(args)) {
21
+ showModelsForTool('gemini', 'Available Grazie models for jbai-gemini:');
22
+ return;
23
+ }
24
+
25
+ const token = await ensureToken();
26
+
20
27
  // Check for super mode (--super, --yolo, -s)
21
28
  const superFlags = ['--super', '--yolo', '-s'];
22
29
  const superMode = args.some(a => superFlags.includes(a));
@@ -32,15 +39,19 @@ const { ensureToken } = require('../lib/ensure-token');
32
39
  console.log('🚀 Super mode: --yolo (auto-confirm) enabled');
33
40
  }
34
41
 
35
- // Set environment for Gemini CLI
36
- // Uses GEMINI_CLI_CUSTOM_HEADERS for auth (supported since Nov 2025)
42
+ // Auto-start proxy if not running (serves /google/v1/models with only Grazie Gemini models)
43
+ await ensureProxy();
44
+
45
+ // Route through proxy — it handles JWT injection and serves provider-filtered model list
37
46
  const env = {
38
47
  ...process.env,
39
- GEMINI_BASE_URL: endpoints.google,
48
+ GEMINI_BASE_URL: `http://127.0.0.1:${PROXY_PORT}/google`,
40
49
  GEMINI_API_KEY: 'placeholder',
41
- GEMINI_CLI_CUSTOM_HEADERS: `Grazie-Authenticate-JWT: ${token}`
42
50
  };
43
51
 
52
+ // Remove any existing custom headers that might conflict
53
+ delete env.GEMINI_CLI_CUSTOM_HEADERS;
54
+
44
55
  const child = runWithHandoff({
45
56
  command: 'gemini',
46
57
  args: finalArgs,
package/bin/jbai-goose.js CHANGED
@@ -8,45 +8,12 @@
8
8
  */
9
9
 
10
10
  const { runWithHandoff, stripHandoffFlag } = require('../lib/interactive-handoff');
11
- const path = require('path');
12
- const http = require('http');
13
11
  const config = require('../lib/config');
12
+ const { isModelsCommand, showModelsForTool } = require('../lib/model-list');
14
13
  const { ensureToken } = require('../lib/ensure-token');
15
-
16
- const PROXY_PORT = 18080;
17
-
18
- function isProxyRunning() {
19
- return new Promise((resolve) => {
20
- const req = http.get(`http://127.0.0.1:${PROXY_PORT}/health`, { timeout: 1000 }, (res) => {
21
- let body = '';
22
- res.on('data', chunk => body += chunk);
23
- res.on('end', () => {
24
- try {
25
- const info = JSON.parse(body);
26
- resolve(info.status === 'ok');
27
- } catch {
28
- resolve(false);
29
- }
30
- });
31
- });
32
- req.on('error', () => resolve(false));
33
- req.on('timeout', () => { req.destroy(); resolve(false); });
34
- });
35
- }
36
-
37
- function startProxy() {
38
- const { spawn } = require('child_process');
39
- const proxyScript = path.join(__dirname, 'jbai-proxy.js');
40
- const child = spawn(process.execPath, [proxyScript, '--port', String(PROXY_PORT), '--_daemon'], {
41
- detached: true,
42
- stdio: 'ignore',
43
- });
44
- child.unref();
45
- }
14
+ const { PROXY_PORT, ensureProxy } = require('../lib/proxy');
46
15
 
47
16
  (async () => {
48
- const token = await ensureToken();
49
- const environment = config.getEnvironment();
50
17
  let args = process.argv.slice(2);
51
18
  const handoffConfig = stripHandoffFlag(args);
52
19
  args = handoffConfig.args;
@@ -56,12 +23,17 @@ function startProxy() {
56
23
  const superMode = args.some(a => superFlags.includes(a));
57
24
  args = args.filter(a => !superFlags.includes(a));
58
25
 
59
- // Auto-start proxy if not running
60
- if (!await isProxyRunning()) {
61
- startProxy();
62
- await new Promise(r => setTimeout(r, 500));
26
+ if (isModelsCommand(args)) {
27
+ showModelsForTool('goose', 'Available Grazie models for jbai-goose:');
28
+ return;
63
29
  }
64
30
 
31
+ const token = await ensureToken();
32
+ const environment = config.getEnvironment();
33
+
34
+ // Auto-start proxy if not running
35
+ await ensureProxy();
36
+
65
37
  // Determine if this is a "run" or "session" command
66
38
  const isRun = args[0] === 'run';
67
39
  const isSession = args[0] === 'session' || args.length === 0;
@@ -0,0 +1,6 @@
#!/usr/bin/env node
// Thin launcher binary: delegates to the shared shortcut runner with a
// fixed tool/model pairing for this command.
const shortcut = require('../lib/shortcut');

shortcut.run({
  tool: 'opencode',
  model: 'deepseek-r1',
  label: 'OpenCode + DeepSeek R1',
});
@@ -0,0 +1,6 @@
#!/usr/bin/env node
// Thin launcher binary: delegates to the shared shortcut runner with a
// fixed tool/model pairing for this command.
const shortcut = require('../lib/shortcut');

shortcut.run({
  tool: 'opencode',
  model: 'xai-grok-4',
  label: 'OpenCode + Grok 4 (xAI)',
});
@@ -0,0 +1,6 @@
#!/usr/bin/env node
// Thin launcher binary: delegates to the shared shortcut runner with a
// fixed tool/model pairing for this command.
const shortcut = require('../lib/shortcut');

shortcut.run({
  tool: 'opencode',
  model: 'rockhopper-alpha',
  label: 'OpenCode + Rockhopper Alpha (OpenAI EAP)',
});
@@ -4,13 +4,60 @@ const { runWithHandoff, stripHandoffFlag } = require('../lib/interactive-handoff
4
4
  const fs = require('fs');
5
5
  const path = require('path');
6
6
  const os = require('os');
7
+ const https = require('https');
7
8
  const config = require('../lib/config');
9
+ const { isModelsCommand, showModelsForTool } = require('../lib/model-list');
8
10
  const { ensureToken } = require('../lib/ensure-token');
11
+ const { PROXY_PORT, ensureProxy } = require('../lib/proxy');
12
+
13
/**
 * Fetch the list of Grazie LLM profiles from the configured profiles endpoint.
 *
 * @param {string} token - Grazie JWT sent in the `Grazie-Authenticate-JWT` header.
 * @param {string} environment - Environment name (e.g. 'staging'); embedded in the
 *   `Grazie-Agent` header only — it does not change which endpoint is queried.
 * @returns {Promise<Array>} Resolves with the array of profile objects. Rejects on
 *   network error, timeout, non-200 status, unparseable JSON, or an unexpected
 *   response shape.
 */
function fetchGrazieProfiles(token, environment) {
  return new Promise((resolve, reject) => {
    const endpoints = config.getEndpoints();
    const url = new URL(endpoints.profiles);

    const req = https.request({
      hostname: url.hostname,
      // Fix: honor a non-standard port in the endpoint URL instead of always
      // forcing 443 (URL.port is '' when no explicit port is present).
      port: url.port ? Number(url.port) : 443,
      path: url.pathname + url.search,
      method: 'GET',
      // Robustness: don't hang forever if the server accepts the connection
      // but never responds.
      timeout: 15000,
      headers: {
        'Content-Type': 'application/json',
        'Grazie-Authenticate-JWT': token,
        'Grazie-Agent': JSON.stringify({ name: 'jbai-opencode', version: '1.0', env: environment }),
      },
    }, (res) => {
      const chunks = [];
      res.on('data', (c) => chunks.push(c));
      res.on('end', () => {
        if (res.statusCode !== 200) {
          reject(new Error(`Failed to fetch profiles (HTTP ${res.statusCode})`));
          return;
        }

        let parsed;
        try {
          parsed = JSON.parse(Buffer.concat(chunks).toString('utf-8'));
        } catch {
          reject(new Error('Failed to parse profiles response'));
          return;
        }

        // Accept either a bare array or an object wrapping `{ profiles: [...] }`.
        const profiles = Array.isArray(parsed) ? parsed : (parsed && Array.isArray(parsed.profiles) ? parsed.profiles : null);
        if (!profiles) {
          reject(new Error('Unexpected profiles response shape'));
          return;
        }

        resolve(profiles);
      });
    });

    // 'timeout' only signals inactivity; destroy the socket so the 'error'
    // path (via the destroy error) settles the promise.
    req.on('timeout', () => {
      req.destroy(new Error('Timed out fetching profiles'));
    });
    req.on('error', reject);
    req.end();
  });
}
9
59
 
10
60
  (async () => {
11
- const token = await ensureToken();
12
- const endpoints = config.getEndpoints();
13
- const environment = config.getEnvironment();
14
61
  let args = process.argv.slice(2);
15
62
  const handoffConfig = stripHandoffFlag(args);
16
63
  args = handoffConfig.args;
@@ -20,6 +67,26 @@ const { ensureToken } = require('../lib/ensure-token');
20
67
  const superMode = args.some(a => superFlags.includes(a));
21
68
  args = args.filter(a => !superFlags.includes(a));
22
69
 
70
+ // OpenCode may try to read the clipboard tool even for simple prompts.
71
+ // If the clipboard currently contains an image, OpenCode requires the selected
72
+ // model to support image input; many text-only models will fail with:
73
+ // Cannot read "clipboard" (this model does not support image input)
74
+ // Disable clipboard tool by default for more predictable behavior.
75
+ // Set JBAI_OPENCODE_ENABLE_CLIPBOARD=1 to keep clipboard enabled.
76
+ const enableClipboard = process.env.JBAI_OPENCODE_ENABLE_CLIPBOARD === '1';
77
+
78
+ if (isModelsCommand(args)) {
79
+ showModelsForTool('opencode', 'Available Grazie models for jbai-opencode:');
80
+ return;
81
+ }
82
+
83
+ const token = await ensureToken();
84
+ const environment = config.getEnvironment();
85
+ const proxyBase = `http://127.0.0.1:${PROXY_PORT}`;
86
+
87
+ // Auto-start proxy if not running
88
+ await ensureProxy();
89
+
23
90
  // Setup OpenCode config with JetBrains provider
24
91
  const configDir = path.join(os.homedir(), '.config', 'opencode');
25
92
  const configFile = path.join(configDir, 'opencode.json');
@@ -45,11 +112,17 @@ const { ensureToken } = require('../lib/ensure-token');
45
112
  opencodeConfig.provider = {};
46
113
  }
47
114
 
48
- // Environment variable name for the token
49
- const envVarName = environment === 'staging' ? 'GRAZIE_STAGING_TOKEN' : 'GRAZIE_API_TOKEN';
50
-
51
115
  // Provider names for OpenAI and Anthropic
52
116
  const anthropicProviderName = environment === 'staging' ? 'jbai-anthropic-staging' : 'jbai-anthropic';
117
+ const grazieOpenAiProviderName = environment === 'staging' ? 'jbai-grazie-openai-staging' : 'jbai-grazie-openai';
118
+
119
+ // Remove any providers outside our Grazie set for this tool
120
+ const allowedProviders = new Set([providerName, anthropicProviderName, grazieOpenAiProviderName]);
121
+ for (const key of Object.keys(opencodeConfig.provider)) {
122
+ if (!allowedProviders.has(key)) {
123
+ delete opencodeConfig.provider[key];
124
+ }
125
+ }
53
126
 
54
127
  // Add/update JetBrains OpenAI provider with custom header (using env var reference)
55
128
  // Use OpenAI SDK to support max_completion_tokens for GPT-5.x
@@ -57,11 +130,8 @@ const { ensureToken } = require('../lib/ensure-token');
57
130
  npm: '@ai-sdk/openai',
58
131
  name: `JetBrains AI OpenAI (${environment})`,
59
132
  options: {
60
- baseURL: endpoints.openai,
61
- apiKey: `{env:${envVarName}}`,
62
- headers: {
63
- 'Grazie-Authenticate-JWT': `{env:${envVarName}}`
64
- }
133
+ baseURL: `${proxyBase}/openai/v1`,
134
+ apiKey: 'placeholder',
65
135
  },
66
136
  models: {}
67
137
  };
@@ -84,11 +154,8 @@ const { ensureToken } = require('../lib/ensure-token');
84
154
  npm: '@ai-sdk/anthropic',
85
155
  name: `JetBrains AI Anthropic (${environment})`,
86
156
  options: {
87
- baseURL: endpoints.anthropic,
88
- apiKey: `{env:${envVarName}}`,
89
- headers: {
90
- 'Grazie-Authenticate-JWT': `{env:${envVarName}}`
91
- }
157
+ baseURL: `${proxyBase}/anthropic/v1`,
158
+ apiKey: 'placeholder',
92
159
  },
93
160
  models: {}
94
161
  };
@@ -101,6 +168,37 @@ const { ensureToken } = require('../lib/ensure-token');
101
168
  };
102
169
  });
103
170
 
171
+ // Add Grazie-native models via an OpenAI-compatible adapter (all providers)
172
+ // This makes non-OpenAI/Anthropic models visible to OpenCode.
173
+ // Note: tool/function calling support depends on Grazie chat capabilities.
174
+ opencodeConfig.provider[grazieOpenAiProviderName] = {
175
+ npm: '@ai-sdk/openai',
176
+ name: `JetBrains AI Grazie Chat (${environment})`,
177
+ options: {
178
+ baseURL: `${proxyBase}/grazie-openai/v1`,
179
+ apiKey: 'placeholder',
180
+ },
181
+ models: {}
182
+ };
183
+
184
+ try {
185
+ const profiles = await fetchGrazieProfiles(token, environment);
186
+ for (const p of profiles) {
187
+ if (!p || !p.id || p.deprecated) continue;
188
+ if (Array.isArray(p.features) && !p.features.includes('Chat')) continue;
189
+
190
+ const context = Number.isInteger(p.contextLimit) && p.contextLimit > 0 ? p.contextLimit : 128000;
191
+ const output = Number.isInteger(p.maxOutputTokens) && p.maxOutputTokens > 0 ? p.maxOutputTokens : 8192;
192
+
193
+ opencodeConfig.provider[grazieOpenAiProviderName].models[p.id] = {
194
+ name: p.id,
195
+ limit: { context, output }
196
+ };
197
+ }
198
+ } catch {
199
+ // If profiles can't be fetched, keep provider but without models.
200
+ }
201
+
104
202
  // NOTE: Gemini models are NOT available via Grazie OpenAI-compatible proxy.
105
203
  // The /user/v5/llm/google/v1/vertex endpoint only works with native Google API
106
204
  // format (used by jbai-gemini), not OpenAI chat/completions format.
@@ -115,8 +213,13 @@ const { ensureToken } = require('../lib/ensure-token');
115
213
  }
116
214
  opencodeConfig.agent.build.permission = 'allow';
117
215
 
216
+ if (!enableClipboard) {
217
+ if (!opencodeConfig.tools) opencodeConfig.tools = {};
218
+ opencodeConfig.tools.clipboard = false;
219
+ }
220
+
118
221
  // Only show JetBrains AI providers in the model picker
119
- opencodeConfig.enabled_providers = [providerName, anthropicProviderName];
222
+ opencodeConfig.enabled_providers = [providerName, anthropicProviderName, grazieOpenAiProviderName];
120
223
 
121
224
  // Clean up legacy keys that newer OpenCode versions reject
122
225
  delete opencodeConfig.yolo;
@@ -135,10 +238,9 @@ const { ensureToken } = require('../lib/ensure-token');
135
238
 
136
239
  finalArgs.push(...args);
137
240
 
138
- // Set the token in environment variable for OpenCode config to reference
241
+ // No extra env needed: token is written into the config for this run
139
242
  const childEnv = {
140
- ...process.env,
141
- [envVarName]: token
243
+ ...process.env
142
244
  };
143
245
 
144
246
  const child = runWithHandoff({