vibecodingmachine-core 1.0.0 → 1.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.babelrc +13 -13
- package/README.md +28 -28
- package/__tests__/applescript-manager-claude-fix.test.js +286 -286
- package/__tests__/requirement-2-auto-start-looping.test.js +69 -69
- package/__tests__/requirement-3-auto-start-looping.test.js +69 -69
- package/__tests__/requirement-4-auto-start-looping.test.js +69 -69
- package/__tests__/requirement-6-auto-start-looping.test.js +73 -73
- package/__tests__/requirement-7-status-tracking.test.js +332 -332
- package/jest.config.js +18 -18
- package/jest.setup.js +12 -12
- package/package.json +47 -45
- package/src/auth/access-denied.html +119 -119
- package/src/auth/shared-auth-storage.js +230 -230
- package/src/autonomous-mode/feature-implementer.cjs +70 -70
- package/src/autonomous-mode/feature-implementer.js +425 -425
- package/src/chat-management/chat-manager.cjs +71 -71
- package/src/chat-management/chat-manager.js +342 -342
- package/src/ide-integration/__tests__/applescript-manager-thread-closure.test.js +227 -227
- package/src/ide-integration/aider-cli-manager.cjs +850 -850
- package/src/ide-integration/applescript-manager.cjs +1088 -1088
- package/src/ide-integration/applescript-manager.js +2802 -2802
- package/src/ide-integration/applescript-utils.js +306 -306
- package/src/ide-integration/cdp-manager.cjs +221 -221
- package/src/ide-integration/cdp-manager.js +321 -321
- package/src/ide-integration/claude-code-cli-manager.cjs +301 -301
- package/src/ide-integration/cline-cli-manager.cjs +2252 -2252
- package/src/ide-integration/continue-cli-manager.js +431 -431
- package/src/ide-integration/provider-manager.cjs +354 -354
- package/src/ide-integration/quota-detector.cjs +34 -34
- package/src/ide-integration/quota-detector.js +349 -349
- package/src/ide-integration/windows-automation-manager.js +262 -262
- package/src/index.cjs +43 -43
- package/src/index.js +17 -17
- package/src/llm/direct-llm-manager.cjs +609 -609
- package/src/ui/ButtonComponents.js +247 -247
- package/src/ui/ChatInterface.js +499 -499
- package/src/ui/StateManager.js +259 -259
- package/src/utils/audit-logger.cjs +116 -116
- package/src/utils/config-helpers.cjs +94 -94
- package/src/utils/config-helpers.js +94 -94
- package/src/utils/electron-update-checker.js +85 -78
- package/src/utils/gcloud-auth.cjs +394 -394
- package/src/utils/logger.cjs +193 -193
- package/src/utils/logger.js +191 -191
- package/src/utils/repo-helpers.cjs +120 -120
- package/src/utils/repo-helpers.js +120 -120
- package/src/utils/requirement-helpers.js +432 -432
- package/src/utils/update-checker.js +167 -167
|
@@ -1,2252 +1,2252 @@
|
|
|
1
|
-
// Cline CLI Manager - handles Cline CLI installation and execution
|
|
2
|
-
const { execSync, spawn } = require('child_process');
|
|
3
|
-
const fs = require('fs');
|
|
4
|
-
const path = require('path');
|
|
5
|
-
const os = require('os');
|
|
6
|
-
|
|
7
|
-
class ClineCLIManager {
|
|
8
|
-
constructor() {
|
|
9
|
-
this.logger = console;
|
|
10
|
-
}
|
|
11
|
-
|
|
12
|
-
/**
|
|
13
|
-
* Check if Cline CLI is installed
|
|
14
|
-
*/
|
|
15
|
-
isInstalled() {
|
|
16
|
-
try {
|
|
17
|
-
execSync('which cline-cli', { stdio: 'pipe' });
|
|
18
|
-
return true;
|
|
19
|
-
} catch {
|
|
20
|
-
return false;
|
|
21
|
-
}
|
|
22
|
-
}
|
|
23
|
-
|
|
24
|
-
/**
|
|
25
|
-
* Get Cline CLI version
|
|
26
|
-
*/
|
|
27
|
-
getVersion() {
|
|
28
|
-
try {
|
|
29
|
-
const version = execSync('cline-cli --version', { encoding: 'utf8', stdio: 'pipe' });
|
|
30
|
-
return version.trim();
|
|
31
|
-
} catch {
|
|
32
|
-
return null;
|
|
33
|
-
}
|
|
34
|
-
}
|
|
35
|
-
|
|
36
|
-
/**
|
|
37
|
-
* Validate an API key by making a test API call
|
|
38
|
-
* Returns { valid: boolean, error?: string }
|
|
39
|
-
*/
|
|
40
|
-
async validateApiKey(provider, apiKey) {
|
|
41
|
-
if (!apiKey || apiKey.trim().length === 0) {
|
|
42
|
-
return { valid: false, error: 'API key is empty' };
|
|
43
|
-
}
|
|
44
|
-
|
|
45
|
-
try {
|
|
46
|
-
if (provider === 'openrouter') {
|
|
47
|
-
// Validate OpenRouter key by making a test API call
|
|
48
|
-
// This catches both invalid keys AND rate limit issues
|
|
49
|
-
const https = require('https');
|
|
50
|
-
return new Promise((resolve) => {
|
|
51
|
-
const testPayload = JSON.stringify({
|
|
52
|
-
model: 'meta-llama/llama-3.3-70b-instruct:free',
|
|
53
|
-
messages: [{ role: 'user', content: 'hi' }],
|
|
54
|
-
max_tokens: 1
|
|
55
|
-
});
|
|
56
|
-
|
|
57
|
-
const req = https.request({
|
|
58
|
-
hostname: 'openrouter.ai',
|
|
59
|
-
path: '/api/v1/chat/completions',
|
|
60
|
-
method: 'POST',
|
|
61
|
-
headers: {
|
|
62
|
-
'Authorization': `Bearer ${apiKey}`,
|
|
63
|
-
'Content-Type': 'application/json',
|
|
64
|
-
'Content-Length': Buffer.byteLength(testPayload),
|
|
65
|
-
'User-Agent': '
|
|
66
|
-
},
|
|
67
|
-
timeout: 5000 // 5 second timeout
|
|
68
|
-
}, (res) => {
|
|
69
|
-
let data = '';
|
|
70
|
-
res.on('data', (chunk) => { data += chunk; });
|
|
71
|
-
res.on('end', () => {
|
|
72
|
-
if (res.statusCode === 200) {
|
|
73
|
-
resolve({ valid: true });
|
|
74
|
-
} else if (res.statusCode === 403) {
|
|
75
|
-
// Check if it's a rate limit error
|
|
76
|
-
try {
|
|
77
|
-
const errorData = JSON.parse(data);
|
|
78
|
-
if (errorData.error && errorData.error.message) {
|
|
79
|
-
const errorMsg = errorData.error.message;
|
|
80
|
-
if (errorMsg.includes('limit exceeded')) {
|
|
81
|
-
resolve({
|
|
82
|
-
valid: false,
|
|
83
|
-
error: 'API key rate limit exceeded. Please create a new free API key at https://openrouter.ai/keys',
|
|
84
|
-
rateLimited: true
|
|
85
|
-
});
|
|
86
|
-
return;
|
|
87
|
-
}
|
|
88
|
-
}
|
|
89
|
-
} catch {}
|
|
90
|
-
resolve({ valid: false, error: 'Invalid or expired API key' });
|
|
91
|
-
} else if (res.statusCode === 401) {
|
|
92
|
-
resolve({ valid: false, error: 'Invalid API key' });
|
|
93
|
-
} else {
|
|
94
|
-
resolve({ valid: false, error: `API returned status ${res.statusCode}` });
|
|
95
|
-
}
|
|
96
|
-
});
|
|
97
|
-
});
|
|
98
|
-
|
|
99
|
-
req.on('error', (error) => {
|
|
100
|
-
// Network errors don't necessarily mean invalid key
|
|
101
|
-
// Allow it but warn
|
|
102
|
-
resolve({ valid: true, warning: `Could not validate key: ${error.message}` });
|
|
103
|
-
});
|
|
104
|
-
|
|
105
|
-
req.on('timeout', () => {
|
|
106
|
-
req.destroy();
|
|
107
|
-
resolve({ valid: true, warning: 'Validation timed out, but key format looks correct' });
|
|
108
|
-
});
|
|
109
|
-
|
|
110
|
-
req.write(testPayload);
|
|
111
|
-
req.end();
|
|
112
|
-
});
|
|
113
|
-
} else if (provider === 'anthropic') {
|
|
114
|
-
// Validate Anthropic key by making a minimal API call
|
|
115
|
-
const https = require('https');
|
|
116
|
-
return new Promise((resolve) => {
|
|
117
|
-
const payload = JSON.stringify({
|
|
118
|
-
model: 'claude-3-haiku-20240307',
|
|
119
|
-
max_tokens: 1,
|
|
120
|
-
messages: [{ role: 'user', content: 'test' }]
|
|
121
|
-
});
|
|
122
|
-
|
|
123
|
-
const req = https.request({
|
|
124
|
-
hostname: 'api.anthropic.com',
|
|
125
|
-
path: '/v1/messages',
|
|
126
|
-
method: 'POST',
|
|
127
|
-
headers: {
|
|
128
|
-
'x-api-key': apiKey,
|
|
129
|
-
'anthropic-version': '2023-06-01',
|
|
130
|
-
'Content-Type': 'application/json',
|
|
131
|
-
'Content-Length': Buffer.byteLength(payload),
|
|
132
|
-
'User-Agent': '
|
|
133
|
-
},
|
|
134
|
-
timeout: 5000 // 5 second timeout
|
|
135
|
-
}, (res) => {
|
|
136
|
-
let data = '';
|
|
137
|
-
res.on('data', (chunk) => { data += chunk; });
|
|
138
|
-
res.on('end', () => {
|
|
139
|
-
if (res.statusCode === 200 || res.statusCode === 400) {
|
|
140
|
-
// 200 = success, 400 = bad request but key is valid (wrong params)
|
|
141
|
-
resolve({ valid: true });
|
|
142
|
-
} else if (res.statusCode === 401 || res.statusCode === 403) {
|
|
143
|
-
resolve({ valid: false, error: 'Invalid or expired API key' });
|
|
144
|
-
} else {
|
|
145
|
-
resolve({ valid: false, error: `API returned status ${res.statusCode}` });
|
|
146
|
-
}
|
|
147
|
-
});
|
|
148
|
-
});
|
|
149
|
-
|
|
150
|
-
req.on('error', (error) => {
|
|
151
|
-
// Network errors don't necessarily mean invalid key
|
|
152
|
-
resolve({ valid: true, warning: `Could not validate key: ${error.message}` });
|
|
153
|
-
});
|
|
154
|
-
|
|
155
|
-
req.on('timeout', () => {
|
|
156
|
-
req.destroy();
|
|
157
|
-
resolve({ valid: true, warning: 'Validation timed out, but key format looks correct' });
|
|
158
|
-
});
|
|
159
|
-
|
|
160
|
-
req.write(payload);
|
|
161
|
-
req.end();
|
|
162
|
-
});
|
|
163
|
-
} else if (provider === 'gemini') {
|
|
164
|
-
// Validate Google Gemini key by making a test API call
|
|
165
|
-
const https = require('https');
|
|
166
|
-
return new Promise((resolve) => {
|
|
167
|
-
const payload = JSON.stringify({
|
|
168
|
-
contents: [{
|
|
169
|
-
parts: [{ text: 'hi' }]
|
|
170
|
-
}]
|
|
171
|
-
});
|
|
172
|
-
|
|
173
|
-
const req = https.request({
|
|
174
|
-
hostname: 'generativelanguage.googleapis.com',
|
|
175
|
-
path: `/v1beta/models/gemini-2.0-flash-exp:generateContent?key=${apiKey}`,
|
|
176
|
-
method: 'POST',
|
|
177
|
-
headers: {
|
|
178
|
-
'Content-Type': 'application/json',
|
|
179
|
-
'Content-Length': Buffer.byteLength(payload)
|
|
180
|
-
},
|
|
181
|
-
timeout: 5000
|
|
182
|
-
}, (res) => {
|
|
183
|
-
let data = '';
|
|
184
|
-
res.on('data', (chunk) => { data += chunk; });
|
|
185
|
-
res.on('end', () => {
|
|
186
|
-
if (res.statusCode === 200) {
|
|
187
|
-
resolve({ valid: true });
|
|
188
|
-
} else if (res.statusCode === 403 || res.statusCode === 401) {
|
|
189
|
-
resolve({ valid: false, error: 'Invalid or expired API key' });
|
|
190
|
-
} else {
|
|
191
|
-
resolve({ valid: false, error: `API returned status ${res.statusCode}` });
|
|
192
|
-
}
|
|
193
|
-
});
|
|
194
|
-
});
|
|
195
|
-
|
|
196
|
-
req.on('error', (error) => {
|
|
197
|
-
resolve({ valid: true, warning: `Could not validate key: ${error.message}` });
|
|
198
|
-
});
|
|
199
|
-
|
|
200
|
-
req.on('timeout', () => {
|
|
201
|
-
req.destroy();
|
|
202
|
-
resolve({ valid: true, warning: 'Validation timed out, but key format looks correct' });
|
|
203
|
-
});
|
|
204
|
-
|
|
205
|
-
req.write(payload);
|
|
206
|
-
req.end();
|
|
207
|
-
});
|
|
208
|
-
}
|
|
209
|
-
|
|
210
|
-
return { valid: false, error: `Unknown provider: ${provider}` };
|
|
211
|
-
} catch (error) {
|
|
212
|
-
return { valid: false, error: `Validation error: ${error.message}` };
|
|
213
|
-
}
|
|
214
|
-
}
|
|
215
|
-
|
|
216
|
-
/**
|
|
217
|
-
* Check if Cline CLI is configured with any API provider
|
|
218
|
-
*/
|
|
219
|
-
isConfigured() {
|
|
220
|
-
try {
|
|
221
|
-
const configPath = path.join(os.homedir(), '.cline_cli', 'cline_cli_settings.json');
|
|
222
|
-
if (!fs.existsSync(configPath)) {
|
|
223
|
-
return false;
|
|
224
|
-
}
|
|
225
|
-
|
|
226
|
-
const config = JSON.parse(fs.readFileSync(configPath, 'utf8'));
|
|
227
|
-
const apiProvider = config.globalState?.apiProvider;
|
|
228
|
-
const apiModelId = config.globalState?.apiModelId;
|
|
229
|
-
|
|
230
|
-
if (!apiProvider || !apiModelId || apiModelId === '') {
|
|
231
|
-
return false;
|
|
232
|
-
}
|
|
233
|
-
|
|
234
|
-
// Check if API key is available for the configured provider (and not empty)
|
|
235
|
-
// IMPORTANT: Cline CLI needs the key in the config file, not just in saved file
|
|
236
|
-
if (apiProvider === 'anthropic') {
|
|
237
|
-
let configKey = config.globalState?.anthropicApiKey;
|
|
238
|
-
const envKey = process.env.ANTHROPIC_API_KEY;
|
|
239
|
-
const savedKey = this.getSavedAnthropicKey();
|
|
240
|
-
|
|
241
|
-
// Check for invalid key marker (prevents re-syncing invalid keys)
|
|
242
|
-
const invalidKeyMarker = path.join(os.homedir(), '.allnightai', '.anthropic-key-invalid');
|
|
243
|
-
const isMarkedInvalid = fs.existsSync(invalidKeyMarker);
|
|
244
|
-
|
|
245
|
-
// If config file doesn't have key but saved file does, sync it to config
|
|
246
|
-
// BUT only if the key hasn't been marked as invalid
|
|
247
|
-
if (!configKey && savedKey && savedKey.trim().length > 0 && !isMarkedInvalid) {
|
|
248
|
-
this.logger.warn('Anthropic API key missing from config file but found in saved file - syncing...');
|
|
249
|
-
try {
|
|
250
|
-
config.globalState.anthropicApiKey = savedKey;
|
|
251
|
-
fs.writeFileSync(configPath, JSON.stringify(config, null, 4));
|
|
252
|
-
this.logger.log('✓ Synced API key from saved file to Cline CLI config');
|
|
253
|
-
// Update configKey to the newly synced value
|
|
254
|
-
configKey = savedKey;
|
|
255
|
-
} catch (error) {
|
|
256
|
-
this.logger.warn('Failed to sync API key to config:', error.message);
|
|
257
|
-
}
|
|
258
|
-
} else if (isMarkedInvalid) {
|
|
259
|
-
// Key was marked as invalid - don't use it, return false to force reconfiguration
|
|
260
|
-
this.logger.warn('Anthropic API key was marked as invalid - reconfiguration required');
|
|
261
|
-
return false;
|
|
262
|
-
}
|
|
263
|
-
|
|
264
|
-
// Check all sources, but ensure the key is non-empty
|
|
265
|
-
const anthropicApiKey = configKey || envKey || savedKey;
|
|
266
|
-
const isValid = anthropicApiKey && typeof anthropicApiKey === 'string' && anthropicApiKey.trim().length > 0;
|
|
267
|
-
|
|
268
|
-
if (!isValid) {
|
|
269
|
-
this.logger.warn('Anthropic API key found but is empty or invalid');
|
|
270
|
-
}
|
|
271
|
-
|
|
272
|
-
return isValid;
|
|
273
|
-
} else if (apiProvider === 'openrouter') {
|
|
274
|
-
let configKey = config.globalState?.openRouterApiKey;
|
|
275
|
-
const envKey = process.env.OPENROUTER_API_KEY;
|
|
276
|
-
const savedKey = this.getSavedOpenRouterKey();
|
|
277
|
-
|
|
278
|
-
// Check for invalid key marker (prevents re-syncing invalid keys)
|
|
279
|
-
const invalidKeyMarker = path.join(os.homedir(), '.allnightai', '.openrouter-key-invalid');
|
|
280
|
-
const isMarkedInvalid = fs.existsSync(invalidKeyMarker);
|
|
281
|
-
|
|
282
|
-
// If config file doesn't have key but saved file does, sync it to config
|
|
283
|
-
// BUT only if the key hasn't been marked as invalid
|
|
284
|
-
if (!configKey && savedKey && savedKey.trim().length > 0 && !isMarkedInvalid) {
|
|
285
|
-
this.logger.warn('OpenRouter API key missing from config file but found in saved file - syncing...');
|
|
286
|
-
try {
|
|
287
|
-
config.globalState.openRouterApiKey = savedKey;
|
|
288
|
-
fs.writeFileSync(configPath, JSON.stringify(config, null, 4));
|
|
289
|
-
this.logger.log('✓ Synced API key from saved file to Cline CLI config');
|
|
290
|
-
// Update configKey to the newly synced value
|
|
291
|
-
configKey = savedKey;
|
|
292
|
-
} catch (error) {
|
|
293
|
-
this.logger.warn('Failed to sync API key to config:', error.message);
|
|
294
|
-
}
|
|
295
|
-
} else if (isMarkedInvalid) {
|
|
296
|
-
// Key was marked as invalid - don't use it, return false to force reconfiguration
|
|
297
|
-
this.logger.warn('OpenRouter API key was marked as invalid - reconfiguration required');
|
|
298
|
-
return false;
|
|
299
|
-
}
|
|
300
|
-
|
|
301
|
-
// Check all sources, but ensure the key is non-empty
|
|
302
|
-
const openRouterApiKey = configKey || envKey || savedKey;
|
|
303
|
-
const isValid = openRouterApiKey && typeof openRouterApiKey === 'string' && openRouterApiKey.trim().length > 0;
|
|
304
|
-
|
|
305
|
-
if (!isValid) {
|
|
306
|
-
this.logger.warn('OpenRouter API key found but is empty or invalid');
|
|
307
|
-
}
|
|
308
|
-
|
|
309
|
-
return isValid;
|
|
310
|
-
} else if (apiProvider === 'openai-native' && config.globalState?.openAiBaseUrl?.includes('generativelanguage.googleapis.com')) {
|
|
311
|
-
// Gemini via OpenAI-compatible endpoint
|
|
312
|
-
let configKey = config.globalState?.openAiApiKey;
|
|
313
|
-
const envKey = process.env.GEMINI_API_KEY;
|
|
314
|
-
const savedKey = this.getSavedGeminiKey();
|
|
315
|
-
|
|
316
|
-
// Check for invalid key marker (prevents re-syncing invalid keys)
|
|
317
|
-
const invalidKeyMarker = path.join(os.homedir(), '.allnightai', '.gemini-key-invalid');
|
|
318
|
-
const isMarkedInvalid = fs.existsSync(invalidKeyMarker);
|
|
319
|
-
|
|
320
|
-
// If config file doesn't have key but saved file does, sync it to config
|
|
321
|
-
// BUT only if the key hasn't been marked as invalid
|
|
322
|
-
if (!configKey && savedKey && savedKey.trim().length > 0 && !isMarkedInvalid) {
|
|
323
|
-
this.logger.warn('Gemini API key missing from config file but found in saved file - syncing...');
|
|
324
|
-
try {
|
|
325
|
-
config.globalState.openAiApiKey = savedKey;
|
|
326
|
-
fs.writeFileSync(configPath, JSON.stringify(config, null, 4));
|
|
327
|
-
this.logger.log('✓ Synced API key from saved file to Cline CLI config');
|
|
328
|
-
// Update configKey to the newly synced value
|
|
329
|
-
configKey = savedKey;
|
|
330
|
-
} catch (error) {
|
|
331
|
-
this.logger.warn('Failed to sync API key to config:', error.message);
|
|
332
|
-
}
|
|
333
|
-
} else if (isMarkedInvalid) {
|
|
334
|
-
// Key was marked as invalid - don't use it, return false to force reconfiguration
|
|
335
|
-
this.logger.warn('Gemini API key was marked as invalid - reconfiguration required');
|
|
336
|
-
return false;
|
|
337
|
-
}
|
|
338
|
-
|
|
339
|
-
// Check all sources, but ensure the key is non-empty
|
|
340
|
-
const geminiApiKey = configKey || envKey || savedKey;
|
|
341
|
-
const isValid = geminiApiKey && typeof geminiApiKey === 'string' && geminiApiKey.trim().length > 0;
|
|
342
|
-
|
|
343
|
-
if (!isValid) {
|
|
344
|
-
this.logger.warn('Gemini API key found but is empty or invalid');
|
|
345
|
-
}
|
|
346
|
-
|
|
347
|
-
return isValid;
|
|
348
|
-
} else if (apiProvider === 'openai-native' && config.globalState?.openAiBaseUrl === 'http://localhost:11434/v1') {
|
|
349
|
-
// Ollama configuration - check if Ollama is installed and running
|
|
350
|
-
const isInstalled = this.isOllamaInstalled();
|
|
351
|
-
if (!isInstalled) {
|
|
352
|
-
this.logger.warn('Ollama is configured but not installed');
|
|
353
|
-
return false;
|
|
354
|
-
}
|
|
355
|
-
|
|
356
|
-
// Check if Ollama is running (async, but we'll treat as configured if installed)
|
|
357
|
-
// The actual running check will happen during startup
|
|
358
|
-
return true;
|
|
359
|
-
}
|
|
360
|
-
|
|
361
|
-
return false;
|
|
362
|
-
} catch (error) {
|
|
363
|
-
this.logger.warn('Failed to check Cline CLI configuration:', error.message);
|
|
364
|
-
return false;
|
|
365
|
-
}
|
|
366
|
-
}
|
|
367
|
-
|
|
368
|
-
/**
|
|
369
|
-
* Get saved Anthropic API key
|
|
370
|
-
*/
|
|
371
|
-
getSavedAnthropicKey() {
|
|
372
|
-
try {
|
|
373
|
-
const apiKeyFile = path.join(os.homedir(), '.allnightai', 'anthropic-api-key.txt');
|
|
374
|
-
if (fs.existsSync(apiKeyFile)) {
|
|
375
|
-
const key = fs.readFileSync(apiKeyFile, 'utf8').trim();
|
|
376
|
-
// Return null if key is empty (not just whitespace)
|
|
377
|
-
return key.length > 0 ? key : null;
|
|
378
|
-
}
|
|
379
|
-
return null;
|
|
380
|
-
} catch {
|
|
381
|
-
return null;
|
|
382
|
-
}
|
|
383
|
-
}
|
|
384
|
-
|
|
385
|
-
/**
|
|
386
|
-
* Save Anthropic API key
|
|
387
|
-
*/
|
|
388
|
-
saveAnthropicKey(apiKey) {
|
|
389
|
-
try {
|
|
390
|
-
const configDir = path.join(os.homedir(), '.allnightai');
|
|
391
|
-
fs.mkdirSync(configDir, { recursive: true});
|
|
392
|
-
const apiKeyFile = path.join(configDir, 'anthropic-api-key.txt');
|
|
393
|
-
fs.writeFileSync(apiKeyFile, apiKey, { mode: 0o600 }); // Secure permissions
|
|
394
|
-
return true;
|
|
395
|
-
} catch {
|
|
396
|
-
return false;
|
|
397
|
-
}
|
|
398
|
-
}
|
|
399
|
-
|
|
400
|
-
/**
|
|
401
|
-
* Get saved OpenRouter API key
|
|
402
|
-
*/
|
|
403
|
-
getSavedOpenRouterKey() {
|
|
404
|
-
try {
|
|
405
|
-
const apiKeyFile = path.join(os.homedir(), '.allnightai', 'openrouter-api-key.txt');
|
|
406
|
-
if (fs.existsSync(apiKeyFile)) {
|
|
407
|
-
const key = fs.readFileSync(apiKeyFile, 'utf8').trim();
|
|
408
|
-
// Return null if key is empty (not just whitespace)
|
|
409
|
-
return key.length > 0 ? key : null;
|
|
410
|
-
}
|
|
411
|
-
return null;
|
|
412
|
-
} catch {
|
|
413
|
-
return null;
|
|
414
|
-
}
|
|
415
|
-
}
|
|
416
|
-
|
|
417
|
-
/**
|
|
418
|
-
* Save OpenRouter API key
|
|
419
|
-
*/
|
|
420
|
-
saveOpenRouterKey(apiKey) {
|
|
421
|
-
try {
|
|
422
|
-
const configDir = path.join(os.homedir(), '.allnightai');
|
|
423
|
-
fs.mkdirSync(configDir, { recursive: true});
|
|
424
|
-
const apiKeyFile = path.join(configDir, 'openrouter-api-key.txt');
|
|
425
|
-
fs.writeFileSync(apiKeyFile, apiKey, { mode: 0o600 }); // Secure permissions
|
|
426
|
-
return true;
|
|
427
|
-
} catch {
|
|
428
|
-
return false;
|
|
429
|
-
}
|
|
430
|
-
}
|
|
431
|
-
|
|
432
|
-
/**
|
|
433
|
-
* Get saved Google Gemini API key
|
|
434
|
-
*/
|
|
435
|
-
getSavedGeminiKey() {
|
|
436
|
-
try {
|
|
437
|
-
const apiKeyFile = path.join(os.homedir(), '.allnightai', 'gemini-api-key.txt');
|
|
438
|
-
if (fs.existsSync(apiKeyFile)) {
|
|
439
|
-
const key = fs.readFileSync(apiKeyFile, 'utf8').trim();
|
|
440
|
-
return key.length > 0 ? key : null;
|
|
441
|
-
}
|
|
442
|
-
return null;
|
|
443
|
-
} catch {
|
|
444
|
-
return null;
|
|
445
|
-
}
|
|
446
|
-
}
|
|
447
|
-
|
|
448
|
-
/**
|
|
449
|
-
* Save Google Gemini API key
|
|
450
|
-
*/
|
|
451
|
-
saveGeminiKey(apiKey) {
|
|
452
|
-
try {
|
|
453
|
-
const configDir = path.join(os.homedir(), '.allnightai');
|
|
454
|
-
fs.mkdirSync(configDir, { recursive: true});
|
|
455
|
-
const apiKeyFile = path.join(configDir, 'gemini-api-key.txt');
|
|
456
|
-
fs.writeFileSync(apiKeyFile, apiKey, { mode: 0o600 }); // Secure permissions
|
|
457
|
-
return true;
|
|
458
|
-
} catch {
|
|
459
|
-
return false;
|
|
460
|
-
}
|
|
461
|
-
}
|
|
462
|
-
|
|
463
|
-
/**
|
|
464
|
-
* Get the path to the Ollama binary
|
|
465
|
-
* Returns the binary path if found, or null
|
|
466
|
-
*/
|
|
467
|
-
getOllamaBinaryPath() {
|
|
468
|
-
// First check if command is in PATH
|
|
469
|
-
try {
|
|
470
|
-
const whichResult = execSync('which ollama', { encoding: 'utf8', stdio: 'pipe' });
|
|
471
|
-
const pathFound = whichResult.trim();
|
|
472
|
-
// Verify it's actually executable
|
|
473
|
-
if (pathFound && fs.existsSync(pathFound)) {
|
|
474
|
-
try {
|
|
475
|
-
fs.accessSync(pathFound, fs.constants.F_OK | fs.constants.X_OK);
|
|
476
|
-
// Resolve symlinks to get actual binary path
|
|
477
|
-
try {
|
|
478
|
-
const realPath = fs.realpathSync(pathFound);
|
|
479
|
-
return realPath;
|
|
480
|
-
} catch {
|
|
481
|
-
return pathFound;
|
|
482
|
-
}
|
|
483
|
-
} catch {
|
|
484
|
-
// Not executable, continue searching
|
|
485
|
-
}
|
|
486
|
-
}
|
|
487
|
-
} catch {
|
|
488
|
-
// Command not in PATH, continue to search app bundle
|
|
489
|
-
}
|
|
490
|
-
|
|
491
|
-
// Not in PATH, try to find in app bundle (macOS)
|
|
492
|
-
if (os.platform() === 'darwin') {
|
|
493
|
-
const appPath = '/Applications/Ollama.app';
|
|
494
|
-
const userAppPath = path.join(os.homedir(), 'Applications', 'Ollama.app');
|
|
495
|
-
|
|
496
|
-
// Check common locations for the binary in the app bundle
|
|
497
|
-
const possiblePaths = [
|
|
498
|
-
path.join(appPath, 'Contents', 'Resources', 'ollama'),
|
|
499
|
-
path.join(appPath, 'Contents', 'MacOS', 'ollama'),
|
|
500
|
-
path.join(userAppPath, 'Contents', 'Resources', 'ollama'),
|
|
501
|
-
path.join(userAppPath, 'Contents', 'MacOS', 'ollama'),
|
|
502
|
-
'/usr/local/bin/ollama',
|
|
503
|
-
'/usr/bin/ollama'
|
|
504
|
-
];
|
|
505
|
-
|
|
506
|
-
for (const binPath of possiblePaths) {
|
|
507
|
-
try {
|
|
508
|
-
if (fs.existsSync(binPath)) {
|
|
509
|
-
// Check if it's executable
|
|
510
|
-
fs.accessSync(binPath, fs.constants.F_OK | fs.constants.X_OK);
|
|
511
|
-
// Resolve symlinks to get actual binary path
|
|
512
|
-
try {
|
|
513
|
-
const realPath = fs.realpathSync(binPath);
|
|
514
|
-
// Verify resolved path exists and is executable
|
|
515
|
-
if (fs.existsSync(realPath)) {
|
|
516
|
-
fs.accessSync(realPath, fs.constants.F_OK | fs.constants.X_OK);
|
|
517
|
-
return realPath;
|
|
518
|
-
}
|
|
519
|
-
} catch {
|
|
520
|
-
// If realpath fails, use original path
|
|
521
|
-
return binPath;
|
|
522
|
-
}
|
|
523
|
-
}
|
|
524
|
-
} catch {
|
|
525
|
-
// Not executable or doesn't exist, try next path
|
|
526
|
-
continue;
|
|
527
|
-
}
|
|
528
|
-
}
|
|
529
|
-
}
|
|
530
|
-
|
|
531
|
-
return null;
|
|
532
|
-
}
|
|
533
|
-
|
|
534
|
-
/**
|
|
535
|
-
* Check if Ollama is installed
|
|
536
|
-
*/
|
|
537
|
-
isOllamaInstalled() {
|
|
538
|
-
// First check if the command is in PATH (preferred method)
|
|
539
|
-
try {
|
|
540
|
-
execSync('which ollama', { stdio: 'pipe' });
|
|
541
|
-
return true;
|
|
542
|
-
} catch {
|
|
543
|
-
// Command not in PATH, check if app bundle exists (macOS)
|
|
544
|
-
if (os.platform() === 'darwin') {
|
|
545
|
-
const appPath = '/Applications/Ollama.app';
|
|
546
|
-
const userAppPath = path.join(os.homedir(), 'Applications', 'Ollama.app');
|
|
547
|
-
if (fs.existsSync(appPath) || fs.existsSync(userAppPath)) {
|
|
548
|
-
return true;
|
|
549
|
-
}
|
|
550
|
-
}
|
|
551
|
-
return false;
|
|
552
|
-
}
|
|
553
|
-
}
|
|
554
|
-
|
|
555
|
-
/**
|
|
556
|
-
* Attempt to automatically fix Ollama PATH issue on macOS
|
|
557
|
-
* Returns true if fixed, false otherwise
|
|
558
|
-
*/
|
|
559
|
-
async fixOllamaPath() {
|
|
560
|
-
if (os.platform() !== 'darwin') {
|
|
561
|
-
return false;
|
|
562
|
-
}
|
|
563
|
-
|
|
564
|
-
const appPath = '/Applications/Ollama.app';
|
|
565
|
-
const userAppPath = path.join(os.homedir(), 'Applications', 'Ollama.app');
|
|
566
|
-
const appExists = fs.existsSync(appPath) || fs.existsSync(userAppPath);
|
|
567
|
-
|
|
568
|
-
if (!appExists) {
|
|
569
|
-
return false;
|
|
570
|
-
}
|
|
571
|
-
|
|
572
|
-
try {
|
|
573
|
-
// Method 1: Try to launch Ollama.app once to initialize CLI
|
|
574
|
-
// This is the recommended way - launching the app adds the command to PATH
|
|
575
|
-
const appToLaunch = fs.existsSync(appPath) ? appPath : userAppPath;
|
|
576
|
-
this.logger.log('Launching Ollama.app to initialize CLI tool...');
|
|
577
|
-
|
|
578
|
-
spawn('open', [appToLaunch], {
|
|
579
|
-
detached: true,
|
|
580
|
-
stdio: 'ignore'
|
|
581
|
-
});
|
|
582
|
-
|
|
583
|
-
// Wait a moment for the app to initialize
|
|
584
|
-
await new Promise(resolve => setTimeout(resolve, 3000));
|
|
585
|
-
|
|
586
|
-
// Check if command is now available
|
|
587
|
-
try {
|
|
588
|
-
execSync('which ollama', { stdio: 'pipe' });
|
|
589
|
-
this.logger.log('✓ Ollama CLI tool is now available');
|
|
590
|
-
return true;
|
|
591
|
-
} catch {
|
|
592
|
-
// Still not in PATH, try method 2
|
|
593
|
-
}
|
|
594
|
-
|
|
595
|
-
// Method 2: Find binary in app bundle and create symlink
|
|
596
|
-
const binaryPath = this.getOllamaBinaryPath();
|
|
597
|
-
if (binaryPath && binaryPath !== '/usr/local/bin/ollama') {
|
|
598
|
-
try {
|
|
599
|
-
const targetPath = '/usr/local/bin/ollama';
|
|
600
|
-
const targetDir = path.dirname(targetPath);
|
|
601
|
-
|
|
602
|
-
// Ensure /usr/local/bin exists
|
|
603
|
-
if (!fs.existsSync(targetDir)) {
|
|
604
|
-
fs.mkdirSync(targetDir, { recursive: true });
|
|
605
|
-
}
|
|
606
|
-
|
|
607
|
-
// Remove existing symlink/file if it exists
|
|
608
|
-
if (fs.existsSync(targetPath)) {
|
|
609
|
-
fs.unlinkSync(targetPath);
|
|
610
|
-
}
|
|
611
|
-
|
|
612
|
-
// Create symlink
|
|
613
|
-
fs.symlinkSync(binaryPath, targetPath);
|
|
614
|
-
this.logger.log('✓ Created symlink to Ollama CLI tool');
|
|
615
|
-
|
|
616
|
-
// Verify it works
|
|
617
|
-
try {
|
|
618
|
-
execSync('which ollama', { stdio: 'pipe' });
|
|
619
|
-
return true;
|
|
620
|
-
} catch {
|
|
621
|
-
// Symlink created but still not in PATH - might need shell restart
|
|
622
|
-
return false;
|
|
623
|
-
}
|
|
624
|
-
} catch (error) {
|
|
625
|
-
// Symlink creation failed (might need sudo)
|
|
626
|
-
this.logger.log(`⚠ Could not create symlink: ${error.message}`);
|
|
627
|
-
return false;
|
|
628
|
-
}
|
|
629
|
-
}
|
|
630
|
-
|
|
631
|
-
// Method 3: Try to find and use the binary directly from app bundle
|
|
632
|
-
const directPath = this.getOllamaBinaryPath();
|
|
633
|
-
if (directPath) {
|
|
634
|
-
// At least we found it, even if not in PATH
|
|
635
|
-
// The pullOllamaModel will handle using the direct path
|
|
636
|
-
return true;
|
|
637
|
-
}
|
|
638
|
-
|
|
639
|
-
return false;
|
|
640
|
-
} catch (error) {
|
|
641
|
-
this.logger.log(`⚠ Error fixing Ollama PATH: ${error.message}`);
|
|
642
|
-
return false;
|
|
643
|
-
}
|
|
644
|
-
}
|
|
645
|
-
|
|
646
|
-
/**
|
|
647
|
-
* Check if Ollama service is running
|
|
648
|
-
*/
|
|
649
|
-
async isOllamaRunning() {
|
|
650
|
-
try {
|
|
651
|
-
const http = require('http');
|
|
652
|
-
return new Promise((resolve) => {
|
|
653
|
-
const req = http.request({
|
|
654
|
-
hostname: 'localhost',
|
|
655
|
-
port: 11434,
|
|
656
|
-
path: '/api/tags',
|
|
657
|
-
method: 'GET',
|
|
658
|
-
timeout: 2000
|
|
659
|
-
}, (res) => {
|
|
660
|
-
resolve(res.statusCode === 200);
|
|
661
|
-
});
|
|
662
|
-
req.on('error', () => resolve(false));
|
|
663
|
-
req.on('timeout', () => {
|
|
664
|
-
req.destroy();
|
|
665
|
-
resolve(false);
|
|
666
|
-
});
|
|
667
|
-
req.end();
|
|
668
|
-
});
|
|
669
|
-
} catch {
|
|
670
|
-
return Promise.resolve(false);
|
|
671
|
-
}
|
|
672
|
-
}
|
|
673
|
-
|
|
674
|
-
/**
|
|
675
|
-
* Verify Ollama API is accessible at the endpoint Cline CLI uses
|
|
676
|
-
* Tests the /v1/models endpoint that Cline CLI will use
|
|
677
|
-
*/
|
|
678
|
-
async verifyOllamaAPI() {
|
|
679
|
-
try {
|
|
680
|
-
const http = require('http');
|
|
681
|
-
return new Promise((resolve) => {
|
|
682
|
-
const req = http.request({
|
|
683
|
-
hostname: 'localhost',
|
|
684
|
-
port: 11434,
|
|
685
|
-
path: '/v1/models',
|
|
686
|
-
method: 'GET',
|
|
687
|
-
timeout: 3000,
|
|
688
|
-
headers: {
|
|
689
|
-
'Authorization': 'Bearer ollama'
|
|
690
|
-
}
|
|
691
|
-
}, (res) => {
|
|
692
|
-
let data = '';
|
|
693
|
-
res.on('data', chunk => data += chunk);
|
|
694
|
-
res.on('end', () => {
|
|
695
|
-
try {
|
|
696
|
-
const models = JSON.parse(data);
|
|
697
|
-
resolve({ success: true, models: models.data || [] });
|
|
698
|
-
} catch {
|
|
699
|
-
// Response received but not valid JSON - service is running but may be misconfigured
|
|
700
|
-
resolve({ success: res.statusCode === 200, models: [] });
|
|
701
|
-
}
|
|
702
|
-
});
|
|
703
|
-
});
|
|
704
|
-
req.on('error', () => resolve({ success: false, models: [] }));
|
|
705
|
-
req.on('timeout', () => {
|
|
706
|
-
req.destroy();
|
|
707
|
-
resolve({ success: false, models: [] });
|
|
708
|
-
});
|
|
709
|
-
req.end();
|
|
710
|
-
});
|
|
711
|
-
} catch {
|
|
712
|
-
return Promise.resolve({ success: false, models: [] });
|
|
713
|
-
}
|
|
714
|
-
}
|
|
715
|
-
|
|
716
|
-
/**
|
|
717
|
-
* Verify a specific model is accessible via Ollama API
|
|
718
|
-
*/
|
|
719
|
-
async verifyModelAccessible(modelName) {
|
|
720
|
-
try {
|
|
721
|
-
const apiResult = await this.verifyOllamaAPI();
|
|
722
|
-
if (!apiResult.success) {
|
|
723
|
-
return false;
|
|
724
|
-
}
|
|
725
|
-
// Check if model is in the list (might be named with or without tag)
|
|
726
|
-
const modelBase = modelName.split(':')[0];
|
|
727
|
-
return apiResult.models.some(m => {
|
|
728
|
-
const modelId = m.id || '';
|
|
729
|
-
return modelId.includes(modelName) || modelId.includes(modelBase);
|
|
730
|
-
});
|
|
731
|
-
} catch {
|
|
732
|
-
return false;
|
|
733
|
-
}
|
|
734
|
-
}
|
|
735
|
-
|
|
736
|
-
/**
|
|
737
|
-
* Start the Ollama service
|
|
738
|
-
* Returns true if started successfully, false otherwise
|
|
739
|
-
*/
|
|
740
|
-
async startOllamaService() {
|
|
741
|
-
try {
|
|
742
|
-
// Check if already running
|
|
743
|
-
if (await this.isOllamaRunning()) {
|
|
744
|
-
return true;
|
|
745
|
-
}
|
|
746
|
-
|
|
747
|
-
// Get the ollama binary path
|
|
748
|
-
const binaryPath = this.getOllamaBinaryPath() || 'ollama';
|
|
749
|
-
|
|
750
|
-
this.logger.log('Starting Ollama service...');
|
|
751
|
-
|
|
752
|
-
// Start the service in background
|
|
753
|
-
spawn(binaryPath, ['serve'], {
|
|
754
|
-
detached: true,
|
|
755
|
-
stdio: 'ignore'
|
|
756
|
-
});
|
|
757
|
-
|
|
758
|
-
// Wait for service to start (check every 500ms, max 10 seconds)
|
|
759
|
-
for (let i = 0; i < 20; i++) {
|
|
760
|
-
await new Promise(resolve => setTimeout(resolve, 500));
|
|
761
|
-
if (await this.isOllamaRunning()) {
|
|
762
|
-
this.logger.log('✓ Ollama service started');
|
|
763
|
-
return true;
|
|
764
|
-
}
|
|
765
|
-
}
|
|
766
|
-
|
|
767
|
-
// Still not running after 10 seconds
|
|
768
|
-
this.logger.log('⚠ Ollama service may still be starting...');
|
|
769
|
-
return false;
|
|
770
|
-
} catch (error) {
|
|
771
|
-
this.logger.log(`⚠ Error starting Ollama service: ${error.message}`);
|
|
772
|
-
return false;
|
|
773
|
-
}
|
|
774
|
-
}
|
|
775
|
-
|
|
776
|
-
/**
|
|
777
|
-
* Get list of installed Ollama models
|
|
778
|
-
* Returns empty array if command fails (e.g., command not in PATH)
|
|
779
|
-
*/
|
|
780
|
-
async getOllamaModels() {
|
|
781
|
-
try {
|
|
782
|
-
// Try to get the binary path (handles PATH issues automatically)
|
|
783
|
-
const binaryPath = this.getOllamaBinaryPath();
|
|
784
|
-
const ollamaCmd = binaryPath || 'ollama';
|
|
785
|
-
|
|
786
|
-
const result = execSync(`${ollamaCmd} list`, { encoding: 'utf8', stdio: 'pipe' });
|
|
787
|
-
const lines = result.split('\n').slice(1); // Skip header
|
|
788
|
-
const models = lines
|
|
789
|
-
.filter(line => line.trim().length > 0)
|
|
790
|
-
.map(line => {
|
|
791
|
-
const parts = line.trim().split(/\s+/);
|
|
792
|
-
return parts[0]; // Model name is first column
|
|
793
|
-
});
|
|
794
|
-
return models;
|
|
795
|
-
} catch (error) {
|
|
796
|
-
// If command not found, try to fix automatically (macOS)
|
|
797
|
-
if (error.code === 'ENOENT' && os.platform() === 'darwin' && this.isOllamaInstalled()) {
|
|
798
|
-
try {
|
|
799
|
-
await this.fixOllamaPath();
|
|
800
|
-
// Retry after fixing
|
|
801
|
-
const binaryPath = this.getOllamaBinaryPath();
|
|
802
|
-
const ollamaCmd = binaryPath || 'ollama';
|
|
803
|
-
const result = execSync(`${ollamaCmd} list`, { encoding: 'utf8', stdio: 'pipe' });
|
|
804
|
-
const lines = result.split('\n').slice(1);
|
|
805
|
-
const models = lines
|
|
806
|
-
.filter(line => line.trim().length > 0)
|
|
807
|
-
.map(line => {
|
|
808
|
-
const parts = line.trim().split(/\s+/);
|
|
809
|
-
return parts[0];
|
|
810
|
-
});
|
|
811
|
-
return models;
|
|
812
|
-
} catch {
|
|
813
|
-
// Still failed, return empty array
|
|
814
|
-
return [];
|
|
815
|
-
}
|
|
816
|
-
}
|
|
817
|
-
// If command not found or other error, return empty array (will trigger download prompt)
|
|
818
|
-
return [];
|
|
819
|
-
}
|
|
820
|
-
}
|
|
821
|
-
|
|
822
|
-
/**
|
|
823
|
-
* Pull an Ollama model
|
|
824
|
-
* Returns { success: boolean, error?: string, errorCode?: string, resolution?: string }
|
|
825
|
-
*/
|
|
826
|
-
async pullOllamaModel(modelName, onProgress) {
|
|
827
|
-
try {
|
|
828
|
-
return new Promise(async (resolve) => {
|
|
829
|
-
// Get the path to ollama binary (may need to auto-fix PATH on macOS)
|
|
830
|
-
let ollamaPath = 'ollama';
|
|
831
|
-
let shouldRetry = false;
|
|
832
|
-
|
|
833
|
-
try {
|
|
834
|
-
// Try to find the binary
|
|
835
|
-
const binaryPath = this.getOllamaBinaryPath();
|
|
836
|
-
if (binaryPath) {
|
|
837
|
-
ollamaPath = binaryPath;
|
|
838
|
-
} else {
|
|
839
|
-
// Command not in PATH - try to fix automatically (macOS)
|
|
840
|
-
if (os.platform() === 'darwin' && this.isOllamaInstalled()) {
|
|
841
|
-
this.logger.log('⚠ Ollama command not in PATH, attempting to fix automatically...');
|
|
842
|
-
const fixed = await this.fixOllamaPath();
|
|
843
|
-
if (fixed) {
|
|
844
|
-
shouldRetry = true;
|
|
845
|
-
// Wait a moment for PATH to update
|
|
846
|
-
await new Promise(r => setTimeout(r, 1000));
|
|
847
|
-
// Re-get binary path after fixing
|
|
848
|
-
const newBinaryPath = this.getOllamaBinaryPath();
|
|
849
|
-
if (newBinaryPath) {
|
|
850
|
-
ollamaPath = newBinaryPath;
|
|
851
|
-
}
|
|
852
|
-
}
|
|
853
|
-
}
|
|
854
|
-
}
|
|
855
|
-
} catch {
|
|
856
|
-
// Continue with default 'ollama' command
|
|
857
|
-
}
|
|
858
|
-
|
|
859
|
-
// Ensure Ollama service is running before attempting to pull
|
|
860
|
-
if (!(await this.isOllamaRunning())) {
|
|
861
|
-
this.logger.log('⚠ Ollama service not running, starting it automatically...');
|
|
862
|
-
await this.startOllamaService();
|
|
863
|
-
}
|
|
864
|
-
|
|
865
|
-
// Helper functions (defined before use)
|
|
866
|
-
// Helper to parse size strings like "397 MB", "1.5 KB", "490 B"
|
|
867
|
-
const parseSize = (sizeStr) => {
|
|
868
|
-
const match = sizeStr.match(/([\d.]+)\s*(B|KB|MB|GB|TB)/i);
|
|
869
|
-
if (!match) return 0;
|
|
870
|
-
const value = parseFloat(match[1]);
|
|
871
|
-
const unit = match[2].toUpperCase();
|
|
872
|
-
const multipliers = { 'B': 1, 'KB': 1024, 'MB': 1024 * 1024, 'GB': 1024 * 1024 * 1024, 'TB': 1024 * 1024 * 1024 * 1024 };
|
|
873
|
-
return Math.floor(value * (multipliers[unit] || 1));
|
|
874
|
-
};
|
|
875
|
-
|
|
876
|
-
// Helper to format bytes
|
|
877
|
-
const formatBytes = (bytes) => {
|
|
878
|
-
if (bytes === 0) return '0 B';
|
|
879
|
-
const k = 1024;
|
|
880
|
-
const sizes = ['B', 'KB', 'MB', 'GB', 'TB'];
|
|
881
|
-
const i = Math.floor(Math.log(bytes) / Math.log(k));
|
|
882
|
-
return `${(bytes / Math.pow(k, i)).toFixed(i === 0 ? 0 : 1)} ${sizes[i]}`;
|
|
883
|
-
};
|
|
884
|
-
|
|
885
|
-
// Helper to create progress bar
|
|
886
|
-
const createProgressBar = (percentage, width = 25) => {
|
|
887
|
-
const filled = Math.floor((percentage / 100) * width);
|
|
888
|
-
const empty = width - filled;
|
|
889
|
-
return '█'.repeat(filled) + '░'.repeat(empty);
|
|
890
|
-
};
|
|
891
|
-
|
|
892
|
-
// Send initial progress update (0%)
|
|
893
|
-
if (onProgress) {
|
|
894
|
-
onProgress({
|
|
895
|
-
percentage: 0,
|
|
896
|
-
downloaded: '0 B',
|
|
897
|
-
total: 'Unknown',
|
|
898
|
-
currentPercent: 0,
|
|
899
|
-
progressBar: createProgressBar(0)
|
|
900
|
-
});
|
|
901
|
-
}
|
|
902
|
-
|
|
903
|
-
const proc = spawn(ollamaPath, ['pull', modelName]);
|
|
904
|
-
|
|
905
|
-
let output = '';
|
|
906
|
-
let lastProgressValue = -1;
|
|
907
|
-
let layers = new Map(); // Track each layer's size and progress
|
|
908
|
-
let totalSizeBytes = 0;
|
|
909
|
-
let downloadedBytes = 0;
|
|
910
|
-
|
|
911
|
-
proc.stdout.on('data', (data) => {
|
|
912
|
-
const text = data.toString();
|
|
913
|
-
output += text;
|
|
914
|
-
|
|
915
|
-
// Parse each line for progress information
|
|
916
|
-
const lines = text.split(/\r?\n/); // Handle both \n and \r\n
|
|
917
|
-
let foundProgress = false;
|
|
918
|
-
|
|
919
|
-
for (const line of lines) {
|
|
920
|
-
const trimmedLine = line.trim();
|
|
921
|
-
|
|
922
|
-
// Skip empty lines
|
|
923
|
-
if (!trimmedLine) continue;
|
|
924
|
-
|
|
925
|
-
// Multiple patterns to match different Ollama output formats:
|
|
926
|
-
// Pattern 1: "pulling <hash>... XX% ▕██████████████████████████▏ <size> MB"
|
|
927
|
-
// Pattern 2: "pulling <hash>... XX% <size> MB"
|
|
928
|
-
// Pattern 3: "<hash>... XX% <size> MB"
|
|
929
|
-
// Pattern 4: Just percentage in any format
|
|
930
|
-
|
|
931
|
-
let progressMatch = trimmedLine.match(/pulling\s+(\S+).*?(\d+)%\s*[▏▕█░\s]*\s*([\d.]+\s*(?:B|KB|MB|GB|TB))/i);
|
|
932
|
-
|
|
933
|
-
// Try alternative patterns if first doesn't match
|
|
934
|
-
if (!progressMatch) {
|
|
935
|
-
progressMatch = trimmedLine.match(/(\w+)\s*\.\.\.\s*(\d+)%\s*([\d.]+\s*(?:B|KB|MB|GB|TB))/i);
|
|
936
|
-
}
|
|
937
|
-
|
|
938
|
-
if (!progressMatch) {
|
|
939
|
-
progressMatch = trimmedLine.match(/(\d+)%\s*[▏▕█░\s]*\s*([\d.]+\s*(?:B|KB|MB|GB|TB))/i);
|
|
940
|
-
}
|
|
941
|
-
|
|
942
|
-
if (progressMatch) {
|
|
943
|
-
const layerHash = progressMatch[1] ? progressMatch[1].substring(0, 12) : 'layer';
|
|
944
|
-
const progressPercent = parseInt(progressMatch[2] || progressMatch[1]);
|
|
945
|
-
const sizeStr = progressMatch[3] || progressMatch[2];
|
|
946
|
-
const layerTotalBytes = parseSize(sizeStr);
|
|
947
|
-
|
|
948
|
-
// Track this layer
|
|
949
|
-
if (!layers.has(layerHash)) {
|
|
950
|
-
layers.set(layerHash, { total: layerTotalBytes, progress: 0 });
|
|
951
|
-
// Add to total size
|
|
952
|
-
totalSizeBytes += layerTotalBytes;
|
|
953
|
-
}
|
|
954
|
-
|
|
955
|
-
// Update layer progress
|
|
956
|
-
const layer = layers.get(layerHash);
|
|
957
|
-
layer.progress = progressPercent;
|
|
958
|
-
|
|
959
|
-
// Calculate total downloaded across all layers
|
|
960
|
-
downloadedBytes = 0;
|
|
961
|
-
for (const [hash, layerData] of layers.entries()) {
|
|
962
|
-
downloadedBytes += Math.floor((layerData.progress / 100) * layerData.total);
|
|
963
|
-
}
|
|
964
|
-
|
|
965
|
-
// Calculate overall percentage
|
|
966
|
-
const overallProgress = totalSizeBytes > 0
|
|
967
|
-
? Math.min(100, Math.floor((downloadedBytes / totalSizeBytes) * 100))
|
|
968
|
-
: progressPercent;
|
|
969
|
-
|
|
970
|
-
// Update progress (even small changes for better UX)
|
|
971
|
-
if (overallProgress !== lastProgressValue && onProgress) {
|
|
972
|
-
lastProgressValue = overallProgress;
|
|
973
|
-
|
|
974
|
-
const progressInfo = {
|
|
975
|
-
percentage: overallProgress,
|
|
976
|
-
downloaded: formatBytes(downloadedBytes),
|
|
977
|
-
total: formatBytes(totalSizeBytes),
|
|
978
|
-
currentPercent: progressPercent,
|
|
979
|
-
progressBar: createProgressBar(overallProgress)
|
|
980
|
-
};
|
|
981
|
-
|
|
982
|
-
onProgress(progressInfo);
|
|
983
|
-
foundProgress = true;
|
|
984
|
-
}
|
|
985
|
-
}
|
|
986
|
-
|
|
987
|
-
// Always try to find percentage as fallback (even if size parsing failed)
|
|
988
|
-
const simpleProgressMatch = trimmedLine.match(/(\d+)%/);
|
|
989
|
-
if (simpleProgressMatch && onProgress) {
|
|
990
|
-
foundProgress = true;
|
|
991
|
-
const progressPercent = parseInt(simpleProgressMatch[1]);
|
|
992
|
-
// Update if percentage changed (allow 0% change for initial display)
|
|
993
|
-
if (lastProgressValue === -1 || Math.abs(progressPercent - lastProgressValue) >= 1) {
|
|
994
|
-
lastProgressValue = progressPercent;
|
|
995
|
-
onProgress({
|
|
996
|
-
percentage: progressPercent,
|
|
997
|
-
downloaded: totalSizeBytes > 0 ? formatBytes(downloadedBytes) : 'Unknown',
|
|
998
|
-
total: totalSizeBytes > 0 ? formatBytes(totalSizeBytes) : 'Unknown',
|
|
999
|
-
currentPercent: progressPercent,
|
|
1000
|
-
progressBar: createProgressBar(progressPercent)
|
|
1001
|
-
});
|
|
1002
|
-
}
|
|
1003
|
-
}
|
|
1004
|
-
}
|
|
1005
|
-
|
|
1006
|
-
// If we got output but no progress yet, show a minimal update
|
|
1007
|
-
// This handles cases where Ollama outputs non-standard formats
|
|
1008
|
-
if (!foundProgress && text.length > 0 && onProgress && lastProgressValue < 1) {
|
|
1009
|
-
// Show 1% to indicate something is happening
|
|
1010
|
-
if (lastProgressValue < 1) {
|
|
1011
|
-
lastProgressValue = 1;
|
|
1012
|
-
onProgress({
|
|
1013
|
-
percentage: 1,
|
|
1014
|
-
downloaded: 'Unknown',
|
|
1015
|
-
total: 'Unknown',
|
|
1016
|
-
currentPercent: 1,
|
|
1017
|
-
progressBar: createProgressBar(1)
|
|
1018
|
-
});
|
|
1019
|
-
}
|
|
1020
|
-
}
|
|
1021
|
-
});
|
|
1022
|
-
|
|
1023
|
-
proc.stderr.on('data', (data) => {
|
|
1024
|
-
const stderrText = data.toString();
|
|
1025
|
-
output += stderrText;
|
|
1026
|
-
|
|
1027
|
-
// Ollama may output progress to stderr, so parse it too
|
|
1028
|
-
const lines = stderrText.split('\n');
|
|
1029
|
-
for (const line of lines) {
|
|
1030
|
-
const trimmedLine = line.trim();
|
|
1031
|
-
|
|
1032
|
-
// Try to parse progress from stderr as well
|
|
1033
|
-
let progressMatch = trimmedLine.match(/pulling\s+(\S+).*?(\d+)%\s*[▏▕█░\s]*\s*([\d.]+\s*(?:B|KB|MB|GB|TB))/i);
|
|
1034
|
-
|
|
1035
|
-
if (!progressMatch) {
|
|
1036
|
-
progressMatch = trimmedLine.match(/(\w+)\s*\.\.\.\s*(\d+)%\s*([\d.]+\s*(?:B|KB|MB|GB|TB))/i);
|
|
1037
|
-
}
|
|
1038
|
-
|
|
1039
|
-
if (!progressMatch) {
|
|
1040
|
-
progressMatch = trimmedLine.match(/(\d+)%\s*[▏▕█░\s]*\s*([\d.]+\s*(?:B|KB|MB|GB|TB))/i);
|
|
1041
|
-
}
|
|
1042
|
-
|
|
1043
|
-
if (progressMatch && onProgress) {
|
|
1044
|
-
const layerHash = progressMatch[1] ? progressMatch[1].substring(0, 12) : 'layer';
|
|
1045
|
-
const progressPercent = parseInt(progressMatch[2] || progressMatch[1]);
|
|
1046
|
-
const sizeStr = progressMatch[3] || progressMatch[2];
|
|
1047
|
-
const layerTotalBytes = parseSize(sizeStr);
|
|
1048
|
-
|
|
1049
|
-
if (!layers.has(layerHash)) {
|
|
1050
|
-
layers.set(layerHash, { total: layerTotalBytes, progress: 0 });
|
|
1051
|
-
totalSizeBytes += layerTotalBytes;
|
|
1052
|
-
}
|
|
1053
|
-
|
|
1054
|
-
const layer = layers.get(layerHash);
|
|
1055
|
-
layer.progress = progressPercent;
|
|
1056
|
-
|
|
1057
|
-
downloadedBytes = 0;
|
|
1058
|
-
for (const [hash, layerData] of layers.entries()) {
|
|
1059
|
-
downloadedBytes += Math.floor((layerData.progress / 100) * layerData.total);
|
|
1060
|
-
}
|
|
1061
|
-
|
|
1062
|
-
const overallProgress = totalSizeBytes > 0
|
|
1063
|
-
? Math.min(100, Math.floor((downloadedBytes / totalSizeBytes) * 100))
|
|
1064
|
-
: progressPercent;
|
|
1065
|
-
|
|
1066
|
-
if (overallProgress !== lastProgressValue) {
|
|
1067
|
-
lastProgressValue = overallProgress;
|
|
1068
|
-
|
|
1069
|
-
const progressInfo = {
|
|
1070
|
-
percentage: overallProgress,
|
|
1071
|
-
downloaded: formatBytes(downloadedBytes),
|
|
1072
|
-
total: formatBytes(totalSizeBytes),
|
|
1073
|
-
currentPercent: progressPercent,
|
|
1074
|
-
progressBar: createProgressBar(overallProgress)
|
|
1075
|
-
};
|
|
1076
|
-
|
|
1077
|
-
onProgress(progressInfo);
|
|
1078
|
-
}
|
|
1079
|
-
}
|
|
1080
|
-
|
|
1081
|
-
// Always try to find percentage as fallback (even if size parsing failed)
|
|
1082
|
-
const simpleProgressMatch = trimmedLine.match(/(\d+)%/);
|
|
1083
|
-
if (simpleProgressMatch && onProgress) {
|
|
1084
|
-
const progressPercent = parseInt(simpleProgressMatch[1]);
|
|
1085
|
-
// Update if percentage changed (allow 0% change for initial display)
|
|
1086
|
-
if (lastProgressValue === -1 || Math.abs(progressPercent - lastProgressValue) >= 1) {
|
|
1087
|
-
lastProgressValue = progressPercent;
|
|
1088
|
-
onProgress({
|
|
1089
|
-
percentage: progressPercent,
|
|
1090
|
-
downloaded: totalSizeBytes > 0 ? formatBytes(downloadedBytes) : 'Unknown',
|
|
1091
|
-
total: totalSizeBytes > 0 ? formatBytes(totalSizeBytes) : 'Unknown',
|
|
1092
|
-
currentPercent: progressPercent,
|
|
1093
|
-
progressBar: createProgressBar(progressPercent)
|
|
1094
|
-
});
|
|
1095
|
-
}
|
|
1096
|
-
}
|
|
1097
|
-
}
|
|
1098
|
-
});
|
|
1099
|
-
|
|
1100
|
-
proc.on('close', async (code) => {
|
|
1101
|
-
if (code === 0) {
|
|
1102
|
-
resolve({ success: true });
|
|
1103
|
-
} else {
|
|
1104
|
-
// Check if error is about server not responding
|
|
1105
|
-
const errorText = output || '';
|
|
1106
|
-
if (errorText.includes('server not responding') || errorText.includes('connection refused')) {
|
|
1107
|
-
// Service might not be running - try to start it and retry
|
|
1108
|
-
if (!(await this.isOllamaRunning())) {
|
|
1109
|
-
this.logger.log('⚠ Ollama service not running, attempting to start...');
|
|
1110
|
-
const started = await this.startOllamaService();
|
|
1111
|
-
if (started) {
|
|
1112
|
-
// Retry after starting service
|
|
1113
|
-
this.logger.log('✓ Service started, retrying model download...');
|
|
1114
|
-
const retryResult = await this.pullOllamaModel(modelName, onProgress);
|
|
1115
|
-
resolve(retryResult);
|
|
1116
|
-
return;
|
|
1117
|
-
}
|
|
1118
|
-
}
|
|
1119
|
-
|
|
1120
|
-
resolve({
|
|
1121
|
-
success: false,
|
|
1122
|
-
error: 'Ollama service is not responding. Please ensure Ollama is running.',
|
|
1123
|
-
errorCode: 'SERVER_NOT_RESPONDING',
|
|
1124
|
-
resolution: 'The Ollama service needs to be running to download models.\n' +
|
|
1125
|
-
'Try one of these:\n' +
|
|
1126
|
-
' 1. Launch Ollama.app from /Applications\n' +
|
|
1127
|
-
' 2. Or run: ollama serve\n' +
|
|
1128
|
-
' 3. Then try downloading the model again'
|
|
1129
|
-
});
|
|
1130
|
-
} else {
|
|
1131
|
-
resolve({
|
|
1132
|
-
success: false,
|
|
1133
|
-
error: output || 'Unknown error',
|
|
1134
|
-
errorCode: 'PULL_FAILED'
|
|
1135
|
-
});
|
|
1136
|
-
}
|
|
1137
|
-
}
|
|
1138
|
-
});
|
|
1139
|
-
|
|
1140
|
-
proc.on('error', async (error) => {
|
|
1141
|
-
// Detect common errors and try to fix automatically
|
|
1142
|
-
if (error.code === 'ENOENT') {
|
|
1143
|
-
// Command not found - try to fix automatically (macOS)
|
|
1144
|
-
if (os.platform() === 'darwin' && this.isOllamaInstalled() && !shouldRetry) {
|
|
1145
|
-
this.logger.log('⚠ Ollama command not found, attempting to fix automatically...');
|
|
1146
|
-
const fixed = await this.fixOllamaPath();
|
|
1147
|
-
|
|
1148
|
-
if (fixed) {
|
|
1149
|
-
// Retry the command after fixing
|
|
1150
|
-
this.logger.log('✓ Fixed! Retrying model download...');
|
|
1151
|
-
const retryResult = await this.pullOllamaModel(modelName, onProgress);
|
|
1152
|
-
resolve(retryResult);
|
|
1153
|
-
return;
|
|
1154
|
-
}
|
|
1155
|
-
}
|
|
1156
|
-
|
|
1157
|
-
// Auto-fix failed or not applicable - provide manual resolution
|
|
1158
|
-
const platform = os.platform();
|
|
1159
|
-
let resolution = '';
|
|
1160
|
-
|
|
1161
|
-
if (platform === 'darwin') {
|
|
1162
|
-
resolution = 'Ollama.app is installed but the command-line tool is not in your PATH.\n' +
|
|
1163
|
-
'To fix this:\n' +
|
|
1164
|
-
' 1. Open Ollama.app from /Applications (launch it once)\n' +
|
|
1165
|
-
' 2. This will add the ollama command to your PATH\n' +
|
|
1166
|
-
' 3. Or manually add Ollama to PATH: export PATH="$PATH:/usr/local/bin"\n' +
|
|
1167
|
-
' 4. Then try downloading the model again';
|
|
1168
|
-
} else {
|
|
1169
|
-
resolution = 'The ollama command is not found in your PATH.\n' +
|
|
1170
|
-
'Please ensure Ollama is properly installed and the command is available.';
|
|
1171
|
-
}
|
|
1172
|
-
|
|
1173
|
-
resolve({
|
|
1174
|
-
success: false,
|
|
1175
|
-
error: `Command not found: ollama`,
|
|
1176
|
-
errorCode: 'ENOENT',
|
|
1177
|
-
resolution: resolution
|
|
1178
|
-
});
|
|
1179
|
-
} else {
|
|
1180
|
-
resolve({
|
|
1181
|
-
success: false,
|
|
1182
|
-
error: error.message,
|
|
1183
|
-
errorCode: error.code || 'UNKNOWN'
|
|
1184
|
-
});
|
|
1185
|
-
}
|
|
1186
|
-
});
|
|
1187
|
-
});
|
|
1188
|
-
} catch (error) {
|
|
1189
|
-
return {
|
|
1190
|
-
success: false,
|
|
1191
|
-
error: error.message,
|
|
1192
|
-
errorCode: error.code || 'UNKNOWN'
|
|
1193
|
-
};
|
|
1194
|
-
}
|
|
1195
|
-
}
|
|
1196
|
-
|
|
1197
|
-
/**
|
|
1198
|
-
* Check if Homebrew is available
|
|
1199
|
-
*/
|
|
1200
|
-
hasHomebrew() {
|
|
1201
|
-
try {
|
|
1202
|
-
execSync('which brew', { stdio: 'pipe' });
|
|
1203
|
-
return true;
|
|
1204
|
-
} catch {
|
|
1205
|
-
return false;
|
|
1206
|
-
}
|
|
1207
|
-
}
|
|
1208
|
-
|
|
1209
|
-
/**
|
|
1210
|
-
* Install Ollama on macOS using Homebrew
|
|
1211
|
-
*/
|
|
1212
|
-
async installOllamaViaHomebrew() {
|
|
1213
|
-
if (!this.hasHomebrew()) {
|
|
1214
|
-
throw new Error('Homebrew is not installed');
|
|
1215
|
-
}
|
|
1216
|
-
|
|
1217
|
-
// Check if Ollama is already installed
|
|
1218
|
-
if (this.isOllamaInstalled()) {
|
|
1219
|
-
this.logger.log('Ollama is already installed, skipping installation.');
|
|
1220
|
-
return { success: true, method: 'homebrew', alreadyInstalled: true };
|
|
1221
|
-
}
|
|
1222
|
-
|
|
1223
|
-
this.logger.log('Installing Ollama via Homebrew (this may take a few minutes)...');
|
|
1224
|
-
|
|
1225
|
-
try {
|
|
1226
|
-
execSync('brew install --cask ollama', {
|
|
1227
|
-
stdio: 'inherit',
|
|
1228
|
-
timeout: 300000 // 5 minutes
|
|
1229
|
-
});
|
|
1230
|
-
|
|
1231
|
-
// Wait a moment for installation to complete
|
|
1232
|
-
await new Promise(resolve => setTimeout(resolve, 2000));
|
|
1233
|
-
|
|
1234
|
-
// Verify installation
|
|
1235
|
-
if (this.isOllamaInstalled()) {
|
|
1236
|
-
return { success: true, method: 'homebrew' };
|
|
1237
|
-
} else {
|
|
1238
|
-
// App might be installed but CLI not in PATH yet - this is OK
|
|
1239
|
-
if (os.platform() === 'darwin') {
|
|
1240
|
-
const appPath = '/Applications/Ollama.app';
|
|
1241
|
-
const userAppPath = path.join(os.homedir(), 'Applications', 'Ollama.app');
|
|
1242
|
-
if (fs.existsSync(appPath) || fs.existsSync(userAppPath)) {
|
|
1243
|
-
return { success: true, method: 'homebrew', note: 'Ollama app installed. The CLI tool will be available after launching Ollama.app once.' };
|
|
1244
|
-
}
|
|
1245
|
-
}
|
|
1246
|
-
throw new Error('Installation completed but Ollama not found in PATH');
|
|
1247
|
-
}
|
|
1248
|
-
} catch (error) {
|
|
1249
|
-
// If error is because app already exists, that's OK
|
|
1250
|
-
if (error.message.includes('already an App') && this.isOllamaInstalled()) {
|
|
1251
|
-
return { success: true, method: 'homebrew', alreadyInstalled: true };
|
|
1252
|
-
}
|
|
1253
|
-
throw new Error(`Homebrew installation failed: ${error.message}`);
|
|
1254
|
-
}
|
|
1255
|
-
}
|
|
1256
|
-
|
|
1257
|
-
/**
|
|
1258
|
-
* Install Ollama on macOS by downloading and installing DMG
|
|
1259
|
-
*/
|
|
1260
|
-
async installOllamaViaDMG() {
|
|
1261
|
-
// Check if Ollama is already installed
|
|
1262
|
-
if (this.isOllamaInstalled()) {
|
|
1263
|
-
this.logger.log('Ollama is already installed, skipping installation.');
|
|
1264
|
-
return { success: true, method: 'dmg', alreadyInstalled: true };
|
|
1265
|
-
}
|
|
1266
|
-
|
|
1267
|
-
const https = require('https');
|
|
1268
|
-
const { createWriteStream } = require('fs');
|
|
1269
|
-
|
|
1270
|
-
// Ollama DMG download URL for macOS
|
|
1271
|
-
const downloadUrl = 'https://ollama.com/download/Ollama-darwin.dmg';
|
|
1272
|
-
const tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), 'ollama-install-'));
|
|
1273
|
-
const downloadPath = path.join(tmpDir, 'Ollama.dmg');
|
|
1274
|
-
|
|
1275
|
-
try {
|
|
1276
|
-
this.logger.log('Downloading Ollama DMG...');
|
|
1277
|
-
|
|
1278
|
-
// Download the DMG file (handle redirects)
|
|
1279
|
-
await new Promise((resolve, reject) => {
|
|
1280
|
-
const downloadFile = (url) => {
|
|
1281
|
-
https.get(url, (response) => {
|
|
1282
|
-
// Handle redirects
|
|
1283
|
-
if (response.statusCode === 301 || response.statusCode === 302 || response.statusCode === 307 || response.statusCode === 308) {
|
|
1284
|
-
if (response.headers.location) {
|
|
1285
|
-
return downloadFile(response.headers.location);
|
|
1286
|
-
}
|
|
1287
|
-
}
|
|
1288
|
-
|
|
1289
|
-
if (response.statusCode !== 200) {
|
|
1290
|
-
reject(new Error(`Download failed: HTTP ${response.statusCode}`));
|
|
1291
|
-
return;
|
|
1292
|
-
}
|
|
1293
|
-
|
|
1294
|
-
const fileStream = createWriteStream(downloadPath);
|
|
1295
|
-
response.pipe(fileStream);
|
|
1296
|
-
|
|
1297
|
-
fileStream.on('finish', () => {
|
|
1298
|
-
fileStream.close();
|
|
1299
|
-
resolve();
|
|
1300
|
-
});
|
|
1301
|
-
|
|
1302
|
-
fileStream.on('error', (error) => {
|
|
1303
|
-
fs.unlink(downloadPath, () => {});
|
|
1304
|
-
reject(error);
|
|
1305
|
-
});
|
|
1306
|
-
}).on('error', reject);
|
|
1307
|
-
};
|
|
1308
|
-
|
|
1309
|
-
downloadFile(downloadUrl);
|
|
1310
|
-
});
|
|
1311
|
-
|
|
1312
|
-
this.logger.log('Mounting and installing Ollama...');
|
|
1313
|
-
|
|
1314
|
-
const { execSync } = require('child_process');
|
|
1315
|
-
|
|
1316
|
-
// Mount the DMG
|
|
1317
|
-
const mountOutput = execSync(`hdiutil attach "${downloadPath}" -nobrowse -noverify`, {
|
|
1318
|
-
encoding: 'utf8',
|
|
1319
|
-
stdio: 'pipe'
|
|
1320
|
-
});
|
|
1321
|
-
|
|
1322
|
-
// Extract mount point
|
|
1323
|
-
const mountPoint = mountOutput.split('\n').find(line => line.includes('/Volumes/'))?.split('\t').pop()?.trim();
|
|
1324
|
-
if (!mountPoint) {
|
|
1325
|
-
throw new Error('Failed to find DMG mount point');
|
|
1326
|
-
}
|
|
1327
|
-
|
|
1328
|
-
try {
|
|
1329
|
-
// Find the Ollama.app in the mounted volume
|
|
1330
|
-
const appInVolume = path.join(mountPoint, 'Ollama.app');
|
|
1331
|
-
if (!fs.existsSync(appInVolume)) {
|
|
1332
|
-
throw new Error('Ollama.app not found in mounted DMG');
|
|
1333
|
-
}
|
|
1334
|
-
|
|
1335
|
-
// Copy to Applications
|
|
1336
|
-
const appPath = '/Applications/Ollama.app';
|
|
1337
|
-
if (fs.existsSync(appPath)) {
|
|
1338
|
-
execSync(`rm -rf "${appPath}"`, { stdio: 'pipe' });
|
|
1339
|
-
}
|
|
1340
|
-
execSync(`cp -R "${appInVolume}" "${appPath}"`, { stdio: 'pipe' });
|
|
1341
|
-
|
|
1342
|
-
// Remove quarantine attribute to prevent Gatekeeper issues
|
|
1343
|
-
try {
|
|
1344
|
-
execSync(`xattr -dr com.apple.quarantine "${appPath}"`, { stdio: 'pipe' });
|
|
1345
|
-
} catch {
|
|
1346
|
-
// Non-fatal if xattr fails
|
|
1347
|
-
}
|
|
1348
|
-
|
|
1349
|
-
// Eject the DMG
|
|
1350
|
-
execSync(`hdiutil detach "${mountPoint}"`, { stdio: 'pipe' });
|
|
1351
|
-
} catch (error) {
|
|
1352
|
-
// Try to eject even on error
|
|
1353
|
-
try {
|
|
1354
|
-
execSync(`hdiutil detach "${mountPoint}"`, { stdio: 'pipe' });
|
|
1355
|
-
} catch {}
|
|
1356
|
-
throw error;
|
|
1357
|
-
}
|
|
1358
|
-
|
|
1359
|
-
// Clean up downloaded file
|
|
1360
|
-
fs.rmSync(tmpDir, { recursive: true, force: true });
|
|
1361
|
-
|
|
1362
|
-
// Wait a moment for installation to settle
|
|
1363
|
-
await new Promise(resolve => setTimeout(resolve, 2000));
|
|
1364
|
-
|
|
1365
|
-
// Verify installation
|
|
1366
|
-
if (this.isOllamaInstalled()) {
|
|
1367
|
-
return { success: true, method: 'dmg' };
|
|
1368
|
-
} else {
|
|
1369
|
-
// Check if app exists in Applications
|
|
1370
|
-
if (fs.existsSync('/Applications/Ollama.app')) {
|
|
1371
|
-
// App is installed but not in PATH yet - this is normal, user may need to launch it once
|
|
1372
|
-
return { success: true, method: 'dmg', note: 'Ollama installed. Please launch Ollama.app once to complete setup.' };
|
|
1373
|
-
}
|
|
1374
|
-
throw new Error('Installation completed but Ollama not found');
|
|
1375
|
-
}
|
|
1376
|
-
} catch (error) {
|
|
1377
|
-
// Clean up on error
|
|
1378
|
-
if (fs.existsSync(tmpDir)) {
|
|
1379
|
-
fs.rmSync(tmpDir, { recursive: true, force: true });
|
|
1380
|
-
}
|
|
1381
|
-
throw new Error(`DMG installation failed: ${error.message}`);
|
|
1382
|
-
}
|
|
1383
|
-
}
|
|
1384
|
-
|
|
1385
|
-
/**
|
|
1386
|
-
* Install Ollama
|
|
1387
|
-
*/
|
|
1388
|
-
async installOllama() {
|
|
1389
|
-
try {
|
|
1390
|
-
// Check if Ollama is already installed
|
|
1391
|
-
if (this.isOllamaInstalled()) {
|
|
1392
|
-
this.logger.log('Ollama is already installed.');
|
|
1393
|
-
return { success: true, method: 'already-installed', alreadyInstalled: true };
|
|
1394
|
-
}
|
|
1395
|
-
|
|
1396
|
-
const platform = os.platform();
|
|
1397
|
-
|
|
1398
|
-
if (platform === 'darwin') {
|
|
1399
|
-
// macOS - try Homebrew first, then fall back to DMG download
|
|
1400
|
-
try {
|
|
1401
|
-
if (this.hasHomebrew()) {
|
|
1402
|
-
this.logger.log('🍺 Using Homebrew to install Ollama...');
|
|
1403
|
-
return await this.installOllamaViaHomebrew();
|
|
1404
|
-
}
|
|
1405
|
-
} catch (error) {
|
|
1406
|
-
this.logger.log(`⚠ Homebrew installation failed: ${error.message}`);
|
|
1407
|
-
this.logger.log('📥 Falling back to direct download...');
|
|
1408
|
-
}
|
|
1409
|
-
|
|
1410
|
-
// Fall back to direct DMG download
|
|
1411
|
-
try {
|
|
1412
|
-
this.logger.log('📥 Downloading and installing Ollama DMG...');
|
|
1413
|
-
return await this.installOllamaViaDMG();
|
|
1414
|
-
} catch (dmgError) {
|
|
1415
|
-
// If all methods fail, provide manual instructions
|
|
1416
|
-
this.logger.log('⚠ Automatic installation failed. Opening browser for manual installation...');
|
|
1417
|
-
const { spawn } = require('child_process');
|
|
1418
|
-
spawn('open', ['https://ollama.com/download']);
|
|
1419
|
-
|
|
1420
|
-
return {
|
|
1421
|
-
success: false,
|
|
1422
|
-
error: 'Automatic installation failed. Please download and install Ollama from https://ollama.com/download\n' +
|
|
1423
|
-
'After installation, run this command again.',
|
|
1424
|
-
needsManualInstall: true
|
|
1425
|
-
};
|
|
1426
|
-
}
|
|
1427
|
-
} else if (platform === 'linux') {
|
|
1428
|
-
// Linux - use install script
|
|
1429
|
-
this.logger.log('Downloading Ollama installer...');
|
|
1430
|
-
const installScript = 'curl -fsSL https://ollama.com/install.sh | sh';
|
|
1431
|
-
|
|
1432
|
-
execSync(installScript, {
|
|
1433
|
-
stdio: 'inherit',
|
|
1434
|
-
timeout: 300000 // 5 minutes
|
|
1435
|
-
});
|
|
1436
|
-
|
|
1437
|
-
return { success: true };
|
|
1438
|
-
} else if (platform === 'win32') {
|
|
1439
|
-
// Windows - need to download installer
|
|
1440
|
-
this.logger.log('Ollama installation on Windows requires manual download.');
|
|
1441
|
-
this.logger.log('Opening browser to https://ollama.com/download');
|
|
1442
|
-
|
|
1443
|
-
// Open browser to download page
|
|
1444
|
-
const { exec } = require('child_process');
|
|
1445
|
-
exec('start https://ollama.com/download');
|
|
1446
|
-
|
|
1447
|
-
return {
|
|
1448
|
-
success: false,
|
|
1449
|
-
error: 'Please download and install Ollama from https://ollama.com/download\n' +
|
|
1450
|
-
'After installation, run this command again.',
|
|
1451
|
-
needsManualInstall: true
|
|
1452
|
-
};
|
|
1453
|
-
} else {
|
|
1454
|
-
return {
|
|
1455
|
-
success: false,
|
|
1456
|
-
error: `Unsupported platform: ${platform}`
|
|
1457
|
-
};
|
|
1458
|
-
}
|
|
1459
|
-
} catch (error) {
|
|
1460
|
-
return {
|
|
1461
|
-
success: false,
|
|
1462
|
-
error: error.message
|
|
1463
|
-
};
|
|
1464
|
-
}
|
|
1465
|
-
}
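A minimal usage sketch for the install path above; the relative require path is an assumption, adjust it to wherever this file lives in your build. It simply branches on the result fields the method already returns.
// Sketch: install Ollama if missing, honouring the needsManualInstall fallback above.
const { ClineCLIManager } = require('./cline-cli-manager.cjs'); // assumed relative path

async function ensureOllama() {
  const manager = new ClineCLIManager();
  const result = await manager.installOllama();
  if (result.success) {
    console.log(result.alreadyInstalled ? 'Ollama already present' : `Installed via ${result.method || 'install script'}`);
  } else if (result.needsManualInstall) {
    console.log('Manual install required:', result.error);
  } else {
    console.error('Install failed:', result.error);
  }
  return result.success;
}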
|
|
1466
|
-
|
|
1467
|
-
/**
|
|
1468
|
-
* Configure Cline CLI with Ollama (local)
|
|
1469
|
-
* @param {string} modelName - Model name (e.g., 'deepseek-coder:33b')
|
|
1470
|
-
*/
|
|
1471
|
-
async configureWithOllama(modelName = 'deepseek-coder:33b') {
|
|
1472
|
-
try {
|
|
1473
|
-
const configPath = path.join(os.homedir(), '.cline_cli', 'cline_cli_settings.json');
|
|
1474
|
-
|
|
1475
|
-
// Read existing config or create new one
|
|
1476
|
-
let config = {};
|
|
1477
|
-
if (fs.existsSync(configPath)) {
|
|
1478
|
-
config = JSON.parse(fs.readFileSync(configPath, 'utf8'));
|
|
1479
|
-
}
|
|
1480
|
-
|
|
1481
|
-
// Ensure globalState exists
|
|
1482
|
-
config.globalState = config.globalState || {};
|
|
1483
|
-
|
|
1484
|
-
// Configure for Ollama (OpenAI-compatible API)
|
|
1485
|
-
config.globalState.apiProvider = 'openai-native';
|
|
1486
|
-
// Set BOTH apiModelId and openAiModelId - Cline CLI may check either
|
|
1487
|
-
config.globalState.apiModelId = modelName;
|
|
1488
|
-
config.globalState.openAiModelId = modelName;
|
|
1489
|
-
config.globalState.openAiBaseUrl = 'http://localhost:11434/v1';
|
|
1490
|
-
config.globalState.openAiApiKey = 'sk-ollama-placeholder-key-for-local-api'; // Ollama doesn't need a key, but Cline CLI requires one in sk- format
|
|
1491
|
-
|
|
1492
|
-
// Remove any leftover Gemini or other provider configs that might interfere
|
|
1493
|
-
delete config.globalState.geminiApiKey;
|
|
1494
|
-
delete config.globalState.openRouterApiKey;
|
|
1495
|
-
delete config.globalState.anthropicApiKey;
|
|
1496
|
-
|
|
1497
|
-
// Enable auto-approval for autonomous operation
|
|
1498
|
-
config.globalState.autoApprovalSettings = {
|
|
1499
|
-
enabled: true,
|
|
1500
|
-
actions: {
|
|
1501
|
-
readFiles: true,
|
|
1502
|
-
editFiles: true,
|
|
1503
|
-
executeSafeCommands: true,
|
|
1504
|
-
useMcp: true
|
|
1505
|
-
},
|
|
1506
|
-
maxRequests: 1000
|
|
1507
|
-
};
|
|
1508
|
-
|
|
1509
|
-
// Ensure settings exists
|
|
1510
|
-
config.settings = config.settings || {};
|
|
1511
|
-
config.settings['cline.enableCheckpoints'] = false;
|
|
1512
|
-
|
|
1513
|
-
// Write updated config
|
|
1514
|
-
fs.mkdirSync(path.dirname(configPath), { recursive: true });
|
|
1515
|
-
fs.writeFileSync(configPath, JSON.stringify(config, null, 4));
|
|
1516
|
-
|
|
1517
|
-
// Verify the config was written correctly
|
|
1518
|
-
const verifyConfig = JSON.parse(fs.readFileSync(configPath, 'utf8'));
|
|
1519
|
-
const actualModelId = verifyConfig.globalState?.openAiModelId || verifyConfig.globalState?.apiModelId;
|
|
1520
|
-
|
|
1521
|
-
this.logger.log('Cline CLI configured with Ollama');
|
|
1522
|
-
this.logger.log(` Provider: Ollama (Local)`);
|
|
1523
|
-
this.logger.log(` Model: ${actualModelId || modelName}`);
|
|
1524
|
-
this.logger.log(` Endpoint: http://localhost:11434/v1`);
|
|
1525
|
-
|
|
1526
|
-
if (actualModelId !== modelName) {
|
|
1527
|
-
this.logger.warn(` ⚠ Warning: Config shows model as ${actualModelId}, expected ${modelName}`);
|
|
1528
|
-
}
|
|
1529
|
-
|
|
1530
|
-
return { success: true, configPath };
|
|
1531
|
-
} catch (error) {
|
|
1532
|
-
return {
|
|
1533
|
-
success: false,
|
|
1534
|
-
error: error.message
|
|
1535
|
-
};
|
|
1536
|
-
}
|
|
1537
|
-
}
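An illustrative sketch of driving the Ollama configuration above and inspecting what was written; the require path is an assumption, and the model name is just the default the method already uses.
// Sketch: point Cline CLI at a local Ollama model and confirm the written settings.
const { ClineCLIManager } = require('./cline-cli-manager.cjs'); // assumed relative path
const fs = require('fs');

async function useLocalModel() {
  const manager = new ClineCLIManager();
  const result = await manager.configureWithOllama('deepseek-coder:33b');
  if (!result.success) throw new Error(result.error);
  // The written file now contains, among other fields set above:
  //   globalState.apiProvider   -> 'openai-native'
  //   globalState.openAiBaseUrl -> 'http://localhost:11434/v1'
  //   globalState.openAiModelId -> 'deepseek-coder:33b'
  const written = JSON.parse(fs.readFileSync(result.configPath, 'utf8'));
  console.log('Configured model:', written.globalState.openAiModelId);
}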
|
|
1538
|
-
|
|
1539
|
-
/**
|
|
1540
|
-
* Initialize Cline CLI configuration
|
|
1541
|
-
*/
|
|
1542
|
-
async init() {
|
|
1543
|
-
try {
|
|
1544
|
-
execSync('cline-cli init', {
|
|
1545
|
-
stdio: 'inherit',
|
|
1546
|
-
timeout: 30000
|
|
1547
|
-
});
|
|
1548
|
-
return { success: true };
|
|
1549
|
-
} catch (error) {
|
|
1550
|
-
return {
|
|
1551
|
-
success: false,
|
|
1552
|
-
error: error.message
|
|
1553
|
-
};
|
|
1554
|
-
}
|
|
1555
|
-
}
|
|
1556
|
-
|
|
1557
|
-
/**
|
|
1558
|
-
* Configure Cline CLI with Anthropic API
|
|
1559
|
-
* @param {string} apiKey - Anthropic API key
|
|
1560
|
-
*/
|
|
1561
|
-
async configureWithAnthropic(apiKey) {
|
|
1562
|
-
try {
|
|
1563
|
-
const configPath = path.join(os.homedir(), '.cline_cli', 'cline_cli_settings.json');
|
|
1564
|
-
|
|
1565
|
-
// Read existing config or create new one
|
|
1566
|
-
let config = {};
|
|
1567
|
-
if (fs.existsSync(configPath)) {
|
|
1568
|
-
config = JSON.parse(fs.readFileSync(configPath, 'utf8'));
|
|
1569
|
-
}
|
|
1570
|
-
|
|
1571
|
-
// Ensure globalState exists
|
|
1572
|
-
config.globalState = config.globalState || {};
|
|
1573
|
-
|
|
1574
|
-
// Remove invalid key marker if it exists (key is being reconfigured)
|
|
1575
|
-
const invalidMarker = path.join(os.homedir(), '.allnightai', '.anthropic-key-invalid');
|
|
1576
|
-
if (fs.existsSync(invalidMarker)) {
|
|
1577
|
-
fs.unlinkSync(invalidMarker);
|
|
1578
|
-
}
|
|
1579
|
-
|
|
1580
|
-
// Configure for Anthropic API (direct)
|
|
1581
|
-
config.globalState.apiProvider = 'anthropic';
|
|
1582
|
-
config.globalState.apiModelId = 'claude-3-5-sonnet-20241022';
|
|
1583
|
-
config.globalState.anthropicApiKey = apiKey;
|
|
1584
|
-
|
|
1585
|
-
// Enable auto-approval for autonomous operation
|
|
1586
|
-
config.globalState.autoApprovalSettings = {
|
|
1587
|
-
enabled: true,
|
|
1588
|
-
actions: {
|
|
1589
|
-
readFiles: true,
|
|
1590
|
-
editFiles: true,
|
|
1591
|
-
executeSafeCommands: true,
|
|
1592
|
-
useMcp: true
|
|
1593
|
-
},
|
|
1594
|
-
maxRequests: 1000
|
|
1595
|
-
};
|
|
1596
|
-
|
|
1597
|
-
// Ensure settings exists
|
|
1598
|
-
config.settings = config.settings || {};
|
|
1599
|
-
config.settings['cline.enableCheckpoints'] = false;
|
|
1600
|
-
|
|
1601
|
-
// Write updated config
|
|
1602
|
-
fs.mkdirSync(path.dirname(configPath), { recursive: true });
|
|
1603
|
-
fs.writeFileSync(configPath, JSON.stringify(config, null, 4));
|
|
1604
|
-
|
|
1605
|
-
this.logger.log('Cline CLI configured with Anthropic API');
|
|
1606
|
-
this.logger.log(` Provider: Anthropic (direct)`);
|
|
1607
|
-
this.logger.log(` Model: claude-3-5-sonnet-20241022`);
|
|
1608
|
-
this.logger.log(` API Key: ${apiKey.substring(0, 20)}...`);
|
|
1609
|
-
|
|
1610
|
-
return { success: true, configPath };
|
|
1611
|
-
} catch (error) {
|
|
1612
|
-
return {
|
|
1613
|
-
success: false,
|
|
1614
|
-
error: error.message
|
|
1615
|
-
};
|
|
1616
|
-
}
|
|
1617
|
-
}
|
|
1618
|
-
|
|
1619
|
-
/**
|
|
1620
|
-
* Configure Cline CLI with OpenRouter API
|
|
1621
|
-
* @param {string} apiKey - OpenRouter API key
|
|
1622
|
-
* @param {string} modelId - Model ID (optional, defaults to meta-llama/llama-3.3-70b-instruct:free)
|
|
1623
|
-
*/
|
|
1624
|
-
async configureWithOpenRouter(apiKey, modelId = 'meta-llama/llama-3.3-70b-instruct:free') {
|
|
1625
|
-
try {
|
|
1626
|
-
const configPath = path.join(os.homedir(), '.cline_cli', 'cline_cli_settings.json');
|
|
1627
|
-
|
|
1628
|
-
// Read existing config or create new one
|
|
1629
|
-
let config = {};
|
|
1630
|
-
if (fs.existsSync(configPath)) {
|
|
1631
|
-
config = JSON.parse(fs.readFileSync(configPath, 'utf8'));
|
|
1632
|
-
}
|
|
1633
|
-
|
|
1634
|
-
// Ensure globalState exists
|
|
1635
|
-
config.globalState = config.globalState || {};
|
|
1636
|
-
|
|
1637
|
-
// Remove invalid key marker if it exists (key is being reconfigured)
|
|
1638
|
-
const invalidMarker = path.join(os.homedir(), '.allnightai', '.openrouter-key-invalid');
|
|
1639
|
-
if (fs.existsSync(invalidMarker)) {
|
|
1640
|
-
fs.unlinkSync(invalidMarker);
|
|
1641
|
-
}
|
|
1642
|
-
|
|
1643
|
-
// Configure for OpenRouter API
|
|
1644
|
-
config.globalState.apiProvider = 'openrouter';
|
|
1645
|
-
config.globalState.apiModelId = modelId; // Cline CLI checks this field
|
|
1646
|
-
config.globalState.openRouterModelId = modelId;
|
|
1647
|
-
config.globalState.openRouterApiKey = apiKey;
|
|
1648
|
-
|
|
1649
|
-
// OpenRouter uses OpenAI-compatible API, so we need to set these as well
|
|
1650
|
-
config.globalState.openAiBaseUrl = 'https://openrouter.ai/api/v1';
|
|
1651
|
-
config.globalState.openAiModelId = modelId;
|
|
1652
|
-
|
|
1653
|
-
// Enable auto-approval for autonomous operation
|
|
1654
|
-
config.globalState.autoApprovalSettings = {
|
|
1655
|
-
enabled: true,
|
|
1656
|
-
actions: {
|
|
1657
|
-
readFiles: true,
|
|
1658
|
-
editFiles: true,
|
|
1659
|
-
executeSafeCommands: true,
|
|
1660
|
-
useMcp: true
|
|
1661
|
-
},
|
|
1662
|
-
maxRequests: 1000
|
|
1663
|
-
};
|
|
1664
|
-
|
|
1665
|
-
// Ensure settings exists
|
|
1666
|
-
config.settings = config.settings || {};
|
|
1667
|
-
config.settings['cline.enableCheckpoints'] = false;
|
|
1668
|
-
|
|
1669
|
-
// Write updated config
|
|
1670
|
-
fs.mkdirSync(path.dirname(configPath), { recursive: true });
|
|
1671
|
-
fs.writeFileSync(configPath, JSON.stringify(config, null, 4));
|
|
1672
|
-
|
|
1673
|
-
this.logger.log('Cline CLI configured with OpenRouter API');
|
|
1674
|
-
this.logger.log(` Provider: OpenRouter`);
|
|
1675
|
-
this.logger.log(` Model: ${modelId}`);
|
|
1676
|
-
this.logger.log(` API Key: ${apiKey.substring(0, 20)}...`);
|
|
1677
|
-
|
|
1678
|
-
return { success: true, configPath };
|
|
1679
|
-
} catch (error) {
|
|
1680
|
-
return {
|
|
1681
|
-
success: false,
|
|
1682
|
-
error: error.message
|
|
1683
|
-
};
|
|
1684
|
-
}
|
|
1685
|
-
}
|
|
1686
|
-
|
|
1687
|
-
/**
|
|
1688
|
-
* Configure Cline CLI with Google Gemini API
|
|
1689
|
-
* @param {string} apiKey - Google Gemini API key
|
|
1690
|
-
* @param {string} modelId - Model ID (optional, defaults to gemini-2.0-flash-exp)
|
|
1691
|
-
*/
|
|
1692
|
-
async configureWithGemini(apiKey, modelId = 'gemini-2.0-flash-exp') {
|
|
1693
|
-
try {
|
|
1694
|
-
const configPath = path.join(os.homedir(), '.cline_cli', 'cline_cli_settings.json');
|
|
1695
|
-
|
|
1696
|
-
// Read existing config or create new one
|
|
1697
|
-
let config = {};
|
|
1698
|
-
if (fs.existsSync(configPath)) {
|
|
1699
|
-
config = JSON.parse(fs.readFileSync(configPath, 'utf8'));
|
|
1700
|
-
}
|
|
1701
|
-
|
|
1702
|
-
// Ensure globalState exists
|
|
1703
|
-
config.globalState = config.globalState || {};
|
|
1704
|
-
|
|
1705
|
-
// Remove invalid key marker if it exists (key is being reconfigured)
|
|
1706
|
-
const invalidMarker = path.join(os.homedir(), '.allnightai', '.gemini-key-invalid');
|
|
1707
|
-
if (fs.existsSync(invalidMarker)) {
|
|
1708
|
-
fs.unlinkSync(invalidMarker);
|
|
1709
|
-
}
|
|
1710
|
-
|
|
1711
|
-
// Configure for Google Gemini API using OpenAI-compatible endpoint
|
|
1712
|
-
// Cline CLI uses OpenAI-compatible format, not a native 'gemini' provider
|
|
1713
|
-
config.globalState.apiProvider = 'openai-native';
|
|
1714
|
-
config.globalState.openAiBaseUrl = 'https://generativelanguage.googleapis.com/v1beta/openai';
|
|
1715
|
-
config.globalState.openAiModelId = modelId;
|
|
1716
|
-
config.globalState.openAiApiKey = apiKey;
|
|
1717
|
-
|
|
1718
|
-
// Enable auto-approval for autonomous operation
|
|
1719
|
-
config.globalState.autoApprovalSettings = {
|
|
1720
|
-
enabled: true,
|
|
1721
|
-
actions: {
|
|
1722
|
-
readFiles: true,
|
|
1723
|
-
editFiles: true,
|
|
1724
|
-
executeSafeCommands: true,
|
|
1725
|
-
useMcp: true
|
|
1726
|
-
},
|
|
1727
|
-
maxRequests: 1000
|
|
1728
|
-
};
|
|
1729
|
-
|
|
1730
|
-
// Ensure settings exists
|
|
1731
|
-
config.settings = config.settings || {};
|
|
1732
|
-
config.settings['cline.enableCheckpoints'] = false;
|
|
1733
|
-
|
|
1734
|
-
// Write updated config
|
|
1735
|
-
fs.mkdirSync(path.dirname(configPath), { recursive: true });
|
|
1736
|
-
fs.writeFileSync(configPath, JSON.stringify(config, null, 4));
|
|
1737
|
-
|
|
1738
|
-
this.logger.log('Cline CLI configured with Google Gemini API (OpenAI-compatible)');
|
|
1739
|
-
this.logger.log(` Provider: Google Gemini`);
|
|
1740
|
-
this.logger.log(` Base URL: https://generativelanguage.googleapis.com/v1beta/openai`);
|
|
1741
|
-
this.logger.log(` Model: ${modelId}`);
|
|
1742
|
-
this.logger.log(` API Key: ${apiKey.substring(0, 20)}...`);
|
|
1743
|
-
|
|
1744
|
-
return { success: true, configPath };
|
|
1745
|
-
} catch (error) {
|
|
1746
|
-
return {
|
|
1747
|
-
success: false,
|
|
1748
|
-
error: error.message
|
|
1749
|
-
};
|
|
1750
|
-
}
|
|
1751
|
-
}
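The three remote-provider setters above share the same return shape, so a caller can dispatch on a provider name. A rough sketch follows; the provider labels and require path are this example's own assumptions, not values Cline CLI itself consumes.
// Sketch: pick one of the configure* helpers above based on a user-selected provider.
const { ClineCLIManager } = require('./cline-cli-manager.cjs'); // assumed relative path

async function configureProvider(manager, provider, apiKey, modelId) {
  switch (provider) {
    case 'anthropic':  return manager.configureWithAnthropic(apiKey);
    case 'openrouter': return manager.configureWithOpenRouter(apiKey, modelId); // modelId optional
    case 'gemini':     return manager.configureWithGemini(apiKey, modelId);     // modelId optional
    default:           return { success: false, error: `Unknown provider: ${provider}` };
  }
}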
|
|
1752
|
-
|
|
1753
|
-
/**
|
|
1754
|
-
* Install Cline CLI via npm
|
|
1755
|
-
*/
|
|
1756
|
-
async install() {
|
|
1757
|
-
this.logger.log('Installing Cline CLI via npm...');
|
|
1758
|
-
|
|
1759
|
-
try {
|
|
1760
|
-
// Install globally
|
|
1761
|
-
execSync('npm install -g @yaegaki/cline-cli', {
|
|
1762
|
-
stdio: 'inherit',
|
|
1763
|
-
timeout: 120000 // 2 minute timeout
|
|
1764
|
-
});
|
|
1765
|
-
|
|
1766
|
-
this.logger.log('Cline CLI installed successfully');
|
|
1767
|
-
return { success: true };
|
|
1768
|
-
} catch (error) {
|
|
1769
|
-
this.logger.error('Failed to install Cline CLI:', error.message);
|
|
1770
|
-
return {
|
|
1771
|
-
success: false,
|
|
1772
|
-
error: error.message
|
|
1773
|
-
};
|
|
1774
|
-
}
|
|
1775
|
-
}
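A sketch of the bootstrap sequence the methods above support: check for the CLI, install it globally if missing, then run the built-in init. The require path is an assumption.
// Sketch: make sure cline-cli itself is available before any task is sent.
const { ClineCLIManager } = require('./cline-cli-manager.cjs'); // assumed relative path

async function ensureClineCli() {
  const manager = new ClineCLIManager();
  if (!manager.isInstalled()) {
    const installed = await manager.install();
    if (!installed.success) throw new Error(installed.error);
  }
  return manager.init(); // resolves to { success } or { success: false, error }
}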
|
|
1776
|
-
|
|
1777
|
-
/**
|
|
1778
|
-
* Send text to Cline CLI and execute
|
|
1779
|
-
* @param {string} text - The instruction text to send
|
|
1780
|
-
* @param {string} cwd - Working directory (defaults to current)
|
|
1781
|
-
* @param {boolean} fullAuto - Run in fully automated mode (default: true)
|
|
1782
|
-
* @returns {Promise<Object>} Result with success, output, and error
|
|
1783
|
-
*/
|
|
1784
|
-
async sendText(text, cwd = process.cwd(), fullAuto = true) {
|
|
1785
|
-
if (!this.isInstalled()) {
|
|
1786
|
-
return {
|
|
1787
|
-
success: false,
|
|
1788
|
-
error: 'Cline CLI is not installed. Run install() first.',
|
|
1789
|
-
needsInstall: true
|
|
1790
|
-
};
|
|
1791
|
-
}
|
|
1792
|
-
|
|
1793
|
-
try {
|
|
1794
|
-
const args = ['task', text, '--workspace', cwd];
|
|
1795
|
-
if (fullAuto) {
|
|
1796
|
-
args.push('--full-auto'); // Auto-approve changes
|
|
1797
|
-
}
|
|
1798
|
-
|
|
1799
|
-
this.logger.log(`Executing: cline-cli task "${text}" --workspace ${cwd} ${fullAuto ? '--full-auto' : ''}`);
|
|
1800
|
-
this.logger.log(`Working directory: ${cwd}`);
|
|
1801
|
-
|
|
1802
|
-
// Set up environment with API key based on configured provider
|
|
1803
|
-
const env = { ...process.env };
|
|
1804
|
-
|
|
1805
|
-
// Check which provider is configured and ensure API key is set
|
|
1806
|
-
try {
|
|
1807
|
-
const configPath = path.join(os.homedir(), '.cline_cli', 'cline_cli_settings.json');
|
|
1808
|
-
if (fs.existsSync(configPath)) {
|
|
1809
|
-
let config = JSON.parse(fs.readFileSync(configPath, 'utf8'));
|
|
1810
|
-
const apiProvider = config.globalState?.apiProvider;
|
|
1811
|
-
|
|
1812
|
-
if (apiProvider === 'openai-native' && config.globalState?.openAiBaseUrl === 'http://localhost:11434/v1') {
|
|
1813
|
-
// Ollama - uses openai-native provider with local endpoint
|
|
1814
|
-
// Cline CLI needs OPENAI_API_KEY env var even for Ollama (it validates the format)
|
|
1815
|
-
env.OPENAI_API_KEY = 'sk-ollama-placeholder-key-for-local-api';
|
|
1816
|
-
env.OPENAI_BASE_URL = 'http://localhost:11434/v1';
|
|
1817
|
-
} else if (apiProvider === 'openrouter') {
|
|
1818
|
-
const apiKey = config.globalState?.openRouterApiKey || this.getSavedOpenRouterKey() || process.env.OPENROUTER_API_KEY;
|
|
1819
|
-
if (apiKey) {
|
|
1820
|
-
env.OPENROUTER_API_KEY = apiKey;
|
|
1821
|
-
env.OPENAI_API_KEY = apiKey; // OpenRouter uses OpenAI-compatible API
|
|
1822
|
-
}
|
|
1823
|
-
} else if (apiProvider === 'openai-native' && config.globalState?.openAiBaseUrl?.includes('generativelanguage.googleapis.com')) {
|
|
1824
|
-
const apiKey = config.globalState?.openAiApiKey || this.getSavedGeminiKey() || process.env.GEMINI_API_KEY;
|
|
1825
|
-
if (apiKey) {
|
|
1826
|
-
env.OPENAI_API_KEY = apiKey;
|
|
1827
|
-
}
|
|
1828
|
-
} else {
|
|
1829
|
-
// Default to Anthropic
|
|
1830
|
-
const apiKey = config.globalState?.anthropicApiKey || this.getSavedAnthropicKey() || process.env.ANTHROPIC_API_KEY;
|
|
1831
|
-
if (apiKey) {
|
|
1832
|
-
env.ANTHROPIC_API_KEY = apiKey;
|
|
1833
|
-
}
|
|
1834
|
-
}
|
|
1835
|
-
}
|
|
1836
|
-
} catch (error) {
|
|
1837
|
-
this.logger.warn('Warning: Failed to load API key from config:', error.message);
|
|
1838
|
-
}
|
|
1839
|
-
|
|
1840
|
-
return new Promise((resolve) => {
|
|
1841
|
-
const proc = spawn('cline-cli', args, {
|
|
1842
|
-
stdio: ['pipe', 'pipe', 'pipe'],
|
|
1843
|
-
env: env,
|
|
1844
|
-
cwd: cwd
|
|
1845
|
-
// Do NOT use shell: true - it causes escaping issues with special characters
|
|
1846
|
-
});
|
|
1847
|
-
|
|
1848
|
-
let stdout = '';
|
|
1849
|
-
let stderr = '';
|
|
1850
|
-
|
|
1851
|
-
proc.stdout.on('data', (data) => {
|
|
1852
|
-
const chunk = data.toString();
|
|
1853
|
-
stdout += chunk;
|
|
1854
|
-
this.logger.log('[Cline]', chunk);
|
|
1855
|
-
});
|
|
1856
|
-
|
|
1857
|
-
proc.stderr.on('data', (data) => {
|
|
1858
|
-
const chunk = data.toString();
|
|
1859
|
-
stderr += chunk;
|
|
1860
|
-
this.logger.log('[Cline Error]', chunk);
|
|
1861
|
-
});
|
|
1862
|
-
|
|
1863
|
-
proc.on('close', (code) => {
|
|
1864
|
-
if (code === 0) {
|
|
1865
|
-
resolve({
|
|
1866
|
-
success: true,
|
|
1867
|
-
output: stdout,
|
|
1868
|
-
stderr: stderr,
|
|
1869
|
-
exitCode: code
|
|
1870
|
-
});
|
|
1871
|
-
} else {
|
|
1872
|
-
resolve({
|
|
1873
|
-
success: false,
|
|
1874
|
-
output: stdout,
|
|
1875
|
-
error: stderr || `Process exited with code ${code}`,
|
|
1876
|
-
exitCode: code
|
|
1877
|
-
});
|
|
1878
|
-
}
|
|
1879
|
-
});
|
|
1880
|
-
|
|
1881
|
-
proc.on('error', (error) => {
|
|
1882
|
-
resolve({
|
|
1883
|
-
success: false,
|
|
1884
|
-
error: error.message,
|
|
1885
|
-
exitCode: -1
|
|
1886
|
-
});
|
|
1887
|
-
});
|
|
1888
|
-
});
|
|
1889
|
-
} catch (error) {
|
|
1890
|
-
return {
|
|
1891
|
-
success: false,
|
|
1892
|
-
error: error.message
|
|
1893
|
-
};
|
|
1894
|
-
}
|
|
1895
|
-
}
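A usage sketch for sendText; it resolves rather than rejects, so callers branch on the returned object. The task text and workspace path are placeholders, and the require path is an assumption.
// Sketch: run one task to completion and inspect the result object.
const { ClineCLIManager } = require('./cline-cli-manager.cjs'); // assumed relative path

async function runTask() {
  const manager = new ClineCLIManager();
  const result = await manager.sendText('Add a /health endpoint', '/path/to/repo', true);
  if (result.needsInstall) {
    console.error(result.error);                       // cline-cli missing
  } else if (!result.success) {
    console.error(`exit ${result.exitCode}:`, result.error);
  } else {
    console.log(result.output);                        // accumulated stdout from cline-cli
  }
}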
|
|
1896
|
-
|
|
1897
|
-
/**
|
|
1898
|
-
* Run Cline CLI in background and return process
|
|
1899
|
-
* @param {string} text - The instruction text
|
|
1900
|
-
* @param {string} cwd - Working directory
|
|
1901
|
-
* @param {Function} onOutput - Callback for stdout chunks
|
|
1902
|
-
* @param {Function} onError - Callback for stderr chunks
|
|
1903
|
-
* @returns {ChildProcess} The spawned process
|
|
1904
|
-
*/
|
|
1905
|
-
runInBackground(text, cwd = process.cwd(), onOutput, onError) {
|
|
1906
|
-
// Use Cline CLI with --full-auto for actual code execution
|
|
1907
|
-
// The direct Ollama API approach doesn't execute code, only streams text
|
|
1908
|
-
// Cline CLI with proper Ollama config should work now
|
|
1909
|
-
const args = ['task', text, '--workspace', cwd, '--full-auto'];
|
|
1910
|
-
|
|
1911
|
-
// Set up environment with API key based on configured provider
|
|
1912
|
-
const env = { ...process.env };
|
|
1913
|
-
|
|
1914
|
-
// Check which provider is configured and ensure API key is in config file
|
|
1915
|
-
try {
|
|
1916
|
-
const configPath = path.join(os.homedir(), '.cline_cli', 'cline_cli_settings.json');
|
|
1917
|
-
if (fs.existsSync(configPath)) {
|
|
1918
|
-
let config = JSON.parse(fs.readFileSync(configPath, 'utf8'));
|
|
1919
|
-
const apiProvider = config.globalState?.apiProvider;
|
|
1920
|
-
|
|
1921
|
-
// Log current configuration for debugging
|
|
1922
|
-
if (apiProvider === 'openai-native' && config.globalState?.openAiBaseUrl === 'http://localhost:11434/v1') {
|
|
1923
|
-
const modelId = config.globalState?.openAiModelId || config.globalState?.apiModelId;
|
|
1924
|
-
this.logger.log(`✓ Cline CLI config: Provider=${apiProvider}, Model=${modelId}, BaseURL=${config.globalState?.openAiBaseUrl}`);
|
|
1925
|
-
}
|
|
1926
|
-
|
|
1927
|
-
if (apiProvider === 'openrouter') {
|
|
1928
|
-
// Ensure key is in config file (sync from saved file if needed)
|
|
1929
|
-
if (!config.globalState?.openRouterApiKey) {
|
|
1930
|
-
const savedKey = this.getSavedOpenRouterKey();
|
|
1931
|
-
if (savedKey && savedKey.trim().length > 0) {
|
|
1932
|
-
config.globalState = config.globalState || {};
|
|
1933
|
-
config.globalState.openRouterApiKey = savedKey;
|
|
1934
|
-
fs.writeFileSync(configPath, JSON.stringify(config, null, 4));
|
|
1935
|
-
this.logger.log('✓ Synced OpenRouter API key to Cline CLI config');
|
|
1936
|
-
}
|
|
1937
|
-
}
|
|
1938
|
-
|
|
1939
|
-
// Set environment variables - OpenRouter uses OpenAI-compatible API
|
|
1940
|
-
const apiKey = config.globalState?.openRouterApiKey || this.getSavedOpenRouterKey() || process.env.OPENROUTER_API_KEY;
|
|
1941
|
-
if (apiKey) {
|
|
1942
|
-
env.OPENROUTER_API_KEY = apiKey;
|
|
1943
|
-
env.OPENAI_API_KEY = apiKey; // OpenRouter uses OpenAI-compatible API
|
|
1944
|
-
} else {
|
|
1945
|
-
this.logger.warn('Warning: OPENROUTER_API_KEY not set');
|
|
1946
|
-
}
|
|
1947
|
-
} else if (apiProvider === 'openai-native' && config.globalState?.openAiBaseUrl?.includes('generativelanguage.googleapis.com')) {
|
|
1948
|
-
// Gemini via OpenAI-compatible endpoint
|
|
1949
|
-
// Ensure key is in config file (sync from saved file if needed)
|
|
1950
|
-
if (!config.globalState?.openAiApiKey) {
|
|
1951
|
-
const savedKey = this.getSavedGeminiKey();
|
|
1952
|
-
if (savedKey && savedKey.trim().length > 0) {
|
|
1953
|
-
config.globalState = config.globalState || {};
|
|
1954
|
-
config.globalState.openAiApiKey = savedKey;
|
|
1955
|
-
fs.writeFileSync(configPath, JSON.stringify(config, null, 4));
|
|
1956
|
-
this.logger.log('✓ Synced Gemini API key to Cline CLI config');
|
|
1957
|
-
}
|
|
1958
|
-
}
|
|
1959
|
-
|
|
1960
|
-
// Set OPENAI_API_KEY environment variable - Cline CLI with openai-native provider expects this
|
|
1961
|
-
const apiKey = config.globalState?.openAiApiKey || this.getSavedGeminiKey() || process.env.GEMINI_API_KEY;
|
|
1962
|
-
if (apiKey) {
|
|
1963
|
-
env.OPENAI_API_KEY = apiKey; // CRITICAL: Cline CLI needs this for openai-native provider
|
|
1964
|
-
// Also ensure it's definitely in the config file
|
|
1965
|
-
if (!config.globalState?.openAiApiKey) {
|
|
1966
|
-
config.globalState.openAiApiKey = apiKey;
|
|
1967
|
-
fs.writeFileSync(configPath, JSON.stringify(config, null, 4));
|
|
1968
|
-
this.logger.log('✓ Synced Gemini API key to Cline CLI config');
|
|
1969
|
-
}
|
|
1970
|
-
} else {
|
|
1971
|
-
this.logger.warn('Warning: Gemini API key not set');
|
|
1972
|
-
}
|
|
1973
|
-
} else if (apiProvider === 'openai-native' && config.globalState?.openAiBaseUrl === 'http://localhost:11434/v1') {
|
|
1974
|
-
// Ollama - uses openai-native provider with local endpoint
|
|
1975
|
-
// Cline CLI needs OPENAI_API_KEY env var even for Ollama (it validates the format)
|
|
1976
|
-
// Also ensure OPENAI_BASE_URL is set for Ollama
|
|
1977
|
-
env.OPENAI_API_KEY = 'sk-ollama-placeholder-key-for-local-api';
|
|
1978
|
-
env.OPENAI_BASE_URL = 'http://localhost:11434/v1';
|
|
1979
|
-
|
|
1980
|
-
this.logger.log('✓ Set OPENAI_API_KEY environment variable for Ollama');
|
|
1981
|
-
|
|
1982
|
-
// Verify Ollama service is running and model is accessible
|
|
1983
|
-
this.logger.log('✓ Verifying Ollama service is running...');
|
|
1984
|
-
const { execSync } = require('child_process');
|
|
1985
|
-
try {
|
|
1986
|
-
// Test basic connectivity
|
|
1987
|
-
execSync('curl -s http://localhost:11434/api/tags > /dev/null 2>&1', { timeout: 2000 });
|
|
1988
|
-
this.logger.log('✓ Ollama service is running');
|
|
1989
|
-
|
|
1990
|
-
// Test if the configured model is accessible
|
|
1991
|
-
const modelId = config.globalState?.openAiModelId || config.globalState?.apiModelId;
|
|
1992
|
-
if (modelId) {
|
|
1993
|
-
try {
|
|
1994
|
-
// Test if model can be accessed via API
|
|
1995
|
-
execSync(`curl -s -X POST http://localhost:11434/api/generate -d '{"model":"${modelId}","prompt":"test","stream":false}' -H "Content-Type: application/json"`, { timeout: 3000 });
|
|
1996
|
-
this.logger.log(`✓ Model ${modelId} is accessible`);
|
|
1997
|
-
} catch (modelError) {
|
|
1998
|
-
this.logger.warn(`⚠ Model ${modelId} may not be accessible or may need to be pulled`);
|
|
1999
|
-
this.logger.warn(` Try: ollama pull ${modelId}`);
|
|
2000
|
-
}
|
|
2001
|
-
}
|
|
2002
|
-
} catch (error) {
|
|
2003
|
-
this.logger.warn('⚠ Ollama service may not be running - Cline CLI may fail');
|
|
2004
|
-
this.logger.warn(' Start it with: ollama serve');
|
|
2005
|
-
this.logger.warn(' Or launch Ollama.app from /Applications');
|
|
2006
|
-
}
|
|
2007
|
-
} else {
|
|
2008
|
-
// Default to Anthropic for backwards compatibility
|
|
2009
|
-
// Ensure key is in config file (sync from saved file if needed)
|
|
2010
|
-
if (!config.globalState?.anthropicApiKey) {
|
|
2011
|
-
const savedKey = this.getSavedAnthropicKey();
|
|
2012
|
-
if (savedKey && savedKey.trim().length > 0) {
|
|
2013
|
-
config.globalState = config.globalState || {};
|
|
2014
|
-
config.globalState.anthropicApiKey = savedKey;
|
|
2015
|
-
fs.writeFileSync(configPath, JSON.stringify(config, null, 4));
|
|
2016
|
-
this.logger.log('✓ Synced Anthropic API key to Cline CLI config');
|
|
2017
|
-
}
|
|
2018
|
-
}
|
|
2019
|
-
|
|
2020
|
-
// Set environment variable as fallback
|
|
2021
|
-
const apiKey = config.globalState?.anthropicApiKey || this.getSavedAnthropicKey() || process.env.ANTHROPIC_API_KEY;
|
|
2022
|
-
if (apiKey) {
|
|
2023
|
-
env.ANTHROPIC_API_KEY = apiKey;
|
|
2024
|
-
} else {
|
|
2025
|
-
this.logger.warn('Warning: ANTHROPIC_API_KEY not set');
|
|
2026
|
-
}
|
|
2027
|
-
}
|
|
2028
|
-
} else {
|
|
2029
|
-
// No config file, try Anthropic for backwards compatibility
|
|
2030
|
-
const apiKey = this.getSavedAnthropicKey();
|
|
2031
|
-
if (apiKey) {
|
|
2032
|
-
env.ANTHROPIC_API_KEY = apiKey;
|
|
2033
|
-
} else if (!env.ANTHROPIC_API_KEY) {
|
|
2034
|
-
this.logger.warn('Warning: ANTHROPIC_API_KEY not set');
|
|
2035
|
-
}
|
|
2036
|
-
}
|
|
2037
|
-
} catch (error) {
|
|
2038
|
-
this.logger.warn('Warning: Failed to load API key from config:', error.message);
|
|
2039
|
-
}
|
|
2040
|
-
|
|
2041
|
-
// Add error handler for spawn failures
|
|
2042
|
-
let proc;
|
|
2043
|
-
try {
|
|
2044
|
-
proc = spawn('cline-cli', args, {
|
|
2045
|
-
stdio: ['pipe', 'pipe', 'pipe'],
|
|
2046
|
-
env: env,
|
|
2047
|
-
cwd: cwd
|
|
2048
|
-
// DO NOT use shell: true - it causes escaping issues with special characters
|
|
2049
|
-
});
|
|
2050
|
-
|
|
2051
|
-
// Handle spawn errors (e.g., cline-cli not found)
|
|
2052
|
-
proc.on('error', (spawnError) => {
|
|
2053
|
-
if (onError) {
|
|
2054
|
-
onError(`Failed to spawn Cline CLI: ${spawnError.message}\n`);
|
|
2055
|
-
if (spawnError.code === 'ENOENT') {
|
|
2056
|
-
onError('Cline CLI is not installed or not in PATH. Install with: npm install -g @yaegaki/cline-cli\n');
|
|
2057
|
-
}
|
|
2058
|
-
}
|
|
2059
|
-
});
|
|
2060
|
-
|
|
2061
|
-
if (onOutput) {
|
|
2062
|
-
proc.stdout.on('data', (data) => {
|
|
2063
|
-
onOutput(data.toString());
|
|
2064
|
-
});
|
|
2065
|
-
}
|
|
2066
|
-
|
|
2067
|
-
if (onError) {
|
|
2068
|
-
proc.stderr.on('data', (data) => {
|
|
2069
|
-
onError(data.toString());
|
|
2070
|
-
});
|
|
2071
|
-
}
|
|
2072
|
-
} catch (spawnError) {
|
|
2073
|
-
// If spawn itself fails (shouldn't happen, but handle it)
|
|
2074
|
-
if (onError) {
|
|
2075
|
-
onError(`Failed to start Cline CLI: ${spawnError.message}\n`);
|
|
2076
|
-
}
|
|
2077
|
-
// Return a mock process that will fail immediately
|
|
2078
|
-
const { EventEmitter } = require('events');
|
|
2079
|
-
const mockProc = new EventEmitter();
|
|
2080
|
-
mockProc.pid = null;
|
|
2081
|
-
mockProc.kill = () => {};
|
|
2082
|
-
// keep EventEmitter's own on() so the 'close' emitted below actually reaches listeners
|
|
2083
|
-
mockProc.stdout = { on: () => {} };
|
|
2084
|
-
mockProc.stderr = { on: () => {} };
|
|
2085
|
-
setTimeout(() => {
|
|
2086
|
-
mockProc.emit('close', 1);
|
|
2087
|
-
}, 0);
|
|
2088
|
-
return mockProc;
|
|
2089
|
-
}
|
|
2090
|
-
|
|
2091
|
-
return proc;
|
|
2092
|
-
}
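runInBackground returns the spawned child synchronously, so callers wire the callbacks and keep the handle for cancellation. A sketch, with placeholder task text and workspace path and an assumed require path:
// Sketch: stream output from a long-running task and cancel it on demand.
const { ClineCLIManager } = require('./cline-cli-manager.cjs'); // assumed relative path

const manager = new ClineCLIManager();
const proc = manager.runInBackground(
  'Refactor the logger module',               // illustrative task
  '/path/to/repo',                            // illustrative workspace
  (chunk) => process.stdout.write(chunk),     // onOutput
  (chunk) => process.stderr.write(chunk)      // onError
);
proc.on('close', (code) => console.log(`cline-cli exited with ${code}`));
setTimeout(() => proc.kill(), 10 * 60 * 1000); // hard stop after 10 minutes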
|
|
2093
|
-
|
|
2094
|
-
/**
|
|
2095
|
-
* Run Ollama directly via HTTP API (bypasses Cline CLI)
|
|
2096
|
-
* This is used when Cline CLI has issues with Ollama API key validation
|
|
2097
|
-
* @param {string} text - The instruction text
|
|
2098
|
-
* @param {string} cwd - Working directory
|
|
2099
|
-
* @param {Function} onOutput - Callback for response chunks
|
|
2100
|
-
* @param {Function} onError - Callback for errors
|
|
2101
|
-
* @returns {Object} Mock process object with kill() method
|
|
2102
|
-
*/
|
|
2103
|
-
async runOllamaDirectly(text, cwd = process.cwd(), onOutput, onError) {
|
|
2104
|
-
const https = require('https');
|
|
2105
|
-
const http = require('http');
|
|
2106
|
-
|
|
2107
|
-
// Get configured model from Cline CLI config
|
|
2108
|
-
const configPath = path.join(os.homedir(), '.cline_cli', 'cline_cli_settings.json');
|
|
2109
|
-
let modelId = 'deepseek-coder:33b'; // default
|
|
2110
|
-
|
|
2111
|
-
try {
|
|
2112
|
-
if (fs.existsSync(configPath)) {
|
|
2113
|
-
const config = JSON.parse(fs.readFileSync(configPath, 'utf8'));
|
|
2114
|
-
modelId = config.globalState?.openAiModelId || config.globalState?.apiModelId || modelId;
|
|
2115
|
-
}
|
|
2116
|
-
} catch (error) {
|
|
2117
|
-
this.logger.warn('Could not read model from config, using default:', modelId);
|
|
2118
|
-
}
|
|
2119
|
-
|
|
2120
|
-
// Create system prompt that mimics Cline's behavior with tool capabilities
|
|
2121
|
-
const systemPrompt = `You are Cline, an autonomous AI coding assistant with FULL access to the filesystem and command execution.
|
|
2122
|
-
|
|
2123
|
-
CAPABILITIES - You CAN and MUST:
|
|
2124
|
-
✓ Read any file in the project
|
|
2125
|
-
✓ Write/edit files to implement features and fix bugs
|
|
2126
|
-
✓ Execute bash commands to test changes
|
|
2127
|
-
✓ Install packages and dependencies
|
|
2128
|
-
✓ Run tests and verify functionality
|
|
2129
|
-
✓ Take screenshots to verify UI changes
|
|
2130
|
-
|
|
2131
|
-
WORKING DIRECTORY: ${cwd}
|
|
2132
|
-
|
|
2133
|
-
TASK WORKFLOW - Follow ALL stages in order:
|
|
2134
|
-
1. PREPARE - Read relevant files, understand requirements, review code structure
|
|
2135
|
-
2. ACT - Implement the actual functionality with complete, working code
|
|
2136
|
-
3. CLEAN UP - Apply DRY principles, lint, remove temp files, fix any issues
|
|
2137
|
-
4. VERIFY - Test implementation, run automation, take screenshots to confirm it works
|
|
2138
|
-
5. DONE - Update status ONLY when everything is working perfectly
|
|
2139
|
-
|
|
2140
|
-
CRITICAL RULES:
|
|
2141
|
-
❌ NO placeholder code, NO "// rest of code" comments, NO incomplete implementations
|
|
2142
|
-
❌ NO saying "I don't have access to files" - YOU DO, just read/write them
|
|
2143
|
-
✓ ALWAYS provide COMPLETE, FUNCTIONAL code
|
|
2144
|
-
✓ ACTUALLY run commands and verify they work
|
|
2145
|
-
✓ UPDATE status in REQUIREMENTS file at EACH stage
|
|
2146
|
-
✓ NEVER mark DONE until you've VERIFIED everything works
|
|
2147
|
-
|
|
2148
|
-
RESPONSE FORMAT:
|
|
2149
|
-
To read a file: "Let me read <filename>..." then show the content
|
|
2150
|
-
To write a file: "I'll update <filename>..." then show the complete new content
|
|
2151
|
-
To run command: "Running: <command>..." then show the output
|
|
2152
|
-
To verify: "Testing the changes..." then show test results
|
|
2153
|
-
|
|
2154
|
-
CURRENT TASK: ${text}
|
|
2155
|
-
|
|
2156
|
-
Begin with PREPARE stage. Read necessary files and understand the requirements before coding.`;
|
|
2157
|
-
|
|
2158
|
-
const requestBody = JSON.stringify({
|
|
2159
|
-
model: modelId,
|
|
2160
|
-
messages: [
|
|
2161
|
-
{ role: 'system', content: systemPrompt },
|
|
2162
|
-
{ role: 'user', content: text }
|
|
2163
|
-
],
|
|
2164
|
-
stream: true,
|
|
2165
|
-
temperature: 0.7,
|
|
2166
|
-
max_tokens: 4000
|
|
2167
|
-
});
|
|
2168
|
-
|
|
2169
|
-
return new Promise((resolve, reject) => {
|
|
2170
|
-
let aborted = false;
|
|
2171
|
-
let response = '';
|
|
2172
|
-
|
|
2173
|
-
const options = {
|
|
2174
|
-
hostname: 'localhost',
|
|
2175
|
-
port: 11434,
|
|
2176
|
-
path: '/v1/chat/completions',
|
|
2177
|
-
method: 'POST',
|
|
2178
|
-
headers: {
|
|
2179
|
-
'Content-Type': 'application/json',
|
|
2180
|
-
'Content-Length': Buffer.byteLength(requestBody)
|
|
2181
|
-
}
|
|
2182
|
-
};
|
|
2183
|
-
|
|
2184
|
-
const req = http.request(options, (res) => {
|
|
2185
|
-
res.on('data', (chunk) => {
|
|
2186
|
-
if (aborted) return;
|
|
2187
|
-
|
|
2188
|
-
const lines = chunk.toString().split('\n').filter(line => line.trim());
|
|
2189
|
-
|
|
2190
|
-
for (const line of lines) {
|
|
2191
|
-
if (line.startsWith('data: ')) {
|
|
2192
|
-
const data = line.slice(6);
|
|
2193
|
-
if (data === '[DONE]') continue;
|
|
2194
|
-
|
|
2195
|
-
try {
|
|
2196
|
-
const parsed = JSON.parse(data);
|
|
2197
|
-
const content = parsed.choices?.[0]?.delta?.content;
|
|
2198
|
-
if (content && onOutput) {
|
|
2199
|
-
response += content;
|
|
2200
|
-
onOutput(content);
|
|
2201
|
-
}
|
|
2202
|
-
} catch (e) {
|
|
2203
|
-
// Ignore parse errors for streaming chunks
|
|
2204
|
-
}
|
|
2205
|
-
}
|
|
2206
|
-
}
|
|
2207
|
-
});
|
|
2208
|
-
|
|
2209
|
-
res.on('end', () => {
|
|
2210
|
-
if (!aborted) {
|
|
2211
|
-
resolve({
|
|
2212
|
-
killed: false,
|
|
2213
|
-
kill: () => { aborted = true; },
|
|
2214
|
-
on: () => {},
|
|
2215
|
-
stdout: { on: () => {} },
|
|
2216
|
-
stderr: { on: () => {} }
|
|
2217
|
-
});
|
|
2218
|
-
}
|
|
2219
|
-
});
|
|
2220
|
-
});
|
|
2221
|
-
|
|
2222
|
-
req.on('error', (error) => {
|
|
2223
|
-
if (onError) {
|
|
2224
|
-
onError(`Ollama API error: ${error.message}\n`);
|
|
2225
|
-
}
|
|
2226
|
-
reject(error);
|
|
2227
|
-
});
|
|
2228
|
-
|
|
2229
|
-
req.write(requestBody);
|
|
2230
|
-
req.end();
|
|
2231
|
-
|
|
2232
|
-
// Return mock process object immediately
|
|
2233
|
-
resolve({
|
|
2234
|
-
killed: false,
|
|
2235
|
-
kill: () => {
|
|
2236
|
-
aborted = true;
|
|
2237
|
-
req.destroy();
|
|
2238
|
-
},
|
|
2239
|
-
on: (event, handler) => {
|
|
2240
|
-
if (event === 'close') {
|
|
2241
|
-
// Call handler when request completes
|
|
2242
|
-
req.on('close', () => handler(0));
|
|
2243
|
-
}
|
|
2244
|
-
},
|
|
2245
|
-
stdout: { on: () => {} },
|
|
2246
|
-
stderr: { on: () => {} }
|
|
2247
|
-
});
|
|
2248
|
-
});
|
|
2249
|
-
}
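runOllamaDirectly resolves immediately with a lightweight handle: kill() aborts the HTTP request and on('close', fn) fires when the request closes. A minimal sketch of driving it, with a placeholder prompt and an assumed require path:
// Sketch: stream a completion from the local Ollama endpoint, bypassing cline-cli.
const { ClineCLIManager } = require('./cline-cli-manager.cjs'); // assumed relative path

async function askOllama() {
  const manager = new ClineCLIManager();
  const handle = await manager.runOllamaDirectly(
    'Explain what the "exports" field in package.json does',
    process.cwd(),
    (chunk) => process.stdout.write(chunk),   // streamed delta content
    (err) => console.error(err)
  );
  handle.on('close', () => console.log('\n[stream finished]'));
  // handle.kill() would abort the underlying request if cancellation is needed.
}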
|
|
2250
|
-
}
|
|
2251
|
-
|
|
2252
|
-
module.exports = { ClineCLIManager };
|
|
1
|
+
// Cline CLI Manager - handles Cline CLI installation and execution
|
|
2
|
+
const { execSync, spawn } = require('child_process');
|
|
3
|
+
const fs = require('fs');
|
|
4
|
+
const path = require('path');
|
|
5
|
+
const os = require('os');
|
|
6
|
+
|
|
7
|
+
class ClineCLIManager {
|
|
8
|
+
constructor() {
|
|
9
|
+
this.logger = console;
|
|
10
|
+
}
|
|
11
|
+
|
|
12
|
+
/**
|
|
13
|
+
* Check if Cline CLI is installed
|
|
14
|
+
*/
|
|
15
|
+
isInstalled() {
|
|
16
|
+
try {
|
|
17
|
+
execSync('which cline-cli', { stdio: 'pipe' });
|
|
18
|
+
return true;
|
|
19
|
+
} catch {
|
|
20
|
+
return false;
|
|
21
|
+
}
|
|
22
|
+
}
|
|
23
|
+
|
|
24
|
+
/**
|
|
25
|
+
* Get Cline CLI version
|
|
26
|
+
*/
|
|
27
|
+
getVersion() {
|
|
28
|
+
try {
|
|
29
|
+
const version = execSync('cline-cli --version', { encoding: 'utf8', stdio: 'pipe' });
|
|
30
|
+
return version.trim();
|
|
31
|
+
} catch {
|
|
32
|
+
return null;
|
|
33
|
+
}
|
|
34
|
+
}
|
|
35
|
+
|
|
36
|
+
/**
|
|
37
|
+
* Validate an API key by making a test API call
|
|
38
|
+
* Returns { valid: boolean, error?: string }
|
|
39
|
+
*/
|
|
40
|
+
async validateApiKey(provider, apiKey) {
|
|
41
|
+
if (!apiKey || apiKey.trim().length === 0) {
|
|
42
|
+
return { valid: false, error: 'API key is empty' };
|
|
43
|
+
}
|
|
44
|
+
|
|
45
|
+
try {
|
|
46
|
+
if (provider === 'openrouter') {
|
|
47
|
+
// Validate OpenRouter key by making a test API call
|
|
48
|
+
// This catches both invalid keys AND rate limit issues
|
|
49
|
+
const https = require('https');
|
|
50
|
+
return new Promise((resolve) => {
|
|
51
|
+
const testPayload = JSON.stringify({
|
|
52
|
+
model: 'meta-llama/llama-3.3-70b-instruct:free',
|
|
53
|
+
messages: [{ role: 'user', content: 'hi' }],
|
|
54
|
+
max_tokens: 1
|
|
55
|
+
});
|
|
56
|
+
|
|
57
|
+
const req = https.request({
|
|
58
|
+
hostname: 'openrouter.ai',
|
|
59
|
+
path: '/api/v1/chat/completions',
|
|
60
|
+
method: 'POST',
|
|
61
|
+
headers: {
|
|
62
|
+
'Authorization': `Bearer ${apiKey}`,
|
|
63
|
+
'Content-Type': 'application/json',
|
|
64
|
+
'Content-Length': Buffer.byteLength(testPayload),
|
|
65
|
+
'User-Agent': 'VibeCodingMachine/1.0'
|
|
66
|
+
},
|
|
67
|
+
timeout: 5000 // 5 second timeout
|
|
68
|
+
}, (res) => {
|
|
69
|
+
let data = '';
|
|
70
|
+
res.on('data', (chunk) => { data += chunk; });
|
|
71
|
+
res.on('end', () => {
|
|
72
|
+
if (res.statusCode === 200) {
|
|
73
|
+
resolve({ valid: true });
|
|
74
|
+
} else if (res.statusCode === 403) {
|
|
75
|
+
// Check if it's a rate limit error
|
|
76
|
+
try {
|
|
77
|
+
const errorData = JSON.parse(data);
|
|
78
|
+
if (errorData.error && errorData.error.message) {
|
|
79
|
+
const errorMsg = errorData.error.message;
|
|
80
|
+
if (errorMsg.includes('limit exceeded')) {
|
|
81
|
+
resolve({
|
|
82
|
+
valid: false,
|
|
83
|
+
error: 'API key rate limit exceeded. Please create a new free API key at https://openrouter.ai/keys',
|
|
84
|
+
rateLimited: true
|
|
85
|
+
});
|
|
86
|
+
return;
|
|
87
|
+
}
|
|
88
|
+
}
|
|
89
|
+
} catch {}
|
|
90
|
+
resolve({ valid: false, error: 'Invalid or expired API key' });
|
|
91
|
+
} else if (res.statusCode === 401) {
|
|
92
|
+
resolve({ valid: false, error: 'Invalid API key' });
|
|
93
|
+
} else {
|
|
94
|
+
resolve({ valid: false, error: `API returned status ${res.statusCode}` });
|
|
95
|
+
}
|
|
96
|
+
});
|
|
97
|
+
});
|
|
98
|
+
|
|
99
|
+
req.on('error', (error) => {
|
|
100
|
+
// Network errors don't necessarily mean invalid key
|
|
101
|
+
// Allow it but warn
|
|
102
|
+
resolve({ valid: true, warning: `Could not validate key: ${error.message}` });
|
|
103
|
+
});
|
|
104
|
+
|
|
105
|
+
req.on('timeout', () => {
|
|
106
|
+
req.destroy();
|
|
107
|
+
resolve({ valid: true, warning: 'Validation timed out; accepting key without full verification' });
|
|
108
|
+
});
|
|
109
|
+
|
|
110
|
+
req.write(testPayload);
|
|
111
|
+
req.end();
|
|
112
|
+
});
|
|
113
|
+
} else if (provider === 'anthropic') {
|
|
114
|
+
// Validate Anthropic key by making a minimal API call
|
|
115
|
+
const https = require('https');
|
|
116
|
+
return new Promise((resolve) => {
|
|
117
|
+
const payload = JSON.stringify({
|
|
118
|
+
model: 'claude-3-haiku-20240307',
|
|
119
|
+
max_tokens: 1,
|
|
120
|
+
messages: [{ role: 'user', content: 'test' }]
|
|
121
|
+
});
|
|
122
|
+
|
|
123
|
+
const req = https.request({
|
|
124
|
+
hostname: 'api.anthropic.com',
|
|
125
|
+
path: '/v1/messages',
|
|
126
|
+
method: 'POST',
|
|
127
|
+
headers: {
|
|
128
|
+
'x-api-key': apiKey,
|
|
129
|
+
'anthropic-version': '2023-06-01',
|
|
130
|
+
'Content-Type': 'application/json',
|
|
131
|
+
'Content-Length': Buffer.byteLength(payload),
|
|
132
|
+
'User-Agent': 'VibeCodingMachine/1.0'
|
|
133
|
+
},
|
|
134
|
+
timeout: 5000 // 5 second timeout
|
|
135
|
+
}, (res) => {
|
|
136
|
+
let data = '';
|
|
137
|
+
res.on('data', (chunk) => { data += chunk; });
|
|
138
|
+
res.on('end', () => {
|
|
139
|
+
if (res.statusCode === 200 || res.statusCode === 400) {
|
|
140
|
+
// 200 = success, 400 = bad request but key is valid (wrong params)
|
|
141
|
+
resolve({ valid: true });
|
|
142
|
+
} else if (res.statusCode === 401 || res.statusCode === 403) {
|
|
143
|
+
resolve({ valid: false, error: 'Invalid or expired API key' });
|
|
144
|
+
} else {
|
|
145
|
+
resolve({ valid: false, error: `API returned status ${res.statusCode}` });
|
|
146
|
+
}
|
|
147
|
+
});
|
|
148
|
+
});
|
|
149
|
+
|
|
150
|
+
req.on('error', (error) => {
|
|
151
|
+
// Network errors don't necessarily mean invalid key
|
|
152
|
+
resolve({ valid: true, warning: `Could not validate key: ${error.message}` });
|
|
153
|
+
});
|
|
154
|
+
|
|
155
|
+
req.on('timeout', () => {
|
|
156
|
+
req.destroy();
|
|
157
|
+
resolve({ valid: true, warning: 'Validation timed out; accepting key without full verification' });
|
|
158
|
+
});
|
|
159
|
+
|
|
160
|
+
req.write(payload);
|
|
161
|
+
req.end();
|
|
162
|
+
});
|
|
163
|
+
} else if (provider === 'gemini') {
|
|
164
|
+
// Validate Google Gemini key by making a test API call
|
|
165
|
+
const https = require('https');
|
|
166
|
+
return new Promise((resolve) => {
|
|
167
|
+
const payload = JSON.stringify({
|
|
168
|
+
contents: [{
|
|
169
|
+
parts: [{ text: 'hi' }]
|
|
170
|
+
}]
|
|
171
|
+
});
|
|
172
|
+
|
|
173
|
+
const req = https.request({
|
|
174
|
+
hostname: 'generativelanguage.googleapis.com',
|
|
175
|
+
path: `/v1beta/models/gemini-2.0-flash-exp:generateContent?key=${apiKey}`,
|
|
176
|
+
method: 'POST',
|
|
177
|
+
headers: {
|
|
178
|
+
'Content-Type': 'application/json',
|
|
179
|
+
'Content-Length': Buffer.byteLength(payload)
|
|
180
|
+
},
|
|
181
|
+
timeout: 5000
|
|
182
|
+
}, (res) => {
|
|
183
|
+
let data = '';
|
|
184
|
+
res.on('data', (chunk) => { data += chunk; });
|
|
185
|
+
res.on('end', () => {
|
|
186
|
+
if (res.statusCode === 200) {
|
|
187
|
+
resolve({ valid: true });
|
|
188
|
+
} else if (res.statusCode === 403 || res.statusCode === 401) {
|
|
189
|
+
resolve({ valid: false, error: 'Invalid or expired API key' });
|
|
190
|
+
} else {
|
|
191
|
+
resolve({ valid: false, error: `API returned status ${res.statusCode}` });
|
|
192
|
+
}
|
|
193
|
+
});
|
|
194
|
+
});
|
|
195
|
+
|
|
196
|
+
req.on('error', (error) => {
|
|
197
|
+
resolve({ valid: true, warning: `Could not validate key: ${error.message}` });
|
|
198
|
+
});
|
|
199
|
+
|
|
200
|
+
req.on('timeout', () => {
|
|
201
|
+
req.destroy();
|
|
202
|
+
resolve({ valid: true, warning: 'Validation timed out; accepting key without full verification' });
|
|
203
|
+
});
|
|
204
|
+
|
|
205
|
+
req.write(payload);
|
|
206
|
+
req.end();
|
|
207
|
+
});
|
|
208
|
+
}
|
|
209
|
+
|
|
210
|
+
return { valid: false, error: `Unknown provider: ${provider}` };
|
|
211
|
+
} catch (error) {
|
|
212
|
+
return { valid: false, error: `Validation error: ${error.message}` };
|
|
213
|
+
}
|
|
214
|
+
}
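validateApiKey never rejects; it resolves with { valid, error?, warning?, rateLimited? }, so callers can gate configuration on it. A sketch, where the key value and require path are placeholders:
// Sketch: check a key before writing it into the Cline CLI config.
const { ClineCLIManager } = require('./cline-cli-manager.cjs'); // assumed relative path

async function checkAndConfigure(apiKey) {
  const manager = new ClineCLIManager();
  const check = await manager.validateApiKey('openrouter', apiKey);
  if (!check.valid) {
    if (check.rateLimited) console.error('Key is rate-limited:', check.error);
    else console.error('Rejected:', check.error);
    return false;
  }
  if (check.warning) console.warn(check.warning);  // network/timeout: accepted provisionally
  await manager.configureWithOpenRouter(apiKey);
  return true;
}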
|
|
215
|
+
|
|
216
|
+
/**
|
|
217
|
+
* Check if Cline CLI is configured with any API provider
|
|
218
|
+
*/
|
|
219
|
+
isConfigured() {
|
|
220
|
+
try {
|
|
221
|
+
const configPath = path.join(os.homedir(), '.cline_cli', 'cline_cli_settings.json');
|
|
222
|
+
if (!fs.existsSync(configPath)) {
|
|
223
|
+
return false;
|
|
224
|
+
}
|
|
225
|
+
|
|
226
|
+
const config = JSON.parse(fs.readFileSync(configPath, 'utf8'));
|
|
227
|
+
const apiProvider = config.globalState?.apiProvider;
|
|
228
|
+
const apiModelId = config.globalState?.apiModelId;
|
|
229
|
+
|
|
230
|
+
if (!apiProvider || !apiModelId || apiModelId === '') {
|
|
231
|
+
return false;
|
|
232
|
+
}
|
|
233
|
+
|
|
234
|
+
// Check if API key is available for the configured provider (and not empty)
|
|
235
|
+
// IMPORTANT: Cline CLI needs the key in the config file, not just in saved file
|
|
236
|
+
if (apiProvider === 'anthropic') {
|
|
237
|
+
let configKey = config.globalState?.anthropicApiKey;
|
|
238
|
+
const envKey = process.env.ANTHROPIC_API_KEY;
|
|
239
|
+
const savedKey = this.getSavedAnthropicKey();
|
|
240
|
+
|
|
241
|
+
// Check for invalid key marker (prevents re-syncing invalid keys)
|
|
242
|
+
const invalidKeyMarker = path.join(os.homedir(), '.allnightai', '.anthropic-key-invalid');
|
|
243
|
+
const isMarkedInvalid = fs.existsSync(invalidKeyMarker);
|
|
244
|
+
|
|
245
|
+
// If config file doesn't have key but saved file does, sync it to config
|
|
246
|
+
// BUT only if the key hasn't been marked as invalid
|
|
247
|
+
if (!configKey && savedKey && savedKey.trim().length > 0 && !isMarkedInvalid) {
|
|
248
|
+
this.logger.warn('Anthropic API key missing from config file but found in saved file - syncing...');
|
|
249
|
+
try {
|
|
250
|
+
config.globalState.anthropicApiKey = savedKey;
|
|
251
|
+
fs.writeFileSync(configPath, JSON.stringify(config, null, 4));
|
|
252
|
+
this.logger.log('✓ Synced API key from saved file to Cline CLI config');
|
|
253
|
+
// Update configKey to the newly synced value
|
|
254
|
+
configKey = savedKey;
|
|
255
|
+
} catch (error) {
|
|
256
|
+
this.logger.warn('Failed to sync API key to config:', error.message);
|
|
257
|
+
}
|
|
258
|
+
} else if (isMarkedInvalid) {
|
|
259
|
+
// Key was marked as invalid - don't use it, return false to force reconfiguration
|
|
260
|
+
this.logger.warn('Anthropic API key was marked as invalid - reconfiguration required');
|
|
261
|
+
return false;
|
|
262
|
+
}
|
|
263
|
+
|
|
264
|
+
// Check all sources, but ensure the key is non-empty
|
|
265
|
+
const anthropicApiKey = configKey || envKey || savedKey;
|
|
266
|
+
const isValid = anthropicApiKey && typeof anthropicApiKey === 'string' && anthropicApiKey.trim().length > 0;
|
|
267
|
+
|
|
268
|
+
if (!isValid) {
|
|
269
|
+
this.logger.warn('Anthropic API key found but is empty or invalid');
|
|
270
|
+
}
|
|
271
|
+
|
|
272
|
+
return isValid;
|
|
273
|
+
} else if (apiProvider === 'openrouter') {
|
|
274
|
+
let configKey = config.globalState?.openRouterApiKey;
|
|
275
|
+
const envKey = process.env.OPENROUTER_API_KEY;
|
|
276
|
+
const savedKey = this.getSavedOpenRouterKey();
|
|
277
|
+
|
|
278
|
+
// Check for invalid key marker (prevents re-syncing invalid keys)
|
|
279
|
+
const invalidKeyMarker = path.join(os.homedir(), '.allnightai', '.openrouter-key-invalid');
|
|
280
|
+
const isMarkedInvalid = fs.existsSync(invalidKeyMarker);
|
|
281
|
+
|
|
282
|
+
// If config file doesn't have key but saved file does, sync it to config
|
|
283
|
+
// BUT only if the key hasn't been marked as invalid
|
|
284
|
+
if (!configKey && savedKey && savedKey.trim().length > 0 && !isMarkedInvalid) {
|
|
285
|
+
this.logger.warn('OpenRouter API key missing from config file but found in saved file - syncing...');
|
|
286
|
+
try {
|
|
287
|
+
config.globalState.openRouterApiKey = savedKey;
|
|
288
|
+
fs.writeFileSync(configPath, JSON.stringify(config, null, 4));
|
|
289
|
+
this.logger.log('✓ Synced API key from saved file to Cline CLI config');
|
|
290
|
+
// Update configKey to the newly synced value
|
|
291
|
+
configKey = savedKey;
|
|
292
|
+
} catch (error) {
|
|
293
|
+
this.logger.warn('Failed to sync API key to config:', error.message);
|
|
294
|
+
}
|
|
295
|
+
} else if (isMarkedInvalid) {
|
|
296
|
+
// Key was marked as invalid - don't use it, return false to force reconfiguration
|
|
297
|
+
this.logger.warn('OpenRouter API key was marked as invalid - reconfiguration required');
|
|
298
|
+
return false;
|
|
299
|
+
}
|
|
300
|
+
|
|
301
|
+
// Check all sources, but ensure the key is non-empty
|
|
302
|
+
const openRouterApiKey = configKey || envKey || savedKey;
|
|
303
|
+
const isValid = openRouterApiKey && typeof openRouterApiKey === 'string' && openRouterApiKey.trim().length > 0;
|
|
304
|
+
|
|
305
|
+
if (!isValid) {
|
|
306
|
+
this.logger.warn('OpenRouter API key found but is empty or invalid');
|
|
307
|
+
}
|
|
308
|
+
|
|
309
|
+
return isValid;
|
|
310
|
+
} else if (apiProvider === 'openai-native' && config.globalState?.openAiBaseUrl?.includes('generativelanguage.googleapis.com')) {
|
|
311
|
+
// Gemini via OpenAI-compatible endpoint
|
|
312
|
+
let configKey = config.globalState?.openAiApiKey;
|
|
313
|
+
const envKey = process.env.GEMINI_API_KEY;
|
|
314
|
+
const savedKey = this.getSavedGeminiKey();
|
|
315
|
+
|
|
316
|
+
// Check for invalid key marker (prevents re-syncing invalid keys)
|
|
317
|
+
const invalidKeyMarker = path.join(os.homedir(), '.allnightai', '.gemini-key-invalid');
|
|
318
|
+
const isMarkedInvalid = fs.existsSync(invalidKeyMarker);
|
|
319
|
+
|
|
320
|
+
// If config file doesn't have key but saved file does, sync it to config
|
|
321
|
+
// BUT only if the key hasn't been marked as invalid
|
|
322
|
+
if (!configKey && savedKey && savedKey.trim().length > 0 && !isMarkedInvalid) {
|
|
323
|
+
this.logger.warn('Gemini API key missing from config file but found in saved file - syncing...');
|
|
324
|
+
try {
|
|
325
|
+
config.globalState.openAiApiKey = savedKey;
|
|
326
|
+
fs.writeFileSync(configPath, JSON.stringify(config, null, 4));
|
|
327
|
+
this.logger.log('✓ Synced API key from saved file to Cline CLI config');
|
|
328
|
+
// Update configKey to the newly synced value
|
|
329
|
+
configKey = savedKey;
|
|
330
|
+
} catch (error) {
|
|
331
|
+
this.logger.warn('Failed to sync API key to config:', error.message);
|
|
332
|
+
}
|
|
333
|
+
} else if (isMarkedInvalid) {
|
|
334
|
+
// Key was marked as invalid - don't use it, return false to force reconfiguration
|
|
335
|
+
this.logger.warn('Gemini API key was marked as invalid - reconfiguration required');
|
|
336
|
+
return false;
|
|
337
|
+
}
|
|
338
|
+
|
|
339
|
+
// Check all sources, but ensure the key is non-empty
|
|
340
|
+
const geminiApiKey = configKey || envKey || savedKey;
|
|
341
|
+
const isValid = geminiApiKey && typeof geminiApiKey === 'string' && geminiApiKey.trim().length > 0;
|
|
342
|
+
|
|
343
|
+
if (!isValid) {
|
|
344
|
+
this.logger.warn('Gemini API key found but is empty or invalid');
|
|
345
|
+
}
|
|
346
|
+
|
|
347
|
+
return isValid;
|
|
348
|
+
} else if (apiProvider === 'openai-native' && config.globalState?.openAiBaseUrl === 'http://localhost:11434/v1') {
|
|
349
|
+
// Ollama configuration - check if Ollama is installed and running
|
|
350
|
+
const isInstalled = this.isOllamaInstalled();
|
|
351
|
+
if (!isInstalled) {
|
|
352
|
+
this.logger.warn('Ollama is configured but not installed');
|
|
353
|
+
return false;
|
|
354
|
+
}
|
|
355
|
+
|
|
356
|
+
// Check if Ollama is running (async, but we'll treat as configured if installed)
|
|
357
|
+
// The actual running check will happen during startup
|
|
358
|
+
return true;
|
|
359
|
+
}
|
|
360
|
+
|
|
361
|
+
return false;
|
|
362
|
+
} catch (error) {
|
|
363
|
+
this.logger.warn('Failed to check Cline CLI configuration:', error.message);
|
|
364
|
+
return false;
|
|
365
|
+
}
|
|
366
|
+
}
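isConfigured is synchronous and, as the branches above show, may itself repair the config by syncing a saved key into the Cline CLI settings file. A sketch of the typical gate before dispatching work; the require path is an assumption.
// Sketch: decide whether a task can start or whether provider setup is needed first.
const { ClineCLIManager } = require('./cline-cli-manager.cjs'); // assumed relative path

const manager = new ClineCLIManager();
if (!manager.isInstalled()) {
  console.error('cline-cli not installed; run manager.install() first');
} else if (!manager.isConfigured()) {
  console.error('No provider configured, or the saved key was marked invalid');
} else {
  console.log('Ready to dispatch tasks');
}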
|
|
367
|
+
|
|
368
|
+
/**
|
|
369
|
+
* Get saved Anthropic API key
|
|
370
|
+
*/
|
|
371
|
+
getSavedAnthropicKey() {
|
|
372
|
+
try {
|
|
373
|
+
const apiKeyFile = path.join(os.homedir(), '.allnightai', 'anthropic-api-key.txt');
|
|
374
|
+
if (fs.existsSync(apiKeyFile)) {
|
|
375
|
+
const key = fs.readFileSync(apiKeyFile, 'utf8').trim();
|
|
376
|
+
// Return null if key is empty (not just whitespace)
|
|
377
|
+
return key.length > 0 ? key : null;
|
|
378
|
+
}
|
|
379
|
+
return null;
|
|
380
|
+
} catch {
|
|
381
|
+
return null;
|
|
382
|
+
}
|
|
383
|
+
}
|
|
384
|
+
|
|
385
|
+
/**
|
|
386
|
+
* Save Anthropic API key
|
|
387
|
+
*/
|
|
388
|
+
saveAnthropicKey(apiKey) {
|
|
389
|
+
try {
|
|
390
|
+
const configDir = path.join(os.homedir(), '.allnightai');
|
|
391
|
+
fs.mkdirSync(configDir, { recursive: true });
|
|
392
|
+
const apiKeyFile = path.join(configDir, 'anthropic-api-key.txt');
|
|
393
|
+
fs.writeFileSync(apiKeyFile, apiKey, { mode: 0o600 }); // Secure permissions
|
|
394
|
+
return true;
|
|
395
|
+
} catch {
|
|
396
|
+
return false;
|
|
397
|
+
}
|
|
398
|
+
}
|
|
399
|
+
|
|
400
|
+
/**
|
|
401
|
+
* Get saved OpenRouter API key
|
|
402
|
+
*/
|
|
403
|
+
getSavedOpenRouterKey() {
|
|
404
|
+
try {
|
|
405
|
+
const apiKeyFile = path.join(os.homedir(), '.allnightai', 'openrouter-api-key.txt');
|
|
406
|
+
if (fs.existsSync(apiKeyFile)) {
|
|
407
|
+
const key = fs.readFileSync(apiKeyFile, 'utf8').trim();
|
|
408
|
+
// Return null if key is empty (not just whitespace)
|
|
409
|
+
return key.length > 0 ? key : null;
|
|
410
|
+
}
|
|
411
|
+
return null;
|
|
412
|
+
} catch {
|
|
413
|
+
return null;
|
|
414
|
+
}
|
|
415
|
+
}
|
|
416
|
+
|
|
417
|
+
/**
|
|
418
|
+
* Save OpenRouter API key
|
|
419
|
+
*/
|
|
420
|
+
saveOpenRouterKey(apiKey) {
|
|
421
|
+
try {
|
|
422
|
+
const configDir = path.join(os.homedir(), '.allnightai');
|
|
423
|
+
fs.mkdirSync(configDir, { recursive: true });
|
|
424
|
+
const apiKeyFile = path.join(configDir, 'openrouter-api-key.txt');
|
|
425
|
+
fs.writeFileSync(apiKeyFile, apiKey, { mode: 0o600 }); // Secure permissions
|
|
426
|
+
return true;
|
|
427
|
+
} catch {
|
|
428
|
+
return false;
|
|
429
|
+
}
|
|
430
|
+
}
|
|
431
|
+
|
|
432
|
+
/**
|
|
433
|
+
* Get saved Google Gemini API key
|
|
434
|
+
*/
|
|
435
|
+
getSavedGeminiKey() {
|
|
436
|
+
try {
|
|
437
|
+
const apiKeyFile = path.join(os.homedir(), '.allnightai', 'gemini-api-key.txt');
|
|
438
|
+
if (fs.existsSync(apiKeyFile)) {
|
|
439
|
+
const key = fs.readFileSync(apiKeyFile, 'utf8').trim();
|
|
440
|
+
return key.length > 0 ? key : null;
|
|
441
|
+
}
|
|
442
|
+
return null;
|
|
443
|
+
} catch {
|
|
444
|
+
return null;
|
|
445
|
+
}
|
|
446
|
+
}
|
|
447
|
+
|
|
448
|
+
/**
|
|
449
|
+
* Save Google Gemini API key
|
|
450
|
+
*/
|
|
451
|
+
saveGeminiKey(apiKey) {
|
|
452
|
+
try {
|
|
453
|
+
const configDir = path.join(os.homedir(), '.allnightai');
|
|
454
|
+
fs.mkdirSync(configDir, { recursive: true });
|
|
455
|
+
const apiKeyFile = path.join(configDir, 'gemini-api-key.txt');
|
|
456
|
+
fs.writeFileSync(apiKeyFile, apiKey, { mode: 0o600 }); // Secure permissions
|
|
457
|
+
return true;
|
|
458
|
+
} catch {
|
|
459
|
+
return false;
|
|
460
|
+
}
|
|
461
|
+
}
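// Usage sketch (key value assumed for illustration): saveGeminiKey('AIza...') creates
// ~/.allnightai/gemini-api-key.txt with 0o600 permissions, and a later getSavedGeminiKey()
// returns the trimmed key, or null when the file is missing or empty. The Anthropic and
// OpenRouter helpers above follow the same pattern with their own filenames.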
|
|
462
|
+
|
|
463
|
+
/**
|
|
464
|
+
* Get the path to the Ollama binary
|
|
465
|
+
* Returns the binary path if found, or null
|
|
466
|
+
*/
|
|
467
|
+
getOllamaBinaryPath() {
|
|
468
|
+
// First check if command is in PATH
|
|
469
|
+
try {
|
|
470
|
+
const whichResult = execSync('which ollama', { encoding: 'utf8', stdio: 'pipe' });
|
|
471
|
+
const pathFound = whichResult.trim();
|
|
472
|
+
// Verify it's actually executable
|
|
473
|
+
if (pathFound && fs.existsSync(pathFound)) {
|
|
474
|
+
try {
|
|
475
|
+
fs.accessSync(pathFound, fs.constants.F_OK | fs.constants.X_OK);
|
|
476
|
+
// Resolve symlinks to get actual binary path
|
|
477
|
+
try {
|
|
478
|
+
const realPath = fs.realpathSync(pathFound);
|
|
479
|
+
return realPath;
|
|
480
|
+
} catch {
|
|
481
|
+
return pathFound;
|
|
482
|
+
}
|
|
483
|
+
} catch {
|
|
484
|
+
// Not executable, continue searching
|
|
485
|
+
}
|
|
486
|
+
}
|
|
487
|
+
} catch {
|
|
488
|
+
// Command not in PATH, continue to search app bundle
|
|
489
|
+
}
|
|
490
|
+
|
|
491
|
+
// Not in PATH, try to find in app bundle (macOS)
|
|
492
|
+
if (os.platform() === 'darwin') {
|
|
493
|
+
const appPath = '/Applications/Ollama.app';
|
|
494
|
+
const userAppPath = path.join(os.homedir(), 'Applications', 'Ollama.app');
|
|
495
|
+
|
|
496
|
+
// Check common locations for the binary in the app bundle
|
|
497
|
+
const possiblePaths = [
|
|
498
|
+
path.join(appPath, 'Contents', 'Resources', 'ollama'),
|
|
499
|
+
path.join(appPath, 'Contents', 'MacOS', 'ollama'),
|
|
500
|
+
path.join(userAppPath, 'Contents', 'Resources', 'ollama'),
|
|
501
|
+
path.join(userAppPath, 'Contents', 'MacOS', 'ollama'),
|
|
502
|
+
'/usr/local/bin/ollama',
|
|
503
|
+
'/usr/bin/ollama'
|
|
504
|
+
];
|
|
505
|
+
|
|
506
|
+
for (const binPath of possiblePaths) {
|
|
507
|
+
try {
|
|
508
|
+
if (fs.existsSync(binPath)) {
|
|
509
|
+
// Check if it's executable
|
|
510
|
+
fs.accessSync(binPath, fs.constants.F_OK | fs.constants.X_OK);
|
|
511
|
+
// Resolve symlinks to get actual binary path
|
|
512
|
+
try {
|
|
513
|
+
const realPath = fs.realpathSync(binPath);
|
|
514
|
+
// Verify resolved path exists and is executable
|
|
515
|
+
if (fs.existsSync(realPath)) {
|
|
516
|
+
fs.accessSync(realPath, fs.constants.F_OK | fs.constants.X_OK);
|
|
517
|
+
return realPath;
|
|
518
|
+
}
|
|
519
|
+
} catch {
|
|
520
|
+
// If realpath fails, use original path
|
|
521
|
+
return binPath;
|
|
522
|
+
}
|
|
523
|
+
}
|
|
524
|
+
} catch {
|
|
525
|
+
// Not executable or doesn't exist, try next path
|
|
526
|
+
continue;
|
|
527
|
+
}
|
|
528
|
+
}
|
|
529
|
+
}
|
|
530
|
+
|
|
531
|
+
return null;
|
|
532
|
+
}
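// Resolution order sketch for getOllamaBinaryPath(): `which ollama` (with symlinks resolved
// via realpathSync), then the macOS app-bundle locations listed above, e.g.
// /Applications/Ollama.app/Contents/Resources/ollama, and finally /usr/local/bin or /usr/bin;
// a null return means no executable binary was found anywhere.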
|
|
533
|
+
|
|
534
|
+
/**
|
|
535
|
+
* Check if Ollama is installed
|
|
536
|
+
*/
|
|
537
|
+
isOllamaInstalled() {
|
|
538
|
+
// First check if the command is in PATH (preferred method)
|
|
539
|
+
try {
|
|
540
|
+
execSync('which ollama', { stdio: 'pipe' });
|
|
541
|
+
return true;
|
|
542
|
+
} catch {
|
|
543
|
+
// Command not in PATH, check if app bundle exists (macOS)
|
|
544
|
+
if (os.platform() === 'darwin') {
|
|
545
|
+
const appPath = '/Applications/Ollama.app';
|
|
546
|
+
const userAppPath = path.join(os.homedir(), 'Applications', 'Ollama.app');
|
|
547
|
+
if (fs.existsSync(appPath) || fs.existsSync(userAppPath)) {
|
|
548
|
+
return true;
|
|
549
|
+
}
|
|
550
|
+
}
|
|
551
|
+
return false;
|
|
552
|
+
}
|
|
553
|
+
}
|
|
554
|
+
|
|
555
|
+
/**
|
|
556
|
+
* Attempt to automatically fix Ollama PATH issue on macOS
|
|
557
|
+
* Returns true if fixed, false otherwise
|
|
558
|
+
*/
|
|
559
|
+
async fixOllamaPath() {
|
|
560
|
+
if (os.platform() !== 'darwin') {
|
|
561
|
+
return false;
|
|
562
|
+
}
|
|
563
|
+
|
|
564
|
+
const appPath = '/Applications/Ollama.app';
|
|
565
|
+
const userAppPath = path.join(os.homedir(), 'Applications', 'Ollama.app');
|
|
566
|
+
const appExists = fs.existsSync(appPath) || fs.existsSync(userAppPath);
|
|
567
|
+
|
|
568
|
+
if (!appExists) {
|
|
569
|
+
return false;
|
|
570
|
+
}
|
|
571
|
+
|
|
572
|
+
try {
|
|
573
|
+
// Method 1: Try to launch Ollama.app once to initialize CLI
|
|
574
|
+
// This is the recommended way - launching the app adds the command to PATH
|
|
575
|
+
const appToLaunch = fs.existsSync(appPath) ? appPath : userAppPath;
|
|
576
|
+
this.logger.log('Launching Ollama.app to initialize CLI tool...');
|
|
577
|
+
|
|
578
|
+
spawn('open', [appToLaunch], {
|
|
579
|
+
detached: true,
|
|
580
|
+
stdio: 'ignore'
|
|
581
|
+
});
|
|
582
|
+
|
|
583
|
+
// Wait a moment for the app to initialize
|
|
584
|
+
await new Promise(resolve => setTimeout(resolve, 3000));
|
|
585
|
+
|
|
586
|
+
// Check if command is now available
|
|
587
|
+
try {
|
|
588
|
+
execSync('which ollama', { stdio: 'pipe' });
|
|
589
|
+
this.logger.log('✓ Ollama CLI tool is now available');
|
|
590
|
+
return true;
|
|
591
|
+
} catch {
|
|
592
|
+
// Still not in PATH, try method 2
|
|
593
|
+
}
|
|
594
|
+
|
|
595
|
+
// Method 2: Find binary in app bundle and create symlink
|
|
596
|
+
const binaryPath = this.getOllamaBinaryPath();
|
|
597
|
+
if (binaryPath && binaryPath !== '/usr/local/bin/ollama') {
|
|
598
|
+
try {
|
|
599
|
+
const targetPath = '/usr/local/bin/ollama';
|
|
600
|
+
const targetDir = path.dirname(targetPath);
|
|
601
|
+
|
|
602
|
+
// Ensure /usr/local/bin exists
|
|
603
|
+
if (!fs.existsSync(targetDir)) {
|
|
604
|
+
fs.mkdirSync(targetDir, { recursive: true });
|
|
605
|
+
}
|
|
606
|
+
|
|
607
|
+
// Remove existing symlink/file if it exists
|
|
608
|
+
if (fs.existsSync(targetPath)) {
|
|
609
|
+
fs.unlinkSync(targetPath);
|
|
610
|
+
}
|
|
611
|
+
|
|
612
|
+
// Create symlink
|
|
613
|
+
fs.symlinkSync(binaryPath, targetPath);
|
|
614
|
+
this.logger.log('✓ Created symlink to Ollama CLI tool');
|
|
615
|
+
|
|
616
|
+
// Verify it works
|
|
617
|
+
try {
|
|
618
|
+
execSync('which ollama', { stdio: 'pipe' });
|
|
619
|
+
return true;
|
|
620
|
+
} catch {
|
|
621
|
+
// Symlink created but still not in PATH - might need shell restart
|
|
622
|
+
return false;
|
|
623
|
+
}
|
|
624
|
+
} catch (error) {
|
|
625
|
+
// Symlink creation failed (might need sudo)
|
|
626
|
+
this.logger.log(`⚠ Could not create symlink: ${error.message}`);
|
|
627
|
+
return false;
|
|
628
|
+
}
|
|
629
|
+
}
|
|
630
|
+
|
|
631
|
+
// Method 3: Try to find and use the binary directly from app bundle
|
|
632
|
+
const directPath = this.getOllamaBinaryPath();
|
|
633
|
+
if (directPath) {
|
|
634
|
+
// At least we found it, even if not in PATH
|
|
635
|
+
// The pullOllamaModel will handle using the direct path
|
|
636
|
+
return true;
|
|
637
|
+
}
|
|
638
|
+
|
|
639
|
+
return false;
|
|
640
|
+
} catch (error) {
|
|
641
|
+
this.logger.log(`⚠ Error fixing Ollama PATH: ${error.message}`);
|
|
642
|
+
return false;
|
|
643
|
+
}
|
|
644
|
+
}
|
|
645
|
+
|
|
646
|
+
/**
|
|
647
|
+
* Check if Ollama service is running
|
|
648
|
+
*/
|
|
649
|
+
async isOllamaRunning() {
|
|
650
|
+
try {
|
|
651
|
+
const http = require('http');
|
|
652
|
+
return new Promise((resolve) => {
|
|
653
|
+
const req = http.request({
|
|
654
|
+
hostname: 'localhost',
|
|
655
|
+
port: 11434,
|
|
656
|
+
path: '/api/tags',
|
|
657
|
+
method: 'GET',
|
|
658
|
+
timeout: 2000
|
|
659
|
+
}, (res) => {
|
|
660
|
+
resolve(res.statusCode === 200);
|
|
661
|
+
});
|
|
662
|
+
req.on('error', () => resolve(false));
|
|
663
|
+
req.on('timeout', () => {
|
|
664
|
+
req.destroy();
|
|
665
|
+
resolve(false);
|
|
666
|
+
});
|
|
667
|
+
req.end();
|
|
668
|
+
});
|
|
669
|
+
} catch {
|
|
670
|
+
return Promise.resolve(false);
|
|
671
|
+
}
|
|
672
|
+
}
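// Equivalent manual check (assuming the default Ollama port used above):
//   curl -s http://localhost:11434/api/tags
// A 200 response within the 2-second timeout is what this method treats as "running".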
|
|
673
|
+
|
|
674
|
+
/**
|
|
675
|
+
* Verify Ollama API is accessible at the endpoint Cline CLI uses
|
|
676
|
+
* Tests the /v1/models endpoint that Cline CLI will use
|
|
677
|
+
*/
|
|
678
|
+
async verifyOllamaAPI() {
|
|
679
|
+
try {
|
|
680
|
+
const http = require('http');
|
|
681
|
+
return new Promise((resolve) => {
|
|
682
|
+
const req = http.request({
|
|
683
|
+
hostname: 'localhost',
|
|
684
|
+
port: 11434,
|
|
685
|
+
path: '/v1/models',
|
|
686
|
+
method: 'GET',
|
|
687
|
+
timeout: 3000,
|
|
688
|
+
headers: {
|
|
689
|
+
'Authorization': 'Bearer ollama'
|
|
690
|
+
}
|
|
691
|
+
}, (res) => {
|
|
692
|
+
let data = '';
|
|
693
|
+
res.on('data', chunk => data += chunk);
|
|
694
|
+
res.on('end', () => {
|
|
695
|
+
try {
|
|
696
|
+
const models = JSON.parse(data);
|
|
697
|
+
resolve({ success: true, models: models.data || [] });
|
|
698
|
+
} catch {
|
|
699
|
+
// Response received but not valid JSON - service is running but may be misconfigured
|
|
700
|
+
resolve({ success: res.statusCode === 200, models: [] });
|
|
701
|
+
}
|
|
702
|
+
});
|
|
703
|
+
});
|
|
704
|
+
req.on('error', () => resolve({ success: false, models: [] }));
|
|
705
|
+
req.on('timeout', () => {
|
|
706
|
+
req.destroy();
|
|
707
|
+
resolve({ success: false, models: [] });
|
|
708
|
+
});
|
|
709
|
+
req.end();
|
|
710
|
+
});
|
|
711
|
+
} catch {
|
|
712
|
+
return Promise.resolve({ success: false, models: [] });
|
|
713
|
+
}
|
|
714
|
+
}
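// Usage sketch: const { success, models } = await this.verifyOllamaAPI();
// `models` is the OpenAI-style model list returned by /v1/models; it may be empty when the
// endpoint answered but the body was not valid JSON.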
|
|
715
|
+
|
|
716
|
+
/**
|
|
717
|
+
* Verify a specific model is accessible via Ollama API
|
|
718
|
+
*/
|
|
719
|
+
async verifyModelAccessible(modelName) {
|
|
720
|
+
try {
|
|
721
|
+
const apiResult = await this.verifyOllamaAPI();
|
|
722
|
+
if (!apiResult.success) {
|
|
723
|
+
return false;
|
|
724
|
+
}
|
|
725
|
+
// Check if model is in the list (might be named with or without tag)
|
|
726
|
+
const modelBase = modelName.split(':')[0];
|
|
727
|
+
return apiResult.models.some(m => {
|
|
728
|
+
const modelId = m.id || '';
|
|
729
|
+
return modelId.includes(modelName) || modelId.includes(modelBase);
|
|
730
|
+
});
|
|
731
|
+
} catch {
|
|
732
|
+
return false;
|
|
733
|
+
}
|
|
734
|
+
}
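// Example (model name assumed): verifyModelAccessible('deepseek-coder:33b') also matches
// entries whose id only contains the base name 'deepseek-coder', since the tag after ':' may
// be omitted in the API listing.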
|
|
735
|
+
|
|
736
|
+
/**
|
|
737
|
+
* Start the Ollama service
|
|
738
|
+
* Returns true if started successfully, false otherwise
|
|
739
|
+
*/
|
|
740
|
+
async startOllamaService() {
|
|
741
|
+
try {
|
|
742
|
+
// Check if already running
|
|
743
|
+
if (await this.isOllamaRunning()) {
|
|
744
|
+
return true;
|
|
745
|
+
}
|
|
746
|
+
|
|
747
|
+
// Get the ollama binary path
|
|
748
|
+
const binaryPath = this.getOllamaBinaryPath() || 'ollama';
|
|
749
|
+
|
|
750
|
+
this.logger.log('Starting Ollama service...');
|
|
751
|
+
|
|
752
|
+
// Start the service in background
|
|
753
|
+
spawn(binaryPath, ['serve'], {
|
|
754
|
+
detached: true,
|
|
755
|
+
stdio: 'ignore'
|
|
756
|
+
});
|
|
757
|
+
|
|
758
|
+
// Wait for service to start (check every 500ms, max 10 seconds)
|
|
759
|
+
for (let i = 0; i < 20; i++) {
|
|
760
|
+
await new Promise(resolve => setTimeout(resolve, 500));
|
|
761
|
+
if (await this.isOllamaRunning()) {
|
|
762
|
+
this.logger.log('✓ Ollama service started');
|
|
763
|
+
return true;
|
|
764
|
+
}
|
|
765
|
+
}
|
|
766
|
+
|
|
767
|
+
// Still not running after 10 seconds
|
|
768
|
+
this.logger.log('⚠ Ollama service may still be starting...');
|
|
769
|
+
return false;
|
|
770
|
+
} catch (error) {
|
|
771
|
+
this.logger.log(`⚠ Error starting Ollama service: ${error.message}`);
|
|
772
|
+
return false;
|
|
773
|
+
}
|
|
774
|
+
}
|
|
775
|
+
|
|
776
|
+
/**
|
|
777
|
+
* Get list of installed Ollama models
|
|
778
|
+
* Returns empty array if command fails (e.g., command not in PATH)
|
|
779
|
+
*/
|
|
780
|
+
async getOllamaModels() {
|
|
781
|
+
try {
|
|
782
|
+
// Try to get the binary path (handles PATH issues automatically)
|
|
783
|
+
const binaryPath = this.getOllamaBinaryPath();
|
|
784
|
+
const ollamaCmd = binaryPath || 'ollama';
|
|
785
|
+
|
|
786
|
+
const result = execSync(`${ollamaCmd} list`, { encoding: 'utf8', stdio: 'pipe' });
|
|
787
|
+
const lines = result.split('\n').slice(1); // Skip header
|
|
788
|
+
const models = lines
|
|
789
|
+
.filter(line => line.trim().length > 0)
|
|
790
|
+
.map(line => {
|
|
791
|
+
const parts = line.trim().split(/\s+/);
|
|
792
|
+
return parts[0]; // Model name is first column
|
|
793
|
+
});
|
|
794
|
+
return models;
|
|
795
|
+
} catch (error) {
|
|
796
|
+
// If command not found, try to fix automatically (macOS)
|
|
797
|
+
if (error.code === 'ENOENT' && os.platform() === 'darwin' && this.isOllamaInstalled()) {
|
|
798
|
+
try {
|
|
799
|
+
await this.fixOllamaPath();
|
|
800
|
+
// Retry after fixing
|
|
801
|
+
const binaryPath = this.getOllamaBinaryPath();
|
|
802
|
+
const ollamaCmd = binaryPath || 'ollama';
|
|
803
|
+
const result = execSync(`${ollamaCmd} list`, { encoding: 'utf8', stdio: 'pipe' });
|
|
804
|
+
const lines = result.split('\n').slice(1);
|
|
805
|
+
const models = lines
|
|
806
|
+
.filter(line => line.trim().length > 0)
|
|
807
|
+
.map(line => {
|
|
808
|
+
const parts = line.trim().split(/\s+/);
|
|
809
|
+
return parts[0];
|
|
810
|
+
});
|
|
811
|
+
return models;
|
|
812
|
+
} catch {
|
|
813
|
+
// Still failed, return empty array
|
|
814
|
+
return [];
|
|
815
|
+
}
|
|
816
|
+
}
|
|
817
|
+
// If command not found or other error, return empty array (will trigger download prompt)
|
|
818
|
+
return [];
|
|
819
|
+
}
|
|
820
|
+
}
|
|
821
|
+
|
|
822
|
+
/**
|
|
823
|
+
* Pull an Ollama model
|
|
824
|
+
* Returns { success: boolean, error?: string, errorCode?: string, resolution?: string }
|
|
825
|
+
*/
|
|
826
|
+
async pullOllamaModel(modelName, onProgress) {
|
|
827
|
+
try {
|
|
828
|
+
return new Promise(async (resolve) => {
|
|
829
|
+
// Get the path to ollama binary (may need to auto-fix PATH on macOS)
|
|
830
|
+
let ollamaPath = 'ollama';
|
|
831
|
+
let shouldRetry = false;
|
|
832
|
+
|
|
833
|
+
try {
|
|
834
|
+
// Try to find the binary
|
|
835
|
+
const binaryPath = this.getOllamaBinaryPath();
|
|
836
|
+
if (binaryPath) {
|
|
837
|
+
ollamaPath = binaryPath;
|
|
838
|
+
} else {
|
|
839
|
+
// Command not in PATH - try to fix automatically (macOS)
|
|
840
|
+
if (os.platform() === 'darwin' && this.isOllamaInstalled()) {
|
|
841
|
+
this.logger.log('⚠ Ollama command not in PATH, attempting to fix automatically...');
|
|
842
|
+
const fixed = await this.fixOllamaPath();
|
|
843
|
+
if (fixed) {
|
|
844
|
+
shouldRetry = true;
|
|
845
|
+
// Wait a moment for PATH to update
|
|
846
|
+
await new Promise(r => setTimeout(r, 1000));
|
|
847
|
+
// Re-get binary path after fixing
|
|
848
|
+
const newBinaryPath = this.getOllamaBinaryPath();
|
|
849
|
+
if (newBinaryPath) {
|
|
850
|
+
ollamaPath = newBinaryPath;
|
|
851
|
+
}
|
|
852
|
+
}
|
|
853
|
+
}
|
|
854
|
+
}
|
|
855
|
+
} catch {
|
|
856
|
+
// Continue with default 'ollama' command
|
|
857
|
+
}
|
|
858
|
+
|
|
859
|
+
// Ensure Ollama service is running before attempting to pull
|
|
860
|
+
if (!(await this.isOllamaRunning())) {
|
|
861
|
+
this.logger.log('⚠ Ollama service not running, starting it automatically...');
|
|
862
|
+
await this.startOllamaService();
|
|
863
|
+
}
|
|
864
|
+
|
|
865
|
+
// Helper functions (defined before use)
|
|
866
|
+
// Helper to parse size strings like "397 MB", "1.5 KB", "490 B"
|
|
867
|
+
const parseSize = (sizeStr) => {
|
|
868
|
+
const match = sizeStr.match(/([\d.]+)\s*(B|KB|MB|GB|TB)/i);
|
|
869
|
+
if (!match) return 0;
|
|
870
|
+
const value = parseFloat(match[1]);
|
|
871
|
+
const unit = match[2].toUpperCase();
|
|
872
|
+
const multipliers = { 'B': 1, 'KB': 1024, 'MB': 1024 * 1024, 'GB': 1024 * 1024 * 1024, 'TB': 1024 * 1024 * 1024 * 1024 };
|
|
873
|
+
return Math.floor(value * (multipliers[unit] || 1));
|
|
874
|
+
};
|
|
875
|
+
|
|
876
|
+
// Helper to format bytes
|
|
877
|
+
const formatBytes = (bytes) => {
|
|
878
|
+
if (bytes === 0) return '0 B';
|
|
879
|
+
const k = 1024;
|
|
880
|
+
const sizes = ['B', 'KB', 'MB', 'GB', 'TB'];
|
|
881
|
+
const i = Math.floor(Math.log(bytes) / Math.log(k));
|
|
882
|
+
return `${(bytes / Math.pow(k, i)).toFixed(i === 0 ? 0 : 1)} ${sizes[i]}`;
|
|
883
|
+
};
|
|
884
|
+
|
|
885
|
+
// Helper to create progress bar
|
|
886
|
+
const createProgressBar = (percentage, width = 25) => {
|
|
887
|
+
const filled = Math.floor((percentage / 100) * width);
|
|
888
|
+
const empty = width - filled;
|
|
889
|
+
return '█'.repeat(filled) + '░'.repeat(empty);
|
|
890
|
+
};
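// Worked example for the helpers above: parseSize('397 MB') -> 416284672 bytes,
// formatBytes(416284672) -> '397.0 MB', and createProgressBar(40) -> 10 filled blocks out
// of 25 ('██████████░░░░░░░░░░░░░░░').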
|
|
891
|
+
|
|
892
|
+
// Send initial progress update (0%)
|
|
893
|
+
if (onProgress) {
|
|
894
|
+
onProgress({
|
|
895
|
+
percentage: 0,
|
|
896
|
+
downloaded: '0 B',
|
|
897
|
+
total: 'Unknown',
|
|
898
|
+
currentPercent: 0,
|
|
899
|
+
progressBar: createProgressBar(0)
|
|
900
|
+
});
|
|
901
|
+
}
|
|
902
|
+
|
|
903
|
+
const proc = spawn(ollamaPath, ['pull', modelName]);
|
|
904
|
+
|
|
905
|
+
let output = '';
|
|
906
|
+
let lastProgressValue = -1;
|
|
907
|
+
const layers = new Map(); // Track each layer's size and progress
|
|
908
|
+
let totalSizeBytes = 0;
|
|
909
|
+
let downloadedBytes = 0;
|
|
910
|
+
|
|
911
|
+
proc.stdout.on('data', (data) => {
|
|
912
|
+
const text = data.toString();
|
|
913
|
+
output += text;
|
|
914
|
+
|
|
915
|
+
// Parse each line for progress information
|
|
916
|
+
const lines = text.split(/\r?\n/); // Handle both \n and \r\n
|
|
917
|
+
let foundProgress = false;
|
|
918
|
+
|
|
919
|
+
for (const line of lines) {
|
|
920
|
+
const trimmedLine = line.trim();
|
|
921
|
+
|
|
922
|
+
// Skip empty lines
|
|
923
|
+
if (!trimmedLine) continue;
|
|
924
|
+
|
|
925
|
+
// Multiple patterns to match different Ollama output formats:
|
|
926
|
+
// Pattern 1: "pulling <hash>... XX% ▕██████████████████████████▏ <size> MB"
|
|
927
|
+
// Pattern 2: "pulling <hash>... XX% <size> MB"
|
|
928
|
+
// Pattern 3: "<hash>... XX% <size> MB"
|
|
929
|
+
// Pattern 4: Just percentage in any format
|
|
930
|
+
|
|
931
|
+
let progressMatch = trimmedLine.match(/pulling\s+(\S+).*?(\d+)%\s*[▏▕█░\s]*\s*([\d.]+\s*(?:B|KB|MB|GB|TB))/i);
|
|
932
|
+
|
|
933
|
+
// Try alternative patterns if first doesn't match
|
|
934
|
+
if (!progressMatch) {
|
|
935
|
+
progressMatch = trimmedLine.match(/(\w+)\s*\.\.\.\s*(\d+)%\s*([\d.]+\s*(?:B|KB|MB|GB|TB))/i);
|
|
936
|
+
}
|
|
937
|
+
|
|
938
|
+
if (!progressMatch) {
|
|
939
|
+
progressMatch = trimmedLine.match(/(\d+)%\s*[▏▕█░\s]*\s*([\d.]+\s*(?:B|KB|MB|GB|TB))/i);
|
|
940
|
+
}
|
|
941
|
+
|
|
942
|
+
if (progressMatch) {
|
|
943
|
+
const layerHash = progressMatch[1] ? progressMatch[1].substring(0, 12) : 'layer';
|
|
944
|
+
const progressPercent = parseInt(progressMatch[2] || progressMatch[1]);
|
|
945
|
+
const sizeStr = progressMatch[3] || progressMatch[2];
|
|
946
|
+
const layerTotalBytes = parseSize(sizeStr);
|
|
947
|
+
|
|
948
|
+
// Track this layer
|
|
949
|
+
if (!layers.has(layerHash)) {
|
|
950
|
+
layers.set(layerHash, { total: layerTotalBytes, progress: 0 });
|
|
951
|
+
// Add to total size
|
|
952
|
+
totalSizeBytes += layerTotalBytes;
|
|
953
|
+
}
|
|
954
|
+
|
|
955
|
+
// Update layer progress
|
|
956
|
+
const layer = layers.get(layerHash);
|
|
957
|
+
layer.progress = progressPercent;
|
|
958
|
+
|
|
959
|
+
// Calculate total downloaded across all layers
|
|
960
|
+
downloadedBytes = 0;
|
|
961
|
+
for (const [hash, layerData] of layers.entries()) {
|
|
962
|
+
downloadedBytes += Math.floor((layerData.progress / 100) * layerData.total);
|
|
963
|
+
}
|
|
964
|
+
|
|
965
|
+
// Calculate overall percentage
|
|
966
|
+
const overallProgress = totalSizeBytes > 0
|
|
967
|
+
? Math.min(100, Math.floor((downloadedBytes / totalSizeBytes) * 100))
|
|
968
|
+
: progressPercent;
|
|
969
|
+
|
|
970
|
+
// Update progress (even small changes for better UX)
|
|
971
|
+
if (overallProgress !== lastProgressValue && onProgress) {
|
|
972
|
+
lastProgressValue = overallProgress;
|
|
973
|
+
|
|
974
|
+
const progressInfo = {
|
|
975
|
+
percentage: overallProgress,
|
|
976
|
+
downloaded: formatBytes(downloadedBytes),
|
|
977
|
+
total: formatBytes(totalSizeBytes),
|
|
978
|
+
currentPercent: progressPercent,
|
|
979
|
+
progressBar: createProgressBar(overallProgress)
|
|
980
|
+
};
|
|
981
|
+
|
|
982
|
+
onProgress(progressInfo);
|
|
983
|
+
foundProgress = true;
|
|
984
|
+
}
|
|
985
|
+
}
|
|
986
|
+
|
|
987
|
+
// Always try to find percentage as fallback (even if size parsing failed)
|
|
988
|
+
const simpleProgressMatch = trimmedLine.match(/(\d+)%/);
|
|
989
|
+
if (simpleProgressMatch && onProgress) {
|
|
990
|
+
foundProgress = true;
|
|
991
|
+
const progressPercent = parseInt(simpleProgressMatch[1]);
|
|
992
|
+
// Update when the percentage changed (or on the very first update, while lastProgressValue is still -1)
|
|
993
|
+
if (lastProgressValue === -1 || Math.abs(progressPercent - lastProgressValue) >= 1) {
|
|
994
|
+
lastProgressValue = progressPercent;
|
|
995
|
+
onProgress({
|
|
996
|
+
percentage: progressPercent,
|
|
997
|
+
downloaded: totalSizeBytes > 0 ? formatBytes(downloadedBytes) : 'Unknown',
|
|
998
|
+
total: totalSizeBytes > 0 ? formatBytes(totalSizeBytes) : 'Unknown',
|
|
999
|
+
currentPercent: progressPercent,
|
|
1000
|
+
progressBar: createProgressBar(progressPercent)
|
|
1001
|
+
});
|
|
1002
|
+
}
|
|
1003
|
+
}
|
|
1004
|
+
}
|
|
1005
|
+
|
|
1006
|
+
// If we got output but no progress yet, show a minimal update
|
|
1007
|
+
// This handles cases where Ollama outputs non-standard formats
|
|
1008
|
+
if (!foundProgress && text.length > 0 && onProgress && lastProgressValue < 1) {
|
|
1009
|
+
// Show 1% to indicate something is happening
|
|
1010
|
+
if (lastProgressValue < 1) {
|
|
1011
|
+
lastProgressValue = 1;
|
|
1012
|
+
onProgress({
|
|
1013
|
+
percentage: 1,
|
|
1014
|
+
downloaded: 'Unknown',
|
|
1015
|
+
total: 'Unknown',
|
|
1016
|
+
currentPercent: 1,
|
|
1017
|
+
progressBar: createProgressBar(1)
|
|
1018
|
+
});
|
|
1019
|
+
}
|
|
1020
|
+
}
|
|
1021
|
+
});
|
|
1022
|
+
|
|
1023
|
+
proc.stderr.on('data', (data) => {
|
|
1024
|
+
const stderrText = data.toString();
|
|
1025
|
+
output += stderrText;
|
|
1026
|
+
|
|
1027
|
+
// Ollama may output progress to stderr, so parse it too
|
|
1028
|
+
const lines = stderrText.split('\n');
|
|
1029
|
+
for (const line of lines) {
|
|
1030
|
+
const trimmedLine = line.trim();
|
|
1031
|
+
|
|
1032
|
+
// Try to parse progress from stderr as well
|
|
1033
|
+
let progressMatch = trimmedLine.match(/pulling\s+(\S+).*?(\d+)%\s*[▏▕█░\s]*\s*([\d.]+\s*(?:B|KB|MB|GB|TB))/i);
|
|
1034
|
+
|
|
1035
|
+
if (!progressMatch) {
|
|
1036
|
+
progressMatch = trimmedLine.match(/(\w+)\s*\.\.\.\s*(\d+)%\s*([\d.]+\s*(?:B|KB|MB|GB|TB))/i);
|
|
1037
|
+
}
|
|
1038
|
+
|
|
1039
|
+
if (!progressMatch) {
|
|
1040
|
+
progressMatch = trimmedLine.match(/(\d+)%\s*[▏▕█░\s]*\s*([\d.]+\s*(?:B|KB|MB|GB|TB))/i);
|
|
1041
|
+
}
|
|
1042
|
+
|
|
1043
|
+
if (progressMatch && onProgress) {
|
|
1044
|
+
const layerHash = progressMatch[1] ? progressMatch[1].substring(0, 12) : 'layer';
|
|
1045
|
+
const progressPercent = parseInt(progressMatch[2] || progressMatch[1]);
|
|
1046
|
+
const sizeStr = progressMatch[3] || progressMatch[2];
|
|
1047
|
+
const layerTotalBytes = parseSize(sizeStr);
|
|
1048
|
+
|
|
1049
|
+
if (!layers.has(layerHash)) {
|
|
1050
|
+
layers.set(layerHash, { total: layerTotalBytes, progress: 0 });
|
|
1051
|
+
totalSizeBytes += layerTotalBytes;
|
|
1052
|
+
}
|
|
1053
|
+
|
|
1054
|
+
const layer = layers.get(layerHash);
|
|
1055
|
+
layer.progress = progressPercent;
|
|
1056
|
+
|
|
1057
|
+
downloadedBytes = 0;
|
|
1058
|
+
for (const [hash, layerData] of layers.entries()) {
|
|
1059
|
+
downloadedBytes += Math.floor((layerData.progress / 100) * layerData.total);
|
|
1060
|
+
}
|
|
1061
|
+
|
|
1062
|
+
const overallProgress = totalSizeBytes > 0
|
|
1063
|
+
? Math.min(100, Math.floor((downloadedBytes / totalSizeBytes) * 100))
|
|
1064
|
+
: progressPercent;
|
|
1065
|
+
|
|
1066
|
+
if (overallProgress !== lastProgressValue) {
|
|
1067
|
+
lastProgressValue = overallProgress;
|
|
1068
|
+
|
|
1069
|
+
const progressInfo = {
|
|
1070
|
+
percentage: overallProgress,
|
|
1071
|
+
downloaded: formatBytes(downloadedBytes),
|
|
1072
|
+
total: formatBytes(totalSizeBytes),
|
|
1073
|
+
currentPercent: progressPercent,
|
|
1074
|
+
progressBar: createProgressBar(overallProgress)
|
|
1075
|
+
};
|
|
1076
|
+
|
|
1077
|
+
onProgress(progressInfo);
|
|
1078
|
+
}
|
|
1079
|
+
}
|
|
1080
|
+
|
|
1081
|
+
// Always try to find percentage as fallback (even if size parsing failed)
|
|
1082
|
+
const simpleProgressMatch = trimmedLine.match(/(\d+)%/);
|
|
1083
|
+
if (simpleProgressMatch && onProgress) {
|
|
1084
|
+
const progressPercent = parseInt(simpleProgressMatch[1]);
|
|
1085
|
+
// Update when the percentage changed (or on the very first update, while lastProgressValue is still -1)
|
|
1086
|
+
if (lastProgressValue === -1 || Math.abs(progressPercent - lastProgressValue) >= 1) {
|
|
1087
|
+
lastProgressValue = progressPercent;
|
|
1088
|
+
onProgress({
|
|
1089
|
+
percentage: progressPercent,
|
|
1090
|
+
downloaded: totalSizeBytes > 0 ? formatBytes(downloadedBytes) : 'Unknown',
|
|
1091
|
+
total: totalSizeBytes > 0 ? formatBytes(totalSizeBytes) : 'Unknown',
|
|
1092
|
+
currentPercent: progressPercent,
|
|
1093
|
+
progressBar: createProgressBar(progressPercent)
|
|
1094
|
+
});
|
|
1095
|
+
}
|
|
1096
|
+
}
|
|
1097
|
+
}
|
|
1098
|
+
});
|
|
1099
|
+
|
|
1100
|
+
proc.on('close', async (code) => {
|
|
1101
|
+
if (code === 0) {
|
|
1102
|
+
resolve({ success: true });
|
|
1103
|
+
} else {
|
|
1104
|
+
// Check if error is about server not responding
|
|
1105
|
+
const errorText = output || '';
|
|
1106
|
+
if (errorText.includes('server not responding') || errorText.includes('connection refused')) {
|
|
1107
|
+
// Service might not be running - try to start it and retry
|
|
1108
|
+
if (!(await this.isOllamaRunning())) {
|
|
1109
|
+
this.logger.log('⚠ Ollama service not running, attempting to start...');
|
|
1110
|
+
const started = await this.startOllamaService();
|
|
1111
|
+
if (started) {
|
|
1112
|
+
// Retry after starting service
|
|
1113
|
+
this.logger.log('✓ Service started, retrying model download...');
|
|
1114
|
+
const retryResult = await this.pullOllamaModel(modelName, onProgress);
|
|
1115
|
+
resolve(retryResult);
|
|
1116
|
+
return;
|
|
1117
|
+
}
|
|
1118
|
+
}
|
|
1119
|
+
|
|
1120
|
+
resolve({
|
|
1121
|
+
success: false,
|
|
1122
|
+
error: 'Ollama service is not responding. Please ensure Ollama is running.',
|
|
1123
|
+
errorCode: 'SERVER_NOT_RESPONDING',
|
|
1124
|
+
resolution: 'The Ollama service needs to be running to download models.\n' +
|
|
1125
|
+
'Try one of these:\n' +
|
|
1126
|
+
' 1. Launch Ollama.app from /Applications\n' +
|
|
1127
|
+
' 2. Or run: ollama serve\n' +
|
|
1128
|
+
' 3. Then try downloading the model again'
|
|
1129
|
+
});
|
|
1130
|
+
} else {
|
|
1131
|
+
resolve({
|
|
1132
|
+
success: false,
|
|
1133
|
+
error: output || 'Unknown error',
|
|
1134
|
+
errorCode: 'PULL_FAILED'
|
|
1135
|
+
});
|
|
1136
|
+
}
|
|
1137
|
+
}
|
|
1138
|
+
});
|
|
1139
|
+
|
|
1140
|
+
proc.on('error', async (error) => {
|
|
1141
|
+
// Detect common errors and try to fix automatically
|
|
1142
|
+
if (error.code === 'ENOENT') {
|
|
1143
|
+
// Command not found - try to fix automatically (macOS)
|
|
1144
|
+
if (os.platform() === 'darwin' && this.isOllamaInstalled() && !shouldRetry) {
|
|
1145
|
+
this.logger.log('⚠ Ollama command not found, attempting to fix automatically...');
|
|
1146
|
+
const fixed = await this.fixOllamaPath();
|
|
1147
|
+
|
|
1148
|
+
if (fixed) {
|
|
1149
|
+
// Retry the command after fixing
|
|
1150
|
+
this.logger.log('✓ Fixed! Retrying model download...');
|
|
1151
|
+
const retryResult = await this.pullOllamaModel(modelName, onProgress);
|
|
1152
|
+
resolve(retryResult);
|
|
1153
|
+
return;
|
|
1154
|
+
}
|
|
1155
|
+
}
|
|
1156
|
+
|
|
1157
|
+
// Auto-fix failed or not applicable - provide manual resolution
|
|
1158
|
+
const platform = os.platform();
|
|
1159
|
+
let resolution = '';
|
|
1160
|
+
|
|
1161
|
+
if (platform === 'darwin') {
|
|
1162
|
+
resolution = 'Ollama.app is installed but the command-line tool is not in your PATH.\n' +
|
|
1163
|
+
'To fix this:\n' +
|
|
1164
|
+
' 1. Open Ollama.app from /Applications (launch it once)\n' +
|
|
1165
|
+
' 2. This will add the ollama command to your PATH\n' +
|
|
1166
|
+
' 3. Or manually add Ollama to PATH: export PATH="$PATH:/usr/local/bin"\n' +
|
|
1167
|
+
' 4. Then try downloading the model again';
|
|
1168
|
+
} else {
|
|
1169
|
+
resolution = 'The ollama command is not found in your PATH.\n' +
|
|
1170
|
+
'Please ensure Ollama is properly installed and the command is available.';
|
|
1171
|
+
}
|
|
1172
|
+
|
|
1173
|
+
resolve({
|
|
1174
|
+
success: false,
|
|
1175
|
+
error: 'Command not found: ollama',
|
|
1176
|
+
errorCode: 'ENOENT',
|
|
1177
|
+
resolution: resolution
|
|
1178
|
+
});
|
|
1179
|
+
} else {
|
|
1180
|
+
resolve({
|
|
1181
|
+
success: false,
|
|
1182
|
+
error: error.message,
|
|
1183
|
+
errorCode: error.code || 'UNKNOWN'
|
|
1184
|
+
});
|
|
1185
|
+
}
|
|
1186
|
+
});
|
|
1187
|
+
});
|
|
1188
|
+
} catch (error) {
|
|
1189
|
+
return {
|
|
1190
|
+
success: false,
|
|
1191
|
+
error: error.message,
|
|
1192
|
+
errorCode: error.code || 'UNKNOWN'
|
|
1193
|
+
};
|
|
1194
|
+
}
|
|
1195
|
+
}
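// Usage sketch (model name assumed):
//   const result = await this.pullOllamaModel('deepseek-coder:33b',
//     p => this.logger.log(`${p.progressBar} ${p.percentage}%`));
// On failure, result.errorCode is 'ENOENT', 'SERVER_NOT_RESPONDING', 'PULL_FAILED', or
// 'UNKNOWN', with `resolution` text provided for the recoverable cases.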
|
|
1196
|
+
|
|
1197
|
+
/**
|
|
1198
|
+
* Check if Homebrew is available
|
|
1199
|
+
*/
|
|
1200
|
+
hasHomebrew() {
|
|
1201
|
+
try {
|
|
1202
|
+
execSync('which brew', { stdio: 'pipe' });
|
|
1203
|
+
return true;
|
|
1204
|
+
} catch {
|
|
1205
|
+
return false;
|
|
1206
|
+
}
|
|
1207
|
+
}
|
|
1208
|
+
|
|
1209
|
+
/**
|
|
1210
|
+
* Install Ollama on macOS using Homebrew
|
|
1211
|
+
*/
|
|
1212
|
+
async installOllamaViaHomebrew() {
|
|
1213
|
+
if (!this.hasHomebrew()) {
|
|
1214
|
+
throw new Error('Homebrew is not installed');
|
|
1215
|
+
}
|
|
1216
|
+
|
|
1217
|
+
// Check if Ollama is already installed
|
|
1218
|
+
if (this.isOllamaInstalled()) {
|
|
1219
|
+
this.logger.log('Ollama is already installed, skipping installation.');
|
|
1220
|
+
return { success: true, method: 'homebrew', alreadyInstalled: true };
|
|
1221
|
+
}
|
|
1222
|
+
|
|
1223
|
+
this.logger.log('Installing Ollama via Homebrew (this may take a few minutes)...');
|
|
1224
|
+
|
|
1225
|
+
try {
|
|
1226
|
+
execSync('brew install --cask ollama', {
|
|
1227
|
+
stdio: 'inherit',
|
|
1228
|
+
timeout: 300000 // 5 minutes
|
|
1229
|
+
});
|
|
1230
|
+
|
|
1231
|
+
// Wait a moment for installation to complete
|
|
1232
|
+
await new Promise(resolve => setTimeout(resolve, 2000));
|
|
1233
|
+
|
|
1234
|
+
// Verify installation
|
|
1235
|
+
if (this.isOllamaInstalled()) {
|
|
1236
|
+
return { success: true, method: 'homebrew' };
|
|
1237
|
+
} else {
|
|
1238
|
+
// App might be installed but CLI not in PATH yet - this is OK
|
|
1239
|
+
if (os.platform() === 'darwin') {
|
|
1240
|
+
const appPath = '/Applications/Ollama.app';
|
|
1241
|
+
const userAppPath = path.join(os.homedir(), 'Applications', 'Ollama.app');
|
|
1242
|
+
if (fs.existsSync(appPath) || fs.existsSync(userAppPath)) {
|
|
1243
|
+
return { success: true, method: 'homebrew', note: 'Ollama app installed. The CLI tool will be available after launching Ollama.app once.' };
|
|
1244
|
+
}
|
|
1245
|
+
}
|
|
1246
|
+
throw new Error('Installation completed but Ollama not found in PATH');
|
|
1247
|
+
}
|
|
1248
|
+
} catch (error) {
|
|
1249
|
+
// If error is because app already exists, that's OK
|
|
1250
|
+
if (error.message.includes('already an App') && this.isOllamaInstalled()) {
|
|
1251
|
+
return { success: true, method: 'homebrew', alreadyInstalled: true };
|
|
1252
|
+
}
|
|
1253
|
+
throw new Error(`Homebrew installation failed: ${error.message}`);
|
|
1254
|
+
}
|
|
1255
|
+
}
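// Manual equivalent of the Homebrew path (the same command the method runs):
//   brew install --cask ollama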
|
|
1256
|
+
|
|
1257
|
+
/**
|
|
1258
|
+
* Install Ollama on macOS by downloading and installing DMG
|
|
1259
|
+
*/
|
|
1260
|
+
async installOllamaViaDMG() {
|
|
1261
|
+
// Check if Ollama is already installed
|
|
1262
|
+
if (this.isOllamaInstalled()) {
|
|
1263
|
+
this.logger.log('Ollama is already installed, skipping installation.');
|
|
1264
|
+
return { success: true, method: 'dmg', alreadyInstalled: true };
|
|
1265
|
+
}
|
|
1266
|
+
|
|
1267
|
+
const https = require('https');
|
|
1268
|
+
const { createWriteStream } = require('fs');
|
|
1269
|
+
|
|
1270
|
+
// Ollama DMG download URL for macOS
|
|
1271
|
+
const downloadUrl = 'https://ollama.com/download/Ollama-darwin.dmg';
|
|
1272
|
+
const tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), 'ollama-install-'));
|
|
1273
|
+
const downloadPath = path.join(tmpDir, 'Ollama.dmg');
|
|
1274
|
+
|
|
1275
|
+
try {
|
|
1276
|
+
this.logger.log('Downloading Ollama DMG...');
|
|
1277
|
+
|
|
1278
|
+
// Download the DMG file (handle redirects)
|
|
1279
|
+
await new Promise((resolve, reject) => {
|
|
1280
|
+
const downloadFile = (url) => {
|
|
1281
|
+
https.get(url, (response) => {
|
|
1282
|
+
// Handle redirects
|
|
1283
|
+
if (response.statusCode === 301 || response.statusCode === 302 || response.statusCode === 307 || response.statusCode === 308) {
|
|
1284
|
+
if (response.headers.location) {
|
|
1285
|
+
return downloadFile(response.headers.location);
|
|
1286
|
+
}
|
|
1287
|
+
}
|
|
1288
|
+
|
|
1289
|
+
if (response.statusCode !== 200) {
|
|
1290
|
+
reject(new Error(`Download failed: HTTP ${response.statusCode}`));
|
|
1291
|
+
return;
|
|
1292
|
+
}
|
|
1293
|
+
|
|
1294
|
+
const fileStream = createWriteStream(downloadPath);
|
|
1295
|
+
response.pipe(fileStream);
|
|
1296
|
+
|
|
1297
|
+
fileStream.on('finish', () => {
|
|
1298
|
+
fileStream.close();
|
|
1299
|
+
resolve();
|
|
1300
|
+
});
|
|
1301
|
+
|
|
1302
|
+
fileStream.on('error', (error) => {
|
|
1303
|
+
fs.unlink(downloadPath, () => {});
|
|
1304
|
+
reject(error);
|
|
1305
|
+
});
|
|
1306
|
+
}).on('error', reject);
|
|
1307
|
+
};
|
|
1308
|
+
|
|
1309
|
+
downloadFile(downloadUrl);
|
|
1310
|
+
});
|
|
1311
|
+
|
|
1312
|
+
this.logger.log('Mounting and installing Ollama...');
|
|
1313
|
+
|
|
1314
|
+
const { execSync } = require('child_process');
|
|
1315
|
+
|
|
1316
|
+
// Mount the DMG
|
|
1317
|
+
const mountOutput = execSync(`hdiutil attach "${downloadPath}" -nobrowse -noverify`, {
|
|
1318
|
+
encoding: 'utf8',
|
|
1319
|
+
stdio: 'pipe'
|
|
1320
|
+
});
|
|
1321
|
+
|
|
1322
|
+
// Extract mount point
|
|
1323
|
+
const mountPoint = mountOutput.split('\n').find(line => line.includes('/Volumes/'))?.split('\t').pop()?.trim();
|
|
1324
|
+
if (!mountPoint) {
|
|
1325
|
+
throw new Error('Failed to find DMG mount point');
|
|
1326
|
+
}
|
|
1327
|
+
|
|
1328
|
+
try {
|
|
1329
|
+
// Find the Ollama.app in the mounted volume
|
|
1330
|
+
const appInVolume = path.join(mountPoint, 'Ollama.app');
|
|
1331
|
+
if (!fs.existsSync(appInVolume)) {
|
|
1332
|
+
throw new Error('Ollama.app not found in mounted DMG');
|
|
1333
|
+
}
|
|
1334
|
+
|
|
1335
|
+
// Copy to Applications
|
|
1336
|
+
const appPath = '/Applications/Ollama.app';
|
|
1337
|
+
if (fs.existsSync(appPath)) {
|
|
1338
|
+
execSync(`rm -rf "${appPath}"`, { stdio: 'pipe' });
|
|
1339
|
+
}
|
|
1340
|
+
execSync(`cp -R "${appInVolume}" "${appPath}"`, { stdio: 'pipe' });
|
|
1341
|
+
|
|
1342
|
+
// Remove quarantine attribute to prevent Gatekeeper issues
|
|
1343
|
+
try {
|
|
1344
|
+
execSync(`xattr -dr com.apple.quarantine "${appPath}"`, { stdio: 'pipe' });
|
|
1345
|
+
} catch {
|
|
1346
|
+
// Non-fatal if xattr fails
|
|
1347
|
+
}
|
|
1348
|
+
|
|
1349
|
+
// Eject the DMG
|
|
1350
|
+
execSync(`hdiutil detach "${mountPoint}"`, { stdio: 'pipe' });
|
|
1351
|
+
} catch (error) {
|
|
1352
|
+
// Try to eject even on error
|
|
1353
|
+
try {
|
|
1354
|
+
execSync(`hdiutil detach "${mountPoint}"`, { stdio: 'pipe' });
|
|
1355
|
+
} catch {}
|
|
1356
|
+
throw error;
|
|
1357
|
+
}
|
|
1358
|
+
|
|
1359
|
+
// Clean up downloaded file
|
|
1360
|
+
fs.rmSync(tmpDir, { recursive: true, force: true });
|
|
1361
|
+
|
|
1362
|
+
// Wait a moment for installation to settle
|
|
1363
|
+
await new Promise(resolve => setTimeout(resolve, 2000));
|
|
1364
|
+
|
|
1365
|
+
// Verify installation
|
|
1366
|
+
if (this.isOllamaInstalled()) {
|
|
1367
|
+
return { success: true, method: 'dmg' };
|
|
1368
|
+
} else {
|
|
1369
|
+
// Check if app exists in Applications
|
|
1370
|
+
if (fs.existsSync('/Applications/Ollama.app')) {
|
|
1371
|
+
// App is installed but not in PATH yet - this is normal, user may need to launch it once
|
|
1372
|
+
return { success: true, method: 'dmg', note: 'Ollama installed. Please launch Ollama.app once to complete setup.' };
|
|
1373
|
+
}
|
|
1374
|
+
throw new Error('Installation completed but Ollama not found');
|
|
1375
|
+
}
|
|
1376
|
+
} catch (error) {
|
|
1377
|
+
// Clean up on error
|
|
1378
|
+
if (fs.existsSync(tmpDir)) {
|
|
1379
|
+
fs.rmSync(tmpDir, { recursive: true, force: true });
|
|
1380
|
+
}
|
|
1381
|
+
throw new Error(`DMG installation failed: ${error.message}`);
|
|
1382
|
+
}
|
|
1383
|
+
}
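// Manual equivalent of the DMG path above (URL taken from this method; <mount> is the
// volume reported by hdiutil):
//   curl -L -o Ollama.dmg https://ollama.com/download/Ollama-darwin.dmg
//   hdiutil attach Ollama.dmg -nobrowse -noverify
//   cp -R "/Volumes/<mount>/Ollama.app" /Applications/ && hdiutil detach "/Volumes/<mount>"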
|
|
1384
|
+
|
|
1385
|
+
/**
|
|
1386
|
+
* Install Ollama
|
|
1387
|
+
*/
|
|
1388
|
+
async installOllama() {
|
|
1389
|
+
try {
|
|
1390
|
+
// Check if Ollama is already installed
|
|
1391
|
+
if (this.isOllamaInstalled()) {
|
|
1392
|
+
this.logger.log('Ollama is already installed.');
|
|
1393
|
+
return { success: true, method: 'already-installed', alreadyInstalled: true };
|
|
1394
|
+
}
|
|
1395
|
+
|
|
1396
|
+
const platform = os.platform();
|
|
1397
|
+
|
|
1398
|
+
if (platform === 'darwin') {
|
|
1399
|
+
// macOS - try Homebrew first, then fall back to DMG download
|
|
1400
|
+
try {
|
|
1401
|
+
if (this.hasHomebrew()) {
|
|
1402
|
+
this.logger.log('🍺 Using Homebrew to install Ollama...');
|
|
1403
|
+
return await this.installOllamaViaHomebrew();
|
|
1404
|
+
}
|
|
1405
|
+
} catch (error) {
|
|
1406
|
+
this.logger.log(`⚠ Homebrew installation failed: ${error.message}`);
|
|
1407
|
+
this.logger.log('📥 Falling back to direct download...');
|
|
1408
|
+
}
|
|
1409
|
+
|
|
1410
|
+
// Fall back to direct DMG download
|
|
1411
|
+
try {
|
|
1412
|
+
this.logger.log('📥 Downloading and installing Ollama DMG...');
|
|
1413
|
+
return await this.installOllamaViaDMG();
|
|
1414
|
+
} catch (dmgError) {
|
|
1415
|
+
// If all methods fail, provide manual instructions
|
|
1416
|
+
this.logger.log('⚠ Automatic installation failed. Opening browser for manual installation...');
|
|
1417
|
+
const { spawn } = require('child_process');
|
|
1418
|
+
spawn('open', ['https://ollama.com/download']);
|
|
1419
|
+
|
|
1420
|
+
return {
|
|
1421
|
+
success: false,
|
|
1422
|
+
error: 'Automatic installation failed. Please download and install Ollama from https://ollama.com/download\n' +
|
|
1423
|
+
'After installation, run this command again.',
|
|
1424
|
+
needsManualInstall: true
|
|
1425
|
+
};
|
|
1426
|
+
}
|
|
1427
|
+
} else if (platform === 'linux') {
|
|
1428
|
+
// Linux - use install script
|
|
1429
|
+
this.logger.log('Downloading Ollama installer...');
|
|
1430
|
+
const installScript = 'curl -fsSL https://ollama.com/install.sh | sh';
|
|
1431
|
+
|
|
1432
|
+
execSync(installScript, {
|
|
1433
|
+
stdio: 'inherit',
|
|
1434
|
+
timeout: 300000 // 5 minutes
|
|
1435
|
+
});
|
|
1436
|
+
|
|
1437
|
+
return { success: true };
|
|
1438
|
+
} else if (platform === 'win32') {
|
|
1439
|
+
// Windows - need to download installer
|
|
1440
|
+
this.logger.log('Ollama installation on Windows requires manual download.');
|
|
1441
|
+
this.logger.log('Opening browser to https://ollama.com/download');
|
|
1442
|
+
|
|
1443
|
+
// Open browser to download page
|
|
1444
|
+
const { exec } = require('child_process');
|
|
1445
|
+
exec('start https://ollama.com/download');
|
|
1446
|
+
|
|
1447
|
+
return {
|
|
1448
|
+
success: false,
|
|
1449
|
+
error: 'Please download and install Ollama from https://ollama.com/download\n' +
|
|
1450
|
+
'After installation, run this command again.',
|
|
1451
|
+
needsManualInstall: true
|
|
1452
|
+
};
|
|
1453
|
+
} else {
|
|
1454
|
+
return {
|
|
1455
|
+
success: false,
|
|
1456
|
+
error: `Unsupported platform: ${platform}`
|
|
1457
|
+
};
|
|
1458
|
+
}
|
|
1459
|
+
} catch (error) {
|
|
1460
|
+
return {
|
|
1461
|
+
success: false,
|
|
1462
|
+
error: error.message
|
|
1463
|
+
};
|
|
1464
|
+
}
|
|
1465
|
+
}
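// Platform flow implemented above: macOS tries Homebrew, then the DMG download, then falls
// back to opening https://ollama.com/download in the browser; Linux pipes the official
// install.sh to sh; Windows only opens the download page and reports needsManualInstall.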
|
|
1466
|
+
|
|
1467
|
+
/**
|
|
1468
|
+
* Configure Cline CLI with Ollama (local)
|
|
1469
|
+
* @param {string} modelName - Model name (e.g., 'deepseek-coder:33b')
|
|
1470
|
+
*/
|
|
1471
|
+
async configureWithOllama(modelName = 'deepseek-coder:33b') {
|
|
1472
|
+
try {
|
|
1473
|
+
const configPath = path.join(os.homedir(), '.cline_cli', 'cline_cli_settings.json');
|
|
1474
|
+
|
|
1475
|
+
// Read existing config or create new one
|
|
1476
|
+
let config = {};
|
|
1477
|
+
if (fs.existsSync(configPath)) {
|
|
1478
|
+
config = JSON.parse(fs.readFileSync(configPath, 'utf8'));
|
|
1479
|
+
}
|
|
1480
|
+
|
|
1481
|
+
// Ensure globalState exists
|
|
1482
|
+
config.globalState = config.globalState || {};
|
|
1483
|
+
|
|
1484
|
+
// Configure for Ollama (OpenAI-compatible API)
|
|
1485
|
+
config.globalState.apiProvider = 'openai-native';
|
|
1486
|
+
// Set BOTH apiModelId and openAiModelId - Cline CLI may check either
|
|
1487
|
+
config.globalState.apiModelId = modelName;
|
|
1488
|
+
config.globalState.openAiModelId = modelName;
|
|
1489
|
+
config.globalState.openAiBaseUrl = 'http://localhost:11434/v1';
|
|
1490
|
+
config.globalState.openAiApiKey = 'sk-ollama-placeholder-key-for-local-api'; // Ollama doesn't need a key, but Cline CLI requires one in sk- format
|
|
1491
|
+
|
|
1492
|
+
// Remove any leftover Gemini or other provider configs that might interfere
|
|
1493
|
+
delete config.globalState.geminiApiKey;
|
|
1494
|
+
delete config.globalState.openRouterApiKey;
|
|
1495
|
+
delete config.globalState.anthropicApiKey;
|
|
1496
|
+
|
|
1497
|
+
// Enable auto-approval for autonomous operation
|
|
1498
|
+
config.globalState.autoApprovalSettings = {
|
|
1499
|
+
enabled: true,
|
|
1500
|
+
actions: {
|
|
1501
|
+
readFiles: true,
|
|
1502
|
+
editFiles: true,
|
|
1503
|
+
executeSafeCommands: true,
|
|
1504
|
+
useMcp: true
|
|
1505
|
+
},
|
|
1506
|
+
maxRequests: 1000
|
|
1507
|
+
};
|
|
1508
|
+
|
|
1509
|
+
// Ensure settings exists
|
|
1510
|
+
config.settings = config.settings || {};
|
|
1511
|
+
config.settings['cline.enableCheckpoints'] = false;
|
|
1512
|
+
|
|
1513
|
+
// Write updated config
|
|
1514
|
+
fs.mkdirSync(path.dirname(configPath), { recursive: true });
|
|
1515
|
+
fs.writeFileSync(configPath, JSON.stringify(config, null, 4));
|
|
1516
|
+
|
|
1517
|
+
// Verify the config was written correctly
|
|
1518
|
+
const verifyConfig = JSON.parse(fs.readFileSync(configPath, 'utf8'));
|
|
1519
|
+
const actualModelId = verifyConfig.globalState?.openAiModelId || verifyConfig.globalState?.apiModelId;
|
|
1520
|
+
|
|
1521
|
+
this.logger.log('Cline CLI configured with Ollama');
|
|
1522
|
+
this.logger.log(` Provider: Ollama (Local)`);
|
|
1523
|
+
this.logger.log(` Model: ${actualModelId || modelName}`);
|
|
1524
|
+
this.logger.log(` Endpoint: http://localhost:11434/v1`);
|
|
1525
|
+
|
|
1526
|
+
if (actualModelId !== modelName) {
|
|
1527
|
+
this.logger.warn(` ⚠ Warning: Config shows model as ${actualModelId}, expected ${modelName}`);
|
|
1528
|
+
}
|
|
1529
|
+
|
|
1530
|
+
return { success: true, configPath };
|
|
1531
|
+
} catch (error) {
|
|
1532
|
+
return {
|
|
1533
|
+
success: false,
|
|
1534
|
+
error: error.message
|
|
1535
|
+
};
|
|
1536
|
+
}
|
|
1537
|
+
}
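// Resulting ~/.cline_cli/cline_cli_settings.json (abridged sketch of the fields set above):
// {
//   "globalState": {
//     "apiProvider": "openai-native",
//     "apiModelId": "deepseek-coder:33b",
//     "openAiModelId": "deepseek-coder:33b",
//     "openAiBaseUrl": "http://localhost:11434/v1",
//     "openAiApiKey": "sk-ollama-placeholder-key-for-local-api",
//     "autoApprovalSettings": { "enabled": true, "maxRequests": 1000 }
//   },
//   "settings": { "cline.enableCheckpoints": false }
// }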
|
|
1538
|
+
|
|
1539
|
+
/**
|
|
1540
|
+
* Initialize Cline CLI configuration
|
|
1541
|
+
*/
|
|
1542
|
+
async init() {
|
|
1543
|
+
try {
|
|
1544
|
+
execSync('cline-cli init', {
|
|
1545
|
+
stdio: 'inherit',
|
|
1546
|
+
timeout: 30000
|
|
1547
|
+
});
|
|
1548
|
+
return { success: true };
|
|
1549
|
+
} catch (error) {
|
|
1550
|
+
return {
|
|
1551
|
+
success: false,
|
|
1552
|
+
error: error.message
|
|
1553
|
+
};
|
|
1554
|
+
}
|
|
1555
|
+
}
|
|
1556
|
+
|
|
1557
|
+
/**
|
|
1558
|
+
* Configure Cline CLI with Anthropic API
|
|
1559
|
+
* @param {string} apiKey - Anthropic API key
|
|
1560
|
+
*/
|
|
1561
|
+
async configureWithAnthropic(apiKey) {
|
|
1562
|
+
try {
|
|
1563
|
+
const configPath = path.join(os.homedir(), '.cline_cli', 'cline_cli_settings.json');
|
|
1564
|
+
|
|
1565
|
+
// Read existing config or create new one
|
|
1566
|
+
let config = {};
|
|
1567
|
+
if (fs.existsSync(configPath)) {
|
|
1568
|
+
config = JSON.parse(fs.readFileSync(configPath, 'utf8'));
|
|
1569
|
+
}
|
|
1570
|
+
|
|
1571
|
+
// Ensure globalState exists
|
|
1572
|
+
config.globalState = config.globalState || {};
|
|
1573
|
+
|
|
1574
|
+
// Remove invalid key marker if it exists (key is being reconfigured)
|
|
1575
|
+
const invalidMarker = path.join(os.homedir(), '.allnightai', '.anthropic-key-invalid');
|
|
1576
|
+
if (fs.existsSync(invalidMarker)) {
|
|
1577
|
+
fs.unlinkSync(invalidMarker);
|
|
1578
|
+
}
|
|
1579
|
+
|
|
1580
|
+
// Configure for Anthropic API (direct)
|
|
1581
|
+
config.globalState.apiProvider = 'anthropic';
|
|
1582
|
+
config.globalState.apiModelId = 'claude-3-5-sonnet-20241022';
|
|
1583
|
+
config.globalState.anthropicApiKey = apiKey;
|
|
1584
|
+
|
|
1585
|
+
// Enable auto-approval for autonomous operation
|
|
1586
|
+
config.globalState.autoApprovalSettings = {
|
|
1587
|
+
enabled: true,
|
|
1588
|
+
actions: {
|
|
1589
|
+
readFiles: true,
|
|
1590
|
+
editFiles: true,
|
|
1591
|
+
executeSafeCommands: true,
|
|
1592
|
+
useMcp: true
|
|
1593
|
+
},
|
|
1594
|
+
maxRequests: 1000
|
|
1595
|
+
};
|
|
1596
|
+
|
|
1597
|
+
// Ensure settings exists
|
|
1598
|
+
config.settings = config.settings || {};
|
|
1599
|
+
config.settings['cline.enableCheckpoints'] = false;
|
|
1600
|
+
|
|
1601
|
+
// Write updated config
|
|
1602
|
+
fs.mkdirSync(path.dirname(configPath), { recursive: true });
|
|
1603
|
+
fs.writeFileSync(configPath, JSON.stringify(config, null, 4));
|
|
1604
|
+
|
|
1605
|
+
this.logger.log('Cline CLI configured with Anthropic API');
|
|
1606
|
+
this.logger.log(` Provider: Anthropic (direct)`);
|
|
1607
|
+
this.logger.log(` Model: claude-3-5-sonnet-20241022`);
|
|
1608
|
+
this.logger.log(` API Key: ${apiKey.substring(0, 20)}...`);
|
|
1609
|
+
|
|
1610
|
+
return { success: true, configPath };
|
|
1611
|
+
} catch (error) {
|
|
1612
|
+
return {
|
|
1613
|
+
success: false,
|
|
1614
|
+
error: error.message
|
|
1615
|
+
};
|
|
1616
|
+
}
|
|
1617
|
+
}
|
|
1618
|
+
|
|
1619
|
+
/**
|
|
1620
|
+
* Configure Cline CLI with OpenRouter API
|
|
1621
|
+
* @param {string} apiKey - OpenRouter API key
|
|
1622
|
+
* @param {string} modelId - Model ID (optional, defaults to meta-llama/llama-3.3-70b-instruct:free)
|
|
1623
|
+
*/
|
|
1624
|
+
async configureWithOpenRouter(apiKey, modelId = 'meta-llama/llama-3.3-70b-instruct:free') {
|
|
1625
|
+
try {
|
|
1626
|
+
const configPath = path.join(os.homedir(), '.cline_cli', 'cline_cli_settings.json');
|
|
1627
|
+
|
|
1628
|
+
// Read existing config or create new one
|
|
1629
|
+
let config = {};
|
|
1630
|
+
if (fs.existsSync(configPath)) {
|
|
1631
|
+
config = JSON.parse(fs.readFileSync(configPath, 'utf8'));
|
|
1632
|
+
}
|
|
1633
|
+
|
|
1634
|
+
// Ensure globalState exists
|
|
1635
|
+
config.globalState = config.globalState || {};
|
|
1636
|
+
|
|
1637
|
+
// Remove invalid key marker if it exists (key is being reconfigured)
|
|
1638
|
+
const invalidMarker = path.join(os.homedir(), '.allnightai', '.openrouter-key-invalid');
|
|
1639
|
+
if (fs.existsSync(invalidMarker)) {
|
|
1640
|
+
fs.unlinkSync(invalidMarker);
|
|
1641
|
+
}
|
|
1642
|
+
|
|
1643
|
+
// Configure for OpenRouter API
|
|
1644
|
+
config.globalState.apiProvider = 'openrouter';
|
|
1645
|
+
config.globalState.apiModelId = modelId; // Cline CLI checks this field
|
|
1646
|
+
config.globalState.openRouterModelId = modelId;
|
|
1647
|
+
config.globalState.openRouterApiKey = apiKey;
|
|
1648
|
+
|
|
1649
|
+
// OpenRouter uses OpenAI-compatible API, so we need to set these as well
|
|
1650
|
+
config.globalState.openAiBaseUrl = 'https://openrouter.ai/api/v1';
|
|
1651
|
+
config.globalState.openAiModelId = modelId;
|
|
1652
|
+
|
|
1653
|
+
// Enable auto-approval for autonomous operation
|
|
1654
|
+
config.globalState.autoApprovalSettings = {
|
|
1655
|
+
enabled: true,
|
|
1656
|
+
actions: {
|
|
1657
|
+
readFiles: true,
|
|
1658
|
+
editFiles: true,
|
|
1659
|
+
executeSafeCommands: true,
|
|
1660
|
+
useMcp: true
|
|
1661
|
+
},
|
|
1662
|
+
maxRequests: 1000
|
|
1663
|
+
};
|
|
1664
|
+
|
|
1665
|
+
// Ensure settings exists
|
|
1666
|
+
config.settings = config.settings || {};
|
|
1667
|
+
config.settings['cline.enableCheckpoints'] = false;
|
|
1668
|
+
|
|
1669
|
+
// Write updated config
|
|
1670
|
+
fs.mkdirSync(path.dirname(configPath), { recursive: true });
|
|
1671
|
+
fs.writeFileSync(configPath, JSON.stringify(config, null, 4));
|
|
1672
|
+
|
|
1673
|
+
this.logger.log('Cline CLI configured with OpenRouter API');
|
|
1674
|
+
this.logger.log(` Provider: OpenRouter`);
|
|
1675
|
+
this.logger.log(` Model: ${modelId}`);
|
|
1676
|
+
this.logger.log(` API Key: ${apiKey.substring(0, 20)}...`);
|
|
1677
|
+
|
|
1678
|
+
return { success: true, configPath };
|
|
1679
|
+
} catch (error) {
|
|
1680
|
+
return {
|
|
1681
|
+
success: false,
|
|
1682
|
+
error: error.message
|
|
1683
|
+
};
|
|
1684
|
+
}
|
|
1685
|
+
}
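// Call sketch: configureWithOpenRouter(key) defaults to the free model
// 'meta-llama/llama-3.3-70b-instruct:free' and mirrors it into openAiModelId with
// openAiBaseUrl 'https://openrouter.ai/api/v1', since OpenRouter exposes an
// OpenAI-compatible API.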
|
|
1686
|
+
|
|
1687
|
+
/**
|
|
1688
|
+
* Configure Cline CLI with Google Gemini API
|
|
1689
|
+
* @param {string} apiKey - Google Gemini API key
|
|
1690
|
+
* @param {string} modelId - Model ID (optional, defaults to gemini-2.0-flash-exp)
|
|
1691
|
+
*/
|
|
1692
|
+
async configureWithGemini(apiKey, modelId = 'gemini-2.0-flash-exp') {
|
|
1693
|
+
try {
|
|
1694
|
+
const configPath = path.join(os.homedir(), '.cline_cli', 'cline_cli_settings.json');
|
|
1695
|
+
|
|
1696
|
+
// Read existing config or create new one
|
|
1697
|
+
let config = {};
|
|
1698
|
+
if (fs.existsSync(configPath)) {
|
|
1699
|
+
config = JSON.parse(fs.readFileSync(configPath, 'utf8'));
|
|
1700
|
+
}
|
|
1701
|
+
|
|
1702
|
+
// Ensure globalState exists
|
|
1703
|
+
config.globalState = config.globalState || {};
|
|
1704
|
+
|
|
1705
|
+
// Remove invalid key marker if it exists (key is being reconfigured)
|
|
1706
|
+
const invalidMarker = path.join(os.homedir(), '.allnightai', '.gemini-key-invalid');
|
|
1707
|
+
if (fs.existsSync(invalidMarker)) {
|
|
1708
|
+
fs.unlinkSync(invalidMarker);
|
|
1709
|
+
}
|
|
1710
|
+
|
|
1711
|
+
// Configure for Google Gemini API using OpenAI-compatible endpoint
|
|
1712
|
+
// Cline CLI uses OpenAI-compatible format, not a native 'gemini' provider
|
|
1713
|
+
config.globalState.apiProvider = 'openai-native';
|
|
1714
|
+
config.globalState.openAiBaseUrl = 'https://generativelanguage.googleapis.com/v1beta/openai';
|
|
1715
|
+
config.globalState.openAiModelId = modelId;
|
|
1716
|
+
config.globalState.openAiApiKey = apiKey;
|
|
1717
|
+
|
|
1718
|
+
// Enable auto-approval for autonomous operation
|
|
1719
|
+
config.globalState.autoApprovalSettings = {
|
|
1720
|
+
enabled: true,
|
|
1721
|
+
actions: {
|
|
1722
|
+
readFiles: true,
|
|
1723
|
+
editFiles: true,
|
|
1724
|
+
executeSafeCommands: true,
|
|
1725
|
+
useMcp: true
|
|
1726
|
+
},
|
|
1727
|
+
maxRequests: 1000
|
|
1728
|
+
};
|
|
1729
|
+
|
|
1730
|
+
// Ensure settings exists
|
|
1731
|
+
config.settings = config.settings || {};
|
|
1732
|
+
config.settings['cline.enableCheckpoints'] = false;
|
|
1733
|
+
|
|
1734
|
+
// Write updated config
|
|
1735
|
+
fs.mkdirSync(path.dirname(configPath), { recursive: true });
|
|
1736
|
+
fs.writeFileSync(configPath, JSON.stringify(config, null, 4));
|
|
1737
|
+
|
|
1738
|
+
this.logger.log('Cline CLI configured with Google Gemini API (OpenAI-compatible)');
|
|
1739
|
+
this.logger.log(` Provider: Google Gemini`);
|
|
1740
|
+
this.logger.log(` Base URL: https://generativelanguage.googleapis.com/v1beta/openai`);
|
|
1741
|
+
this.logger.log(` Model: ${modelId}`);
|
|
1742
|
+
this.logger.log(` API Key: ${apiKey.substring(0, 20)}...`);
|
|
1743
|
+
|
|
1744
|
+
return { success: true, configPath };
|
|
1745
|
+
} catch (error) {
|
|
1746
|
+
return {
|
|
1747
|
+
success: false,
|
|
1748
|
+
error: error.message
|
|
1749
|
+
};
|
|
1750
|
+
}
|
|
1751
|
+
}
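// Call sketch: configureWithGemini(key) routes Gemini through Google's OpenAI-compatible
// endpoint (apiProvider 'openai-native', base URL
// https://generativelanguage.googleapis.com/v1beta/openai) rather than a native 'gemini'
// provider, as noted in the comments above.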
|
|
1752
|
+
|
|
1753
|
+
/**
|
|
1754
|
+
* Install Cline CLI via npm
|
|
1755
|
+
*/
|
|
1756
|
+
async install() {
|
|
1757
|
+
this.logger.log('Installing Cline CLI via npm...');
|
|
1758
|
+
|
|
1759
|
+
try {
|
|
1760
|
+
// Install globally
|
|
1761
|
+
execSync('npm install -g @yaegaki/cline-cli', {
|
|
1762
|
+
stdio: 'inherit',
|
|
1763
|
+
timeout: 120000 // 2 minute timeout
|
|
1764
|
+
});
|
|
1765
|
+
|
|
1766
|
+
this.logger.log('Cline CLI installed successfully');
|
|
1767
|
+
return { success: true };
|
|
1768
|
+
} catch (error) {
|
|
1769
|
+
this.logger.error('Failed to install Cline CLI:', error.message);
|
|
1770
|
+
return {
|
|
1771
|
+
success: false,
|
|
1772
|
+
error: error.message
|
|
1773
|
+
};
|
|
1774
|
+
}
|
|
1775
|
+
}
|
|
1776
|
+
  /**
   * Send text to Cline CLI and execute
   * @param {string} text - The instruction text to send
   * @param {string} cwd - Working directory (defaults to current)
   * @param {boolean} fullAuto - Run in fully automated mode (default: true)
   * @returns {Promise<Object>} Result with success, output, and error
   */
  async sendText(text, cwd = process.cwd(), fullAuto = true) {
    if (!this.isInstalled()) {
      return {
        success: false,
        error: 'Cline CLI is not installed. Run install() first.',
        needsInstall: true
      };
    }

    try {
      const args = ['task', text, '--workspace', cwd];
      if (fullAuto) {
        args.push('--full-auto'); // Auto-approve changes
      }

      this.logger.log(`Executing: cline-cli task "${text}" --workspace ${cwd} ${fullAuto ? '--full-auto' : ''}`);
      this.logger.log(`Working directory: ${cwd}`);

      // Set up environment with API key based on configured provider
      const env = { ...process.env };

      // Check which provider is configured and ensure API key is set
      try {
        const configPath = path.join(os.homedir(), '.cline_cli', 'cline_cli_settings.json');
        if (fs.existsSync(configPath)) {
          let config = JSON.parse(fs.readFileSync(configPath, 'utf8'));
          const apiProvider = config.globalState?.apiProvider;

          if (apiProvider === 'openai-native' && config.globalState?.openAiBaseUrl === 'http://localhost:11434/v1') {
            // Ollama - uses openai-native provider with local endpoint
            // Cline CLI needs OPENAI_API_KEY env var even for Ollama (it validates the format)
            env.OPENAI_API_KEY = 'sk-ollama-placeholder-key-for-local-api';
            env.OPENAI_BASE_URL = 'http://localhost:11434/v1';
          } else if (apiProvider === 'openrouter') {
            const apiKey = config.globalState?.openRouterApiKey || this.getSavedOpenRouterKey() || process.env.OPENROUTER_API_KEY;
            if (apiKey) {
              env.OPENROUTER_API_KEY = apiKey;
              env.OPENAI_API_KEY = apiKey; // OpenRouter uses OpenAI-compatible API
            }
          } else if (apiProvider === 'openai-native' && config.globalState?.openAiBaseUrl?.includes('generativelanguage.googleapis.com')) {
            const apiKey = config.globalState?.openAiApiKey || this.getSavedGeminiKey() || process.env.GEMINI_API_KEY;
            if (apiKey) {
              env.OPENAI_API_KEY = apiKey;
            }
          } else {
            // Default to Anthropic
            const apiKey = config.globalState?.anthropicApiKey || this.getSavedAnthropicKey() || process.env.ANTHROPIC_API_KEY;
            if (apiKey) {
              env.ANTHROPIC_API_KEY = apiKey;
            }
          }
        }
      } catch (error) {
        this.logger.warn('Warning: Failed to load API key from config:', error.message);
      }

      return new Promise((resolve) => {
        const proc = spawn('cline-cli', args, {
          stdio: ['pipe', 'pipe', 'pipe'],
          env: env,
          cwd: cwd
          // Do NOT use shell: true - it causes escaping issues with special characters
        });

        let stdout = '';
        let stderr = '';

        proc.stdout.on('data', (data) => {
          const chunk = data.toString();
          stdout += chunk;
          this.logger.log('[Cline]', chunk);
        });

        proc.stderr.on('data', (data) => {
          const chunk = data.toString();
          stderr += chunk;
          this.logger.log('[Cline Error]', chunk);
        });

        proc.on('close', (code) => {
          if (code === 0) {
            resolve({
              success: true,
              output: stdout,
              stderr: stderr,
              exitCode: code
            });
          } else {
            resolve({
              success: false,
              output: stdout,
              error: stderr || `Process exited with code ${code}`,
              exitCode: code
            });
          }
        });

        proc.on('error', (error) => {
          resolve({
            success: false,
            error: error.message,
            exitCode: -1
          });
        });
      });
    } catch (error) {
      return {
        success: false,
        error: error.message
      };
    }
  }

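  // Usage sketch (illustrative, not part of the released package): sendText() resolves
  // with { success, output, stderr, exitCode } or { success: false, error, ... } rather
  // than rejecting, so callers branch on `success`:
  //
  //   const result = await manager.sendText('Add a /health endpoint', '/path/to/repo');
  //   if (result.success) {
  //     console.log('Cline output:', result.output);
  //   } else if (result.needsInstall) {
  //     console.error('Install the CLI first:', result.error);
  //   } else {
  //     console.error('Task failed:', result.error, 'exit code:', result.exitCode);
  //   }
  //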
  /**
   * Run Cline CLI in background and return process
   * @param {string} text - The instruction text
   * @param {string} cwd - Working directory
   * @param {Function} onOutput - Callback for stdout chunks
   * @param {Function} onError - Callback for stderr chunks
   * @returns {ChildProcess} The spawned process
   */
  runInBackground(text, cwd = process.cwd(), onOutput, onError) {
    // Use Cline CLI with --full-auto for actual code execution
    // The direct Ollama API approach doesn't execute code, only streams text
    // Cline CLI with proper Ollama config should work now
    const args = ['task', text, '--workspace', cwd, '--full-auto'];

    // Set up environment with API key based on configured provider
    const env = { ...process.env };

    // Check which provider is configured and ensure API key is in config file
    try {
      const configPath = path.join(os.homedir(), '.cline_cli', 'cline_cli_settings.json');
      if (fs.existsSync(configPath)) {
        let config = JSON.parse(fs.readFileSync(configPath, 'utf8'));
        const apiProvider = config.globalState?.apiProvider;

        // Log current configuration for debugging
        if (apiProvider === 'openai-native' && config.globalState?.openAiBaseUrl === 'http://localhost:11434/v1') {
          const modelId = config.globalState?.openAiModelId || config.globalState?.apiModelId;
          this.logger.log(`✓ Cline CLI config: Provider=${apiProvider}, Model=${modelId}, BaseURL=${config.globalState?.openAiBaseUrl}`);
        }

        if (apiProvider === 'openrouter') {
          // Ensure key is in config file (sync from saved file if needed)
          if (!config.globalState?.openRouterApiKey) {
            const savedKey = this.getSavedOpenRouterKey();
            if (savedKey && savedKey.trim().length > 0) {
              config.globalState = config.globalState || {};
              config.globalState.openRouterApiKey = savedKey;
              fs.writeFileSync(configPath, JSON.stringify(config, null, 4));
              this.logger.log('✓ Synced OpenRouter API key to Cline CLI config');
            }
          }

          // Set environment variables - OpenRouter uses OpenAI-compatible API
          const apiKey = config.globalState?.openRouterApiKey || this.getSavedOpenRouterKey() || process.env.OPENROUTER_API_KEY;
          if (apiKey) {
            env.OPENROUTER_API_KEY = apiKey;
            env.OPENAI_API_KEY = apiKey; // OpenRouter uses OpenAI-compatible API
          } else {
            this.logger.warn('Warning: OPENROUTER_API_KEY not set');
          }
        } else if (apiProvider === 'openai-native' && config.globalState?.openAiBaseUrl?.includes('generativelanguage.googleapis.com')) {
          // Gemini via OpenAI-compatible endpoint
          // Ensure key is in config file (sync from saved file if needed)
          if (!config.globalState?.openAiApiKey) {
            const savedKey = this.getSavedGeminiKey();
            if (savedKey && savedKey.trim().length > 0) {
              config.globalState = config.globalState || {};
              config.globalState.openAiApiKey = savedKey;
              fs.writeFileSync(configPath, JSON.stringify(config, null, 4));
              this.logger.log('✓ Synced Gemini API key to Cline CLI config');
            }
          }

          // Set OPENAI_API_KEY environment variable - Cline CLI with openai-native provider expects this
          const apiKey = config.globalState?.openAiApiKey || this.getSavedGeminiKey() || process.env.GEMINI_API_KEY;
          if (apiKey) {
            env.OPENAI_API_KEY = apiKey; // CRITICAL: Cline CLI needs this for openai-native provider
            // Also ensure it's definitely in the config file
            if (!config.globalState?.openAiApiKey) {
              config.globalState.openAiApiKey = apiKey;
              fs.writeFileSync(configPath, JSON.stringify(config, null, 4));
              this.logger.log('✓ Synced Gemini API key to Cline CLI config');
            }
          } else {
            this.logger.warn('Warning: Gemini API key not set');
          }
        } else if (apiProvider === 'openai-native' && config.globalState?.openAiBaseUrl === 'http://localhost:11434/v1') {
          // Ollama - uses openai-native provider with local endpoint
          // Cline CLI needs OPENAI_API_KEY env var even for Ollama (it validates the format)
          // Also ensure OPENAI_BASE_URL is set for Ollama
          env.OPENAI_API_KEY = 'sk-ollama-placeholder-key-for-local-api';
          env.OPENAI_BASE_URL = 'http://localhost:11434/v1';

          this.logger.log('✓ Set OPENAI_API_KEY environment variable for Ollama');

          // Verify Ollama service is running and model is accessible
          this.logger.log('✓ Verifying Ollama service is running...');
          const { execSync } = require('child_process');
          try {
            // Test basic connectivity
            execSync('curl -s http://localhost:11434/api/tags > /dev/null 2>&1', { timeout: 2000 });
            this.logger.log('✓ Ollama service is running');

            // Test if the configured model is accessible
            const modelId = config.globalState?.openAiModelId || config.globalState?.apiModelId;
            if (modelId) {
              try {
                // Test if model can be accessed via API
                const testResponse = execSync(`curl -s -X POST http://localhost:11434/api/generate -d '{"model":"${modelId}","prompt":"test","stream":false}' -H "Content-Type: application/json"`, { timeout: 3000 });
                this.logger.log(`✓ Model ${modelId} is accessible`);
              } catch (modelError) {
                this.logger.warn(`⚠ Model ${modelId} may not be accessible or may need to be pulled`);
                this.logger.warn(` Try: ollama pull ${modelId}`);
              }
            }
          } catch (error) {
            this.logger.warn('⚠ Ollama service may not be running - Cline CLI may fail');
            this.logger.warn(' Start it with: ollama serve');
            this.logger.warn(' Or launch Ollama.app from /Applications');
          }
        } else {
          // Default to Anthropic for backwards compatibility
          // Ensure key is in config file (sync from saved file if needed)
          if (!config.globalState?.anthropicApiKey) {
            const savedKey = this.getSavedAnthropicKey();
            if (savedKey && savedKey.trim().length > 0) {
              config.globalState = config.globalState || {};
              config.globalState.anthropicApiKey = savedKey;
              fs.writeFileSync(configPath, JSON.stringify(config, null, 4));
              this.logger.log('✓ Synced Anthropic API key to Cline CLI config');
            }
          }

          // Set environment variable as fallback
          const apiKey = config.globalState?.anthropicApiKey || this.getSavedAnthropicKey() || process.env.ANTHROPIC_API_KEY;
          if (apiKey) {
            env.ANTHROPIC_API_KEY = apiKey;
          } else {
            this.logger.warn('Warning: ANTHROPIC_API_KEY not set');
          }
        }
      } else {
        // No config file, try Anthropic for backwards compatibility
        const apiKey = this.getSavedAnthropicKey();
        if (apiKey) {
          env.ANTHROPIC_API_KEY = apiKey;
        } else if (!env.ANTHROPIC_API_KEY) {
          this.logger.warn('Warning: ANTHROPIC_API_KEY not set');
        }
      }
    } catch (error) {
      this.logger.warn('Warning: Failed to load API key from config:', error.message);
    }

    // Add error handler for spawn failures
    let proc;
    try {
      proc = spawn('cline-cli', args, {
        stdio: ['pipe', 'pipe', 'pipe'],
        env: env,
        cwd: cwd
        // DO NOT use shell: true - it causes escaping issues with special characters
      });

      // Handle spawn errors (e.g., cline-cli not found)
      proc.on('error', (spawnError) => {
        if (onError) {
          onError(`Failed to spawn Cline CLI: ${spawnError.message}\n`);
          if (spawnError.code === 'ENOENT') {
            onError('Cline CLI is not installed or not in PATH. Install with: npm install -g @yaegaki/cline-cli\n');
          }
        }
      });

      if (onOutput) {
        proc.stdout.on('data', (data) => {
          onOutput(data.toString());
        });
      }

      if (onError) {
        proc.stderr.on('data', (data) => {
          onError(data.toString());
        });
      }
    } catch (spawnError) {
      // If spawn itself fails (shouldn't happen, but handle it)
      if (onError) {
        onError(`Failed to start Cline CLI: ${spawnError.message}\n`);
      }
      // Return a mock process that will fail immediately
      const { EventEmitter } = require('events');
      const mockProc = new EventEmitter();
      mockProc.pid = null;
      mockProc.kill = () => {};
      mockProc.on = () => {};
      mockProc.stdout = { on: () => {} };
      mockProc.stderr = { on: () => {} };
      setTimeout(() => {
        mockProc.emit('close', 1);
      }, 0);
      return mockProc;
    }

    return proc;
  }

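  // Usage sketch (illustrative, not part of the released package): runInBackground()
  // returns the spawned ChildProcess (or a mock with the same surface if spawning fails),
  // so callers stream output through the callbacks and cancel with kill():
  //
  //   const proc = manager.runInBackground(
  //     'Implement the next requirement',
  //     '/path/to/repo',
  //     (chunk) => process.stdout.write(chunk),
  //     (chunk) => process.stderr.write(chunk)
  //   );
  //   proc.on('close', (code) => console.log('Cline CLI exited with code', code));
  //   // later, if needed: proc.kill();
  //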
  /**
   * Run Ollama directly via HTTP API (bypasses Cline CLI)
   * This is used when Cline CLI has issues with Ollama API key validation
   * @param {string} text - The instruction text
   * @param {string} cwd - Working directory
   * @param {Function} onOutput - Callback for response chunks
   * @param {Function} onError - Callback for errors
   * @returns {Object} Mock process object with kill() method
   */
  async runOllamaDirectly(text, cwd = process.cwd(), onOutput, onError) {
    const https = require('https');
    const http = require('http');

    // Get configured model from Cline CLI config
    const configPath = path.join(os.homedir(), '.cline_cli', 'cline_cli_settings.json');
    let modelId = 'deepseek-coder:33b'; // default

    try {
      if (fs.existsSync(configPath)) {
        const config = JSON.parse(fs.readFileSync(configPath, 'utf8'));
        modelId = config.globalState?.openAiModelId || config.globalState?.apiModelId || modelId;
      }
    } catch (error) {
      this.logger.warn('Could not read model from config, using default:', modelId);
    }

    // Create system prompt that mimics Cline's behavior with tool capabilities
    const systemPrompt = `You are Cline, an autonomous AI coding assistant with FULL access to the filesystem and command execution.

CAPABILITIES - You CAN and MUST:
✓ Read any file in the project
✓ Write/edit files to implement features and fix bugs
✓ Execute bash commands to test changes
✓ Install packages and dependencies
✓ Run tests and verify functionality
✓ Take screenshots to verify UI changes

WORKING DIRECTORY: ${cwd}

TASK WORKFLOW - Follow ALL stages in order:
1. PREPARE - Read relevant files, understand requirements, review code structure
2. ACT - Implement the actual functionality with complete, working code
3. CLEAN UP - Apply DRY principles, lint, remove temp files, fix any issues
4. VERIFY - Test implementation, run automation, take screenshots to confirm it works
5. DONE - Update status ONLY when everything is working perfectly

CRITICAL RULES:
❌ NO placeholder code, NO "// rest of code" comments, NO incomplete implementations
❌ NO saying "I don't have access to files" - YOU DO, just read/write them
✓ ALWAYS provide COMPLETE, FUNCTIONAL code
✓ ACTUALLY run commands and verify they work
✓ UPDATE status in REQUIREMENTS file at EACH stage
✓ NEVER mark DONE until you've VERIFIED everything works

RESPONSE FORMAT:
To read a file: "Let me read <filename>..." then show the content
To write a file: "I'll update <filename>..." then show the complete new content
To run command: "Running: <command>..." then show the output
To verify: "Testing the changes..." then show test results

CURRENT TASK: ${text}

Begin with PREPARE stage. Read necessary files and understand the requirements before coding.`;

    const requestBody = JSON.stringify({
      model: modelId,
      messages: [
        { role: 'system', content: systemPrompt },
        { role: 'user', content: text }
      ],
      stream: true,
      temperature: 0.7,
      max_tokens: 4000
    });

    return new Promise((resolve, reject) => {
      let aborted = false;
      let response = '';

      const options = {
        hostname: 'localhost',
        port: 11434,
        path: '/v1/chat/completions',
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
          'Content-Length': Buffer.byteLength(requestBody)
        }
      };

      const req = http.request(options, (res) => {
        res.on('data', (chunk) => {
          if (aborted) return;

          const lines = chunk.toString().split('\n').filter(line => line.trim());

          for (const line of lines) {
            if (line.startsWith('data: ')) {
              const data = line.slice(6);
              if (data === '[DONE]') continue;

              try {
                const parsed = JSON.parse(data);
                const content = parsed.choices?.[0]?.delta?.content;
                if (content && onOutput) {
                  response += content;
                  onOutput(content);
                }
              } catch (e) {
                // Ignore parse errors for streaming chunks
              }
            }
          }
        });

        res.on('end', () => {
          if (!aborted) {
            resolve({
              killed: false,
              kill: () => { aborted = true; },
              on: () => {},
              stdout: { on: () => {} },
              stderr: { on: () => {} }
            });
          }
        });
      });

      req.on('error', (error) => {
        if (onError) {
          onError(`Ollama API error: ${error.message}\n`);
        }
        reject(error);
      });

      req.write(requestBody);
      req.end();

      // Return mock process object immediately
      resolve({
        killed: false,
        kill: () => {
          aborted = true;
          req.destroy();
        },
        on: (event, handler) => {
          if (event === 'close') {
            // Call handler when request completes
            req.on('close', () => handler(0));
          }
        },
        stdout: { on: () => {} },
        stderr: { on: () => {} }
      });
    });
  }
}

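// Usage sketch (illustrative, not part of the released package): given a ClineCLIManager
// instance, runOllamaDirectly() resolves with a mock process handle as soon as the
// streaming request is dispatched; text from Ollama's OpenAI-compatible endpoint arrives
// through onOutput, and handle.kill() aborts the underlying HTTP request:
//
//   const handle = await manager.runOllamaDirectly(
//     'Summarize the repository structure',
//     '/path/to/repo',
//     (chunk) => process.stdout.write(chunk),
//     (err) => process.stderr.write(err)
//   );
//   // handle.kill();
//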
module.exports = { ClineCLIManager };