erosolar-cli 1.7.14 → 1.7.16
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/core/responseVerifier.d.ts +79 -0
- package/dist/core/responseVerifier.d.ts.map +1 -0
- package/dist/core/responseVerifier.js +443 -0
- package/dist/core/responseVerifier.js.map +1 -0
- package/dist/shell/interactiveShell.d.ts +10 -0
- package/dist/shell/interactiveShell.d.ts.map +1 -1
- package/dist/shell/interactiveShell.js +80 -0
- package/dist/shell/interactiveShell.js.map +1 -1
- package/dist/ui/ShellUIAdapter.d.ts +3 -0
- package/dist/ui/ShellUIAdapter.d.ts.map +1 -1
- package/dist/ui/ShellUIAdapter.js +4 -10
- package/dist/ui/ShellUIAdapter.js.map +1 -1
- package/dist/ui/persistentPrompt.d.ts +4 -0
- package/dist/ui/persistentPrompt.d.ts.map +1 -1
- package/dist/ui/persistentPrompt.js +10 -11
- package/dist/ui/persistentPrompt.js.map +1 -1
- package/package.json +1 -1
- package/dist/bin/core/agent.js +0 -362
- package/dist/bin/core/agentProfileManifest.js +0 -187
- package/dist/bin/core/agentProfiles.js +0 -34
- package/dist/bin/core/agentRulebook.js +0 -135
- package/dist/bin/core/agentSchemaLoader.js +0 -233
- package/dist/bin/core/contextManager.js +0 -412
- package/dist/bin/core/contextWindow.js +0 -122
- package/dist/bin/core/customCommands.js +0 -80
- package/dist/bin/core/errors/apiKeyErrors.js +0 -114
- package/dist/bin/core/errors/errorTypes.js +0 -340
- package/dist/bin/core/errors/safetyValidator.js +0 -304
- package/dist/bin/core/errors.js +0 -32
- package/dist/bin/core/modelDiscovery.js +0 -755
- package/dist/bin/core/preferences.js +0 -224
- package/dist/bin/core/schemaValidator.js +0 -92
- package/dist/bin/core/secretStore.js +0 -199
- package/dist/bin/core/sessionStore.js +0 -187
- package/dist/bin/core/toolRuntime.js +0 -290
- package/dist/bin/core/types.js +0 -1
- package/dist/bin/shell/bracketedPasteManager.js +0 -350
- package/dist/bin/shell/fileChangeTracker.js +0 -65
- package/dist/bin/shell/interactiveShell.js +0 -2908
- package/dist/bin/shell/liveStatus.js +0 -78
- package/dist/bin/shell/shellApp.js +0 -290
- package/dist/bin/shell/systemPrompt.js +0 -60
- package/dist/bin/shell/updateManager.js +0 -108
- package/dist/bin/ui/ShellUIAdapter.js +0 -459
- package/dist/bin/ui/UnifiedUIController.js +0 -183
- package/dist/bin/ui/animation/AnimationScheduler.js +0 -430
- package/dist/bin/ui/codeHighlighter.js +0 -854
- package/dist/bin/ui/designSystem.js +0 -121
- package/dist/bin/ui/display.js +0 -1222
- package/dist/bin/ui/interrupts/InterruptManager.js +0 -437
- package/dist/bin/ui/layout.js +0 -139
- package/dist/bin/ui/orchestration/StatusOrchestrator.js +0 -403
- package/dist/bin/ui/outputMode.js +0 -38
- package/dist/bin/ui/persistentPrompt.js +0 -183
- package/dist/bin/ui/richText.js +0 -338
- package/dist/bin/ui/shortcutsHelp.js +0 -87
- package/dist/bin/ui/telemetry/UITelemetry.js +0 -443
- package/dist/bin/ui/textHighlighter.js +0 -210
- package/dist/bin/ui/theme.js +0 -116
- package/dist/bin/ui/toolDisplay.js +0 -423
- package/dist/bin/ui/toolDisplayAdapter.js +0 -357
|
@@ -1,755 +0,0 @@
|
|
|
1
|
-
/**
|
|
2
|
-
* Model discovery system for auto-detecting new models from providers.
|
|
3
|
-
*
|
|
4
|
-
* This module queries provider APIs to discover available models and caches
|
|
5
|
-
* them for use alongside the static model schema. It never modifies the
|
|
6
|
-
* static schema - discoveries are stored separately and merged at runtime.
|
|
7
|
-
*/
|
|
8
|
-
import { readFileSync, writeFileSync, existsSync } from 'node:fs';
|
|
9
|
-
import { mkdir } from 'node:fs/promises';
|
|
10
|
-
import { join } from 'node:path';
|
|
11
|
-
import { homedir } from 'node:os';
|
|
12
|
-
/**
 * Discovered model cache file location
 */
// Cache lives under the user's home directory so it persists across runs
// and across working directories.
const CACHE_DIR = join(homedir(), '.erosolar');
const CACHE_FILE = join(CACHE_DIR, 'discovered-models.json');
/**
 * Cache expiration time (24 hours)
 */
// 24 h * 60 min * 60 s * 1000 ms
const CACHE_EXPIRATION_MS = 24 * 60 * 60 * 1000;
|
|
21
|
-
/**
 * Get cached discovered models.
 *
 * Reads the on-disk cache written by `saveDiscoveredModels` and returns its
 * model list, or an empty array when the cache is missing, expired,
 * unreadable, or malformed.
 *
 * @returns {Array} cached model entries (never undefined)
 */
export function getCachedDiscoveredModels() {
    try {
        if (!existsSync(CACHE_FILE)) {
            return [];
        }
        const raw = readFileSync(CACHE_FILE, 'utf-8');
        const cache = JSON.parse(raw);
        const lastUpdated = new Date(cache.lastUpdated).getTime();
        // Treat a missing/unparseable timestamp as expired. Previously a NaN
        // timestamp made `now - lastUpdated > CACHE_EXPIRATION_MS` false, so a
        // corrupt cache was served as if it were fresh.
        if (!Number.isFinite(lastUpdated) || Date.now() - lastUpdated > CACHE_EXPIRATION_MS) {
            return [];
        }
        // Guard against a malformed payload: callers expect an array here.
        return Array.isArray(cache.models) ? cache.models : [];
    }
    catch (error) {
        console.warn('Failed to read discovered models cache:', error);
        return [];
    }
}
|
|
44
|
-
/**
 * Save discovered models to cache.
 *
 * Ensures the cache directory exists, then writes a versioned JSON payload
 * with an ISO timestamp. Failures are logged and swallowed — caching is
 * best-effort and must never break discovery.
 *
 * @param {Array} models - model entries to persist.
 */
async function saveDiscoveredModels(models) {
    try {
        await mkdir(CACHE_DIR, { recursive: true });
        const payload = {
            version: '1.0.0',
            lastUpdated: new Date().toISOString(),
            models,
        };
        writeFileSync(CACHE_FILE, JSON.stringify(payload, null, 2), 'utf-8');
    }
    catch (error) {
        console.warn('Failed to save discovered models cache:', error);
    }
}
|
|
61
|
-
/**
 * Discover models from OpenAI.
 *
 * Queries the /v1/models endpoint and keeps only the chat-capable families
 * (gpt-*, o1-*, o3-*). Never throws: failures are reported via the returned
 * result object's `error` field.
 *
 * @param {string} apiKey - OpenAI API key, sent as a Bearer token.
 * @returns {Promise<{provider: string, success: boolean, models: Array, error?: string}>}
 */
async function discoverOpenAIModels(apiKey) {
    const provider = 'openai';
    try {
        const response = await fetch('https://api.openai.com/v1/models', {
            headers: { 'Authorization': `Bearer ${apiKey}` },
        });
        if (!response.ok) {
            throw new Error(`API returned ${response.status}: ${response.statusText}`);
        }
        const payload = await response.json();
        const isChatFamily = (id) => id.startsWith('gpt-') || id.startsWith('o1-') || id.startsWith('o3-');
        const models = [];
        for (const entry of payload.data) {
            if (!isChatFamily(entry.id)) {
                continue;
            }
            models.push({
                id: entry.id,
                label: entry.id,
                provider,
                description: `OpenAI ${entry.id} (auto-discovered)`,
                capabilities: ['chat', 'tools', 'streaming'],
            });
        }
        return { provider, success: true, models };
    }
    catch (error) {
        const message = error instanceof Error ? error.message : String(error);
        return { provider, success: false, models: [], error: message };
    }
}
|
|
103
|
-
/**
 * Discover models from Anthropic.
 *
 * Queries the /v1/models endpoint (versioned via the anthropic-version
 * header) and maps each entry of type 'model' to a ModelConfig. Never
 * throws: failures come back in the result's `error` field.
 *
 * @param {string} apiKey - Anthropic API key, sent via the x-api-key header.
 * @returns {Promise<{provider: string, success: boolean, models: Array, error?: string}>}
 */
async function discoverAnthropicModels(apiKey) {
    const provider = 'anthropic';
    try {
        const response = await fetch('https://api.anthropic.com/v1/models', {
            headers: {
                'x-api-key': apiKey,
                'anthropic-version': '2023-06-01',
            },
        });
        if (!response.ok) {
            throw new Error(`API returned ${response.status}: ${response.statusText}`);
        }
        const payload = await response.json();
        const models = [];
        for (const entry of payload.data) {
            if (entry.type !== 'model') {
                continue;
            }
            const label = entry.display_name || entry.id;
            models.push({
                id: entry.id,
                label,
                provider,
                description: `Anthropic ${label} (auto-discovered)`,
                capabilities: ['chat', 'reasoning', 'tools', 'streaming'],
            });
        }
        return { provider, success: true, models };
    }
    catch (error) {
        const message = error instanceof Error ? error.message : String(error);
        return { provider, success: false, models: [], error: message };
    }
}
|
|
143
|
-
/**
 * Discover models from Google Gemini.
 *
 * Note: Google's models API often requires special permissions, so this
 * function degrades gracefully: any HTTP failure, timeout, or empty result
 * yields the hard-coded `knownModels` list and still reports success.
 *
 * @param {string} apiKey - Gemini API key, passed as a query parameter.
 * @returns {Promise<{provider: string, success: boolean, models: Array}>}
 */
async function discoverGoogleModels(apiKey) {
    const provider = 'google';
    // Known Google Gemini models (fallback if API doesn't work) - Updated Nov 2025
    const knownModels = [
        { id: 'gemini-3.0-pro-preview', label: 'Gemini 3.0 Pro', provider, description: 'Latest Gemini 3.0 Pro', capabilities: ['chat', 'reasoning', 'tools', 'streaming', 'multimodal'] },
        { id: 'gemini-3.0-flash-preview', label: 'Gemini 3.0 Flash', provider, description: 'Latest Gemini 3.0 Flash', capabilities: ['chat', 'tools', 'streaming', 'multimodal'] },
        { id: 'gemini-2.5-pro-preview', label: 'Gemini 2.5 Pro', provider, description: 'Gemini 2.5 Pro', capabilities: ['chat', 'reasoning', 'tools', 'streaming', 'multimodal'] },
        { id: 'gemini-2.5-flash-preview', label: 'Gemini 2.5 Flash', provider, description: 'Gemini 2.5 Flash', capabilities: ['chat', 'tools', 'streaming', 'multimodal'] },
    ];
    try {
        const response = await fetch(`https://generativelanguage.googleapis.com/v1beta/models?key=${apiKey}`, {
            signal: AbortSignal.timeout(5000),
        });
        if (!response.ok) {
            // API access restricted - fall back to the known list, still success.
            return { provider, success: true, models: knownModels };
        }
        const payload = await response.json();
        const discovered = [];
        for (const entry of payload.models) {
            const usable = entry.name.includes('gemini') &&
                entry.supportedGenerationMethods?.includes('generateContent');
            if (!usable) {
                continue;
            }
            const id = entry.name.replace('models/', '');
            discovered.push({
                id,
                label: id,
                provider,
                description: `${entry.displayName} (auto-discovered)`,
                capabilities: ['chat', 'reasoning', 'tools', 'streaming', 'multimodal'],
            });
        }
        return {
            provider,
            success: true,
            models: discovered.length > 0 ? discovered : knownModels,
        };
    }
    catch {
        // Network error or timeout - serve the known models.
        return { provider, success: true, models: knownModels };
    }
}
|
|
198
|
-
/**
 * Discover models from DeepSeek (OpenAI-compatible).
 *
 * Lists every model the /v1/models endpoint returns (no filtering). Never
 * throws: failures are reported via the result's `error` field.
 *
 * @param {string} apiKey - DeepSeek API key, sent as a Bearer token.
 * @returns {Promise<{provider: string, success: boolean, models: Array, error?: string}>}
 */
async function discoverDeepSeekModels(apiKey) {
    const provider = 'deepseek';
    try {
        const response = await fetch('https://api.deepseek.com/v1/models', {
            headers: { 'Authorization': `Bearer ${apiKey}` },
        });
        if (!response.ok) {
            throw new Error(`API returned ${response.status}: ${response.statusText}`);
        }
        const payload = await response.json();
        const toConfig = (entry) => ({
            id: entry.id,
            label: entry.id,
            provider,
            description: `DeepSeek ${entry.id} (auto-discovered)`,
            capabilities: ['chat', 'reasoning', 'tools', 'streaming'],
        });
        return { provider, success: true, models: payload.data.map(toConfig) };
    }
    catch (error) {
        const message = error instanceof Error ? error.message : String(error);
        return { provider, success: false, models: [], error: message };
    }
}
|
|
235
|
-
/**
 * Discover models from xAI (OpenAI-compatible).
 *
 * Lists every model the /v1/models endpoint returns (no filtering). Never
 * throws: failures are reported via the result's `error` field.
 *
 * @param {string} apiKey - xAI API key, sent as a Bearer token.
 * @returns {Promise<{provider: string, success: boolean, models: Array, error?: string}>}
 */
async function discoverXAIModels(apiKey) {
    const provider = 'xai';
    try {
        const response = await fetch('https://api.x.ai/v1/models', {
            headers: { 'Authorization': `Bearer ${apiKey}` },
        });
        if (!response.ok) {
            throw new Error(`API returned ${response.status}: ${response.statusText}`);
        }
        const payload = await response.json();
        const toConfig = (entry) => ({
            id: entry.id,
            label: entry.id,
            provider,
            description: `xAI ${entry.id} (auto-discovered)`,
            capabilities: ['chat', 'reasoning', 'tools', 'streaming'],
        });
        return { provider, success: true, models: payload.data.map(toConfig) };
    }
    catch (error) {
        const message = error instanceof Error ? error.message : String(error);
        return { provider, success: false, models: [], error: message };
    }
}
|
|
272
|
-
/**
 * Discover models from Ollama (local).
 *
 * Queries the local Ollama server's /api/tags endpoint with a short 2 s
 * timeout so startup is not blocked when the daemon is not running. The
 * base URL is taken from OLLAMA_BASE_URL, defaulting to localhost:11434.
 *
 * @returns {Promise<{provider: string, success: boolean, models: Array, error?: string}>}
 */
async function discoverOllamaModels() {
    const provider = 'ollama';
    const baseURL = process.env['OLLAMA_BASE_URL'] || 'http://localhost:11434';
    try {
        const response = await fetch(`${baseURL}/api/tags`, {
            signal: AbortSignal.timeout(2000), // 2 second timeout
        });
        if (!response.ok) {
            throw new Error(`API returned ${response.status}: ${response.statusText}`);
        }
        const payload = await response.json();
        const entries = payload.models || [];
        const models = entries.map((entry) => ({
            id: entry.name,
            label: entry.name,
            provider,
            description: `Local Ollama model: ${entry.name} (auto-discovered)`,
            capabilities: ['chat', 'tools', 'streaming'],
        }));
        return { provider, success: true, models };
    }
    catch (error) {
        const message = error instanceof Error ? error.message : String(error);
        return { provider, success: false, models: [], error: message };
    }
}
|
|
308
|
-
/**
 * Discover models from all configured providers.
 *
 * Runs each provider's discovery sequentially (skipping providers whose
 * API-key env var is unset), always probes the local Ollama server, merges
 * every successful result, persists the merged list via
 * `saveDiscoveredModels`, and returns a summary report.
 *
 * @returns {Promise<{success: boolean, timestamp: string, results: Array,
 *   totalModelsDiscovered: number, errors: string[]}>} `success` is true
 *   only when no provider reported an error.
 */
export async function discoverAllModels() {
    const results = [];
    const errors = [];
    let totalModelsDiscovered = 0;
    // Discover from each provider if API key is available
    const providers = [
        { id: 'openai', envVar: 'OPENAI_API_KEY', discover: discoverOpenAIModels },
        { id: 'anthropic', envVar: 'ANTHROPIC_API_KEY', discover: discoverAnthropicModels },
        { id: 'google', envVar: 'GEMINI_API_KEY', discover: discoverGoogleModels },
        { id: 'deepseek', envVar: 'DEEPSEEK_API_KEY', discover: discoverDeepSeekModels },
        { id: 'xai', envVar: 'XAI_API_KEY', discover: discoverXAIModels },
    ];
    for (const provider of providers) {
        const apiKey = process.env[provider.envVar];
        if (!apiKey) {
            // Missing key is recorded as a failed result, not an exception.
            results.push({
                provider: provider.id,
                success: false,
                models: [],
                error: `API key not configured (${provider.envVar})`,
            });
            continue;
        }
        try {
            const result = await provider.discover(apiKey);
            results.push(result);
            if (result.success) {
                totalModelsDiscovered += result.models.length;
            }
            else if (result.error) {
                errors.push(`${provider.id}: ${result.error}`);
            }
        }
        catch (error) {
            // Defensive: discover* functions shouldn't throw, but if one does
            // the failure is folded into the report instead of aborting.
            const errorMessage = error instanceof Error ? error.message : String(error);
            errors.push(`${provider.id}: ${errorMessage}`);
            results.push({
                provider: provider.id,
                success: false,
                models: [],
                error: errorMessage,
            });
        }
    }
    // Always try to discover Ollama models (no API key required)
    try {
        const ollamaResult = await discoverOllamaModels();
        results.push(ollamaResult);
        if (ollamaResult.success) {
            totalModelsDiscovered += ollamaResult.models.length;
        }
        else if (ollamaResult.error) {
            // Don't add Ollama errors to the main error list if it's just not running
            if (!ollamaResult.error.includes('ECONNREFUSED') && !ollamaResult.error.includes('fetch failed')) {
                errors.push(`ollama: ${ollamaResult.error}`);
            }
        }
    }
    catch (error) {
        // Silently ignore Ollama connection errors (it's often not running)
    }
    // Collect all discovered models
    const allModels = results
        .filter(r => r.success)
        .flatMap(r => r.models);
    // Save to cache
    if (allModels.length > 0) {
        await saveDiscoveredModels(allModels);
    }
    return {
        success: errors.length === 0,
        timestamp: new Date().toISOString(),
        results,
        totalModelsDiscovered,
        errors,
    };
}
|
|
388
|
-
/**
 * Clear the discovered models cache.
 *
 * Overwrites the cache file with an empty model list (it does not delete
 * the file). A missing file is a no-op; write failures are logged and
 * swallowed.
 */
export function clearDiscoveredModelsCache() {
    try {
        if (!existsSync(CACHE_FILE)) {
            return;
        }
        const emptied = {
            version: '1.0.0',
            lastUpdated: new Date().toISOString(),
            models: [],
        };
        writeFileSync(CACHE_FILE, JSON.stringify(emptied, null, 2), 'utf-8');
    }
    catch (error) {
        console.warn('Failed to clear discovered models cache:', error);
    }
}
|
|
401
|
-
/**
 * Supported providers with their environment variable requirements.
 *
 * `envVar` is the primary API-key variable; `altEnvVars` (where present)
 * lists accepted alternatives. `defaultLatestModel` is the fallback model
 * used when live discovery fails or returns nothing.
 */
const PROVIDER_CONFIGS = [
    { id: 'anthropic', name: 'Anthropic', envVar: 'ANTHROPIC_API_KEY', defaultLatestModel: 'claude-opus-4-5-20251101' },
    { id: 'openai', name: 'OpenAI', envVar: 'OPENAI_API_KEY', defaultLatestModel: 'gpt-5.1-codex-max' },
    { id: 'google', name: 'Google', envVar: 'GOOGLE_API_KEY', altEnvVars: ['GEMINI_API_KEY'], defaultLatestModel: 'gemini-3.0-pro-preview' },
    { id: 'deepseek', name: 'DeepSeek', envVar: 'DEEPSEEK_API_KEY', defaultLatestModel: 'deepseek-reasoner' },
    { id: 'xai', name: 'xAI', envVar: 'XAI_API_KEY', defaultLatestModel: 'grok-4.1-reasoning-fast' },
    // Ollama needs no API key; envVar here is only the optional base-URL override.
    { id: 'ollama', name: 'Ollama', envVar: 'OLLAMA_BASE_URL', defaultLatestModel: 'llama3.3:70b' },
];
|
|
412
|
-
/**
 * Model priority rankings for selecting the "best" model.
 *
 * Higher number = preferred. Keys are matched exactly first, then as
 * prefixes in insertion order (see getModelPriority), so insertion order
 * matters: more specific ids must come before shorter prefixes.
 */
const MODEL_PRIORITIES = {
    openai: {
        'gpt-5.1-codex-max': 100,
        'gpt-5.1-codex': 98,
        'gpt-5.1': 95,
        'o3': 93,
        'o3-mini': 90,
        'o1-pro': 88,
        'o1': 85,
        'o1-mini': 82,
        'gpt-4.1': 80,
        'gpt-4o': 75,
        'gpt-4-turbo': 70,
        'gpt-4o-mini': 65,
        'gpt-4': 60,
        'gpt-3.5-turbo': 30,
    },
    anthropic: {
        'claude-opus-4-5': 100,
        'claude-opus-4-5-20251101': 100,
        'claude-sonnet-4-5': 98,
        'claude-sonnet-4-5-20250929': 98,
        'claude-opus-4-20250514': 95,
        'claude-opus-4': 95,
        'claude-sonnet-4': 92,
        'claude-3-5-sonnet-20241022': 88,
        'claude-3-5-haiku-20241022': 85,
        'claude-3-opus-20240229': 80,
        'claude-3-sonnet-20240229': 75,
        'claude-3-haiku-20240307': 70,
    },
    google: {
        'gemini-3.0-pro': 100,
        'gemini-3.0-flash': 98,
        'gemini-2.5-pro': 95,
        'gemini-2.5-flash': 93,
        'gemini-2.0-flash': 90,
        'gemini-2.0': 88,
        'gemini-1.5-pro': 80,
        'gemini-1.5-flash': 75,
        'gemini-1.0': 50,
    },
    deepseek: {
        'deepseek-reasoner': 100,
        'deepseek-chat': 90,
        'deepseek-coder': 85,
    },
    xai: {
        'grok-4.1-reasoning': 100,
        'grok-4.1-reasoning-fast': 98,
        'grok-4.1': 95,
        'grok-4': 93,
        'grok-3': 90,
        'grok-2': 85,
        'grok-2-mini': 80,
        'grok-beta': 60,
    },
    // NOTE: no entry for 'ollama' — local models all rank 0 (list order wins).
};
|
|
473
|
-
/**
 * Get model priority for sorting.
 *
 * Looks up the model in MODEL_PRIORITIES: exact id match first, then the
 * first key (in insertion order) that is a prefix of the id. Unknown
 * providers or models rank 0.
 *
 * @param {string} provider - provider id keying into MODEL_PRIORITIES.
 * @param {string} modelId - model identifier to rank.
 * @returns {number} priority score (higher is better, 0 = unknown).
 */
function getModelPriority(provider, modelId) {
    const priorities = MODEL_PRIORITIES[provider];
    if (!priorities) {
        return 0;
    }
    const exact = priorities[modelId];
    if (exact !== undefined) {
        return exact;
    }
    const prefixHit = Object.entries(priorities)
        .find(([prefix]) => modelId.startsWith(prefix));
    return prefixHit ? prefixHit[1] : 0;
}
|
|
492
|
-
/**
 * Sort models by priority (best first).
 *
 * Returns a new array; the input is not mutated.
 *
 * @param {string} provider - provider id used for priority lookup.
 * @param {string[]} models - model ids to sort.
 * @returns {string[]} model ids ordered by descending priority.
 */
export function sortModelsByPriority(provider, models) {
    // Precompute each id's priority once: the comparator previously re-ran
    // getModelPriority's linear prefix scan on every comparison.
    const priorityOf = new Map(models.map(id => [id, getModelPriority(provider, id)]));
    return [...models].sort((a, b) => priorityOf.get(b) - priorityOf.get(a));
}
|
|
502
|
-
/**
 * Get the best/latest model for a provider.
 *
 * With a non-empty list, returns the highest-priority id; with an empty
 * list, falls back to the provider's defaultLatestModel (or '' for an
 * unknown provider).
 *
 * @param {string} provider - provider id.
 * @param {string[]} models - candidate model ids (may be empty).
 * @returns {string} best model id, or '' when nothing is known.
 */
export function getBestModel(provider, models) {
    if (models.length === 0) {
        const fallback = PROVIDER_CONFIGS.find(p => p.id === provider)?.defaultLatestModel;
        return fallback || '';
    }
    const [best] = sortModelsByPriority(provider, models);
    return best ?? models[0] ?? '';
}
|
|
513
|
-
/**
 * Check if a provider is configured (has API key or is accessible).
 *
 * Ollama always reports true here (no key needed; actual reachability is
 * checked elsewhere). Other providers are configured when their primary
 * env var, or any alternative env var, is set.
 *
 * @param {string} providerId - provider id from PROVIDER_CONFIGS.
 * @returns {boolean} true when the provider can be used.
 */
export function isProviderConfigured(providerId) {
    const config = PROVIDER_CONFIGS.find(p => p.id === providerId);
    if (!config) {
        return false;
    }
    // Ollama is special - it's available if the server is running (no API key
    // needed); the actual connection check happens in getConfiguredProviders.
    if (providerId === 'ollama') {
        return true;
    }
    const candidateVars = [config.envVar, ...(config.altEnvVars ?? [])];
    return candidateVars.some(name => !!process.env[name]);
}
|
|
540
|
-
/**
 * Get all providers with their configuration status.
 *
 * @returns {Array<{id: string, name: string, envVar: string,
 *   configured: boolean, latestModel: string}>} one entry per provider in
 *   PROVIDER_CONFIGS order.
 */
export function getProvidersStatus() {
    return PROVIDER_CONFIGS.map(config => {
        let configured = false;
        if (config.id === 'ollama') {
            // We can't check synchronously whether the Ollama server is
            // running, so always present it as an option. (The original
            // `!!process.env['OLLAMA_BASE_URL'] || true` was dead code that
            // evaluated to true unconditionally.)
            configured = true;
        }
        else {
            configured = !!process.env[config.envVar];
            if (!configured && config.altEnvVars) {
                configured = config.altEnvVars.some(v => !!process.env[v]);
            }
        }
        return {
            id: config.id,
            name: config.name,
            envVar: config.envVar,
            configured,
            latestModel: config.defaultLatestModel,
        };
    });
}
|
|
566
|
-
/**
 * Get list of configured providers (with valid API keys).
 *
 * @returns {Array} provider status entries where `configured` is true.
 */
export function getConfiguredProviders() {
    const statuses = getProvidersStatus();
    return statuses.filter(({ configured }) => configured);
}
|
|
572
|
-
/**
 * Get list of unconfigured providers.
 *
 * @returns {Array} provider status entries where `configured` is false.
 */
export function getUnconfiguredProviders() {
    const statuses = getProvidersStatus();
    return statuses.filter(({ configured }) => !configured);
}
|
|
578
|
-
/**
 * Get the first available provider (for auto-selection).
 *
 * Walks the preference order anthropic → openai → google → deepseek → xai
 * and returns the first configured cloud provider; Ollama is only ever the
 * last-resort fallback.
 *
 * @returns {object|null} provider status entry, or null when none is configured.
 */
export function getFirstAvailableProvider() {
    const configured = getConfiguredProviders();
    // Prefer in this order: anthropic, openai, google, deepseek, xai, ollama
    const preferenceOrder = ['anthropic', 'openai', 'google', 'deepseek', 'xai', 'ollama'];
    for (const providerId of preferenceOrder) {
        if (providerId === 'ollama') {
            continue; // handled by the fallback below
        }
        const match = configured.find(p => p.id === providerId);
        if (match) {
            return match;
        }
    }
    // Fall back to Ollama if nothing else is configured
    return configured.find(p => p.id === 'ollama') || null;
}
|
|
594
|
-
/**
 * Get latest model for a provider from cache or defaults.
 *
 * Prefers the highest-priority model among cached discoveries for this
 * provider; falls back to the provider's defaultLatestModel when the cache
 * has nothing for it.
 *
 * @param {string} providerId - provider id.
 * @returns {string} model id, or '' for an unknown provider with no cache.
 */
export function getLatestModelForProvider(providerId) {
    // Check cache first
    const cachedIds = getCachedDiscoveredModels()
        .filter(m => m.provider === providerId)
        .map(m => m.id);
    if (cachedIds.length > 0) {
        return getBestModel(providerId, cachedIds);
    }
    // Fall back to default
    const config = PROVIDER_CONFIGS.find(p => p.id === providerId);
    return config?.defaultLatestModel || '';
}
|
|
609
|
-
/**
 * Quick model-list fetch for a single provider.
 *
 * Queries the provider's model-listing API with a short timeout and returns
 * the raw model ids (filtered to chat-capable families where applicable).
 * Returns [] on any failure, non-OK response, or unknown provider — never
 * throws. (Previous doc said "best model or null"; it returns a string[].)
 *
 * @param {string} providerId - one of 'openai' | 'anthropic' | 'google' | 'deepseek' | 'xai'.
 * @param {string} apiKey - API key for that provider.
 * @param {number} [timeoutMs=3000] - per-request abort timeout.
 * @returns {Promise<string[]>} model ids, possibly empty.
 */
async function quickFetchProviderModels(providerId, apiKey, timeoutMs = 3000) {
    try {
        switch (providerId) {
            case 'openai': {
                const response = await fetch('https://api.openai.com/v1/models', {
                    headers: { 'Authorization': `Bearer ${apiKey}` },
                    signal: AbortSignal.timeout(timeoutMs),
                });
                if (!response.ok)
                    return [];
                const data = await response.json();
                // Keep only chat-capable families (matches discoverOpenAIModels).
                return data.data
                    .filter(m => m.id.startsWith('gpt-') || m.id.startsWith('o1-') || m.id.startsWith('o3-'))
                    .map(m => m.id);
            }
            case 'anthropic': {
                const response = await fetch('https://api.anthropic.com/v1/models', {
                    headers: {
                        'x-api-key': apiKey,
                        'anthropic-version': '2023-06-01',
                    },
                    signal: AbortSignal.timeout(timeoutMs),
                });
                if (!response.ok)
                    return [];
                const data = await response.json();
                return data.data.filter(m => m.type === 'model').map(m => m.id);
            }
            case 'google': {
                // Gemini API authenticates via a query parameter, not a header.
                const response = await fetch(`https://generativelanguage.googleapis.com/v1beta/models?key=${apiKey}`, {
                    signal: AbortSignal.timeout(timeoutMs),
                });
                if (!response.ok)
                    return [];
                const data = await response.json();
                return data.models
                    .filter(m => m.name.includes('gemini') && m.supportedGenerationMethods?.includes('generateContent'))
                    .map(m => m.name.replace('models/', ''));
            }
            case 'deepseek': {
                const response = await fetch('https://api.deepseek.com/v1/models', {
                    headers: { 'Authorization': `Bearer ${apiKey}` },
                    signal: AbortSignal.timeout(timeoutMs),
                });
                if (!response.ok)
                    return [];
                const data = await response.json();
                return data.data.map(m => m.id);
            }
            case 'xai': {
                const response = await fetch('https://api.x.ai/v1/models', {
                    headers: { 'Authorization': `Bearer ${apiKey}` },
                    signal: AbortSignal.timeout(timeoutMs),
                });
                if (!response.ok)
                    return [];
                const data = await response.json();
                return data.data.map(m => m.id);
            }
            default:
                // Unknown provider (e.g. 'ollama' goes through its own path).
                return [];
        }
    }
    catch {
        // Timeout / network / JSON errors all collapse to "no models".
        return [];
    }
}
|
|
679
|
-
/**
 * Quickly check if providers are available by querying their APIs.
 *
 * Builds one promise per provider and awaits them all in parallel:
 *  - Ollama: probes the local /api/tags endpoint with a 1.5 s timeout.
 *  - Keyless cloud providers: resolved immediately as unavailable.
 *  - Keyed cloud providers: fetch live models (3 s timeout); a provider
 *    with a key is still reported available even if the fetch fails.
 *
 * @returns {Promise<Array<{provider: string, available: boolean,
 *   latestModel: string, error?: string}>>} one entry per provider.
 */
export async function quickCheckProviders() {
    const checks = [];
    for (const config of PROVIDER_CONFIGS) {
        // Handle Ollama separately (no API key needed)
        if (config.id === 'ollama') {
            checks.push((async () => {
                try {
                    const baseURL = process.env['OLLAMA_BASE_URL'] || 'http://localhost:11434';
                    const response = await fetch(`${baseURL}/api/tags`, {
                        signal: AbortSignal.timeout(1500),
                    });
                    if (response.ok) {
                        const data = await response.json();
                        const models = data.models?.map(m => m.name) || [];
                        return {
                            provider: 'ollama',
                            // Available only when at least one model is pulled.
                            available: models.length > 0,
                            latestModel: models[0] || config.defaultLatestModel,
                        };
                    }
                }
                catch { /* ignore */ }
                // Non-OK response, timeout, or connection refused.
                return {
                    provider: 'ollama',
                    available: false,
                    latestModel: config.defaultLatestModel,
                    error: 'Not running',
                };
            })());
            continue;
        }
        // Check for API key
        let apiKey = process.env[config.envVar];
        if (!apiKey && config.altEnvVars) {
            for (const altVar of config.altEnvVars) {
                if (process.env[altVar]) {
                    apiKey = process.env[altVar];
                    break;
                }
            }
        }
        if (!apiKey) {
            // No key → no network call; resolve immediately as unavailable.
            checks.push(Promise.resolve({
                provider: config.id,
                available: false,
                latestModel: config.defaultLatestModel,
                error: `${config.envVar} not set`,
            }));
            continue;
        }
        // Query the API for actual models
        checks.push((async () => {
            const models = await quickFetchProviderModels(config.id, apiKey, 3000);
            if (models.length > 0) {
                const bestModel = getBestModel(config.id, models);
                return {
                    provider: config.id,
                    available: true,
                    latestModel: bestModel,
                };
            }
            // API call failed or returned no models - still mark available if key exists
            return {
                provider: config.id,
                available: true,
                latestModel: config.defaultLatestModel,
                error: 'Could not fetch models',
            };
        })());
    }
    // Run all checks in parallel for speed
    return Promise.all(checks);
}
|