portos-ai-toolkit 0.3.0 → 0.4.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +1 -1
- package/src/defaults/providers.sample.json +104 -0
- package/src/server/index.js +17 -1
- package/src/server/providers.js +139 -9
- package/src/server/runner.js +1 -0
package/package.json
CHANGED

package/src/defaults/providers.sample.json
ADDED

@@ -0,0 +1,104 @@
+{
+  "activeProvider": "claude-code",
+  "providers": {
+    "claude-code": {
+      "id": "claude-code",
+      "name": "Claude Code CLI",
+      "type": "cli",
+      "command": "claude",
+      "args": ["--print"],
+      "models": ["claude-haiku-4-5-20251001", "claude-sonnet-4-20250514", "claude-opus-4-20250514"],
+      "defaultModel": "claude-sonnet-4-20250514",
+      "lightModel": "claude-haiku-4-5-20251001",
+      "mediumModel": "claude-sonnet-4-20250514",
+      "heavyModel": "claude-opus-4-20250514",
+      "timeout": 300000,
+      "enabled": true,
+      "envVars": {}
+    },
+    "claude-code-bedrock": {
+      "id": "claude-code-bedrock",
+      "name": "Claude Code CLI: Bedrock",
+      "type": "cli",
+      "command": "claude",
+      "args": ["--print"],
+      "models": ["us.anthropic.claude-sonnet-4-5-20250929-v1:0", "global.anthropic.claude-opus-4-5-20251101-v1:0"],
+      "defaultModel": "global.anthropic.claude-opus-4-5-20251101-v1:0",
+      "lightModel": "us.anthropic.claude-sonnet-4-5-20250929-v1:0",
+      "mediumModel": "global.anthropic.claude-opus-4-5-20251101-v1:0",
+      "heavyModel": "global.anthropic.claude-opus-4-5-20251101-v1:0",
+      "timeout": 300000,
+      "enabled": false,
+      "envVars": {
+        "CLAUDE_CODE_USE_BEDROCK": "1"
+      }
+    },
+    "codex": {
+      "id": "codex",
+      "name": "Codex CLI",
+      "type": "cli",
+      "command": "codex",
+      "args": [],
+      "models": ["gpt-5.1-codex-mini", "gpt-5.1-codex-max"],
+      "defaultModel": "gpt-5.1-codex-max",
+      "lightModel": "gpt-5.1-codex-mini",
+      "mediumModel": "gpt-5.1-codex-max",
+      "heavyModel": "gpt-5.1-codex-max",
+      "timeout": 300000,
+      "enabled": true,
+      "envVars": {}
+    },
+    "gemini-cli": {
+      "id": "gemini-cli",
+      "name": "Gemini CLI",
+      "type": "cli",
+      "command": "gemini",
+      "args": [],
+      "models": [],
+      "defaultModel": null,
+      "timeout": 300000,
+      "enabled": true,
+      "envVars": {}
+    },
+    "nvidia-kimi": {
+      "id": "nvidia-kimi",
+      "name": "NVIDIA Kimi K2.5",
+      "type": "api",
+      "endpoint": "https://integrate.api.nvidia.com/v1",
+      "apiKey": "",
+      "models": ["moonshotai/kimi-k2-5", "moonshotai/kimi-k2-instruct", "moonshotai/kimi-k2-thinking"],
+      "defaultModel": "moonshotai/kimi-k2-5",
+      "lightModel": "moonshotai/kimi-k2-instruct",
+      "mediumModel": "moonshotai/kimi-k2-5",
+      "heavyModel": "moonshotai/kimi-k2-thinking",
+      "fallbackProvider": null,
+      "timeout": 300000,
+      "enabled": false,
+      "envVars": {}
+    },
+    "lmstudio": {
+      "id": "lmstudio",
+      "name": "LM Studio",
+      "type": "api",
+      "endpoint": "http://localhost:1234/v1",
+      "apiKey": "lm-studio",
+      "models": [],
+      "defaultModel": null,
+      "timeout": 300000,
+      "enabled": false,
+      "envVars": {}
+    },
+    "ollama": {
+      "id": "ollama",
+      "name": "Ollama",
+      "type": "api",
+      "endpoint": "http://localhost:11434/v1",
+      "apiKey": "",
+      "models": [],
+      "defaultModel": null,
+      "timeout": 300000,
+      "enabled": false,
+      "envVars": {}
+    }
+  }
+}
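Each provider entry in this sample keys an id, a type ("cli" or "api"), and tiered model fields (lightModel, mediumModel, heavyModel) alongside defaultModel. As a rough illustration only (the resolveModel helper below is hypothetical and not part of the package), a consumer could map a tier name to a concrete model like this:

import { readFile } from 'fs/promises';

// Hypothetical helper (not in portos-ai-toolkit): resolve a tier name to a
// concrete model id from a providers config shaped like the sample above.
async function resolveModel(configPath, providerId, tier = 'medium') {
  const { providers } = JSON.parse(await readFile(configPath, 'utf8'));
  const provider = providers[providerId];
  if (!provider || !provider.enabled) return null;
  // Fall back to defaultModel when the tier field (e.g. lightModel) is absent.
  return provider[`${tier}Model`] ?? provider.defaultModel;
}

// Example: resolveModel('./providers.sample.json', 'codex', 'light') → "gpt-5.1-codex-mini"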
package/src/server/index.js
CHANGED
@@ -11,6 +11,22 @@ import { createProvidersRoutes } from './routes/providers.js';
 import { createRunsRoutes } from './routes/runs.js';
 import { createPromptsRoutes } from './routes/prompts.js';
 import { createProviderStatusRoutes } from './routes/providerStatus.js';
+import { fileURLToPath } from 'url';
+import { dirname, join } from 'path';
+
+const __dirname = dirname(fileURLToPath(import.meta.url));
+
+/**
+ * Path to the default providers sample file included with the toolkit.
+ * Use this as the sampleProvidersFile config option to get preconfigured providers:
+ * - claude-code (CLI)
+ * - codex (CLI)
+ * - gemini-cli (CLI)
+ * - nvidia-kimi (API - NVIDIA's free Kimi K2.5 models)
+ * - lmstudio (API)
+ * - ollama (API)
+ */
+export const DEFAULT_PROVIDERS_SAMPLE = join(__dirname, 'defaults/providers.sample.json');

 export * from './validation.js';
 export * from './errorDetection.js';

@@ -44,7 +60,7 @@ export function createAIToolkit(config = {}) {

     // Provider status config
     enableProviderStatus = true,
-    defaultFallbackPriority = ['claude-code', 'codex', '
+    defaultFallbackPriority = ['claude-code', 'codex', 'nvidia-kimi', 'lmstudio', 'ollama', 'gemini-cli']
   } = config;

   // Create services
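The doc comment above says the new DEFAULT_PROVIDERS_SAMPLE path is meant for the sampleProvidersFile config option. A hedged usage sketch (the import specifier and exact option handling are assumptions, not confirmed by this diff):

// Sketch only: import path assumed; adjust to however the package exposes its server entry.
import { createAIToolkit, DEFAULT_PROVIDERS_SAMPLE } from 'portos-ai-toolkit';

const toolkit = createAIToolkit({
  // Option name taken from the doc comment above; seeds the six preconfigured providers.
  sampleProvidersFile: DEFAULT_PROVIDERS_SAMPLE,
  // Shown in this diff: the default fallback order now covers the new providers.
  defaultFallbackPriority: ['claude-code', 'codex', 'nvidia-kimi', 'lmstudio', 'ollama', 'gemini-cli']
});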
package/src/server/providers.js
CHANGED
@@ -226,25 +226,33 @@ export function createProviderService(config = {}) {
     },

     /**
-     * Refresh models from
+     * Refresh models from provider using provider-specific strategies
      */
     async refreshProviderModels(id) {
       const data = await loadProviders();
       const provider = data.providers[id];

-      if (!provider
+      if (!provider) {
         return null;
       }

-
-      const response = await fetch(modelsUrl, {
-        headers: provider.apiKey ? { 'Authorization': `Bearer ${provider.apiKey}` } : {}
-      }).catch(() => null);
+      let models = [];

-
+      try {
+        // Provider-specific refresh strategies
+        if (provider.type === 'api') {
+          models = await this._refreshAPIProviderModels(provider);
+        } else if (provider.type === 'cli') {
+          models = await this._refreshCLIProviderModels(provider);
+        }
+      } catch (error) {
+        console.error(`Failed to refresh models for ${provider.name}:`, error.message);
+        return null;
+      }

-
-
+      if (!models || models.length === 0) {
+        return null;
+      }

       const updatedProvider = {
         ...data.providers[id],

@@ -254,6 +262,128 @@ export function createProviderService(config = {}) {
       data.providers[id] = updatedProvider;
       await saveProviders(data);
       return updatedProvider;
+    },
+
+    /**
+     * Refresh models from API providers
+     * Supports OpenAI-compatible endpoints (OpenAI, LM Studio, etc.)
+     * and Ollama-style endpoints
+     */
+    async _refreshAPIProviderModels(provider) {
+      // Try Ollama format first if endpoint suggests it
+      if (provider.endpoint?.includes('ollama') || provider.endpoint?.includes(':11434')) {
+        const ollamaUrl = `${provider.endpoint}/api/tags`;
+        const response = await fetch(ollamaUrl).catch(() => null);
+
+        if (response?.ok) {
+          const data = await response.json().catch(() => null);
+          if (data?.models) {
+            return data.models.map(m => m.name || m.model);
+          }
+        }
+      }
+
+      // Try OpenAI-compatible format (default)
+      const modelsUrl = `${provider.endpoint}/models`;
+      const headers = {};
+
+      if (provider.apiKey) {
+        headers['Authorization'] = `Bearer ${provider.apiKey}`;
+      }
+
+      const response = await fetch(modelsUrl, { headers }).catch(() => null);
+
+      if (!response?.ok) {
+        throw new Error(`HTTP ${response?.status || 'error'}`);
+      }
+
+      const responseData = await response.json().catch(() => ({ data: [] }));
+
+      // OpenAI format: { data: [{ id: "model-name" }] }
+      if (responseData.data && Array.isArray(responseData.data)) {
+        return responseData.data.map(m => m.id);
+      }
+
+      // Alternative format: { models: ["model-name"] }
+      if (responseData.models && Array.isArray(responseData.models)) {
+        return responseData.models;
+      }
+
+      return [];
+    },
+
+    /**
+     * Refresh models from CLI providers using provider-specific APIs
+     */
+    async _refreshCLIProviderModels(provider) {
+      const providerName = provider.name.toLowerCase();
+
+      // Claude/Anthropic - fetch from Anthropic API
+      if (providerName.includes('claude') || provider.command === 'claude') {
+        return await this._fetchAnthropicModels(provider);
+      }
+
+      // Gemini - fetch from Google AI API
+      if (providerName.includes('gemini') || provider.command === 'gemini') {
+        return await this._fetchGeminiModels(provider);
+      }
+
+      // For other CLI providers, we can't refresh models
+      throw new Error('Model refresh not supported for this CLI provider');
+    },
+
+    /**
+     * Fetch available Claude models from Anthropic API
+     */
+    async _fetchAnthropicModels(provider) {
+      // Check for API key in provider or environment
+      const apiKey = provider.apiKey || process.env.ANTHROPIC_API_KEY;
+
+      if (!apiKey) {
+        throw new Error('Anthropic API key required for model refresh');
+      }
+
+      // Known Claude models as of January 2025
+      // Anthropic doesn't have a public models list endpoint yet
+      return [
+        'claude-opus-4-6',
+        'claude-opus-4',
+        'claude-sonnet-4-6',
+        'claude-sonnet-4',
+        'claude-3-7-sonnet-20250219',
+        'claude-3-5-sonnet-20241022',
+        'claude-3-5-sonnet-20240620',
+        'claude-3-5-haiku-20241022',
+        'claude-3-opus-20240229',
+        'claude-3-sonnet-20240229',
+        'claude-3-haiku-20240307'
+      ];
+    },
+
+    /**
+     * Fetch available Gemini models from Google AI API
+     */
+    async _fetchGeminiModels(provider) {
+      const apiKey = provider.apiKey || process.env.GOOGLE_API_KEY;
+
+      if (!apiKey) {
+        throw new Error('Google API key required for model refresh');
+      }
+
+      const response = await fetch(
+        `https://generativelanguage.googleapis.com/v1beta/models?key=${apiKey}`
+      ).catch(() => null);
+
+      if (!response?.ok) {
+        throw new Error(`HTTP ${response?.status || 'error'}`);
+      }
+
+      const data = await response.json().catch(() => ({ models: [] }));
+
+      // Filter to only generative models
+      return (data.models || [])
+        .filter(m => m.supportedGenerationMethods?.includes('generateContent'))
+        .map(m => m.name.replace('models/', ''));
     }
   };
 }
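A hedged sketch of calling the new refresh path (the import path and how createProviderService is constructed are assumptions; only refreshProviderModels and its per-type strategies appear in this diff):

// Sketch only: import path and service construction assumed.
import { createProviderService } from 'portos-ai-toolkit';

const providerService = createProviderService();

// API providers are probed via /api/tags (Ollama) or /models (OpenAI-compatible);
// CLI providers fall back to the Anthropic/Gemini model lists shown above.
const updated = await providerService.refreshProviderModels('ollama');
console.log(updated ? updated.models : 'refresh skipped (unknown provider, error, or no models)');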
package/src/server/runner.js
CHANGED
@@ -188,6 +188,7 @@ export function createRunnerService(config = {}) {
       await writeFile(join(runDir, 'output.txt'), '');

       hooks.onRunCreated?.(metadata);
+      console.log(`🤖 AI run [${source}]: ${provider.name}/${metadata.model}`);

       const effectiveTimeout = timeout || provider.timeout;
