omni-agent-cli 2.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/src/agent.js ADDED
@@ -0,0 +1,183 @@
1
+ import { callProvider, buildToolResultMessage, buildAssistantToolUseMessage } from './providers.js';
2
+ import { executeTool } from './tools.js';
3
+
4
// ─── Tools that require user confirmation ─────────────────────────────────────
// Destructive / side-effecting tool names. Agent.chat() pauses before running
// any of these and emits a 'confirm_needed' event, unless the user has chosen
// "allow always" for the session (Agent.allowAllTools).
const CONFIRM_TOOLS = new Set([
  'write_file', 'patch_file', 'append_to_file',
  'delete_path', 'move_path', 'execute_command',
]);
9
+
10
// ─── System prompt ─────────────────────────────────────────────────────────────
/**
 * Build the system prompt sent with every provider call.
 *
 * The prompt instructs the model to emit a <think>…</think> planning block
 * (which Agent.chat() parses out and surfaces as a 'thinking' event, then
 * strips from the final answer) and to batch independent tool calls.
 *
 * @param {string} workdir - Directory that relative tool paths resolve against;
 *   interpolated into the prompt so the model knows its working directory.
 * @returns {string} The complete system prompt text.
 */
function buildSystemPrompt(workdir) {
  return `You are OmniAgent — a powerful CLI coding assistant with full filesystem and shell access.

## WORKING DIRECTORY
Current: ${workdir}
All relative paths resolve from here.

## THINKING FORMAT (IMPORTANT)
Before calling ANY tools, output your plan as a short <think> block:

<think>
• What the user wants
• What files/dirs I need to check first
• What I'll create/modify
• Any edge cases to handle
</think>

Keep it 2-6 bullets, concise. Then proceed with tool calls immediately after.

## PARALLEL EFFICIENCY (CRITICAL)
Independent tool calls → batch them ALL in one response.
- "read a.js AND b.js" → TWO read_file calls simultaneously
- "list src/ AND list tests/" → TWO list_directory calls simultaneously
Only go sequential when B depends on A's output.

## CAPABILITIES
- Full filesystem: read, write, patch, move, copy, delete, mkdir
- Shell: any command — npm, git, python, bash scripts
- Search: glob patterns, grep across files
- Surgical edits: patch_file for changing specific blocks without rewriting

## CODE STYLE
- Write production-quality, well-commented code
- For HTML/CSS/JS: modern, clean, no jQuery, vanilla JS preferred
- When editing: use patch_file for small changes, write_file for new files
- Show brief summary after completing a task

## RULES
- Never refuse file operations
- Paths are relative to working directory unless absolute
- Be the best coding assistant in existence`;
}
53
+
54
// ─── Agent class ───────────────────────────────────────────────────────────────
/**
 * Conversational agent loop. Each chat() turn repeatedly calls the provider,
 * executes any tool calls the model requests (asking the user first for
 * destructive tools in CONFIRM_TOOLS), feeds the results back into history,
 * and stops when the model answers without tool calls or MAX_ROUNDS is hit.
 *
 * UI integration is callback-based: onEvent receives progress events, and for
 * 'confirm_needed' / 'request_feedback' the event object carries a `resolve`
 * function the UI must call to unblock the loop.
 */
export class Agent {
  /**
   * @param {object} config - Provider settings (format, model, apiKey, …); see config.js.
   * @param {object} stats - Usage tracker; must expose addTokens() and addToolCall().
   */
  constructor(config, stats) {
    this.config = config;
    this.stats = stats;
    this.workdir = process.cwd(); // base dir for relative tool paths
    this.history = [];            // running transcript sent to the provider
    this.MAX_ROUNDS = 30;         // hard cap on model↔tool round-trips per chat()
    this.allowAllTools = false; // "Yes, allow always" mode
  }

  setWorkdir(dir) { this.workdir = dir; }
  updateConfig(c) { this.config = c; }
  // Also resets the per-session "allow always" consent.
  clearHistory() { this.history = []; this.allowAllTools = false; }

  // ── Main chat ──────────────────────────────────────────────────────────────
  /**
   * Run one user turn to completion.
   *
   * @param {string} userMessage - The user's message for this turn.
   * @param {string} [workdir] - Optional override for the working directory.
   * @param {function} [onEvent] - Progress callback. Event types emitted:
   *   'tokens', 'thinking', 'confirm_needed' (carries `resolve`),
   *   'request_feedback' (carries `resolve`), 'tool_start', 'tool_done'.
   * @returns {Promise<string>} Final assistant text with <think> blocks
   *   stripped, or a warning string when MAX_ROUNDS is exhausted.
   * @throws {Error} When the provider call fails (message extracted from the
   *   API error response when available).
   */
  async chat(userMessage, workdir, onEvent) {
    if (workdir) this.workdir = workdir;

    // System prompt is rebuilt per turn so it reflects the current workdir.
    const systemMsg = { role: 'system', content: buildSystemPrompt(this.workdir) };
    this.history.push({ role: 'user', content: userMessage });

    let rounds = 0;

    while (rounds < this.MAX_ROUNDS) {
      rounds++;

      let response;
      try {
        response = await callProvider([systemMsg, ...this.history], this.config);
      } catch (err) {
        // Prefer the provider's structured error message when present.
        const msg = err.response?.data?.error?.message || err.message || 'API error';
        throw new Error(msg);
      }

      // Track tokens
      if (onEvent && response.usage) {
        onEvent({ type: 'tokens', ...response.usage });
        this.stats.addTokens(response.usage.inputTokens, response.usage.outputTokens);
      }

      // ── Extract <think> block from response text ───────────────────────────
      // Non-greedy match: only the first <think>…</think> block is surfaced.
      if (response.content) {
        const thinkMatch = response.content.match(/<think>([\s\S]*?)<\/think>/i);
        if (thinkMatch && onEvent) {
          const bullets = thinkMatch[1]
            .split('\n')
            .map(l => l.replace(/^[•\-*]\s*/, '').trim())
            .filter(Boolean);
          if (bullets.length) onEvent({ type: 'thinking', bullets });
        }
      }

      // ── No tool calls → final answer ───────────────────────────────────────
      if (!response.toolCalls || response.toolCalls.length === 0) {
        // Strip <think> from displayed content
        // (history keeps the raw content so the model sees its own plan).
        const clean = (response.content || '').replace(/<think>[\s\S]*?<\/think>/gi, '').trim();
        this.history.push({ role: 'assistant', content: response.content });
        return clean;
      }

      // ── Tool calls ─────────────────────────────────────────────────────────
      const toolCalls = response.toolCalls;
      const assistantMsg = buildAssistantToolUseMessage(this.config.format, response);
      this.history.push(assistantMsg);

      // Execute tool calls (with confirmation for write ops)
      // Invariant: results stays index-aligned with toolCalls — every branch
      // pushes exactly one entry per tool call (skipped/modified calls push a
      // sentinel string instead of a real result).
      const results = [];
      for (const tc of toolCalls) {
        this.stats.addToolCall(tc.name);

        // NOTE(review): without an onEvent callback, confirm-gated tools run
        // unprompted (the branch below requires onEvent) — confirm intended.
        const needsConfirm = CONFIRM_TOOLS.has(tc.name) && !this.allowAllTools;

        if (needsConfirm && onEvent) {
          // Ask user for confirmation — the UI resolves this promise with
          // one of: 'skip', 'always', 'modify', or anything else (= proceed).
          const decision = await new Promise((resolve) => {
            onEvent({
              type: 'confirm_needed',
              tool: tc.name,
              input: tc.input,
              resolve,
            });
          });

          if (decision === 'skip') {
            results.push('⏭ Skipped by user.');
            continue;
          }
          if (decision === 'always') {
            // Falls through to execution; later calls skip confirmation.
            this.allowAllTools = true;
          }
          if (decision === 'modify') {
            // User wants to change something — return a special signal
            const userFeedback = await new Promise((resolve) => {
              onEvent({ type: 'request_feedback', resolve });
            });
            results.push(`USER_FEEDBACK: ${userFeedback}`);
            continue;
          }
        } else if (onEvent) {
          onEvent({ type: 'tool_start', tool: tc.name, input: tc.input });
        }

        const t0 = Date.now();
        const result = await executeTool(tc.name, tc.input, this.workdir);
        results.push(result);

        if (onEvent) {
          onEvent({ type: 'tool_done', tool: tc.name, result, durationMs: Date.now() - t0 });
        }
      }

      // Add tool results to history
      // (buildToolResultMessage may return one message or an array, depending
      // on the provider wire format.)
      const toolResultMsg = buildToolResultMessage(this.config.format, toolCalls, results);
      if (Array.isArray(toolResultMsg)) {
        for (const m of toolResultMsg) this.history.push(m);
      } else {
        this.history.push(toolResultMsg);
      }
    }

    return '⚠️ Reached maximum rounds. Task may be incomplete.';
  }

  /**
   * Bound history growth: keep the first 2 messages and the most recent
   * (maxMessages - 2).
   * NOTE(review): this slicing can separate an assistant tool-use message from
   * its tool results; some provider APIs reject such histories — verify.
   * @param {number} [maxMessages=50] - Maximum messages to retain.
   */
  trimHistory(maxMessages = 50) {
    if (this.history.length > maxMessages) {
      this.history = [...this.history.slice(0, 2), ...this.history.slice(-(maxMessages - 2))];
    }
  }
}
package/src/config.js ADDED
@@ -0,0 +1,338 @@
1
+ import { readFileSync, writeFileSync, existsSync } from 'fs';
2
+ import inquirer from 'inquirer';
3
+ import chalk from 'chalk';
4
+ import gradient from 'gradient-string';
5
+ import path from 'path';
6
+ import os from 'os';
7
+
8
// Persisted user configuration (provider, API key, model, …) lives in the
// user's home directory.
export const CONFIG_PATH = path.join(os.homedir(), '.omni-agent.json');
9
+
10
/**
 * Registry of supported AI providers.
 *
 * Each entry:
 *  - label:          display name shown in the setup wizard / model listings
 *  - baseURL:        API root ('' for 'custom' — the wizard prompts for it)
 *  - format:         wire format used by providers.js ('anthropic' | 'openai')
 *  - defaultModel:   preselected model in the wizard
 *  - modelsEndpoint: path for live model discovery, or null when the provider
 *                    has no (compatible) listing endpoint — then the static
 *                    `models` list below is used as-is
 *  - models:         static fallback model ids
 */
export const PROVIDERS = {
  anthropic: {
    label: '🟠 Anthropic (Claude)',
    baseURL: 'https://api.anthropic.com',
    format: 'anthropic',
    defaultModel: 'claude-opus-4-5-20251101',
    modelsEndpoint: null,
    models: [
      'claude-opus-4-5-20251101',
      'claude-sonnet-4-5-20251101',
      'claude-haiku-4-5-20251001',
      'claude-3-5-sonnet-20241022',
      'claude-3-5-haiku-20241022',
      'claude-3-opus-20240229',
      'claude-3-sonnet-20240229',
      'claude-3-haiku-20240307',
    ],
  },
  openai: {
    label: '🟢 OpenAI',
    baseURL: 'https://api.openai.com/v1',
    format: 'openai',
    defaultModel: 'gpt-4o',
    modelsEndpoint: '/models',
    models: ['gpt-4o', 'gpt-4o-mini', 'gpt-4-turbo', 'o1', 'o3-mini'],
  },
  groq: {
    label: '⚡ Groq (Ultra Fast)',
    baseURL: 'https://api.groq.com/openai/v1',
    format: 'openai',
    defaultModel: 'llama-3.3-70b-versatile',
    modelsEndpoint: '/models',
    models: [
      'llama-3.3-70b-versatile',
      'llama-3.3-70b-specdec',
      'llama-3.1-70b-versatile',
      'llama-3.1-8b-instant',
      'llama3-70b-8192',
      'llama3-8b-8192',
      'mixtral-8x7b-32768',
      'gemma2-9b-it',
      'gemma-7b-it',
      'deepseek-r1-distill-llama-70b',
    ],
  },
  xai: {
    label: '✖ xAI (Grok)',
    baseURL: 'https://api.x.ai/v1',
    format: 'openai',
    defaultModel: 'grok-2-latest',
    modelsEndpoint: '/models',
    models: ['grok-3', 'grok-3-fast', 'grok-2-latest', 'grok-2-vision-latest', 'grok-beta', 'grok-vision-beta'],
  },
  deepseek: {
    label: '🔵 DeepSeek',
    baseURL: 'https://api.deepseek.com/v1',
    format: 'openai',
    defaultModel: 'deepseek-chat',
    modelsEndpoint: '/models',
    models: ['deepseek-chat', 'deepseek-reasoner', 'deepseek-coder', 'deepseek-v3', 'deepseek-r1'],
  },
  qwen: {
    label: '🟣 Qwen / Alibaba Cloud',
    baseURL: 'https://dashscope.aliyuncs.com/compatible-mode/v1',
    format: 'openai',
    defaultModel: 'qwen-max',
    modelsEndpoint: '/models',
    models: [
      'qwen-max',
      'qwen-max-latest',
      'qwen-plus',
      'qwen-turbo',
      'qwen2.5-72b-instruct',
      'qwen2.5-32b-instruct',
      'qwen2.5-coder-32b-instruct',
      'qwq-32b',
    ],
  },
  gemini: {
    label: '🔷 Google Gemini',
    // OpenAI-compatibility endpoint, hence format 'openai'.
    baseURL: 'https://generativelanguage.googleapis.com/v1beta/openai',
    format: 'openai',
    defaultModel: 'gemini-2.0-flash',
    modelsEndpoint: null,
    models: [
      'gemini-2.0-flash',
      'gemini-2.0-flash-lite',
      'gemini-2.0-pro-exp',
      'gemini-2.0-flash-thinking-exp',
      'gemini-1.5-pro',
      'gemini-1.5-flash',
      'gemini-1.5-flash-8b',
      'gemini-1.0-pro',
    ],
  },
  mistral: {
    label: '🌀 Mistral AI',
    baseURL: 'https://api.mistral.ai/v1',
    format: 'openai',
    defaultModel: 'mistral-large-latest',
    modelsEndpoint: '/models',
    models: ['mistral-large-latest', 'mistral-medium', 'mistral-small', 'open-mistral-7b'],
  },
  cohere: {
    label: '🟡 Cohere',
    baseURL: 'https://api.cohere.ai/compatibility/v1',
    format: 'openai',
    defaultModel: 'command-r-plus',
    modelsEndpoint: null,
    models: ['command-r-plus', 'command-r', 'command', 'command-light'],
  },
  together: {
    label: '🤝 Together AI',
    baseURL: 'https://api.together.xyz/v1',
    format: 'openai',
    defaultModel: 'meta-llama/Llama-3.3-70B-Instruct-Turbo',
    modelsEndpoint: '/models',
    models: [
      'meta-llama/Llama-3.3-70B-Instruct-Turbo',
      'meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo',
      'meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo',
      'mistralai/Mixtral-8x7B-Instruct-v0.1',
      'mistralai/Mistral-7B-Instruct-v0.3',
      'Qwen/Qwen2.5-72B-Instruct-Turbo',
      'Qwen/Qwen2.5-Coder-32B-Instruct',
      'deepseek-ai/DeepSeek-V3',
      'deepseek-ai/DeepSeek-R1',
      'google/gemma-2-27b-it',
    ],
  },
  openrouter: {
    label: '🔀 OpenRouter (All Models)',
    baseURL: 'https://openrouter.ai/api/v1',
    format: 'openai',
    defaultModel: 'anthropic/claude-3.5-sonnet',
    modelsEndpoint: '/models',
    models: [
      'anthropic/claude-3.5-sonnet',
      'anthropic/claude-3-opus',
      'openai/gpt-4o',
      'openai/o3-mini',
      'google/gemini-2.0-flash-001',
      'google/gemini-pro-1.5',
      'meta-llama/llama-3.3-70b-instruct',
      'meta-llama/llama-3.1-405b-instruct',
      'deepseek/deepseek-r1',
      'deepseek/deepseek-chat-v3-0324',
      'qwen/qwen-2.5-72b-instruct',
      'mistralai/mistral-large',
      'x-ai/grok-2-1212',
    ],
  },
  ionet: {
    label: '🌐 io.net',
    // Current io.net URL; stale variants are migrated by loadConfig().
    baseURL: 'https://api.intelligence.io.solutions/api/v1',
    format: 'openai',
    defaultModel: 'meta-llama/Llama-3.3-70B-Instruct',
    modelsEndpoint: '/models',
    models: [
      'meta-llama/Llama-3.3-70B-Instruct',
      'meta-llama/Llama-3.1-405B-Instruct',
      'mistralai/Mistral-7B-Instruct-v0.3',
      'deepseek-ai/DeepSeek-R1',
    ],
  },
  ollama: {
    label: '🦙 Ollama (Local)',
    // Local server — no API key required (see fetchAllProviderModels).
    baseURL: 'http://localhost:11434/v1',
    format: 'openai',
    defaultModel: 'llama3.2',
    modelsEndpoint: '/models',
    models: ['llama3.2', 'llama3.1', 'mistral', 'codellama', 'qwen2.5'],
  },
  custom: {
    label: '⚙️ Custom / Other (kiai.ai, etc.)',
    baseURL: '',
    format: 'openai',
    defaultModel: '',
    modelsEndpoint: '/models',
    models: [],
  },
};
192
+
193
// Known stale provider base URLs → their current replacements. loadConfig()
// rewrites these transparently so old config files keep working after a
// provider changes its API domain.
const URL_MIGRATIONS = {
  'https://api.io.net/v1': 'https://api.intelligence.io.solutions/api/v1',
  'https://api.io.net': 'https://api.intelligence.io.solutions/api/v1',
};

/**
 * Load the saved configuration, auto-migrating stale provider URLs in place.
 *
 * Fix: the original contained a second "sync from PROVIDERS" pass that read
 * `provider.providerKey` — a field PROVIDERS entries do not have (it should
 * have been `cfg.providerKey`) — and then re-tested `cfg.baseURL` against the
 * same stale-URL list the first pass had already rewritten, so the branch
 * could never fire. That dead code has been removed.
 *
 * @param {string} [configPath=CONFIG_PATH] - Path of the JSON config file.
 * @returns {object|null} Parsed config object, or null when the file is
 *   missing, unreadable, or malformed (best-effort by design: the caller
 *   falls back to the setup wizard).
 */
export function loadConfig(configPath = CONFIG_PATH) {
  try {
    if (!existsSync(configPath)) return null;

    const cfg = JSON.parse(readFileSync(configPath, 'utf8'));

    // Auto-migrate stale provider URLs and persist the correction so the
    // rewrite only happens once.
    const migrated = cfg.baseURL && URL_MIGRATIONS[cfg.baseURL];
    if (migrated) {
      cfg.baseURL = migrated;
      writeFileSync(configPath, JSON.stringify(cfg, null, 2), 'utf8');
    }

    return cfg;
  } catch {
    // Corrupt or unreadable config is treated as absent.
    return null;
  }
}
226
+
227
/**
 * Persist the configuration as pretty-printed JSON.
 *
 * Fix: the file contains the plaintext API key, but was previously written
 * with the default mode (typically 0644, world-readable). It is now created
 * with owner-only permissions (0600). Note that `mode` only applies when the
 * file is first created; a pre-existing file keeps its current permissions.
 *
 * @param {object} config - Full configuration (providerKey, apiKey, model, …).
 * @param {string} [configPath=CONFIG_PATH] - Destination path.
 */
export function saveConfig(config, configPath = CONFIG_PATH) {
  writeFileSync(configPath, JSON.stringify(config, null, 2), { encoding: 'utf8', mode: 0o600 });
}
230
+
231
/**
 * Interactive first-run / reconfiguration wizard.
 *
 * Prompts for provider, base URL (custom providers only), API key, model,
 * max tokens, and temperature, pre-filling defaults from `existingConfig`
 * when re-running.
 *
 * NOTE(review): this function only RETURNS the config object; the final
 * "Configuration saved" message presumes the caller persists it via
 * saveConfig() — verify against the caller.
 *
 * @param {object|null} [existingConfig=null] - Previously saved config used
 *   to pre-fill prompt defaults.
 * @returns {Promise<object>} The assembled config:
 *   { providerKey, baseURL, apiKey, model, format, maxTokens, temperature }.
 */
export async function setupWizard(existingConfig = null) {
  console.log('\n' + gradient.cristal(' ╔══════════════════════════════════╗'));
  console.log(gradient.cristal(' ║ OMNI-AGENT SETUP WIZARD ║'));
  console.log(gradient.cristal(' ╚══════════════════════════════════╝') + '\n');

  // Step 1: provider selection from the PROVIDERS registry.
  const providerChoices = Object.entries(PROVIDERS).map(([key, p]) => ({
    name: p.label,
    value: key,
  }));

  const { providerKey } = await inquirer.prompt([
    {
      type: 'list',
      name: 'providerKey',
      message: chalk.cyan('Select your AI provider:'),
      choices: providerChoices,
      default: existingConfig?.providerKey || 'anthropic',
    },
  ]);

  const provider = PROVIDERS[providerKey];

  // Step 2: base URL — only prompted for 'custom' (empty baseURL in registry).
  let baseURL = provider.baseURL;
  if (providerKey === 'custom' || !baseURL) {
    const { customURL } = await inquirer.prompt([
      {
        type: 'input',
        name: 'customURL',
        message: chalk.cyan('Enter API base URL (e.g. https://api.kiai.ai/v1):'),
        default: existingConfig?.baseURL || '',
        validate: (v) => v.trim() ? true : 'URL cannot be empty',
      },
    ]);
    // Strip a single trailing slash so endpoint paths can be concatenated.
    baseURL = customURL.trim().replace(/\/$/, '');
  }

  // Step 3: API key (masked input).
  const { apiKey } = await inquirer.prompt([
    {
      type: 'password',
      name: 'apiKey',
      message: chalk.cyan(`Enter API key for ${provider.label}:`),
      mask: '●',
      default: existingConfig?.apiKey || '',
      validate: (v) => v.trim() ? true : 'API key cannot be empty',
    },
  ]);

  // Step 4: model — pick from the static list (with a custom-entry escape
  // hatch) or free-form when the registry has no models (custom provider).
  let model;
  if (provider.models.length > 0) {
    const modelChoices = [
      ...provider.models.map((m) => ({ name: m, value: m })),
      { name: '✏️ Enter custom model name', value: '__custom__' },
    ];
    const { modelChoice } = await inquirer.prompt([
      {
        type: 'list',
        name: 'modelChoice',
        message: chalk.cyan('Select model:'),
        choices: modelChoices,
        default: existingConfig?.model || provider.defaultModel,
      },
    ]);
    if (modelChoice === '__custom__') {
      const { customModel } = await inquirer.prompt([
        { type: 'input', name: 'customModel', message: chalk.cyan('Model name:'), validate: (v) => v.trim() ? true : 'Cannot be empty' },
      ]);
      model = customModel.trim();
    } else {
      model = modelChoice;
    }
  } else {
    const { customModel } = await inquirer.prompt([
      { type: 'input', name: 'customModel', message: chalk.cyan('Enter model name:'), default: existingConfig?.model || '', validate: (v) => v.trim() ? true : 'Cannot be empty' },
    ]);
    model = customModel.trim();
  }

  // Step 5: generation limits.
  const { maxTokens } = await inquirer.prompt([
    {
      type: 'number',
      name: 'maxTokens',
      message: chalk.cyan('Max tokens per response:'),
      default: existingConfig?.maxTokens || 8192,
    },
  ]);

  const { temperature } = await inquirer.prompt([
    {
      type: 'number',
      name: 'temperature',
      message: chalk.cyan('Temperature (0.0 - 1.0):'),
      // ?? (not ||) so an explicit 0 temperature is kept.
      default: existingConfig?.temperature ?? 0.3,
    },
  ]);

  const config = {
    providerKey,
    baseURL,
    apiKey: apiKey.trim(),
    model,
    format: provider.format,
    maxTokens: maxTokens || 8192,
    temperature: temperature ?? 0.3,
  };

  console.log('\n' + chalk.green(' ✅ Configuration saved to ~/.omni-agent.json\n'));
  return config;
}
@@ -0,0 +1,83 @@
1
+ import axios from 'axios';
2
+ import { PROVIDERS } from './config.js';
3
+
4
// Query a provider's model-listing endpoint and return the raw response body.
// Anthropic authenticates with x-api-key + anthropic-version headers; every
// other provider uses standard Bearer auth.
async function fetchModelsFromEndpoint(baseURL, apiKey, format, endpointPath = '/models') {
  const headers =
    format === 'anthropic'
      ? {
          'Content-Type': 'application/json',
          'x-api-key': apiKey,
          'anthropic-version': '2023-06-01',
        }
      : {
          'Content-Type': 'application/json',
          Authorization: `Bearer ${apiKey}`,
        };
  const { data } = await axios.get(`${baseURL}${endpointPath}`, { headers, timeout: 12000 });
  return data;
}
14
+
15
/**
 * Normalize a model-listing API response into a sorted array of model-id
 * strings. Accepts the three shapes seen in the wild: `{ data: [...] }`
 * (OpenAI-style), a bare array, or `{ models: [...] }`.
 *
 * Improvements over the original:
 *  - entries that are plain strings are now kept (previously `m.id || m.name`
 *    evaluated to undefined and silently dropped them);
 *  - null/undefined list entries no longer throw (`m.id` on null crashed).
 *
 * @param {*} data - Raw response body from the /models endpoint.
 * @returns {string[]} Sorted model ids; empty array for unrecognized shapes.
 */
function parseModelsResponse(data) {
  const list = Array.isArray(data)
    ? data
    : Array.isArray(data?.data)
      ? data.data
      : Array.isArray(data?.models)
        ? data.models
        : [];
  return list
    .map((m) => (typeof m === 'string' ? m : m?.id || m?.name))
    .filter(Boolean)
    .sort();
}
21
+
22
/**
 * Fetch the model list for one provider.
 *
 * Returns `{ live, models, error }`: `live` is true when the list came from
 * the provider's API; otherwise the registry's static fallback list is
 * returned together with the reason in `error`.
 *
 * @param {string} providerKey - Key into PROVIDERS.
 * @param {string} apiKey - API key for the provider.
 * @param {string} [baseURL] - Override base URL; defaults to the registry's.
 * @throws {Error} When providerKey is not in PROVIDERS.
 */
export async function fetchProviderModels(providerKey, apiKey, baseURL) {
  const provider = PROVIDERS[providerKey];
  if (!provider) throw new Error(`Unknown provider: ${providerKey}`);

  // Providers without a listing endpoint always use the static list.
  if (!provider.modelsEndpoint) {
    return { live: false, models: provider.models, error: null };
  }

  try {
    const raw = await fetchModelsFromEndpoint(
      baseURL || provider.baseURL,
      apiKey,
      provider.format,
      provider.modelsEndpoint
    );
    const discovered = parseModelsResponse(raw);
    return discovered.length
      ? { live: true, models: discovered, error: null }
      : { live: false, models: provider.models, error: 'Empty response' };
  } catch (err) {
    const reason = err.response?.data?.error?.message || err.message || 'Fetch failed';
    return { live: false, models: provider.models, error: reason };
  }
}
38
+
39
// ─── Fetch models for ALL providers in parallel ────────────────────────────────
// apiConfigs: { [providerKey]: { apiKey, baseURL } } — only provided keys are fetched live
export async function fetchAllProviderModels(apiConfigs = {}) {
  const entries = await Promise.all(
    Object.keys(PROVIDERS).map(async (providerKey) => {
      const { apiKey, baseURL } = apiConfigs[providerKey] || {};
      // Without an API key we fall back to the static list — except Ollama,
      // which is a local server and needs no key.
      if (!apiKey && providerKey !== 'ollama') {
        return [providerKey, { live: false, models: PROVIDERS[providerKey].models, error: 'No API key' }];
      }
      const result = await fetchProviderModels(providerKey, apiKey, baseURL || PROVIDERS[providerKey].baseURL);
      return [providerKey, result];
    })
  );
  return Object.fromEntries(entries);
}
56
+
57
// Truncate long model names for terminal display, appending an ellipsis when
// the name exceeds `max` characters.
function truncate(str, max = 48) {
  if (str.length <= max) return str;
  return `${str.slice(0, max - 1)}…`;
}
61
+
62
/**
 * Render one provider's model list for the terminal.
 * Status glyph: 🟢 live-fetched, ⚫ no API key configured, 🔴 fetch error.
 *
 * @param {string} providerKey - Key into PROVIDERS (falls back to the raw key
 *   as label when unknown).
 * @param {{ live: boolean, models: string[], error: ?string }} result
 * @returns {string} Multi-line block: header line plus one numbered line per model.
 */
export function formatModelList(providerKey, result) {
  const provider = PROVIDERS[providerKey];
  const label = provider?.label || providerKey;

  let status;
  if (result.live) {
    status = '🟢';
  } else if (result.error === 'No API key') {
    status = '⚫';
  } else {
    status = '🔴';
  }

  // Real errors are appended to the header (truncated); "No API key" is
  // conveyed by the ⚫ glyph alone.
  const errStr = result.error && result.error !== 'No API key' ? ` ⚠ ${result.error.slice(0, 60)}` : '';

  const modelLines = result.models.map((m, i) => {
    const num = String(i + 1).padStart(3);
    return `  ${num}. ${truncate(m, 48)}`;
  });

  return [`${status} ${label}${errStr}`, ...modelLines].join('\n');
}
76
+
77
/**
 * Render every provider's model list, separated by blank lines.
 *
 * @param {Object<string, {live: boolean, models: string[], error: ?string}>} allResults
 *   Map of providerKey → fetch result (as produced by fetchAllProviderModels).
 * @returns {string} All provider sections joined with blank lines.
 */
export function formatAllModels(allResults) {
  return Object.entries(allResults)
    .map(([providerKey, result]) => formatModelList(providerKey, result))
    .join('\n\n');
}