nex-code 0.3.4 → 0.3.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,237 +0,0 @@
1
- /**
2
- * cli/providers/openai.js — OpenAI-compatible Provider
3
- * Supports GPT-4o, o1, o3, GPT-4o-mini via OpenAI API with SSE streaming.
4
- */
5
-
6
- const axios = require('axios');
7
- const { BaseProvider } = require('./base');
8
-
9
// Catalogue of supported OpenAI models, keyed by model id.
// Built from a compact [id, display name, maxTokens, contextWindow] table.
const OPENAI_MODELS = Object.fromEntries(
  [
    ['gpt-4o', 'GPT-4o', 16384, 128000],
    ['gpt-4o-mini', 'GPT-4o Mini', 16384, 128000],
    ['gpt-4.1', 'GPT-4.1', 32768, 128000],
    ['gpt-4.1-mini', 'GPT-4.1 Mini', 32768, 128000],
    ['gpt-4.1-nano', 'GPT-4.1 Nano', 16384, 128000],
    ['o1', 'o1', 100000, 200000],
    ['o3', 'o3', 100000, 200000],
    ['o3-mini', 'o3 Mini', 65536, 200000],
    ['o4-mini', 'o4 Mini', 100000, 200000],
  ].map(([id, name, maxTokens, contextWindow]) => [
    id,
    { id, name, maxTokens, contextWindow },
  ])
);
20
-
21
/**
 * OpenAI-compatible provider.
 *
 * Talks to the OpenAI Chat Completions API (or any API-compatible endpoint
 * via `baseUrl`) with SSE streaming and tool calling.
 */
class OpenAIProvider extends BaseProvider {
  /**
   * @param {Object} [config]
   * @param {string} [config.baseUrl] - API root (default https://api.openai.com/v1)
   * @param {Object} [config.models] - Model catalogue (default OPENAI_MODELS)
   * @param {string} [config.defaultModel] - Default model id (default 'gpt-4o')
   * @param {number} [config.timeout] - Request timeout in ms (default 180000)
   * @param {number} [config.temperature] - Sampling temperature (default 0.2)
   */
  constructor(config = {}) {
    super({
      name: 'openai',
      baseUrl: config.baseUrl || 'https://api.openai.com/v1',
      models: config.models || OPENAI_MODELS,
      defaultModel: config.defaultModel || 'gpt-4o',
      ...config,
    });
    this.timeout = config.timeout || 180000;
    // ?? keeps an explicit temperature of 0 (|| would discard it).
    this.temperature = config.temperature ?? 0.2;
  }

  /** @returns {boolean} true when an API key is available. */
  isConfigured() {
    return !!this.getApiKey();
  }

  /** @returns {string|null} API key from the environment, or null. */
  getApiKey() {
    return process.env.OPENAI_API_KEY || null;
  }

  /**
   * Build request headers.
   * @throws {Error} when OPENAI_API_KEY is not set.
   */
  _getHeaders() {
    const key = this.getApiKey();
    if (!key) throw new Error('OPENAI_API_KEY not set');
    return {
      Authorization: `Bearer ${key}`,
      'Content-Type': 'application/json',
    };
  }

  /**
   * Convert internal chat messages into OpenAI wire format.
   * Assistant tool calls are normalized to { id, type, function } objects
   * with string arguments; tool results carry their tool_call_id.
   * @param {Array<Object>} messages
   * @returns {{ messages: Array<Object> }}
   */
  formatMessages(messages) {
    return {
      messages: messages.map((msg, msgIdx) => {
        if (msg.role === 'assistant' && msg.tool_calls) {
          return {
            role: 'assistant',
            content: msg.content || null,
            tool_calls: msg.tool_calls.map((tc, tcIdx) => ({
              // BUGFIX: include indices in the synthesized id so two calls
              // generated within the same millisecond cannot collide.
              id: tc.id || `call-${Date.now()}-${msgIdx}-${tcIdx}`,
              type: 'function',
              function: {
                name: tc.function.name,
                arguments:
                  typeof tc.function.arguments === 'string'
                    ? tc.function.arguments
                    : JSON.stringify(tc.function.arguments),
              },
            })),
          };
        }
        if (msg.role === 'tool') {
          return {
            role: 'tool',
            content: typeof msg.content === 'string' ? msg.content : JSON.stringify(msg.content),
            tool_call_id: msg.tool_call_id,
          };
        }
        return { role: msg.role, content: msg.content };
      }),
    };
  }

  /**
   * True for reasoning-series models (o1/o3/o4...), which per the OpenAI API
   * require `max_completion_tokens` and reject a custom `temperature`.
   */
  _isReasoningModel(model) {
    return /^o\d/.test(model);
  }

  /**
   * Assemble the Chat Completions request body, shaping token/temperature
   * fields per model family (see _isReasoningModel).
   */
  _buildBody(model, formatted, maxTokens, temperature, tools, stream) {
    const body = { model, messages: formatted };
    if (this._isReasoningModel(model)) {
      // BUGFIX: o-series models reject `max_tokens` and non-default temperature.
      body.max_completion_tokens = maxTokens;
    } else {
      body.max_tokens = maxTokens;
      body.temperature = temperature;
    }
    if (stream) body.stream = true;
    if (tools && tools.length > 0) body.tools = tools;
    return body;
  }

  /**
   * Non-streaming chat completion.
   * @returns {Promise<{ content: string, tool_calls: Array }>}
   */
  async chat(messages, tools, options = {}) {
    const model = options.model || this.defaultModel;
    const modelInfo = this.getModel(model);
    const maxTokens = options.maxTokens || modelInfo?.maxTokens || 16384;
    const { messages: formatted } = this.formatMessages(messages);

    const body = this._buildBody(
      model, formatted, maxTokens, options.temperature ?? this.temperature, tools, false
    );

    const response = await axios.post(`${this.baseUrl}/chat/completions`, body, {
      timeout: options.timeout || this.timeout,
      headers: this._getHeaders(),
    });

    return this.normalizeResponse(response.data);
  }

  /**
   * Streaming chat completion over SSE.
   * Calls options.onToken(text) for each content delta; accumulates
   * tool-call fragments by index and assembles them at the end.
   * @returns {Promise<{ content: string, tool_calls: Array }>}
   */
  async stream(messages, tools, options = {}) {
    const model = options.model || this.defaultModel;
    const modelInfo = this.getModel(model);
    const maxTokens = options.maxTokens || modelInfo?.maxTokens || 16384;
    const onToken = options.onToken || (() => {});
    const { messages: formatted } = this.formatMessages(messages);

    const body = this._buildBody(
      model, formatted, maxTokens, options.temperature ?? this.temperature, tools, true
    );

    let response;
    try {
      response = await axios.post(`${this.baseUrl}/chat/completions`, body, {
        timeout: options.timeout || this.timeout,
        headers: this._getHeaders(),
        responseType: 'stream',
        signal: options.signal,
      });
    } catch (err) {
      // Propagate cancellations untouched so callers can detect them by name/code.
      if (err.name === 'CanceledError' || err.name === 'AbortError' || err.code === 'ERR_CANCELED') throw err;
      const msg = err.response?.data?.error?.message || err.message;
      throw new Error(`API Error: ${msg}`);
    }

    return new Promise((resolve, reject) => {
      let content = '';
      const toolCallsMap = {}; // index -> { id, name, arguments }
      let buffer = '';
      let settled = false;

      // BUGFIX: settle guard — only the first resolution wins, and the abort
      // listener is detached so reused signals do not accumulate listeners.
      const settle = (fn) => {
        if (settled) return;
        settled = true;
        if (options.signal) options.signal.removeEventListener('abort', onAbort);
        fn();
      };

      const onAbort = () => {
        response.data.destroy();
        settle(() => reject(new DOMException('The operation was aborted', 'AbortError')));
      };

      const finish = () =>
        settle(() => resolve({ content, tool_calls: this._buildToolCalls(toolCallsMap) }));

      // Process one SSE line; returns true when the [DONE] sentinel was seen.
      const handleLine = (line) => {
        const trimmed = line.trim();
        if (!trimmed || !trimmed.startsWith('data: ')) return false;
        const data = trimmed.slice(6);
        if (data === '[DONE]') return true;

        let parsed;
        try {
          parsed = JSON.parse(data);
        } catch {
          return false; // partial or garbled JSON — skip silently
        }

        const delta = parsed.choices?.[0]?.delta;
        if (!delta) return false;

        if (delta.content) {
          onToken(delta.content);
          content += delta.content;
        }

        if (delta.tool_calls) {
          for (const tc of delta.tool_calls) {
            const idx = tc.index ?? 0;
            if (!toolCallsMap[idx]) {
              toolCallsMap[idx] = { id: tc.id || '', name: '', arguments: '' };
            }
            if (tc.id) toolCallsMap[idx].id = tc.id;
            if (tc.function?.name) toolCallsMap[idx].name += tc.function.name;
            if (tc.function?.arguments) toolCallsMap[idx].arguments += tc.function.arguments;
          }
        }
        return false;
      };

      if (options.signal) {
        options.signal.addEventListener('abort', onAbort, { once: true });
      }

      response.data.on('data', (chunk) => {
        buffer += chunk.toString();
        const lines = buffer.split('\n');
        buffer = lines.pop() || '';
        for (const line of lines) {
          if (handleLine(line)) {
            finish();
            return;
          }
        }
      });

      response.data.on('error', (err) => {
        if (options.signal?.aborted) return; // errors after abort are expected
        settle(() => reject(new Error(`Stream error: ${err.message}`)));
      });

      response.data.on('end', () => {
        // BUGFIX: flush a final event left in the buffer — servers may omit
        // the trailing newline on the last SSE line, which was previously dropped.
        if (buffer) handleLine(buffer);
        finish();
      });
    });
  }

  /**
   * Normalize a non-streaming API response to { content, tool_calls }.
   */
  normalizeResponse(data) {
    const choice = data.choices?.[0]?.message || {};
    const toolCalls = (choice.tool_calls || []).map((tc) => ({
      id: tc.id,
      function: {
        name: tc.function.name,
        arguments: tc.function.arguments,
      },
    }));

    return {
      content: choice.content || '',
      tool_calls: toolCalls,
    };
  }

  /**
   * Assemble accumulated streaming tool-call fragments into final call objects.
   * Fragments without a function name are discarded.
   */
  _buildToolCalls(toolCallsMap) {
    return Object.values(toolCallsMap)
      .filter((tc) => tc.name)
      .map((tc, i) => ({
        // BUGFIX: index suffix prevents id collisions within one millisecond.
        id: tc.id || `openai-${Date.now()}-${i}`,
        function: {
          name: tc.name,
          arguments: tc.arguments,
        },
      }));
  }
}
236
-
237
// Public API: the provider class plus its model catalogue (used by the registry).
module.exports = { OpenAIProvider, OPENAI_MODELS };
@@ -1,454 +0,0 @@
1
- /**
2
- * cli/providers/registry.js — Provider Registry + Model Resolution
3
- * Central hub for multi-provider model management.
4
- *
5
- * Model specs: 'provider:model' (e.g. 'openai:gpt-4o', 'anthropic:claude-sonnet', 'local:llama3')
6
- * Short specs: 'model' (resolved against active provider)
7
- */
8
-
9
- const { OllamaProvider } = require('./ollama');
10
- const { OpenAIProvider } = require('./openai');
11
- const { AnthropicProvider } = require('./anthropic');
12
- const { GeminiProvider } = require('./gemini');
13
- const { LocalProvider } = require('./local');
14
- const { checkBudget } = require('../costs');
15
-
16
- // ─── Model Equivalence Map ─────────────────────────────────────
17
- // Maps models across providers by capability tier (top/strong/fast).
18
- // Used during fallback to pick an equivalent model on a different provider.
19
-
20
// Capability tiers (top/strong/fast) mapped to each provider's closest model.
// Used during fallback to pick an equivalent model on a different provider.
const MODEL_EQUIVALENTS = {
  top: { ollama: 'kimi-k2.5', openai: 'gpt-4.1', anthropic: 'claude-sonnet-4-5', gemini: 'gemini-2.5-pro' },
  strong: { ollama: 'qwen3-coder:480b', openai: 'gpt-4o', anthropic: 'claude-sonnet', gemini: 'gemini-2.5-flash' },
  fast: { ollama: 'devstral-small-2:24b', openai: 'gpt-4.1-mini', anthropic: 'claude-haiku', gemini: 'gemini-2.0-flash' },
};

// Reverse lookup (model id → tier), derived once at module load.
const _modelToTier = Object.fromEntries(
  Object.entries(MODEL_EQUIVALENTS).flatMap(([tier, mapping]) =>
    Object.values(mapping).map((model) => [model, tier])
  )
);
33
-
34
- /**
35
- * Resolve the equivalent model for a target provider.
36
- * If sourceModel exists in MODEL_EQUIVALENTS, returns the target provider's
37
- * equivalent. Otherwise returns sourceModel unchanged.
38
- *
39
- * @param {string} sourceModel - The model ID to map (e.g. 'kimi-k2.5')
40
- * @param {string} targetProviderName - Target provider (e.g. 'openai')
41
- * @returns {string} resolved model ID
42
- */
43
/**
 * Resolve the equivalent model for a target provider.
 * If sourceModel exists in MODEL_EQUIVALENTS, returns the target provider's
 * equivalent. Otherwise returns sourceModel unchanged.
 *
 * @param {string} sourceModel - The model ID to map (e.g. 'kimi-k2.5')
 * @param {string} targetProviderName - Target provider (e.g. 'openai')
 * @returns {string} resolved model ID
 */
function resolveModelForProvider(sourceModel, targetProviderName) {
  const tier = _modelToTier[sourceModel];
  // Unknown model, or no mapping for this provider — pass through unchanged.
  if (!tier) return sourceModel;
  return MODEL_EQUIVALENTS[tier][targetProviderName] ?? sourceModel;
}
49
-
50
- // ─── Registry State ────────────────────────────────────────────
51
-
52
// Registered provider instances, keyed by provider name.
const providers = {};
// Name of the currently selected provider (set by initDefaults / setActiveModel).
let activeProviderName = null;
// Id of the currently selected model on the active provider.
let activeModelId = null;
// Provider names tried, in order, when the active provider fails with a retryable error.
let fallbackChain = [];
56
-
57
- // ─── Initialize Default Providers ──────────────────────────────
58
-
59
/**
 * Lazily install the default providers and pick the initial active
 * provider/model from the environment (DEFAULT_PROVIDER, DEFAULT_MODEL,
 * FALLBACK_CHAIN). Idempotent: a populated registry means this already ran.
 */
function initDefaults() {
  if (Object.keys(providers).length !== 0) return;

  const builtins = [
    ['ollama', new OllamaProvider()],
    ['openai', new OpenAIProvider()],
    ['anthropic', new AnthropicProvider()],
    ['gemini', new GeminiProvider()],
    ['local', new LocalProvider()],
  ];
  for (const [name, instance] of builtins) {
    registerProvider(name, instance);
  }

  const wantedProvider = process.env.DEFAULT_PROVIDER || 'ollama';
  const wantedModel = process.env.DEFAULT_MODEL || null;

  if (providers[wantedProvider]) {
    activeProviderName = wantedProvider;
    activeModelId = wantedModel || providers[wantedProvider].defaultModel;
  } else {
    // Unknown provider requested — fall back to the built-in default.
    activeProviderName = 'ollama';
    activeModelId = 'kimi-k2.5';
  }

  // Comma-separated provider names, e.g. "openai,anthropic".
  const rawChain = process.env.FALLBACK_CHAIN;
  if (rawChain) {
    fallbackChain = rawChain
      .split(',')
      .map((entry) => entry.trim())
      .filter(Boolean);
  }
}
86
-
87
- // ─── Provider Management ───────────────────────────────────────
88
-
89
/** Register (or replace) a provider under the given name. */
function registerProvider(name, provider) {
  providers[name] = provider;
}

/** Look up a provider by name; null when unknown. */
function getProvider(name) {
  initDefaults();
  return providers[name] ?? null;
}

/** The currently selected provider instance; null if none. */
function getActiveProvider() {
  initDefaults();
  return providers[activeProviderName] ?? null;
}

/** Name of the currently selected provider. */
function getActiveProviderName() {
  initDefaults();
  return activeProviderName;
}

/** Id of the currently selected model. */
function getActiveModelId() {
  initDefaults();
  return activeModelId;
}
112
-
113
- /**
114
- * Get active model info (compatible with old getActiveModel format)
115
- * @returns {{ id: string, name: string, provider: string, maxTokens?: number, contextWindow?: number }}
116
- */
117
/**
 * Get active model info (compatible with old getActiveModel format)
 * @returns {{ id: string, name: string, provider: string, maxTokens?: number, contextWindow?: number }}
 */
function getActiveModel() {
  initDefaults();
  // Minimal descriptor used when the provider or its catalogue can't help.
  const bare = { id: activeModelId, name: activeModelId, provider: activeProviderName };

  const provider = getActiveProvider();
  if (!provider) return bare;

  const model = provider.getModel(activeModelId);
  return model ? { ...model, provider: activeProviderName } : bare;
}
129
-
130
- // ─── Model Resolution ──────────────────────────────────────────
131
-
132
- /**
133
- * Parse a model spec like 'openai:gpt-4o' or just 'gpt-4o'
134
- * @param {string} spec
135
- * @returns {{ provider: string|null, model: string }}
136
- */
137
/**
 * Parse a model spec like 'openai:gpt-4o' or just 'gpt-4o'
 * @param {string} spec
 * @returns {{ provider: string|null, model: string }}
 */
function parseModelSpec(spec) {
  if (!spec) return { provider: null, model: null };

  const colonIdx = spec.indexOf(':');
  if (colonIdx > 0) {
    const prefix = spec.slice(0, colonIdx);
    // Only treat as provider:model when the prefix names a known provider
    // (ollama model ids like 'qwen3-coder:480b' also contain colons).
    const knownProviders = ['ollama', 'openai', 'anthropic', 'gemini', 'local'];
    if (providers[prefix] || knownProviders.includes(prefix)) {
      return { provider: prefix, model: spec.slice(colonIdx + 1) };
    }
  }
  return { provider: null, model: spec };
}
149
-
150
- /**
151
- * Set active model. Accepts 'provider:model' or just 'model'.
152
- * @param {string} spec - Model spec (e.g. 'openai:gpt-4o', 'kimi-k2.5')
153
- * @returns {boolean} true if model was set successfully
154
- */
155
/**
 * Set active model. Accepts 'provider:model' or just 'model'.
 * @param {string} spec - Model spec (e.g. 'openai:gpt-4o', 'kimi-k2.5')
 * @returns {boolean} true if model was set successfully
 */
function setActiveModel(spec) {
  initDefaults();
  const { provider: providerName, model: modelId } = parseModelSpec(spec);

  // Explicit provider prefix: validate against that provider only.
  if (providerName) {
    const provider = providers[providerName];
    if (!provider) return false;

    // 'local' accepts arbitrary model ids beyond its predefined catalogue.
    if (!provider.getModel(modelId) && providerName !== 'local') return false;

    activeProviderName = providerName;
    activeModelId = modelId;
    return true;
  }

  // Bare model id: prefer the active provider, then scan all providers.
  const active = getActiveProvider();
  if (active && active.getModel(modelId)) {
    activeModelId = modelId;
    return true;
  }

  for (const [name, provider] of Object.entries(providers)) {
    if (provider.getModel(modelId)) {
      activeProviderName = name;
      activeModelId = modelId;
      return true;
    }
  }

  return false;
}
196
-
197
- /**
198
- * Get all model names across all providers (for tab completion, etc.)
199
- * Returns just the model ids without provider prefix.
200
- */
201
/**
 * Get all model names across all providers (for tab completion, etc.)
 * Returns just the model ids without provider prefix, de-duplicated.
 */
function getModelNames() {
  initDefaults();
  const unique = new Set(
    Object.values(providers).flatMap((provider) => provider.getModelNames())
  );
  return [...unique];
}
211
-
212
- /**
213
- * Get all models grouped by provider
214
- * @returns {Array<{ provider: string, configured: boolean, models: Array }>}
215
- */
216
/**
 * Get all models grouped by provider
 * @returns {Array<{ provider: string, configured: boolean, models: Array }>}
 */
function listProviders() {
  initDefaults();
  return Object.entries(providers).map(([name, provider]) => {
    const models = Object.values(provider.getModels()).map((m) => ({
      ...m,
      // Mark the single provider/model pair the registry currently points at.
      active: name === activeProviderName && m.id === activeModelId,
    }));
    return { provider: name, configured: provider.isConfigured(), models };
  });
}
227
-
228
- /**
229
- * Get flat list of all models with provider prefix
230
- * @returns {Array<{ spec: string, name: string, provider: string, configured: boolean }>}
231
- */
232
/**
 * Get flat list of all models with provider prefix
 * @returns {Array<{ spec: string, name: string, provider: string, configured: boolean }>}
 */
function listAllModels() {
  initDefaults();
  return Object.entries(providers).flatMap(([provName, provider]) => {
    const configured = provider.isConfigured();
    return Object.values(provider.getModels()).map((model) => ({
      spec: `${provName}:${model.id}`,
      name: model.name,
      provider: provName,
      configured,
    }));
  });
}
248
-
249
- // ─── Fallback Chain ─────────────────────────────────────────────
250
-
251
/** Replace the fallback chain; non-array input clears it. */
function setFallbackChain(chain) {
  if (Array.isArray(chain)) {
    fallbackChain = chain;
  } else {
    fallbackChain = [];
  }
}

/** Return a defensive copy of the fallback chain. */
function getFallbackChain() {
  return fallbackChain.slice();
}
258
-
259
- // ─── Streaming Call (convenience) ──────────────────────────────
260
-
261
- /**
262
- * Check if an error is retryable (rate limit, server error, or network failure).
263
- */
264
/**
 * Check if an error is retryable (rate limit, server error, or network failure).
 *
 * Checks, in order: the HTTP status axios attaches to the error
 * (`err.response.status`), well-known Node network error codes, and finally
 * substrings of the message — some call sites wrap errors in `new Error(...)`
 * so only the text survives.
 *
 * @param {Error} err
 * @returns {boolean}
 */
function isRetryableError(err) {
  const msg = err.message || '';
  const code = err.code || '';

  // BUGFIX: consult the structured HTTP status when present (axios errors
  // carry it); previously only message text was inspected.
  const status = err.response?.status;
  if (status === 429 || (status >= 500 && status <= 504)) return true;

  // HTTP status codes that leaked into a wrapped error message.
  if (['429', '500', '502', '503', '504'].some((s) => msg.includes(s))) return true;

  // Network/TLS/socket/DNS errors — transient, retryable.
  const retryableCodes = new Set([
    'ECONNABORTED', 'ETIMEDOUT', 'ECONNREFUSED', 'ECONNRESET',
    'EHOSTUNREACH', 'ENETUNREACH', 'EPIPE', 'EAI_AGAIN',
    'ERR_SOCKET_CONNECTION_TIMEOUT',
  ]);
  if (retryableCodes.has(code)) return true;

  // Same signals when only the message text survived wrapping.
  return ['socket disconnected', 'TLS', 'ECONNRESET', 'ECONNABORTED', 'network', 'ETIMEDOUT']
    .some((s) => msg.includes(s));
}
283
-
284
- /**
285
- * Make a streaming call through the active provider.
286
- * Falls back to next provider in chain on retryable errors.
287
- * Skips providers that are over budget.
288
- */
289
/**
 * Make a streaming call through the active provider.
 * Falls back to next provider in chain on retryable errors.
 * Skips providers that are over budget.
 *
 * @param {Array} messages - chat messages
 * @param {Array} tools - tool definitions (may be empty)
 * @param {Object} [options] - { signal, onToken, timeout, model, ... }
 * @returns {Promise<{ content: string, tool_calls: Array }>}
 * @throws the last provider error, or a budget/availability error.
 */
async function callStream(messages, tools, options = {}) {
  initDefaults();
  const providersToTry = [activeProviderName, ...fallbackChain.filter((p) => p !== activeProviderName)];

  let lastError;
  let budgetBlockedCount = 0;
  let configuredCount = 0;
  for (let idx = 0; idx < providersToTry.length; idx++) {
    const provName = providersToTry[idx];
    const provider = providers[provName];
    if (!provider || !provider.isConfigured()) continue;
    configuredCount++;

    // Budget gate: skip providers that are over budget
    const budget = checkBudget(provName);
    if (!budget.allowed) {
      budgetBlockedCount++;
      lastError = new Error(`Budget limit reached for ${provName}: $${budget.spent.toFixed(2)} / $${budget.limit.toFixed(2)}`);
      continue;
    }

    try {
      const isFallback = idx > 0;
      const model = isFallback ? resolveModelForProvider(activeModelId, provName) : activeModelId;
      if (isFallback && model !== activeModelId) {
        process.stderr.write(` [fallback: ${provName}:${model}]\n`);
      }
      // BUGFIX: spread options FIRST so the fallback-resolved model cannot be
      // clobbered by a stale options.model (previously `...options` came last
      // and silently overrode `model`). options.signal still flows through.
      return await provider.stream(messages, tools, { ...options, model });
    } catch (err) {
      lastError = err;
      if (isRetryableError(err) && idx < providersToTry.length - 1) {
        continue;
      }
      throw err;
    }
  }

  if (budgetBlockedCount > 0 && budgetBlockedCount === configuredCount) {
    throw new Error('All providers are over budget. Use /budget to check limits or /budget <provider> off to remove a limit.');
  }
  throw lastError || new Error('No configured provider available');
}
331
-
332
- /**
333
- * Make a non-streaming call through the active provider.
334
- * Falls back to next provider in chain on retryable errors.
335
- * Skips providers that are over budget.
336
- */
337
/**
 * Make a non-streaming call through the active provider.
 * Falls back to next provider in chain on retryable errors; for each
 * provider the streaming endpoint is also tried (output collected silently)
 * before giving up, since some providers handle stream:true more reliably.
 * Skips providers that are over budget.
 *
 * @param {Array} messages - chat messages
 * @param {Array} tools - tool definitions (may be empty)
 * @param {Object} [options] - { provider, model, timeout, ... }
 * @returns {Promise<{ content: string, tool_calls: Array }>}
 */
async function callChat(messages, tools, options = {}) {
  initDefaults();

  // Direct provider override: skip fallback chain
  if (options.provider) {
    const provider = providers[options.provider];
    if (!provider || !provider.isConfigured()) {
      throw new Error(`Provider '${options.provider}' is not available`);
    }
    // BUGFIX: spread options first so the resolved model key always wins.
    const chatOpts = { ...options, model: options.model || activeModelId };
    try {
      return await provider.chat(messages, tools, chatOpts);
    } catch (chatErr) {
      // Fallback: some providers handle stream:true better than stream:false
      // Use streaming endpoint but collect the full response silently
      if (typeof provider.stream === 'function') {
        try {
          return await provider.stream(messages, tools, { ...chatOpts, onToken: () => {} });
        } catch { /* stream fallback also failed — throw original error */ }
      }
      throw chatErr;
    }
  }

  const providersToTry = [activeProviderName, ...fallbackChain.filter((p) => p !== activeProviderName)];

  let lastError;
  let budgetBlockedCount = 0;
  let configuredCount = 0;
  for (let idx = 0; idx < providersToTry.length; idx++) {
    const provName = providersToTry[idx];
    const provider = providers[provName];
    if (!provider || !provider.isConfigured()) continue;
    configuredCount++;

    // Budget gate: skip providers that are over budget
    const budget = checkBudget(provName);
    if (!budget.allowed) {
      budgetBlockedCount++;
      lastError = new Error(`Budget limit reached for ${provName}: $${budget.spent.toFixed(2)} / $${budget.limit.toFixed(2)}`);
      continue;
    }

    const isFallback = idx > 0;
    const model = isFallback ? resolveModelForProvider(activeModelId, provName) : activeModelId;
    try {
      if (isFallback && model !== activeModelId) {
        process.stderr.write(` [fallback: ${provName}:${model}]\n`);
      }
      // BUGFIX: spread options first so the fallback-resolved model cannot be
      // clobbered by a stale options.model carried in from the caller.
      return await provider.chat(messages, tools, { ...options, model });
    } catch (err) {
      // Fallback: try streaming endpoint before giving up on this provider
      if (typeof provider.stream === 'function') {
        try {
          return await provider.stream(messages, tools, { ...options, model, onToken: () => {} });
        } catch { /* stream fallback also failed — continue with original error */ }
      }
      lastError = err;
      if (isRetryableError(err) && idx < providersToTry.length - 1) {
        continue;
      }
      throw err;
    }
  }

  if (budgetBlockedCount > 0 && budgetBlockedCount === configuredCount) {
    throw new Error('All providers are over budget. Use /budget to check limits or /budget <provider> off to remove a limit.');
  }
  throw lastError || new Error('No configured provider available');
}
408
-
409
- /**
410
- * Get all configured providers with their models.
411
- * @returns {Array<{ name: string, models: Array<{ id: string, name: string, maxTokens?: number, contextWindow?: number }> }>}
412
- */
413
/**
 * Get all configured providers with their models.
 * @returns {Array<{ name: string, models: Array<{ id: string, name: string, maxTokens?: number, contextWindow?: number }> }>}
 */
function getConfiguredProviders() {
  initDefaults();
  return Object.entries(providers)
    .filter(([, provider]) => provider.isConfigured())
    .map(([name, provider]) => ({
      name,
      models: Object.values(provider.getModels()),
    }));
}
423
-
424
- // ─── Reset (for testing) ───────────────────────────────────────
425
-
426
/** Wipe all registry state back to its pre-init form (for testing only). */
function _reset() {
  for (const key of Object.keys(providers)) {
    delete providers[key];
  }
  activeProviderName = null;
  activeModelId = null;
  fallbackChain = [];
}
434
-
435
// Public registry API. `_reset` is exported for tests only.
module.exports = {
  registerProvider,
  getProvider,
  getActiveProvider,
  getActiveProviderName,
  getActiveModelId,
  getActiveModel,
  setActiveModel,
  getModelNames,
  parseModelSpec,
  listProviders,
  listAllModels,
  callStream,
  callChat,
  getConfiguredProviders,
  setFallbackChain,
  getFallbackChain,
  resolveModelForProvider,
  _reset,
};