unified-ai-router 3.4.1 → 3.4.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/main.js +93 -17
- package/{openai-compatible-server/index.js → openai-server.js} +18 -64
- package/package.json +2 -2
- package/provider.js +29 -51
- package/readme.md +1 -8
package/main.js
CHANGED
@@ -4,38 +4,31 @@ const pretty = require( "pino-pretty" );
 const pinoStream = pretty({ colorize: true, ignore: "pid,hostname" });
 const logger = pino({ base: false }, pinoStream );
 
-const CircuitBreaker = require( "opossum" );
+const CircuitBreaker = require( "opossum" );
 
 class AIRouter
 {
     constructor ( providers )
     {
-        this.providers = providers;
+        this.providers = this._initializeProviders( providers );
 
         const defaultCircuitOptions = {
-            timeout: 300000,
-            errorThresholdPercentage: 50,
-            resetTimeout: 9000000,
+            timeout: 300000,
+            errorThresholdPercentage: 50,
+            resetTimeout: 9000000,
         };
-
         for ( const provider of this.providers )
         {
-            // allow provider to override circuit options
            const circuitOptions = Object.assign({}, defaultCircuitOptions, provider.circuitOptions || {});
 
-            // action receives an object: { params, withResponse }
             const action = async ({ params, withResponse }) =>
             {
                 const client = this.createClient( provider );
 
-                // If caller requested .withResponse() use it
                 if ( withResponse )
                 {
-                    // return whatever .withResponse() returns (assumed promise resolving to { data, response })
                     return client.chat.completions.create( params ).withResponse();
                 }
-
-                // Normal create (may return Promise resolving to response OR an async iterable for streaming)
                 return client.chat.completions.create( params );
             };
 
@@ -199,11 +192,6 @@ class AIRouter
         const models = [];
         for ( const provider of this.providers )
         {
-            if ( !provider.apiKey )
-            {
-                logger.warn( `Skipping provider ${provider.name} due to missing API key` );
-                continue;
-            }
             try
             {
                 logger.info( `Fetching models for provider: ${provider.name}` );
@@ -227,6 +215,94 @@ class AIRouter
         }
         return models;
     }
+
+    async checkProvidersStatus ()
+    {
+        const healthCheckPromises = this.providers.map( async ( provider ) =>
+        {
+            const maskApiKey = ( apiKey ) =>
+            {
+                if ( !apiKey || typeof apiKey !== "string" || apiKey.length < 8 )
+                {
+                    return "Invalid API Key";
+                }
+                return `${apiKey.substring( 0, 4 )}...${apiKey.substring( apiKey.length - 4 )}`;
+            };
+
+            try
+            {
+                const client = this.createClient( provider );
+                await client.chat.completions.create({
+                    messages: [{ role: "user", content: "test" }],
+                    model: provider.model,
+                    max_tokens: 1,
+                });
+                return {
+                    name: provider.name,
+                    status: "ok",
+                    apiKey: maskApiKey( provider.apiKey ),
+                };
+            }
+            catch ( error )
+            {
+                return {
+                    name: provider.name,
+                    status: "error",
+                    reason: error.message.substring( 0, 100 ),
+                    apiKey: maskApiKey( provider.apiKey ),
+                };
+            }
+        });
+
+        const results = await Promise.allSettled( healthCheckPromises );
+        const processedResults = results.map( result =>
+        {
+            if ( result.status === "fulfilled" )
+            {
+                return result.value;
+            }
+            return {
+                name: "unknown",
+                status: "error",
+                reason: result.reason.message.substring( 0, 100 ),
+                apiKey: "N/A",
+            };
+        });
+
+        return processedResults.sort( ( a, b ) =>
+        {
+            if ( a.status === "ok" && b.status !== "ok" ) return -1;
+            if ( a.status !== "ok" && b.status === "ok" ) return 1;
+            return 0;
+        });
+    }
+
+    _initializeProviders ( providers )
+    {
+        const allProviders = [];
+        for ( const p of providers )
+        {
+            if ( Array.isArray( p.apiKey ) )
+            {
+                p.apiKey.forEach( ( key, i ) =>
+                {
+                    if ( key )
+                    {
+                        allProviders.push({
+                            ...p,
+                            apiKey: key,
+                            name: `${p.name}_${i + 1}`
+                        });
+                    }
+                });
+            }
+            else if ( p.apiKey )
+            {
+                allProviders.push( p );
+            }
+        }
+        return allProviders;
+    }
 }
 
 module.exports = AIRouter;
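The substantive change in `main.js` is the new `_initializeProviders` pass: a provider whose `apiKey` is an array is expanded into one entry per key (named `<name>_1`, `<name>_2`, …), and entries with no usable key are dropped up front, which is why the old warn-and-skip check in the model-listing loop could be deleted. A minimal sketch of the expansion, using hypothetical key strings (the package's `main` entry exports the `AIRouter` class):

```js
const AIRouter = require( "unified-ai-router" );

// One logical provider backed by two (hypothetical) API keys.
const router = new AIRouter([
    {
        name: "gemini_pro",
        apiKey: [ "AIza-key-one-0000", "AIza-key-two-0000" ],
        model: "gemini-2.5-pro",
        apiUrl: "https://generativelanguage.googleapis.com/v1beta/openai/",
    },
]);

// _initializeProviders has flattened the array into independent fallback targets:
// prints [ "gemini_pro_1", "gemini_pro_2" ]
console.log( router.providers.map( p => p.name ) );
```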
package/{openai-compatible-server/index.js → openai-server.js}
RENAMED
@@ -5,8 +5,8 @@ const pretty = require( "pino-pretty" );
 const pinoStream = pretty({ colorize: true, ignore: "pid,hostname" });
 const logger = pino({ base: false }, pinoStream );
 require( "dotenv" ).config({ quiet: true });
-const AIRouter = require( "../main" );
-const providers = require( "../provider" )
+const AIRouter = require( "./main" );
+const providers = require( "./provider" )
 const aiRouter = new AIRouter( providers );
 
 const app = express();
@@ -14,7 +14,7 @@ app.use( cors() );
 app.use( express.json({ limit: "50mb" }) );
 
 
-app.post( "/v1/chat/completions", async ( req, res ) =>
+const handleChatCompletion = async ( req, res ) =>
 {
     const { messages, model, stream, ...rest } = req.body;
 
@@ -46,7 +46,6 @@ app.post( "/v1/chat/completions", async ( req, res ) =>
             res.write( `data: ${JSON.stringify({ error: { message: err.message } })}\n\n` );
             res.write( "data: [DONE]\n\n" );
         }
-
         res.end();
     }
     else
@@ -54,7 +53,6 @@ app.post( "/v1/chat/completions", async ( req, res ) =>
         try
         {
             const result = await aiRouter.chatCompletionWithResponse( messages, { model, stream, ...rest });
-
             res.json( result.data );
         }
         catch ( err )
@@ -63,59 +61,9 @@ app.post( "/v1/chat/completions", async ( req, res ) =>
             res.status( 500 ).json({ error: { message: err.message } });
         }
     }
-});
-app.post( "/chat/completions", async ( req, res ) =>
-{
-    const { messages, model, stream, ...rest } = req.body;
-
-    if ( !messages || !Array.isArray( messages ) )
-    {
-        return res.status( 400 ).json({ error: { message: "messages must be an array" } });
-    }
-
-    if ( stream )
-    {
-        res.setHeader( "Content-Type", "text/event-stream" );
-        res.setHeader( "Cache-Control", "no-cache" );
-        res.setHeader( "Connection", "keep-alive" );
+};
 
-        try
-        {
-            const result = await aiRouter.chatCompletionWithResponse( messages, { model, stream, ...rest });
-
-            for await ( const chunk of result.data )
-            {
-                res.write( `data: ${JSON.stringify( chunk )}\n\n` );
-            }
-
-            res.write( "data: [DONE]\n\n" );
-        }
-        catch ( err )
-        {
-            logger.error( err );
-            res.write( `data: ${JSON.stringify({ error: { message: err.message } })}\n\n` );
-            res.write( "data: [DONE]\n\n" );
-        }
-
-        res.end();
-    }
-    else
-    {
-        try
-        {
-            const result = await aiRouter.chatCompletionWithResponse( messages, { model, stream, ...rest });
-
-            res.json( result.data );
-        }
-        catch ( err )
-        {
-            logger.error( err );
-            res.status( 500 ).json({ error: { message: err.message } });
-        }
-    }
-});
-
-app.get( "/v1/models", async ( req, res ) =>
+const handleGetModels = async ( req, res ) =>
 {
     try
     {
@@ -127,24 +75,30 @@ app.get( "/v1/models", async ( req, res ) =>
         logger.error( `Error in /v1/models: ${error.message}` );
         res.status( 500 ).json({ error: { message: error.message } });
     }
-});
+};
+
+app.post( "/v1/chat/completions", handleChatCompletion );
+app.post( "/chat/completions", handleChatCompletion );
 
-app.get( "/models", async ( req, res ) =>
+app.get( "/v1/models", handleGetModels );
+app.get( "/models", handleGetModels );
+
+app.get( "/health", ( req, res ) => { return res.json({ status: "ok" }) });
+
+app.get( "/providers/status", async ( req, res ) =>
 {
     try
     {
-        const
-        res.json({ data:
+        const statuses = await aiRouter.checkProvidersStatus();
+        res.json({ data: statuses });
     }
     catch ( error )
     {
-        logger.error( `Error in /
+        logger.error( `Error in /v1/providers/status: ${error.message}` );
         res.status( 500 ).json({ error: { message: error.message } });
     }
 });
 
-app.get( "/health", ( req, res ) => { return res.json({ status: "ok" }) });
-
 // Start server
 const PORT = process.env.PORT || 3000;
 app.listen( PORT, () =>
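On the server side, the four chat/model routes now share two handlers, and the new `GET /providers/status` endpoint fires a one-token probe at every provider via `checkProvidersStatus`, returning healthy providers first with API keys masked to their first and last four characters. A quick smoke test against a locally running server; the port and sample output are illustrative:

```js
// Node 18+ for global fetch; assumes `npm start` is serving on port 3000.
( async () =>
{
    const res = await fetch( "http://localhost:3000/providers/status" );
    const { data } = await res.json();
    // data is shaped like:
    // [ { name: "gemini_pro_1", status: "ok", apiKey: "AIza...Xy12" },
    //   { name: "cerebras_1", status: "error", reason: "401 ...", apiKey: "csk-...Q2w8" } ]
    for ( const p of data ) console.log( `${p.name}: ${p.status}` );
})();
```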
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "unified-ai-router",
-  "version": "3.4.1",
+  "version": "3.4.3",
   "description": "A unified interface for multiple LLM providers with automatic fallback. This project includes an OpenAI-compatible server and a deployable Telegram bot with a Mini App interface. It supports major providers like OpenAI, Google, Grok, and more, ensuring reliability and flexibility for your AI applications.",
   "license": "ISC",
   "author": "mlibre",
@@ -16,7 +16,7 @@
   "main": "main.js",
   "scripts": {
     "test": "echo \"Error: no test specified\" && exit 1",
-    "start": "node openai-compatible-server/index.js",
+    "start": "node openai-server.js",
     "docs:dev": "vitepress dev docs",
     "docs:build": "vitepress build docs",
     "docs:preview": "vitepress preview docs"
package/provider.js
CHANGED
@@ -1,67 +1,41 @@
 module.exports = [
     {
-        name: "gemini",
-        apiKey: process.env.GEMINI_API_KEY,
-        model: "gemini-2.5-pro",
-        apiUrl: "https://generativelanguage.googleapis.com/v1beta/openai/",
-    },
-    {
-        name: "gemini_2",
-        apiKey: process.env.GEMINI_API_KEY_2,
-        model: "gemini-2.5-pro",
-        apiUrl: "https://generativelanguage.googleapis.com/v1beta/openai/",
-    },
-    {
-        name: "gemini_3",
-        apiKey: process.env.GEMINI_API_KEY_3,
+        name: "gemini_pro",
+        apiKey: [
+            process.env.GEMINI_API_KEY,
+            process.env.GEMINI_API_KEY_2,
+            process.env.GEMINI_API_KEY_3,
+        ],
         model: "gemini-2.5-pro",
         apiUrl: "https://generativelanguage.googleapis.com/v1beta/openai/",
     },
     {
         name: "cerebras",
-        apiKey: process.env.CEREBRAS_API_KEY,
+        apiKey: [
+            process.env.CEREBRAS_API_KEY,
+            process.env.CEREBRAS_API_KEY_2,
+        ],
         model: "gpt-oss-120b",
         apiUrl: "https://api.cerebras.ai/v1",
     },
     {
-        name: "
-        apiKey:
-
-
-
-
-
-        apiKey: process.env.OPENROUTER_API_KEY,
-        model: "qwen/qwen3-coder:free",
-        apiUrl: "https://openrouter.ai/api/v1",
-    },
-    {
-        name: "openrouter",
-        apiKey: process.env.OPENROUTER_API_KEY,
-        model: "z-ai/glm-4.5-air:free",
-        apiUrl: "https://openrouter.ai/api/v1",
-    },
-    {
-        name: "openrouter_2",
-        apiKey: process.env.OPENROUTER_API_KEY_2,
-        model: "z-ai/glm-4.5-air:free",
-        apiUrl: "https://openrouter.ai/api/v1",
-    },
-    {
-        name: "openrouter_2",
-        apiKey: process.env.OPENROUTER_API_KEY_2,
-        model: "qwen/qwen3-coder:free",
-        apiUrl: "https://openrouter.ai/api/v1",
-    },
-    {
-        name: "openrouter_3",
-        apiKey: process.env.OPENROUTER_API_KEY_3,
+        name: "openrouter_qwen",
+        apiKey: [
+            process.env.OPENROUTER_API_KEY,
+            process.env.OPENROUTER_API_KEY_2,
+            process.env.OPENROUTER_API_KEY_3,
+            process.env.OPENROUTER_API_KEY_4
+        ],
         model: "qwen/qwen3-coder:free",
         apiUrl: "https://openrouter.ai/api/v1",
     },
     {
-        name: "
-        apiKey:
+        name: "openrouter_glm",
+        apiKey: [
+            process.env.OPENROUTER_API_KEY,
+            process.env.OPENROUTER_API_KEY_2,
+            process.env.OPENROUTER_API_KEY_3,
+        ],
         model: "z-ai/glm-4.5-air:free",
         apiUrl: "https://openrouter.ai/api/v1",
     },
@@ -72,8 +46,12 @@ module.exports = [
         apiUrl: "https://api.groq.com/openai/v1",
     },
     {
-        name: "
-        apiKey:
+        name: "gemini_flash",
+        apiKey: [
+            process.env.GEMINI_API_KEY,
+            process.env.GEMINI_API_KEY_2,
+            process.env.GEMINI_API_KEY_3,
+        ],
         model: "gemini-2.5-flash",
         apiUrl: "https://generativelanguage.googleapis.com/v1beta/openai/",
     },
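The config now declares each model once and lists every candidate key in an `apiKey` array; unset environment variables are simply filtered out at startup by `_initializeProviders`. A sketch of an extra entry in the same convention (the name, model id, and env vars here are hypothetical; only the `name`/`apiKey`/`model`/`apiUrl` shape matters):

```js
// Appended to the exported array in provider.js
{
    name: "groq_llama",                  // hypothetical entry name
    apiKey: [
        process.env.GROQ_API_KEY,        // any unset key is skipped,
        process.env.GROQ_API_KEY_2,      // not an error
    ],
    model: "llama-3.3-70b-versatile",    // hypothetical model id
    apiUrl: "https://api.groq.com/openai/v1",
},
```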
package/readme.md
CHANGED
@@ -180,15 +180,8 @@ Get your API keys from the following providers:
 
 - `main.js` - Core AIRouter library implementing the unified interface and fallback logic
 - `provider.js` - Configuration for supported AI providers
-- `openai-compatible-server/index.js` - OpenAI-compatible API server
+- `openai-server.js` - OpenAI-compatible API server
 - `tests/` - Comprehensive tests for the library, server, and tools
-- `bruno/` - Bruno API collection for testing endpoints
-- `cloud-flare/` - Ready-to-deploy Cloudflare Pages setup for the Telegram bot
-  - `functions/api/index.js` - Telegram webhook handler
-  - `functions/api/search.js` - Search proxy endpoint
-  - `public/` - Mini App frontend (HTML, CSS, JS)
-  - `src/config.js` - Bot configuration
-  - `src/telegram.js` - Telegram API integration
 
 ## 📄 License
 