unified-ai-router 3.4.1 → 3.4.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/main.js CHANGED
@@ -4,38 +4,31 @@ const pretty = require( "pino-pretty" );
  const pinoStream = pretty({ colorize: true, ignore: "pid,hostname" });
  const logger = pino({ base: false }, pinoStream );
 
- const CircuitBreaker = require( "opossum" ); // <-- added
+ const CircuitBreaker = require( "opossum" );
 
  class AIRouter
  {
    constructor ( providers )
    {
-     this.providers = providers;
+     this.providers = this._initializeProviders( providers );
 
      const defaultCircuitOptions = {
-       timeout: 300000, // time in ms before action considered failed
-       errorThresholdPercentage: 50, // % of failures before opening the circuit
-       resetTimeout: 9000000, // time in ms to wait before trying again
+       timeout: 300000,
+       errorThresholdPercentage: 50,
+       resetTimeout: 9000000,
      };
-
      for ( const provider of this.providers )
      {
-       // allow provider to override circuit options
        const circuitOptions = Object.assign({}, defaultCircuitOptions, provider.circuitOptions || {});
 
-       // action receives an object: { params, withResponse }
        const action = async ({ params, withResponse }) =>
        {
          const client = this.createClient( provider );
 
-         // If caller requested .withResponse() use it
          if ( withResponse )
          {
-           // return whatever .withResponse() returns (assumed promise resolving to { data, response })
            return client.chat.completions.create( params ).withResponse();
          }
-
-         // Normal create (may return Promise resolving to response OR an async iterable for streaming)
          return client.chat.completions.create( params );
        };
@@ -199,11 +192,6 @@ class AIRouter
      const models = [];
      for ( const provider of this.providers )
      {
-       if ( !provider.apiKey )
-       {
-         logger.warn( `Skipping provider ${provider.name} due to missing API key` );
-         continue;
-       }
        try
        {
          logger.info( `Fetching models for provider: ${provider.name}` );
@@ -227,6 +215,94 @@ class AIRouter
      }
      return models;
    }
+
+   async checkProvidersStatus ()
+   {
+     const healthCheckPromises = this.providers.map( async ( provider ) =>
+     {
+       const maskApiKey = ( apiKey ) =>
+       {
+         if ( !apiKey || typeof apiKey !== "string" || apiKey.length < 8 )
+         {
+           return "Invalid API Key";
+         }
+         return `${apiKey.substring( 0, 4 )}...${apiKey.substring( apiKey.length - 4 )}`;
+       };
+
+       try
+       {
+         const client = this.createClient( provider );
+         await client.chat.completions.create({
+           messages: [{ role: "user", content: "test" }],
+           model: provider.model,
+           max_tokens: 1,
+         });
+         return {
+           name: provider.name,
+           status: "ok",
+           apiKey: maskApiKey( provider.apiKey ),
+         };
+       }
+       catch ( error )
+       {
+         return {
+           name: provider.name,
+           status: "error",
+           reason: error.message.substring( 0, 100 ),
+           apiKey: maskApiKey( provider.apiKey ),
+         };
+       }
+     });
+
+     const results = await Promise.allSettled( healthCheckPromises );
+     const processedResults = results.map( result =>
+     {
+       if ( result.status === "fulfilled" )
+       {
+         return result.value;
+       }
+       return {
+         name: "unknown",
+         status: "error",
+         reason: result.reason.message.substring( 0, 100 ),
+         apiKey: "N/A",
+       };
+     });
+
+     return processedResults.sort( ( a, b ) =>
+     {
+       if ( a.status === "ok" && b.status !== "ok" ) return -1;
+       if ( a.status !== "ok" && b.status === "ok" ) return 1;
+       return 0;
+     });
+   }
+
+   _initializeProviders ( providers )
+   {
+     const allProviders = [];
+     for ( const p of providers )
+     {
+       if ( Array.isArray( p.apiKey ) )
+       {
+         p.apiKey.forEach( ( key, i ) =>
+         {
+           if ( key )
+           {
+             allProviders.push({
+               ...p,
+               apiKey: key,
+               name: `${p.name}_${i + 1}`
+             });
+           }
+         });
+       }
+       else if ( p.apiKey )
+       {
+         allProviders.push( p );
+       }
+     }
+     return allProviders;
+   }
  }
 
  module.exports = AIRouter;
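For reference, a minimal sketch of the fan-out the new `_initializeProviders` performs. The provider entries and keys below are hypothetical, and this assumes the constructor succeeds without network access (clients are only created lazily inside the breaker action):

```javascript
// Hypothetical provider entries; only their shape matters here.
const AIRouter = require( "unified-ai-router" );

const router = new AIRouter([
  {
    name: "example",
    // An unset env variable leaves a falsy slot, which is skipped.
    apiKey: [ "key-aaaa1111", undefined, "key-bbbb2222" ],
    model: "some-model",
    apiUrl: "https://api.example.com/v1",
  },
  { name: "plain", apiKey: "key-cccc3333", model: "some-model", apiUrl: "https://api.example.com/v1" },
  { name: "keyless", model: "some-model", apiUrl: "https://api.example.com/v1" },
]);

console.log( router.providers.map( p => p.name ) );
// => [ "example_1", "example_3", "plain" ]
```

The `_${i + 1}` suffix keeps each key's original slot, so gaps are preserved; an entry with no usable key never reaches the router at all, which is why the missing-API-key guard in `getModels` could be removed.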
package/openai-server.js CHANGED (renamed from package/openai-compatible-server/index.js)
@@ -5,8 +5,8 @@ const pretty = require( "pino-pretty" );
  const pinoStream = pretty({ colorize: true, ignore: "pid,hostname" });
  const logger = pino({ base: false }, pinoStream );
  require( "dotenv" ).config({ quiet: true });
- const AIRouter = require( "../main" );
- const providers = require( "../provider" )
+ const AIRouter = require( "./main" );
+ const providers = require( "./provider" )
  const aiRouter = new AIRouter( providers );
 
  const app = express();
@@ -14,7 +14,7 @@ app.use( cors() );
  app.use( express.json({ limit: "50mb" }) );
 
 
- app.post( "/v1/chat/completions", async ( req, res ) =>
+ const handleChatCompletion = async ( req, res ) =>
  {
    const { messages, model, stream, ...rest } = req.body;
 
@@ -46,7 +46,6 @@ app.post( "/v1/chat/completions", async ( req, res ) =>
      res.write( `data: ${JSON.stringify({ error: { message: err.message } })}\n\n` );
      res.write( "data: [DONE]\n\n" );
    }
-
    res.end();
  }
  else
@@ -54,7 +53,6 @@ app.post( "/v1/chat/completions", async ( req, res ) =>
    try
    {
      const result = await aiRouter.chatCompletionWithResponse( messages, { model, stream, ...rest });
-
      res.json( result.data );
    }
    catch ( err )
@@ -63,59 +61,9 @@ app.post( "/v1/chat/completions", async ( req, res ) =>
      res.status( 500 ).json({ error: { message: err.message } });
    }
  }
- });
- app.post( "/chat/completions", async ( req, res ) =>
- {
-   const { messages, model, stream, ...rest } = req.body;
-
-   if ( !messages || !Array.isArray( messages ) )
-   {
-     return res.status( 400 ).json({ error: { message: "messages must be an array" } });
-   }
-
-   if ( stream )
-   {
-     res.setHeader( "Content-Type", "text/event-stream" );
-     res.setHeader( "Cache-Control", "no-cache" );
-     res.setHeader( "Connection", "keep-alive" );
+ };
 
-     try
-     {
-       const result = await aiRouter.chatCompletionWithResponse( messages, { model, stream, ...rest });
-
-       for await ( const chunk of result.data )
-       {
-         res.write( `data: ${JSON.stringify( chunk )}\n\n` );
-       }
-
-       res.write( "data: [DONE]\n\n" );
-     }
-     catch ( err )
-     {
-       logger.error( err );
-       res.write( `data: ${JSON.stringify({ error: { message: err.message } })}\n\n` );
-       res.write( "data: [DONE]\n\n" );
-     }
-
-     res.end();
-   }
-   else
-   {
-     try
-     {
-       const result = await aiRouter.chatCompletionWithResponse( messages, { model, stream, ...rest });
-
-       res.json( result.data );
-     }
-     catch ( err )
-     {
-       logger.error( err );
-       res.status( 500 ).json({ error: { message: err.message } });
-     }
-   }
- });
-
- app.get( "/v1/models", async ( req, res ) =>
+ const handleGetModels = async ( req, res ) =>
  {
    try
    {
@@ -127,24 +75,30 @@ app.get( "/v1/models", async ( req, res ) =>
      logger.error( `Error in /v1/models: ${error.message}` );
      res.status( 500 ).json({ error: { message: error.message } });
    }
- });
+ };
+
+ app.post( "/v1/chat/completions", handleChatCompletion );
+ app.post( "/chat/completions", handleChatCompletion );
 
- app.get( "/models", async ( req, res ) =>
+ app.get( "/v1/models", handleGetModels );
+ app.get( "/models", handleGetModels );
+
+ app.get( "/health", ( req, res ) => { return res.json({ status: "ok" }) });
+
+ app.get( "/providers/status", async ( req, res ) =>
  {
    try
    {
-     const models = await aiRouter.getModels();
-     res.json({ data: models });
+     const statuses = await aiRouter.checkProvidersStatus();
+     res.json({ data: statuses });
    }
    catch ( error )
    {
-     logger.error( `Error in /models: ${error.message}` );
+     logger.error( `Error in /v1/providers/status: ${error.message}` );
      res.status( 500 ).json({ error: { message: error.message } });
    }
  });
 
- app.get( "/health", ( req, res ) => { return res.json({ status: "ok" }) });
-
  // Start server
  const PORT = process.env.PORT || 3000;
  app.listen( PORT, () =>
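With the routes consolidated onto shared handlers, the `/v1`-prefixed and bare paths behave identically, and the new status endpoint can be probed once the server is running. Note that the path registered above is `/providers/status`, while the log message and the readme refer to `/v1/providers/status`. A hypothetical client call, with the response shape taken from `checkProvidersStatus` and purely illustrative values:

```javascript
// Hypothetical call against a local instance started with `npm start` (default port 3000).
const res = await fetch( "http://localhost:3000/providers/status" );
const { data } = await res.json();
console.log( data );
// Healthy providers sort first; keys are masked to their first and last 4 characters:
// [
//   { name: "gemini_pro_1", status: "ok", apiKey: "AIza...WXYZ" },
//   { name: "cerebras_1", status: "error", reason: "401 ...", apiKey: "csk-...abcd" }
// ]
```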
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "unified-ai-router",
-   "version": "3.4.1",
+   "version": "3.4.4",
    "description": "A unified interface for multiple LLM providers with automatic fallback. This project includes an OpenAI-compatible server and a deployable Telegram bot with a Mini App interface. It supports major providers like OpenAI, Google, Grok, and more, ensuring reliability and flexibility for your AI applications.",
    "license": "ISC",
    "author": "mlibre",
@@ -16,7 +16,7 @@
    "main": "main.js",
    "scripts": {
      "test": "echo \"Error: no test specified\" && exit 1",
-     "start": "node openai-compatible-server/index.js",
+     "start": "node openai-server.js",
      "docs:dev": "vitepress dev docs",
      "docs:build": "vitepress build docs",
      "docs:preview": "vitepress preview docs"
package/provider.js CHANGED
@@ -1,67 +1,42 @@
  module.exports = [
    {
-     name: "gemini_1",
-     apiKey: process.env.GEMINI_API_KEY,
-     model: "gemini-2.5-pro",
-     apiUrl: "https://generativelanguage.googleapis.com/v1beta/openai/",
-   },
-   {
-     name: "gemini_2",
-     apiKey: process.env.GEMINI_API_KEY_2,
-     model: "gemini-2.5-pro",
-     apiUrl: "https://generativelanguage.googleapis.com/v1beta/openai/",
-   },
-   {
-     name: "gemini_3",
-     apiKey: process.env.GEMINI_API_KEY_3,
+     name: "gemini_pro",
+     apiKey: [
+       process.env.GEMINI_API_KEY,
+       process.env.GEMINI_API_KEY_2,
+       process.env.GEMINI_API_KEY_3,
+     ],
      model: "gemini-2.5-pro",
      apiUrl: "https://generativelanguage.googleapis.com/v1beta/openai/",
    },
    {
      name: "cerebras",
-     apiKey: process.env.CEREBRAS_API_KEY,
+     apiKey: [
+       process.env.CEREBRAS_API_KEY,
+       process.env.CEREBRAS_API_KEY_2,
+     ],
      model: "gpt-oss-120b",
      apiUrl: "https://api.cerebras.ai/v1",
    },
    {
-     name: "cerebras_2",
-     apiKey: process.env.CEREBRAS_API_KEY_2,
-     model: "gpt-oss-120b",
-     apiUrl: "https://api.cerebras.ai/v1",
-   },
-   {
-     name: "openrouter",
-     apiKey: process.env.OPENROUTER_API_KEY,
-     model: "qwen/qwen3-coder:free",
-     apiUrl: "https://openrouter.ai/api/v1",
-   },
-   {
-     name: "openrouter",
-     apiKey: process.env.OPENROUTER_API_KEY,
-     model: "z-ai/glm-4.5-air:free",
-     apiUrl: "https://openrouter.ai/api/v1",
-   },
-   {
-     name: "openrouter_2",
-     apiKey: process.env.OPENROUTER_API_KEY_2,
-     model: "z-ai/glm-4.5-air:free",
-     apiUrl: "https://openrouter.ai/api/v1",
-   },
-   {
-     name: "openrouter_2",
-     apiKey: process.env.OPENROUTER_API_KEY_2,
-     model: "qwen/qwen3-coder:free",
-     apiUrl: "https://openrouter.ai/api/v1",
-   },
-   {
-     name: "openrouter_3",
-     apiKey: process.env.OPENROUTER_API_KEY_3,
+     name: "openrouter_qwen",
+     apiKey: [
+       process.env.OPENROUTER_API_KEY,
+       process.env.OPENROUTER_API_KEY_2,
+       process.env.OPENROUTER_API_KEY_3,
+       process.env.OPENROUTER_API_KEY_4,
+       process.env.OPENROUTER_API_KEY_5
+     ],
      model: "qwen/qwen3-coder:free",
      apiUrl: "https://openrouter.ai/api/v1",
    },
    {
-     name: "openrouter_3",
-     apiKey: process.env.OPENROUTER_API_KEY_3,
+     name: "openrouter_glm",
+     apiKey: [
+       process.env.OPENROUTER_API_KEY,
+       process.env.OPENROUTER_API_KEY_2,
+       process.env.OPENROUTER_API_KEY_3,
+     ],
      model: "z-ai/glm-4.5-air:free",
      apiUrl: "https://openrouter.ai/api/v1",
    },
@@ -72,8 +47,12 @@ module.exports = [
      apiUrl: "https://api.groq.com/openai/v1",
    },
    {
-     name: "gemini_1",
-     apiKey: process.env.GEMINI_API_KEY,
+     name: "gemini_flash",
+     apiKey: [
+       process.env.GEMINI_API_KEY,
+       process.env.GEMINI_API_KEY_2,
+       process.env.GEMINI_API_KEY_3,
+     ],
      model: "gemini-2.5-flash",
      apiUrl: "https://generativelanguage.googleapis.com/v1beta/openai/",
    },
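At runtime, `_initializeProviders` (main.js above) expands each consolidated entry into one provider per non-empty key, so the effective list depends on which variables are set in `.env`. A hypothetical inspection, assuming the package's plain CommonJS layout allows the subpath require:

```javascript
// Hypothetical: inspect the runtime provider list built from the shipped provider.js.
require( "dotenv" ).config({ quiet: true });
const AIRouter = require( "unified-ai-router" );
const providers = require( "unified-ai-router/provider" );

const router = new AIRouter( providers );
console.log( router.providers.map( p => p.name ) );
// e.g. [ "gemini_pro_1", "cerebras_1", "openrouter_qwen_1", "openrouter_glm_1", ... ]
// when only the first key of each family is set
```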
package/readme.md CHANGED
@@ -2,8 +2,8 @@
 
  Unified AI Router is a comprehensive toolkit for AI applications, featuring:
 
- - An OpenAI-compatible server for seamless API integration
- - A unified interface for multiple LLM providers with automatic fallback
+ - An **OpenAI-compatible** server for seamless API integration
+ - A **unified interface** for multiple LLM providers with **automatic fallback**
 
  It supports all OpenAI-compatible servers, including major providers like OpenAI, Google, Grok, Litellm, Vllm, Ollama and more, ensuring reliability and flexibility.
 
@@ -77,6 +77,19 @@ const response = await llm.chatCompletion(messages, {
  console.log(response);
  ```
 
+ You can also provide an array of API keys for a single provider definition.
+
+ ```javascript
+ const providers = [
+   {
+     name: "openai",
+     apiKey: [process.env.OPENAI_API_KEY_1, process.env.OPENAI_API_KEY_2],
+     model: "gpt-4",
+     apiUrl: "https://api.openai.com/v1"
+   }
+ ];
+ ```
+
  ### 🔌 OpenAI-Compatible Server
 
  The OpenAI-compatible server provides a drop-in replacement for the OpenAI API. It routes requests through the unified router with fallback logic, ensuring high availability.
@@ -90,7 +103,7 @@ The server uses the provider configurations defined in [provider.js](provider.js
 
  2. Edit `.env` and add your API keys for the desired providers (see [🔑 API Keys](#-api-keys) for sources).
 
- 3. Configure your providers in `provider.js`. Add new providers or modify existing ones with the appropriate `name`, `apiKey` (referencing the corresponding env variable), `model`, and `apiUrl` for the providers you want to use.
+ 3. Configure your providers in `provider.js`. Add new providers or modify existing ones with the appropriate `name`, `apiKey`, `model`, and `apiUrl` for the providers you want to use.
 
  To start the server locally, run:
 
@@ -105,6 +118,7 @@ The server listens at `http://localhost:3000/` and supports the following OpenAI
  - `GET /v1/models` - List available models
  - `GET /models` - List available models
  - `GET /health` - Health check
+ - `GET /v1/providers/status` - Check the status of all configured providers
 
  ### 🧪 Testing
 
@@ -180,15 +194,8 @@ Get your API keys from the following providers:
 
  - `main.js` - Core AIRouter library implementing the unified interface and fallback logic
  - `provider.js` - Configuration for supported AI providers
- - `openai-compatible-server/index.js` - OpenAI-compatible API server
+ - `openai-server.js` - OpenAI-compatible API server
  - `tests/` - Comprehensive tests for the library, server, and tools
- - `bruno/` - Bruno API collection for testing endpoints
- - `cloud-flare/` - Ready-to-deploy Cloudflare Pages setup for the Telegram bot
-   - `functions/api/index.js` - Telegram webhook handler
-   - `functions/api/search.js` - Search proxy endpoint
-   - `public/` - Mini App frontend (HTML, CSS, JS)
-   - `src/config.js` - Bot configuration
-   - `src/telegram.js` - Telegram API integration
 
  ## 📄 License