unified-ai-router 3.3.14 → 3.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.env.example +0 -2
- package/docs/configuration.md +13 -1
- package/docs/index.md +2 -0
- package/main.js +60 -9
- package/package.json +2 -1
- package/provider.js +30 -18
- package/readme.md +1 -0
package/.env.example
CHANGED
package/docs/configuration.md
CHANGED
|
@@ -45,10 +45,22 @@ Each provider object supports (at minimum) these fields:
|
|
|
45
45
|
name: "openai", // simple identifier for logs/debug
|
|
46
46
|
apiKey: process.env.OPENAI_API_KEY,
|
|
47
47
|
model: "gpt-4", // model id to request from this provider
|
|
48
|
-
apiUrl: "https://api.openai.com/v1", // base URL for provider-compatible OpenAI endpoints
|
|
48
|
+
apiUrl: "https://api.openai.com/v1", // base URL for provider-compatible OpenAI endpoints
|
|
49
49
|
}
|
|
50
50
|
```
|
|
51
51
|
|
|
52
|
+
### Circuit Breaker Configuration
|
|
53
|
+
|
|
54
|
+
The router includes built-in circuit breaker protection for each provider using the "opossum" library. This provides fault tolerance by automatically stopping requests to a provider that's experiencing issues and preventing cascading failures.
|
|
55
|
+
|
|
56
|
+
Default circuit breaker options:
|
|
57
|
+
|
|
58
|
+
* **timeout**: 300000ms (5 minutes) - time before action is considered failed
|
|
59
|
+
* **errorThresholdPercentage**: 50% - percentage of failures before opening the circuit
|
|
60
|
+
* **resetTimeout**: 9000000ms (150 minutes) - time to wait before trying the provider again
|
|
61
|
+
|
|
62
|
+
You can override these options per provider by passing `circuitOptions`.
|
|
63
|
+
|
|
52
64
|
### Important notes
|
|
53
65
|
|
|
54
66
|
* `apiKey` should reference the environment variable (use `process.env.X`). If the env var is missing the router will skip that provider and log a warning.
|
package/docs/index.md
CHANGED
|
@@ -16,6 +16,8 @@ hero:
|
|
|
16
16
|
features:
|
|
17
17
|
- title: Multi-provider fallback
|
|
18
18
|
details: If one provider fails, requests automatically fall back to the next available provider.
|
|
19
|
+
- title: Circuit breaker protection
|
|
20
|
+
details: Built-in fault tolerance with automatic circuit breaking for each provider to prevent cascading failures.
|
|
19
21
|
- title: OpenAI-compatible API
|
|
20
22
|
details: Run a drop-in replacement for the OpenAI chat completion endpoints (streaming & non-streaming).
|
|
21
23
|
- title: Tool calling & streaming
|
package/main.js
CHANGED
|
@@ -4,11 +4,63 @@ const pretty = require( "pino-pretty" );
|
|
|
4
4
|
const pinoStream = pretty({ colorize: true, ignore: "pid,hostname" });
|
|
5
5
|
const logger = pino({ base: false }, pinoStream );
|
|
6
6
|
|
|
7
|
+
const CircuitBreaker = require( "opossum" ); // <-- added
|
|
8
|
+
|
|
7
9
|
class AIRouter
|
|
8
10
|
{
|
|
9
11
|
constructor ( providers )
|
|
10
12
|
{
|
|
11
13
|
this.providers = providers;
|
|
14
|
+
|
|
15
|
+
const defaultCircuitOptions = {
|
|
16
|
+
timeout: 300000, // time in ms before action considered failed
|
|
17
|
+
errorThresholdPercentage: 50, // % of failures before opening the circuit
|
|
18
|
+
resetTimeout: 9000000, // time in ms to wait before trying again
|
|
19
|
+
};
|
|
20
|
+
|
|
21
|
+
for ( const provider of this.providers )
|
|
22
|
+
{
|
|
23
|
+
// allow provider to override circuit options
|
|
24
|
+
const circuitOptions = Object.assign({}, defaultCircuitOptions, provider.circuitOptions || {});
|
|
25
|
+
|
|
26
|
+
// action receives an object: { params, withResponse }
|
|
27
|
+
const action = async ({ params, withResponse }) =>
|
|
28
|
+
{
|
|
29
|
+
const client = this.createClient( provider );
|
|
30
|
+
|
|
31
|
+
// If caller requested .withResponse() use it
|
|
32
|
+
if ( withResponse )
|
|
33
|
+
{
|
|
34
|
+
// return whatever .withResponse() returns (assumed promise resolving to { data, response })
|
|
35
|
+
return client.chat.completions.create( params ).withResponse();
|
|
36
|
+
}
|
|
37
|
+
|
|
38
|
+
// Normal create (may return Promise resolving to response OR an async iterable for streaming)
|
|
39
|
+
return client.chat.completions.create( params );
|
|
40
|
+
};
|
|
41
|
+
|
|
42
|
+
const breaker = new CircuitBreaker( action, circuitOptions );
|
|
43
|
+
|
|
44
|
+
// simple logging for breaker transitions
|
|
45
|
+
breaker.on( "open", ( ) =>
|
|
46
|
+
{
|
|
47
|
+
return logger.warn( `Circuit open for provider: ${provider.name}` )
|
|
48
|
+
});
|
|
49
|
+
breaker.on( "halfOpen", () => { return logger.info( `Circuit half-open for provider: ${provider.name}` ) });
|
|
50
|
+
breaker.on( "close", () => { return logger.info( `Circuit closed for provider: ${provider.name}` ) });
|
|
51
|
+
breaker.on( "fallback", () => { return logger.warn( `Fallback triggered for provider: ${provider.name}` ) });
|
|
52
|
+
breaker.on( "failure", ( err ) =>
|
|
53
|
+
{
|
|
54
|
+
logger.error({ provider: provider.name, event: "failure", error: err.message }, "Breaker failure event" );
|
|
55
|
+
});
|
|
56
|
+
// optional fallback: we throw so the router will continue to next provider
|
|
57
|
+
breaker.fallback( ( err ) =>
|
|
58
|
+
{
|
|
59
|
+
throw new Error( `Circuit open for ${provider.name}` );
|
|
60
|
+
});
|
|
61
|
+
|
|
62
|
+
provider.breaker = breaker;
|
|
63
|
+
}
|
|
12
64
|
}
|
|
13
65
|
|
|
14
66
|
createClient ( provider )
|
|
@@ -33,8 +85,6 @@ class AIRouter
|
|
|
33
85
|
try
|
|
34
86
|
{
|
|
35
87
|
logger.info( `Attempting with provider: ${provider.name}` );
|
|
36
|
-
const client = this.createClient( provider );
|
|
37
|
-
|
|
38
88
|
const params = {
|
|
39
89
|
messages,
|
|
40
90
|
...tools && tools.length > 0 ? { tools } : {},
|
|
@@ -42,10 +92,11 @@ class AIRouter
|
|
|
42
92
|
...restOptions,
|
|
43
93
|
model: provider.model
|
|
44
94
|
};
|
|
45
|
-
|
|
95
|
+
const result = await provider.breaker.fire({ params, withResponse: false });
|
|
96
|
+
logger.info( `Successful with provider: ${provider.name}` );
|
|
46
97
|
if ( isStreaming )
|
|
47
98
|
{
|
|
48
|
-
const responseStream =
|
|
99
|
+
const responseStream = result;
|
|
49
100
|
return ( async function* ()
|
|
50
101
|
{
|
|
51
102
|
for await ( const chunk of responseStream )
|
|
@@ -78,7 +129,7 @@ class AIRouter
|
|
|
78
129
|
}
|
|
79
130
|
else
|
|
80
131
|
{
|
|
81
|
-
const response =
|
|
132
|
+
const response = result;
|
|
82
133
|
const content = response.choices[0]?.message?.content;
|
|
83
134
|
const reasoning = response.choices[0]?.message?.reasoning;
|
|
84
135
|
const tool_calls = response.choices[0]?.message?.tool_calls
|
|
@@ -104,7 +155,7 @@ class AIRouter
|
|
|
104
155
|
// Continue to next provider
|
|
105
156
|
}
|
|
106
157
|
}
|
|
107
|
-
throw new Error( `All providers failed. Last error: ${lastError
|
|
158
|
+
throw new Error( `All providers failed. Last error: ${lastError?.message || "unknown"}` );
|
|
108
159
|
}
|
|
109
160
|
|
|
110
161
|
async chatCompletionWithResponse ( messages, options = {})
|
|
@@ -120,7 +171,6 @@ class AIRouter
|
|
|
120
171
|
try
|
|
121
172
|
{
|
|
122
173
|
logger.info( `Attempting with provider: ${provider.name}` );
|
|
123
|
-
const client = this.createClient( provider );
|
|
124
174
|
|
|
125
175
|
const params = {
|
|
126
176
|
messages,
|
|
@@ -130,7 +180,8 @@ class AIRouter
|
|
|
130
180
|
model: provider.model
|
|
131
181
|
};
|
|
132
182
|
|
|
133
|
-
const { data, response: rawResponse } = await
|
|
183
|
+
const { data, response: rawResponse } = await provider.breaker.fire({ params, withResponse: true });
|
|
184
|
+
logger.info( `Successful with provider: ${provider.name}` );
|
|
134
185
|
return { data, response: rawResponse }
|
|
135
186
|
}
|
|
136
187
|
catch ( error )
|
|
@@ -140,7 +191,7 @@ class AIRouter
|
|
|
140
191
|
// Continue to next provider
|
|
141
192
|
}
|
|
142
193
|
}
|
|
143
|
-
throw new Error( `All providers failed. Last error: ${lastError
|
|
194
|
+
throw new Error( `All providers failed. Last error: ${lastError?.message || "unknown"}` );
|
|
144
195
|
}
|
|
145
196
|
|
|
146
197
|
async getModels ()
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "unified-ai-router",
|
|
3
|
-
"version": "3.
|
|
3
|
+
"version": "3.4.0",
|
|
4
4
|
"description": "A unified interface for multiple LLM providers with automatic fallback. This project includes an OpenAI-compatible server and a deployable Telegram bot with a Mini App interface. It supports major providers like OpenAI, Google, Grok, and more, ensuring reliability and flexibility for your AI applications.",
|
|
5
5
|
"license": "ISC",
|
|
6
6
|
"author": "mlibre",
|
|
@@ -46,6 +46,7 @@
|
|
|
46
46
|
"eslint": "^9.33.0",
|
|
47
47
|
"express": "^5.1.0",
|
|
48
48
|
"openai": "^6.1.0",
|
|
49
|
+
"opossum": "^9.0.0",
|
|
49
50
|
"pino": "^9.9.0",
|
|
50
51
|
"pino-pretty": "^13.1.1"
|
|
51
52
|
},
|
package/provider.js
CHANGED
|
@@ -6,22 +6,22 @@ module.exports = [
|
|
|
6
6
|
apiUrl: "https://generativelanguage.googleapis.com/v1beta/openai/",
|
|
7
7
|
},
|
|
8
8
|
{
|
|
9
|
-
name: "
|
|
10
|
-
apiKey: process.env.
|
|
9
|
+
name: "gemini_2",
|
|
10
|
+
apiKey: process.env.GEMINI_API_KEY_2,
|
|
11
11
|
model: "gemini-2.5-pro",
|
|
12
12
|
apiUrl: "https://generativelanguage.googleapis.com/v1beta/openai/",
|
|
13
13
|
},
|
|
14
14
|
{
|
|
15
|
-
name: "
|
|
16
|
-
apiKey: process.env.
|
|
15
|
+
name: "gemini_3",
|
|
16
|
+
apiKey: process.env.GEMINI_API_KEY_3,
|
|
17
17
|
model: "gemini-2.5-pro",
|
|
18
18
|
apiUrl: "https://generativelanguage.googleapis.com/v1beta/openai/",
|
|
19
19
|
},
|
|
20
20
|
{
|
|
21
|
-
name: "
|
|
22
|
-
apiKey: process.env.
|
|
23
|
-
model: "
|
|
24
|
-
apiUrl: "https://
|
|
21
|
+
name: "cerebras",
|
|
22
|
+
apiKey: process.env.CEREBRAS_API_KEY,
|
|
23
|
+
model: "gpt-oss-120b",
|
|
24
|
+
apiUrl: "https://api.cerebras.ai/v1",
|
|
25
25
|
},
|
|
26
26
|
{
|
|
27
27
|
name: "cerebras_2",
|
|
@@ -30,22 +30,22 @@ module.exports = [
|
|
|
30
30
|
apiUrl: "https://api.cerebras.ai/v1",
|
|
31
31
|
},
|
|
32
32
|
{
|
|
33
|
-
name: "
|
|
34
|
-
apiKey: process.env.
|
|
35
|
-
model: "
|
|
36
|
-
apiUrl: "https://
|
|
33
|
+
name: "openrouter",
|
|
34
|
+
apiKey: process.env.OPENROUTER_API_KEY,
|
|
35
|
+
model: "qwen/qwen3-coder:free",
|
|
36
|
+
apiUrl: "https://openrouter.ai/api/v1",
|
|
37
37
|
},
|
|
38
38
|
{
|
|
39
|
-
name: "
|
|
40
|
-
apiKey: process.env.
|
|
39
|
+
name: "openrouter",
|
|
40
|
+
apiKey: process.env.OPENROUTER_API_KEY,
|
|
41
41
|
model: "z-ai/glm-4.5-air:free",
|
|
42
42
|
apiUrl: "https://openrouter.ai/api/v1",
|
|
43
43
|
},
|
|
44
44
|
{
|
|
45
|
-
name: "
|
|
46
|
-
apiKey: process.env.
|
|
47
|
-
model: "
|
|
48
|
-
apiUrl: "https://
|
|
45
|
+
name: "openrouter_2",
|
|
46
|
+
apiKey: process.env.OPENROUTER_API_KEY_2,
|
|
47
|
+
model: "z-ai/glm-4.5-air:free",
|
|
48
|
+
apiUrl: "https://openrouter.ai/api/v1",
|
|
49
49
|
},
|
|
50
50
|
{
|
|
51
51
|
name: "openrouter_2",
|
|
@@ -53,12 +53,24 @@ module.exports = [
|
|
|
53
53
|
model: "qwen/qwen3-coder:free",
|
|
54
54
|
apiUrl: "https://openrouter.ai/api/v1",
|
|
55
55
|
},
|
|
56
|
+
{
|
|
57
|
+
name: "openrouter_3",
|
|
58
|
+
apiKey: process.env.OPENROUTER_API_KEY_3,
|
|
59
|
+
model: "qwen/qwen3-coder:free",
|
|
60
|
+
apiUrl: "https://openrouter.ai/api/v1",
|
|
61
|
+
},
|
|
56
62
|
{
|
|
57
63
|
name: "openrouter_3",
|
|
58
64
|
apiKey: process.env.OPENROUTER_API_KEY_3,
|
|
59
65
|
model: "z-ai/glm-4.5-air:free",
|
|
60
66
|
apiUrl: "https://openrouter.ai/api/v1",
|
|
61
67
|
},
|
|
68
|
+
{
|
|
69
|
+
name: "qroq",
|
|
70
|
+
apiKey: process.env.QROQ_API_KEY,
|
|
71
|
+
model: "openai/gpt-oss-120b",
|
|
72
|
+
apiUrl: "https://api.groq.com/openai/v1",
|
|
73
|
+
},
|
|
62
74
|
{
|
|
63
75
|
name: "gemini_1",
|
|
64
76
|
apiKey: process.env.GEMINI_API_KEY,
|
package/readme.md
CHANGED
|
@@ -22,6 +22,7 @@ It supports all the OpenAI-compatible servers, including major providers like Op
|
|
|
22
22
|
|
|
23
23
|
- **Multi-Provider Support**: Works with OpenAI, Google, Grok, OpenRouter, Z.ai, Qroq, Cohere, Cerebras, LLM7 and etc
|
|
24
24
|
- **Automatic Fallback**: If one provider fails for **any reason**, automatically tries the next
|
|
25
|
+
- **Circuit Breaker**: Built-in fault tolerance with automatic circuit breaking for each provider to prevent cascading failures
|
|
25
26
|
- **OpenAI-Compatible Server**: Drop-in replacement for the OpenAI API, enabling easy integration with existing tools and clients
|
|
26
27
|
- **Simple API**: Easy-to-use interface for all supported providers
|
|
27
28
|
- **Streaming and Non-Streaming Support**: Handles both streaming and non-streaming responses
|