unified-ai-router 3.0.1 → 3.0.3

This diff shows the published contents of these package versions as they appear in their respective public registries; it is provided for informational purposes only.
@@ -0,0 +1,38 @@
+ meta {
+   name: models
+   type: http
+   seq: 5
+ }
+
+ get {
+   url: {{address}}/models
+   body: json
+   auth: none
+ }
+
+ headers {
+   Content-Type: application/json
+   Authorization: {{token}}
+ }
+
+ body:json {
+   {
+     "messages": [
+       {
+         "role": "system",
+         "content": "You are a helpful assistant."
+       },
+       {
+         "role": "user",
+         "content": "Hello, say two words only."
+       }
+     ],
+     "model": "x-ai/grok-4-fast:free",
+     "temperature": 0,
+     "stream": true
+   }
+ }
+
+ settings {
+   encodeUrl: false
+ }
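
The file above is a Bruno (`.bru`) request definition for the models endpoint; `{{address}}` and `{{token}}` are Bruno environment placeholders that are not part of this diff. As a rough JavaScript equivalent — the URL and token values below are assumptions, not package contents:

```js
// Hedged sketch of the Bruno request above. ADDRESS and TOKEN stand in
// for the {{address}} and {{token}} environment placeholders.
const ADDRESS = "http://localhost:3000/v1"; // assumed
const TOKEN = process.env.TOKEN || "";      // assumed

async function fetchModels ()
{
  // Node 18+ global fetch; mirrors the GET {{address}}/models request
  const response = await fetch( `${ADDRESS}/models`, {
    headers: {
      "Content-Type": "application/json",
      "Authorization": TOKEN
    }
  });
  return response.json();
}

fetchModels().then( body => { return console.log( body ) });
```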
package/main.js CHANGED
@@ -134,6 +134,43 @@ class AIRouter
      }
      throw new Error( `All providers failed. Last error: ${lastError.message}` );
    }
+   async getModels ()
+   {
+     const models = [];
+     for ( const provider of this.providers )
+     {
+       if ( !provider.apiKey )
+       {
+         logger.warn( `Skipping provider ${provider.name} due to missing API key` );
+         continue;
+       }
+       try
+       {
+         logger.info( `Fetching models for provider: ${provider.name}` );
+         const client = new OpenAI({
+           apiKey: provider.apiKey,
+           baseURL: provider.apiUrl,
+           timeout: 60000,
+         });
+         const listResponse = await client.models.list();
+         const modelList = listResponse.data && listResponse.data.length > 0 ? listResponse.data : listResponse.body || [];
+         const model = modelList.find( m => { return m.id === provider.model || m.id === `models/${provider.model}` });
+         if ( model )
+         {
+           models.push( model );
+         }
+         else
+         {
+           logger.warn( `Model ${provider.model} not found in provider ${provider.name}` );
+         }
+       }
+       catch ( error )
+       {
+         logger.error( `Failed to list models for provider ${provider.name}: ${error.message}` );
+       }
+     }
+     return models;
+   }
  }

  module.exports = AIRouter;
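
For reference, the new `getModels()` builds a short-lived OpenAI client per configured provider, lists that provider's models, and keeps only the entry whose id matches the configured model (also accepting a `models/<id>` prefix). A minimal usage sketch, assuming the package entry point resolves to `main.js`'s export and using the provider shape from `provider.js`:

```js
// Sketch only: the provider object mirrors the openrouter entry in
// provider.js; the apiUrl is OpenRouter's public OpenAI-compatible base URL.
const AIRouter = require( "unified-ai-router" ); // assumed entry point

const router = new AIRouter([
  {
    name: "openrouter",
    apiKey: process.env.OPENROUTER_API_KEY,
    model: "x-ai/grok-4-fast:free",
    apiUrl: "https://openrouter.ai/api/v1"
  }
]);

router.getModels().then( models =>
{
  // One model object per provider whose configured model was found
  console.log( models.map( m => { return m.id }) );
});
```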
@@ -1,26 +1,19 @@
  const express = require( "express" );
  const cors = require( "cors" );
- const AIRouter = require( "../main" ); // your existing class
+ const AIRouter = require( "../main" );
  const pino = require( "pino" );
  const pretty = require( "pino-pretty" );
- const stream = pretty({ colorize: true, ignore: "pid,hostname" });
- const logger = pino({ base: false }, stream );
+ const pinoStream = pretty({ colorize: true, ignore: "pid,hostname" });
+ const logger = pino({ base: false }, pinoStream );
  require( "dotenv" ).config({ quiet: true });

  const app = express();
  app.use( cors() );
  app.use( express.json() );

- /**
-  * Initialize router with providers (could load from env/config)
-  */
  const providers = require( "../provider" )
-
  const aiRouter = new AIRouter( providers );

- /**
-  * OpenAI-compatible endpoint: POST /v1/chat/completions
-  */
  app.post( "/v1/chat/completions", async ( req, res ) =>
  {
    const { messages, model, stream, ...rest } = req.body;
@@ -72,7 +65,20 @@ app.post( "/v1/chat/completions", async ( req, res ) =>
    }
  });

- // Health check
+ app.get( "/v1/models", async ( req, res ) =>
+ {
+   try
+   {
+     const models = await aiRouter.getModels();
+     res.json({ data: models });
+   }
+   catch ( error )
+   {
+     logger.error( `Error in /v1/models: ${error.message}` );
+     res.status( 500 ).json({ error: { message: error.message } });
+   }
+ });
+
  app.get( "/health", ( req, res ) => { return res.json({ status: "ok" }) });

  // Start server
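
Since the server keeps the OpenAI API surface, the new route can be exercised with the official `openai` client as well as plain HTTP. A sketch, with the port assumed from the legacy server's `process.env.PORT || 3000` default shown later in this diff:

```js
// Sketch: pointing the openai SDK at the local server. The apiKey is a
// dummy value, since the /v1/models handler performs no auth check.
const OpenAI = require( "openai" );

const client = new OpenAI({
  baseURL: "http://localhost:3000/v1", // assumed local port
  apiKey: "not-checked"
});

client.models.list().then( page =>
{
  for ( const model of page.data )
  {
    console.log( model.id );
  }
});
```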
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "unified-ai-router",
-   "version": "3.0.1",
+   "version": "3.0.3",
    "description": "A unified interface for multiple LLM providers with automatic fallback. This project includes an OpenAI-compatible server and a deployable Telegram bot with a Mini App interface. It supports major providers like OpenAI, Google, Grok, and more, ensuring reliability and flexibility for your AI applications.",
    "license": "ISC",
    "author": "mlibre",
package/provider.js CHANGED
@@ -1,5 +1,4 @@
  module.exports = [
-
    {
      name: "openrouter",
      apiKey: process.env.OPENROUTER_API_KEY,
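
Each entry in `provider.js` follows the same four-field shape: `name`, `apiKey`, `model`, and `apiUrl`. A hedged sketch of adding a second provider — the Gemini values below are illustrative assumptions, not taken from the package:

```js
// The first entry is from the diff above; the second is a hypothetical
// addition using Google's OpenAI-compatible endpoint.
module.exports = [
  {
    name: "openrouter",
    apiKey: process.env.OPENROUTER_API_KEY,
    model: "x-ai/grok-4-fast:free",
    apiUrl: "https://openrouter.ai/api/v1"
  },
  {
    name: "gemini", // hypothetical entry
    apiKey: process.env.GEMINI_API_KEY,
    model: "gemini-2.0-flash",
    apiUrl: "https://generativelanguage.googleapis.com/v1beta/openai/"
  }
];
```

The `models/<id>` fallback in `getModels()` suggests Gemini-style ids are anticipated, which is why a Gemini entry is used for the illustration.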
package/readme.md CHANGED
@@ -84,7 +84,20 @@ console.log(response);

  ### 🔌 OpenAI-Compatible Server

- The OpenAI-compatible server provides a drop-in replacement for the OpenAI API. It routes requests through the unified router with fallback logic, ensuring high availability.
+ The OpenAI-compatible server provides a drop-in replacement for the OpenAI API. It routes requests through the unified router with fallback logic, ensuring high availability.
+ The server uses the provider configurations defined in the [provider.js](provider.js) file and requires API keys set in a `.env` file.
+
+ #### Setup
+
+ 1. Copy the example environment file:
+
+    ```bash
+    cp .env.example .env
+    ```
+
+ 2. Configure your providers in `provider.js`. Add new providers or modify existing ones with the appropriate `name`, `apiKey` (referencing the corresponding env variable), `model`, and `apiUrl` for the providers you want to use.
+
+ 3. Edit `.env` and add your API keys for the desired providers (see [🔑 API Keys](#-api-keys) for sources).

  To start the server locally, run:

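For step 3, a hedged sketch of the resulting `.env` — the variable name matches `provider.js`, but the value is a placeholder:

```bash
# .env — placeholder value; providers without a key set are skipped
OPENROUTER_API_KEY=your-openrouter-key
```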
package/legacy/main.js DELETED
@@ -1,63 +0,0 @@
- const { ChatOpenAI } = require( "@langchain/openai" );
- const pino = require( "pino" );
- const pretty = require( "pino-pretty" );
- const stream = pretty({ colorize: true, ignore: "pid,hostname" });
- const logger = pino({ base: false }, stream );
-
- class AIRouter
- {
-   constructor ( providers )
-   {
-     this.providers = providers;
-   }
-
-   async chatCompletion ( messages, options = {}, stream = false )
-   {
-     const { stream: streamOption, tools, model, ...restOptions } = options;
-     const isStreaming = stream || streamOption;
-
-     logger.info( `Starting chatCompletion with ${this.providers.length} providers (streaming: ${isStreaming})` );
-     let lastError;
-
-     for ( const provider of this.providers )
-     {
-       try
-       {
-         logger.info( `Attempting with provider: ${provider.name}` );
-         let llm = new ChatOpenAI({
-           apiKey: provider.apiKey,
-           model: provider.model,
-           configuration: {
-             baseURL: provider.apiUrl,
-           },
-           ...restOptions,
-         });
-
-         if ( tools && tools.length > 0 )
-         {
-           llm = llm.bindTools( tools );
-         }
-
-         if ( isStreaming )
-         {
-           const stream = await llm.stream( messages );
-           return stream;
-         }
-         else
-         {
-           const response = await llm.invoke( messages, { timeout: 60000 });
-           return response;
-         }
-       }
-       catch ( error )
-       {
-         lastError = error;
-         logger.error( `Failed with ${provider.name}:${error.message}` );
-         // Continue to next provider
-       }
-     }
-     throw new Error( `All providers failed. Last error: ${lastError.message}` );
-   }
- }
-
- module.exports = AIRouter;
@@ -1,192 +0,0 @@
- const express = require( "express" );
- const cors = require( "cors" );
- const AIRouter = require( "../main" ); // your existing class
- const pino = require( "pino" );
- const pretty = require( "pino-pretty" );
- const stream = pretty({ colorize: true, ignore: "pid,hostname" });
- const logger = pino({ base: false }, stream );
- require( "dotenv" ).config({ quiet: true });
-
- const app = express();
- app.use( cors() );
- app.use( express.json() );
-
- /**
-  * Initialize router with providers (could load from env/config)
-  */
- const providers = require( "../provider" )
-
- const aiRouter = new AIRouter( providers );
-
- /**
-  * OpenAI-compatible endpoint: POST /v1/chat/completions
-  */
- app.post( "/v1/chat/completions", async ( req, res ) =>
- {
-   try
-   {
-     const { messages, model, stream, ...rest } = req.body;
-
-     if ( !messages || !Array.isArray( messages ) )
-     {
-       return res.status( 400 ).json({ error: { message: "messages must be an array" } });
-     }
-
-     if ( stream )
-     {
-       // Streaming mode → use Server-Sent Events (SSE)
-       res.setHeader( "Content-Type", "text/event-stream" );
-       res.setHeader( "Cache-Control", "no-cache" );
-       res.setHeader( "Connection", "keep-alive" );
-
-       try
-       {
-         const response = await aiRouter.chatCompletion( messages, { model, ...rest }, true );
-         const id = `chatcmpl-${Date.now()}`;
-         const created = Math.floor( Date.now() / 1000 );
-         let fullResponse = null;
-         for await ( const chunk of response )
-         {
-           const modelName = chunk?.response_metadata?.model_name || model || "unknown";
-           const systemFingerprint = chunk?.response_metadata?.system_fingerprint || null;
-           let delta = { ... chunk.delta || { content: chunk.content || "" } };
-           if ( !delta.role ) delta.role = "assistant";
-           delta.reasoning = delta.reasoning || null;
-           delta.reasoning_details = delta.reasoning_details || [];
-           let toolCallsDelta = null;
-           if ( chunk.tool_calls && chunk.tool_calls.length > 0 )
-           {
-             toolCallsDelta = chunk.tool_calls.map( ( tc, index ) =>
-             {
-               return {
-                 id: tc.id || `call_${Date.now()}_${Math.random().toString( 36 ).substr( 2, 9 )}`,
-                 type: "function",
-                 index,
-                 function: {
-                   name: tc.name,
-                   arguments: JSON.stringify( tc.args || {})
-                 }
-               };
-             });
-             delta.tool_calls = toolCallsDelta;
-             delta.content = "";
-           }
-           const chunkFinishReason = delta.finish_reason || chunk?.response_metadata?.finish_reason || null;
-           const chunkNativeFinishReason = delta.native_finish_reason || chunk?.response_metadata?.native_finish_reason || chunkFinishReason || null;
-           if ( chunk.content && !fullResponse ) fullResponse = chunk; // Capture full for reasoning if available
-           const payload = {
-             id,
-             provider: "OpenAI",
-             object: "chat.completion.chunk",
-             created,
-             model: modelName,
-             system_fingerprint: systemFingerprint,
-             choices: [
-               {
-                 logprobs: null,
-                 delta,
-                 index: 0,
-                 finish_reason: chunkFinishReason,
-                 native_finish_reason: chunkNativeFinishReason,
-               },
-             ],
-           };
-           const usage = chunk?.response_metadata?.usage;
-           if ( usage && typeof usage === "object" && !Array.isArray( usage ) && Object.keys( usage ).length > 0 )
-           {
-             payload.usage = chunk?.response_metadata?.usage || null;
-           }
-           res.write( `data: ${JSON.stringify( payload )}\n\n` );
-         }
-         // Send done signal
-         res.write( "data: [DONE]\n\n" );
-         res.end();
-       }
-       catch ( err )
-       {
-         logger.error( err );
-         res.write( `data: ${JSON.stringify({ error: err.message })}\n\n` );
-         res.write( "data: [DONE]\n\n" );
-         res.end();
-       }
-     }
-     else
-     {
-       // Non-streaming → return one-shot completion
-       const response = await aiRouter.chatCompletion( messages, { model, ...rest }, false );
-       let reasoning = null;
-       let refusal = null;
-       let toolCalls = null;
-       if ( response.contentBlocks )
-       {
-         const reasoningBlocks = response.contentBlocks.filter( b => { return b.type === "reasoning" || b.type === "thinking" });
-         reasoning = reasoningBlocks.length > 0 ? reasoningBlocks.map( b => { return b.text }).join( "\n" ) : null;
-         const refusalBlocks = response.contentBlocks.filter( b => { return b.type === "refusal" });
-         refusal = refusalBlocks.length > 0 ? refusalBlocks.map( b => { return b.text }).join( "\n" ) : null;
-       }
-       if ( response.tool_calls && response.tool_calls.length > 0 )
-       {
-         toolCalls = response.tool_calls.map( ( tc, index ) =>
-         {
-           return {
-             id: tc.id || `call_${Date.now()}_${Math.random().toString( 36 ).substr( 2, 9 )}`,
-             type: "function",
-             index,
-             function: {
-               name: tc.name,
-               arguments: JSON.stringify( tc.args || {})
-             }
-           };
-         });
-       }
-       const systemFingerprint = response.response_metadata?.system_fingerprint || null;
-       const finishReason = response.response_metadata?.finish_reason || null;
-       const nativeFinishReason = response.response_metadata?.native_finish_reason || finishReason || null;
-       const messageContent = toolCalls && toolCalls.length > 0 ? "" : response.content;
-       let finalResult = {
-         id: `chatcmpl_${Date.now()}`,
-         provider: "OpenAI",
-         object: "chat.completion",
-         created: Math.floor( Date.now() / 1000 ),
-         model: response.response_metadata?.model_name || model || "unknown",
-         system_fingerprint: systemFingerprint,
-         choices: [
-           {
-             logprobs: null,
-             finish_reason: finishReason,
-             native_finish_reason: nativeFinishReason,
-             index: 0,
-             message: {
-               role: "assistant",
-               content: messageContent,
-               refusal,
-               reasoning,
-               tool_calls: toolCalls
-             },
-           },
-         ],
-       }
-       const usage = response?.response_metadata?.usage;
-       if ( usage && typeof usage === "object" && !Array.isArray( usage ) && Object.keys( usage ).length > 0 )
-       {
-         finalResult.usage = response?.response_metadata?.usage || null;
-       }
-       res.json( finalResult );
-     }
-   }
-   catch ( err )
-   {
-     logger.error( err );
-     res.status( 500 ).json({ error: { message: err.message } });
-   }
- });
-
- // Health check
- app.get( "/health", ( req, res ) => { return res.json({ status: "ok" }) });
-
- // Start server
- const PORT = process.env.PORT || 3000;
- app.listen( PORT, () =>
- {
-   logger.info( `🚀 OpenAI-compatible API listening at http://localhost:${PORT}/v1/chat/completions` );
- });