unified-ai-router 3.1.0 → 3.1.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/main.js +17 -20
- package/openai-compatible-server/index.js +53 -3
- package/package.json +1 -1
package/main.js
CHANGED
@@ -11,9 +11,18 @@ class AIRouter
     this.providers = providers;
   }
 
+  createClient ( provider )
+  {
+    return new OpenAI({
+      apiKey: provider.apiKey,
+      baseURL: provider.apiUrl,
+      timeout: 60000,
+    });
+  }
+
   async chatCompletion ( messages, options = {}, stream = false )
   {
-    const { stream: streamOption, tools,
+    const { stream: streamOption, tools, ...restOptions } = options;
     const isStreaming = stream || streamOption;
 
     logger.info( `Starting chatCompletion with ${this.providers.length} providers (streaming: ${isStreaming})` );
@@ -24,11 +33,7 @@ class AIRouter
       try
       {
        logger.info( `Attempting with provider: ${provider.name}` );
-        const client = new OpenAI({
-          apiKey: provider.apiKey,
-          baseURL: provider.apiUrl,
-          timeout: 60000,
-        });
+        const client = this.createClient( provider );
 
        const params = {
          model: provider.model,
@@ -46,7 +51,7 @@ class AIRouter
          for await ( const chunk of responseStream )
          {
            const content = chunk.choices[0]?.delta?.content;
-            const reasoning = chunk.choices[0]?.delta?.reasoning
+            const reasoning = chunk.choices[0]?.delta?.reasoning;
            const tool_calls_delta = chunk.choices[0]?.delta?.tool_calls;
            if ( content !== null )
            {
@@ -78,7 +83,7 @@ class AIRouter
          {
            response.reasoning = reasoning
          }
-          if (
+          if ( tool_calls !== null )
          {
            response.tool_calls = tool_calls
          }
@@ -97,7 +102,7 @@ class AIRouter
 
   async chatCompletionWithResponse ( messages, options = {})
   {
-    const { stream, tools,
+    const { stream, tools, ...restOptions } = options;
     const isStreaming = stream;
 
     logger.info( `Starting chatCompletionWithResponse with ${this.providers.length} providers (streaming: ${isStreaming})` );
@@ -108,11 +113,7 @@ class AIRouter
       try
       {
        logger.info( `Attempting with provider: ${provider.name}` );
-        const client = new OpenAI({
-          apiKey: provider.apiKey,
-          baseURL: provider.apiUrl,
-          timeout: 60000,
-        });
+        const client = this.createClient( provider );
 
        const params = {
          model: provider.model,
@@ -148,13 +149,9 @@ class AIRouter
       try
       {
        logger.info( `Fetching models for provider: ${provider.name}` );
-        const client = new OpenAI({
-          apiKey: provider.apiKey,
-          baseURL: provider.apiUrl,
-          timeout: 60000,
-        });
+        const client = this.createClient( provider );
        const listResponse = await client.models.list();
-        const modelList =
+        const modelList = Array.isArray( listResponse.data ) ? listResponse.data : listResponse.body || [];
        const model = modelList.find( m => { return m.id === provider.model || m.id === `models/${provider.model}` });
        if ( model )
        {
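For context, the main.js change above replaces three identical inline new OpenAI({ ... }) constructions with the single createClient helper. A minimal sketch of how a provider entry feeds that helper follows; the field names name, apiKey, apiUrl, and model come from the diff, while the require path and the concrete values are assumptions for illustration.

// Hypothetical usage; the require path and all values are placeholders.
const AIRouter = require( "unified-ai-router" );

const provider = {
  name: "openai",
  apiKey: process.env.OPENAI_API_KEY,
  apiUrl: "https://api.openai.com/v1",
  model: "gpt-4o-mini",
};

const router = new AIRouter([ provider ]);
// Every provider attempt now builds its client the same way:
// the provider's key and base URL, with a fixed 60-second timeout.
const client = router.createClient( provider );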
package/openai-compatible-server/index.js
CHANGED
@@ -1,18 +1,18 @@
 const express = require( "express" );
 const cors = require( "cors" );
-const AIRouter = require( "../main" );
 const pino = require( "pino" );
 const pretty = require( "pino-pretty" );
 const pinoStream = pretty({ colorize: true, ignore: "pid,hostname" });
 const logger = pino({ base: false }, pinoStream );
 require( "dotenv" ).config({ quiet: true });
+const AIRouter = require( "../main" );
+const providers = require( "../provider" )
+const aiRouter = new AIRouter( providers );
 
 const app = express();
 app.use( cors() );
 app.use( express.json() );
 
-const providers = require( "../provider" )
-const aiRouter = new AIRouter( providers );
 
 app.post( "/v1/chat/completions", async ( req, res ) =>
 {
@@ -64,6 +64,56 @@ app.post( "/v1/chat/completions", async ( req, res ) =>
    }
  }
 });
+app.post( "/chat/completions", async ( req, res ) =>
+{
+  const { messages, model, stream, ...rest } = req.body;
+
+  if ( !messages || !Array.isArray( messages ) )
+  {
+    return res.status( 400 ).json({ error: { message: "messages must be an array" } });
+  }
+
+  if ( stream )
+  {
+    res.setHeader( "Content-Type", "text/event-stream" );
+    res.setHeader( "Cache-Control", "no-cache" );
+    res.setHeader( "Connection", "keep-alive" );
+
+    try
+    {
+      const result = await aiRouter.chatCompletionWithResponse( messages, { model, stream, ...rest });
+
+      for await ( const chunk of result.data )
+      {
+        res.write( `data: ${JSON.stringify( chunk )}\n\n` );
+      }
+
+      res.write( "data: [DONE]\n\n" );
+    }
+    catch ( err )
+    {
+      logger.error( err );
+      res.write( `data: ${JSON.stringify({ error: { message: err.message } })}\n\n` );
+      res.write( "data: [DONE]\n\n" );
+    }
+
+    res.end();
+  }
+  else
+  {
+    try
+    {
+      const result = await aiRouter.chatCompletionWithResponse( messages, { model, stream, ...rest });
+
+      res.json( result.data );
+    }
+    catch ( err )
+    {
+      logger.error( err );
+      res.status( 500 ).json({ error: { message: err.message } });
+    }
+  }
+});
 
 app.get( "/v1/models", async ( req, res ) =>
 {
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "unified-ai-router",
-  "version": "3.1.0",
+  "version": "3.1.1",
   "description": "A unified interface for multiple LLM providers with automatic fallback. This project includes an OpenAI-compatible server and a deployable Telegram bot with a Mini App interface. It supports major providers like OpenAI, Google, Grok, and more, ensuring reliability and flexibility for your AI applications.",
   "license": "ISC",
   "author": "mlibre",