unified-ai-router 3.0.3 → 3.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/main.js CHANGED
@@ -11,9 +11,18 @@ class AIRouter
 		this.providers = providers;
 	}
 
+	createClient ( provider )
+	{
+		return new OpenAI({
+			apiKey: provider.apiKey,
+			baseURL: provider.apiUrl,
+			timeout: 60000,
+		});
+	}
+
 	async chatCompletion ( messages, options = {}, stream = false )
 	{
-		const { stream: streamOption, tools, model, ...restOptions } = options;
+		const { stream: streamOption, tools, ...restOptions } = options;
 		const isStreaming = stream || streamOption;
 
 		logger.info( `Starting chatCompletion with ${this.providers.length} providers (streaming: ${isStreaming})` );
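
The new `createClient` helper centralizes the OpenAI-compatible client construction that was previously inlined at three call sites (see the following hunks). Based only on the fields the router reads, a provider entry and basic usage look roughly like this sketch; the real `provider.js` shape and `chatCompletion`'s resolved value are not shown in this diff, so the names and values below are assumptions:

```js
// Sketch only: field names (name, apiKey, apiUrl, model) are inferred from
// what createClient and chatCompletion read; values are placeholders, and
// the package's main export is assumed to be the AIRouter class (the server
// file requires it as "../main").
const AIRouter = require( "unified-ai-router" );

const router = new AIRouter([
	{
		name: "openai",
		apiKey: process.env.OPENAI_API_KEY,
		apiUrl: "https://api.openai.com/v1",
		model: "gpt-4o-mini",
	},
]);

// Tries providers in order, falling back until one succeeds.
router.chatCompletion([{ role: "user", content: "Hello" }])
	.then( reply => { return console.log( reply ) });
```
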
@@ -24,11 +33,7 @@ class AIRouter
 			try
 			{
 				logger.info( `Attempting with provider: ${provider.name}` );
-				const client = new OpenAI({
-					apiKey: provider.apiKey,
-					baseURL: provider.apiUrl,
-					timeout: 60000,
-				});
+				const client = this.createClient( provider );
 
 				const params = {
 					model: provider.model,
@@ -46,7 +51,7 @@ class AIRouter
 				for await ( const chunk of responseStream )
 				{
 					const content = chunk.choices[0]?.delta?.content;
-					const reasoning = chunk.choices[0]?.delta?.reasoning
+					const reasoning = chunk.choices[0]?.delta?.reasoning;
 					const tool_calls_delta = chunk.choices[0]?.delta?.tool_calls;
 					if ( content !== null )
 					{
@@ -78,7 +83,7 @@ class AIRouter
 				{
 					response.reasoning = reasoning
 				}
-				if ( tools !== null )
+				if ( tool_calls !== null )
 				{
 					response.tool_calls = tool_calls
 				}
@@ -97,7 +102,7 @@ class AIRouter
 
 	async chatCompletionWithResponse ( messages, options = {})
 	{
-		const { stream, tools, model, ...restOptions } = options;
+		const { stream, tools, ...restOptions } = options;
 		const isStreaming = stream;
 
 		logger.info( `Starting chatCompletionWithResponse with ${this.providers.length} providers (streaming: ${isStreaming})` );
@@ -108,11 +113,7 @@ class AIRouter
 			try
 			{
 				logger.info( `Attempting with provider: ${provider.name}` );
-				const client = new OpenAI({
-					apiKey: provider.apiKey,
-					baseURL: provider.apiUrl,
-					timeout: 60000,
-				});
+				const client = this.createClient( provider );
 
 				const params = {
 					model: provider.model,
@@ -134,6 +135,7 @@ class AIRouter
 		}
 		throw new Error( `All providers failed. Last error: ${lastError.message}` );
 	}
+
 	async getModels ()
 	{
 		const models = [];
@@ -147,13 +149,9 @@ class AIRouter
 			try
 			{
 				logger.info( `Fetching models for provider: ${provider.name}` );
-				const client = new OpenAI({
-					apiKey: provider.apiKey,
-					baseURL: provider.apiUrl,
-					timeout: 60000,
-				});
+				const client = this.createClient( provider );
 				const listResponse = await client.models.list();
-				const modelList = listResponse.data && listResponse.data.length > 0 ? listResponse.data : listResponse.body || [];
+				const modelList = Array.isArray( listResponse.data ) ? listResponse.data : listResponse.body || [];
 				const model = modelList.find( m => { return m.id === provider.model || m.id === `models/${provider.model}` });
 				if ( model )
 				{
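
The `modelList` rewrite in `getModels` also changes the fallback semantics: the old expression treated an empty `data` array as missing and fell back to `listResponse.body`, while `Array.isArray` accepts any array, empty included. A minimal illustration with made-up inputs:

```js
// Old vs. new fallback, applied to a response whose data array is empty.
// oldPick / newPick are throwaway names for this illustration only.
const oldPick = r => { return r.data && r.data.length > 0 ? r.data : r.body || [] };
const newPick = r => { return Array.isArray( r.data ) ? r.data : r.body || [] };

const resp = { data: [], body: [{ id: "legacy-model" }] };
console.log( oldPick( resp ) ); // [ { id: "legacy-model" } ] (fell back to body)
console.log( newPick( resp ) ); // [] (an empty data array is now used as-is)
```
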
@@ -1,18 +1,18 @@
 const express = require( "express" );
 const cors = require( "cors" );
-const AIRouter = require( "../main" );
 const pino = require( "pino" );
 const pretty = require( "pino-pretty" );
 const pinoStream = pretty({ colorize: true, ignore: "pid,hostname" });
 const logger = pino({ base: false }, pinoStream );
 require( "dotenv" ).config({ quiet: true });
+const AIRouter = require( "../main" );
+const providers = require( "../provider" )
+const aiRouter = new AIRouter( providers );
 
 const app = express();
 app.use( cors() );
 app.use( express.json() );
 
-const providers = require( "../provider" )
-const aiRouter = new AIRouter( providers );
 
 app.post( "/v1/chat/completions", async ( req, res ) =>
 {
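
In the server file, the `../main` and `../provider` requires move below `require( "dotenv" ).config()`. This matters if `provider.js` reads `process.env` when it is first required; with the old order those reads ran before `.env` was parsed. A hypothetical `provider.js` (not part of this diff) showing why the order is significant:

```js
// ../provider.js (hypothetical sketch): API keys are captured when the
// module is first required, so dotenv.config() must already have run.
module.exports = [
	{
		name: "openai",
		apiKey: process.env.OPENAI_API_KEY, // undefined if dotenv ran too late
		apiUrl: "https://api.openai.com/v1",
		model: "gpt-4o-mini",
	},
];
```
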
@@ -31,7 +31,7 @@ app.post( "/v1/chat/completions", async ( req, res ) =>
 
 		try
 		{
-			const result = await aiRouter.chatCompletionWithResponse( messages, { model, ...rest });
+			const result = await aiRouter.chatCompletionWithResponse( messages, { model, stream, ...rest });
 
 			for await ( const chunk of result.data )
 			{
@@ -53,7 +53,57 @@ app.post( "/v1/chat/completions", async ( req, res ) =>
 	{
 		try
 		{
-			const result = await aiRouter.chatCompletionWithResponse( messages, { model, ...rest });
+			const result = await aiRouter.chatCompletionWithResponse( messages, { model, stream, ...rest });
+
+			res.json( result.data );
+		}
+		catch ( err )
+		{
+			logger.error( err );
+			res.status( 500 ).json({ error: { message: err.message } });
+		}
+	}
+});
+app.post( "/chat/completions", async ( req, res ) =>
+{
+	const { messages, model, stream, ...rest } = req.body;
+
+	if ( !messages || !Array.isArray( messages ) )
+	{
+		return res.status( 400 ).json({ error: { message: "messages must be an array" } });
+	}
+
+	if ( stream )
+	{
+		res.setHeader( "Content-Type", "text/event-stream" );
+		res.setHeader( "Cache-Control", "no-cache" );
+		res.setHeader( "Connection", "keep-alive" );
+
+		try
+		{
+			const result = await aiRouter.chatCompletionWithResponse( messages, { model, stream, ...rest });
+
+			for await ( const chunk of result.data )
+			{
+				res.write( `data: ${JSON.stringify( chunk )}\n\n` );
+			}
+
+			res.write( "data: [DONE]\n\n" );
+		}
+		catch ( err )
+		{
+			logger.error( err );
+			res.write( `data: ${JSON.stringify({ error: { message: err.message } })}\n\n` );
+			res.write( "data: [DONE]\n\n" );
+		}
+
+		res.end();
+	}
+	else
+	{
+		try
+		{
+			const result = await aiRouter.chatCompletionWithResponse( messages, { model, stream, ...rest });
 
 			res.json( result.data );
 		}
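
The added `/chat/completions` route mirrors `/v1/chat/completions`, including forwarding the `stream` flag and the SSE framing: one `data: <json>` frame per chunk, terminated by `data: [DONE]`. A hypothetical client for the streaming path (the listening port is not part of this diff; 3000 is assumed):

```js
// Consume the SSE stream with Node 18+, where fetch is global and the
// response body is async-iterable. The port is an assumption.
async function streamChat ()
{
	const res = await fetch( "http://localhost:3000/chat/completions", {
		method: "POST",
		headers: { "Content-Type": "application/json" },
		body: JSON.stringify({ messages: [{ role: "user", content: "Hi" }], stream: true }),
	});

	for await ( const chunk of res.body )
	{
		// Each frame is "data: <json>\n\n"; the server finishes with "data: [DONE]\n\n".
		process.stdout.write( Buffer.from( chunk ).toString() );
	}
}

streamChat();
```
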
@@ -79,6 +129,20 @@ app.get( "/v1/models", async ( req, res ) =>
 	}
 });
 
+app.get( "/models", async ( req, res ) =>
+{
+	try
+	{
+		const models = await aiRouter.getModels();
+		res.json({ data: models });
+	}
+	catch ( error )
+	{
+		logger.error( `Error in /models: ${error.message}` );
+		res.status( 500 ).json({ error: { message: error.message } });
+	}
+});
+
 app.get( "/health", ( req, res ) => { return res.json({ status: "ok" }) });
 
 // Start server
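
Similarly, `/models` is an unversioned alias of `/v1/models`, wrapping `getModels()` in a `{ data }` envelope. A quick check under the same port and Node 18+ assumptions as above:

```js
// Fetch the new alias route; it responds with { data: [ ...models ] }.
fetch( "http://localhost:3000/models" )
	.then( res => { return res.json() })
	.then( body => { return console.log( body.data ) });
```
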
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "unified-ai-router",
-  "version": "3.0.3",
+  "version": "3.1.1",
   "description": "A unified interface for multiple LLM providers with automatic fallback. This project includes an OpenAI-compatible server and a deployable Telegram bot with a Mini App interface. It supports major providers like OpenAI, Google, Grok, and more, ensuring reliability and flexibility for your AI applications.",
   "license": "ISC",
   "author": "mlibre",