unified-ai-router 3.1.0 → 3.1.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/.env.example CHANGED
@@ -9,4 +9,5 @@ VERCEL_AI_GATEWAY_API_KEY=API_KEY
 COHERE_API_KEY=API_KEY
 CEREBRAS_API_KEY=API_KEY
 LLM7_API_KEY=API_KEY
-SEARX_URL=https://searx.perennialte.ch
+SEARX_URL=https://searx.perennialte.ch
+PORT=3000
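The new `PORT` entry presumably lets the server's listen port be overridden. A minimal sketch of how an entry point would consume it, assuming the usual `process.env` pattern, with 3000 matching the default the readme documents:

```js
// Sketch: reading the PORT variable added above, with the documented
// 3000 fallback. Assumes dotenv populates process.env first, as the
// server code later in this diff does.
require( "dotenv" ).config({ quiet: true });

const port = Number( process.env.PORT ) || 3000;
console.log( `server would listen on port ${port}` );
```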
package/main.js CHANGED
@@ -11,9 +11,18 @@ class AIRouter
 		this.providers = providers;
 	}
 
+	createClient ( provider )
+	{
+		return new OpenAI({
+			apiKey: provider.apiKey,
+			baseURL: provider.apiUrl,
+			timeout: 60000,
+		});
+	}
+
 	async chatCompletion ( messages, options = {}, stream = false )
 	{
-		const { stream: streamOption, tools, model, ...restOptions } = options;
+		const { stream: streamOption, tools, ...restOptions } = options;
 		const isStreaming = stream || streamOption;
 
 		logger.info( `Starting chatCompletion with ${this.providers.length} providers (streaming: ${isStreaming})` );
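This hunk hoists the repeated `new OpenAI(...)` construction into a single `createClient` helper, which the three call sites below now delegate to. A sketch of calling the helper directly, assuming the package's main export is the router class (as the server's `require( "../main" )` suggests) and a provider object shaped the way the readme describes (`name`, `apiKey`, `model`, `apiUrl`); the values here are placeholders, not entries from the package's `provider.js`:

```js
// Sketch only: exercising the new createClient helper directly.
const AIRouter = require( "unified-ai-router" );

const provider = {
	name: "openai",                       // used in the router's log messages
	apiKey: process.env.OPENAI_API_KEY,   // read from the environment
	model: "gpt-4o-mini",                 // placeholder model id
	apiUrl: "https://api.openai.com/v1",  // any OpenAI-compatible base URL
};

const router = new AIRouter([ provider ]);
// One place now sets apiKey, baseURL, and the 60 s timeout.
const client = router.createClient( provider );

client.chat.completions.create({
	model: provider.model,
	messages: [{ role: "user", content: "hello" }],
}).then( ( completion ) => { return console.log( completion.choices[0].message.content ); });
```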
@@ -24,11 +33,7 @@ class AIRouter
 			try
 			{
 				logger.info( `Attempting with provider: ${provider.name}` );
-				const client = new OpenAI({
-					apiKey: provider.apiKey,
-					baseURL: provider.apiUrl,
-					timeout: 60000,
-				});
+				const client = this.createClient( provider );
 
 				const params = {
 					model: provider.model,
@@ -46,7 +51,7 @@
 					for await ( const chunk of responseStream )
 					{
 						const content = chunk.choices[0]?.delta?.content;
-						const reasoning = chunk.choices[0]?.delta?.reasoning
+						const reasoning = chunk.choices[0]?.delta?.reasoning;
 						const tool_calls_delta = chunk.choices[0]?.delta?.tool_calls;
 						if ( content !== null )
 						{
@@ -78,7 +83,7 @@
 					{
 						response.reasoning = reasoning
 					}
-					if ( tools !== null )
+					if ( tool_calls !== null )
 					{
 						response.tool_calls = tool_calls
 					}
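This is a genuine bug fix: `tools` is the caller's request option, so the old check attached `response.tool_calls` whenever tools were passed in, even if the model returned none; the new check inspects the accumulated `tool_calls` themselves. For context, a sketch of the usual pattern for accumulating streamed tool-call deltas like the `tool_calls_delta` read earlier in this file (`mergeToolCallDelta` is a hypothetical helper, not part of `main.js`):

```js
// Sketch: how streamed tool_call deltas are typically merged into
// complete calls before a check like `tool_calls !== null` runs.
function mergeToolCallDelta ( toolCalls, delta )
{
	for ( const d of delta )
	{
		// Each delta names the index of the call it extends.
		toolCalls[d.index] ??= { id: "", type: "function", function: { name: "", arguments: "" } };
		const call = toolCalls[d.index];
		if ( d.id ) call.id = d.id;
		if ( d.function?.name ) call.function.name += d.function.name;
		if ( d.function?.arguments ) call.function.arguments += d.function.arguments;
	}
	return toolCalls;
}

// Hypothetical usage inside the streaming loop shown above:
// if ( tool_calls_delta ) mergeToolCallDelta( tool_calls, tool_calls_delta );
```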
@@ -97,7 +102,7 @@
 
 	async chatCompletionWithResponse ( messages, options = {})
 	{
-		const { stream, tools, model, ...restOptions } = options;
+		const { stream, tools, ...restOptions } = options;
 		const isStreaming = stream;
 
 		logger.info( `Starting chatCompletionWithResponse with ${this.providers.length} providers (streaming: ${isStreaming})` );
@@ -108,11 +113,7 @@
 			try
 			{
 				logger.info( `Attempting with provider: ${provider.name}` );
-				const client = new OpenAI({
-					apiKey: provider.apiKey,
-					baseURL: provider.apiUrl,
-					timeout: 60000,
-				});
+				const client = this.createClient( provider );
 
 				const params = {
 					model: provider.model,
@@ -148,13 +149,9 @@
 			try
 			{
 				logger.info( `Fetching models for provider: ${provider.name}` );
-				const client = new OpenAI({
-					apiKey: provider.apiKey,
-					baseURL: provider.apiUrl,
-					timeout: 60000,
-				});
+				const client = this.createClient( provider );
 				const listResponse = await client.models.list();
-				const modelList = listResponse.data && listResponse.data.length > 0 ? listResponse.data : listResponse.body || [];
+				const modelList = Array.isArray( listResponse.data ) ? listResponse.data : listResponse.body || [];
 				const model = modelList.find( m => { return m.id === provider.model || m.id === `models/${provider.model}` });
 				if ( model )
 				{
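The old ternary fell through to `listResponse.body` whenever `data` was an empty array, discarding a valid (if empty) list; `Array.isArray` trusts any real `data` array and falls back only when a provider returns the list in a non-standard shape. A sketch of the two shapes this guards against (the payloads are illustrative):

```js
// Sketch: the two shapes an OpenAI-compatible /models response may take.
const standard = { data: [{ id: "gpt-4o-mini" }] };   // spec-shaped: models under .data
const nonStandard = { body: [{ id: "some-model" }] }; // some gateways expose the raw body instead

function normalizeModels ( listResponse )
{
	// Mirrors the fixed line: trust .data only when it is actually an array.
	return Array.isArray( listResponse.data ) ? listResponse.data : listResponse.body || [];
}

console.log( normalizeModels( standard ).length );    // 1
console.log( normalizeModels( nonStandard ).length ); // 1
console.log( normalizeModels({}).length );            // 0, neither shape present
```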
@@ -1,18 +1,18 @@
 const express = require( "express" );
 const cors = require( "cors" );
-const AIRouter = require( "../main" );
 const pino = require( "pino" );
 const pretty = require( "pino-pretty" );
 const pinoStream = pretty({ colorize: true, ignore: "pid,hostname" });
 const logger = pino({ base: false }, pinoStream );
 require( "dotenv" ).config({ quiet: true });
+const AIRouter = require( "../main" );
+const providers = require( "../provider" )
+const aiRouter = new AIRouter( providers );
 
 const app = express();
 app.use( cors() );
 app.use( express.json() );
 
-const providers = require( "../provider" )
-const aiRouter = new AIRouter( providers );
 
 app.post( "/v1/chat/completions", async ( req, res ) =>
 {
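Moving the `../main` and `../provider` requires below `dotenv.config()` matters because CommonJS runs a module's body the moment it is first required: if `provider.js` captures `process.env` values at load time, requiring it earlier bakes in `undefined` keys. A sketch of that hazard, with a provider entry shaped the way the readme describes (illustrative, not the package's actual file):

```js
// provider.js sketch (illustrative): the apiKey expression is evaluated
// once, at first require(). If this module loads before dotenv.config()
// runs, apiKey is frozen as undefined for the life of the process.
module.exports = [
	{
		name: "openai",                       // placeholder provider entry
		apiKey: process.env.OPENAI_API_KEY,   // must already be populated here
		model: "gpt-4o-mini",                 // placeholder model id
		apiUrl: "https://api.openai.com/v1",
	},
];
```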
@@ -64,6 +64,56 @@ app.post( "/v1/chat/completions", async ( req, res ) =>
 	}
 	}
 });
+app.post( "/chat/completions", async ( req, res ) =>
+{
+	const { messages, model, stream, ...rest } = req.body;
+
+	if ( !messages || !Array.isArray( messages ) )
+	{
+		return res.status( 400 ).json({ error: { message: "messages must be an array" } });
+	}
+
+	if ( stream )
+	{
+		res.setHeader( "Content-Type", "text/event-stream" );
+		res.setHeader( "Cache-Control", "no-cache" );
+		res.setHeader( "Connection", "keep-alive" );
+
+		try
+		{
+			const result = await aiRouter.chatCompletionWithResponse( messages, { model, stream, ...rest });
+
+			for await ( const chunk of result.data )
+			{
+				res.write( `data: ${JSON.stringify( chunk )}\n\n` );
+			}
+
+			res.write( "data: [DONE]\n\n" );
+		}
+		catch ( err )
+		{
+			logger.error( err );
+			res.write( `data: ${JSON.stringify({ error: { message: err.message } })}\n\n` );
+			res.write( "data: [DONE]\n\n" );
+		}
+
+		res.end();
+	}
+	else
+	{
+		try
+		{
+			const result = await aiRouter.chatCompletionWithResponse( messages, { model, stream, ...rest });
+
+			res.json( result.data );
+		}
+		catch ( err )
+		{
+			logger.error( err );
+			res.status( 500 ).json({ error: { message: err.message } });
+		}
+	}
+});
 
 app.get( "/v1/models", async ( req, res ) =>
 {
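The new unprefixed route mirrors `/v1/chat/completions` for clients that omit the `/v1` prefix. A quick sketch of exercising it once the server is running locally (uses the Node 18+ global `fetch`; the port follows the `.env.example` default):

```js
// Sketch: calling the new unprefixed endpoint in non-streaming mode.
( async () =>
{
	const res = await fetch( "http://localhost:3000/chat/completions", {
		method: "POST",
		headers: { "Content-Type": "application/json" },
		body: JSON.stringify({
			messages: [{ role: "user", content: "hello" }],
			stream: false, // true switches the route to SSE "data: ..." chunks ending with [DONE]
		}),
	});
	console.log( await res.json() );
})();
```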
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "unified-ai-router",
-  "version": "3.1.0",
+  "version": "3.1.3",
   "description": "A unified interface for multiple LLM providers with automatic fallback. This project includes an OpenAI-compatible server and a deployable Telegram bot with a Mini App interface. It supports major providers like OpenAI, Google, Grok, and more, ensuring reliability and flexibility for your AI applications.",
   "license": "ISC",
   "author": "mlibre",
package/readme.md CHANGED
@@ -4,7 +4,6 @@ Unified AI Router is a comprehensive toolkit for AI applications, featuring:
 
 - A unified interface for multiple LLM providers with automatic fallback (the core router library)
 - An OpenAI-compatible server for seamless API integration
-- A deployable Telegram bot with Mini App interface
 
 It supports major providers like OpenAI, Google, Grok, and more, ensuring reliability and flexibility.
 
@@ -13,6 +12,7 @@ It supports major providers like OpenAI, Google, Grok, and more, ensuring reliability and flexibility.
 - [📖 Usage](#-usage)
   - [📚 Basic Library Usage](#-basic-library-usage)
   - [🔌 OpenAI-Compatible Server](#-openai-compatible-server)
+    - [Setup](#setup)
   - [🧪 Testing](#-testing)
 - [🔧 Supported Providers](#-supported-providers)
 - [🔑 API Keys](#-api-keys)
@@ -31,15 +31,14 @@ It supports major providers like OpenAI, Google, Grok, and more, ensuring reliability and flexibility.
 - **OpenAI-Compatible Server**: Drop-in replacement for the OpenAI API, enabling easy integration with existing tools and clients
 - **Streaming and Non-Streaming Support**: Handles both streaming and non-streaming responses
 - **Tool Calling**: Full support for tools in LLM interactions
-- **Telegram Bot Integration**: Deployable as a Telegram bot with an interactive Mini App interface
 
 ## 🛠️ Installation
 
 ```bash
 npm i unified-ai-router
 # OR
-git clone https://github.com/mlibre/AIRouter
-cd AIRouter
+git clone https://github.com/mlibre/Unified-AI-Router
+cd Unified-AI-Router
 npm i
 ```
 
@@ -89,14 +88,14 @@ The server uses the provider configurations defined in [provider.js](provider.js)
 
 #### Setup
 
-1. Copy the example environment file:
+1. Configure your providers in `provider.js`. Add new providers or modify existing ones with the appropriate `name`, `apiKey` (referencing the corresponding env variable), `model`, and `apiUrl` for the providers you want to use.
+
+2. Copy the example environment file:
 
    ```bash
   cp .env.example .env
   ```
 
-2. Configure your providers in `provider.js`. Add new provider or modify existing ones with the appropriate `name`, `apiKey` (referencing the corresponding env variable), `model`, and `apiUrl` for the providers you want to use.
-
 3. Edit `.env` and add your API keys for the desired providers (see [🔑 API Keys](#-api-keys) for sources).
 
 To start the server locally, run:
@@ -105,7 +104,13 @@ To start the server locally, run:
 npm start
 ```
 
-The server listens at `http://localhost:3000/v1/chat/completions` and supports standard OpenAI endpoints like `/v1/chat/completions`.
+The server listens at `http://localhost:3000/` and supports the following OpenAI-compatible endpoints:
+
+- `POST /v1/chat/completions` - Chat completions (streaming and non-streaming)
+- `POST /chat/completions` - Chat completions (streaming and non-streaming)
+- `GET /v1/models` - List available models
+- `GET /models` - List available models
+- `GET /health` - Health check
 
 ### 🧪 Testing
 
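Since the surface is OpenAI-compatible, the official `openai` npm client can be pointed straight at the local server. A minimal sketch: the `apiKey` is a dummy value, as the routes in this diff do not validate one, and the model id is a placeholder.

```js
// Sketch: using the official openai client against the local server.
const OpenAI = require( "openai" );

const client = new OpenAI({
	apiKey: "unused",                     // dummy; the server shown here does not check it
	baseURL: "http://localhost:3000/v1",  // the /v1 endpoints listed above
});

client.chat.completions.create({
	model: "gpt-4o-mini", // placeholder; the router may substitute each provider's own model
	messages: [{ role: "user", content: "ping" }],
}).then( ( r ) => { return console.log( r.choices[0].message.content ); });
```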
@@ -137,6 +142,7 @@ node tests/tools.js
 - Vercel
 - Cerebras
 - LLM7
+- Any Other OpenAI-Compatible Server
 
 ## 🔑 API Keys
 
@@ -152,7 +158,6 @@ Get your API keys from the following providers:
 - **Vercel AI Gateway**: [vercel.com/docs/ai/ai-gateway](https://vercel.com/docs/ai-gateway)
 - **Cerebras**: [cloud.cerebras.ai](https://cloud.cerebras.ai)
 - **LLM7**: [token.llm7.io](https://token.llm7.io/)
-  - Seems like it does not support tool calling
 
 ## 🔼 Vercel Deployment (Telegram Bot)
 
@@ -214,19 +219,14 @@ curl "https://ai-router-flame.vercel.app/api?register_webhook=true"
 
 After deploying the bot, you need to configure the Telegram Mini App and menu button:
 
-1. **Configure Mini App:**
-   - Go to [@BotFather](https://t.me/botfather)
-   - Send `/mybots` and select your bot
-   - Go to `Bot Settings` → `Configure Mini App`
-   - Set the Mini App URL to: `https://ai-router-flame.vercel.app`
+**Configure Mini App:**
 
-2. **Configure Menu Button:**
-   - Go to [@BotFather](https://t.me/botfather)
-   - Send `/mybots` and select your bot
-   - Go to `Bot Settings` → `Menu Button`
-   - Ensure the URL shown is: `https://ai-router-flame.vercel.app`
+- Go to [@BotFather](https://t.me/botfather)
+- Send `/mybots` and select your bot
+- Go to `Bot Settings` → `Configure Mini App`
+- Set the Mini App URL to: `https://ai-router-flame.vercel.app`
 
-Once configured, users can access the Mini App by sending `/start` or `/app` to your bot, or through the menu button.
+Once configured, users can access the Mini App by sending `/start` or `/app` to your bot.
 
 An example of a deployed bot is accessible on Telegram: [https://t.me/freePulseAIbot](https://t.me/freePulseAIbot)
 