unified-ai-router 2.4.4 → 3.0.1

package/legacy/main.js ADDED
@@ -0,0 +1,63 @@
+ const { ChatOpenAI } = require( "@langchain/openai" );
+ const pino = require( "pino" );
+ const pretty = require( "pino-pretty" );
+ const stream = pretty({ colorize: true, ignore: "pid,hostname" });
+ const logger = pino({ base: false }, stream );
+
+ class AIRouter
+ {
+   constructor ( providers )
+   {
+     this.providers = providers;
+   }
+
+   async chatCompletion ( messages, options = {}, stream = false )
+   {
+     const { stream: streamOption, tools, model, ...restOptions } = options;
+     const isStreaming = stream || streamOption;
+
+     logger.info( `Starting chatCompletion with ${this.providers.length} providers (streaming: ${isStreaming})` );
+     let lastError;
+
+     for ( const provider of this.providers )
+     {
+       try
+       {
+         logger.info( `Attempting with provider: ${provider.name}` );
+         let llm = new ChatOpenAI({
+           apiKey: provider.apiKey,
+           model: provider.model,
+           configuration: {
+             baseURL: provider.apiUrl,
+           },
+           ...restOptions,
+         });
+
+         if ( tools && tools.length > 0 )
+         {
+           llm = llm.bindTools( tools );
+         }
+
+         if ( isStreaming )
+         {
+           const stream = await llm.stream( messages );
+           return stream;
+         }
+         else
+         {
+           const response = await llm.invoke( messages, { timeout: 60000 });
+           return response;
+         }
+       }
+       catch ( error )
+       {
+         lastError = error;
+         logger.error( `Failed with ${provider.name}: ${error.message}` );
+         // Continue to next provider
+       }
+     }
+     throw new Error( `All providers failed. Last error: ${lastError.message}` );
+   }
+ }
+
+ module.exports = AIRouter;
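
For orientation, here is a minimal usage sketch of this legacy class; it is not part of the package, and the provider entry shown is illustrative (the field names follow `provider.js`):

```js
// Minimal sketch: driving the legacy LangChain-based AIRouter directly.
const AIRouter = require( "./legacy/main" );

const router = new AIRouter([
  {
    name: "openrouter", // illustrative entry; the real list lives in provider.js
    apiKey: process.env.OPENROUTER_API_KEY,
    model: "x-ai/grok-4-fast:free",
    apiUrl: "https://openrouter.ai/api/v1",
  },
]);

( async () =>
{
  // Non-streaming call; resolves with a LangChain message object
  const response = await router.chatCompletion([
    { role: "user", content: "Hello!" },
  ]);
  console.log( response.content );
})();
```
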
@@ -0,0 +1,192 @@
+ const express = require( "express" );
+ const cors = require( "cors" );
+ const AIRouter = require( "../main" ); // your existing class
+ const pino = require( "pino" );
+ const pretty = require( "pino-pretty" );
+ const stream = pretty({ colorize: true, ignore: "pid,hostname" });
+ const logger = pino({ base: false }, stream );
+ require( "dotenv" ).config({ quiet: true });
+
+ const app = express();
+ app.use( cors() );
+ app.use( express.json() );
+
+ /**
+  * Initialize router with providers (could load from env/config)
+  */
+ const providers = require( "../provider" )
+
+ const aiRouter = new AIRouter( providers );
+
+ /**
+  * OpenAI-compatible endpoint: POST /v1/chat/completions
+  */
+ app.post( "/v1/chat/completions", async ( req, res ) =>
+ {
+   try
+   {
+     const { messages, model, stream, ...rest } = req.body;
+
+     if ( !messages || !Array.isArray( messages ) )
+     {
+       return res.status( 400 ).json({ error: { message: "messages must be an array" } });
+     }
+
+     if ( stream )
+     {
+       // Streaming mode → use Server-Sent Events (SSE)
+       res.setHeader( "Content-Type", "text/event-stream" );
+       res.setHeader( "Cache-Control", "no-cache" );
+       res.setHeader( "Connection", "keep-alive" );
+
+       try
+       {
+         const response = await aiRouter.chatCompletion( messages, { model, ...rest }, true );
+         const id = `chatcmpl-${Date.now()}`;
+         const created = Math.floor( Date.now() / 1000 );
+         let fullResponse = null;
+         for await ( const chunk of response )
+         {
+           const modelName = chunk?.response_metadata?.model_name || model || "unknown";
+           const systemFingerprint = chunk?.response_metadata?.system_fingerprint || null;
+           let delta = { ...chunk.delta || { content: chunk.content || "" } };
+           if ( !delta.role ) delta.role = "assistant";
+           delta.reasoning = delta.reasoning || null;
+           delta.reasoning_details = delta.reasoning_details || [];
+           let toolCallsDelta = null;
+           if ( chunk.tool_calls && chunk.tool_calls.length > 0 )
+           {
+             toolCallsDelta = chunk.tool_calls.map( ( tc, index ) =>
+             {
+               return {
+                 id: tc.id || `call_${Date.now()}_${Math.random().toString( 36 ).substr( 2, 9 )}`,
+                 type: "function",
+                 index,
+                 function: {
+                   name: tc.name,
+                   arguments: JSON.stringify( tc.args || {})
+                 }
+               };
+             });
+             delta.tool_calls = toolCallsDelta;
+             delta.content = "";
+           }
+           const chunkFinishReason = delta.finish_reason || chunk?.response_metadata?.finish_reason || null;
+           const chunkNativeFinishReason = delta.native_finish_reason || chunk?.response_metadata?.native_finish_reason || chunkFinishReason || null;
+           if ( chunk.content && !fullResponse ) fullResponse = chunk; // Capture full for reasoning if available
+           const payload = {
+             id,
+             provider: "OpenAI",
+             object: "chat.completion.chunk",
+             created,
+             model: modelName,
+             system_fingerprint: systemFingerprint,
+             choices: [
+               {
+                 logprobs: null,
+                 delta,
+                 index: 0,
+                 finish_reason: chunkFinishReason,
+                 native_finish_reason: chunkNativeFinishReason,
+               },
+             ],
+           };
+           const usage = chunk?.response_metadata?.usage;
+           if ( usage && typeof usage === "object" && !Array.isArray( usage ) && Object.keys( usage ).length > 0 )
+           {
+             payload.usage = chunk?.response_metadata?.usage || null;
+           }
+           res.write( `data: ${JSON.stringify( payload )}\n\n` );
+         }
+         // Send done signal
+         res.write( "data: [DONE]\n\n" );
+         res.end();
+       }
+       catch ( err )
+       {
+         logger.error( err );
+         res.write( `data: ${JSON.stringify({ error: err.message })}\n\n` );
+         res.write( "data: [DONE]\n\n" );
+         res.end();
+       }
+     }
+     else
+     {
+       // Non-streaming → return one-shot completion
+       const response = await aiRouter.chatCompletion( messages, { model, ...rest }, false );
+       let reasoning = null;
+       let refusal = null;
+       let toolCalls = null;
+       if ( response.contentBlocks )
+       {
+         const reasoningBlocks = response.contentBlocks.filter( b => { return b.type === "reasoning" || b.type === "thinking" });
+         reasoning = reasoningBlocks.length > 0 ? reasoningBlocks.map( b => { return b.text }).join( "\n" ) : null;
+         const refusalBlocks = response.contentBlocks.filter( b => { return b.type === "refusal" });
+         refusal = refusalBlocks.length > 0 ? refusalBlocks.map( b => { return b.text }).join( "\n" ) : null;
+       }
+       if ( response.tool_calls && response.tool_calls.length > 0 )
+       {
+         toolCalls = response.tool_calls.map( ( tc, index ) =>
+         {
+           return {
+             id: tc.id || `call_${Date.now()}_${Math.random().toString( 36 ).substr( 2, 9 )}`,
+             type: "function",
+             index,
+             function: {
+               name: tc.name,
+               arguments: JSON.stringify( tc.args || {})
+             }
+           };
+         });
+       }
+       const systemFingerprint = response.response_metadata?.system_fingerprint || null;
+       const finishReason = response.response_metadata?.finish_reason || null;
+       const nativeFinishReason = response.response_metadata?.native_finish_reason || finishReason || null;
+       const messageContent = toolCalls && toolCalls.length > 0 ? "" : response.content;
+       let finalResult = {
+         id: `chatcmpl_${Date.now()}`,
+         provider: "OpenAI",
+         object: "chat.completion",
+         created: Math.floor( Date.now() / 1000 ),
+         model: response.response_metadata?.model_name || model || "unknown",
+         system_fingerprint: systemFingerprint,
+         choices: [
+           {
+             logprobs: null,
+             finish_reason: finishReason,
+             native_finish_reason: nativeFinishReason,
+             index: 0,
+             message: {
+               role: "assistant",
+               content: messageContent,
+               refusal,
+               reasoning,
+               tool_calls: toolCalls
+             },
+           },
+         ],
+       }
+       const usage = response?.response_metadata?.usage;
+       if ( usage && typeof usage === "object" && !Array.isArray( usage ) && Object.keys( usage ).length > 0 )
+       {
+         finalResult.usage = response?.response_metadata?.usage || null;
+       }
+       res.json( finalResult );
+     }
+   }
+   catch ( err )
+   {
+     logger.error( err );
+     res.status( 500 ).json({ error: { message: err.message } });
+   }
+ });
+
+ // Health check
+ app.get( "/health", ( req, res ) => { return res.json({ status: "ok" }) });
+
+ // Start server
+ const PORT = process.env.PORT || 3000;
+ app.listen( PORT, () =>
+ {
+   logger.info( `🚀 OpenAI-compatible API listening at http://localhost:${PORT}/v1/chat/completions` );
+ });
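
A minimal client sketch for this server, assuming it is running locally on the default port; the model name is a placeholder and Node 18+ provides a global `fetch`:

```js
// Minimal sketch: one non-streaming request to the OpenAI-compatible endpoint.
( async () =>
{
  const res = await fetch( "http://localhost:3000/v1/chat/completions", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({
      model: "x-ai/grok-4-fast:free", // placeholder; the router falls back across providers
      messages: [{ role: "user", content: "Hello!" }],
    }),
  });
  const completion = await res.json();
  console.log( completion.choices[0].message.content );
})();
```
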
package/main.js CHANGED
@@ -1,8 +1,8 @@
- const { ChatOpenAI } = require( "@langchain/openai" );
+ const OpenAI = require( "openai" );
  const pino = require( "pino" );
  const pretty = require( "pino-pretty" );
- const stream = pretty({ colorize: true, ignore: "pid,hostname" });
- const logger = pino({ base: false }, stream );
+ const pinoStream = pretty({ colorize: true, ignore: "pid,hostname" });
+ const logger = pino({ base: false }, pinoStream );
 
  class AIRouter
  {
@@ -24,35 +24,111 @@ class AIRouter
        try
        {
          logger.info( `Attempting with provider: ${provider.name}` );
-         let llm = new ChatOpenAI({
+         const client = new OpenAI({
            apiKey: provider.apiKey,
-           model: provider.model,
-           configuration: {
-             baseURL: provider.apiUrl,
-           },
-           ...restOptions,
+           baseURL: provider.apiUrl,
+           timeout: 60000,
          });
 
-         if ( tools && tools.length > 0 )
-         {
-           llm = llm.bindTools( tools );
-         }
+         const params = {
+           model: provider.model,
+           messages,
+           ...tools && tools.length > 0 ? { tools } : {},
+           stream: isStreaming,
+           ...restOptions
+         };
 
          if ( isStreaming )
          {
-           const stream = await llm.stream( messages );
-           return stream;
+           const responseStream = await client.chat.completions.create( params );
+           return ( async function* ()
+           {
+             for await ( const chunk of responseStream )
+             {
+               const content = chunk.choices[0]?.delta?.content;
+               const reasoning = chunk.choices[0]?.delta?.reasoning;
+               const tool_calls_delta = chunk.choices[0]?.delta?.tool_calls;
+               // != null covers both null and undefined fields
+               if ( content != null )
+               {
+                 chunk.content = content;
+               }
+               if ( reasoning != null )
+               {
+                 chunk.reasoning = reasoning;
+               }
+               if ( tool_calls_delta != null )
+               {
+                 chunk.tool_calls_delta = tool_calls_delta;
+               }
+               yield chunk;
+             }
+           })();
          }
          else
          {
-           const response = await llm.invoke( messages, { timeout: 60000 });
+           const response = await client.chat.completions.create( params );
+           const content = response.choices[0]?.message?.content;
+           const reasoning = response.choices[0]?.message?.reasoning;
+           const tool_calls = response.choices[0]?.message?.tool_calls;
+           if ( content != null )
+           {
+             response.content = content;
+           }
+           if ( reasoning != null )
+           {
+             response.reasoning = reasoning;
+           }
+           if ( tool_calls != null )
+           {
+             response.tool_calls = tool_calls;
+           }
            return response;
          }
        }
        catch ( error )
        {
          lastError = error;
-         logger.error( `Failed with ${provider.name}:${error.message}` );
+         logger.error( `Failed with ${provider.name}: ${error.message}` );
+         // Continue to next provider
+       }
+     }
+     throw new Error( `All providers failed. Last error: ${lastError.message}` );
+   }
+
+   async chatCompletionWithResponse ( messages, options = {})
+   {
+     const { stream, tools, model, ...restOptions } = options;
+     const isStreaming = stream;
+
+     logger.info( `Starting chatCompletionWithResponse with ${this.providers.length} providers (streaming: ${isStreaming})` );
+     let lastError;
+
+     for ( const provider of this.providers )
+     {
+       try
+       {
+         logger.info( `Attempting with provider: ${provider.name}` );
+         const client = new OpenAI({
+           apiKey: provider.apiKey,
+           baseURL: provider.apiUrl,
+           timeout: 60000,
+         });
+
+         const params = {
+           model: provider.model,
+           messages,
+           ...tools && tools.length > 0 ? { tools } : {},
+           stream: isStreaming,
+           ...restOptions
+         };
+
+         const { data, response: rawResponse } = await client.chat.completions.create( params ).withResponse();
+         return { data, response: rawResponse };
+       }
+       catch ( error )
+       {
+         lastError = error;
+         logger.error( `Failed with ${provider.name}: ${error.message}` );
          // Continue to next provider
        }
      }
@@ -60,4 +136,4 @@ class AIRouter
    }
  }
 
- module.exports = AIRouter;
+ module.exports = AIRouter;
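
A sketch of consuming the normalized stream that the rewritten `chatCompletion` yields when streaming; the convenience fields `content`, `reasoning`, and `tool_calls_delta` are the ones attached in the generator above, and the provider env keys are assumed to be set:

```js
// Minimal sketch: iterating the async generator returned by chatCompletion.
const AIRouter = require( "./main" );
require( "dotenv" ).config({ quiet: true });
const providers = require( "./provider" );

( async () =>
{
  const router = new AIRouter( providers );
  const stream = await router.chatCompletion(
    [{ role: "user", content: "Write a haiku about routers." }],
    {},
    true // request streaming
  );
  for await ( const chunk of stream )
  {
    // Each chunk is the raw OpenAI chunk plus the fields set by the router
    if ( chunk.reasoning ) process.stdout.write( chunk.reasoning );
    if ( chunk.content ) process.stdout.write( chunk.content );
  }
})();
```
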
@@ -23,161 +23,52 @@ const aiRouter = new AIRouter( providers );
   */
  app.post( "/v1/chat/completions", async ( req, res ) =>
  {
-   try
+   const { messages, model, stream, ...rest } = req.body;
+
+   if ( !messages || !Array.isArray( messages ) )
    {
-     const { messages, model, stream, ...rest } = req.body;
+     return res.status( 400 ).json({ error: { message: "messages must be an array" } });
+   }
 
-     if ( !messages || !Array.isArray( messages ) )
-     {
-       return res.status( 400 ).json({ error: { message: "messages must be an array" } });
-     }
+   if ( stream )
+   {
+     res.setHeader( "Content-Type", "text/event-stream" );
+     res.setHeader( "Cache-Control", "no-cache" );
+     res.setHeader( "Connection", "keep-alive" );
 
-     if ( stream )
+     try
      {
-       // Streaming mode → use Server-Sent Events (SSE)
-       res.setHeader( "Content-Type", "text/event-stream" );
-       res.setHeader( "Cache-Control", "no-cache" );
-       res.setHeader( "Connection", "keep-alive" );
+       const result = await aiRouter.chatCompletionWithResponse( messages, { model, ...rest });
 
-       try
+       for await ( const chunk of result.data )
        {
-         const response = await aiRouter.chatCompletion( messages, { model, ...rest }, true );
-         const id = `chatcmpl-${Date.now()}`;
-         const created = Math.floor( Date.now() / 1000 );
-         let fullResponse = null;
-         for await ( const chunk of response )
-         {
-           const modelName = chunk?.response_metadata?.model_name || model || "unknown";
-           const systemFingerprint = chunk?.response_metadata?.system_fingerprint || null;
-           let delta = { ...chunk.delta || { content: chunk.content || "" } };
-           if ( !delta.role ) delta.role = "assistant";
-           delta.reasoning = delta.reasoning || null;
-           delta.reasoning_details = delta.reasoning_details || [];
-           let toolCallsDelta = null;
-           if ( chunk.tool_calls && chunk.tool_calls.length > 0 )
-           {
-             toolCallsDelta = chunk.tool_calls.map( ( tc, index ) =>
-             {
-               return {
-                 id: tc.id || `call_${Date.now()}_${Math.random().toString( 36 ).substr( 2, 9 )}`,
-                 type: "function",
-                 index,
-                 function: {
-                   name: tc.name,
-                   arguments: JSON.stringify( tc.args || {})
-                 }
-               };
-             });
-             delta.tool_calls = toolCallsDelta;
-             delta.content = "";
-           }
-           const chunkFinishReason = delta.finish_reason || chunk?.response_metadata?.finish_reason || null;
-           const chunkNativeFinishReason = delta.native_finish_reason || chunk?.response_metadata?.native_finish_reason || chunkFinishReason || null;
-           if ( chunk.content && !fullResponse ) fullResponse = chunk; // Capture full for reasoning if available
-           const payload = {
-             id,
-             provider: "OpenAI",
-             object: "chat.completion.chunk",
-             created,
-             model: modelName,
-             system_fingerprint: systemFingerprint,
-             choices: [
-               {
-                 logprobs: null,
-                 delta,
-                 index: 0,
-                 finish_reason: chunkFinishReason,
-                 native_finish_reason: chunkNativeFinishReason,
-               },
-             ],
-           };
-           const usage = chunk?.response_metadata?.usage;
-           if ( usage && typeof usage === "object" && !Array.isArray( usage ) && Object.keys( usage ).length > 0 )
-           {
-             payload.usage = chunk?.response_metadata?.usage || null;
-           }
-           res.write( `data: ${JSON.stringify( payload )}\n\n` );
-         }
-         // Send done signal
-         res.write( "data: [DONE]\n\n" );
-         res.end();
-       }
-       catch ( err )
-       {
-         logger.error( err );
-         res.write( `data: ${JSON.stringify({ error: err.message })}\n\n` );
-         res.write( "data: [DONE]\n\n" );
-         res.end();
+         res.write( `data: ${JSON.stringify( chunk )}\n\n` );
        }
+
+       res.write( "data: [DONE]\n\n" );
      }
-     else
+     catch ( err )
      {
-       // Non-streaming → return one-shot completion
-       const response = await aiRouter.chatCompletion( messages, { model, ...rest }, false );
-       let reasoning = null;
-       let refusal = null;
-       let toolCalls = null;
-       if ( response.contentBlocks )
-       {
-         const reasoningBlocks = response.contentBlocks.filter( b => { return b.type === "reasoning" || b.type === "thinking" });
-         reasoning = reasoningBlocks.length > 0 ? reasoningBlocks.map( b => { return b.text }).join( "\n" ) : null;
-         const refusalBlocks = response.contentBlocks.filter( b => { return b.type === "refusal" });
-         refusal = refusalBlocks.length > 0 ? refusalBlocks.map( b => { return b.text }).join( "\n" ) : null;
-       }
-       if ( response.tool_calls && response.tool_calls.length > 0 )
-       {
-         toolCalls = response.tool_calls.map( ( tc, index ) =>
-         {
-           return {
-             id: tc.id || `call_${Date.now()}_${Math.random().toString( 36 ).substr( 2, 9 )}`,
-             type: "function",
-             index,
-             function: {
-               name: tc.name,
-               arguments: JSON.stringify( tc.args || {})
-             }
-           };
-         });
-       }
-       const systemFingerprint = response.response_metadata?.system_fingerprint || null;
-       const finishReason = response.response_metadata?.finish_reason || null;
-       const nativeFinishReason = response.response_metadata?.native_finish_reason || finishReason || null;
-       const messageContent = toolCalls && toolCalls.length > 0 ? "" : response.content;
-       let finalResult = {
-         id: `chatcmpl_${Date.now()}`,
-         provider: "OpenAI",
-         object: "chat.completion",
-         created: Math.floor( Date.now() / 1000 ),
-         model: response.response_metadata?.model_name || model || "unknown",
-         system_fingerprint: systemFingerprint,
-         choices: [
-           {
-             logprobs: null,
-             finish_reason: finishReason,
-             native_finish_reason: nativeFinishReason,
-             index: 0,
-             message: {
-               role: "assistant",
-               content: messageContent,
-               refusal,
-               reasoning,
-               tool_calls: toolCalls
-             },
-           },
-         ],
-       }
-       const usage = response?.response_metadata?.usage;
-       if ( usage && typeof usage === "object" && !Array.isArray( usage ) && Object.keys( usage ).length > 0 )
-       {
-         finalResult.usage = response?.response_metadata?.usage || null;
-       }
-       res.json( finalResult );
+       logger.error( err );
+       res.write( `data: ${JSON.stringify({ error: { message: err.message } })}\n\n` );
+       res.write( "data: [DONE]\n\n" );
      }
+
+     res.end();
    }
-   catch ( err )
+   else
    {
-     logger.error( err );
-     res.status( 500 ).json({ error: { message: err.message } });
+     try
+     {
+       const result = await aiRouter.chatCompletionWithResponse( messages, { model, ...rest });
+
+       res.json( result.data );
+     }
+     catch ( err )
+     {
+       logger.error( err );
+       res.status( 500 ).json({ error: { message: err.message } });
+     }
    }
  });
 
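
Since the endpoint now forwards raw provider chunks over SSE, a client parses the `data:` events itself. A minimal reader sketch, assuming Node 18+ (async-iterable `fetch` body) and a locally running server:

```js
// Minimal sketch: reading the SSE stream from the simplified endpoint.
async function streamChat ( messages )
{
  const res = await fetch( "http://localhost:3000/v1/chat/completions", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ messages, stream: true }),
  });
  const decoder = new TextDecoder();
  let buffer = "";
  for await ( const part of res.body )
  {
    buffer += decoder.decode( part, { stream: true });
    const events = buffer.split( "\n\n" );
    buffer = events.pop(); // keep the trailing, possibly incomplete event
    for ( const event of events )
    {
      const data = event.replace( /^data: /, "" ).trim();
      if ( !data || data === "[DONE]" ) continue;
      const chunk = JSON.parse( data );
      process.stdout.write( chunk.choices?.[0]?.delta?.content || "" );
    }
  }
}

streamChat([{ role: "user", content: "Hello!" }]);
```
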
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "unified-ai-router",
-   "version": "2.4.4",
+   "version": "3.0.1",
    "description": "A unified interface for multiple LLM providers with automatic fallback. This project includes an OpenAI-compatible server and a deployable Telegram bot with a Mini App interface. It supports major providers like OpenAI, Google, Grok, and more, ensuring reliability and flexibility for your AI applications.",
    "license": "ISC",
    "author": "mlibre",
@@ -47,6 +47,7 @@
    "eslint": "^9.33.0",
    "express": "^5.1.0",
    "langchain": "^1.0.0-alpha.6",
+   "openai": "^5.23.0",
    "pino": "^9.9.0",
    "pino-pretty": "^13.1.1",
    "zod": "^3.25.76"
package/provider.js CHANGED
@@ -1,11 +1,17 @@
  module.exports = [
+
+   {
+     name: "openrouter",
+     apiKey: process.env.OPENROUTER_API_KEY,
+     model: "deepseek/deepseek-r1-0528-qwen3-8b:free",
+     apiUrl: "https://openrouter.ai/api/v1",
+   },
    {
      name: "openrouter",
      apiKey: process.env.OPENROUTER_API_KEY,
      model: "x-ai/grok-4-fast:free",
      apiUrl: "https://openrouter.ai/api/v1",
    },
-
    {
      name: "cerebras",
      apiKey: process.env.CEREBRAS_API_KEY,
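
Every entry shares the same four fields, so pointing the router at an extra OpenAI-compatible endpoint is just another object in this array. A sketch with placeholder values:

```js
// Minimal sketch: appending a custom provider entry at startup.
const providers = require( "./provider" );

providers.push({
  name: "my-gateway", // placeholder name
  apiKey: process.env.MY_GATEWAY_API_KEY, // placeholder env var
  model: "my-org/my-model", // placeholder model id
  apiUrl: "https://llm.example.com/v1", // any OpenAI-compatible base URL
});
```
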
package/readme.md CHANGED
@@ -1,31 +1,37 @@
  # Unified AI Router
 
- A unified interface for multiple LLM providers with automatic fallback. This project includes an OpenAI-compatible server and a deployable Telegram bot with a Mini App interface. It supports major providers like OpenAI, Google, Grok, and more, ensuring reliability and flexibility for your AI applications.
-
- * [🚀 Features](#-features)
- * [🛠️ Installation](#️-installation)
- * [📖 Usage](#-usage)
-   * [📚 Basic Library Usage](#-basic-library-usage)
-   * [🤖 OpenAI-Compatible Server](#-openai-compatible-server)
-   * [🧪 Testing](#-testing)
- * [🔧 Supported Providers](#-supported-providers)
- * [🔑 API Keys](#-api-keys)
- * [🔼 Vercel Deployment (Telegram Bot)](#-vercel-deployment-telegram-bot)
-   * [📋 Prerequisites](#-prerequisites)
-   * [🚀 Deployment Steps](#-deployment-steps)
-   * [📱 Enable Telegram Mini App](#-enable-telegram-mini-app)
- * [📁 Project Structure](#-project-structure)
- * [📄 License](#-license)
+ Unified AI Router is a comprehensive toolkit for AI applications, featuring:
+
+ - A unified interface for multiple LLM providers with automatic fallback (the core router library)
+ - An OpenAI-compatible server for seamless API integration
+ - A deployable Telegram bot with a Mini App interface
+
+ It supports major providers like OpenAI, Google, Grok, and more, ensuring reliability and flexibility.
+
+ - [🚀 Features](#-features)
+ - [🛠️ Installation](#️-installation)
+ - [📖 Usage](#-usage)
+   - [📚 Basic Library Usage](#-basic-library-usage)
+   - [🔌 OpenAI-Compatible Server](#-openai-compatible-server)
+   - [🧪 Testing](#-testing)
+ - [🔧 Supported Providers](#-supported-providers)
+ - [🔑 API Keys](#-api-keys)
+ - [🔼 Vercel Deployment (Telegram Bot)](#-vercel-deployment-telegram-bot)
+   - [📋 Prerequisites](#-prerequisites)
+   - [🚀 Deployment Steps](#-deployment-steps)
+   - [📱 Enable Telegram Mini App](#-enable-telegram-mini-app)
+ - [📁 Project Structure](#-project-structure)
+ - [📄 License](#-license)
 
  ## 🚀 Features
 
- * **Multi-Provider Support**: Works with OpenAI, Google, Grok, OpenRouter, Z.ai, Qroq, Cohere, Vercel, Cerebras, and LLM7
- * **Automatic Fallback**: If one provider fails, automatically tries the next
- * **Simple API**: Easy to use interface for all supported providers
- * **OpenAI-Compatible Server**: Drop-in replacement for OpenAI API
- * **Streaming and Non-Streaming**: Supports both streaming and non-streaming responses
- * **Tool Support**: Use tools with your LLM calls
- * **Telegram Bot**: Deployable as a Telegram bot with a Mini App interface
+ - **Multi-Provider Support**: Works with OpenAI, Google, Grok, OpenRouter, Z.ai, Groq, Cohere, Vercel, Cerebras, and LLM7
+ - **Automatic Fallback**: If one provider fails, automatically tries the next
+ - **Simple API**: Easy-to-use interface for all supported providers
+ - **OpenAI-Compatible Server**: Drop-in replacement for the OpenAI API, enabling easy integration with existing tools and clients
+ - **Streaming and Non-Streaming Support**: Handles both streaming and non-streaming responses
+ - **Tool Calling**: Full support for tools in LLM interactions
+ - **Telegram Bot Integration**: Deployable as a Telegram bot with an interactive Mini App interface
 
  ## 🛠️ Installation
 
@@ -76,17 +82,17 @@ const response = await llm.chatCompletion(messages, {
  console.log(response);
  ```
 
- ### 🤖 OpenAI-Compatible Server
+ ### 🔌 OpenAI-Compatible Server
 
- The project includes an OpenAI-compatible server that can be used as a drop-in replacement for the OpenAI API.
+ The OpenAI-compatible server provides a drop-in replacement for the OpenAI API. It routes requests through the unified router with fallback logic, ensuring high availability.
 
- To start the server, run:
+ To start the server locally, run:
 
  ```bash
  npm start
  ```
 
- The server will be available at `http://localhost:3000/v1/chat/completions`.
+ The server listens on `http://localhost:3000` and exposes the standard OpenAI endpoint `/v1/chat/completions`.
 
  ### 🧪 Testing
 
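
Because the server mimics the OpenAI API, the official `openai` client can simply point at it. A sketch, assuming a locally running server; the API key value is unused locally because the router injects real provider keys server-side, and the model name is a placeholder:

```js
// Minimal sketch: the official OpenAI SDK aimed at the local router.
const OpenAI = require( "openai" );

const client = new OpenAI({
  apiKey: "unused-locally", // provider keys are injected server-side
  baseURL: "http://localhost:3000/v1",
});

( async () =>
{
  const completion = await client.chat.completions.create({
    model: "x-ai/grok-4-fast:free", // placeholder; fallback may pick another provider
    messages: [{ role: "user", content: "Hello!" }],
  });
  console.log( completion.choices[0].message.content );
})();
```
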
@@ -108,32 +114,32 @@ node tests/tools.js
 
  ## 🔧 Supported Providers
 
- * OpenAI
- * Google Gemini
- * Grok
- * OpenRouter
- * Z.ai
- * Qroq
- * Cohere
- * Vercel
- * Cerebras
- * LLM7
+ - OpenAI
+ - Google Gemini
+ - Grok
+ - OpenRouter
+ - Z.ai
+ - Groq
+ - Cohere
+ - Vercel
+ - Cerebras
+ - LLM7
 
  ## 🔑 API Keys
 
  Get your API keys from the following providers:
 
- * **OpenAI**: [platform.openai.com/api-keys](https://platform.openai.com/api-keys)
- * **Google Gemini**: [aistudio.google.com/app/apikey](https://aistudio.google.com/app/apikey)
- * **Grok**: [console.x.ai](https://console.x.ai/)
- * **OpenRouter**: [openrouter.ai/keys](https://openrouter.ai/keys)
- * **Z.ai**: [api.z.ai](https://api.z.ai)
- * **Qroq**: [console.groq.com/keys](https://console.groq.com/keys)
- * **Cohere**: [dashboard.cohere.com/api-keys](https://dashboard.cohere.com/api-keys)
- * **Vercel**: [vercel.com/docs/ai/ai-gateway](https://vercel.com/masoud-ghorbanzadehs-projects/~/ai/api-keys)
- * **Cerebras**: [cloud.cerebras.ai](https://cloud.cerebras.ai)
- * **LLM7**: [token.llm7.io](https://token.llm7.io/)
-   * Seems like it does not support tool calling
+ - **OpenAI**: [platform.openai.com/api-keys](https://platform.openai.com/api-keys)
+ - **Google Gemini**: [aistudio.google.com/app/apikey](https://aistudio.google.com/app/apikey)
+ - **Grok**: [console.x.ai](https://console.x.ai/)
+ - **OpenRouter**: [openrouter.ai/keys](https://openrouter.ai/keys)
+ - **Z.ai**: [api.z.ai](https://api.z.ai)
+ - **Groq**: [console.groq.com/keys](https://console.groq.com/keys)
+ - **Cohere**: [dashboard.cohere.com/api-keys](https://dashboard.cohere.com/api-keys)
+ - **Vercel AI Gateway**: [vercel.com/docs/ai/ai-gateway](https://vercel.com/docs/ai-gateway)
+ - **Cerebras**: [cloud.cerebras.ai](https://cloud.cerebras.ai)
+ - **LLM7**: [token.llm7.io](https://token.llm7.io/)
+   - Note: it does not appear to support tool calling
 
  ## 🔼 Vercel Deployment (Telegram Bot)
 
@@ -141,9 +147,9 @@ This section describes how to deploy the AIRouter as a Telegram bot using Vercel
 
  ### 📋 Prerequisites
 
- * A Telegram Bot Token (@BotFather)
- * API keys for various AI providers
- * Vercel account
+ - A Telegram Bot Token (@BotFather)
+ - API keys for various AI providers
+ - Vercel account
 
  ### 🚀 Deployment Steps
 
@@ -196,16 +202,16 @@ curl "https://ai-router-flame.vercel.app/api?register_webhook=true"
  After deploying the bot, you need to configure the Telegram Mini App and menu button:
 
  1. **Configure Mini App:**
-    * Go to [@BotFather](https://t.me/botfather)
-    * Send `/mybots` and select your bot
-    * Go to `Bot Settings` → `Configure Mini App`
-    * Set the Mini App URL to: `https://ai-router-flame.vercel.app`
+    - Go to [@BotFather](https://t.me/botfather)
+    - Send `/mybots` and select your bot
+    - Go to `Bot Settings` → `Configure Mini App`
+    - Set the Mini App URL to: `https://ai-router-flame.vercel.app`
 
  2. **Configure Menu Button:**
-    * Go to [@BotFather](https://t.me/botfather)
-    * Send `/mybots` and select your bot
-    * Go to `Bot Settings` → `Menu Button`
-    * Ensure the URL shown is: `https://ai-router-flame.vercel.app`
+    - Go to [@BotFather](https://t.me/botfather)
+    - Send `/mybots` and select your bot
+    - Go to `Bot Settings` → `Menu Button`
+    - Ensure the URL shown is: `https://ai-router-flame.vercel.app`
 
  Once configured, users can access the Mini App by sending `/start` or `/app` to your bot, or through the menu button.
 
@@ -213,17 +219,17 @@ An example of a deployed bot is accessible on Telegram: [https://t.me/freePulseA
 
  ## 📁 Project Structure
 
- * `main.js` - The core AIRouter library.
- * `provider.js` - A list of supported AI providers.
- * `openai-compatible-server/index.js` - An OpenAI-compatible server.
- * `tests/` - A suite of tests for the library and server.
- * `bruno/` - A Bruno collection for API testing.
- * `vercel-project/` - A Vercel project for deploying a Telegram bot.
-   * `api/index.js` - Main webhook handler for the Telegram bot.
-   * `api/search.js` - A search proxy for the Telegram bot.
-   * `public/` - The frontend for the Telegram Mini App.
-   * `src/config.js` - Configuration for the Telegram bot.
-   * `src/telegram.js` - Telegram client implementation.
+ - `main.js` - Core AIRouter library implementing the unified interface and fallback logic
+ - `provider.js` - Configuration for supported AI providers
+ - `openai-compatible-server/index.js` - OpenAI-compatible API server
+ - `tests/` - Comprehensive tests for the library, server, and tools
+ - `bruno/` - Bruno API collection for testing endpoints
+ - `vercel-project/` - Ready-to-deploy Vercel setup for the Telegram bot
+   - `api/index.js` - Telegram webhook handler
+   - `api/search.js` - Search proxy endpoint
+   - `public/` - Mini App frontend (HTML, CSS, JS)
+   - `src/config.js` - Bot configuration
+   - `src/telegram.js` - Telegram API integration
 
  ## 📄 License
 
package/tests/chat.js CHANGED
@@ -20,7 +20,7 @@ async function getResponse ()
 
      for await ( const chunk of stream )
      {
-       process.stdout.write( chunk.content );
+       process.stdout.write( chunk.reasoning || chunk.content );
      }
    }
    catch ( error )
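
If reasoning tokens should stand apart from the answer rather than being interleaved, one alternative sketch (`stream` is whatever `chatCompletion` returned with streaming enabled):

```js
// Minimal sketch: label reasoning tokens instead of mixing the two streams.
async function printStream ( stream )
{
  let inReasoning = false;
  for await ( const chunk of stream )
  {
    if ( chunk.reasoning )
    {
      if ( !inReasoning ) process.stdout.write( "\n[reasoning] " );
      inReasoning = true;
      process.stdout.write( chunk.reasoning );
    }
    else if ( chunk.content )
    {
      if ( inReasoning ) process.stdout.write( "\n[answer] " );
      inReasoning = false;
      process.stdout.write( chunk.content );
    }
  }
}
```
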
package/tests/tools.js CHANGED
@@ -1,105 +1,130 @@
  const AIRouter = require( "../main" );
- const { tool } = require( "langchain" );
- const z = require( "zod" );
  require( "dotenv" ).config({ quiet: true });
 
  const providers = require( "../provider" )
  const llm = new AIRouter( providers );
 
- // Example tool: Multiply two numbers
- const multiplyTool = tool(
-   async ({ a, b }) =>
+ // Tool functions
+ async function multiply ({ a, b })
+ {
+   return {
+     result: a * b,
+   }
+ }
+
+ async function getWeather ({ city })
+ {
+   // Mock weather data for demonstration
+   const mockWeather = {
+     city,
+     temperature: 25,
+     condition: "Sunny",
+     humidity: 50,
+     wind: "10 km/h"
+   };
+   return mockWeather;
+ }
+
+ const tools = [
    {
-     return {
-       result: a * b,
-     }
+     type: "function",
+     function: {
+       name: "multiply",
+       description: "Multiply two numbers",
+       parameters: {
+         type: "object",
+         properties: {
+           a: {
+             type: "number",
+             description: "First number"
+           },
+           b: {
+             type: "number",
+             description: "Second number"
+           },
+         },
+         required: ["a", "b"],
+         additionalProperties: false,
+       },
+       strict: true,
+     },
+   },
+   {
+     type: "function",
+     function: {
+       name: "get_weather",
+       description: "Get the current weather forecast for a given city.",
+       parameters: {
+         type: "object",
+         properties: {
+           city: {
+             type: "string",
+             description: "The name of the city (e.g., Tehran) to get the weather for."
+           }
+         },
+         required: ["city"],
+         additionalProperties: false,
+       },
+       strict: true,
+     },
    },
+ ];
+
+ const toolMap = {
+   multiply,
+   get_weather: getWeather,
+ };
+
+ async function executeTool ( toolCall )
+ {
+   const toolFn = toolMap[toolCall.function.name];
+   if ( !toolFn )
    {
-     name: "multiply",
-     description: "Multiply two numbers",
-     schema: z.object({
-       a: z.number().describe( "First number" ),
-       b: z.number().describe( "Second number" ),
-     }),
+     throw new Error( `Unknown tool: ${toolCall.function.name}` );
    }
- );
 
- const weatherTool = tool(
-   async ({ city }) =>
+   let result;
+   try
    {
-     // Mock weather data for demonstration
-     const mockWeather = {
-       city,
-       temperature: 25,
-       condition: "Sunny",
-       humidity: 50,
-       wind: "10 km/h"
-     };
-     return mockWeather;
-   },
+     const args = JSON.parse( toolCall.function.arguments );
+     result = await toolFn( args );
+     console.log( `Tool "${toolCall.function.name}" executed with result:`, result );
+   }
+   catch ( toolError )
    {
-     name: "get_weather",
-     description: "Get the current weather forecast for a given city.",
-     schema: z.object({
-       city: z.string().describe( "The name of the city (e.g., Tehran) to get the weather for." )
-     })
+     console.error( `Error executing tool "${toolCall.function.name}":`, toolError.message );
+     result = `Error: ${toolError.message}`;
    }
- );
+
+   return {
+     tool_call_id: toolCall.id,
+     content: typeof result === "object" ? JSON.stringify( result ) : result,
+     name: toolCall.function.name
+   };
+ }
  async function main ()
  {
    try
    {
      const messages = [
-       { role: "system", content: "You are a helpful assistant with access to tools for calculations and weather forecasts. Use the multiply tool for calculations and the get_weather tool for weather information." },
-       { role: "user", content: "What's the weather like in Tehran today?" }
+       { role: "system", content: "You are a helpful assistant with access to tools for calculations and weather forecasts. Use the multiply tool for calculations and the get_weather tool for weather information." },
+       { role: "user", content: "How is the weather in Tehran today, and what is 1099*45?" }
      ];
 
      const response = await llm.chatCompletion( messages, {
        temperature: 0,
-       tools: [multiplyTool, weatherTool],
+       tools,
      });
 
-     console.log( "Weather tool example response:", response );
+     console.log( "Weather tool example response:", response );
 
      const toolResults = [];
      if ( response.tool_calls && response.tool_calls.length > 0 )
      {
        for ( const toolCall of response.tool_calls )
        {
-         let selectedTool;
-         if ( toolCall.name === "multiply" )
-         {
-           selectedTool = multiplyTool;
-         }
-         else if ( toolCall.name === "get_weather" )
-         {
-           selectedTool = weatherTool;
-         }
-
-         if ( selectedTool )
-         {
-           try
-           {
-             const result = await selectedTool.call( toolCall.args );
-             console.log( `Tool "${toolCall.name}" executed with result:`, result );
-             toolResults.push({
-               tool_call_id: toolCall.id,
-               content: JSON.stringify( result )
-             });
-           }
-           catch ( toolError )
-           {
-             console.error( `Error executing tool "${toolCall.name}":`, toolError.message );
-             toolResults.push({
-               tool_call_id: toolCall.id,
-               content: `Error: ${toolError.message}`
-             });
-           }
-         }
-         else
-         {
-           console.warn( `Unknown tool: ${toolCall.name}` );
-         }
+         const toolResult = await executeTool( toolCall );
+         toolResults.push( toolResult );
        }
 
        if ( toolResults.length > 0 )
@@ -116,13 +141,14 @@ async function main ()
            return {
              role: "tool",
              content: tr.content,
-             tool_call_id: tr.tool_call_id
+             tool_call_id: tr.tool_call_id,
+             name: tr.name
            }
          })
        ];
        const finalResponse = await llm.chatCompletion( updatedMessages, {
          temperature: 0,
-         tools: [multiplyTool, weatherTool]
+         tools
        });
        console.log( "Final response after tool execution:", finalResponse.content || finalResponse );
      }