unified-ai-router 3.5.0 → 3.5.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/.env.example CHANGED
@@ -1,8 +1,8 @@
1
+ PORT=3000
1
2
  GEMINI_API_KEY=API_KEY
2
3
  OPENROUTER_API_KEY=API_KEY
3
4
  ZAI_API_KEY=API_KEY
4
5
  GROK_API_KEY=API_KEY
5
6
  QROQ_API_KEY=API_KEY
6
7
  CEREBRAS_API_KEY=API_KEY
7
- LLM7_API_KEY=API_KEY
8
- PORT=3000
8
+ LLM7_API_KEY=API_KEY
package/.github/workflows/npm-publish.yml ADDED
@@ -0,0 +1,32 @@
1
+ # This workflow will run tests using node and then publish a package to GitHub Packages when a release is created
2
+ # For more information see: https://docs.github.com/en/actions/publishing-packages/publishing-nodejs-packages
3
+
4
+ name: Node.js Package
5
+
6
+ on:
7
+ release:
8
+ types: [created]
9
+
10
+ jobs:
11
+ build:
12
+ runs-on: ubuntu-latest
13
+ steps:
14
+ - uses: actions/checkout@v4
15
+ - uses: actions/setup-node@v4
16
+ with:
17
+ node-version: 24
18
+ - run: npm ci
19
+
20
+ publish-npm:
21
+ needs: build
22
+ runs-on: ubuntu-latest
23
+ steps:
24
+ - uses: actions/checkout@v4
25
+ - uses: actions/setup-node@v4
26
+ with:
27
+ node-version: 24
28
+ registry-url: https://registry.npmjs.org/
29
+ - run: npm ci
30
+ - run: npm publish
31
+ env:
32
+ NODE_AUTH_TOKEN: ${{secrets.NPM_TOKEN}}
package/Agents.md ADDED
@@ -0,0 +1,10 @@
1
+ # AGENTS.md
2
+
3
+ * Always write code that is simple and clear.
4
+ * Avoid tricky one-liners.
5
+ * Refactor if a solution can be simpler, cleaner, or more elegant.
6
+ * Prefer short, readable, and straightforward solutions.
7
+ * Keep logic easy to follow.
8
+ * Use meaningful names.
9
+ * Readable and beautiful
10
+ * If you made changes, update package.json to reflect them.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "unified-ai-router",
3
- "version": "3.5.0",
3
+ "version": "3.5.4",
4
4
  "description": "A unified interface for multiple LLM providers with automatic fallback. This project includes an OpenAI-compatible server and a deployable Telegram bot with a Mini App interface. It supports major providers like OpenAI, Google, Grok, and more, ensuring reliability and flexibility for your AI applications.",
5
5
  "license": "ISC",
6
6
  "author": "mlibre",
@@ -40,17 +40,17 @@
40
40
  "cerebras"
41
41
  ],
42
42
  "dependencies": {
43
- "axios": "^1.11.0",
43
+ "axios": "^1.13.2",
44
44
  "cors": "^2.8.5",
45
- "dotenv": "^17.2.1",
46
- "eslint": "^9.33.0",
47
- "express": "^5.1.0",
48
- "openai": "^6.1.0",
45
+ "dotenv": "^17.2.3",
46
+ "eslint": "^9.39.2",
47
+ "express": "^5.2.1",
48
+ "openai": "^6.15.0",
49
49
  "opossum": "^9.0.0",
50
- "pino": "^9.9.0",
51
- "pino-pretty": "^13.1.1"
50
+ "pino": "^10.1.0",
51
+ "pino-pretty": "^13.1.3"
52
52
  },
53
53
  "devDependencies": {
54
- "vitepress": "^2.0.0-alpha.12"
54
+ "vitepress": "^2.0.0-alpha.15"
55
55
  }
56
56
  }
package/provider.js CHANGED
@@ -1,10 +1,21 @@
1
1
  module.exports = [
2
+ {
3
+ name: "qroq",
4
+ apiKey: [
5
+ process.env.QROQ_API_KEY,
6
+ process.env.QROQ_API_KEY_2,
7
+ process.env.QROQ_API_KEY_3
8
+ ],
9
+ model: "openai/gpt-oss-120b",
10
+ apiUrl: "https://api.groq.com/openai/v1",
11
+ },
2
12
  {
3
13
  name: "gemini_pro",
4
14
  apiKey: [
5
15
  process.env.GEMINI_API_KEY,
6
16
  process.env.GEMINI_API_KEY_2,
7
17
  process.env.GEMINI_API_KEY_3,
18
+ process.env.GEMINI_API_KEY_4,
8
19
  ],
9
20
  model: "gemini-2.5-pro",
10
21
  apiUrl: "https://generativelanguage.googleapis.com/v1beta/openai/",
@@ -14,12 +25,19 @@ module.exports = [
14
25
  apiKey: [
15
26
  process.env.CEREBRAS_API_KEY,
16
27
  process.env.CEREBRAS_API_KEY_2,
28
+ process.env.CEREBRAS_API_KEY_3,
17
29
  ],
18
30
  model: "gpt-oss-120b",
19
31
  apiUrl: "https://api.cerebras.ai/v1",
20
32
  },
21
33
  {
22
- name: "openrouter_qwen",
34
+ name: "llm7",
35
+ apiKey: process.env.LLM7_API_KEY,
36
+ model: "deepseek-v3.1",
37
+ apiUrl: "https://api.llm7.io/v1",
38
+ },
39
+ {
40
+ name: "openrouter_minimax",
23
41
  apiKey: [
24
42
  process.env.OPENROUTER_API_KEY,
25
43
  process.env.OPENROUTER_API_KEY_2,
@@ -28,11 +46,11 @@ module.exports = [
28
46
  process.env.OPENROUTER_API_KEY_5,
29
47
  process.env.OPENROUTER_API_KEY_6
30
48
  ],
31
- model: "qwen/qwen3-coder:free",
49
+ model: "minimax/minimax-m2:free",
32
50
  apiUrl: "https://openrouter.ai/api/v1",
33
51
  },
34
52
  {
35
- name: "openrouter_glm",
53
+ name: "openrouter_qwen",
36
54
  apiKey: [
37
55
  process.env.OPENROUTER_API_KEY,
38
56
  process.env.OPENROUTER_API_KEY_2,
@@ -41,17 +59,16 @@ module.exports = [
41
59
  process.env.OPENROUTER_API_KEY_5,
42
60
  process.env.OPENROUTER_API_KEY_6
43
61
  ],
44
- model: "z-ai/glm-4.5-air:free",
62
+ model: "qwen/qwen3-coder:free",
45
63
  apiUrl: "https://openrouter.ai/api/v1",
46
64
  },
47
65
  {
48
- name: "qroq",
66
+ name: "github",
49
67
  apiKey: [
50
- process.env.QROQ_API_KEY,
51
- process.env.QROQ_API_KEY_2
68
+ process.env.GITHUB_API_KEY_1,
52
69
  ],
53
- model: "openai/gpt-oss-120b",
54
- apiUrl: "https://api.groq.com/openai/v1",
70
+ model: "openai/gpt-4.1",
71
+ apiUrl: "https://models.github.ai/inference",
55
72
  },
56
73
  {
57
74
  name: "gemini_flash",
@@ -59,20 +76,31 @@ module.exports = [
59
76
  process.env.GEMINI_API_KEY,
60
77
  process.env.GEMINI_API_KEY_2,
61
78
  process.env.GEMINI_API_KEY_3,
79
+ process.env.GEMINI_API_KEY_4,
62
80
  ],
63
81
  model: "gemini-2.5-flash",
64
82
  apiUrl: "https://generativelanguage.googleapis.com/v1beta/openai/",
65
83
  },
84
+ {
85
+ name: "openrouter_glm",
86
+ apiKey: [
87
+ process.env.OPENROUTER_API_KEY,
88
+ process.env.OPENROUTER_API_KEY_2,
89
+ process.env.OPENROUTER_API_KEY_3,
90
+ process.env.OPENROUTER_API_KEY_4,
91
+ process.env.OPENROUTER_API_KEY_5,
92
+ process.env.OPENROUTER_API_KEY_6
93
+ ],
94
+ model: "z-ai/glm-4.5-air:free",
95
+ apiUrl: "https://openrouter.ai/api/v1",
96
+ },
66
97
  {
67
98
  name: "z.ai",
68
- apiKey: process.env.ZAI_API_KEY,
99
+ apiKey: [
100
+ process.env.ZAI_API_KEY,
101
+ process.env.ZAI_API_KEY_2
102
+ ],
69
103
  model: "glm-4.5-flash",
70
104
  apiUrl: "https://api.z.ai/api/paas/v4",
71
- },
72
- {
73
- name: "llm7", // does not support tool calling
74
- apiKey: process.env.LLM7_API_KEY,
75
- model: "gpt-o4-mini-2025-04-16",
76
- apiUrl: "https://api.llm7.io/v1",
77
- },
105
+ }
78
106
  ];
package/readme.md CHANGED
@@ -3,7 +3,7 @@
3
3
  Unified AI Router is a comprehensive toolkit for AI applications, featuring:
4
4
 
5
5
  - An **OpenAI-compatible** server for seamless API integration
6
- - A **unified interface** for multiple LLM providers with **automatic fallback**
6
+ - A **Unified Interface** for multiple LLM providers with **Automatic Fallback**
7
7
 
8
8
  It supports all the OpenAI-compatible servers, including major providers like OpenAI, Google, Grok, Litellm, Vllm, Ollama and more, ensuring reliability and flexibility.
9
9
 
@@ -13,6 +13,7 @@ It supports all the OpenAI-compatible servers, including major providers like Op
13
13
  - [📚 Basic Library Usage](#-basic-library-usage)
14
14
  - [🔌 OpenAI-Compatible Server](#-openai-compatible-server)
15
15
  - [🧪 Testing](#-testing)
16
+ - [🌐 Deploying to Render.com](#-deploying-to-rendercom)
16
17
  - [🔧 Supported Providers](#-supported-providers)
17
18
  - [🔑 API Keys](#-api-keys)
18
19
  - [📁 Project Structure](#-project-structure)
@@ -40,7 +41,40 @@ npm i
40
41
 
41
42
  ## 📖 Usage
42
43
 
43
- ### 📚 Basic Library Usage
44
+ ### 🔌 OpenAI-Compatible Server
45
+
46
+ The OpenAI-compatible server provides a drop-in replacement for the OpenAI API. It routes requests through the unified router with fallback logic, ensuring high availability.
47
+ The server uses the provider configurations defined in [provider.js](provider.js) file, and requires API keys set in a `.env` file.
48
+
49
+ 1. Prepare `.env`:
50
+
51
+ ```bash
52
+ cp .env.example .env
53
+ nano .env
54
+ ```
55
+
56
+ 1. Configure your providers in `provider.js`. Add new provider or modify existing ones with the appropriate `name`, `apiKey`, `model`, and `apiUrl` for the providers you want to use.
57
+
58
+ ```bash
59
+ nano provider.js
60
+ ```
61
+
62
+ To start the server locally, run:
63
+
64
+ ```bash
65
+ npm start
66
+ ```
67
+
68
+ The server listens at `http://localhost:3000/` and supports the following OpenAI-compatible endpoints:
69
+
70
+ - `POST /v1/chat/completions` - Chat completions (streaming and non-streaming)
71
+ - `POST /chat/completions` - Chat completions (streaming and non-streaming)
72
+ - `GET /v1/models` - List available models
73
+ - `GET /models` - List available models
74
+ - `GET /health` - Health check
75
+ - `GET /v1/providers/status` - Check the status of all configured providers
76
+
77
+ ### 📚 Library Usage
44
78
 
45
79
  This is the core AIRouter library - a JavaScript class that provides a unified interface for multiple LLM providers.
46
80
 
@@ -90,39 +124,9 @@ const providers = [
90
124
  ];
91
125
  ```
92
126
 
93
- ### 🔌 OpenAI-Compatible Server
94
-
95
- The OpenAI-compatible server provides a drop-in replacement for the OpenAI API. It routes requests through the unified router with fallback logic, ensuring high availability.
96
- The server uses the provider configurations defined in [provider.js](provider.js) file, and requires API keys set in a `.env` file.
97
-
98
- 1. Copy the example environment file:
99
-
100
- ```bash
101
- cp .env.example .env
102
- ```
103
-
104
- 2. Edit `.env` and add your API keys for the desired providers (see [๐Ÿ”‘ API Keys](#-api-keys) for sources).
105
-
106
- 3. Configure your providers in `provider.js`. Add new provider or modify existing ones with the appropriate `name`, `apiKey`, `model`, and `apiUrl` for the providers you want to use.
107
-
108
- To start the server locally, run:
109
-
110
- ```bash
111
- npm start
112
- ```
113
-
114
- The server listens at `http://localhost:3000/` and supports the following OpenAI-compatible endpoints:
115
-
116
- - `POST /v1/chat/completions` - Chat completions (streaming and non-streaming)
117
- - `POST /chat/completions` - Chat completions (streaming and non-streaming)
118
- - `GET /v1/models` - List available models
119
- - `GET /models` - List available models
120
- - `GET /health` - Health check
121
- - `GET /v1/providers/status` - Check the status of all configured providers
122
-
123
127
  ### 🧪 Testing
124
128
 
125
- The project includes tests for the core library and the OpenAI-compatible server. To run the tests, use the following commands:
129
+ The project includes tests for the core library and the OpenAI-compatible server.
126
130
 
127
131
  ```bash
128
132
  # Test chat completion