unified-ai-router 3.0.0 → 3.0.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,38 @@
+ meta {
+   name: models
+   type: http
+   seq: 5
+ }
+
+ get {
+   url: {{address}}/models
+   body: json
+   auth: none
+ }
+
+ headers {
+   Content-Type: application/json
+   Authorization: {{token}}
+ }
+
+ body:json {
+   {
+     "messages": [
+       {
+         "role": "system",
+         "content": "You are a helpful assistant."
+       },
+       {
+         "role": "user",
+         "content": "Hello, say two words only."
+       }
+     ],
+     "model": "x-ai/grok-4-fast:free",
+     "temperature": 0,
+     "stream": true
+   }
+ }
+
+ settings {
+   encodeUrl: false
+ }
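For readers without Bruno, roughly the same request as plain Node. This is a sketch: `address` and `token` stand in for the collection's `{{address}}` and `{{token}}` variables, and the `body:json` block is omitted because `fetch` does not allow a body on GET requests.

```js
// Rough equivalent of the Bruno "models" request above (Node 18+ global fetch).
// ADDRESS and TOKEN are placeholders mirroring {{address}} and {{token}}.
const address = process.env.ADDRESS;
const token = process.env.TOKEN;

fetch( `${address}/models`, {
  headers: {
    "Content-Type": "application/json",
    "Authorization": token
  }
})
  .then( ( res ) => { return res.json() })
  .then( ( body ) => { console.log( body ) });
```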
package/main.js CHANGED
@@ -134,6 +134,43 @@ class AIRouter
      }
      throw new Error( `All providers failed. Last error: ${lastError.message}` );
    }
+   async getModels ()
+   {
+     const models = [];
+     for ( const provider of this.providers )
+     {
+       if ( !provider.apiKey )
+       {
+         logger.warn( `Skipping provider ${provider.name} due to missing API key` );
+         continue;
+       }
+       try
+       {
+         logger.info( `Fetching models for provider: ${provider.name}` );
+         const client = new OpenAI({
+           apiKey: provider.apiKey,
+           baseURL: provider.apiUrl,
+           timeout: 60000,
+         });
+         const listResponse = await client.models.list();
+         const modelList = listResponse.data && listResponse.data.length > 0 ? listResponse.data : listResponse.body || [];
+         const model = modelList.find( m => { return m.id === provider.model || m.id === `models/${provider.model}` });
+         if ( model )
+         {
+           models.push( model );
+         }
+         else
+         {
+           logger.warn( `Model ${provider.model} not found in provider ${provider.name}` );
+         }
+       }
+       catch ( error )
+       {
+         logger.error( `Failed to list models for provider ${provider.name}: ${error.message}` );
+       }
+     }
+     return models;
+   }
  }

  module.exports = AIRouter;
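The new `getModels()` walks the same provider list used for fallback, queries each provider's models endpoint via the OpenAI SDK, and keeps only the entry matching that provider's configured model. A minimal usage sketch, assuming the package entry point exports the `AIRouter` class and that `provider.js` is requirable from the package root (both assumptions, not shown in this diff):

```js
// Sketch: list the models the router is configured to use.
// Assumes provider API keys are present in the environment (see the readme).
const AIRouter = require( "unified-ai-router" );
const providers = require( "unified-ai-router/provider" );

const router = new AIRouter( providers );

router.getModels().then( ( models ) =>
{
  // Providers without an API key are skipped; listing failures are logged, not thrown.
  console.log( models.map( ( m ) => { return m.id }) );
});
```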
@@ -1,26 +1,19 @@
  const express = require( "express" );
  const cors = require( "cors" );
- const AIRouter = require( "../main" ); // your existing class
+ const AIRouter = require( "../main" );
  const pino = require( "pino" );
  const pretty = require( "pino-pretty" );
- const stream = pretty({ colorize: true, ignore: "pid,hostname" });
- const logger = pino({ base: false }, stream );
+ const pinoStream = pretty({ colorize: true, ignore: "pid,hostname" });
+ const logger = pino({ base: false }, pinoStream );
  require( "dotenv" ).config({ quiet: true });

  const app = express();
  app.use( cors() );
  app.use( express.json() );

- /**
-  * Initialize router with providers (could load from env/config)
-  */
  const providers = require( "../provider" )
-
  const aiRouter = new AIRouter( providers );

- /**
-  * OpenAI-compatible endpoint: POST /v1/chat/completions
-  */
  app.post( "/v1/chat/completions", async ( req, res ) =>
  {
    const { messages, model, stream, ...rest } = req.body;
@@ -72,7 +65,20 @@ app.post( "/v1/chat/completions", async ( req, res ) =>
    }
  });

- // Health check
+ app.get( "/v1/models", async ( req, res ) =>
+ {
+   try
+   {
+     const models = await aiRouter.getModels();
+     res.json({ data: models });
+   }
+   catch ( error )
+   {
+     logger.error( `Error in /v1/models: ${error.message}` );
+     res.status( 500 ).json({ error: { message: error.message } });
+   }
+ });
+
  app.get( "/health", ( req, res ) => { return res.json({ status: "ok" }) });

  // Start server
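The new `/v1/models` route simply wraps `aiRouter.getModels()`. A quick way to exercise it, assuming the server was started with `npm start` on the default port 3000:

```js
// Sketch: fetch the model list from a locally running server (Node 18+).
fetch( "http://localhost:3000/v1/models" )
  .then( ( res ) => { return res.json() })
  .then( ({ data }) => { console.log( data.map( ( m ) => { return m.id }) ) });
```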
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "unified-ai-router",
-   "version": "3.0.0",
+   "version": "3.0.3",
    "description": "A unified interface for multiple LLM providers with automatic fallback. This project includes an OpenAI-compatible server and a deployable Telegram bot with a Mini App interface. It supports major providers like OpenAI, Google, Grok, and more, ensuring reliability and flexibility for your AI applications.",
    "license": "ISC",
    "author": "mlibre",
@@ -55,4 +55,4 @@
    "devDependencies": {
      "vercel": "^45.0.9"
    }
- }
+ }
package/provider.js CHANGED
@@ -1,5 +1,4 @@
  module.exports = [
-
    {
      name: "openrouter",
      apiKey: process.env.OPENROUTER_API_KEY,
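The hunk above only shows the first fields of an entry; per the readme's setup steps (further down in this diff) each provider needs `name`, `apiKey`, `model`, and `apiUrl`. A hypothetical complete file with one entry — the `model` id is taken from the Bruno request above, and the `apiUrl` is OpenRouter's public OpenAI-compatible base URL, neither confirmed by this diff:

```js
// Sketch of a complete provider.js with a single provider entry.
module.exports = [
  {
    name: "openrouter",
    apiKey: process.env.OPENROUTER_API_KEY,   // key read from .env
    model: "x-ai/grok-4-fast:free",           // model id this provider should serve
    apiUrl: "https://openrouter.ai/api/v1"    // OpenAI-compatible base URL
  }
];
```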
package/readme.md CHANGED
@@ -1,31 +1,37 @@
  # Unified AI Router

- A unified interface for multiple LLM providers with automatic fallback. This project includes an OpenAI-compatible server and a deployable Telegram bot with a Mini App interface. It supports major providers like OpenAI, Google, Grok, and more, ensuring reliability and flexibility for your AI applications.
-
- * [🚀 Features](#-features)
- * [🛠️ Installation](#️-installation)
- * [📖 Usage](#-usage)
-   * [📚 Basic Library Usage](#-basic-library-usage)
-   * [🤖 OpenAI-Compatible Server](#-openai-compatible-server)
- * [🧪 Testing](#-testing)
- * [🔧 Supported Providers](#-supported-providers)
- * [🔑 API Keys](#-api-keys)
- * [🔼 Vercel Deployment (Telegram Bot)](#-vercel-deployment-telegram-bot)
-   * [📋 Prerequisites](#-prerequisites)
-   * [🚀 Deployment Steps](#-deployment-steps)
-   * [📱 Enable Telegram Mini App](#-enable-telegram-mini-app)
- * [📁 Project Structure](#-project-structure)
- * [📄 License](#-license)
+ Unified AI Router is a comprehensive toolkit for AI applications, featuring:
+
+ - A unified interface for multiple LLM providers with automatic fallback (the core router library)
+ - An OpenAI-compatible server for seamless API integration
+ - A deployable Telegram bot with a Mini App interface
+
+ It supports major providers like OpenAI, Google, Grok, and more, ensuring reliability and flexibility.
+
+ - [🚀 Features](#-features)
+ - [🛠️ Installation](#️-installation)
+ - [📖 Usage](#-usage)
+   - [📚 Basic Library Usage](#-basic-library-usage)
+   - [🔌 OpenAI-Compatible Server](#-openai-compatible-server)
+ - [🧪 Testing](#-testing)
+ - [🔧 Supported Providers](#-supported-providers)
+ - [🔑 API Keys](#-api-keys)
+ - [🔼 Vercel Deployment (Telegram Bot)](#-vercel-deployment-telegram-bot)
+   - [📋 Prerequisites](#-prerequisites)
+   - [🚀 Deployment Steps](#-deployment-steps)
+   - [📱 Enable Telegram Mini App](#-enable-telegram-mini-app)
+ - [📁 Project Structure](#-project-structure)
+ - [📄 License](#-license)

  ## 🚀 Features

- * **Multi-Provider Support**: Works with OpenAI, Google, Grok, OpenRouter, Z.ai, Qroq, Cohere, Vercel, Cerebras, and LLM7
- * **Automatic Fallback**: If one provider fails, automatically tries the next
- * **Simple API**: Easy to use interface for all supported providers
- * **OpenAI-Compatible Server**: Drop-in replacement for OpenAI API
- * **Streaming and Non-Streaming**: Supports both streaming and non-streaming responses
- * **Tool Support**: Use tools with your LLM calls
- * **Telegram Bot**: Deployable as a Telegram bot with a Mini App interface
+ - **Multi-Provider Support**: Works with OpenAI, Google, Grok, OpenRouter, Z.ai, Qroq, Cohere, Vercel, Cerebras, and LLM7
+ - **Automatic Fallback**: If one provider fails, automatically tries the next
+ - **Simple API**: Easy-to-use interface for all supported providers
+ - **OpenAI-Compatible Server**: Drop-in replacement for the OpenAI API, enabling easy integration with existing tools and clients
+ - **Streaming and Non-Streaming Support**: Handles both streaming and non-streaming responses
+ - **Tool Calling**: Full support for tools in LLM interactions
+ - **Telegram Bot Integration**: Deployable as a Telegram bot with an interactive Mini App interface

  ## 🛠️ Installation

@@ -76,17 +82,30 @@ const response = await llm.chatCompletion(messages, {
  console.log(response);
  ```

- ### 🤖 OpenAI-Compatible Server
+ ### 🔌 OpenAI-Compatible Server

- The project includes an OpenAI-compatible server that can be used as a drop-in replacement for the OpenAI API.
+ The OpenAI-compatible server provides a drop-in replacement for the OpenAI API. It routes requests through the unified router with fallback logic, ensuring high availability.
+ The server uses the provider configurations defined in [provider.js](provider.js) and requires API keys set in a `.env` file.

- To start the server, run:
+ #### Setup
+
+ 1. Copy the example environment file:
+
+    ```bash
+    cp .env.example .env
+    ```
+
+ 2. Configure your providers in `provider.js`. Add new providers or modify existing ones with the appropriate `name`, `apiKey` (referencing the corresponding env variable), `model`, and `apiUrl`.
+
+ 3. Edit `.env` and add your API keys for the desired providers (see [🔑 API Keys](#-api-keys) for sources).
+
+ To start the server locally, run:

  ```bash
  npm start
  ```

- The server will be available at `http://localhost:3000/v1/chat/completions`.
+ The server listens at `http://localhost:3000` and supports standard OpenAI endpoints such as `/v1/chat/completions` and `/v1/models`.

  ### 🧪 Testing

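To see the drop-in claim from the server section above in practice, a sketch pointing the official `openai` Node client at the local server. The `apiKey` value is a placeholder (the real provider keys live in the server's `.env`), and the model id is reused from the Bruno request earlier in this diff:

```js
// Sketch: use the official openai Node client against the local server.
const OpenAI = require( "openai" );

const client = new OpenAI({ baseURL: "http://localhost:3000/v1", apiKey: "unused" });

async function main ()
{
  const completion = await client.chat.completions.create({
    model: "x-ai/grok-4-fast:free",
    messages: [{ role: "user", content: "Hello, say two words only." }]
  });
  console.log( completion.choices[0].message.content );
}

main();
```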
@@ -108,32 +127,32 @@ node tests/tools.js

  ## 🔧 Supported Providers

- * OpenAI
- * Google Gemini
- * Grok
- * OpenRouter
- * Z.ai
- * Qroq
- * Cohere
- * Vercel
- * Cerebras
- * LLM7
+ - OpenAI
+ - Google Gemini
+ - Grok
+ - OpenRouter
+ - Z.ai
+ - Qroq
+ - Cohere
+ - Vercel
+ - Cerebras
+ - LLM7

  ## 🔑 API Keys

  Get your API keys from the following providers:

- * **OpenAI**: [platform.openai.com/api-keys](https://platform.openai.com/api-keys)
- * **Google Gemini**: [aistudio.google.com/app/apikey](https://aistudio.google.com/app/apikey)
- * **Grok**: [console.x.ai](https://console.x.ai/)
- * **OpenRouter**: [openrouter.ai/keys](https://openrouter.ai/keys)
- * **Z.ai**: [api.z.ai](https://api.z.ai)
- * **Qroq**: [console.groq.com/keys](https://console.groq.com/keys)
- * **Cohere**: [dashboard.cohere.com/api-keys](https://dashboard.cohere.com/api-keys)
- * **Vercel**: [vercel.com/docs/ai/ai-gateway](https://vercel.com/masoud-ghorbanzadehs-projects/~/ai/api-keys)
- * **Cerebras**: [cloud.cerebras.ai](https://cloud.cerebras.ai)
- * **LLM7**: [token.llm7.io](https://token.llm7.io/)
-   * Seems like it does not support tool calling
+ - **OpenAI**: [platform.openai.com/api-keys](https://platform.openai.com/api-keys)
+ - **Google Gemini**: [aistudio.google.com/app/apikey](https://aistudio.google.com/app/apikey)
+ - **Grok**: [console.x.ai](https://console.x.ai/)
+ - **OpenRouter**: [openrouter.ai/keys](https://openrouter.ai/keys)
+ - **Z.ai**: [api.z.ai](https://api.z.ai)
+ - **Qroq**: [console.groq.com/keys](https://console.groq.com/keys)
+ - **Cohere**: [dashboard.cohere.com/api-keys](https://dashboard.cohere.com/api-keys)
+ - **Vercel AI Gateway**: [vercel.com/docs/ai/ai-gateway](https://vercel.com/docs/ai-gateway)
+ - **Cerebras**: [cloud.cerebras.ai](https://cloud.cerebras.ai)
+ - **LLM7**: [token.llm7.io](https://token.llm7.io/)
+   - Does not appear to support tool calling

  ## 🔼 Vercel Deployment (Telegram Bot)

@@ -141,9 +160,9 @@ This section describes how to deploy the AIRouter as a Telegram bot using Vercel

  ### 📋 Prerequisites

- * A Telegram Bot Token (@BotFather)
- * API keys for various AI providers
- * Vercel account
+ - A Telegram Bot Token (@BotFather)
+ - API keys for various AI providers
+ - Vercel account

  ### 🚀 Deployment Steps

@@ -196,16 +215,16 @@ curl "https://ai-router-flame.vercel.app/api?register_webhook=true"

  After deploying the bot, you need to configure the Telegram Mini App and menu button:
  1. **Configure Mini App:**
-    * Go to [@BotFather](https://t.me/botfather)
-    * Send `/mybots` and select your bot
-    * Go to `Bot Settings` → `Configure Mini App`
-    * Set the Mini App URL to: `https://ai-router-flame.vercel.app`
+    - Go to [@BotFather](https://t.me/botfather)
+    - Send `/mybots` and select your bot
+    - Go to `Bot Settings` → `Configure Mini App`
+    - Set the Mini App URL to: `https://ai-router-flame.vercel.app`

  2. **Configure Menu Button:**
-    * Go to [@BotFather](https://t.me/botfather)
-    * Send `/mybots` and select your bot
-    * Go to `Bot Settings` → `Menu Button`
-    * Ensure the URL shown is: `https://ai-router-flame.vercel.app`
+    - Go to [@BotFather](https://t.me/botfather)
+    - Send `/mybots` and select your bot
+    - Go to `Bot Settings` → `Menu Button`
+    - Ensure the URL shown is: `https://ai-router-flame.vercel.app`

  Once configured, users can access the Mini App by sending `/start` or `/app` to your bot, or through the menu button.

@@ -213,17 +232,17 @@ An example of a deployed bot is accessible on Telegram: [https://t.me/freePulseA

  ## 📁 Project Structure

- * `main.js` - The core AIRouter library.
- * `provider.js` - A list of supported AI providers.
- * `openai-compatible-server/index.js` - An OpenAI-compatible server.
- * `tests/` - A suite of tests for the library and server.
- * `bruno/` - A Bruno collection for API testing.
- * `vercel-project/` - A Vercel project for deploying a Telegram bot.
-   * `api/index.js` - Main webhook handler for the Telegram bot.
-   * `api/search.js` - A search proxy for the Telegram bot.
-   * `public/` - The frontend for the Telegram Mini App.
-   * `src/config.js` - Configuration for the Telegram bot.
-   * `src/telegram.js` - Telegram client implementation.
+ - `main.js` - Core AIRouter library implementing the unified interface and fallback logic
+ - `provider.js` - Configuration for supported AI providers
+ - `openai-compatible-server/index.js` - OpenAI-compatible API server
+ - `tests/` - Comprehensive tests for the library, server, and tools
+ - `bruno/` - Bruno API collection for testing endpoints
+ - `vercel-project/` - Ready-to-deploy Vercel setup for the Telegram bot
+   - `api/index.js` - Telegram webhook handler
+   - `api/search.js` - Search proxy endpoint
+   - `public/` - Mini App frontend (HTML, CSS, JS)
+   - `src/config.js` - Bot configuration
+   - `src/telegram.js` - Telegram API integration

  ## 📄 License

package/legacy/main.js DELETED
@@ -1,63 +0,0 @@
- const { ChatOpenAI } = require( "@langchain/openai" );
- const pino = require( "pino" );
- const pretty = require( "pino-pretty" );
- const stream = pretty({ colorize: true, ignore: "pid,hostname" });
- const logger = pino({ base: false }, stream );
-
- class AIRouter
- {
-   constructor ( providers )
-   {
-     this.providers = providers;
-   }
-
-   async chatCompletion ( messages, options = {}, stream = false )
-   {
-     const { stream: streamOption, tools, model, ...restOptions } = options;
-     const isStreaming = stream || streamOption;
-
-     logger.info( `Starting chatCompletion with ${this.providers.length} providers (streaming: ${isStreaming})` );
-     let lastError;
-
-     for ( const provider of this.providers )
-     {
-       try
-       {
-         logger.info( `Attempting with provider: ${provider.name}` );
-         let llm = new ChatOpenAI({
-           apiKey: provider.apiKey,
-           model: provider.model,
-           configuration: {
-             baseURL: provider.apiUrl,
-           },
-           ...restOptions,
-         });
-
-         if ( tools && tools.length > 0 )
-         {
-           llm = llm.bindTools( tools );
-         }
-
-         if ( isStreaming )
-         {
-           const stream = await llm.stream( messages );
-           return stream;
-         }
-         else
-         {
-           const response = await llm.invoke( messages, { timeout: 60000 });
-           return response;
-         }
-       }
-       catch ( error )
-       {
-         lastError = error;
-         logger.error( `Failed with ${provider.name}:${error.message}` );
-         // Continue to next provider
-       }
-     }
-     throw new Error( `All providers failed. Last error: ${lastError.message}` );
-   }
- }
-
- module.exports = AIRouter;
@@ -1,192 +0,0 @@
- const express = require( "express" );
- const cors = require( "cors" );
- const AIRouter = require( "../main" ); // your existing class
- const pino = require( "pino" );
- const pretty = require( "pino-pretty" );
- const stream = pretty({ colorize: true, ignore: "pid,hostname" });
- const logger = pino({ base: false }, stream );
- require( "dotenv" ).config({ quiet: true });
-
- const app = express();
- app.use( cors() );
- app.use( express.json() );
-
- /**
-  * Initialize router with providers (could load from env/config)
-  */
- const providers = require( "../provider" )
-
- const aiRouter = new AIRouter( providers );
-
- /**
-  * OpenAI-compatible endpoint: POST /v1/chat/completions
-  */
- app.post( "/v1/chat/completions", async ( req, res ) =>
- {
-   try
-   {
-     const { messages, model, stream, ...rest } = req.body;
-
-     if ( !messages || !Array.isArray( messages ) )
-     {
-       return res.status( 400 ).json({ error: { message: "messages must be an array" } });
-     }
-
-     if ( stream )
-     {
-       // Streaming mode → use Server-Sent Events (SSE)
-       res.setHeader( "Content-Type", "text/event-stream" );
-       res.setHeader( "Cache-Control", "no-cache" );
-       res.setHeader( "Connection", "keep-alive" );
-
-       try
-       {
-         const response = await aiRouter.chatCompletion( messages, { model, ...rest }, true );
-         const id = `chatcmpl-${Date.now()}`;
-         const created = Math.floor( Date.now() / 1000 );
-         let fullResponse = null;
-         for await ( const chunk of response )
-         {
-           const modelName = chunk?.response_metadata?.model_name || model || "unknown";
-           const systemFingerprint = chunk?.response_metadata?.system_fingerprint || null;
-           let delta = { ... chunk.delta || { content: chunk.content || "" } };
-           if ( !delta.role ) delta.role = "assistant";
-           delta.reasoning = delta.reasoning || null;
-           delta.reasoning_details = delta.reasoning_details || [];
-           let toolCallsDelta = null;
-           if ( chunk.tool_calls && chunk.tool_calls.length > 0 )
-           {
-             toolCallsDelta = chunk.tool_calls.map( ( tc, index ) =>
-             {
-               return {
-                 id: tc.id || `call_${Date.now()}_${Math.random().toString( 36 ).substr( 2, 9 )}`,
-                 type: "function",
-                 index,
-                 function: {
-                   name: tc.name,
-                   arguments: JSON.stringify( tc.args || {})
-                 }
-               };
-             });
-             delta.tool_calls = toolCallsDelta;
-             delta.content = "";
-           }
-           const chunkFinishReason = delta.finish_reason || chunk?.response_metadata?.finish_reason || null;
-           const chunkNativeFinishReason = delta.native_finish_reason || chunk?.response_metadata?.native_finish_reason || chunkFinishReason || null;
-           if ( chunk.content && !fullResponse ) fullResponse = chunk; // Capture full for reasoning if available
-           const payload = {
-             id,
-             provider: "OpenAI",
-             object: "chat.completion.chunk",
-             created,
-             model: modelName,
-             system_fingerprint: systemFingerprint,
-             choices: [
-               {
-                 logprobs: null,
-                 delta,
-                 index: 0,
-                 finish_reason: chunkFinishReason,
-                 native_finish_reason: chunkNativeFinishReason,
-               },
-             ],
-           };
-           const usage = chunk?.response_metadata?.usage;
-           if ( usage && typeof usage === "object" && !Array.isArray( usage ) && Object.keys( usage ).length > 0 )
-           {
-             payload.usage = chunk?.response_metadata?.usage || null;
-           }
-           res.write( `data: ${JSON.stringify( payload )}\n\n` );
-         }
-         // Send done signal
-         res.write( "data: [DONE]\n\n" );
-         res.end();
-       }
-       catch ( err )
-       {
-         logger.error( err );
-         res.write( `data: ${JSON.stringify({ error: err.message })}\n\n` );
-         res.write( "data: [DONE]\n\n" );
-         res.end();
-       }
-     }
-     else
-     {
-       // Non-streaming → return one-shot completion
-       const response = await aiRouter.chatCompletion( messages, { model, ...rest }, false );
-       let reasoning = null;
-       let refusal = null;
-       let toolCalls = null;
-       if ( response.contentBlocks )
-       {
-         const reasoningBlocks = response.contentBlocks.filter( b => { return b.type === "reasoning" || b.type === "thinking" });
-         reasoning = reasoningBlocks.length > 0 ? reasoningBlocks.map( b => { return b.text }).join( "\n" ) : null;
-         const refusalBlocks = response.contentBlocks.filter( b => { return b.type === "refusal" });
-         refusal = refusalBlocks.length > 0 ? refusalBlocks.map( b => { return b.text }).join( "\n" ) : null;
-       }
-       if ( response.tool_calls && response.tool_calls.length > 0 )
-       {
-         toolCalls = response.tool_calls.map( ( tc, index ) =>
-         {
-           return {
-             id: tc.id || `call_${Date.now()}_${Math.random().toString( 36 ).substr( 2, 9 )}`,
-             type: "function",
-             index,
-             function: {
-               name: tc.name,
-               arguments: JSON.stringify( tc.args || {})
-             }
-           };
-         });
-       }
-       const systemFingerprint = response.response_metadata?.system_fingerprint || null;
-       const finishReason = response.response_metadata?.finish_reason || null;
-       const nativeFinishReason = response.response_metadata?.native_finish_reason || finishReason || null;
-       const messageContent = toolCalls && toolCalls.length > 0 ? "" : response.content;
-       let finalResult = {
-         id: `chatcmpl_${Date.now()}`,
-         provider: "OpenAI",
-         object: "chat.completion",
-         created: Math.floor( Date.now() / 1000 ),
-         model: response.response_metadata?.model_name || model || "unknown",
-         system_fingerprint: systemFingerprint,
-         choices: [
-           {
-             logprobs: null,
-             finish_reason: finishReason,
-             native_finish_reason: nativeFinishReason,
-             index: 0,
-             message: {
-               role: "assistant",
-               content: messageContent,
-               refusal,
-               reasoning,
-               tool_calls: toolCalls
-             },
-           },
-         ],
-       }
-       const usage = response?.response_metadata?.usage;
-       if ( usage && typeof usage === "object" && !Array.isArray( usage ) && Object.keys( usage ).length > 0 )
-       {
-         finalResult.usage = response?.response_metadata?.usage || null;
-       }
-       res.json( finalResult );
-     }
-   }
-   catch ( err )
-   {
-     logger.error( err );
-     res.status( 500 ).json({ error: { message: err.message } });
-   }
- });
-
- // Health check
- app.get( "/health", ( req, res ) => { return res.json({ status: "ok" }) });
-
- // Start server
- const PORT = process.env.PORT || 3000;
- app.listen( PORT, () =>
- {
-   logger.info( `🚀 OpenAI-compatible API listening at http://localhost:${PORT}/v1/chat/completions` );
- });