routerxjs 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.d.ts +13 -0
- package/dist/index.js +278 -0
- package/dist/index.js.map +1 -0
- package/package.json +34 -0
- package/src/app.ts +360 -0
- package/src/index.ts +2 -0
- package/src/serve.ts +58 -0
package/dist/index.d.ts
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
import * as hono_types from 'hono/types';
|
|
2
|
+
import { RouterConfig } from '@routerxjs/core';
|
|
3
|
+
import { Hono } from 'hono';
|
|
4
|
+
|
|
5
|
+
interface RouterXConfig {
|
|
6
|
+
/** Router configuration (providers, default) */
|
|
7
|
+
router: RouterConfig;
|
|
8
|
+
/** API key for authenticating incoming requests (optional) */
|
|
9
|
+
apiKey?: string;
|
|
10
|
+
}
|
|
11
|
+
declare function createRouterX(config: RouterXConfig): Hono<hono_types.BlankEnv, hono_types.BlankSchema, "/">;
|
|
12
|
+
|
|
13
|
+
export { type RouterXConfig, createRouterX };
|
package/dist/index.js
ADDED
|
@@ -0,0 +1,278 @@
|
|
|
1
|
+
// src/app.ts
|
|
2
|
+
import { createAnthropic } from "@ai-sdk/anthropic";
|
|
3
|
+
import { createOpenAICompatible } from "@ai-sdk/openai-compatible";
|
|
4
|
+
import { Router } from "@routerxjs/core";
|
|
5
|
+
import { generateText, streamText } from "ai";
|
|
6
|
+
import { Hono } from "hono";
|
|
7
|
+
import { stream } from "hono/streaming";
|
|
8
|
+
// Build a Vercel AI SDK language model for the resolved provider/model pair.
// Only the two protocols RouterX understands are supported; anything else
// is a configuration error and throws.
function createModel(provider, modelId) {
  if (provider.protocol === "openai-compatible") {
    const factory = createOpenAICompatible({
      name: provider.id,
      baseURL: provider.baseUrl ?? "https://api.openai.com/v1",
      apiKey: provider.apiKey
    });
    return factory(modelId);
  }
  if (provider.protocol === "anthropic") {
    const factory = createAnthropic({
      baseURL: provider.baseUrl ?? "https://api.anthropic.com",
      apiKey: provider.apiKey
    });
    return factory(modelId);
  }
  throw new Error(`Unsupported protocol: ${provider.protocol}`);
}
|
|
29
|
+
// Normalize an OpenAI /chat/completions request body into RouterX's internal
// request shape. Accepts both `max_tokens` and the newer
// `max_completion_tokens` field (the former wins when both are present).
function parseOpenAIRequest(body) {
  const { model, temperature, stream } = body;
  return {
    model,
    messages: body.messages ?? [],
    maxTokens: body.max_tokens ?? body.max_completion_tokens,
    temperature,
    topP: body.top_p,
    stream
  };
}
|
|
39
|
+
// Normalize an Anthropic /v1/messages request body into RouterX's internal
// request shape. The `system` field may be a plain string or an array of
// content blocks; block arrays are flattened to a newline-joined string of
// their text parts.
function parseAnthropicRequest(body) {
  let system;
  if (typeof body.system === "string") {
    system = body.system;
  } else if (Array.isArray(body.system)) {
    const parts = [];
    for (const block of body.system) {
      if (block.type === "text") parts.push(block.text);
    }
    system = parts.join("\n");
  }
  return {
    model: body.model,
    messages: body.messages ?? [],
    system,
    maxTokens: body.max_tokens,
    temperature: body.temperature,
    topP: body.top_p,
    stream: body.stream
  };
}
|
|
50
|
+
// Flatten parsed chat messages into the simple { role, content } pairs the
// Vercel AI SDK accepts: content-block arrays collapse to the concatenation
// of their text parts, and nullish content becomes the empty string.
function toAIMessages(parsed) {
  const out = [];
  for (const message of parsed.messages) {
    let content;
    if (Array.isArray(message.content)) {
      content = message.content
        .filter((block) => block.type === "text")
        .map((block) => block.text)
        .join("");
    } else {
      content = message.content ?? "";
    }
    out.push({ role: message.role, content });
  }
  return out;
}
|
|
59
|
+
// Shape a generateText result as an OpenAI chat.completion response body.
// NOTE(review): the "ai" package >= v5 renamed usage.promptTokens /
// usage.completionTokens to inputTokens / outputTokens (this file already
// uses the v5+ `chunk.text` stream shape); read both spellings so token
// counts are not silently reported as 0 on newer SDK versions.
function formatOpenAIResponse(result, modelId) {
  const promptTokens = result.usage?.promptTokens ?? result.usage?.inputTokens ?? 0;
  const completionTokens = result.usage?.completionTokens ?? result.usage?.outputTokens ?? 0;
  const finishReason =
    result.finishReason === "length" ? "length" : result.finishReason === "tool-calls" ? "tool_calls" : "stop";
  return {
    id: result.response?.id ?? `routerx-${Date.now()}`,
    object: "chat.completion",
    created: Math.floor(Date.now() / 1e3),
    model: result.response?.modelId ?? modelId,
    choices: [
      {
        index: 0,
        message: { role: "assistant", content: result.text ?? "" },
        finish_reason: finishReason
      }
    ],
    usage: {
      prompt_tokens: promptTokens,
      completion_tokens: completionTokens,
      total_tokens: promptTokens + completionTokens
    }
  };
}
|
|
79
|
+
// Shape a generateText result as an Anthropic Messages API response body.
// NOTE(review): the "ai" package >= v5 renamed usage.promptTokens /
// usage.completionTokens to inputTokens / outputTokens; read both spellings
// so token counts are not silently reported as 0 on newer SDK versions.
function formatAnthropicResponse(result, modelId) {
  const inputTokens = result.usage?.promptTokens ?? result.usage?.inputTokens ?? 0;
  const outputTokens = result.usage?.completionTokens ?? result.usage?.outputTokens ?? 0;
  const stopReason =
    result.finishReason === "length" ? "max_tokens" : result.finishReason === "tool-calls" ? "tool_use" : "end_turn";
  return {
    id: result.response?.id ?? `routerx-${Date.now()}`,
    type: "message",
    role: "assistant",
    model: result.response?.modelId ?? modelId,
    content: [{ type: "text", text: result.text ?? "" }],
    stop_reason: stopReason,
    usage: {
      input_tokens: inputTokens,
      output_tokens: outputTokens
    }
  };
}
|
|
93
|
+
// SSE formatter that frames stream parts as OpenAI chat.completion.chunk
// events. onChunk emits one delta frame per text fragment (non-text stream
// parts are dropped); onFinish emits the terminal usage frame followed by
// the "[DONE]" sentinel.
function openAIStreamFormatter() {
  return {
    onChunk(chunk) {
      if (chunk.type !== "text-delta" || !chunk.text) return null;
      const payload = {
        object: "chat.completion.chunk",
        choices: [{ index: 0, delta: { content: chunk.text }, finish_reason: null }]
      };
      return `data: ${JSON.stringify(payload)}\n\n`;
    },
    onFinish(result) {
      // NOTE(review): "ai" >= v5 renamed usage fields to inputTokens /
      // outputTokens; read both spellings so the final frame does not
      // always report 0 tokens on newer SDK versions.
      const promptTokens = result.usage?.promptTokens ?? result.usage?.inputTokens ?? 0;
      const completionTokens = result.usage?.completionTokens ?? result.usage?.outputTokens ?? 0;
      const payload = {
        object: "chat.completion.chunk",
        choices: [{ index: 0, delta: {}, finish_reason: "stop" }],
        usage: {
          prompt_tokens: promptTokens,
          completion_tokens: completionTokens,
          total_tokens: promptTokens + completionTokens
        }
      };
      return `data: ${JSON.stringify(payload)}\n\ndata: [DONE]\n\n`;
    }
  };
}
|
|
123
|
+
// SSE formatter that frames stream parts as Anthropic Messages streaming
// events: message_start → content_block_start → content_block_delta* →
// content_block_stop → message_delta → message_stop. content_block_start is
// emitted lazily, once, before the first text delta.
function anthropicStreamFormatter() {
  let blockStarted = false;
  const sseEvent = (event, data) => `event: ${event}\ndata: ${JSON.stringify(data)}\n\n`;
  return {
    onStart(modelId) {
      return sseEvent("message_start", {
        type: "message_start",
        message: {
          id: `routerx-${Date.now()}`,
          type: "message",
          role: "assistant",
          model: modelId,
          content: [],
          usage: { input_tokens: 0, output_tokens: 0 }
        }
      });
    },
    onChunk(chunk) {
      if (chunk.type !== "text-delta" || !chunk.text) return null;
      let sse = "";
      if (!blockStarted) {
        blockStarted = true;
        sse += sseEvent("content_block_start", {
          type: "content_block_start",
          index: 0,
          content_block: { type: "text", text: "" }
        });
      }
      sse += sseEvent("content_block_delta", {
        type: "content_block_delta",
        index: 0,
        delta: { type: "text_delta", text: chunk.text }
      });
      return sse;
    },
    onFinish(result) {
      // NOTE(review): "ai" >= v5 renamed usage.completionTokens to
      // outputTokens; read both spellings so output_tokens is not always 0
      // on newer SDK versions.
      const outputTokens = result.usage?.completionTokens ?? result.usage?.outputTokens ?? 0;
      return (
        sseEvent("content_block_stop", { type: "content_block_stop", index: 0 }) +
        sseEvent("message_delta", {
          type: "message_delta",
          delta: { stop_reason: "end_turn" },
          usage: { output_tokens: outputTokens }
        }) +
        sseEvent("message_stop", { type: "message_stop" })
      );
    }
  };
}
|
|
193
|
+
// Build the RouterX Hono app: optional API-key auth middleware, OpenAI +
// Anthropic protocol endpoints, model listing, and an unauthenticated
// health probe.
function createRouterX(config) {
  const app = new Hono();
  const router = new Router(config.router);
  if (config.apiKey) {
    app.use("*", async (c, next) => {
      // Keep /health open so load balancers can probe without credentials.
      if (c.req.path === "/health") return next();
      const auth = c.req.header("Authorization");
      const apiKey = c.req.header("x-api-key");
      // BUGFIX: only strip "Bearer " when it is actually a prefix. The old
      // `auth?.replace("Bearer ", "")` rewrote the first occurrence anywhere
      // in the header value.
      const token = auth ? (auth.startsWith("Bearer ") ? auth.slice(7) : auth) : apiKey;
      if (token !== config.apiKey) {
        return c.json({ error: { message: "Invalid API key", type: "authentication_error" } }, 401);
      }
      return next();
    });
  }
  app.post("/openai/v1/chat/completions", async (c) => {
    return handleRequest(c, parseOpenAIRequest(await c.req.json()), "openai");
  });
  app.post("/anthropic/v1/messages", async (c) => {
    return handleRequest(c, parseAnthropicRequest(await c.req.json()), "anthropic");
  });
  app.get("/v1/models", (c) => {
    const models = router.listModels();
    return c.json({
      object: "list",
      data: models.map((m) => ({ id: m.model, object: "model", owned_by: m.providerId }))
    });
  });
  app.get("/health", (c) => c.json({ status: "ok" }));
  // Core handler: resolves the requested model through the router, then
  // either streams SSE in the downstream protocol's framing or returns a
  // single JSON body.
  async function handleRequest(c, parsed, downstream) {
    try {
      const routeResult = router.route(parsed.model);
      if (!routeResult) {
        return c.json({ error: { message: `Model "${parsed.model}" not found` } }, 404);
      }
      const model = createModel(routeResult.provider, routeResult.model);
      const messages = toAIMessages(parsed);
      if (parsed.stream) {
        const fmt = downstream === "anthropic" ? anthropicStreamFormatter() : openAIStreamFormatter();
        const result2 = streamText({
          model,
          messages,
          system: parsed.system,
          maxOutputTokens: parsed.maxTokens,
          temperature: parsed.temperature,
          topP: parsed.topP
        });
        return stream(c, async (s) => {
          try {
            if ("onStart" in fmt) {
              await s.write(fmt.onStart(routeResult.model));
            }
            for await (const chunk of result2.fullStream) {
              const formatted = fmt.onChunk(chunk);
              if (formatted) await s.write(formatted);
            }
            // BUGFIX: `await result2` resolved to the (non-thenable) stream
            // result object itself, so `.usage` was still a pending Promise
            // and the terminal SSE frame always reported 0 tokens. Await the
            // usage promise explicitly before formatting the final events.
            const usage = await result2.usage;
            await s.write(fmt.onFinish({ usage }));
          } catch (err) {
            // Surface upstream stream failures to the client instead of
            // silently truncating the response.
            await s.write(`data: ${JSON.stringify({ error: err.message })}\n\n`);
          }
        });
      }
      const result = await generateText({
        model,
        messages,
        system: parsed.system,
        maxOutputTokens: parsed.maxTokens,
        temperature: parsed.temperature,
        topP: parsed.topP
      });
      return c.json(
        downstream === "anthropic" ? formatAnthropicResponse(result, routeResult.model) : formatOpenAIResponse(result, routeResult.model)
      );
    } catch (err) {
      return c.json({ error: { message: err.message ?? "Internal error" } }, 500);
    }
  }
  return app;
}
|
|
275
|
+
export {
|
|
276
|
+
createRouterX
|
|
277
|
+
};
|
|
278
|
+
//# sourceMappingURL=index.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"sources":["../src/app.ts"],"sourcesContent":["/**\n * RouterX Hono Application\n *\n * Routes:\n * - POST /openai/v1/chat/completions — OpenAI protocol\n * - POST /anthropic/v1/messages — Anthropic protocol\n * - GET /v1/models — Model listing\n * - GET /health — Health check\n *\n * Uses Vercel AI SDK for upstream LLM calls.\n */\n\nimport { createAnthropic } from \"@ai-sdk/anthropic\";\nimport { createOpenAICompatible } from \"@ai-sdk/openai-compatible\";\nimport { type RegisteredProvider, Router, type RouterConfig } from \"@routerxjs/core\";\nimport { generateText, type LanguageModel, streamText } from \"ai\";\nimport { Hono } from \"hono\";\nimport { stream } from \"hono/streaming\";\n\n// ============================================================================\n// Config\n// ============================================================================\n\nexport interface RouterXConfig {\n /** Router configuration (providers, default) */\n router: RouterConfig;\n\n /** API key for authenticating incoming requests (optional) */\n apiKey?: string;\n}\n\n// ============================================================================\n// Vercel AI SDK model factory\n// ============================================================================\n\nfunction createModel(provider: RegisteredProvider, modelId: string): LanguageModel {\n switch (provider.protocol) {\n case \"openai-compatible\": {\n const p = createOpenAICompatible({\n name: provider.id,\n baseURL: provider.baseUrl ?? \"https://api.openai.com/v1\",\n apiKey: provider.apiKey,\n });\n return p(modelId);\n }\n case \"anthropic\": {\n const p = createAnthropic({\n baseURL: provider.baseUrl ?? 
\"https://api.anthropic.com\",\n apiKey: provider.apiKey,\n });\n return p(modelId);\n }\n default:\n throw new Error(`Unsupported protocol: ${provider.protocol}`);\n }\n}\n\n// ============================================================================\n// Parse incoming requests\n// ============================================================================\n\ninterface ParsedRequest {\n model: string;\n messages: Array<{ role: string; content: any }>;\n system?: string;\n maxTokens?: number;\n temperature?: number;\n topP?: number;\n stream?: boolean;\n}\n\nfunction parseOpenAIRequest(body: any): ParsedRequest {\n return {\n model: body.model,\n messages: body.messages ?? [],\n maxTokens: body.max_tokens ?? body.max_completion_tokens,\n temperature: body.temperature,\n topP: body.top_p,\n stream: body.stream,\n };\n}\n\nfunction parseAnthropicRequest(body: any): ParsedRequest {\n return {\n model: body.model,\n messages: body.messages ?? [],\n system:\n typeof body.system === \"string\"\n ? body.system\n : Array.isArray(body.system)\n ? body.system\n .filter((b: any) => b.type === \"text\")\n .map((b: any) => b.text)\n .join(\"\\n\")\n : undefined,\n maxTokens: body.max_tokens,\n temperature: body.temperature,\n topP: body.top_p,\n stream: body.stream,\n };\n}\n\n// ============================================================================\n// Convert to Vercel AI SDK messages\n// ============================================================================\n\nfunction toAIMessages(parsed: ParsedRequest): any[] {\n return parsed.messages.map((m: any) => {\n if (Array.isArray(m.content)) {\n const text = m.content\n .filter((b: any) => b.type === \"text\")\n .map((b: any) => b.text)\n .join(\"\");\n return { role: m.role, content: text || \"\" };\n }\n return { role: m.role, content: m.content ?? 
\"\" };\n });\n}\n\n// ============================================================================\n// Format responses\n// ============================================================================\n\nfunction formatOpenAIResponse(result: any, modelId: string): any {\n return {\n id: result.response?.id ?? `routerx-${Date.now()}`,\n object: \"chat.completion\",\n created: Math.floor(Date.now() / 1000),\n model: result.response?.modelId ?? modelId,\n choices: [\n {\n index: 0,\n message: { role: \"assistant\", content: result.text ?? \"\" },\n finish_reason:\n result.finishReason === \"length\"\n ? \"length\"\n : result.finishReason === \"tool-calls\"\n ? \"tool_calls\"\n : \"stop\",\n },\n ],\n usage: {\n prompt_tokens: result.usage?.promptTokens ?? 0,\n completion_tokens: result.usage?.completionTokens ?? 0,\n total_tokens: (result.usage?.promptTokens ?? 0) + (result.usage?.completionTokens ?? 0),\n },\n };\n}\n\nfunction formatAnthropicResponse(result: any, modelId: string): any {\n return {\n id: result.response?.id ?? `routerx-${Date.now()}`,\n type: \"message\",\n role: \"assistant\",\n model: result.response?.modelId ?? modelId,\n content: [{ type: \"text\", text: result.text ?? \"\" }],\n stop_reason:\n result.finishReason === \"length\"\n ? \"max_tokens\"\n : result.finishReason === \"tool-calls\"\n ? \"tool_use\"\n : \"end_turn\",\n usage: {\n input_tokens: result.usage?.promptTokens ?? 0,\n output_tokens: result.usage?.completionTokens ?? 
0,\n },\n };\n}\n\n// ============================================================================\n// Streaming formatters\n// ============================================================================\n\nfunction openAIStreamFormatter() {\n return {\n onChunk(chunk: any): string | null {\n if (chunk.type === \"text-delta\" && chunk.text) {\n return `data: ${JSON.stringify({\n object: \"chat.completion.chunk\",\n choices: [{ index: 0, delta: { content: chunk.text }, finish_reason: null }],\n })}\\n\\n`;\n }\n return null;\n },\n onFinish(result: any): string {\n let out = `data: ${JSON.stringify({\n object: \"chat.completion.chunk\",\n choices: [{ index: 0, delta: {}, finish_reason: \"stop\" }],\n usage: {\n prompt_tokens: result.usage?.promptTokens ?? 0,\n completion_tokens: result.usage?.completionTokens ?? 0,\n total_tokens: (result.usage?.promptTokens ?? 0) + (result.usage?.completionTokens ?? 0),\n },\n })}\\n\\n`;\n out += \"data: [DONE]\\n\\n\";\n return out;\n },\n };\n}\n\nfunction anthropicStreamFormatter() {\n let blockStarted = false;\n return {\n onStart(modelId: string): string {\n return `event: message_start\\ndata: ${JSON.stringify({\n type: \"message_start\",\n message: {\n id: `routerx-${Date.now()}`,\n type: \"message\",\n role: \"assistant\",\n model: modelId,\n content: [],\n usage: { input_tokens: 0, output_tokens: 0 },\n },\n })}\\n\\n`;\n },\n onChunk(chunk: any): string | null {\n if (chunk.type === \"text-delta\" && chunk.text) {\n let sse = \"\";\n if (!blockStarted) {\n blockStarted = true;\n sse += `event: content_block_start\\ndata: ${JSON.stringify({\n type: \"content_block_start\",\n index: 0,\n content_block: { type: \"text\", text: \"\" },\n })}\\n\\n`;\n }\n sse += `event: content_block_delta\\ndata: ${JSON.stringify({\n type: \"content_block_delta\",\n index: 0,\n delta: { type: \"text_delta\", text: chunk.text },\n })}\\n\\n`;\n return sse;\n }\n return null;\n },\n onFinish(result: any): string {\n let sse = \"\";\n sse += 
`event: content_block_stop\\ndata: ${JSON.stringify({\n type: \"content_block_stop\",\n index: 0,\n })}\\n\\n`;\n sse += `event: message_delta\\ndata: ${JSON.stringify({\n type: \"message_delta\",\n delta: { stop_reason: \"end_turn\" },\n usage: { output_tokens: result.usage?.completionTokens ?? 0 },\n })}\\n\\n`;\n sse += `event: message_stop\\ndata: ${JSON.stringify({ type: \"message_stop\" })}\\n\\n`;\n return sse;\n },\n };\n}\n\n// ============================================================================\n// App Factory\n// ============================================================================\n\nexport function createRouterX(config: RouterXConfig) {\n const app = new Hono();\n const router = new Router(config.router);\n\n // Auth middleware\n if (config.apiKey) {\n app.use(\"*\", async (c, next) => {\n if (c.req.path === \"/health\") return next();\n const auth = c.req.header(\"Authorization\");\n const apiKey = c.req.header(\"x-api-key\");\n const token = auth?.replace(\"Bearer \", \"\") ?? 
apiKey;\n if (token !== config.apiKey) {\n return c.json({ error: { message: \"Invalid API key\", type: \"authentication_error\" } }, 401);\n }\n return next();\n });\n }\n\n // OpenAI endpoint\n app.post(\"/openai/v1/chat/completions\", async (c) => {\n return handleRequest(c, parseOpenAIRequest(await c.req.json()), \"openai\");\n });\n\n // Anthropic endpoint\n app.post(\"/anthropic/v1/messages\", async (c) => {\n return handleRequest(c, parseAnthropicRequest(await c.req.json()), \"anthropic\");\n });\n\n // Model list\n app.get(\"/v1/models\", (c) => {\n const models = router.listModels();\n return c.json({\n object: \"list\",\n data: models.map((m) => ({ id: m.model, object: \"model\", owned_by: m.providerId })),\n });\n });\n\n // Health\n app.get(\"/health\", (c) => c.json({ status: \"ok\" }));\n\n // Core handler\n async function handleRequest(c: any, parsed: ParsedRequest, downstream: \"openai\" | \"anthropic\") {\n try {\n const routeResult = router.route(parsed.model);\n if (!routeResult) {\n return c.json({ error: { message: `Model \"${parsed.model}\" not found` } }, 404);\n }\n\n const model = createModel(routeResult.provider, routeResult.model);\n const messages = toAIMessages(parsed);\n\n if (parsed.stream) {\n const fmt =\n downstream === \"anthropic\" ? 
anthropicStreamFormatter() : openAIStreamFormatter();\n\n const result = streamText({\n model,\n messages,\n system: parsed.system,\n maxOutputTokens: parsed.maxTokens,\n temperature: parsed.temperature,\n topP: parsed.topP,\n });\n\n return stream(c, async (s) => {\n try {\n if (\"onStart\" in fmt) {\n await s.write((fmt as any).onStart(routeResult.model));\n }\n for await (const chunk of result.fullStream) {\n const formatted = fmt.onChunk(chunk);\n if (formatted) await s.write(formatted);\n }\n const final = await result;\n await s.write(fmt.onFinish(final));\n } catch (err: any) {\n await s.write(`data: ${JSON.stringify({ error: err.message })}\\n\\n`);\n }\n });\n }\n\n // Non-streaming\n const result = await generateText({\n model,\n messages,\n system: parsed.system,\n maxOutputTokens: parsed.maxTokens,\n temperature: parsed.temperature,\n topP: parsed.topP,\n });\n\n return c.json(\n downstream === \"anthropic\"\n ? formatAnthropicResponse(result, routeResult.model)\n : formatOpenAIResponse(result, routeResult.model)\n );\n } catch (err: any) {\n return c.json({ error: { message: err.message ?? 
\"Internal error\" } }, 500);\n }\n }\n\n return app;\n}\n"],"mappings":";AAYA,SAAS,uBAAuB;AAChC,SAAS,8BAA8B;AACvC,SAAkC,cAAiC;AACnE,SAAS,cAAkC,kBAAkB;AAC7D,SAAS,YAAY;AACrB,SAAS,cAAc;AAkBvB,SAAS,YAAY,UAA8B,SAAgC;AACjF,UAAQ,SAAS,UAAU;AAAA,IACzB,KAAK,qBAAqB;AACxB,YAAM,IAAI,uBAAuB;AAAA,QAC/B,MAAM,SAAS;AAAA,QACf,SAAS,SAAS,WAAW;AAAA,QAC7B,QAAQ,SAAS;AAAA,MACnB,CAAC;AACD,aAAO,EAAE,OAAO;AAAA,IAClB;AAAA,IACA,KAAK,aAAa;AAChB,YAAM,IAAI,gBAAgB;AAAA,QACxB,SAAS,SAAS,WAAW;AAAA,QAC7B,QAAQ,SAAS;AAAA,MACnB,CAAC;AACD,aAAO,EAAE,OAAO;AAAA,IAClB;AAAA,IACA;AACE,YAAM,IAAI,MAAM,yBAAyB,SAAS,QAAQ,EAAE;AAAA,EAChE;AACF;AAgBA,SAAS,mBAAmB,MAA0B;AACpD,SAAO;AAAA,IACL,OAAO,KAAK;AAAA,IACZ,UAAU,KAAK,YAAY,CAAC;AAAA,IAC5B,WAAW,KAAK,cAAc,KAAK;AAAA,IACnC,aAAa,KAAK;AAAA,IAClB,MAAM,KAAK;AAAA,IACX,QAAQ,KAAK;AAAA,EACf;AACF;AAEA,SAAS,sBAAsB,MAA0B;AACvD,SAAO;AAAA,IACL,OAAO,KAAK;AAAA,IACZ,UAAU,KAAK,YAAY,CAAC;AAAA,IAC5B,QACE,OAAO,KAAK,WAAW,WACnB,KAAK,SACL,MAAM,QAAQ,KAAK,MAAM,IACvB,KAAK,OACF,OAAO,CAAC,MAAW,EAAE,SAAS,MAAM,EACpC,IAAI,CAAC,MAAW,EAAE,IAAI,EACtB,KAAK,IAAI,IACZ;AAAA,IACR,WAAW,KAAK;AAAA,IAChB,aAAa,KAAK;AAAA,IAClB,MAAM,KAAK;AAAA,IACX,QAAQ,KAAK;AAAA,EACf;AACF;AAMA,SAAS,aAAa,QAA8B;AAClD,SAAO,OAAO,SAAS,IAAI,CAAC,MAAW;AACrC,QAAI,MAAM,QAAQ,EAAE,OAAO,GAAG;AAC5B,YAAM,OAAO,EAAE,QACZ,OAAO,CAAC,MAAW,EAAE,SAAS,MAAM,EACpC,IAAI,CAAC,MAAW,EAAE,IAAI,EACtB,KAAK,EAAE;AACV,aAAO,EAAE,MAAM,EAAE,MAAM,SAAS,QAAQ,GAAG;AAAA,IAC7C;AACA,WAAO,EAAE,MAAM,EAAE,MAAM,SAAS,EAAE,WAAW,GAAG;AAAA,EAClD,CAAC;AACH;AAMA,SAAS,qBAAqB,QAAa,SAAsB;AAC/D,SAAO;AAAA,IACL,IAAI,OAAO,UAAU,MAAM,WAAW,KAAK,IAAI,CAAC;AAAA,IAChD,QAAQ;AAAA,IACR,SAAS,KAAK,MAAM,KAAK,IAAI,IAAI,GAAI;AAAA,IACrC,OAAO,OAAO,UAAU,WAAW;AAAA,IACnC,SAAS;AAAA,MACP;AAAA,QACE,OAAO;AAAA,QACP,SAAS,EAAE,MAAM,aAAa,SAAS,OAAO,QAAQ,GAAG;AAAA,QACzD,eACE,OAAO,iBAAiB,WACpB,WACA,OAAO,iBAAiB,eACtB,eACA;AAAA,MACV;AAAA,IACF;AAAA,IACA,OAAO;AAAA,MACL,eAAe,OAAO,OAAO,gBAAgB;AAAA,MAC7C,mBAAmB,OAAO,OAAO,oBAAoB;AAAA,MACrD,eAAe,OAAO,OAAO,gBAAgB,MAAM,OAAO,OAAO,oBAAoB;AAAA,IACvF;AAAA,EACF;AACF;AAEA,SAAS,wBAAwB,QAAa,S
AAsB;AAClE,SAAO;AAAA,IACL,IAAI,OAAO,UAAU,MAAM,WAAW,KAAK,IAAI,CAAC;AAAA,IAChD,MAAM;AAAA,IACN,MAAM;AAAA,IACN,OAAO,OAAO,UAAU,WAAW;AAAA,IACnC,SAAS,CAAC,EAAE,MAAM,QAAQ,MAAM,OAAO,QAAQ,GAAG,CAAC;AAAA,IACnD,aACE,OAAO,iBAAiB,WACpB,eACA,OAAO,iBAAiB,eACtB,aACA;AAAA,IACR,OAAO;AAAA,MACL,cAAc,OAAO,OAAO,gBAAgB;AAAA,MAC5C,eAAe,OAAO,OAAO,oBAAoB;AAAA,IACnD;AAAA,EACF;AACF;AAMA,SAAS,wBAAwB;AAC/B,SAAO;AAAA,IACL,QAAQ,OAA2B;AACjC,UAAI,MAAM,SAAS,gBAAgB,MAAM,MAAM;AAC7C,eAAO,SAAS,KAAK,UAAU;AAAA,UAC7B,QAAQ;AAAA,UACR,SAAS,CAAC,EAAE,OAAO,GAAG,OAAO,EAAE,SAAS,MAAM,KAAK,GAAG,eAAe,KAAK,CAAC;AAAA,QAC7E,CAAC,CAAC;AAAA;AAAA;AAAA,MACJ;AACA,aAAO;AAAA,IACT;AAAA,IACA,SAAS,QAAqB;AAC5B,UAAI,MAAM,SAAS,KAAK,UAAU;AAAA,QAChC,QAAQ;AAAA,QACR,SAAS,CAAC,EAAE,OAAO,GAAG,OAAO,CAAC,GAAG,eAAe,OAAO,CAAC;AAAA,QACxD,OAAO;AAAA,UACL,eAAe,OAAO,OAAO,gBAAgB;AAAA,UAC7C,mBAAmB,OAAO,OAAO,oBAAoB;AAAA,UACrD,eAAe,OAAO,OAAO,gBAAgB,MAAM,OAAO,OAAO,oBAAoB;AAAA,QACvF;AAAA,MACF,CAAC,CAAC;AAAA;AAAA;AACF,aAAO;AACP,aAAO;AAAA,IACT;AAAA,EACF;AACF;AAEA,SAAS,2BAA2B;AAClC,MAAI,eAAe;AACnB,SAAO;AAAA,IACL,QAAQ,SAAyB;AAC/B,aAAO;AAAA,QAA+B,KAAK,UAAU;AAAA,QACnD,MAAM;AAAA,QACN,SAAS;AAAA,UACP,IAAI,WAAW,KAAK,IAAI,CAAC;AAAA,UACzB,MAAM;AAAA,UACN,MAAM;AAAA,UACN,OAAO;AAAA,UACP,SAAS,CAAC;AAAA,UACV,OAAO,EAAE,cAAc,GAAG,eAAe,EAAE;AAAA,QAC7C;AAAA,MACF,CAAC,CAAC;AAAA;AAAA;AAAA,IACJ;AAAA,IACA,QAAQ,OAA2B;AACjC,UAAI,MAAM,SAAS,gBAAgB,MAAM,MAAM;AAC7C,YAAI,MAAM;AACV,YAAI,CAAC,cAAc;AACjB,yBAAe;AACf,iBAAO;AAAA,QAAqC,KAAK,UAAU;AAAA,YACzD,MAAM;AAAA,YACN,OAAO;AAAA,YACP,eAAe,EAAE,MAAM,QAAQ,MAAM,GAAG;AAAA,UAC1C,CAAC,CAAC;AAAA;AAAA;AAAA,QACJ;AACA,eAAO;AAAA,QAAqC,KAAK,UAAU;AAAA,UACzD,MAAM;AAAA,UACN,OAAO;AAAA,UACP,OAAO,EAAE,MAAM,cAAc,MAAM,MAAM,KAAK;AAAA,QAChD,CAAC,CAAC;AAAA;AAAA;AACF,eAAO;AAAA,MACT;AACA,aAAO;AAAA,IACT;AAAA,IACA,SAAS,QAAqB;AAC5B,UAAI,MAAM;AACV,aAAO;AAAA,QAAoC,KAAK,UAAU;AAAA,QACxD,MAAM;AAAA,QACN,OAAO;AAAA,MACT,CAAC,CAAC;AAAA;AAAA;AACF,aAAO;AAAA,QAA+B,KAAK,UAAU;AAAA,QACnD,MAAM;AAAA,QACN,OAAO,EAAE,aAAa,WAAW;AAAA,QACjC,OAAO,EAAE,eAAe,OAAO,OAAO,oBAAoB,EAAE;AAAA,MAC9D,CAA
C,CAAC;AAAA;AAAA;AACF,aAAO;AAAA,QAA8B,KAAK,UAAU,EAAE,MAAM,eAAe,CAAC,CAAC;AAAA;AAAA;AAC7E,aAAO;AAAA,IACT;AAAA,EACF;AACF;AAMO,SAAS,cAAc,QAAuB;AACnD,QAAM,MAAM,IAAI,KAAK;AACrB,QAAM,SAAS,IAAI,OAAO,OAAO,MAAM;AAGvC,MAAI,OAAO,QAAQ;AACjB,QAAI,IAAI,KAAK,OAAO,GAAG,SAAS;AAC9B,UAAI,EAAE,IAAI,SAAS,UAAW,QAAO,KAAK;AAC1C,YAAM,OAAO,EAAE,IAAI,OAAO,eAAe;AACzC,YAAM,SAAS,EAAE,IAAI,OAAO,WAAW;AACvC,YAAM,QAAQ,MAAM,QAAQ,WAAW,EAAE,KAAK;AAC9C,UAAI,UAAU,OAAO,QAAQ;AAC3B,eAAO,EAAE,KAAK,EAAE,OAAO,EAAE,SAAS,mBAAmB,MAAM,uBAAuB,EAAE,GAAG,GAAG;AAAA,MAC5F;AACA,aAAO,KAAK;AAAA,IACd,CAAC;AAAA,EACH;AAGA,MAAI,KAAK,+BAA+B,OAAO,MAAM;AACnD,WAAO,cAAc,GAAG,mBAAmB,MAAM,EAAE,IAAI,KAAK,CAAC,GAAG,QAAQ;AAAA,EAC1E,CAAC;AAGD,MAAI,KAAK,0BAA0B,OAAO,MAAM;AAC9C,WAAO,cAAc,GAAG,sBAAsB,MAAM,EAAE,IAAI,KAAK,CAAC,GAAG,WAAW;AAAA,EAChF,CAAC;AAGD,MAAI,IAAI,cAAc,CAAC,MAAM;AAC3B,UAAM,SAAS,OAAO,WAAW;AACjC,WAAO,EAAE,KAAK;AAAA,MACZ,QAAQ;AAAA,MACR,MAAM,OAAO,IAAI,CAAC,OAAO,EAAE,IAAI,EAAE,OAAO,QAAQ,SAAS,UAAU,EAAE,WAAW,EAAE;AAAA,IACpF,CAAC;AAAA,EACH,CAAC;AAGD,MAAI,IAAI,WAAW,CAAC,MAAM,EAAE,KAAK,EAAE,QAAQ,KAAK,CAAC,CAAC;AAGlD,iBAAe,cAAc,GAAQ,QAAuB,YAAoC;AAC9F,QAAI;AACF,YAAM,cAAc,OAAO,MAAM,OAAO,KAAK;AAC7C,UAAI,CAAC,aAAa;AAChB,eAAO,EAAE,KAAK,EAAE,OAAO,EAAE,SAAS,UAAU,OAAO,KAAK,cAAc,EAAE,GAAG,GAAG;AAAA,MAChF;AAEA,YAAM,QAAQ,YAAY,YAAY,UAAU,YAAY,KAAK;AACjE,YAAM,WAAW,aAAa,MAAM;AAEpC,UAAI,OAAO,QAAQ;AACjB,cAAM,MACJ,eAAe,cAAc,yBAAyB,IAAI,sBAAsB;AAElF,cAAMA,UAAS,WAAW;AAAA,UACxB;AAAA,UACA;AAAA,UACA,QAAQ,OAAO;AAAA,UACf,iBAAiB,OAAO;AAAA,UACxB,aAAa,OAAO;AAAA,UACpB,MAAM,OAAO;AAAA,QACf,CAAC;AAED,eAAO,OAAO,GAAG,OAAO,MAAM;AAC5B,cAAI;AACF,gBAAI,aAAa,KAAK;AACpB,oBAAM,EAAE,MAAO,IAAY,QAAQ,YAAY,KAAK,CAAC;AAAA,YACvD;AACA,6BAAiB,SAASA,QAAO,YAAY;AAC3C,oBAAM,YAAY,IAAI,QAAQ,KAAK;AACnC,kBAAI,UAAW,OAAM,EAAE,MAAM,SAAS;AAAA,YACxC;AACA,kBAAM,QAAQ,MAAMA;AACpB,kBAAM,EAAE,MAAM,IAAI,SAAS,KAAK,CAAC;AAAA,UACnC,SAAS,KAAU;AACjB,kBAAM,EAAE,MAAM,SAAS,KAAK,UAAU,EAAE,OAAO,IAAI,QAAQ,CAAC,CAAC;AAAA;AAAA,CAAM;AAAA,UACrE;AAAA,QACF,CAAC;AAAA,MACH;AAGA,YAAM,SAAS,MAAM,aAAa;AAAA,QAChC;AAAA,QACA;A
AAA,QACA,QAAQ,OAAO;AAAA,QACf,iBAAiB,OAAO;AAAA,QACxB,aAAa,OAAO;AAAA,QACpB,MAAM,OAAO;AAAA,MACf,CAAC;AAED,aAAO,EAAE;AAAA,QACP,eAAe,cACX,wBAAwB,QAAQ,YAAY,KAAK,IACjD,qBAAqB,QAAQ,YAAY,KAAK;AAAA,MACpD;AAAA,IACF,SAAS,KAAU;AACjB,aAAO,EAAE,KAAK,EAAE,OAAO,EAAE,SAAS,IAAI,WAAW,iBAAiB,EAAE,GAAG,GAAG;AAAA,IAC5E;AAAA,EACF;AAEA,SAAO;AACT;","names":["result"]}
|
package/package.json
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "routerxjs",
|
|
3
|
+
"description": "RouterX - Universal LLM API Router",
|
|
4
|
+
"license": "MIT",
|
|
5
|
+
"version": "0.0.1",
|
|
6
|
+
"type": "module",
|
|
7
|
+
"main": "./dist/index.js",
|
|
8
|
+
"types": "./dist/index.d.ts",
|
|
9
|
+
"exports": {
|
|
10
|
+
".": {
|
|
11
|
+
"types": "./dist/index.d.ts",
|
|
12
|
+
"import": "./dist/index.js"
|
|
13
|
+
}
|
|
14
|
+
},
|
|
15
|
+
"files": [
|
|
16
|
+
"dist",
|
|
17
|
+
"src"
|
|
18
|
+
],
|
|
19
|
+
"scripts": {
|
|
20
|
+
"build": "tsup",
|
|
21
|
+
"typecheck": "tsc --noEmit",
|
|
22
|
+
"test": "bun test"
|
|
23
|
+
},
|
|
24
|
+
"dependencies": {
|
|
25
|
+
"@routerxjs/core": "^0.0.1",
|
|
26
|
+
"@ai-sdk/anthropic": "^3.0.36",
|
|
27
|
+
"@ai-sdk/openai-compatible": "^2.0.27",
|
|
28
|
+
"ai": "^6.0.0",
|
|
29
|
+
"hono": "^4.7.0"
|
|
30
|
+
},
|
|
31
|
+
"devDependencies": {
|
|
32
|
+
"typescript": "^5.9.3"
|
|
33
|
+
}
|
|
34
|
+
}
|
package/src/app.ts
ADDED
|
@@ -0,0 +1,360 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* RouterX Hono Application
|
|
3
|
+
*
|
|
4
|
+
* Routes:
|
|
5
|
+
* - POST /openai/v1/chat/completions — OpenAI protocol
|
|
6
|
+
* - POST /anthropic/v1/messages — Anthropic protocol
|
|
7
|
+
* - GET /v1/models — Model listing
|
|
8
|
+
* - GET /health — Health check
|
|
9
|
+
*
|
|
10
|
+
* Uses Vercel AI SDK for upstream LLM calls.
|
|
11
|
+
*/
|
|
12
|
+
|
|
13
|
+
import { createAnthropic } from "@ai-sdk/anthropic";
|
|
14
|
+
import { createOpenAICompatible } from "@ai-sdk/openai-compatible";
|
|
15
|
+
import { type RegisteredProvider, Router, type RouterConfig } from "@routerxjs/core";
|
|
16
|
+
import { generateText, type LanguageModel, streamText } from "ai";
|
|
17
|
+
import { Hono } from "hono";
|
|
18
|
+
import { stream } from "hono/streaming";
|
|
19
|
+
|
|
20
|
+
// ============================================================================
|
|
21
|
+
// Config
|
|
22
|
+
// ============================================================================
|
|
23
|
+
|
|
24
|
+
export interface RouterXConfig {
|
|
25
|
+
/** Router configuration (providers, default) */
|
|
26
|
+
router: RouterConfig;
|
|
27
|
+
|
|
28
|
+
/** API key for authenticating incoming requests (optional) */
|
|
29
|
+
apiKey?: string;
|
|
30
|
+
}
|
|
31
|
+
|
|
32
|
+
// ============================================================================
|
|
33
|
+
// Vercel AI SDK model factory
|
|
34
|
+
// ============================================================================
|
|
35
|
+
|
|
36
|
+
function createModel(provider: RegisteredProvider, modelId: string): LanguageModel {
|
|
37
|
+
switch (provider.protocol) {
|
|
38
|
+
case "openai-compatible": {
|
|
39
|
+
const p = createOpenAICompatible({
|
|
40
|
+
name: provider.id,
|
|
41
|
+
baseURL: provider.baseUrl ?? "https://api.openai.com/v1",
|
|
42
|
+
apiKey: provider.apiKey,
|
|
43
|
+
});
|
|
44
|
+
return p(modelId);
|
|
45
|
+
}
|
|
46
|
+
case "anthropic": {
|
|
47
|
+
const p = createAnthropic({
|
|
48
|
+
baseURL: provider.baseUrl ?? "https://api.anthropic.com",
|
|
49
|
+
apiKey: provider.apiKey,
|
|
50
|
+
});
|
|
51
|
+
return p(modelId);
|
|
52
|
+
}
|
|
53
|
+
default:
|
|
54
|
+
throw new Error(`Unsupported protocol: ${provider.protocol}`);
|
|
55
|
+
}
|
|
56
|
+
}
|
|
57
|
+
|
|
58
|
+
// ============================================================================
|
|
59
|
+
// Parse incoming requests
|
|
60
|
+
// ============================================================================
|
|
61
|
+
|
|
62
|
+
/**
 * Protocol-agnostic request shape shared by the OpenAI and Anthropic parsers.
 */
interface ParsedRequest {
  // Model alias as requested by the client (resolved by the Router).
  model: string;
  // Chat messages; content may be a string or an array of content blocks.
  messages: Array<{ role: string; content: any }>;
  // Optional system prompt (Anthropic carries it outside the messages array).
  system?: string;
  maxTokens?: number;
  temperature?: number;
  topP?: number;
  // When true, the response is streamed as SSE.
  stream?: boolean;
}
|
|
71
|
+
|
|
72
|
+
function parseOpenAIRequest(body: any): ParsedRequest {
|
|
73
|
+
return {
|
|
74
|
+
model: body.model,
|
|
75
|
+
messages: body.messages ?? [],
|
|
76
|
+
maxTokens: body.max_tokens ?? body.max_completion_tokens,
|
|
77
|
+
temperature: body.temperature,
|
|
78
|
+
topP: body.top_p,
|
|
79
|
+
stream: body.stream,
|
|
80
|
+
};
|
|
81
|
+
}
|
|
82
|
+
|
|
83
|
+
function parseAnthropicRequest(body: any): ParsedRequest {
|
|
84
|
+
return {
|
|
85
|
+
model: body.model,
|
|
86
|
+
messages: body.messages ?? [],
|
|
87
|
+
system:
|
|
88
|
+
typeof body.system === "string"
|
|
89
|
+
? body.system
|
|
90
|
+
: Array.isArray(body.system)
|
|
91
|
+
? body.system
|
|
92
|
+
.filter((b: any) => b.type === "text")
|
|
93
|
+
.map((b: any) => b.text)
|
|
94
|
+
.join("\n")
|
|
95
|
+
: undefined,
|
|
96
|
+
maxTokens: body.max_tokens,
|
|
97
|
+
temperature: body.temperature,
|
|
98
|
+
topP: body.top_p,
|
|
99
|
+
stream: body.stream,
|
|
100
|
+
};
|
|
101
|
+
}
|
|
102
|
+
|
|
103
|
+
// ============================================================================
|
|
104
|
+
// Convert to Vercel AI SDK messages
|
|
105
|
+
// ============================================================================
|
|
106
|
+
|
|
107
|
+
function toAIMessages(parsed: ParsedRequest): any[] {
|
|
108
|
+
return parsed.messages.map((m: any) => {
|
|
109
|
+
if (Array.isArray(m.content)) {
|
|
110
|
+
const text = m.content
|
|
111
|
+
.filter((b: any) => b.type === "text")
|
|
112
|
+
.map((b: any) => b.text)
|
|
113
|
+
.join("");
|
|
114
|
+
return { role: m.role, content: text || "" };
|
|
115
|
+
}
|
|
116
|
+
return { role: m.role, content: m.content ?? "" };
|
|
117
|
+
});
|
|
118
|
+
}
|
|
119
|
+
|
|
120
|
+
// ============================================================================
|
|
121
|
+
// Format responses
|
|
122
|
+
// ============================================================================
|
|
123
|
+
|
|
124
|
+
function formatOpenAIResponse(result: any, modelId: string): any {
|
|
125
|
+
return {
|
|
126
|
+
id: result.response?.id ?? `routerx-${Date.now()}`,
|
|
127
|
+
object: "chat.completion",
|
|
128
|
+
created: Math.floor(Date.now() / 1000),
|
|
129
|
+
model: result.response?.modelId ?? modelId,
|
|
130
|
+
choices: [
|
|
131
|
+
{
|
|
132
|
+
index: 0,
|
|
133
|
+
message: { role: "assistant", content: result.text ?? "" },
|
|
134
|
+
finish_reason:
|
|
135
|
+
result.finishReason === "length"
|
|
136
|
+
? "length"
|
|
137
|
+
: result.finishReason === "tool-calls"
|
|
138
|
+
? "tool_calls"
|
|
139
|
+
: "stop",
|
|
140
|
+
},
|
|
141
|
+
],
|
|
142
|
+
usage: {
|
|
143
|
+
prompt_tokens: result.usage?.promptTokens ?? 0,
|
|
144
|
+
completion_tokens: result.usage?.completionTokens ?? 0,
|
|
145
|
+
total_tokens: (result.usage?.promptTokens ?? 0) + (result.usage?.completionTokens ?? 0),
|
|
146
|
+
},
|
|
147
|
+
};
|
|
148
|
+
}
|
|
149
|
+
|
|
150
|
+
function formatAnthropicResponse(result: any, modelId: string): any {
|
|
151
|
+
return {
|
|
152
|
+
id: result.response?.id ?? `routerx-${Date.now()}`,
|
|
153
|
+
type: "message",
|
|
154
|
+
role: "assistant",
|
|
155
|
+
model: result.response?.modelId ?? modelId,
|
|
156
|
+
content: [{ type: "text", text: result.text ?? "" }],
|
|
157
|
+
stop_reason:
|
|
158
|
+
result.finishReason === "length"
|
|
159
|
+
? "max_tokens"
|
|
160
|
+
: result.finishReason === "tool-calls"
|
|
161
|
+
? "tool_use"
|
|
162
|
+
: "end_turn",
|
|
163
|
+
usage: {
|
|
164
|
+
input_tokens: result.usage?.promptTokens ?? 0,
|
|
165
|
+
output_tokens: result.usage?.completionTokens ?? 0,
|
|
166
|
+
},
|
|
167
|
+
};
|
|
168
|
+
}
|
|
169
|
+
|
|
170
|
+
// ============================================================================
|
|
171
|
+
// Streaming formatters
|
|
172
|
+
// ============================================================================
|
|
173
|
+
|
|
174
|
+
function openAIStreamFormatter() {
|
|
175
|
+
return {
|
|
176
|
+
onChunk(chunk: any): string | null {
|
|
177
|
+
if (chunk.type === "text-delta" && chunk.text) {
|
|
178
|
+
return `data: ${JSON.stringify({
|
|
179
|
+
object: "chat.completion.chunk",
|
|
180
|
+
choices: [{ index: 0, delta: { content: chunk.text }, finish_reason: null }],
|
|
181
|
+
})}\n\n`;
|
|
182
|
+
}
|
|
183
|
+
return null;
|
|
184
|
+
},
|
|
185
|
+
onFinish(result: any): string {
|
|
186
|
+
let out = `data: ${JSON.stringify({
|
|
187
|
+
object: "chat.completion.chunk",
|
|
188
|
+
choices: [{ index: 0, delta: {}, finish_reason: "stop" }],
|
|
189
|
+
usage: {
|
|
190
|
+
prompt_tokens: result.usage?.promptTokens ?? 0,
|
|
191
|
+
completion_tokens: result.usage?.completionTokens ?? 0,
|
|
192
|
+
total_tokens: (result.usage?.promptTokens ?? 0) + (result.usage?.completionTokens ?? 0),
|
|
193
|
+
},
|
|
194
|
+
})}\n\n`;
|
|
195
|
+
out += "data: [DONE]\n\n";
|
|
196
|
+
return out;
|
|
197
|
+
},
|
|
198
|
+
};
|
|
199
|
+
}
|
|
200
|
+
|
|
201
|
+
function anthropicStreamFormatter() {
|
|
202
|
+
let blockStarted = false;
|
|
203
|
+
return {
|
|
204
|
+
onStart(modelId: string): string {
|
|
205
|
+
return `event: message_start\ndata: ${JSON.stringify({
|
|
206
|
+
type: "message_start",
|
|
207
|
+
message: {
|
|
208
|
+
id: `routerx-${Date.now()}`,
|
|
209
|
+
type: "message",
|
|
210
|
+
role: "assistant",
|
|
211
|
+
model: modelId,
|
|
212
|
+
content: [],
|
|
213
|
+
usage: { input_tokens: 0, output_tokens: 0 },
|
|
214
|
+
},
|
|
215
|
+
})}\n\n`;
|
|
216
|
+
},
|
|
217
|
+
onChunk(chunk: any): string | null {
|
|
218
|
+
if (chunk.type === "text-delta" && chunk.text) {
|
|
219
|
+
let sse = "";
|
|
220
|
+
if (!blockStarted) {
|
|
221
|
+
blockStarted = true;
|
|
222
|
+
sse += `event: content_block_start\ndata: ${JSON.stringify({
|
|
223
|
+
type: "content_block_start",
|
|
224
|
+
index: 0,
|
|
225
|
+
content_block: { type: "text", text: "" },
|
|
226
|
+
})}\n\n`;
|
|
227
|
+
}
|
|
228
|
+
sse += `event: content_block_delta\ndata: ${JSON.stringify({
|
|
229
|
+
type: "content_block_delta",
|
|
230
|
+
index: 0,
|
|
231
|
+
delta: { type: "text_delta", text: chunk.text },
|
|
232
|
+
})}\n\n`;
|
|
233
|
+
return sse;
|
|
234
|
+
}
|
|
235
|
+
return null;
|
|
236
|
+
},
|
|
237
|
+
onFinish(result: any): string {
|
|
238
|
+
let sse = "";
|
|
239
|
+
sse += `event: content_block_stop\ndata: ${JSON.stringify({
|
|
240
|
+
type: "content_block_stop",
|
|
241
|
+
index: 0,
|
|
242
|
+
})}\n\n`;
|
|
243
|
+
sse += `event: message_delta\ndata: ${JSON.stringify({
|
|
244
|
+
type: "message_delta",
|
|
245
|
+
delta: { stop_reason: "end_turn" },
|
|
246
|
+
usage: { output_tokens: result.usage?.completionTokens ?? 0 },
|
|
247
|
+
})}\n\n`;
|
|
248
|
+
sse += `event: message_stop\ndata: ${JSON.stringify({ type: "message_stop" })}\n\n`;
|
|
249
|
+
return sse;
|
|
250
|
+
},
|
|
251
|
+
};
|
|
252
|
+
}
|
|
253
|
+
|
|
254
|
+
// ============================================================================
|
|
255
|
+
// App Factory
|
|
256
|
+
// ============================================================================
|
|
257
|
+
|
|
258
|
+
export function createRouterX(config: RouterXConfig) {
|
|
259
|
+
const app = new Hono();
|
|
260
|
+
const router = new Router(config.router);
|
|
261
|
+
|
|
262
|
+
// Auth middleware
|
|
263
|
+
if (config.apiKey) {
|
|
264
|
+
app.use("*", async (c, next) => {
|
|
265
|
+
if (c.req.path === "/health") return next();
|
|
266
|
+
const auth = c.req.header("Authorization");
|
|
267
|
+
const apiKey = c.req.header("x-api-key");
|
|
268
|
+
const token = auth?.replace("Bearer ", "") ?? apiKey;
|
|
269
|
+
if (token !== config.apiKey) {
|
|
270
|
+
return c.json({ error: { message: "Invalid API key", type: "authentication_error" } }, 401);
|
|
271
|
+
}
|
|
272
|
+
return next();
|
|
273
|
+
});
|
|
274
|
+
}
|
|
275
|
+
|
|
276
|
+
// OpenAI endpoint
|
|
277
|
+
app.post("/openai/v1/chat/completions", async (c) => {
|
|
278
|
+
return handleRequest(c, parseOpenAIRequest(await c.req.json()), "openai");
|
|
279
|
+
});
|
|
280
|
+
|
|
281
|
+
// Anthropic endpoint
|
|
282
|
+
app.post("/anthropic/v1/messages", async (c) => {
|
|
283
|
+
return handleRequest(c, parseAnthropicRequest(await c.req.json()), "anthropic");
|
|
284
|
+
});
|
|
285
|
+
|
|
286
|
+
// Model list
|
|
287
|
+
app.get("/v1/models", (c) => {
|
|
288
|
+
const models = router.listModels();
|
|
289
|
+
return c.json({
|
|
290
|
+
object: "list",
|
|
291
|
+
data: models.map((m) => ({ id: m.model, object: "model", owned_by: m.providerId })),
|
|
292
|
+
});
|
|
293
|
+
});
|
|
294
|
+
|
|
295
|
+
// Health
|
|
296
|
+
app.get("/health", (c) => c.json({ status: "ok" }));
|
|
297
|
+
|
|
298
|
+
// Core handler
|
|
299
|
+
async function handleRequest(c: any, parsed: ParsedRequest, downstream: "openai" | "anthropic") {
|
|
300
|
+
try {
|
|
301
|
+
const routeResult = router.route(parsed.model);
|
|
302
|
+
if (!routeResult) {
|
|
303
|
+
return c.json({ error: { message: `Model "${parsed.model}" not found` } }, 404);
|
|
304
|
+
}
|
|
305
|
+
|
|
306
|
+
const model = createModel(routeResult.provider, routeResult.model);
|
|
307
|
+
const messages = toAIMessages(parsed);
|
|
308
|
+
|
|
309
|
+
if (parsed.stream) {
|
|
310
|
+
const fmt =
|
|
311
|
+
downstream === "anthropic" ? anthropicStreamFormatter() : openAIStreamFormatter();
|
|
312
|
+
|
|
313
|
+
const result = streamText({
|
|
314
|
+
model,
|
|
315
|
+
messages,
|
|
316
|
+
system: parsed.system,
|
|
317
|
+
maxOutputTokens: parsed.maxTokens,
|
|
318
|
+
temperature: parsed.temperature,
|
|
319
|
+
topP: parsed.topP,
|
|
320
|
+
});
|
|
321
|
+
|
|
322
|
+
return stream(c, async (s) => {
|
|
323
|
+
try {
|
|
324
|
+
if ("onStart" in fmt) {
|
|
325
|
+
await s.write((fmt as any).onStart(routeResult.model));
|
|
326
|
+
}
|
|
327
|
+
for await (const chunk of result.fullStream) {
|
|
328
|
+
const formatted = fmt.onChunk(chunk);
|
|
329
|
+
if (formatted) await s.write(formatted);
|
|
330
|
+
}
|
|
331
|
+
const final = await result;
|
|
332
|
+
await s.write(fmt.onFinish(final));
|
|
333
|
+
} catch (err: any) {
|
|
334
|
+
await s.write(`data: ${JSON.stringify({ error: err.message })}\n\n`);
|
|
335
|
+
}
|
|
336
|
+
});
|
|
337
|
+
}
|
|
338
|
+
|
|
339
|
+
// Non-streaming
|
|
340
|
+
const result = await generateText({
|
|
341
|
+
model,
|
|
342
|
+
messages,
|
|
343
|
+
system: parsed.system,
|
|
344
|
+
maxOutputTokens: parsed.maxTokens,
|
|
345
|
+
temperature: parsed.temperature,
|
|
346
|
+
topP: parsed.topP,
|
|
347
|
+
});
|
|
348
|
+
|
|
349
|
+
return c.json(
|
|
350
|
+
downstream === "anthropic"
|
|
351
|
+
? formatAnthropicResponse(result, routeResult.model)
|
|
352
|
+
: formatOpenAIResponse(result, routeResult.model)
|
|
353
|
+
);
|
|
354
|
+
} catch (err: any) {
|
|
355
|
+
return c.json({ error: { message: err.message ?? "Internal error" } }, 500);
|
|
356
|
+
}
|
|
357
|
+
}
|
|
358
|
+
|
|
359
|
+
return app;
|
|
360
|
+
}
|
package/src/index.ts
ADDED
package/src/serve.ts
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
1
|
+
/**
 * Local development server
 *
 * Usage: bun run dev
 */

import { serve } from "bun";
import { createRouterX } from "./app";

// Upstream credentials for the Volcengine Ark provider; both are required
// and the process exits early if either is missing.
const ARK_API_KEY = process.env.ARK_API_KEY;
const ARK_BASE_URL = process.env.ARK_BASE_URL;

if (!ARK_API_KEY || !ARK_BASE_URL) {
  console.error("Missing ARK_API_KEY or ARK_BASE_URL in environment");
  process.exit(1);
}

// Single-provider router: every listed model id routes to Ark over the
// OpenAI-compatible protocol.
const app = createRouterX({
  router: {
    providers: [
      {
        id: "ark",
        name: "Volcengine Ark",
        protocol: "openai-compatible",
        apiKey: ARK_API_KEY,
        baseUrl: ARK_BASE_URL,
        models: [
          "deepseek-v3-2-251201",
          "deepseek-v3-250324",
          "deepseek-r1-250528",
          "doubao-1-5-pro-32k-250115",
          "doubao-1-5-lite-32k-250115",
          "doubao-seed-2-0-pro-260215",
          "doubao-seed-2-0-code-preview-260215",
          "glm-4-7-251222",
          "qwen3-32b-20250429",
        ],
        priority: 1,
      },
    ],
  },
});

// Listen port; defaults to 3700, overridable via PORT.
const PORT = parseInt(process.env.PORT ?? "3700", 10);

serve({
  fetch: app.fetch,
  port: PORT,
});

console.log(`RouterX running on http://localhost:${PORT}`);
console.log(`
Endpoints:
  POST /openai/v1/chat/completions   (OpenAI protocol)
  POST /anthropic/v1/messages        (Anthropic protocol)
  GET  /v1/models                    (list models)
  GET  /health                       (health check)
`);
|