openclaw-freerouter 1.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,117 @@
1
+ /**
2
+ * Tier → Model Selection
3
+ * Forked from ClawRouter (MIT License). No payment dependencies.
4
+ *
5
+ * Maps a classification tier to the best model from configured providers.
6
+ * Builds RoutingDecision metadata with cost estimates and savings.
7
+ */
8
+
9
+ import type { Tier, TierConfig, RoutingDecision } from "./types.js";
10
+
11
/**
 * Price card for a single model. Both rates are expressed per 1M tokens
 * (divided by 1_000_000 in the cost calculations below).
 */
export type ModelPricing = {
  inputPrice: number; // per 1M tokens
  outputPrice: number; // per 1M tokens
};
15
+
16
+ /**
17
+ * Select the primary model for a tier and build the RoutingDecision.
18
+ */
19
+ export function selectModel(
20
+ tier: Tier,
21
+ confidence: number,
22
+ method: "rules" | "llm",
23
+ reasoning: string,
24
+ tierConfigs: Record<Tier, TierConfig>,
25
+ modelPricing: Map<string, ModelPricing>,
26
+ estimatedInputTokens: number,
27
+ maxOutputTokens: number,
28
+ ): RoutingDecision {
29
+ const tierConfig = tierConfigs[tier];
30
+ const model = tierConfig.primary;
31
+ const pricing = modelPricing.get(model);
32
+
33
+ const inputPrice = pricing?.inputPrice ?? 0;
34
+ const outputPrice = pricing?.outputPrice ?? 0;
35
+ const inputCost = (estimatedInputTokens / 1_000_000) * inputPrice;
36
+ const outputCost = (maxOutputTokens / 1_000_000) * outputPrice;
37
+ const costEstimate = inputCost + outputCost;
38
+
39
+ // Baseline: what the most expensive configured model would cost
40
+ const opusPricing = modelPricing.get("anthropic/claude-opus-4-6");
41
+ const opusInputPrice = opusPricing?.inputPrice ?? 15;
42
+ const opusOutputPrice = opusPricing?.outputPrice ?? 75;
43
+ const baselineInput = (estimatedInputTokens / 1_000_000) * opusInputPrice;
44
+ const baselineOutput = (maxOutputTokens / 1_000_000) * opusOutputPrice;
45
+ const baselineCost = baselineInput + baselineOutput;
46
+
47
+ const savings = baselineCost > 0 ? Math.max(0, (baselineCost - costEstimate) / baselineCost) : 0;
48
+
49
+ return {
50
+ model,
51
+ tier,
52
+ confidence,
53
+ method,
54
+ reasoning,
55
+ costEstimate,
56
+ baselineCost,
57
+ savings,
58
+ };
59
+ }
60
+
61
+ /**
62
+ * Get the ordered fallback chain for a tier: [primary, ...fallbacks].
63
+ */
64
+ export function getFallbackChain(tier: Tier, tierConfigs: Record<Tier, TierConfig>): string[] {
65
+ const config = tierConfigs[tier];
66
+ return [config.primary, ...config.fallback];
67
+ }
68
+
69
+ /**
70
+ * Calculate cost for a specific model.
71
+ */
72
+ export function calculateModelCost(
73
+ model: string,
74
+ modelPricing: Map<string, ModelPricing>,
75
+ estimatedInputTokens: number,
76
+ maxOutputTokens: number,
77
+ ): { costEstimate: number; baselineCost: number; savings: number } {
78
+ const pricing = modelPricing.get(model);
79
+
80
+ const inputPrice = pricing?.inputPrice ?? 0;
81
+ const outputPrice = pricing?.outputPrice ?? 0;
82
+ const inputCost = (estimatedInputTokens / 1_000_000) * inputPrice;
83
+ const outputCost = (maxOutputTokens / 1_000_000) * outputPrice;
84
+ const costEstimate = inputCost + outputCost;
85
+
86
+ const opusPricing = modelPricing.get("anthropic/claude-opus-4-6");
87
+ const opusInputPrice = opusPricing?.inputPrice ?? 15;
88
+ const opusOutputPrice = opusPricing?.outputPrice ?? 75;
89
+ const baselineInput = (estimatedInputTokens / 1_000_000) * opusInputPrice;
90
+ const baselineOutput = (maxOutputTokens / 1_000_000) * opusOutputPrice;
91
+ const baselineCost = baselineInput + baselineOutput;
92
+
93
+ const savings = baselineCost > 0 ? Math.max(0, (baselineCost - costEstimate) / baselineCost) : 0;
94
+
95
+ return { costEstimate, baselineCost, savings };
96
+ }
97
+
98
+ /**
99
+ * Get the fallback chain filtered by context length.
100
+ */
101
+ export function getFallbackChainFiltered(
102
+ tier: Tier,
103
+ tierConfigs: Record<Tier, TierConfig>,
104
+ estimatedTotalTokens: number,
105
+ getContextWindow: (modelId: string) => number | undefined,
106
+ ): string[] {
107
+ const fullChain = getFallbackChain(tier, tierConfigs);
108
+
109
+ const filtered = fullChain.filter((modelId) => {
110
+ const contextWindow = getContextWindow(modelId);
111
+ if (contextWindow === undefined) return true;
112
+ return contextWindow >= estimatedTotalTokens * 1.1;
113
+ });
114
+
115
+ if (filtered.length === 0) return fullChain;
116
+ return filtered;
117
+ }
@@ -0,0 +1,84 @@
1
+ /**
2
+ * Smart Router Types — Forked from ClawRouter (MIT License)
3
+ * Stripped of x402/BlockRun payment layer.
4
+ *
5
+ * Four classification tiers — REASONING is distinct from COMPLEX because
6
+ * reasoning tasks need different models (o3, deepseek-reasoner) than general
7
+ * complex tasks (gpt-4o, sonnet-4).
8
+ *
9
+ * Scoring uses weighted float dimensions with sigmoid confidence calibration.
10
+ */
11
+
12
/** Classification tier assigned to a prompt; REASONING is distinct from COMPLEX. */
export type Tier = "SIMPLE" | "MEDIUM" | "COMPLEX" | "REASONING";

/** Output of the rule-based scorer. */
export type ScoringResult = {
  score: number; // weighted float (roughly [-0.3, 0.4])
  tier: Tier | null; // null = ambiguous, needs fallback
  confidence: number; // sigmoid-calibrated [0, 1]
  signals: string[]; // labels of the scoring signals that fired — presumably for logging; verify against scorer
  agenticScore?: number; // 0-1 agentic task score for auto-switching to agentic tiers
};

/** Final routing decision (built by selectModel). */
export type RoutingDecision = {
  model: string; // chosen model id, e.g. "anthropic/claude-opus-4-6"
  tier: Tier;
  confidence: number; // classifier confidence [0, 1]
  method: "rules" | "llm"; // which classification path produced the tier
  reasoning: string; // human-readable explanation of the decision
  costEstimate: number; // estimated cost of the routed model for this request
  baselineCost: number; // cost if the Opus baseline model were used instead
  savings: number; // 0-1 percentage
};

/** Primary model plus ordered fallbacks for one tier. */
export type TierConfig = {
  primary: string;
  fallback: string[]; // tried in order when the primary fails
};

/** Tunables for the rule-based scorer. */
export type ScoringConfig = {
  tokenCountThresholds: { simple: number; complex: number };
  // Keyword lists — each presumably feeds one scoring dimension; confirm in scorer.
  codeKeywords: string[];
  reasoningKeywords: string[];
  simpleKeywords: string[];
  technicalKeywords: string[];
  creativeKeywords: string[];
  imperativeVerbs: string[];
  constraintIndicators: string[];
  outputFormatKeywords: string[];
  referenceKeywords: string[];
  negationKeywords: string[];
  domainSpecificKeywords: string[];
  agenticTaskKeywords: string[];
  dimensionWeights: Record<string, number>; // per-dimension weights applied to the score
  // Score cut-points between adjacent tiers.
  tierBoundaries: {
    simpleMedium: number;
    mediumComplex: number;
    complexReasoning: number;
  };
  confidenceSteepness: number; // sigmoid steepness for confidence calibration
  confidenceThreshold: number; // below this, classification is presumably deferred to the LLM — verify
};

/** Settings for the LLM fallback classifier. */
export type ClassifierConfig = {
  llmModel: string; // model used to classify ambiguous prompts
  llmMaxTokens: number;
  llmTemperature: number;
  promptTruncationChars: number; // prompt truncated to this length before classification — TODO confirm
  cacheTtlMs: number; // classification cache TTL in milliseconds
};

/** Hard overrides applied on top of scoring. */
export type OverridesConfig = {
  maxTokensForceComplex: number; // requests above this max_tokens presumably force COMPLEX — verify
  structuredOutputMinTier: Tier;
  ambiguousDefaultTier: Tier; // tier used when the scorer returns tier = null
  agenticMode?: boolean;
};

/** Top-level routing configuration. */
export type RoutingConfig = {
  version: string;
  classifier: ClassifierConfig;
  scoring: ScoringConfig;
  tiers: Record<Tier, TierConfig>;
  agenticTiers?: Record<Tier, TierConfig>; // alternate tier table, presumably used when agenticMode is set — verify
  overrides: OverridesConfig;
};
package/src/server.ts ADDED
@@ -0,0 +1,381 @@
1
+ /**
2
+ * FreeRouter Proxy Server — OpenClaw Plugin Edition
3
+ *
4
+ * Exports startServer()/stopServer() for plugin lifecycle management
5
+ * instead of auto-starting on import.
6
+ */
7
+
8
+ import { createServer, type IncomingMessage, type ServerResponse, type Server } from "node:http";
9
+ import { route } from "./router/index.js";
10
+ import { getRoutingConfig } from "./router/config.js";
11
+ import { buildPricingMap } from "./models.js";
12
+ import { forwardRequest, TimeoutError, type ChatRequest } from "./provider.js";
13
+ import { reloadAuth } from "./auth.js";
14
+ import { loadConfig, loadConfigFromPlugin, getConfig, reloadConfig, getSanitizedConfig, getConfigPath } from "./config.js";
15
+ import { logger, setLogLevel } from "./logger.js";
16
+
17
// Pricing map (model id → per-1M-token prices); built once at startup and
// rebuilt by the /reload, /reload-config, and startServer paths.
let modelPricing = buildPricingMap();

// In-memory request statistics; reset on process restart.
const stats = {
  started: new Date().toISOString(), // ISO timestamp of process start
  requests: 0, // chat-completion requests accepted for routing
  errors: 0, // requests where every model attempt failed
  timeouts: 0, // upstream TimeoutError count
  // Per-tier counters; non-listed tiers (e.g. "EXPLICIT") are added lazily
  // via the `?? 0` pattern in handleChatCompletions.
  byTier: { SIMPLE: 0, MEDIUM: 0, COMPLEX: 0, REASONING: 0 } as Record<string, number>,
  byModel: {} as Record<string, number>, // per-model counters, added lazily
};

// Singleton HTTP server handle; null whenever the proxy is stopped.
let _server: Server | null = null;
32
+
33
+ function readBody(req: IncomingMessage): Promise<string> {
34
+ return new Promise((resolve, reject) => {
35
+ const chunks: Buffer[] = [];
36
+ req.on("data", (chunk: Buffer) => chunks.push(chunk));
37
+ req.on("end", () => resolve(Buffer.concat(chunks).toString("utf-8")));
38
+ req.on("error", reject);
39
+ });
40
+ }
41
+
42
+ function sendError(res: ServerResponse, status: number, message: string, type = "server_error") {
43
+ res.writeHead(status, { "Content-Type": "application/json" });
44
+ res.end(JSON.stringify({ error: { message, type, code: status } }));
45
+ }
46
+
47
+ function extractPromptForClassification(messages: ChatRequest["messages"]): {
48
+ prompt: string;
49
+ systemPrompt: string | undefined;
50
+ } {
51
+ let systemPrompt: string | undefined;
52
+ const contextWindow = 3;
53
+ const conversationMsgs: Array<{ role: string; text: string }> = [];
54
+
55
+ for (const msg of messages) {
56
+ const text = typeof msg.content === "string"
57
+ ? msg.content
58
+ : (msg.content ?? []).filter(b => b.type === "text").map(b => b.text ?? "").join("\n");
59
+
60
+ if (msg.role === "system" || msg.role === "developer") {
61
+ systemPrompt = (systemPrompt ? systemPrompt + "\n" : "") + text;
62
+ } else {
63
+ conversationMsgs.push({ role: msg.role, text });
64
+ }
65
+ }
66
+
67
+ const recentMsgs = conversationMsgs.slice(-contextWindow);
68
+ const lastUserMsg = recentMsgs.filter(m => m.role === "user").pop()?.text ?? "";
69
+ const contextParts: string[] = [];
70
+ for (const msg of recentMsgs) {
71
+ if (msg.text !== lastUserMsg) contextParts.push(msg.text.slice(0, 500));
72
+ }
73
+
74
+ const prompt = contextParts.length > 0
75
+ ? contextParts.join("\n") + "\n" + lastUserMsg
76
+ : lastUserMsg;
77
+
78
+ return { prompt, systemPrompt };
79
+ }
80
+
81
+ function detectModeOverride(prompt: string): { tier: string; cleanedPrompt: string } | null {
82
+ const modeMap: Record<string, string> = {
83
+ simple: "SIMPLE", basic: "SIMPLE", cheap: "SIMPLE",
84
+ medium: "MEDIUM", balanced: "MEDIUM",
85
+ complex: "COMPLEX", advanced: "COMPLEX",
86
+ max: "REASONING", reasoning: "REASONING", think: "REASONING", deep: "REASONING",
87
+ };
88
+
89
+ const slashMatch = prompt.match(/^\/([a-z]+)\s+/i);
90
+ if (slashMatch) {
91
+ const mode = slashMatch[1].toLowerCase();
92
+ if (modeMap[mode]) return { tier: modeMap[mode], cleanedPrompt: prompt.slice(slashMatch[0].length).trim() };
93
+ }
94
+
95
+ const prefixMatch = prompt.match(/^([a-z]+)\s+mode[:\s,]+/i);
96
+ if (prefixMatch) {
97
+ const mode = prefixMatch[1].toLowerCase();
98
+ if (modeMap[mode]) return { tier: modeMap[mode], cleanedPrompt: prompt.slice(prefixMatch[0].length).trim() };
99
+ }
100
+
101
+ const bracketMatch = prompt.match(/^\[([a-z]+)\]\s*/i);
102
+ if (bracketMatch) {
103
+ const mode = bracketMatch[1].toLowerCase();
104
+ if (modeMap[mode]) return { tier: modeMap[mode], cleanedPrompt: prompt.slice(bracketMatch[0].length).trim() };
105
+ }
106
+
107
+ return null;
108
+ }
109
+
110
/**
 * POST /v1/chat/completions — validate, classify, route, and forward.
 *
 * Flow: parse body → validate → classify tier (explicit model passthrough,
 * user mode override, or rules/LLM router) → record stats/headers → try the
 * routed model, then each tier fallback, until one forward succeeds.
 */
async function handleChatCompletions(req: IncomingMessage, res: ServerResponse) {
  const bodyStr = await readBody(req);
  let chatReq: ChatRequest;

  try {
    chatReq = JSON.parse(bodyStr);
  } catch {
    return sendError(res, 400, "Invalid JSON body");
  }

  // Minimal OpenAI-shape validation.
  if (!chatReq.model) return sendError(res, 400, "model field is required");
  if (!chatReq.messages || !Array.isArray(chatReq.messages) || chatReq.messages.length === 0) {
    return sendError(res, 400, "messages array is required");
  }

  const stream = chatReq.stream ?? false;
  const maxTokens = chatReq.max_tokens ?? 4096; // default output budget
  const { prompt, systemPrompt } = extractPromptForClassification(chatReq.messages);
  if (!prompt) return sendError(res, 400, "No user message found");

  const requestedModel = chatReq.model ?? "auto";
  let routedModel: string;
  let tier: string;
  let reasoning: string;

  // Any of the "auto" aliases engages routing; anything else is passthrough.
  if (requestedModel === "auto" || requestedModel === "freerouter/auto" || requestedModel === "clawrouter/auto" || requestedModel === "blockrun/auto") {
    const modeOverride = detectModeOverride(prompt);

    if (modeOverride) {
      // User forced a tier (e.g. "/simple ..."); skip classification.
      // NOTE(review): modeOverride.cleanedPrompt is never used — the mode
      // marker is still forwarded to the backend; confirm this is intended.
      const routingCfg = getRoutingConfig();
      const tierConfig = routingCfg.tiers[modeOverride.tier as keyof typeof routingCfg.tiers];
      routedModel = tierConfig?.primary ?? "anthropic/claude-opus-4-6";
      tier = modeOverride.tier;
      reasoning = `user-mode: ${modeOverride.tier.toLowerCase()}`;
      logger.info(`[${stats.requests + 1}] Mode override: tier=${tier} model=${routedModel} | ${reasoning}`);
    } else {
      // Rules/LLM classification.
      const decision = route(prompt, systemPrompt, maxTokens, {
        config: getRoutingConfig(),
        modelPricing,
      });
      routedModel = decision.model;
      tier = decision.tier;
      reasoning = decision.reasoning;
      logger.info(`[${stats.requests + 1}] Classified: tier=${tier} model=${routedModel} confidence=${decision.confidence.toFixed(2)} | ${reasoning}`);
    }
  } else {
    // Caller named a concrete model: forward as-is, no fallbacks.
    routedModel = requestedModel;
    tier = "EXPLICIT";
    reasoning = `explicit model: ${requestedModel}`;
    logger.info(`[${stats.requests + 1}] Passthrough: model=${routedModel}`);
  }

  stats.requests++;
  stats.byTier[tier] = (stats.byTier[tier] ?? 0) + 1;
  stats.byModel[routedModel] = (stats.byModel[routedModel] ?? 0) + 1;

  // Diagnostic headers describing the routing decision.
  // NOTE(review): setHeader throws on invalid header characters; verify
  // `reasoning` can never contain newlines before it reaches here.
  res.setHeader("X-FreeRouter-Model", routedModel);
  res.setHeader("X-FreeRouter-Tier", tier);
  res.setHeader("X-FreeRouter-Reasoning", reasoning.slice(0, 200));

  // Candidate list: routed model first, then the tier's other fallbacks.
  const modelsToTry: string[] = [routedModel];
  if (tier !== "EXPLICIT") {
    const routingCfg = getRoutingConfig();
    const tierConfig = routingCfg.tiers[tier as keyof typeof routingCfg.tiers];
    if (tierConfig?.fallback) {
      for (const fb of tierConfig.fallback) {
        if (fb !== routedModel) modelsToTry.push(fb);
      }
    }
  }

  let lastError: string = "";
  for (const modelToTry of modelsToTry) {
    try {
      if (modelToTry !== routedModel) {
        logger.info(`[${stats.requests}] Falling back to ${modelToTry}`);
        res.setHeader("X-FreeRouter-Model", modelToTry);
      }
      await forwardRequest(chatReq, modelToTry, tier, res, stream);
      return; // success — response already written by forwardRequest
    } catch (err) {
      lastError = err instanceof Error ? err.message : String(err);
      if (err instanceof TimeoutError) {
        stats.timeouts++;
        logger.error(`⏱ TIMEOUT (${modelToTry}): ${lastError}`);
      } else {
        logger.error(`Forward error (${modelToTry}): ${lastError}`);
      }
      // Once headers have gone out we cannot retry on a fresh model.
      if (res.headersSent) break;
    }
  }

  // Every candidate failed.
  stats.errors++;
  if (!res.headersSent) {
    sendError(res, 502, `Backend error: ${lastError}`, "upstream_error");
  } else if (!res.writableEnded) {
    // Mid-stream failure: emit an SSE error event and terminate the stream.
    res.write(`data: ${JSON.stringify({ error: { message: lastError } })}\n\n`);
    res.write("data: [DONE]\n\n");
    res.end();
  }
}
211
+
212
+ function handleListModels(_req: IncomingMessage, res: ServerResponse) {
213
+ const models = [
214
+ { id: "auto", object: "model", created: Math.floor(Date.now() / 1000), owned_by: "freerouter" },
215
+ { id: "anthropic/claude-opus-4-6", object: "model", created: Math.floor(Date.now() / 1000), owned_by: "anthropic" },
216
+ { id: "anthropic/claude-sonnet-4-5", object: "model", created: Math.floor(Date.now() / 1000), owned_by: "anthropic" },
217
+ { id: "anthropic/claude-haiku-4-5", object: "model", created: Math.floor(Date.now() / 1000), owned_by: "anthropic" },
218
+ { id: "kimi-coding/kimi-for-coding", object: "model", created: Math.floor(Date.now() / 1000), owned_by: "kimi-coding" },
219
+ ];
220
+ res.writeHead(200, { "Content-Type": "application/json" });
221
+ res.end(JSON.stringify({ object: "list", data: models }));
222
+ }
223
+
224
+ function handleHealth(_req: IncomingMessage, res: ServerResponse) {
225
+ res.writeHead(200, { "Content-Type": "application/json" });
226
+ res.end(JSON.stringify({ status: "ok", version: "1.3.0", uptime: process.uptime(), stats }));
227
+ }
228
+
229
+ function handleStats(_req: IncomingMessage, res: ServerResponse) {
230
+ res.writeHead(200, { "Content-Type": "application/json" });
231
+ res.end(JSON.stringify(stats, null, 2));
232
+ }
233
+
234
+ function handleConfig(_req: IncomingMessage, res: ServerResponse) {
235
+ res.writeHead(200, { "Content-Type": "application/json" });
236
+ res.end(JSON.stringify({ configPath: getConfigPath(), config: getSanitizedConfig() }, null, 2));
237
+ }
238
+
239
+ function handleReloadConfig(_req: IncomingMessage, res: ServerResponse) {
240
+ reloadConfig();
241
+ reloadAuth();
242
+ modelPricing = buildPricingMap();
243
+ const cfg = getConfig();
244
+ res.writeHead(200, { "Content-Type": "application/json" });
245
+ res.end(JSON.stringify({ status: "reloaded", providers: Object.keys(cfg.providers), tiers: Object.keys(cfg.tiers) }));
246
+ }
247
+
248
+ function handleReload(_req: IncomingMessage, res: ServerResponse) {
249
+ reloadConfig();
250
+ reloadAuth();
251
+ modelPricing = buildPricingMap();
252
+ res.writeHead(200, { "Content-Type": "application/json" });
253
+ res.end(JSON.stringify({ status: "reloaded" }));
254
+ }
255
+
256
+ async function handleRequest(req: IncomingMessage, res: ServerResponse) {
257
+ const method = req.method ?? "GET";
258
+ const url = req.url ?? "/";
259
+
260
+ res.setHeader("Access-Control-Allow-Origin", "*");
261
+ res.setHeader("Access-Control-Allow-Methods", "GET, POST, OPTIONS");
262
+ res.setHeader("Access-Control-Allow-Headers", "Content-Type, Authorization");
263
+
264
+ if (method === "OPTIONS") { res.writeHead(204); res.end(); return; }
265
+
266
+ try {
267
+ if (method === "POST" && (url === "/v1/chat/completions" || url === "/chat/completions")) {
268
+ await handleChatCompletions(req, res);
269
+ } else if (method === "GET" && (url === "/v1/models" || url === "/models")) {
270
+ handleListModels(req, res);
271
+ } else if (method === "GET" && url === "/health") {
272
+ handleHealth(req, res);
273
+ } else if (method === "GET" && url === "/stats") {
274
+ handleStats(req, res);
275
+ } else if (method === "POST" && url === "/reload") {
276
+ handleReload(req, res);
277
+ } else if (method === "GET" && url === "/config") {
278
+ handleConfig(req, res);
279
+ } else if (method === "POST" && url === "/reload-config") {
280
+ handleReloadConfig(req, res);
281
+ } else {
282
+ sendError(res, 404, `Not found: ${method} ${url}`, "not_found");
283
+ }
284
+ } catch (err) {
285
+ const msg = err instanceof Error ? err.message : String(err);
286
+ logger.error(`Unhandled error: ${msg}`);
287
+ if (!res.headersSent) sendError(res, 500, msg);
288
+ }
289
+ }
290
+
291
+ // ═══ Plugin Lifecycle ═══
292
+
293
/** Options accepted by startServer(). */
export type ServerOptions = {
  port?: number; // overrides the configured port
  host?: string; // overrides the configured host (final fallback: 127.0.0.1)
  pluginConfig?: Record<string, unknown>; // inline config from the plugin host; skips file-based loading
  debug?: boolean; // when true, log level is raised to "debug"
};
299
+
300
/**
 * Start the FreeRouter proxy server.
 * Returns a promise that resolves when the server is listening.
 *
 * Idempotent: if a server is already running it is returned immediately and
 * the supplied options are IGNORED.
 */
export function startServer(options: ServerOptions = {}): Promise<Server> {
  // Already running — reuse the existing instance (options not applied).
  if (_server) {
    return Promise.resolve(_server);
  }

  // Load config: plugin-supplied settings take precedence over file loading.
  if (options.pluginConfig) {
    loadConfigFromPlugin(options.pluginConfig);
  } else {
    loadConfig();
  }

  if (options.debug) setLogLevel("debug");

  const cfg = getConfig();
  // Explicit options win over config; host falls back to loopback only.
  const port = options.port ?? cfg.port;
  const host = options.host ?? cfg.host ?? "127.0.0.1";

  // Rebuild pricing now that config has been (re)loaded.
  modelPricing = buildPricingMap();

  return new Promise((resolve, reject) => {
    const server = createServer(handleRequest);

    // NOTE(review): this listener also fires for post-startup errors, where
    // reject() hits an already-settled promise and becomes a no-op.
    server.on("error", (err) => {
      logger.error(`Server error: ${err.message}`);
      reject(err);
    });

    server.listen(port, host, () => {
      _server = server;
      logger.info(`🚀 FreeRouter proxy listening on http://${host}:${port} (config: ${getConfigPath() ?? "built-in defaults"})`);
      logger.info(`  POST /v1/chat/completions — route & forward`);
      logger.info(`  GET /v1/models — list models`);
      logger.info(`  GET /health — health check`);
      logger.info(`  GET /stats — request statistics`);
      logger.info(`  POST /reload — reload auth keys`);
      logger.info(`  GET /config — show config (sanitized)`);
      logger.info(`  POST /reload-config — reload config + auth`);
      resolve(server);
    });
  });
}
346
+
347
+ /**
348
+ * Stop the FreeRouter proxy server.
349
+ */
350
+ export function stopServer(): Promise<void> {
351
+ return new Promise((resolve) => {
352
+ if (!_server) { resolve(); return; }
353
+ logger.info("Shutting down FreeRouter proxy...");
354
+ _server.close(() => {
355
+ _server = null;
356
+ resolve();
357
+ });
358
+ });
359
+ }
360
+
361
+ /**
362
+ * Get the running server instance (or null).
363
+ */
364
+ export function getServer(): Server | null {
365
+ return _server;
366
+ }
367
+
368
// ═══ Standalone mode (when run directly) ═══
// Heuristic entry-point check: argv[1] containing "server" means this module
// was launched directly (e.g. `node dist/server.js`); --no-auto suppresses
// auto-start so tooling can import the module without side effects.
const isDirectRun = process.argv[1]?.includes("server") && !process.argv.includes("--no-auto");
if (isDirectRun) {
  const debug = process.argv.includes("--debug");
  // Env overrides with built-in defaults (port 18800, loopback host).
  const port = parseInt(process.env.FREEROUTER_PORT ?? "18800", 10);
  const host = process.env.FREEROUTER_HOST ?? "127.0.0.1";
  startServer({ port, host, debug }).catch((err) => {
    logger.error(`Failed to start: ${err.message}`);
    process.exit(1);
  });

  // Graceful shutdown on Ctrl-C / service stop.
  process.on("SIGINT", () => { stopServer().then(() => process.exit(0)); });
  process.on("SIGTERM", () => { stopServer().then(() => process.exit(0)); });
}