@tspappsen/elamax 1.2.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,459 @@
1
+ import { approveAll } from "@github/copilot-sdk";
2
+ import { createTools } from "./tools.js";
3
+ import { createWatchdogTools } from "./watchdog-tools.js";
4
+ import { getOrchestratorSystemMessage } from "./system-message.js";
5
+ import { config, DEFAULT_MODEL } from "../config.js";
6
+ import { loadMcpConfig } from "./mcp-config.js";
7
+ import { getSkillDirectories } from "./skills.js";
8
+ import { resetClient } from "./client.js";
9
+ import { logConversation, getState, setState, deleteState, getMemorySummary, getRecentConversation } from "../store/db.js";
10
+ import { IS_WATCHDOG, INSTRUCTIONS_DIR, SESSIONS_DIR } from "../paths.js";
11
+ import { resolveModel } from "./router.js";
12
+ import { watchInstructions, seedDefaultInstructions } from "./workspace-instructions.js";
13
// Max number of automatic retries for recoverable (transport-level) errors.
const MAX_RETRIES = 3;
// Backoff schedule between retries; the last delay is reused once attempts exceed the list.
const RECONNECT_DELAYS_MS = [1_000, 3_000, 10_000];
// How often the background health check polls the client connection state.
const HEALTH_CHECK_INTERVAL_MS = 30_000;
// State-store key under which the persistent orchestrator session id is saved.
const ORCHESTRATOR_SESSION_KEY = "orchestrator_session_id";
17
// Message-logging hook — a no-op until the host registers one.
let logMessage = () => {};
/** Register the function used to log inbound/outbound messages. */
export function setMessageLogger(fn) {
    logMessage = fn;
}
// Optional hook for delivering proactive (unprompted) notifications.
let proactiveNotifyFn;
/** Register the callback used to deliver proactive notifications. */
export function setProactiveNotify(fn) {
    proactiveNotifyFn = fn;
}
25
// Shared Copilot SDK client (set by initOrchestrator, replaced on reconnect by ensureClient).
let copilotClient;
// Live background workers keyed by worker name.
const workers = new Map();
// Handle for the periodic connection health check (see startHealthCheck).
let healthCheckTimer;
// Router state — tracks model across the session
let currentSessionModel;
// Tiers of recent turns, chronological (most recent last); used for follow-up inheritance.
let recentTiers = [];
// Result of the most recent routing decision (model, tier, switched, routerMode).
let lastRouteResult;
/** Return the most recent routing decision, or undefined before any routing has run. */
export function getLastRouteResult() {
    return lastRouteResult;
}
35
// Persistent orchestrator session
let orchestratorSession;
// Coalesces concurrent ensureOrchestratorSession calls
let sessionCreatePromise;
// Bumped by invalidateSession(); a create that straddles a bump is discarded.
let sessionGeneration = 0;
// FIFO of pending prompts; processQueue drains it one item at a time.
const messageQueue = [];
// True while processQueue is draining (only one drain loop may run at once).
let processing = false;
// Streaming callback of the turn currently executing (checked by cancelCurrentMessage).
let currentCallback;
/** The channel currently being processed — tools use this to tag new workers. */
let currentSourceChannel;
// Discord channel id of the message currently being processed (if any).
let currentSourceChannelId;
// Discord thread id of the message currently being processed (if any).
let currentSourceThreadId;
/** Get the channel that originated the message currently being processed. */
export function getCurrentSourceChannel() {
    return currentSourceChannel;
}
/** Get the Discord channel id of the message currently being processed. */
export function getCurrentSourceChannelId() {
    return currentSourceChannelId;
}
/** Get the Discord thread id of the message currently being processed. */
export function getCurrentSourceThreadId() {
    return currentSourceThreadId;
}
57
/**
 * Build the session configuration (tools, MCP servers, skill directories)
 * for the current mode. Watchdog processes get a minimal tool set with no
 * MCP servers and no skills.
 */
function getSessionConfig() {
    if (IS_WATCHDOG) {
        return {
            tools: createWatchdogTools(),
            mcpServers: {},
            skillDirectories: [],
        };
    }
    return {
        tools: createTools({
            client: copilotClient,
            workers,
            onWorkerComplete: feedBackgroundResult,
        }),
        mcpServers: loadMcpConfig(),
        skillDirectories: getSkillDirectories(),
    };
}
71
/** Feed a background worker result into the orchestrator as a new turn. */
export function feedBackgroundResult(workerName, result, isFailure) {
    const worker = workers.get(workerName);
    const channel = worker?.originChannel;
    const discordChannelId = worker?.originChannelId;
    const discordThreadId = worker?.originThreadId;
    const status = isFailure ? "FAILED" : "finished";
    let prompt = `[Background task completed] Worker '${workerName}' ${status}:\n\n${result}`;
    if (isFailure) {
        // Nudge the orchestrator to distill reusable lessons out of failures.
        prompt += `\n\n---\nThis worker failed. If the failure reveals a reusable lesson — a tool quirk, environment gotcha, API behavior, setup requirement, or pattern worth remembering — extract it and call learn_skill to save it as a skill (slug: lesson-<topic>). If the failure is task-specific with no transferable insight, skip it.`;
    }
    sendToOrchestrator(prompt, { type: "background" }, (text, done) => {
        // Route the final response back to wherever the worker originated.
        if (done && proactiveNotifyFn) {
            proactiveNotifyFn(text, channel, discordChannelId, discordThreadId);
        }
    });
}
87
/** Pause for `ms` milliseconds. */
function sleep(ms) {
    return new Promise((done) => {
        setTimeout(done, ms);
    });
}
90
/** Drop the current orchestrator session so the next message rebuilds it from scratch. */
export function invalidateSession() {
    // Bump the generation first so any in-flight creation is discarded.
    sessionGeneration++;
    orchestratorSession = undefined;
    sessionCreatePromise = undefined;
    currentSessionModel = undefined;
    lastRouteResult = undefined;
    // Forget the persisted session id so we don't try to resume it.
    deleteState(ORCHESTRATOR_SESSION_KEY);
}
98
/** Ensure the SDK client is connected, resetting if necessary. Coalesces concurrent resets. */
let resetPromise;
async function ensureClient() {
    // Fast path: client exists and reports a live connection.
    if (copilotClient && copilotClient.getState() === "connected") {
        return copilotClient;
    }
    // Only one reset runs at a time; concurrent callers await the same promise.
    if (!resetPromise) {
        console.log(`[max] Client not connected (state: ${copilotClient?.getState() ?? "null"}), resetting…`);
        resetPromise = resetClient().then((c) => {
            console.log(`[max] Client reset successful, state: ${c.getState()}`);
            copilotClient = c;
            return c;
        }).finally(() => { resetPromise = undefined; }); // allow a fresh reset next time
    }
    return resetPromise;
}
114
/** Start periodic health check that proactively reconnects the client. */
function startHealthCheck() {
    // Idempotent — only one timer is ever created.
    if (healthCheckTimer)
        return;
    healthCheckTimer = setInterval(async () => {
        // Nothing to check until initOrchestrator has supplied a client.
        if (!copilotClient)
            return;
        try {
            const state = copilotClient.getState();
            if (state !== "connected") {
                console.log(`[max] Health check: client state is '${state}', resetting…`);
                await ensureClient();
                // Session may need recovery after client reset
                orchestratorSession = undefined;
                currentSessionModel = undefined;
            }
        }
        catch (err) {
            // Best-effort probe: a failure must never kill the interval.
            console.error(`[max] Health check error:`, err instanceof Error ? err.message : err);
        }
    }, HEALTH_CHECK_INTERVAL_MS);
}
136
/** Create or resume the persistent orchestrator session. */
async function ensureOrchestratorSession() {
    // Fast path: a live session already exists.
    if (orchestratorSession)
        return orchestratorSession;
    // Coalesce concurrent callers — wait for an in-flight creation
    if (sessionCreatePromise)
        return sessionCreatePromise;
    // Snapshot the generation so we can detect invalidateSession() racing with creation.
    const generation = sessionGeneration;
    sessionCreatePromise = createOrResumeSession();
    try {
        const session = await sessionCreatePromise;
        if (generation !== sessionGeneration) {
            // Invalidated while creating — discard the stale session (best-effort destroy).
            session.destroy().catch(() => { });
            throw new Error("Orchestrator session was invalidated during creation");
        }
        orchestratorSession = session;
        return session;
    }
    finally {
        // Clear the coalescing promise only if it still belongs to this generation;
        // invalidateSession() already replaced it otherwise.
        if (generation === sessionGeneration) {
            sessionCreatePromise = undefined;
        }
    }
}
160
/**
 * Internal: actually create or resume a session (not concurrency-safe — use
 * ensureOrchestratorSession). Resume is attempted first when a session id was
 * persisted; on failure a fresh session is created and recent conversation
 * context is injected best-effort.
 * @returns the live SDK session
 */
async function createOrResumeSession() {
    const client = await ensureClient();
    const { tools, mcpServers, skillDirectories } = getSessionConfig();
    const memorySummary = IS_WATCHDOG ? undefined : getMemorySummary();
    const infiniteSessions = {
        enabled: true,
        backgroundCompactionThreshold: 0.80,
        bufferExhaustionThreshold: 0.95,
    };
    // Shared options for both the resume and create paths (previously duplicated inline).
    const sessionOptions = {
        model: config.copilotModel,
        configDir: SESSIONS_DIR,
        streaming: true,
        systemMessage: {
            content: getOrchestratorSystemMessage(memorySummary || undefined, { selfEditEnabled: config.selfEditEnabled }),
        },
        tools,
        mcpServers,
        skillDirectories,
        onPermissionRequest: approveAll,
        infiniteSessions,
    };
    // Try to resume a previous session
    const savedSessionId = getState(ORCHESTRATOR_SESSION_KEY);
    if (savedSessionId) {
        try {
            console.log(`[max] Resuming orchestrator session ${savedSessionId.slice(0, 8)}…`);
            const session = await client.resumeSession(savedSessionId, sessionOptions);
            console.log(`[max] Resumed orchestrator session successfully`);
            currentSessionModel = config.copilotModel;
            return session;
        }
        catch (err) {
            // Stale/unknown id — forget it and fall through to creating a new session.
            console.log(`[max] Could not resume session: ${err instanceof Error ? err.message : err}. Creating new.`);
            deleteState(ORCHESTRATOR_SESSION_KEY);
        }
    }
    // Create a fresh session
    console.log(`[max] Creating new persistent orchestrator session`);
    const session = await client.createSession(sessionOptions);
    // Persist the session ID for future restarts
    setState(ORCHESTRATOR_SESSION_KEY, session.sessionId);
    console.log(`[max] Created orchestrator session ${session.sessionId.slice(0, 8)}…`);
    // Recover conversation context if available (session was lost, not first run)
    const recentHistory = getRecentConversation(10);
    if (recentHistory) {
        console.log(`[max] Injecting recent conversation context into new session`);
        try {
            await session.sendAndWait({
                prompt: `[System: Session recovered] Your previous session was lost. Here's the recent conversation for context — do NOT respond to these messages, just absorb the context silently:\n\n${recentHistory}\n\n(End of recovery context. Wait for the next real message.)`,
            }, 60_000);
        }
        catch (err) {
            // Context recovery is best-effort; the session works without it.
            console.log(`[max] Context recovery injection failed (non-fatal): ${err instanceof Error ? err.message : err}`);
        }
    }
    currentSessionModel = config.copilotModel;
    return session;
}
231
/**
 * Wire up the orchestrator: validate the configured model, start the health
 * check, watch workspace instructions, and eagerly create/resume the session.
 * @param client - connected Copilot SDK client
 */
export async function initOrchestrator(client) {
    copilotClient = client;
    const { mcpServers, skillDirectories } = getSessionConfig();
    // Validate configured model against available models
    try {
        const models = await client.listModels();
        const configured = config.copilotModel;
        const isAvailable = models.some((m) => m.id === configured);
        if (!isAvailable) {
            console.log(`[max] ⚠️ Configured model '${configured}' is not available. Falling back to '${DEFAULT_MODEL}'.`);
            config.copilotModel = DEFAULT_MODEL;
        }
    }
    catch (err) {
        // Non-fatal: model listing can fail (e.g. offline); proceed with the configured value.
        console.log(`[max] Could not validate model (will use '${config.copilotModel}' as-is): ${err instanceof Error ? err.message : err}`);
    }
    console.log(`[max] Loading ${Object.keys(mcpServers).length} MCP server(s): ${Object.keys(mcpServers).join(", ") || "(none)"}`);
    console.log(`[max] Skill directories: ${skillDirectories.join(", ") || "(none)"}`);
    console.log(`[max] Persistent session mode — conversation history maintained by SDK`);
    startHealthCheck();
    // Seed default instruction templates if missing, then watch for changes
    if (!IS_WATCHDOG) {
        seedDefaultInstructions();
        watchInstructions(() => {
            console.log(`[max] Workspace instructions changed — rebuilding session`);
            invalidateSession();
        });
        console.log(`[max] Watching ${INSTRUCTIONS_DIR} for workspace instructions`);
    }
    // Eagerly create/resume the orchestrator session
    try {
        await ensureOrchestratorSession();
    }
    catch (err) {
        // Non-fatal: the first message will retry session creation.
        console.error(`[max] Failed to create initial session (will retry on first message):`, err instanceof Error ? err.message : err);
    }
}
268
/**
 * Send a prompt on the persistent session, return the response.
 * Delta events are accumulated and forwarded to `callback(text, false)`;
 * the caller is responsible for the final `done` callback.
 * @param prompt - text to send
 * @param callback - streaming callback `(accumulatedText, done)`
 * @param attachments - optional attachments forwarded to the SDK
 * @param timeoutMs - max time to wait for the turn (default 5 minutes; previously hard-coded)
 * @returns the final response content
 */
async function executeOnSession(prompt, callback, attachments, timeoutMs = 300_000) {
    const session = await ensureOrchestratorSession();
    currentCallback = callback;
    let accumulated = "";
    let toolCallExecuted = false;
    const unsubToolDone = session.on("tool.execution_complete", () => {
        toolCallExecuted = true;
    });
    const unsubDelta = session.on("assistant.message_delta", (event) => {
        // After a tool call completes, ensure a line break separates the text blocks
        // so they don't visually run together in the TUI.
        if (toolCallExecuted && accumulated.length > 0 && !accumulated.endsWith("\n")) {
            accumulated += "\n";
        }
        toolCallExecuted = false;
        accumulated += event.data.deltaContent;
        callback(accumulated, false);
    });
    try {
        const result = await session.sendAndWait({ prompt, attachments }, timeoutMs);
        const finalContent = result?.data?.content || accumulated || "(No response)";
        return finalContent;
    }
    catch (err) {
        // If the session is broken, invalidate it so it's recreated on next attempt
        const msg = err instanceof Error ? err.message : String(err);
        if (/closed|destroy|disposed|invalid|expired|not found/i.test(msg)) {
            console.log(`[max] Session appears dead, will recreate: ${msg}`);
            invalidateSession();
        }
        throw err;
    }
    finally {
        // Always detach listeners and clear the cancel hook, success or failure.
        unsubDelta();
        unsubToolDone();
        currentCallback = undefined;
    }
}
307
/** Process the message queue one at a time. */
async function processQueue() {
    // Re-entrancy guard: a single drain loop owns the queue.
    if (processing) {
        if (messageQueue.length > 0) {
            console.log(`[max] Message queued (${messageQueue.length} waiting — orchestrator is busy)`);
        }
        return;
    }
    processing = true;
    while (messageQueue.length > 0) {
        const item = messageQueue.shift();
        // Expose the originating channel so tools can tag workers they spawn.
        currentSourceChannel = item.sourceChannel;
        currentSourceChannelId = item.sourceChannelId;
        currentSourceThreadId = item.sourceThreadId;
        try {
            if (item.attachments && item.attachments.length > 0) {
                // Preserve the currently selected model for attachment turns so image analysis
                // isn't auto-routed to a non-vision model mid-request.
                lastRouteResult = {
                    model: currentSessionModel || config.copilotModel,
                    tier: null,
                    switched: false,
                    routerMode: "manual",
                };
            }
            else {
                const routeResult = await resolveModel(item.prompt, currentSessionModel || config.copilotModel, recentTiers, copilotClient);
                if (routeResult.switched) {
                    console.log(`[max] Auto: switching to ${routeResult.model} (${routeResult.overrideName || routeResult.tier})`);
                    config.copilotModel = routeResult.model;
                    // Rebuild the session so the new model takes effect.
                    invalidateSession();
                }
                if (routeResult.tier) {
                    // Keep only the five most recent tiers for follow-up inheritance.
                    recentTiers.push(routeResult.tier);
                    if (recentTiers.length > 5)
                        recentTiers = recentTiers.slice(-5);
                }
                lastRouteResult = routeResult;
            }
            const result = await executeOnSession(item.prompt, item.callback, item.attachments);
            item.resolve(result);
        }
        catch (err) {
            item.reject(err);
        }
        // Clear source-channel context between items.
        currentSourceChannel = undefined;
        currentSourceChannelId = undefined;
        currentSourceThreadId = undefined;
    }
    processing = false;
}
358
/**
 * Heuristic: does this error look like a transient transport/environment
 * failure (timeouts, dropped connections, stale processes) worth retrying?
 */
const RECOVERABLE_PATTERN = /timeout|disconnect|connection|EPIPE|ECONNRESET|ECONNREFUSED|socket|closed|ENOENT|spawn|not found|expired|stale/i;
function isRecoverableError(err) {
    const message = err instanceof Error ? err.message : String(err);
    return RECOVERABLE_PATTERN.test(message);
}
362
/**
 * Queue a prompt for the orchestrator and deliver the response via
 * `callback(text, done)`. Recoverable transport errors are retried with
 * backoff (RECONNECT_DELAYS_MS, up to MAX_RETRIES); cancelled messages are not.
 * @param prompt - user or system text
 * @param source - origin descriptor: { type: "telegram"|"discord"|"tui"|"background", ... }
 * @param callback - streaming callback `(text, done)`
 * @param options - optional { attachments }
 */
export async function sendToOrchestrator(prompt, source, callback, options) {
    const sourceLabel = source.type === "telegram" ? "telegram" :
        source.type === "discord" ? "discord" :
            source.type === "tui" ? "tui" : "background";
    logMessage("in", sourceLabel, prompt);
    // Tag the prompt with its source channel
    const taggedPrompt = source.type === "background"
        ? prompt
        : `[via ${sourceLabel}] ${prompt}`;
    // Log role: background events are "system", user messages are "user"
    const logRole = source.type === "background" ? "system" : "user";
    // Determine the source channel for worker origin tracking
    const sourceChannel = source.type === "telegram" ? "telegram" :
        source.type === "discord" ? "discord" :
            source.type === "tui" ? "tui" : undefined;
    // Enqueue and process
    return (async () => {
        for (let attempt = 0; attempt <= MAX_RETRIES; attempt++) {
            try {
                const finalContent = await new Promise((resolve, reject) => {
                    messageQueue.push({
                        prompt: taggedPrompt,
                        attachments: options?.attachments,
                        callback,
                        sourceChannel,
                        sourceChannelId: source.type === "discord" ? source.channelId : undefined,
                        sourceThreadId: source.type === "discord" ? source.threadId : undefined,
                        resolve,
                        reject,
                    });
                    processQueue();
                });
                // Deliver response to user FIRST, then log best-effort
                callback(finalContent, true);
                try {
                    logMessage("out", sourceLabel, finalContent);
                }
                catch { /* best-effort */ }
                // Log both sides of the conversation after delivery
                try {
                    logConversation(logRole, prompt, sourceLabel);
                }
                catch { /* best-effort */ }
                try {
                    logConversation("assistant", finalContent, sourceLabel);
                }
                catch { /* best-effort */ }
                return;
            }
            catch (err) {
                const msg = err instanceof Error ? err.message : String(err);
                // Don't retry cancelled messages
                if (/cancelled|abort/i.test(msg)) {
                    return;
                }
                if (isRecoverableError(err) && attempt < MAX_RETRIES) {
                    // Last delay in the schedule is reused for any further attempts.
                    const delay = RECONNECT_DELAYS_MS[Math.min(attempt, RECONNECT_DELAYS_MS.length - 1)];
                    console.error(`[max] Recoverable error: ${msg}. Retry ${attempt + 1}/${MAX_RETRIES} after ${delay}ms…`);
                    await sleep(delay);
                    // Reset client before retry in case the connection is stale
                    try {
                        await ensureClient();
                    }
                    catch { /* will fail again on next attempt */ }
                    continue;
                }
                // Non-recoverable (or retries exhausted): surface the error to the user.
                console.error(`[max] Error processing message: ${msg}`);
                callback(`Error: ${msg}`, true);
                return;
            }
        }
    })();
}
435
/**
 * Cancel the in-flight message and drain the queue.
 * @returns true when something was actually cancelled (an active request was
 * aborted, or at least one queued message was rejected).
 */
export async function cancelCurrentMessage() {
    // Reject everything still waiting in the queue.
    const drained = messageQueue.length;
    for (let item = messageQueue.shift(); item !== undefined; item = messageQueue.shift()) {
        item.reject(new Error("Cancelled"));
    }
    // Abort the active session request, if one is running.
    if (orchestratorSession && currentCallback) {
        try {
            await orchestratorSession.abort();
            console.log(`[max] Aborted in-flight request`);
            return true;
        }
        catch (err) {
            // Fall through — report based on what we drained instead.
            console.error(`[max] Abort failed:`, err instanceof Error ? err.message : err);
        }
    }
    return drained > 0;
}
456
/** Expose the live background-worker map (worker name → worker record). */
export function getWorkers() {
    return workers;
}
459
+ //# sourceMappingURL=orchestrator.js.map
@@ -0,0 +1,147 @@
1
+ import { getState, setState } from "../store/db.js";
2
+ import { classifyWithLLM } from "./classifier.js";
3
+ import { parseJSON } from "../utils/parseJSON.js";
4
+ // ---------------------------------------------------------------------------
5
+ // Default configuration
6
+ // ---------------------------------------------------------------------------
7
+ const DEFAULT_CONFIG = {
8
+ enabled: false,
9
+ tierModels: {
10
+ fast: "gpt-4.1",
11
+ standard: "claude-sonnet-4.6",
12
+ premium: "claude-opus-4.6",
13
+ },
14
+ overrides: [
15
+ {
16
+ name: "design",
17
+ keywords: [
18
+ "design", "ui", "ux", "css", "layout", "styling", "visual",
19
+ "mockup", "wireframe", "frontend design", "tailwind", "responsive",
20
+ ],
21
+ model: "claude-opus-4.6",
22
+ },
23
+ ],
24
+ cooldownMessages: 2,
25
+ };
26
+ // ---------------------------------------------------------------------------
27
+ // Module-level state
28
+ // ---------------------------------------------------------------------------
29
+ let messagesSinceSwitch = Infinity;
30
+ // Short replies that should inherit the previous turn's tier
31
+ const FOLLOW_UP_PATTERNS = [
32
+ "yes", "no", "do it", "go ahead", "sure", "sounds good", "looks good",
33
+ "perfect", "+1", "please", "yep", "yup", "nope", "nah", "ok", "okay",
34
+ "got it", "cool", "nice", "great", "alright", "right",
35
+ ];
36
+ // ---------------------------------------------------------------------------
37
+ // Helpers
38
+ // ---------------------------------------------------------------------------
39
/** Strip channel prefixes and trim whitespace. */
function sanitize(prompt) {
    // Applied sequentially, so stacked prefixes are all removed.
    const channelPrefixes = [
        /^\[via telegram\]\s*/i,
        /^\[via discord\]\s*/i,
        /^\[via tui\]\s*/i,
    ];
    return channelPrefixes
        .reduce((text, prefix) => text.replace(prefix, ""), prompt)
        .trim();
}
47
/** Word-boundary match that avoids partial-word hits (e.g. "ui" ≠ "fruit"). */
function wordMatch(text, keyword) {
    // Escape regex metacharacters so keywords are always treated literally.
    const literal = keyword.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");
    const boundary = new RegExp(`\\b${literal}\\b`, "i");
    return boundary.test(text);
}
52
+ // ---------------------------------------------------------------------------
53
+ // Config management
54
+ // ---------------------------------------------------------------------------
55
/**
 * Load the router config from the state store, overlaying stored values on
 * DEFAULT_CONFIG. tierModels is merged key-by-key so a partially-stored map
 * cannot drop default tiers (the previous shallow spread replaced the whole
 * tierModels object). Falls back to defaults on parse failure.
 * @returns a fresh config object (safe for the caller to mutate)
 */
export function getRouterConfig() {
    const stored = getState("router_config");
    if (!stored) {
        return { ...DEFAULT_CONFIG };
    }
    try {
        const parsed = parseJSON(stored) ?? {};
        return {
            ...DEFAULT_CONFIG,
            ...parsed,
            // Deep-merge tierModels so missing tiers keep their default model.
            tierModels: { ...DEFAULT_CONFIG.tierModels, ...(parsed.tierModels ?? {}) },
        };
    }
    catch {
        return { ...DEFAULT_CONFIG };
    }
}
67
/**
 * Shallow-merge `partial` into the stored router config and persist it.
 * tierModels is merged key-by-key; overrides are replaced wholesale when given.
 * @returns the merged config that was saved.
 */
export function updateRouterConfig(partial) {
    const current = getRouterConfig();
    const tierModels = { ...current.tierModels, ...(partial.tierModels ?? {}) };
    const overrides = partial.overrides ?? current.overrides;
    const merged = { ...current, ...partial, tierModels, overrides };
    setState("router_config", JSON.stringify(merged));
    return merged;
}
81
+ // ---------------------------------------------------------------------------
82
+ // Classification
83
+ // ---------------------------------------------------------------------------
84
/**
 * Classify a message using GPT-4.1. Falls back to "standard" if the LLM
 * is unavailable. Background tasks and follow-ups are handled deterministically.
 * @param prompt - raw (possibly channel-tagged) prompt text
 * @param recentTiers - tiers of recent turns in chronological order (most recent LAST)
 * @param client - optional Copilot client used for LLM classification
 * @returns a tier string ("fast" | "standard" | "premium")
 */
async function classifyMessage(prompt, recentTiers, client) {
    const text = sanitize(prompt);
    const lower = text.toLowerCase();
    // Background tasks → always standard
    if (lower.startsWith("[background task completed]"))
        return "standard";
    // Short follow-ups inherit the previous tier
    if (text.length < 20 && recentTiers.length > 0) {
        const isFollowUp = FOLLOW_UP_PATTERNS.some((p) => lower === p || lower === p + ".");
        if (isFollowUp) {
            // BUGFIX: recentTiers is built chronologically (push + slice(-5)), so the
            // previous turn's tier is the LAST element — index 0 is the oldest retained.
            return recentTiers[recentTiers.length - 1];
        }
    }
    // LLM classification
    if (client) {
        const tier = await classifyWithLLM(client, text);
        if (tier) {
            console.log(`[max] Classifier: ${tier}`);
            return tier;
        }
    }
    // Fallback — standard is always safe
    console.log(`[max] Classifier (fallback): standard`);
    return "standard";
}
112
// ---------------------------------------------------------------------------
// Main entry point
// ---------------------------------------------------------------------------
/**
 * Decide which model should handle `prompt`.
 * Order of precedence: manual mode → keyword overrides → LLM tier
 * classification gated by a switch cooldown.
 * @param prompt - raw prompt text (may carry a channel tag)
 * @param currentModel - model currently in use
 * @param recentTiers - tiers of recent turns (chronological)
 * @param client - optional Copilot client for LLM classification
 * @returns { model, tier, switched, routerMode, overrideName? }
 */
export async function resolveModel(prompt, currentModel, recentTiers, client) {
    const config = getRouterConfig();
    // Router disabled → manual mode
    if (!config.enabled) {
        messagesSinceSwitch = Infinity; // reset so re-enabling never starts mid-cooldown
        return { model: currentModel, tier: null, switched: false, routerMode: "manual" };
    }
    const text = sanitize(prompt);
    // 1. Check overrides first — they bypass cooldown
    for (const rule of config.overrides) {
        if (rule.keywords.some((kw) => wordMatch(text, kw))) {
            const switched = rule.model !== currentModel;
            if (switched)
                messagesSinceSwitch = 0;
            return { model: rule.model, tier: null, overrideName: rule.name, switched, routerMode: "auto" };
        }
    }
    // 2. Classify the message
    const tier = await classifyMessage(prompt, recentTiers, client);
    const targetModel = config.tierModels[tier];
    const wouldSwitch = targetModel !== currentModel;
    // 3. Cooldown — prevent rapid switching
    if (wouldSwitch && messagesSinceSwitch < config.cooldownMessages) {
        messagesSinceSwitch++;
        return { model: currentModel, tier, switched: false, routerMode: "auto" };
    }
    if (wouldSwitch)
        messagesSinceSwitch = 0;
    else
        messagesSinceSwitch++;
    return { model: targetModel, tier, switched: wouldSwitch, routerMode: "auto" };
}
147
+ //# sourceMappingURL=router.js.map