@houtini/lm 2.10.0 → 2.11.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +86 -10
- package/dist/index.js +524 -87
- package/dist/index.js.map +1 -1
- package/dist/model-cache.d.ts +54 -0
- package/dist/model-cache.js +205 -11
- package/dist/model-cache.js.map +1 -1
- package/package.json +2 -1
- package/server.json +2 -2
package/dist/index.js
CHANGED
@@ -8,7 +8,7 @@
 import { Server } from '@modelcontextprotocol/sdk/server/index.js';
 import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js';
 import { CallToolRequestSchema, ListToolsRequestSchema, ListResourcesRequestSchema, ReadResourceRequestSchema, } from '@modelcontextprotocol/sdk/types.js';
-import { profileModelsAtStartup, getCachedProfile, toModelProfile as cachedToProfile, getHFEnrichmentLine, getPromptHints, getThinkingSupport, } from './model-cache.js';
+import { profileModelsAtStartup, getCachedProfile, toModelProfile as cachedToProfile, getHFEnrichmentLine, getPromptHints, getThinkingSupport, recordPerformance, getAllPerformance, getLifetimeTotals, } from './model-cache.js';
 import { readFile } from 'node:fs/promises';
 import { isAbsolute, basename } from 'node:path';
 const LM_BASE_URL = process.env.LM_STUDIO_URL || 'http://localhost:1234';
@@ -19,7 +19,9 @@ const DEFAULT_TEMPERATURE = 0.3;
 const CONNECT_TIMEOUT_MS = 5000;
 const INFERENCE_CONNECT_TIMEOUT_MS = 30_000; // generous connect timeout for inference
 const SOFT_TIMEOUT_MS = 300_000; // 5 min — progress notifications reset MCP client timeout, so this is a safety net not the primary limit
-const READ_CHUNK_TIMEOUT_MS = 30_000; // max wait for a single SSE chunk
+const READ_CHUNK_TIMEOUT_MS = 30_000; // max wait for a single SSE chunk mid-stream
+const PREFILL_TIMEOUT_MS = 180_000; // max wait for the FIRST chunk — prompt prefill on slow hardware with big inputs can legitimately take 1-2 min
+const PREFILL_KEEPALIVE_MS = 10_000; // fire a progress notification every N ms while waiting for prefill to finish
 const FALLBACK_CONTEXT_LENGTH = parseInt(process.env.LM_CONTEXT_WINDOW || '100000', 10);
 // ── Session-level token accounting ───────────────────────────────────
 // Tracks cumulative tokens offloaded to the local LLM across all calls
@@ -32,17 +34,65 @@ const session = {
     /** Per-model performance tracking for routing insights */
     modelStats: new Map(),
 };
+// Lifetime mirror — kept in sync with the SQLite `model_performance` table
+// so the footer/discover path stays synchronous. Hydrated once at startup
+// from `getAllPerformance()`, then updated in-memory alongside every DB
+// write in `recordUsage`. Also updated after the async DB write completes
+// so counters can only ever run a tick behind, never ahead.
+const lifetime = {
+    totalCalls: 0,
+    totalTokens: 0,
+    modelsUsed: 0,
+    firstSeenAt: null,
+    /** Per-model lifetime stats — same shape as session.modelStats for easy formatting. */
+    modelStats: new Map(),
+};
+async function hydrateLifetimeFromDb() {
+    try {
+        const totals = await getLifetimeTotals();
+        lifetime.totalCalls = totals.totalCalls;
+        lifetime.totalTokens = totals.totalTokens;
+        lifetime.modelsUsed = totals.modelsUsed;
+        lifetime.firstSeenAt = totals.firstSeenAt;
+        const rows = await getAllPerformance();
+        lifetime.modelStats.clear();
+        for (const r of rows) {
+            lifetime.modelStats.set(r.modelId, {
+                calls: r.totalCalls,
+                ttftCalls: r.ttftCalls,
+                perfCalls: r.perfCalls,
+                totalTtftMs: r.totalTtftMs,
+                totalTokPerSec: r.totalTokPerSec,
+                totalPromptTokens: r.totalPromptTokens,
+                firstSeenAt: r.firstSeenAt,
+                lastUsedAt: r.lastUsedAt,
+            });
+        }
+    }
+    catch (err) {
+        process.stderr.write(`[houtini-lm] Lifetime hydration failed (stats will build from this session): ${err}\n`);
+    }
+}
 function recordUsage(resp) {
     session.calls++;
+    const promptTokens = resp.usage?.prompt_tokens ?? 0;
+    let completionTokens = resp.usage?.completion_tokens ?? 0;
+    const reasoningTokens = resp.usage?.completion_tokens_details?.reasoning_tokens ?? 0;
     if (resp.usage) {
-        session.promptTokens += resp.usage.prompt_tokens;
-        session.completionTokens += resp.usage.completion_tokens;
+        session.promptTokens += promptTokens;
+        session.completionTokens += completionTokens;
     }
     else if (resp.content.length > 0) {
         // Estimate when usage is missing (truncated responses)
-        session.completionTokens += Math.ceil(resp.content.length / 4);
+        const est = Math.ceil(resp.content.length / 4);
+        completionTokens = est;
+        session.completionTokens += est;
     }
-    // …
+    // Tok/s used by both session and lifetime stats
+    const tokPerSec = resp.usage && resp.generationMs > 50
+        ? (resp.usage.completion_tokens / (resp.generationMs / 1000))
+        : 0;
+    // Session per-model (unchanged behaviour)
     if (resp.model) {
         const existing = session.modelStats.get(resp.model) || { calls: 0, ttftCalls: 0, perfCalls: 0, totalTtftMs: 0, totalTokPerSec: 0 };
         existing.calls++;
@@ -50,22 +100,66 @@ function recordUsage(resp) {
             existing.totalTtftMs += resp.ttftMs;
             existing.ttftCalls++;
         }
-        const tokPerSec = resp.usage && resp.generationMs > 50
-            ? (resp.usage.completion_tokens / (resp.generationMs / 1000))
-            : 0;
         if (tokPerSec > 0) {
             existing.perfCalls++;
            existing.totalTokPerSec += tokPerSec;
         }
         session.modelStats.set(resp.model, existing);
     }
+    // Lifetime mirror + SQLite write — fire-and-forget so a DB hiccup can't
+    // stall a tool response. The in-memory mirror is updated synchronously so
+    // the footer and discover output reflect this call immediately.
+    if (resp.model && (promptTokens > 0 || completionTokens > 0)) {
+        const now = Date.now();
+        const wasFirstEver = !lifetime.modelStats.has(resp.model);
+        const lExisting = lifetime.modelStats.get(resp.model) || {
+            calls: 0, ttftCalls: 0, perfCalls: 0, totalTtftMs: 0, totalTokPerSec: 0, totalPromptTokens: 0,
+            firstSeenAt: now, lastUsedAt: now,
+        };
+        lExisting.calls++;
+        if (resp.ttftMs) {
+            lExisting.totalTtftMs += resp.ttftMs;
+            lExisting.ttftCalls++;
+        }
+        if (tokPerSec > 0) {
+            lExisting.perfCalls++;
+            lExisting.totalTokPerSec += tokPerSec;
+        }
+        lExisting.totalPromptTokens += promptTokens;
+        lExisting.lastUsedAt = now;
+        lifetime.modelStats.set(resp.model, lExisting);
+        lifetime.totalCalls++;
+        lifetime.totalTokens += promptTokens + completionTokens;
+        if (wasFirstEver) {
+            lifetime.modelsUsed++;
+            if (lifetime.firstSeenAt === null)
+                lifetime.firstSeenAt = now;
+        }
+        recordPerformance(resp.model, {
+            ttftMs: resp.ttftMs,
+            tokPerSec: tokPerSec > 0 ? tokPerSec : undefined,
+            promptTokens,
+            completionTokens,
+            reasoningTokens,
+        }).catch((err) => {
+            process.stderr.write(`[houtini-lm] Performance write failed (continuing): ${err}\n`);
+        });
+    }
 }
 function sessionSummary() {
     const total = session.promptTokens + session.completionTokens;
-    if (session.calls === 0)
+    if (session.calls === 0 && lifetime.totalCalls === 0)
         return '';
-    const callWord = session.calls === 1 ? 'call' : 'calls';
-    return `💰 Claude quota saved this session: ${total.toLocaleString()} tokens / ${session.calls} ${callWord}`;
+    const callWord = (n) => (n === 1 ? 'call' : 'calls');
+    const sessionPart = session.calls > 0
+        ? `this session: ${total.toLocaleString()} tokens / ${session.calls} ${callWord(session.calls)}`
+        : 'this session: 0 tokens';
+    // Lifetime numbers only show once there's something in the DB — avoids a
+    // confusing "lifetime: 0" on a truly fresh install.
+    if (lifetime.totalCalls > 0) {
+        return `💰 Claude quota saved — ${sessionPart} · lifetime: ${lifetime.totalTokens.toLocaleString()} tokens / ${lifetime.totalCalls} ${callWord(lifetime.totalCalls)}`;
+    }
+    return `💰 Claude quota saved ${sessionPart}`;
 }
 /**
  * Return true when this response is the first one with measurable perf stats
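Note on the accumulator shape used above: both `session.modelStats` and the `lifetime` mirror store running sums plus sample counts, and averages are derived at display time rather than stored. A minimal sketch of that derivation, with a made-up `stats` record (illustrative, not from the package):

    // Derive display averages from the accumulated sums — the same fields
    // the footer/discover formatting in this diff divides by their counters.
    function deriveAverages(stats) {
        return {
            avgTtftMs: stats.ttftCalls > 0 ? Math.round(stats.totalTtftMs / stats.ttftCalls) : null,
            avgTokPerSec: stats.perfCalls > 0 ? stats.totalTokPerSec / stats.perfCalls : null,
        };
    }
    // e.g. { ttftCalls: 3, totalTtftMs: 2400, perfCalls: 2, totalTokPerSec: 51 }
    // → { avgTtftMs: 800, avgTokPerSec: 25.5 }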
@@ -381,6 +475,10 @@ async function chatCompletionStreamingInner(messages, options = {}) {
         messages,
         temperature: options.temperature ?? DEFAULT_TEMPERATURE,
         max_tokens: effectiveMaxTokens,
+        // Send max_completion_tokens alongside max_tokens for OpenAI reasoning-model
+        // compatibility (OpenAI spec distinguishes total generation cap from visible
+        // output cap). Backends that don't understand it ignore unknown fields.
+        max_completion_tokens: effectiveMaxTokens,
         stream: true,
         stream_options: { include_usage: true },
     };
@@ -391,23 +489,36 @@ async function chatCompletionStreamingInner(messages, options = {}) {
         body.response_format = options.responseFormat;
     }
     // Handle thinking/reasoning models.
-    // Some models (Gemma 4, Qwen3, DeepSeek) have extended
-    // part of the max_tokens budget for invisible reasoning
-    // …
+    // Some models (Gemma 4, Qwen3, DeepSeek R1, Nemotron, gpt-oss) have extended
+    // thinking that consumes part of the max_tokens budget for invisible reasoning
+    // before producing content. Strategy:
+    //   1. reasoning_effort=<family-specific value> to minimise reasoning
+    //   2. enable_thinking:false — Qwen3 vendor param (ignored elsewhere)
+    //   3. inflate max_tokens 4× — safety net when both flags are ignored
+    //      (e.g. Gemma 4 hardcodes enable_thinking=true in its Jinja template)
+    //
+    // IMPORTANT: reasoning_effort values are NOT standard. OpenAI/gpt-oss use
+    // 'low'|'medium'|'high'; Ollama adds 'none'; LM Studio's Nemotron adapter
+    // only accepts 'on'|'off'. Sending 'low' to Nemotron causes LM Studio to
+    // silently fall back to 'on' — maximising reasoning, the OPPOSITE of intent.
+    // Hence the family-specific mapping below. When uncertain, we omit the
+    // field entirely rather than risk a bad-value fallback.
     const modelId = (options.model || LM_MODEL || '').toString();
     if (modelId) {
         const thinking = await getThinkingSupport(modelId);
         if (thinking?.supportsThinkingToggle) {
             body.enable_thinking = false;
-            …
+            const reasoningValue = getReasoningEffortValue(modelId);
+            if (reasoningValue !== null) {
+                body.reasoning_effort = reasoningValue;
+            }
+            // Inflation uses effectiveMaxTokens (the context-aware value), not
+            // DEFAULT_MAX_TOKENS — otherwise big-context models get sized down.
+            const beforeInflation = effectiveMaxTokens;
+            const inflated = Math.max(beforeInflation * 4, beforeInflation + 2000);
+            body.max_tokens = inflated;
+            body.max_completion_tokens = inflated;
+            process.stderr.write(`[houtini-lm] Thinking model ${modelId}: reasoning_effort=${reasoningValue ?? '(omitted)'}, enable_thinking=false, max_tokens inflated ${beforeInflation} → ${inflated}\n`);
         }
     }
     const startTime = Date.now();
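For a feel of what the `Math.max(beforeInflation * 4, beforeInflation + 2000)` inflation does at the extremes, two illustrative evaluations (numbers invented, not from the package):

    Math.max(4096 * 4, 4096 + 2000); // → 16384 — large budgets: the 4× term dominates
    Math.max(400 * 4, 400 + 2000);   // → 2400  — tiny budgets: the +2000 floor still
                                     //   guarantees visible-output headroom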
@@ -422,13 +533,39 @@ async function chatCompletionStreamingInner(messages, options = {}) {
     const reader = res.body.getReader();
     const decoder = new TextDecoder();
     let content = '';
-    let chunkCount = 0;
+    let reasoning = '';
+    let progressSeq = 0;
     let model = '';
     let usage;
     let finishReason = '';
     let truncated = false;
+    let prefillStall = false;
     let buffer = '';
     let ttftMs;
+    let firstChunkReceived = false;
+    // Prefill keep-alive — /v1/chat/completions gives no SSE events during
+    // prompt processing, so the MCP client clock ticks uninterrupted on a slow
+    // backend with a big input. Fire a progress notification every 10s until
+    // the first chunk arrives to keep the client from timing out at 60s.
+    const sendProgress = (message) => {
+        if (options.progressToken === undefined)
+            return;
+        progressSeq++;
+        server.notification({
+            method: 'notifications/progress',
+            params: {
+                progressToken: options.progressToken,
+                progress: progressSeq,
+                message,
+            },
+        }).catch(() => { });
+    };
+    const keepAliveTimer = setInterval(() => {
+        if (firstChunkReceived)
+            return;
+        const waitedMs = Date.now() - startTime;
+        sendProgress(`Waiting for model... (${(waitedMs / 1000).toFixed(0)}s, still in prefill)`);
+    }, PREFILL_KEEPALIVE_MS);
     try {
         while (true) {
             // Check soft timeout before each read
@@ -438,17 +575,24 @@ async function chatCompletionStreamingInner(messages, options = {}) {
                 process.stderr.write(`[houtini-lm] Soft timeout at ${elapsed}ms, returning ${content.length} chars of partial content\n`);
                 break;
             }
-            // …
+            // Split prefill vs mid-stream timeouts. Prefill on slow hardware with
+            // a 7k-token input can legitimately take 1-2 min; mid-stream stalls
+            // should surface much faster. Track firstChunkReceived to switch.
             const remaining = SOFT_TIMEOUT_MS - elapsed;
-            const chunkTimeout = Math.min(READ_CHUNK_TIMEOUT_MS, remaining);
+            const perChunkCeiling = firstChunkReceived ? READ_CHUNK_TIMEOUT_MS : PREFILL_TIMEOUT_MS;
+            const chunkTimeout = Math.min(perChunkCeiling, remaining);
             const result = await timedRead(reader, chunkTimeout);
             if (result === 'timeout') {
                 truncated = true;
-                …
+                prefillStall = !firstChunkReceived;
+                process.stderr.write(`[houtini-lm] ${prefillStall ? 'Prefill' : 'Mid-stream'} timeout, returning ${content.length} chars of partial content\n`);
                 break;
             }
             if (result.done)
                 break;
+            if (!firstChunkReceived) {
+                firstChunkReceived = true;
+            }
             buffer += decoder.decode(result.value, { stream: true });
             // Parse SSE lines
             const lines = buffer.split('\n');
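The per-read timeout selection above reduces to a single `Math.min`; a hypothetical trace using this diff's constants (elapsed times invented for illustration):

    // 5s in, before the first chunk: allow the full 3-minute prefill window.
    Math.min(180_000, 300_000 - 5_000);   // → 180000
    // 2 min in, after the first chunk: a 30s mid-stream silence is a stall.
    Math.min(30_000, 300_000 - 120_000);  // → 30000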
@@ -464,41 +608,20 @@ async function chatCompletionStreamingInner(messages, options = {}) {
                     if (json.model)
                         model = json.model;
                     const delta = json.choices?.[0]?.delta;
-                    // …
-                    …
-                            method: 'notifications/progress',
-                            params: {
-                                progressToken: options.progressToken,
-                                progress: chunkCount,
-                                message: `Thinking... (${chunkCount} chunks)`,
-                            },
-                        }).catch(() => { });
-                    }
+                    // Reasoning channel. LM Studio (with "Separate reasoning_content"
+                    // dev setting), DeepSeek R1, Ollama OpenAI-compat, Nemotron etc.
+                    // stream reasoning via delta.reasoning_content — we MUST capture it
+                    // so the safety net below can return something when the model
+                    // burns its entire budget before emitting a single content token.
+                    if (typeof delta?.reasoning_content === 'string' && delta.reasoning_content.length > 0) {
+                        reasoning += delta.reasoning_content;
+                        sendProgress(`Thinking... (${reasoning.length} chars of reasoning)`);
                     }
-                    if (delta?.content) {
+                    if (typeof delta?.content === 'string' && delta.content.length > 0) {
                         if (ttftMs === undefined)
                             ttftMs = Date.now() - startTime;
                         content += delta.content;
-                        chunkCount++;
-                        // Send progress notification to reset MCP client timeout.
-                        // Each notification resets the 60s clock, giving slow models
-                        // unlimited time as long as they're actively generating.
-                        if (options.progressToken !== undefined) {
-                            server.notification({
-                                method: 'notifications/progress',
-                                params: {
-                                    progressToken: options.progressToken,
-                                    progress: chunkCount,
-                                    message: `Streaming... ${content.length} chars`,
-                                },
-                            }).catch(() => { });
-                        }
+                        sendProgress(`Streaming... ${content.length} chars`);
                     }
                     const reason = json.choices?.[0]?.finish_reason;
                     if (reason)
@@ -522,7 +645,10 @@ async function chatCompletionStreamingInner(messages, options = {}) {
                     if (json.model)
                         model = json.model;
                     const delta = json.choices?.[0]?.delta;
-                    if (delta?.content) {
+                    if (typeof delta?.reasoning_content === 'string' && delta.reasoning_content.length > 0) {
+                        reasoning += delta.reasoning_content;
+                    }
+                    if (typeof delta?.content === 'string' && delta.content.length > 0) {
                         if (ttftMs === undefined)
                             ttftMs = Date.now() - startTime;
                         content += delta.content;
@@ -541,6 +667,7 @@ async function chatCompletionStreamingInner(messages, options = {}) {
         }
     }
     finally {
+        clearInterval(keepAliveTimer);
         // Best-effort cancel with a short timeout — cancel() can hang if the upstream
         // connection is wedged, so we race it against a 500ms timer. This frees the
         // underlying socket sooner on abrupt client disconnects without blocking the
@@ -559,45 +686,98 @@ async function chatCompletionStreamingInner(messages, options = {}) {
     }
     const generationMs = Date.now() - startTime;
     // Strip <think>...</think> reasoning blocks from models that always emit them
-    // (e.g. GLM Flash …
-    // Handle both closed …
-    // or grammar-constrained output forced content before the closing tag).
+    // inline on the content channel (e.g. GLM Flash). Claude doesn't need the
+    // model's internal reasoning. Handle both closed and unclosed blocks.
     let cleanContent = content.replace(/<think>[\s\S]*?<\/think>\s*/g, ''); // closed blocks
     cleanContent = cleanContent.replace(/^<think>\s*/, ''); // orphaned opening tag
     cleanContent = cleanContent.trim();
-    // Safety …
-    // …
+    // Safety nets for empty visible output. Try in order:
+    //   1. thinkStripFallback: stripping <think> left nothing, but raw content had text
+    //   2. reasoningFallback: no visible content AT ALL, but reasoning_content was streamed
+    //      (this is the Nemotron/DeepSeek-R1/LM-Studio-dev-toggle case — previously
+    //      produced silent empty bodies because reasoning was discarded)
     let thinkStripFallback = false;
-    …
+    let reasoningFallback = false;
+    if (!cleanContent) {
+        if (content.trim()) {
+            thinkStripFallback = true;
+            cleanContent = content.trim();
+        }
+        else if (reasoning.trim()) {
+            reasoningFallback = true;
+            cleanContent =
+                '[No visible output — the model spent its entire output budget on reasoning_content before emitting any content. ' +
+                    'Raw reasoning below so you can see what it was doing:]\n\n' +
+                    reasoning.trim();
+        }
     }
-    return { …
+    return {
+        content: cleanContent,
+        rawContent: content,
+        reasoningContent: reasoning || undefined,
+        model,
+        usage,
+        finishReason,
+        truncated,
+        ttftMs,
+        generationMs,
+        thinkStripFallback,
+        reasoningFallback,
+        prefillStall,
+    };
+}
+let detectedBackend = null;
+function getBackend() {
+    return detectedBackend ?? 'openai-compat';
 }
 /**
- * Fetch models …
+ * Fetch models with backend-aware probing.
+ *   1. LM Studio /api/v0/models — richest metadata, sets backend='lmstudio'
+ *   2. Ollama /api/tags — native list, sets backend='ollama', maps to ModelInfo
+ *   3. OpenAI-compatible /v1/models — generic fallback (DeepSeek, vLLM, llama.cpp, OpenRouter)
  */
 async function listModelsRaw() {
-    // Try v0 API first — returns type, arch, publisher, quantization, state
+    // Try LM Studio's v0 API first — returns type, arch, publisher, quantization, state
     try {
         const v0 = await fetchWithTimeout(`${LM_BASE_URL}/api/v0/models`, { headers: apiHeaders() });
         if (v0.ok) {
             const data = (await v0.json());
+            detectedBackend = 'lmstudio';
             return data.data;
         }
     }
     catch {
-        // v0 not available — fall through
+        // v0 not available — fall through
+    }
+    // Try Ollama's /api/tags next. Shape differs from OpenAI: returns
+    // { models: [{ name, model, size, details: { family, parameter_size, ... } }] }
+    try {
+        const tags = await fetchWithTimeout(`${LM_BASE_URL}/api/tags`, { headers: apiHeaders() });
+        if (tags.ok) {
+            const data = (await tags.json());
+            if (Array.isArray(data.models)) {
+                detectedBackend = 'ollama';
+                return data.models.map((m) => ({
+                    id: m.name,
+                    object: 'model',
+                    type: 'llm',
+                    arch: m.details?.family,
+                    quantization: m.details?.quantization_level,
+                    state: 'loaded', // Ollama loads on-demand; treat all listed as available
+                    publisher: m.name.includes('/') ? m.name.split('/')[0] : undefined,
+                }));
+            }
+        }
+    }
+    catch {
+        // Not Ollama — fall through
     }
-    // Fallback: OpenAI-compatible v1 endpoint (…
+    // Fallback: OpenAI-compatible v1 endpoint (DeepSeek, vLLM, llama.cpp, OpenRouter)
     const res = await fetchWithTimeout(`${LM_BASE_URL}/v1/models`, { headers: apiHeaders() });
     if (!res.ok)
         throw new Error(`Failed to list models: ${res.status}`);
     const data = (await res.json());
+    detectedBackend = 'openai-compat';
     return data.data;
 }
 function getContextLength(model) {
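As a sanity check of the `/api/tags` mapping, here is the shape transformation applied to a made-up Ollama entry (the input object is hypothetical; the field names follow Ollama's documented tags response):

    const m = { name: 'library/qwen2.5-coder:7b', details: { family: 'qwen2', quantization_level: 'Q4_K_M' } };
    const mapped = {
        id: m.name,                                  // 'library/qwen2.5-coder:7b'
        object: 'model',
        type: 'llm',
        arch: m.details?.family,                     // 'qwen2'
        quantization: m.details?.quantization_level, // 'Q4_K_M'
        state: 'loaded',
        publisher: m.name.includes('/') ? m.name.split('/')[0] : undefined, // 'library'
    };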
@@ -608,6 +788,81 @@ function getContextLength(model) {
 function getMaxContextLength(model) {
     return model.max_context_length;
 }
+/**
+ * Map model family / backend → reasoning_effort value that minimises reasoning.
+ *
+ * The `reasoning_effort` field exists across OpenAI, Ollama, LM Studio and
+ * DeepSeek, but the accepted values differ per vendor. Verified empirically
+ * from the LM Studio error response: "Supported values: none, minimal, low,
+ * medium, high, xhigh" (that's the set the LM Studio adapter accepts).
+ *
+ *   OpenAI (gpt-5, o-series) : 'low' | 'medium' | 'high' (spec)
+ *   Ollama                   : 'low' | 'medium' | 'high' | 'none'
+ *   LM Studio (all models)   : 'none' | 'minimal' | 'low' | 'medium' | 'high' | 'xhigh'
+ *
+ * We want the HARDEST off-switch we can portably send:
+ *   - LM Studio / Ollama: 'none' (no reasoning budget at all)
+ *   - Generic OpenAI-compat: 'low' (OpenAI's minimum, safe to send)
+ *
+ * An unsupported value is a hard 400 error on LM Studio (not a silent
+ * fallback), so this function is conservative — it returns null for
+ * unknown backends and we omit the field rather than risk a 400.
+ */
+function getReasoningEffortValue(_modelId) {
+    const backend = getBackend();
+    // LM Studio accepts 'none' as an explicit reasoning-off switch for
+    // every thinking model (Nemotron, DeepSeek R1, Gemma 4, gpt-oss, ...).
+    if (backend === 'lmstudio')
+        return 'none';
+    // Ollama likewise documents 'none' as valid.
+    if (backend === 'ollama')
+        return 'none';
+    // Generic OpenAI-compatible — 'low' is the minimum OpenAI accepts per spec.
+    // DeepSeek's own API treats 'low' as minimum too.
+    return 'low';
+}
+/** Rough chars→tokens ratio used for pre-flight estimates. Matches the ratio
+ * we already use to estimate completion_tokens when usage is missing. */
+const CHARS_PER_TOKEN = 4;
+/** Conservative default prefill rate when no per-model measurement exists.
+ * Slower than real hardware so we err toward letting the call run — a false
+ * refusal is much worse than a false-ok that eventually times out. */
+const DEFAULT_PREFILL_TOK_PER_SEC = 300;
+/** Hard ceiling for when we refuse to send the call. Leaves ~15s of
+ * generation headroom inside the ~60s MCP-client request-timeout budget. */
+const PREFILL_REFUSE_THRESHOLD_SEC = 45;
+/** Soft warning threshold — we proceed but log a stderr warning. */
+const PREFILL_WARN_THRESHOLD_SEC = 25;
+/**
+ * Estimate how long prompt prefill will take, using measured per-model data
+ * from the SQLite cache when available. `totalTtftMs` is very close to pure
+ * prefill time for a streaming call (first-content-delta arrives right after
+ * prefill finishes), so `totalPromptTokens / totalTtftMs` gives a usable
+ * prefill-tok/s rate for that specific (model, hardware) pair.
+ */
+function estimatePrefill(inputChars, modelId) {
+    const inputTokens = Math.ceil(inputChars / CHARS_PER_TOKEN);
+    const stats = lifetime.modelStats.get(modelId);
+    let prefillTokPerSec = DEFAULT_PREFILL_TOK_PER_SEC;
+    let basis = 'default';
+    if (stats && stats.ttftCalls >= 2 && stats.totalTtftMs > 0 && stats.totalPromptTokens > 0) {
+        // Only trust measured data after >=2 TTFT samples — single samples on a
+        // cold model run are noisy. Average prompt tokens per call approximated
+        // as totalPromptTokens / calls, then divided by average TTFT seconds.
+        const avgPromptTokens = stats.totalPromptTokens / stats.calls;
+        const avgTtftSec = (stats.totalTtftMs / stats.ttftCalls) / 1000;
+        if (avgTtftSec > 0) {
+            prefillTokPerSec = avgPromptTokens / avgTtftSec;
+            basis = 'measured';
+        }
+    }
+    return {
+        inputTokens,
+        estimatedSeconds: inputTokens / prefillTokPerSec,
+        prefillTokPerSec,
+        basis,
+    };
+}
 async function routeToModel(taskType) {
     let models;
     try {
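Worked example of the estimator's arithmetic (numbers invented): a 32,000-char input is ~8,000 tokens at `CHARS_PER_TOKEN = 4`. Against the 300 tok/s default that is ~27s — past the 25s warning threshold but under the 45s refusal, so the call proceeds with a stderr warning. A measured rate of 150 tok/s on the same input estimates ~53s, and the refusal path later in this diff rejects it because the basis is 'measured':

    const inputTokens = Math.ceil(32_000 / 4); // 8000
    inputTokens / 300; // ≈ 26.7s → warn, proceed
    inputTokens / 150; // ≈ 53.3s → refuse when basis === 'measured'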
@@ -674,9 +929,11 @@ function assessQuality(resp, rawContent) {
         : null;
     return {
         truncated: resp.truncated,
+        prefillStall: resp.prefillStall ?? false,
         finishReason: resp.finishReason || 'unknown',
         thinkBlocksStripped: hadThinkBlocks,
         thinkStripFallback: resp.thinkStripFallback ?? false,
+        reasoningFallback: resp.reasoningFallback ?? false,
         estimatedTokens: estimated,
         contentLength: resp.content.length,
         generationMs: resp.generationMs,
@@ -685,9 +942,13 @@ function assessQuality(resp, rawContent) {
 }
 function formatQualityLine(quality) {
     const flags = [];
-    if (quality.truncated)
+    if (quality.prefillStall)
+        flags.push('PREFILL-STALL (no tokens received — input may be too large for this model/hardware)');
+    else if (quality.truncated)
         flags.push('TRUNCATED');
-    if (quality.thinkStripFallback)
+    if (quality.reasoningFallback)
+        flags.push('reasoning-only (model exhausted output budget before emitting visible content — showing raw reasoning)');
+    else if (quality.thinkStripFallback)
         flags.push('think-strip-empty (showing raw reasoning — model ignored enable_thinking:false)');
     else if (quality.thinkBlocksStripped)
         flags.push('think-blocks-stripped');
@@ -715,7 +976,17 @@ function formatFooter(resp, extra) {
     if (resp.model)
         parts.push(`Model: ${resp.model}`);
     if (resp.usage) {
-        parts.push(`${resp.usage.prompt_tokens}→${resp.usage.completion_tokens} tokens`);
+        // OpenAI-spec reasoning-tokens split — when present, show it so the user
+        // sees how much of the completion budget went to hidden reasoning vs
+        // visible output. Diagnoses "empty body + hit-max-tokens" immediately.
+        const reasoningTokens = resp.usage.completion_tokens_details?.reasoning_tokens;
+        if (typeof reasoningTokens === 'number' && reasoningTokens > 0) {
+            const visible = resp.usage.completion_tokens - reasoningTokens;
+            parts.push(`${resp.usage.prompt_tokens}→${resp.usage.completion_tokens} tokens (${reasoningTokens} reasoning / ${visible} visible)`);
+        }
+        else {
+            parts.push(`${resp.usage.prompt_tokens}→${resp.usage.completion_tokens} tokens`);
+        }
     }
     else if (resp.content.length > 0) {
         // Estimate when usage is missing (truncated responses where final SSE chunk was lost)
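For illustration, a hypothetical usage payload and the footer part the new branch would produce:

    const usage = { prompt_tokens: 1200, completion_tokens: 900,
        completion_tokens_details: { reasoning_tokens: 600 } };
    // → parts gets: '1200→900 tokens (600 reasoning / 300 visible)'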
@@ -899,11 +1170,13 @@ const TOOLS = [
             '• Provide absolute paths. Relative paths are rejected.\n' +
             '• Files are read in parallel (Promise.allSettled) — one unreadable file does not sink the call.\n' +
             '• Files are concatenated with `=== filename ===` headers and sent to the same code-review pipeline as code_task.\n' +
-            '• Read failures are surfaced inline with the reason so the LLM can still reason about the rest.\n\n' +
+            '• Read failures are surfaced inline with the reason so the LLM can still reason about the rest.\n' +
+            '• Pre-flight prefill estimate: if measured per-model data shows the input would exceed the MCP client\'s ~60s request timeout during prompt processing, the call is refused early with a diagnostic instead of hanging. Split or trim when this fires.\n\n' +
             'Good fit:\n' +
             '• Reviewing related files together (module + its tests, client + server pair)\n' +
             '• Auditing a single large file too big to paste comfortably\n' +
             '• Any code_task where keeping source out of the Claude context window matters\n\n' +
+            'Size guidance: on slow hardware (< 25 tok/s generation), keep total input under ~8,000 tokens (~32,000 chars) to stay safely under the client timeout. Faster hardware handles much more — the pre-flight estimator adapts once you\'ve done a few calls and real per-model timings are in the SQLite cache.\n\n' +
             'Same review discipline as code_task — verify the output before acting on it.',
         inputSchema: {
             type: 'object',
@@ -965,6 +1238,23 @@ const TOOLS = [
             required: ['input'],
         },
     },
+    {
+        name: 'stats',
+        description: 'Show user stats: tokens offloaded, calls made, per-model performance — for the current session AND ' +
+            'lifetime (persisted in SQLite at ~/.houtini-lm/model-cache.db). Unlike `discover` which includes the ' +
+            'model catalog, `stats` returns just the numbers in a compact markdown table — cheap to call repeatedly ' +
+            'to see the 💰 Claude-quota savings counter climb. Useful for quantifying how much work the local model ' +
+            'is genuinely doing, and for noticing when a model\'s reasoning-token ratio is drifting.',
+        inputSchema: {
+            type: 'object',
+            properties: {
+                model: {
+                    type: 'string',
+                    description: 'Optional: filter output to a single model ID. Omit to see all models this workstation has used.',
+                },
+            },
+        },
+    },
 ];
 // ── MCP Server ───────────────────────────────────────────────────────
 // Session-level sidekick framing. MCP clients surface this to the model
@@ -974,7 +1264,7 @@ const SIDEKICK_INSTRUCTIONS = `Houtini-lm is a local LLM sidekick. It runs on th
     `When to reach for it: bounded, self-contained tasks you can describe in one message — explanations, boilerplate, test stubs, code review of pasted or file-loaded source, translations, commit messages, format conversion, brainstorming. Trades wall-clock time for tokens (typically 3-30× slower than frontier models).\n\n` +
     `When not to: tasks that need tool access, cross-file reasoning you haven't captured, or work fast enough to answer directly before the delegation round-trip completes.\n\n` +
     `Call \`discover\` in delegation-heavy sessions to see what model is loaded, its capability profile, and — after the first real call — its measured speed. The response footer reports cumulative tokens kept in the user's quota.`;
-const server = new Server({ name: 'houtini-lm', version: '2.10.0' }, { capabilities: { tools: {}, resources: {} }, instructions: SIDEKICK_INSTRUCTIONS });
+const server = new Server({ name: 'houtini-lm', version: '2.11.0' }, { capabilities: { tools: {}, resources: {} }, instructions: SIDEKICK_INSTRUCTIONS });
 // ── MCP Resources ─────────────────────────────────────────────────────
 // Exposes session performance metrics as a readable resource so Claude can
 // proactively check offload efficiency and make smarter delegation decisions.
@@ -1155,6 +1445,32 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
                 ? `    ${route.hints.outputConstraint}`
                 : '';
             const combined = sections.join('\n\n');
+            // Pre-flight prefill estimate. Huge inputs can legitimately exceed
+            // the MCP client's ~60s request timeout during prompt processing, and
+            // progress notifications don't reset that timeout on Claude Desktop.
+            // If measured per-model data in the SQLite cache shows this input
+            // would obviously overrun, refuse with a concrete diagnostic so the
+            // caller knows to split or trim instead of waiting for a silent hang.
+            const estimate = estimatePrefill(combined.length, route.modelId);
+            if (estimate.basis === 'measured' && estimate.estimatedSeconds > PREFILL_REFUSE_THRESHOLD_SEC) {
+                const prefillRate = Math.round(estimate.prefillTokPerSec);
+                const estSec = Math.round(estimate.estimatedSeconds);
+                return {
+                    content: [{
+                            type: 'text',
+                            text: `Error: estimated prefill time exceeds the ~60s MCP client timeout.\n\n` +
+                                `• Input size: ~${estimate.inputTokens.toLocaleString()} tokens across ${successCount} file(s)\n` +
+                                `• Measured prefill rate on ${route.modelId}: ~${prefillRate} tok/s (from ${lifetime.modelStats.get(route.modelId)?.ttftCalls ?? 0} prior calls)\n` +
+                                `• Estimated prefill: ~${estSec}s (threshold: ${PREFILL_REFUSE_THRESHOLD_SEC}s)\n\n` +
+                                `Options: split the files into smaller groups, trim the largest file, or use \`code_task\` with a focused excerpt. ` +
+                                `If you know this workstation can handle it, pass fewer files or run the task again when the measured rate improves.`,
+                        }],
+                    isError: true,
+                };
+            }
+            if (estimate.estimatedSeconds > PREFILL_WARN_THRESHOLD_SEC) {
+                process.stderr.write(`[houtini-lm] Large input warning: ~${estimate.inputTokens} tokens, est prefill ~${Math.round(estimate.estimatedSeconds)}s (${estimate.basis}). Proceeding.\n`);
+            }
             const codeMessages = [
                 {
                     role: 'system',
@@ -1212,25 +1528,45 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
             const primary = loaded[0] || models[0];
             const ctx = getContextLength(primary);
             const primaryProfile = await getModelProfileAsync(primary);
-            …
+            // Use sessionSummary() so discover matches the footer format and
+            // automatically picks up the lifetime line when the SQLite cache has
+            // cross-session data.
+            const summary = sessionSummary();
+            const sessionStats = session.calls > 0 || lifetime.totalCalls > 0
+                ? `\n${summary}`
                 : `\n💰 Claude quota saved this session: 0 tokens — no calls yet. Measured speed for each model will appear here after the first real call.`;
             // Measured speed line for the active model. Discover intentionally does
             // not run a synthetic warmup — speed is captured from real tasks, so the
             // numbers reflect actual workload rather than a contrived benchmark.
+            // Shows session stats when this session has measured calls; otherwise
+            // falls back to workstation lifetime stats so Claude sees historical
+            // perf from call 1 instead of "not yet benchmarked".
             const primaryStats = session.modelStats.get(primary.id);
+            const primaryLifetime = lifetime.modelStats.get(primary.id);
             let speedLine = '';
             if (primaryStats && primaryStats.perfCalls > 0) {
                 const avgTtft = primaryStats.ttftCalls > 0 ? Math.round(primaryStats.totalTtftMs / primaryStats.ttftCalls) : 0;
                 const avgTokSec = (primaryStats.totalTokPerSec / primaryStats.perfCalls).toFixed(1);
-                speedLine = `Measured speed: ${avgTokSec} tok/s · TTFT ${avgTtft}ms (…
+                speedLine = `Measured speed (session): ${avgTokSec} tok/s · TTFT ${avgTtft}ms (${primaryStats.perfCalls} call${primaryStats.perfCalls === 1 ? '' : 's'})\n`;
+                if (primaryLifetime && primaryLifetime.perfCalls > primaryStats.perfCalls) {
+                    const lAvgTtft = primaryLifetime.ttftCalls > 0 ? Math.round(primaryLifetime.totalTtftMs / primaryLifetime.ttftCalls) : 0;
+                    const lAvgTokSec = (primaryLifetime.totalTokPerSec / primaryLifetime.perfCalls).toFixed(1);
+                    speedLine += `Measured speed (lifetime on this workstation): ${lAvgTokSec} tok/s · TTFT ${lAvgTtft}ms (${primaryLifetime.perfCalls} calls)\n`;
+                }
+            }
+            else if (primaryLifetime && primaryLifetime.perfCalls > 0) {
+                const lAvgTtft = primaryLifetime.ttftCalls > 0 ? Math.round(primaryLifetime.totalTtftMs / primaryLifetime.ttftCalls) : 0;
+                const lAvgTokSec = (primaryLifetime.totalTokPerSec / primaryLifetime.perfCalls).toFixed(1);
+                speedLine = `Measured speed (lifetime on this workstation): ${lAvgTokSec} tok/s · TTFT ${lAvgTtft}ms (${primaryLifetime.perfCalls} calls, last used ${new Date(primaryLifetime.lastUsedAt).toISOString().slice(0, 10)})\n`;
             }
             else {
                 speedLine = `Measured speed: not yet benchmarked — will be captured on the first real call.\n`;
             }
+            const backendLabel = getBackend() === 'lmstudio' ? 'LM Studio'
+                : getBackend() === 'ollama' ? 'Ollama'
+                    : 'OpenAI-compatible';
             let text = `Status: ONLINE\n` +
-                `Endpoint: ${LM_BASE_URL}\n` +
+                `Endpoint: ${LM_BASE_URL} (${backendLabel})\n` +
                 `Connection latency: ${ms}ms (does not reflect inference speed)\n` +
                 `Active model: ${primary.id}\n` +
                 `Context window: ${ctx.toLocaleString()} tokens\n` +
@@ -1261,6 +1597,20 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
                     text += `    ${modelId}: ${stats.calls} calls, avg TTFT ${avgTtft}ms, avg ${avgTokSec} tok/s\n`;
                 }
             }
+            // Workstation lifetime stats — built from SQLite, persists across restarts.
+            // Only shown when there's lifetime data beyond this session, so a first-run
+            // user doesn't see a duplicate of the session block above.
+            const hasLifetimeBeyondSession = Array.from(lifetime.modelStats.entries())
+                .some(([id, l]) => l.calls > (session.modelStats.get(id)?.calls ?? 0));
+            if (hasLifetimeBeyondSession) {
+                text += `\nPerformance (lifetime on this workstation):\n`;
+                for (const [modelId, stats] of lifetime.modelStats) {
+                    const avgTtft = stats.ttftCalls > 0 ? Math.round(stats.totalTtftMs / stats.ttftCalls) : 0;
+                    const avgTokSec = stats.perfCalls > 0 ? (stats.totalTokPerSec / stats.perfCalls).toFixed(1) : '?';
+                    const lastUsed = new Date(stats.lastUsedAt).toISOString().slice(0, 10);
+                    text += `    ${modelId}: ${stats.calls} calls, avg TTFT ${avgTtft}ms, avg ${avgTokSec} tok/s (last used ${lastUsed})\n`;
+                }
+            }
             text += `${sessionStats}\n\n`;
             text += `The local LLM is available. You can delegate tasks using chat, custom_prompt, code_task, code_task_files, or embed.`;
             return { content: [{ type: 'text', text }] };
@@ -1318,6 +1668,89 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
                };
            });
        }
+        case 'stats': {
+            const { model: filterModel } = args;
+            const backendLabel = getBackend() === 'lmstudio' ? 'LM Studio'
+                : getBackend() === 'ollama' ? 'Ollama'
+                    : 'OpenAI-compatible';
+            const lines = [];
+            lines.push(`## Houtini LM stats`);
+            lines.push('');
+            lines.push(`**Endpoint**: ${LM_BASE_URL} (${backendLabel})`);
+            if (lifetime.firstSeenAt) {
+                lines.push(`**First call on this workstation**: ${new Date(lifetime.firstSeenAt).toISOString().slice(0, 10)}`);
+            }
+            lines.push('');
+            // Totals block
+            lines.push(`### Totals`);
+            lines.push('');
+            lines.push(`| Scope | Calls | Prompt tokens | Completion tokens | Total tokens |`);
+            lines.push(`|----------|------:|--------------:|------------------:|-------------:|`);
+            lines.push(`| Session | ${session.calls} | ${session.promptTokens.toLocaleString()} | ${session.completionTokens.toLocaleString()} | ${(session.promptTokens + session.completionTokens).toLocaleString()} |`);
+            lines.push(`| Lifetime | ${lifetime.totalCalls} | — | — | ${lifetime.totalTokens.toLocaleString()} |`);
+            lines.push('');
+            // Per-model block (union of session + lifetime model ids)
+            const modelIds = new Set([
+                ...session.modelStats.keys(),
+                ...lifetime.modelStats.keys(),
+            ]);
+            const filtered = filterModel ? [...modelIds].filter((m) => m === filterModel) : [...modelIds];
+            if (filtered.length > 0) {
+                lines.push(`### Per-model performance`);
+                lines.push('');
+                lines.push(`| Model | Scope | Calls | Avg TTFT (ms) | Avg tok/s | Prompt tokens | Last used |`);
+                lines.push(`|-------|-------|------:|--------------:|----------:|--------------:|-----------|`);
+                for (const modelId of filtered.sort()) {
+                    const s = session.modelStats.get(modelId);
+                    const l = lifetime.modelStats.get(modelId);
+                    if (s) {
+                        const avgTtft = s.ttftCalls > 0 ? Math.round(s.totalTtftMs / s.ttftCalls) : '—';
+                        const avgTokSec = s.perfCalls > 0 ? (s.totalTokPerSec / s.perfCalls).toFixed(1) : '—';
+                        lines.push(`| ${modelId} | session | ${s.calls} | ${avgTtft} | ${avgTokSec} | — | — |`);
+                    }
+                    if (l) {
+                        const avgTtft = l.ttftCalls > 0 ? Math.round(l.totalTtftMs / l.ttftCalls) : '—';
+                        const avgTokSec = l.perfCalls > 0 ? (l.totalTokPerSec / l.perfCalls).toFixed(1) : '—';
+                        const lastUsed = new Date(l.lastUsedAt).toISOString().slice(0, 10);
+                        lines.push(`| ${modelId} | lifetime | ${l.calls} | ${avgTtft} | ${avgTokSec} | ${l.totalPromptTokens.toLocaleString()} | ${lastUsed} |`);
+                    }
+                }
+                lines.push('');
+            }
+            else if (filterModel) {
+                lines.push(`No history for model: \`${filterModel}\`. Try \`list_models\` to see what's been used.`);
+                lines.push('');
+            }
+            else {
+                lines.push(`No calls yet — delegate a task via \`chat\`, \`custom_prompt\`, \`code_task\`, or \`code_task_files\` to start building stats.`);
+                lines.push('');
+            }
+            // Reasoning-token diagnostic (lifetime only — needs persistence to be meaningful)
+            if (!filterModel) {
+                // Sum reasoning tokens across all models. We store this per-model
+                // in SQLite but not in the in-memory mirror, so fetch on demand.
+                try {
+                    const rows = await getAllPerformance();
+                    const totalReasoning = rows.reduce((sum, r) => sum + (r.totalReasoningTokens || 0), 0);
+                    const totalCompletion = rows.reduce((sum, r) => sum + r.totalCompletionTokens, 0);
+                    if (totalCompletion > 0) {
+                        const pct = ((totalReasoning / totalCompletion) * 100).toFixed(1);
+                        lines.push(`### Reasoning-token overhead (lifetime)`);
+                        lines.push('');
+                        lines.push(`${totalReasoning.toLocaleString()} / ${totalCompletion.toLocaleString()} completion tokens spent on hidden reasoning (${pct}% of generation budget). ` +
+                            (parseFloat(pct) > 30
+                                ? `**High** — consider loading a non-thinking model, or check that \`reasoning_effort\` is being honoured (see stderr logs).`
+                                : parseFloat(pct) > 10
+                                    ? `Moderate — normal for thinking-model families.`
+                                    : `Low — reasoning is effectively suppressed.`));
+                        lines.push('');
+                    }
+                }
+                catch { /* best-effort — don't fail the tool call */ }
+            }
+            lines.push(`*Stats persist across restarts in \`~/.houtini-lm/model-cache.db\`.*`);
+            return { content: [{ type: 'text', text: lines.join('\n') }] };
+        }
         default:
             throw new Error(`Unknown tool: ${name}`);
     }
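A hypothetical render of the Totals table the `stats` handler builds, for a session of 2 calls on a workstation with prior history (all values invented):

    | Scope    | Calls | Prompt tokens | Completion tokens | Total tokens |
    |----------|------:|--------------:|------------------:|-------------:|
    | Session  | 2     | 5,200         | 1,800             | 7,000        |
    | Lifetime | 41    | —             | —                 | 153,420      |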
@@ -1338,6 +1771,10 @@ async function main() {
     listModelsRaw()
         .then((models) => profileModelsAtStartup(models))
         .catch((err) => process.stderr.write(`[houtini-lm] Startup profiling skipped: ${err}\n`));
+    // Hydrate the in-memory lifetime mirror from SQLite so the very first
+    // tool call this session shows historical savings + per-model perf.
+    // Non-blocking too; the footer degrades to session-only if this fails.
+    hydrateLifetimeFromDb().catch((err) => process.stderr.write(`[houtini-lm] Lifetime hydration skipped: ${err}\n`));
 }
 main().catch((error) => {
     process.stderr.write(`Fatal error: ${error}\n`);
|