org-qmd 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/llm.js ADDED
@@ -0,0 +1,1174 @@
1
+ /**
2
+ * llm.ts - LLM abstraction layer for QMD using node-llama-cpp
3
+ *
4
+ * Provides embeddings, text generation, and reranking using local GGUF models.
5
+ */
6
+ import { getLlama, resolveModelFile, LlamaChatSession, LlamaLogLevel, } from "node-llama-cpp";
7
+ import { homedir } from "os";
8
+ import { join } from "path";
9
+ import { existsSync, mkdirSync, statSync, unlinkSync, readdirSync, readFileSync, writeFileSync } from "fs";
10
+ // =============================================================================
11
+ // Embedding Formatting Functions
12
+ // =============================================================================
13
/**
 * Detect whether a model URI refers to a Qwen3-Embedding model.
 * Qwen3-Embedding expects a different prompting style than
 * nomic/embeddinggemma, so formatters branch on this check.
 */
export function isQwen3EmbeddingModel(modelUri) {
  const patterns = [/qwen.*embed/i, /embed.*qwen/i];
  return patterns.some((re) => re.test(modelUri));
}
20
/**
 * Format a search query for embedding.
 * Qwen3-Embedding models get the instruct-style prompt; every other
 * model (embeddinggemma/nomic style) gets the `task: ... | query:` prefix.
 * Model resolution order: explicit arg > QMD_EMBED_MODEL env > default.
 */
export function formatQueryForEmbedding(query, modelUri) {
  const resolved = modelUri ?? process.env.QMD_EMBED_MODEL ?? DEFAULT_EMBED_MODEL;
  return isQwen3EmbeddingModel(resolved)
    ? `Instruct: Retrieve relevant documents for the given query\nQuery: ${query}`
    : `task: search result | query: ${query}`;
}
32
/**
 * Format a document for embedding.
 * Qwen3-Embedding consumes raw document text (optionally prefixed by the
 * title on its own line); other models use the nomic-style
 * `title: ... | text: ...` layout with "none" as the title placeholder.
 */
export function formatDocForEmbedding(text, title, modelUri) {
  const resolved = modelUri ?? process.env.QMD_EMBED_MODEL ?? DEFAULT_EMBED_MODEL;
  if (isQwen3EmbeddingModel(resolved)) {
    // Qwen3-Embedding: documents are raw text, no task prefix.
    return title ? `${title}\n${text}` : text;
  }
  return `title: ${title || "none"} | text: ${text}`;
}
45
// =============================================================================
// Model Configuration
// =============================================================================
// HuggingFace model URIs for node-llama-cpp, format: hf:<user>/<repo>/<file>.
// Override the embedding model via QMD_EMBED_MODEL
// (e.g. hf:Qwen/Qwen3-Embedding-0.6B-GGUF/Qwen3-Embedding-0.6B-Q8_0.gguf).
const DEFAULT_EMBED_MODEL = process.env.QMD_EMBED_MODEL ?? "hf:ggml-org/embeddinggemma-300M-GGUF/embeddinggemma-300M-Q8_0.gguf";
const DEFAULT_RERANK_MODEL = "hf:ggml-org/Qwen3-Reranker-0.6B-Q8_0-GGUF/qwen3-reranker-0.6b-q8_0.gguf";
// const DEFAULT_GENERATE_MODEL = "hf:ggml-org/Qwen3-0.6B-GGUF/Qwen3-0.6B-Q8_0.gguf";
const DEFAULT_GENERATE_MODEL = "hf:tobil/qmd-query-expansion-1.7B-gguf/qmd-query-expansion-1.7B-q4_k_m.gguf";
// Alternative generation models for query expansion:
// LiquidAI LFM2 - hybrid architecture optimized for edge/on-device inference.
// Use these as base for fine-tuning with configs/sft_lfm2.yaml.
export const LFM2_GENERATE_MODEL = "hf:LiquidAI/LFM2-1.2B-GGUF/LFM2-1.2B-Q4_K_M.gguf";
export const LFM2_INSTRUCT_MODEL = "hf:LiquidAI/LFM2.5-1.2B-Instruct-GGUF/LFM2.5-1.2B-Instruct-Q4_K_M.gguf";
// Public aliases for the resolved defaults.
export const DEFAULT_EMBED_MODEL_URI = DEFAULT_EMBED_MODEL;
export const DEFAULT_RERANK_MODEL_URI = DEFAULT_RERANK_MODEL;
export const DEFAULT_GENERATE_MODEL_URI = DEFAULT_GENERATE_MODEL;
// Local model cache directory (honors XDG_CACHE_HOME when set).
const MODEL_CACHE_DIR = process.env.XDG_CACHE_HOME
  ? join(process.env.XDG_CACHE_HOME, "qmd", "models")
  : join(homedir(), ".cache", "qmd", "models");
export const DEFAULT_MODEL_CACHE_DIR = MODEL_CACHE_DIR;
68
/**
 * Parse an `hf:<user>/<repo>/<file>` model URI.
 * Returns `{ repo: "<user>/<repo>", file }` (the file part may itself
 * contain slashes), or null when the string is not an hf: URI or lacks
 * the three required path segments.
 */
function parseHfUri(model) {
  if (!model.startsWith("hf:")) return null;
  const segments = model.slice(3).split("/");
  if (segments.length < 3) return null;
  const [user, repoName, ...fileParts] = segments;
  return { repo: `${user}/${repoName}`, file: fileParts.join("/") };
}
79
/**
 * HEAD-request the HuggingFace resolve URL for an hf reference and
 * return its ETag header, or null on any network/HTTP failure
 * (callers treat null as "remote version unknown").
 */
async function getRemoteEtag(ref) {
  const url = `https://huggingface.co/${ref.repo}/resolve/main/${ref.file}`;
  try {
    const response = await fetch(url, { method: "HEAD" });
    if (!response.ok) return null;
    return response.headers.get("etag") || null;
  } catch {
    return null;
  }
}
92
/**
 * Download (or refresh) a list of model URIs into the local cache.
 *
 * For hf: URIs a sidecar `<filename>.etag` file records the remote
 * ETag; when the remote ETag changes, the sidecar is missing, or
 * `options.refresh` is set, cached artifacts for that model are
 * deleted and re-downloaded.
 *
 * @param models  array of model URI strings (hf: or local paths)
 * @param options { cacheDir?: string, refresh?: boolean }
 * @returns array of { model, path, sizeBytes, refreshed }
 */
export async function pullModels(models, options = {}) {
  const cacheDir = options.cacheDir || MODEL_CACHE_DIR;
  if (!existsSync(cacheDir)) {
    mkdirSync(cacheDir, { recursive: true });
  }
  const results = [];
  for (const model of models) {
    let refreshed = false;
    const hfRef = parseHfUri(model);
    const filename = model.split("/").pop();
    // BUG FIX: the etag sidecar path was the literal `$(unknown).etag`
    // for every model, so all models shared a single etag file and
    // refresh detection compared the wrong ETags. Key it by filename.
    const etagPath = filename ? join(cacheDir, `${filename}.etag`) : null;
    const entries = readdirSync(cacheDir, { withFileTypes: true });
    const cached = filename
      ? entries
          .filter((entry) => entry.isFile() && entry.name.includes(filename))
          .map((entry) => join(cacheDir, entry.name))
      : [];
    if (hfRef && filename && etagPath) {
      const remoteEtag = await getRemoteEtag(hfRef);
      const localEtag = existsSync(etagPath)
        ? readFileSync(etagPath, "utf-8").trim()
        : null;
      // Refresh when forced, when the remote state is unknown, when the
      // ETags differ, or when nothing is cached yet.
      const shouldRefresh = options.refresh || !remoteEtag || remoteEtag !== localEtag || cached.length === 0;
      if (shouldRefresh) {
        for (const candidate of cached) {
          if (existsSync(candidate)) unlinkSync(candidate);
        }
        if (existsSync(etagPath)) unlinkSync(etagPath);
        refreshed = cached.length > 0;
      }
    } else if (options.refresh && filename) {
      for (const candidate of cached) {
        if (existsSync(candidate)) unlinkSync(candidate);
        refreshed = true;
      }
    }
    // resolveModelFile downloads hf: URIs into the cache dir if needed.
    const path = await resolveModelFile(model, cacheDir);
    const sizeBytes = existsSync(path) ? statSync(path).size : 0;
    if (hfRef && filename && etagPath) {
      // Record the ETag we just downloaded so the next pull can compare.
      const remoteEtag = await getRemoteEtag(hfRef);
      if (remoteEtag) {
        writeFileSync(etagPath, remoteEtag + "\n", "utf-8");
      }
    }
    results.push({ model, path, sizeBytes, refreshed });
  }
  return results;
}
145
/**
 * LLM implementation using node-llama-cpp
 */
// Default inactivity timeout: 5 minutes (keep models warm during typical search sessions)
const DEFAULT_INACTIVITY_TIMEOUT_MS = 5 * 60 * 1000;
const DEFAULT_EXPAND_CONTEXT_SIZE = 2048;
/**
 * Resolve the context size used for query-expansion generation.
 * Precedence: explicit config value (validated — throws on a bad value) >
 * QMD_EXPAND_CONTEXT_SIZE env var (warns and falls back on a bad value) >
 * DEFAULT_EXPAND_CONTEXT_SIZE.
 */
function resolveExpandContextSize(configValue) {
  if (configValue !== undefined) {
    if (!Number.isInteger(configValue) || configValue <= 0) {
      throw new Error(`Invalid expandContextSize: ${configValue}. Must be a positive integer.`);
    }
    return configValue;
  }
  const raw = process.env.QMD_EXPAND_CONTEXT_SIZE?.trim();
  if (!raw) return DEFAULT_EXPAND_CONTEXT_SIZE;
  const parsed = Number.parseInt(raw, 10);
  if (Number.isInteger(parsed) && parsed > 0) return parsed;
  process.stderr.write(`QMD Warning: invalid QMD_EXPAND_CONTEXT_SIZE="${raw}", using default ${DEFAULT_EXPAND_CONTEXT_SIZE}.\n`);
  return DEFAULT_EXPAND_CONTEXT_SIZE;
}
168
+ export class LlamaCpp {
169
+ _ciMode = !!process.env.CI;
170
+ llama = null;
171
+ embedModel = null;
172
+ embedContexts = [];
173
+ generateModel = null;
174
+ rerankModel = null;
175
+ rerankContexts = [];
176
+ embedModelUri;
177
+ generateModelUri;
178
+ rerankModelUri;
179
+ modelCacheDir;
180
+ expandContextSize;
181
+ // Ensure we don't load the same model/context concurrently (which can allocate duplicate VRAM).
182
+ embedModelLoadPromise = null;
183
+ generateModelLoadPromise = null;
184
+ rerankModelLoadPromise = null;
185
+ // Inactivity timer for auto-unloading models
186
+ inactivityTimer = null;
187
+ inactivityTimeoutMs;
188
+ disposeModelsOnInactivity;
189
+ // Track disposal state to prevent double-dispose
190
+ disposed = false;
191
+ constructor(config = {}) {
192
+ this.embedModelUri = config.embedModel || DEFAULT_EMBED_MODEL;
193
+ this.generateModelUri = config.generateModel || DEFAULT_GENERATE_MODEL;
194
+ this.rerankModelUri = config.rerankModel || DEFAULT_RERANK_MODEL;
195
+ this.modelCacheDir = config.modelCacheDir || MODEL_CACHE_DIR;
196
+ this.expandContextSize = resolveExpandContextSize(config.expandContextSize);
197
+ this.inactivityTimeoutMs = config.inactivityTimeoutMs ?? DEFAULT_INACTIVITY_TIMEOUT_MS;
198
+ this.disposeModelsOnInactivity = config.disposeModelsOnInactivity ?? false;
199
+ }
200
+ /**
201
+ * Reset the inactivity timer. Called after each model operation.
202
+ * When timer fires, models are unloaded to free memory (if no active sessions).
203
+ */
204
+ touchActivity() {
205
+ // Clear existing timer
206
+ if (this.inactivityTimer) {
207
+ clearTimeout(this.inactivityTimer);
208
+ this.inactivityTimer = null;
209
+ }
210
+ // Only set timer if we have disposable contexts and timeout is enabled
211
+ if (this.inactivityTimeoutMs > 0 && this.hasLoadedContexts()) {
212
+ this.inactivityTimer = setTimeout(() => {
213
+ // Check if session manager allows unloading
214
+ // canUnloadLLM is defined later in this file - it checks the session manager
215
+ // We use dynamic import pattern to avoid circular dependency issues
216
+ if (typeof canUnloadLLM === 'function' && !canUnloadLLM()) {
217
+ // Active sessions/operations - reschedule timer
218
+ this.touchActivity();
219
+ return;
220
+ }
221
+ this.unloadIdleResources().catch(err => {
222
+ console.error("Error unloading idle resources:", err);
223
+ });
224
+ }, this.inactivityTimeoutMs);
225
+ // Don't keep process alive just for this timer
226
+ this.inactivityTimer.unref();
227
+ }
228
+ }
229
+ /**
230
+ * Check if any contexts are currently loaded (and therefore worth unloading on inactivity).
231
+ */
232
+ hasLoadedContexts() {
233
+ return !!(this.embedContexts.length > 0 || this.rerankContexts.length > 0);
234
+ }
235
+ /**
236
+ * Unload idle resources but keep the instance alive for future use.
237
+ *
238
+ * By default, this disposes contexts (and their dependent sequences), while keeping models loaded.
239
+ * This matches the intended lifecycle: model → context → sequence, where contexts are per-session.
240
+ */
241
+ async unloadIdleResources() {
242
+ // Don't unload if already disposed
243
+ if (this.disposed) {
244
+ return;
245
+ }
246
+ // Clear timer
247
+ if (this.inactivityTimer) {
248
+ clearTimeout(this.inactivityTimer);
249
+ this.inactivityTimer = null;
250
+ }
251
+ // Dispose contexts first
252
+ for (const ctx of this.embedContexts) {
253
+ await ctx.dispose();
254
+ }
255
+ this.embedContexts = [];
256
+ for (const ctx of this.rerankContexts) {
257
+ await ctx.dispose();
258
+ }
259
+ this.rerankContexts = [];
260
+ // Optionally dispose models too (opt-in)
261
+ if (this.disposeModelsOnInactivity) {
262
+ if (this.embedModel) {
263
+ await this.embedModel.dispose();
264
+ this.embedModel = null;
265
+ }
266
+ if (this.generateModel) {
267
+ await this.generateModel.dispose();
268
+ this.generateModel = null;
269
+ }
270
+ if (this.rerankModel) {
271
+ await this.rerankModel.dispose();
272
+ this.rerankModel = null;
273
+ }
274
+ // Reset load promises so models can be reloaded later
275
+ this.embedModelLoadPromise = null;
276
+ this.generateModelLoadPromise = null;
277
+ this.rerankModelLoadPromise = null;
278
+ }
279
+ // Note: We keep llama instance alive - it's lightweight
280
+ }
281
+ /**
282
+ * Ensure model cache directory exists
283
+ */
284
+ ensureModelCacheDir() {
285
+ if (!existsSync(this.modelCacheDir)) {
286
+ mkdirSync(this.modelCacheDir, { recursive: true });
287
+ }
288
+ }
289
+ /**
290
+ * Initialize the llama instance (lazy)
291
+ */
292
+ async ensureLlama() {
293
+ if (!this.llama) {
294
+ const llama = await getLlama({
295
+ // attempt to build
296
+ build: "autoAttempt",
297
+ logLevel: LlamaLogLevel.error
298
+ });
299
+ if (llama.gpu === false) {
300
+ process.stderr.write("QMD Warning: no GPU acceleration, running on CPU (slow). Run 'qmd status' for details.\n");
301
+ }
302
+ this.llama = llama;
303
+ }
304
+ return this.llama;
305
+ }
306
+ /**
307
+ * Resolve a model URI to a local path, downloading if needed
308
+ */
309
+ async resolveModel(modelUri) {
310
+ this.ensureModelCacheDir();
311
+ // resolveModelFile handles HF URIs and downloads to the cache dir
312
+ return await resolveModelFile(modelUri, this.modelCacheDir);
313
+ }
314
+ /**
315
+ * Load embedding model (lazy)
316
+ */
317
+ async ensureEmbedModel() {
318
+ if (this.embedModel) {
319
+ return this.embedModel;
320
+ }
321
+ if (this.embedModelLoadPromise) {
322
+ return await this.embedModelLoadPromise;
323
+ }
324
+ this.embedModelLoadPromise = (async () => {
325
+ const llama = await this.ensureLlama();
326
+ const modelPath = await this.resolveModel(this.embedModelUri);
327
+ const model = await llama.loadModel({ modelPath });
328
+ this.embedModel = model;
329
+ // Model loading counts as activity - ping to keep alive
330
+ this.touchActivity();
331
+ return model;
332
+ })();
333
+ try {
334
+ return await this.embedModelLoadPromise;
335
+ }
336
+ finally {
337
+ // Keep the resolved model cached; clear only the in-flight promise.
338
+ this.embedModelLoadPromise = null;
339
+ }
340
+ }
341
+ /**
342
+ * Compute how many parallel contexts to create.
343
+ *
344
+ * GPU: constrained by VRAM (25% of free, capped at 8).
345
+ * CPU: constrained by cores. Splitting threads across contexts enables
346
+ * true parallelism (each context runs on its own cores). Use at most
347
+ * half the math cores, with at least 4 threads per context.
348
+ */
349
+ async computeParallelism(perContextMB) {
350
+ const llama = await this.ensureLlama();
351
+ if (llama.gpu) {
352
+ try {
353
+ const vram = await llama.getVramState();
354
+ const freeMB = vram.free / (1024 * 1024);
355
+ const maxByVram = Math.floor((freeMB * 0.25) / perContextMB);
356
+ return Math.max(1, Math.min(8, maxByVram));
357
+ }
358
+ catch {
359
+ return 2;
360
+ }
361
+ }
362
+ // CPU: split cores across contexts. At least 4 threads per context.
363
+ const cores = llama.cpuMathCores || 4;
364
+ const maxContexts = Math.floor(cores / 4);
365
+ return Math.max(1, Math.min(4, maxContexts));
366
+ }
367
+ /**
368
+ * Get the number of threads each context should use, given N parallel contexts.
369
+ * Splits available math cores evenly across contexts.
370
+ */
371
+ async threadsPerContext(parallelism) {
372
+ const llama = await this.ensureLlama();
373
+ if (llama.gpu)
374
+ return 0; // GPU: let the library decide
375
+ const cores = llama.cpuMathCores || 4;
376
+ return Math.max(1, Math.floor(cores / parallelism));
377
+ }
378
+ /**
379
+ * Load embedding contexts (lazy). Creates multiple for parallel embedding.
380
+ * Uses promise guard to prevent concurrent context creation race condition.
381
+ */
382
+ embedContextsCreatePromise = null;
383
+ async ensureEmbedContexts() {
384
+ if (this.embedContexts.length > 0) {
385
+ this.touchActivity();
386
+ return this.embedContexts;
387
+ }
388
+ if (this.embedContextsCreatePromise) {
389
+ return await this.embedContextsCreatePromise;
390
+ }
391
+ this.embedContextsCreatePromise = (async () => {
392
+ const model = await this.ensureEmbedModel();
393
+ // Embed contexts are ~143 MB each (nomic-embed 2048 ctx)
394
+ const n = await this.computeParallelism(150);
395
+ const threads = await this.threadsPerContext(n);
396
+ for (let i = 0; i < n; i++) {
397
+ try {
398
+ this.embedContexts.push(await model.createEmbeddingContext({
399
+ ...(threads > 0 ? { threads } : {}),
400
+ }));
401
+ }
402
+ catch {
403
+ if (this.embedContexts.length === 0)
404
+ throw new Error("Failed to create any embedding context");
405
+ break;
406
+ }
407
+ }
408
+ this.touchActivity();
409
+ return this.embedContexts;
410
+ })();
411
+ try {
412
+ return await this.embedContextsCreatePromise;
413
+ }
414
+ finally {
415
+ this.embedContextsCreatePromise = null;
416
+ }
417
+ }
418
+ /**
419
+ * Get a single embed context (for single-embed calls). Uses first from pool.
420
+ */
421
+ async ensureEmbedContext() {
422
+ const contexts = await this.ensureEmbedContexts();
423
+ return contexts[0];
424
+ }
425
+ /**
426
+ * Load generation model (lazy) - context is created fresh per call
427
+ */
428
+ async ensureGenerateModel() {
429
+ if (!this.generateModel) {
430
+ if (this.generateModelLoadPromise) {
431
+ return await this.generateModelLoadPromise;
432
+ }
433
+ this.generateModelLoadPromise = (async () => {
434
+ const llama = await this.ensureLlama();
435
+ const modelPath = await this.resolveModel(this.generateModelUri);
436
+ const model = await llama.loadModel({ modelPath });
437
+ this.generateModel = model;
438
+ return model;
439
+ })();
440
+ try {
441
+ await this.generateModelLoadPromise;
442
+ }
443
+ finally {
444
+ this.generateModelLoadPromise = null;
445
+ }
446
+ }
447
+ this.touchActivity();
448
+ if (!this.generateModel) {
449
+ throw new Error("Generate model not loaded");
450
+ }
451
+ return this.generateModel;
452
+ }
453
+ /**
454
+ * Load rerank model (lazy)
455
+ */
456
+ async ensureRerankModel() {
457
+ if (this.rerankModel) {
458
+ return this.rerankModel;
459
+ }
460
+ if (this.rerankModelLoadPromise) {
461
+ return await this.rerankModelLoadPromise;
462
+ }
463
+ this.rerankModelLoadPromise = (async () => {
464
+ const llama = await this.ensureLlama();
465
+ const modelPath = await this.resolveModel(this.rerankModelUri);
466
+ const model = await llama.loadModel({ modelPath });
467
+ this.rerankModel = model;
468
+ // Model loading counts as activity - ping to keep alive
469
+ this.touchActivity();
470
+ return model;
471
+ })();
472
+ try {
473
+ return await this.rerankModelLoadPromise;
474
+ }
475
+ finally {
476
+ this.rerankModelLoadPromise = null;
477
+ }
478
+ }
479
+ /**
480
+ * Load rerank contexts (lazy). Creates multiple contexts for parallel ranking.
481
+ * Each context has its own sequence, so they can evaluate independently.
482
+ *
483
+ * Tuning choices:
484
+ * - contextSize 1024: reranking chunks are ~800 tokens max, 1024 is plenty
485
+ * - flashAttention: ~20% less VRAM per context (568 vs 711 MB)
486
+ * - Combined: drops from 11.6 GB (auto, no flash) to 568 MB per context (20×)
487
+ */
488
+ // Qwen3 reranker template adds ~200 tokens overhead (system prompt, tags, etc.)
489
+ // Default 2048 was too small for longer documents (e.g. session transcripts,
490
+ // CJK text, or large markdown files) — callers hit "input lengths exceed
491
+ // context size" errors even after truncation because the overhead estimate
492
+ // was insufficient. 4096 comfortably fits the largest real-world chunks
493
+ // while staying well below the 40 960-token auto size.
494
+ // Override with QMD_RERANK_CONTEXT_SIZE env var if you need more headroom.
495
+ static RERANK_CONTEXT_SIZE = (() => {
496
+ const v = parseInt(process.env.QMD_RERANK_CONTEXT_SIZE ?? "", 10);
497
+ return Number.isFinite(v) && v > 0 ? v : 4096;
498
+ })();
499
+ async ensureRerankContexts() {
500
+ if (this.rerankContexts.length === 0) {
501
+ const model = await this.ensureRerankModel();
502
+ // ~960 MB per context with flash attention at contextSize 2048
503
+ const n = Math.min(await this.computeParallelism(1000), 4);
504
+ const threads = await this.threadsPerContext(n);
505
+ for (let i = 0; i < n; i++) {
506
+ try {
507
+ this.rerankContexts.push(await model.createRankingContext({
508
+ contextSize: LlamaCpp.RERANK_CONTEXT_SIZE,
509
+ flashAttention: true,
510
+ ...(threads > 0 ? { threads } : {}),
511
+ }));
512
+ }
513
+ catch {
514
+ if (this.rerankContexts.length === 0) {
515
+ // Flash attention might not be supported — retry without it
516
+ try {
517
+ this.rerankContexts.push(await model.createRankingContext({
518
+ contextSize: LlamaCpp.RERANK_CONTEXT_SIZE,
519
+ ...(threads > 0 ? { threads } : {}),
520
+ }));
521
+ }
522
+ catch {
523
+ throw new Error("Failed to create any rerank context");
524
+ }
525
+ }
526
+ break;
527
+ }
528
+ }
529
+ }
530
+ this.touchActivity();
531
+ return this.rerankContexts;
532
+ }
533
+ // ==========================================================================
534
+ // Tokenization
535
+ // ==========================================================================
536
+ /**
537
+ * Tokenize text using the embedding model's tokenizer
538
+ * Returns tokenizer tokens (opaque type from node-llama-cpp)
539
+ */
540
+ async tokenize(text) {
541
+ await this.ensureEmbedContext(); // Ensure model is loaded
542
+ if (!this.embedModel) {
543
+ throw new Error("Embed model not loaded");
544
+ }
545
+ return this.embedModel.tokenize(text);
546
+ }
547
+ /**
548
+ * Count tokens in text using the embedding model's tokenizer
549
+ */
550
+ async countTokens(text) {
551
+ const tokens = await this.tokenize(text);
552
+ return tokens.length;
553
+ }
554
+ /**
555
+ * Detokenize token IDs back to text
556
+ */
557
+ async detokenize(tokens) {
558
+ await this.ensureEmbedContext();
559
+ if (!this.embedModel) {
560
+ throw new Error("Embed model not loaded");
561
+ }
562
+ return this.embedModel.detokenize(tokens);
563
+ }
564
+ // ==========================================================================
565
+ // Core API methods
566
+ // ==========================================================================
567
+ /**
568
+ * Truncate text to fit within the embedding model's context window.
569
+ * Uses the model's own tokenizer for accurate token counting, then
570
+ * detokenizes back to text if truncation is needed.
571
+ * Returns the (possibly truncated) text and whether truncation occurred.
572
+ */
573
+ async truncateToContextSize(text) {
574
+ if (!this.embedModel)
575
+ return { text, truncated: false };
576
+ const maxTokens = this.embedModel.trainContextSize;
577
+ if (maxTokens <= 0)
578
+ return { text, truncated: false };
579
+ const tokens = this.embedModel.tokenize(text);
580
+ if (tokens.length <= maxTokens)
581
+ return { text, truncated: false };
582
+ // Leave a small margin (4 tokens) for BOS/EOS overhead
583
+ const safeLimit = Math.max(1, maxTokens - 4);
584
+ const truncatedTokens = tokens.slice(0, safeLimit);
585
+ const truncatedText = this.embedModel.detokenize(truncatedTokens);
586
+ return { text: truncatedText, truncated: true };
587
+ }
588
+ async embed(text, options = {}) {
589
+ // Ping activity at start to keep models alive during this operation
590
+ this.touchActivity();
591
+ try {
592
+ const context = await this.ensureEmbedContext();
593
+ // Guard: truncate text that exceeds model context window to prevent GGML crash
594
+ const { text: safeText, truncated } = await this.truncateToContextSize(text);
595
+ if (truncated) {
596
+ console.warn(`⚠ Text truncated to fit embedding context (${this.embedModel?.trainContextSize} tokens)`);
597
+ }
598
+ const embedding = await context.getEmbeddingFor(safeText);
599
+ return {
600
+ embedding: Array.from(embedding.vector),
601
+ model: this.embedModelUri,
602
+ };
603
+ }
604
+ catch (error) {
605
+ console.error("Embedding error:", error);
606
+ return null;
607
+ }
608
+ }
609
+ /**
610
+ * Batch embed multiple texts efficiently
611
+ * Uses Promise.all for parallel embedding - node-llama-cpp handles batching internally
612
+ */
613
+ async embedBatch(texts) {
614
+ if (this._ciMode)
615
+ throw new Error("LLM operations are disabled in CI (set CI=true)");
616
+ // Ping activity at start to keep models alive during this operation
617
+ this.touchActivity();
618
+ if (texts.length === 0)
619
+ return [];
620
+ try {
621
+ const contexts = await this.ensureEmbedContexts();
622
+ const n = contexts.length;
623
+ if (n === 1) {
624
+ // Single context: sequential (no point splitting)
625
+ const context = contexts[0];
626
+ const embeddings = [];
627
+ for (const text of texts) {
628
+ try {
629
+ const { text: safeText, truncated } = await this.truncateToContextSize(text);
630
+ if (truncated) {
631
+ console.warn(`⚠ Batch text truncated to fit embedding context (${this.embedModel?.trainContextSize} tokens)`);
632
+ }
633
+ const embedding = await context.getEmbeddingFor(safeText);
634
+ this.touchActivity();
635
+ embeddings.push({ embedding: Array.from(embedding.vector), model: this.embedModelUri });
636
+ }
637
+ catch (err) {
638
+ console.error("Embedding error for text:", err);
639
+ embeddings.push(null);
640
+ }
641
+ }
642
+ return embeddings;
643
+ }
644
+ // Multiple contexts: split texts across contexts for parallel evaluation
645
+ const chunkSize = Math.ceil(texts.length / n);
646
+ const chunks = Array.from({ length: n }, (_, i) => texts.slice(i * chunkSize, (i + 1) * chunkSize));
647
+ const chunkResults = await Promise.all(chunks.map(async (chunk, i) => {
648
+ const ctx = contexts[i];
649
+ const results = [];
650
+ for (const text of chunk) {
651
+ try {
652
+ const { text: safeText, truncated } = await this.truncateToContextSize(text);
653
+ if (truncated) {
654
+ console.warn(`⚠ Batch text truncated to fit embedding context (${this.embedModel?.trainContextSize} tokens)`);
655
+ }
656
+ const embedding = await ctx.getEmbeddingFor(safeText);
657
+ this.touchActivity();
658
+ results.push({ embedding: Array.from(embedding.vector), model: this.embedModelUri });
659
+ }
660
+ catch (err) {
661
+ console.error("Embedding error for text:", err);
662
+ results.push(null);
663
+ }
664
+ }
665
+ return results;
666
+ }));
667
+ return chunkResults.flat();
668
+ }
669
+ catch (error) {
670
+ console.error("Batch embedding error:", error);
671
+ return texts.map(() => null);
672
+ }
673
+ }
674
+ async generate(prompt, options = {}) {
675
+ if (this._ciMode)
676
+ throw new Error("LLM operations are disabled in CI (set CI=true)");
677
+ // Ping activity at start to keep models alive during this operation
678
+ this.touchActivity();
679
+ // Ensure model is loaded
680
+ await this.ensureGenerateModel();
681
+ // Create fresh context -> sequence -> session for each call
682
+ const context = await this.generateModel.createContext();
683
+ const sequence = context.getSequence();
684
+ const session = new LlamaChatSession({ contextSequence: sequence });
685
+ const maxTokens = options.maxTokens ?? 150;
686
+ // Qwen3 recommends temp=0.7, topP=0.8, topK=20 for non-thinking mode
687
+ // DO NOT use greedy decoding (temp=0) - causes repetition loops
688
+ const temperature = options.temperature ?? 0.7;
689
+ let result = "";
690
+ try {
691
+ await session.prompt(prompt, {
692
+ maxTokens,
693
+ temperature,
694
+ topK: 20,
695
+ topP: 0.8,
696
+ onTextChunk: (text) => {
697
+ result += text;
698
+ },
699
+ });
700
+ return {
701
+ text: result,
702
+ model: this.generateModelUri,
703
+ done: true,
704
+ };
705
+ }
706
+ finally {
707
+ // Dispose context (which disposes dependent sequences/sessions per lifecycle rules)
708
+ await context.dispose();
709
+ }
710
+ }
711
+ async modelExists(modelUri) {
712
+ // For HuggingFace URIs, we assume they exist
713
+ // For local paths, check if file exists
714
+ if (modelUri.startsWith("hf:")) {
715
+ return { name: modelUri, exists: true };
716
+ }
717
+ const exists = existsSync(modelUri);
718
+ return {
719
+ name: modelUri,
720
+ exists,
721
+ path: exists ? modelUri : undefined,
722
+ };
723
+ }
724
+ // ==========================================================================
725
+ // High-level abstractions
726
+ // ==========================================================================
727
+ async expandQuery(query, options = {}) {
728
+ if (this._ciMode)
729
+ throw new Error("LLM operations are disabled in CI (set CI=true)");
730
+ // Ping activity at start to keep models alive during this operation
731
+ this.touchActivity();
732
+ const llama = await this.ensureLlama();
733
+ await this.ensureGenerateModel();
734
+ const includeLexical = options.includeLexical ?? true;
735
+ const context = options.context;
736
+ const grammar = await llama.createGrammar({
737
+ grammar: `
738
+ root ::= line+
739
+ line ::= type ": " content "\\n"
740
+ type ::= "lex" | "vec" | "hyde"
741
+ content ::= [^\\n]+
742
+ `
743
+ });
744
+ const intent = options.intent;
745
+ const prompt = intent
746
+ ? `/no_think Expand this search query: ${query}\nQuery intent: ${intent}`
747
+ : `/no_think Expand this search query: ${query}`;
748
+ // Create a bounded context for expansion to prevent large default VRAM allocations.
749
+ const genContext = await this.generateModel.createContext({
750
+ contextSize: this.expandContextSize,
751
+ });
752
+ const sequence = genContext.getSequence();
753
+ const session = new LlamaChatSession({ contextSequence: sequence });
754
+ try {
755
+ // Qwen3 recommended settings for non-thinking mode:
756
+ // temp=0.7, topP=0.8, topK=20, presence_penalty for repetition
757
+ // DO NOT use greedy decoding (temp=0) - causes infinite loops
758
+ const result = await session.prompt(prompt, {
759
+ grammar,
760
+ maxTokens: 600,
761
+ temperature: 0.7,
762
+ topK: 20,
763
+ topP: 0.8,
764
+ repeatPenalty: {
765
+ lastTokens: 64,
766
+ presencePenalty: 0.5,
767
+ },
768
+ });
769
+ const lines = result.trim().split("\n");
770
+ const queryLower = query.toLowerCase();
771
+ const queryTerms = queryLower.replace(/[^a-z0-9\s]/g, " ").split(/\s+/).filter(Boolean);
772
+ const hasQueryTerm = (text) => {
773
+ const lower = text.toLowerCase();
774
+ if (queryTerms.length === 0)
775
+ return true;
776
+ return queryTerms.some(term => lower.includes(term));
777
+ };
778
+ const queryables = lines.map(line => {
779
+ const colonIdx = line.indexOf(":");
780
+ if (colonIdx === -1)
781
+ return null;
782
+ const type = line.slice(0, colonIdx).trim();
783
+ if (type !== 'lex' && type !== 'vec' && type !== 'hyde')
784
+ return null;
785
+ const text = line.slice(colonIdx + 1).trim();
786
+ if (!hasQueryTerm(text))
787
+ return null;
788
+ return { type: type, text };
789
+ }).filter((q) => q !== null);
790
+ // Filter out lex entries if not requested
791
+ const filtered = includeLexical ? queryables : queryables.filter(q => q.type !== 'lex');
792
+ if (filtered.length > 0)
793
+ return filtered;
794
+ const fallback = [
795
+ { type: 'hyde', text: `Information about ${query}` },
796
+ { type: 'lex', text: query },
797
+ { type: 'vec', text: query },
798
+ ];
799
+ return includeLexical ? fallback : fallback.filter(q => q.type !== 'lex');
800
+ }
801
+ catch (error) {
802
+ console.error("Structured query expansion failed:", error);
803
+ // Fallback to original query
804
+ const fallback = [{ type: 'vec', text: query }];
805
+ if (includeLexical)
806
+ fallback.unshift({ type: 'lex', text: query });
807
+ return fallback;
808
+ }
809
+ finally {
810
+ await genContext.dispose();
811
+ }
812
+ }
813
+ // Qwen3 reranker chat template overhead (system prompt, tags, separators).
814
+ // Measured at ~350 tokens on real queries; use 512 as a safe upper bound so
815
+ // the truncation budget never lets a document slip past the context limit.
816
+ static RERANK_TEMPLATE_OVERHEAD = 512;
817
+ static RERANK_TARGET_DOCS_PER_CONTEXT = 10;
818
+ async rerank(query, documents, options = {}) {
819
+ if (this._ciMode)
820
+ throw new Error("LLM operations are disabled in CI (set CI=true)");
821
+ // Ping activity at start to keep models alive during this operation
822
+ this.touchActivity();
823
+ const contexts = await this.ensureRerankContexts();
824
+ const model = await this.ensureRerankModel();
825
+ // Truncate documents that would exceed the rerank context size.
826
+ // Budget = contextSize - template overhead - query tokens
827
+ const queryTokens = model.tokenize(query).length;
828
+ const maxDocTokens = LlamaCpp.RERANK_CONTEXT_SIZE - LlamaCpp.RERANK_TEMPLATE_OVERHEAD - queryTokens;
829
+ const truncationCache = new Map();
830
+ const truncatedDocs = documents.map((doc) => {
831
+ const cached = truncationCache.get(doc.text);
832
+ if (cached !== undefined) {
833
+ return cached === doc.text ? doc : { ...doc, text: cached };
834
+ }
835
+ const tokens = model.tokenize(doc.text);
836
+ const truncatedText = tokens.length <= maxDocTokens
837
+ ? doc.text
838
+ : model.detokenize(tokens.slice(0, maxDocTokens));
839
+ truncationCache.set(doc.text, truncatedText);
840
+ if (truncatedText === doc.text)
841
+ return doc;
842
+ return { ...doc, text: truncatedText };
843
+ });
844
+ // Deduplicate identical effective texts before scoring.
845
+ // This avoids redundant work for repeated chunks and fixes collisions where
846
+ // multiple docs map to the same chunk text.
847
+ const textToDocs = new Map();
848
+ truncatedDocs.forEach((doc, index) => {
849
+ const existing = textToDocs.get(doc.text);
850
+ if (existing) {
851
+ existing.push({ file: doc.file, index });
852
+ }
853
+ else {
854
+ textToDocs.set(doc.text, [{ file: doc.file, index }]);
855
+ }
856
+ });
857
+ // Extract just the text for ranking
858
+ const texts = Array.from(textToDocs.keys());
859
+ // Split documents across contexts for parallel evaluation.
860
+ // Each context has its own sequence with a lock, so parallelism comes
861
+ // from multiple contexts evaluating different chunks simultaneously.
862
+ const activeContextCount = Math.max(1, Math.min(contexts.length, Math.ceil(texts.length / LlamaCpp.RERANK_TARGET_DOCS_PER_CONTEXT)));
863
+ const activeContexts = contexts.slice(0, activeContextCount);
864
+ const chunkSize = Math.ceil(texts.length / activeContexts.length);
865
+ const chunks = Array.from({ length: activeContexts.length }, (_, i) => texts.slice(i * chunkSize, (i + 1) * chunkSize)).filter(chunk => chunk.length > 0);
866
+ const allScores = await Promise.all(chunks.map((chunk, i) => activeContexts[i].rankAll(query, chunk)));
867
+ // Reassemble scores in original order and sort
868
+ const flatScores = allScores.flat();
869
+ const ranked = texts
870
+ .map((text, i) => ({ document: text, score: flatScores[i] }))
871
+ .sort((a, b) => b.score - a.score);
872
+ // Map back to our result format.
873
+ const results = [];
874
+ for (const item of ranked) {
875
+ const docInfos = textToDocs.get(item.document) ?? [];
876
+ for (const docInfo of docInfos) {
877
+ results.push({
878
+ file: docInfo.file,
879
+ score: item.score,
880
+ index: docInfo.index,
881
+ });
882
+ }
883
+ }
884
+ return {
885
+ results,
886
+ model: this.rerankModelUri,
887
+ };
888
+ }
889
+ /**
890
+ * Get device/GPU info for status display.
891
+ * Initializes llama if not already done.
892
+ */
893
+ async getDeviceInfo() {
894
+ const llama = await this.ensureLlama();
895
+ const gpuDevices = await llama.getGpuDeviceNames();
896
+ let vram;
897
+ if (llama.gpu) {
898
+ try {
899
+ const state = await llama.getVramState();
900
+ vram = { total: state.total, used: state.used, free: state.free };
901
+ }
902
+ catch { /* no vram info */ }
903
+ }
904
+ return {
905
+ gpu: llama.gpu,
906
+ gpuOffloading: llama.supportsGpuOffloading,
907
+ gpuDevices,
908
+ vram,
909
+ cpuCores: llama.cpuMathCores,
910
+ };
911
+ }
912
+ async dispose() {
913
+ // Prevent double-dispose
914
+ if (this.disposed) {
915
+ return;
916
+ }
917
+ this.disposed = true;
918
+ // Clear inactivity timer
919
+ if (this.inactivityTimer) {
920
+ clearTimeout(this.inactivityTimer);
921
+ this.inactivityTimer = null;
922
+ }
923
+ // Disposing llama cascades to models and contexts automatically
924
+ // See: https://node-llama-cpp.withcat.ai/guide/objects-lifecycle
925
+ // Note: llama.dispose() can hang indefinitely, so we use a timeout
926
+ if (this.llama) {
927
+ const disposePromise = this.llama.dispose();
928
+ const timeoutPromise = new Promise((resolve) => setTimeout(resolve, 1000));
929
+ await Promise.race([disposePromise, timeoutPromise]);
930
+ }
931
+ // Clear references
932
+ this.embedContexts = [];
933
+ this.rerankContexts = [];
934
+ this.embedModel = null;
935
+ this.generateModel = null;
936
+ this.rerankModel = null;
937
+ this.llama = null;
938
+ // Clear any in-flight load/create promises
939
+ this.embedModelLoadPromise = null;
940
+ this.embedContextsCreatePromise = null;
941
+ this.generateModelLoadPromise = null;
942
+ this.rerankModelLoadPromise = null;
943
+ }
944
+ }
945
+ // =============================================================================
946
+ // Session Management Layer
947
+ // =============================================================================
948
/**
 * Reference-counting lifecycle manager for LLM sessions.
 * Coordinates with the LlamaCpp idle timeout so models are never
 * disposed while a session or operation is in progress.
 */
class LLMSessionManager {
    llm;
    _activeSessionCount = 0;
    _inFlightOperations = 0;
    /** @param llm - The LlamaCpp instance this manager guards. */
    constructor(llm) {
        this.llm = llm;
    }
    /** Number of sessions currently holding a lease. */
    get activeSessionCount() {
        return this._activeSessionCount;
    }
    /** Number of LLM operations currently executing. */
    get inFlightOperations() {
        return this._inFlightOperations;
    }
    /**
     * True only when no sessions are held and no operations are running.
     * LlamaCpp consults this to decide whether idle unload is safe.
     */
    canUnload() {
        const busy = this._activeSessionCount > 0 || this._inFlightOperations > 0;
        return !busy;
    }
    /** Take one session lease. */
    acquire() {
        this._activeSessionCount += 1;
    }
    /** Return one session lease; the count never drops below zero. */
    release() {
        if (this._activeSessionCount > 0)
            this._activeSessionCount -= 1;
    }
    /** Record the start of an LLM operation. */
    operationStart() {
        this._inFlightOperations += 1;
    }
    /** Record the end of an LLM operation; the count never drops below zero. */
    operationEnd() {
        if (this._inFlightOperations > 0)
            this._inFlightOperations -= 1;
    }
    /** The guarded LlamaCpp instance. */
    getLlamaCpp() {
        return this.llm;
    }
}
988
/**
 * Error raised when an operation is attempted on a session that has
 * already been released or aborted.
 */
export class SessionReleasedError extends Error {
    /** @param {string} [message] - Optional override for the default message. */
    constructor(message = "LLM session has been released or aborted") {
        super(message);
        this.name = "SessionReleasedError";
    }
}
997
/**
 * Scoped LLM session with automatic lifecycle management.
 * Wraps LlamaCpp methods with operation tracking and abort handling.
 */
class LLMSession {
    manager;
    released = false;
    abortController;
    maxDurationTimer = null;
    name;
    /**
     * @param manager - LLMSessionManager tracking leases and operations.
     * @param options - { name?, signal?, maxDuration? } session options.
     */
    constructor(manager, options = {}) {
        this.manager = manager;
        this.name = options.name || "unnamed";
        this.abortController = new AbortController();
        // Mirror an external abort signal into this session's controller.
        const upstream = options.signal;
        if (upstream) {
            if (upstream.aborted) {
                this.abortController.abort(upstream.reason);
            }
            else {
                upstream.addEventListener("abort", () => this.abortController.abort(upstream.reason), { once: true });
            }
        }
        // Hard cap on session lifetime; a value <= 0 disables the watchdog.
        const maxDuration = options.maxDuration ?? 10 * 60 * 1000; // default: 10 minutes
        if (maxDuration > 0) {
            const expire = () => {
                this.abortController.abort(new Error(`Session "${this.name}" exceeded max duration of ${maxDuration}ms`));
            };
            this.maxDurationTimer = setTimeout(expire, maxDuration);
            this.maxDurationTimer.unref(); // don't keep the process alive
        }
        // Take a lease so idle unload is blocked while this session lives.
        this.manager.acquire();
    }
    /** True while the session is neither released nor aborted. */
    get isValid() {
        if (this.released)
            return false;
        return !this.abortController.signal.aborted;
    }
    /** Abort signal covering release, external abort, and max-duration expiry. */
    get signal() {
        return this.abortController.signal;
    }
    /**
     * Release the session and return its lease.
     * Called automatically by withLLMSession when the callback completes;
     * repeated calls are no-ops.
     */
    release() {
        if (this.released)
            return;
        this.released = true;
        if (this.maxDurationTimer) {
            clearTimeout(this.maxDurationTimer);
            this.maxDurationTimer = null;
        }
        this.abortController.abort(new Error("Session released"));
        this.manager.release();
    }
    /**
     * Run fn with in-flight tracking and abort checks.
     * @throws SessionReleasedError if the session is released or aborted.
     */
    async withOperation(fn) {
        if (!this.isValid)
            throw new SessionReleasedError();
        this.manager.operationStart();
        try {
            const { signal } = this.abortController;
            // Re-check: an abort may have landed between isValid and here.
            if (signal.aborted)
                throw new SessionReleasedError(signal.reason?.message || "Session aborted");
            return await fn();
        }
        finally {
            this.manager.operationEnd();
        }
    }
    /** Embed one text under this session's lifecycle guarantees. */
    async embed(text, options) {
        return this.withOperation(() => this.manager.getLlamaCpp().embed(text, options));
    }
    /** Embed a batch of texts under this session's lifecycle guarantees. */
    async embedBatch(texts) {
        return this.withOperation(() => this.manager.getLlamaCpp().embedBatch(texts));
    }
    /** Expand a query under this session's lifecycle guarantees. */
    async expandQuery(query, options) {
        return this.withOperation(() => this.manager.getLlamaCpp().expandQuery(query, options));
    }
    /** Rerank documents under this session's lifecycle guarantees. */
    async rerank(query, documents, options) {
        return this.withOperation(() => this.manager.getLlamaCpp().rerank(query, documents, options));
    }
}
1086
// Lazily-built session manager bound to the default LlamaCpp instance
let defaultSessionManager = null;
/**
 * Get the session manager for the default LlamaCpp instance.
 * Rebuilds the manager whenever the default LlamaCpp singleton changes.
 */
function getSessionManager() {
    const llm = getDefaultLlamaCpp();
    const stale = !defaultSessionManager || defaultSessionManager.getLlamaCpp() !== llm;
    if (stale) {
        defaultSessionManager = new LLMSessionManager(llm);
    }
    return defaultSessionManager;
}
1098
/**
 * Run a callback inside a scoped LLM session.
 * The session guarantees that LLM resources are not disposed mid-operation;
 * it is released automatically when the callback settles.
 *
 * @example
 * ```typescript
 * await withLLMSession(async (session) => {
 *   const expanded = await session.expandQuery(query);
 *   const embeddings = await session.embedBatch(texts);
 *   const reranked = await session.rerank(query, docs);
 *   return reranked;
 * }, { maxDuration: 10 * 60 * 1000, name: 'querySearch' });
 * ```
 */
export async function withLLMSession(fn, options) {
    const session = new LLMSession(getSessionManager(), options);
    try {
        return await fn(session);
    }
    finally {
        session.release();
    }
}
1122
/**
 * Run a callback inside a scoped LLM session bound to a specific LlamaCpp
 * instance. Unlike withLLMSession, the global singleton is not used.
 */
export async function withLLMSessionForLlm(llm, fn, options) {
    const session = new LLMSession(new LLMSessionManager(llm), options);
    try {
        return await fn(session);
    }
    finally {
        session.release();
    }
}
1136
/**
 * Check whether idle unload is safe (no active sessions or in-flight
 * operations). Used internally by the LlamaCpp idle timer; trivially
 * safe when no session manager has been created yet.
 */
export function canUnloadLLM() {
    return defaultSessionManager ? defaultSessionManager.canUnload() : true;
}
1145
+ // =============================================================================
1146
+ // Singleton for default LlamaCpp instance
1147
+ // =============================================================================
1148
let defaultLlamaCpp = null;
/**
 * Get the default LlamaCpp instance, creating it lazily on first use.
 * Honors the QMD_EMBED_MODEL environment variable for the embedding model.
 */
export function getDefaultLlamaCpp() {
    if (!defaultLlamaCpp) {
        const embedModel = process.env.QMD_EMBED_MODEL;
        const opts = embedModel ? { embedModel } : {};
        defaultLlamaCpp = new LlamaCpp(opts);
    }
    return defaultLlamaCpp;
}
1159
/**
 * Replace the default LlamaCpp instance (primarily useful for testing).
 * Pass null to force getDefaultLlamaCpp to build a fresh instance later.
 */
export function setDefaultLlamaCpp(llm) {
    defaultLlamaCpp = llm;
}
1165
/**
 * Dispose the default LlamaCpp instance, if one exists, then clear the
 * singleton. Call before process exit to prevent NAPI crashes.
 */
export async function disposeDefaultLlamaCpp() {
    if (!defaultLlamaCpp)
        return;
    // Dispose first, then clear: if dispose throws, the reference stays
    // set so a retry is possible.
    await defaultLlamaCpp.dispose();
    defaultLlamaCpp = null;
}