@tobilu/qmd 0.9.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/src/llm.ts ADDED
@@ -0,0 +1,1208 @@
1
+ /**
2
+ * llm.ts - LLM abstraction layer for QMD using node-llama-cpp
3
+ *
4
+ * Provides embeddings, text generation, and reranking using local GGUF models.
5
+ */
6
+
7
+ import {
8
+ getLlama,
9
+ resolveModelFile,
10
+ LlamaChatSession,
11
+ LlamaLogLevel,
12
+ type Llama,
13
+ type LlamaModel,
14
+ type LlamaEmbeddingContext,
15
+ type Token as LlamaToken,
16
+ } from "node-llama-cpp";
17
+ import { homedir } from "os";
18
+ import { join } from "path";
19
+ import { existsSync, mkdirSync, statSync, unlinkSync, readdirSync, readFileSync, writeFileSync } from "fs";
20
+
21
+ // =============================================================================
22
+ // Embedding Formatting Functions
23
+ // =============================================================================
24
+
25
+ /**
26
+ * Format a query for embedding.
27
+ * Uses nomic-style task prefix format for embeddinggemma.
28
+ */
29
+ export function formatQueryForEmbedding(query: string): string {
30
+ return `task: search result | query: ${query}`;
31
+ }
32
+
33
+ /**
34
+ * Format a document for embedding.
35
+ * Uses nomic-style format with title and text fields.
36
+ */
37
+ export function formatDocForEmbedding(text: string, title?: string): string {
38
+ return `title: ${title || "none"} | text: ${text}`;
39
+ }
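+
+ // Illustrative examples of the two formatters (inputs are hypothetical; callers are
+ // expected to apply these helpers before embedding):
+ //
+ //   formatQueryForEmbedding("rotate api tokens")
+ //   // => "task: search result | query: rotate api tokens"
+ //
+ //   formatDocForEmbedding("Tokens can be rotated from the settings page.", "API tokens")
+ //   // => "title: API tokens | text: Tokens can be rotated from the settings page."
+ //
+ //   formatDocForEmbedding("Untitled note body")
+ //   // => "title: none | text: Untitled note body"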
40
+
41
+ // =============================================================================
42
+ // Types
43
+ // =============================================================================
44
+
45
+ /**
46
+ * Token with log probability
47
+ */
48
+ export type TokenLogProb = {
49
+ token: string;
50
+ logprob: number;
51
+ };
52
+
53
+ /**
54
+ * Embedding result
55
+ */
56
+ export type EmbeddingResult = {
57
+ embedding: number[];
58
+ model: string;
59
+ };
60
+
61
+ /**
62
+ * Generation result with optional logprobs
63
+ */
64
+ export type GenerateResult = {
65
+ text: string;
66
+ model: string;
67
+ logprobs?: TokenLogProb[];
68
+ done: boolean;
69
+ };
70
+
71
+ /**
72
+ * Rerank result for a single document
73
+ */
74
+ export type RerankDocumentResult = {
75
+ file: string;
76
+ score: number;
77
+ index: number;
78
+ };
79
+
80
+ /**
81
+ * Batch rerank result
82
+ */
83
+ export type RerankResult = {
84
+ results: RerankDocumentResult[];
85
+ model: string;
86
+ };
87
+
88
+ /**
89
+ * Model info
90
+ */
91
+ export type ModelInfo = {
92
+ name: string;
93
+ exists: boolean;
94
+ path?: string;
95
+ };
96
+
97
+ /**
98
+ * Options for embedding
99
+ */
100
+ export type EmbedOptions = {
101
+ model?: string;
102
+ isQuery?: boolean;
103
+ title?: string;
104
+ };
105
+
106
+ /**
107
+ * Options for text generation
108
+ */
109
+ export type GenerateOptions = {
110
+ model?: string;
111
+ maxTokens?: number;
112
+ temperature?: number;
113
+ };
114
+
115
+ /**
116
+ * Options for reranking
117
+ */
118
+ export type RerankOptions = {
119
+ model?: string;
120
+ };
121
+
122
+ /**
123
+ * Options for LLM sessions
124
+ */
125
+ export type LLMSessionOptions = {
126
+ /** Max session duration in ms (default: 10 minutes) */
127
+ maxDuration?: number;
128
+ /** External abort signal */
129
+ signal?: AbortSignal;
130
+ /** Debug name for logging */
131
+ name?: string;
132
+ };
133
+
134
+ /**
135
+ * Session interface for scoped LLM access with lifecycle guarantees
136
+ */
137
+ export interface ILLMSession {
138
+ embed(text: string, options?: EmbedOptions): Promise<EmbeddingResult | null>;
139
+ embedBatch(texts: string[]): Promise<(EmbeddingResult | null)[]>;
140
+ expandQuery(query: string, options?: { context?: string; includeLexical?: boolean }): Promise<Queryable[]>;
141
+ rerank(query: string, documents: RerankDocument[], options?: RerankOptions): Promise<RerankResult>;
142
+ /** Whether this session is still valid (not released or aborted) */
143
+ readonly isValid: boolean;
144
+ /** Abort signal for this session (aborts on release or maxDuration) */
145
+ readonly signal: AbortSignal;
146
+ }
147
+
148
+ /**
149
+ * Supported query types for different search backends
150
+ */
151
+ export type QueryType = 'lex' | 'vec' | 'hyde';
152
+
153
+ /**
154
+ * A single query and its target backend type
155
+ */
156
+ export type Queryable = {
157
+ type: QueryType;
158
+ text: string;
159
+ };
160
+
161
+ /**
162
+ * Document to rerank
163
+ */
164
+ export type RerankDocument = {
165
+ file: string;
166
+ text: string;
167
+ title?: string;
168
+ };
169
+
170
+ // =============================================================================
171
+ // Model Configuration
172
+ // =============================================================================
173
+
174
+ // HuggingFace model URIs for node-llama-cpp
175
+ // Format: hf:<user>/<repo>/<file>
176
+ const DEFAULT_EMBED_MODEL = "hf:ggml-org/embeddinggemma-300M-GGUF/embeddinggemma-300M-Q8_0.gguf";
177
+ const DEFAULT_RERANK_MODEL = "hf:ggml-org/Qwen3-Reranker-0.6B-Q8_0-GGUF/qwen3-reranker-0.6b-q8_0.gguf";
178
+ // const DEFAULT_GENERATE_MODEL = "hf:ggml-org/Qwen3-0.6B-GGUF/Qwen3-0.6B-Q8_0.gguf";
179
+ const DEFAULT_GENERATE_MODEL = "hf:tobil/qmd-query-expansion-1.7B-gguf/qmd-query-expansion-1.7B-q4_k_m.gguf";
180
+
181
+ export const DEFAULT_EMBED_MODEL_URI = DEFAULT_EMBED_MODEL;
182
+ export const DEFAULT_RERANK_MODEL_URI = DEFAULT_RERANK_MODEL;
183
+ export const DEFAULT_GENERATE_MODEL_URI = DEFAULT_GENERATE_MODEL;
184
+
185
+ // Local model cache directory
186
+ const MODEL_CACHE_DIR = join(homedir(), ".cache", "qmd", "models");
187
+ export const DEFAULT_MODEL_CACHE_DIR = MODEL_CACHE_DIR;
188
+
189
+ export type PullResult = {
190
+ model: string;
191
+ path: string;
192
+ sizeBytes: number;
193
+ refreshed: boolean;
194
+ };
195
+
196
+ type HfRef = {
197
+ repo: string;
198
+ file: string;
199
+ };
200
+
201
+ function parseHfUri(model: string): HfRef | null {
202
+ if (!model.startsWith("hf:")) return null;
203
+ const without = model.slice(3);
204
+ const parts = without.split("/");
205
+ if (parts.length < 3) return null;
206
+ const repo = parts.slice(0, 2).join("/");
207
+ const file = parts.slice(2).join("/");
208
+ return { repo, file };
209
+ }
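+
+ // Sketch of the expected parse (using the default embed model URI above as input):
+ //
+ //   parseHfUri("hf:ggml-org/embeddinggemma-300M-GGUF/embeddinggemma-300M-Q8_0.gguf")
+ //   // => { repo: "ggml-org/embeddinggemma-300M-GGUF", file: "embeddinggemma-300M-Q8_0.gguf" }
+ //
+ //   parseHfUri("/local/path/model.gguf")
+ //   // => null (only "hf:" URIs are parsed)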
210
+
211
+ async function getRemoteEtag(ref: HfRef): Promise<string | null> {
212
+ const url = `https://huggingface.co/${ref.repo}/resolve/main/${ref.file}`;
213
+ try {
214
+ const resp = await fetch(url, { method: "HEAD" });
215
+ if (!resp.ok) return null;
216
+ const etag = resp.headers.get("etag");
217
+ return etag || null;
218
+ } catch {
219
+ return null;
220
+ }
221
+ }
222
+
223
+ export async function pullModels(
224
+ models: string[],
225
+ options: { refresh?: boolean; cacheDir?: string } = {}
226
+ ): Promise<PullResult[]> {
227
+ const cacheDir = options.cacheDir || MODEL_CACHE_DIR;
228
+ if (!existsSync(cacheDir)) {
229
+ mkdirSync(cacheDir, { recursive: true });
230
+ }
231
+
232
+ const results: PullResult[] = [];
233
+ for (const model of models) {
234
+ let refreshed = false;
235
+ const hfRef = parseHfUri(model);
236
+ const filename = model.split("/").pop();
237
+ const entries = readdirSync(cacheDir, { withFileTypes: true });
238
+ const cached = filename
239
+ ? entries
240
+ .filter((entry) => entry.isFile() && entry.name.includes(filename))
241
+ .map((entry) => join(cacheDir, entry.name))
242
+ : [];
243
+
244
+ if (hfRef && filename) {
245
+ const etagPath = join(cacheDir, `${filename}.etag`);
246
+ const remoteEtag = await getRemoteEtag(hfRef);
247
+ const localEtag = existsSync(etagPath)
248
+ ? readFileSync(etagPath, "utf-8").trim()
249
+ : null;
250
+ const shouldRefresh =
251
+ options.refresh || !remoteEtag || remoteEtag !== localEtag || cached.length === 0;
252
+
253
+ if (shouldRefresh) {
254
+ for (const candidate of cached) {
255
+ if (existsSync(candidate)) unlinkSync(candidate);
256
+ }
257
+ if (existsSync(etagPath)) unlinkSync(etagPath);
258
+ refreshed = cached.length > 0;
259
+ }
260
+ } else if (options.refresh && filename) {
261
+ for (const candidate of cached) {
262
+ if (existsSync(candidate)) unlinkSync(candidate);
263
+ refreshed = true;
264
+ }
265
+ }
266
+
267
+ const path = await resolveModelFile(model, cacheDir);
268
+ const sizeBytes = existsSync(path) ? statSync(path).size : 0;
269
+ if (hfRef && filename) {
270
+ const remoteEtag = await getRemoteEtag(hfRef);
271
+ if (remoteEtag) {
272
+ const etagPath = join(cacheDir, `${filename}.etag`);
273
+ writeFileSync(etagPath, remoteEtag + "\n", "utf-8");
274
+ }
275
+ }
276
+ results.push({ model, path, sizeBytes, refreshed });
277
+ }
278
+ return results;
279
+ }
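+
+ // Minimal usage sketch (a hypothetical call site; the URIs are the exported defaults):
+ //
+ //   const results = await pullModels(
+ //     [DEFAULT_EMBED_MODEL_URI, DEFAULT_RERANK_MODEL_URI],
+ //     { refresh: false }
+ //   );
+ //   for (const r of results) {
+ //     console.log(`${r.model} -> ${r.path} (${r.sizeBytes} bytes, refreshed=${r.refreshed})`);
+ //   }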
280
+
281
+ // =============================================================================
282
+ // LLM Interface
283
+ // =============================================================================
284
+
285
+ /**
286
+ * Abstract LLM interface - implement this for different backends
287
+ */
288
+ export interface LLM {
289
+ /**
290
+ * Get embeddings for text
291
+ */
292
+ embed(text: string, options?: EmbedOptions): Promise<EmbeddingResult | null>;
293
+
294
+ /**
295
+ * Generate text completion
296
+ */
297
+ generate(prompt: string, options?: GenerateOptions): Promise<GenerateResult | null>;
298
+
299
+ /**
300
+ * Check if a model exists/is available
301
+ */
302
+ modelExists(model: string): Promise<ModelInfo>;
303
+
304
+ /**
305
+ * Expand a search query into multiple variations for different backends.
306
+ * Returns a list of Queryable objects.
307
+ */
308
+ expandQuery(query: string, options?: { context?: string, includeLexical?: boolean }): Promise<Queryable[]>;
309
+
310
+ /**
311
+ * Rerank documents by relevance to a query
312
+ * Returns list of documents with relevance scores (higher = more relevant)
313
+ */
314
+ rerank(query: string, documents: RerankDocument[], options?: RerankOptions): Promise<RerankResult>;
315
+
316
+ /**
317
+ * Dispose of resources
318
+ */
319
+ dispose(): Promise<void>;
320
+ }
321
+
322
+ // =============================================================================
323
+ // node-llama-cpp Implementation
324
+ // =============================================================================
325
+
326
+ export type LlamaCppConfig = {
327
+ embedModel?: string;
328
+ generateModel?: string;
329
+ rerankModel?: string;
330
+ modelCacheDir?: string;
331
+ /**
332
+ * Inactivity timeout in ms before unloading contexts (default: 5 minutes, 0 to disable).
333
+ *
334
+ * Per node-llama-cpp lifecycle guidance, we prefer keeping models loaded and only disposing
335
+ * contexts when idle, since contexts (and their sequences) are the heavy per-session objects.
336
+ * @see https://node-llama-cpp.withcat.ai/guide/objects-lifecycle
337
+ */
338
+ inactivityTimeoutMs?: number;
339
+ /**
340
+ * Whether to dispose models on inactivity (default: false).
341
+ *
342
+ * Keeping models loaded avoids repeated VRAM thrash; set to true only if you need aggressive
343
+ * memory reclaim.
344
+ */
345
+ disposeModelsOnInactivity?: boolean;
346
+ };
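+
+ // Configuration sketch for the lifecycle knobs above (values are illustrative, not
+ // recommendations): keep models resident but drop idle contexts after two minutes.
+ //
+ //   const llm = new LlamaCpp({
+ //     inactivityTimeoutMs: 2 * 60 * 1000,
+ //     disposeModelsOnInactivity: false,
+ //   });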
347
+
348
+ // Default inactivity timeout: 5 minutes (keep models warm during typical search sessions)
349
+ const DEFAULT_INACTIVITY_TIMEOUT_MS = 5 * 60 * 1000;
350
+
351
+ /**
352
+ * LLM implementation using node-llama-cpp
353
+ */
354
+ export class LlamaCpp implements LLM {
355
+ private llama: Llama | null = null;
356
+ private embedModel: LlamaModel | null = null;
357
+ private embedContext: LlamaEmbeddingContext | null = null;
358
+ private generateModel: LlamaModel | null = null;
359
+ private rerankModel: LlamaModel | null = null;
360
+ private rerankContext: Awaited<ReturnType<LlamaModel["createRankingContext"]>> | null = null;
361
+
362
+ private embedModelUri: string;
363
+ private generateModelUri: string;
364
+ private rerankModelUri: string;
365
+ private modelCacheDir: string;
366
+
367
+ // Ensure we don't load the same model/context concurrently (which can allocate duplicate VRAM).
368
+ private embedModelLoadPromise: Promise<LlamaModel> | null = null;
369
+ private embedContextCreatePromise: Promise<LlamaEmbeddingContext> | null = null;
370
+ private generateModelLoadPromise: Promise<LlamaModel> | null = null;
371
+ private rerankModelLoadPromise: Promise<LlamaModel> | null = null;
372
+
373
+ // Inactivity timer for auto-unloading models
374
+ private inactivityTimer: ReturnType<typeof setTimeout> | null = null;
375
+ private inactivityTimeoutMs: number;
376
+ private disposeModelsOnInactivity: boolean;
377
+
378
+ // Track disposal state to prevent double-dispose
379
+ private disposed = false;
380
+
381
+
382
+ constructor(config: LlamaCppConfig = {}) {
383
+ this.embedModelUri = config.embedModel || DEFAULT_EMBED_MODEL;
384
+ this.generateModelUri = config.generateModel || DEFAULT_GENERATE_MODEL;
385
+ this.rerankModelUri = config.rerankModel || DEFAULT_RERANK_MODEL;
386
+ this.modelCacheDir = config.modelCacheDir || MODEL_CACHE_DIR;
387
+ this.inactivityTimeoutMs = config.inactivityTimeoutMs ?? DEFAULT_INACTIVITY_TIMEOUT_MS;
388
+ this.disposeModelsOnInactivity = config.disposeModelsOnInactivity ?? false;
389
+ }
390
+
391
+ /**
392
+ * Reset the inactivity timer. Called after each model operation.
393
+ * When timer fires, models are unloaded to free memory (if no active sessions).
394
+ */
395
+ private touchActivity(): void {
396
+ // Clear existing timer
397
+ if (this.inactivityTimer) {
398
+ clearTimeout(this.inactivityTimer);
399
+ this.inactivityTimer = null;
400
+ }
401
+
402
+ // Only set timer if we have disposable contexts and timeout is enabled
403
+ if (this.inactivityTimeoutMs > 0 && this.hasLoadedContexts()) {
404
+ this.inactivityTimer = setTimeout(() => {
405
+ // Check if session manager allows unloading
406
+ // canUnloadLLM is a function declaration defined later in this file; it consults the session manager.
407
+ // Function declarations are hoisted, so the typeof check below is purely a defensive guard.
408
+ if (typeof canUnloadLLM === 'function' && !canUnloadLLM()) {
409
+ // Active sessions/operations - reschedule timer
410
+ this.touchActivity();
411
+ return;
412
+ }
413
+ this.unloadIdleResources().catch(err => {
414
+ console.error("Error unloading idle resources:", err);
415
+ });
416
+ }, this.inactivityTimeoutMs);
417
+ // Don't keep process alive just for this timer
418
+ this.inactivityTimer.unref();
419
+ }
420
+ }
421
+
422
+ /**
423
+ * Check if any contexts are currently loaded (and therefore worth unloading on inactivity).
424
+ */
425
+ private hasLoadedContexts(): boolean {
426
+ return !!(this.embedContext || this.rerankContext);
427
+ }
428
+
429
+ /**
430
+ * Unload idle resources but keep the instance alive for future use.
431
+ *
432
+ * By default, this disposes contexts (and their dependent sequences), while keeping models loaded.
433
+ * This matches the intended lifecycle: model → context → sequence, where contexts are per-session.
434
+ */
435
+ async unloadIdleResources(): Promise<void> {
436
+ // Don't unload if already disposed
437
+ if (this.disposed) {
438
+ return;
439
+ }
440
+
441
+ // Clear timer
442
+ if (this.inactivityTimer) {
443
+ clearTimeout(this.inactivityTimer);
444
+ this.inactivityTimer = null;
445
+ }
446
+
447
+ // Dispose contexts first
448
+ if (this.embedContext) {
449
+ await this.embedContext.dispose();
450
+ this.embedContext = null;
451
+ }
452
+ if (this.rerankContext) {
453
+ await this.rerankContext.dispose();
454
+ this.rerankContext = null;
455
+ }
456
+
457
+ // Optionally dispose models too (opt-in)
458
+ if (this.disposeModelsOnInactivity) {
459
+ if (this.embedModel) {
460
+ await this.embedModel.dispose();
461
+ this.embedModel = null;
462
+ }
463
+ if (this.generateModel) {
464
+ await this.generateModel.dispose();
465
+ this.generateModel = null;
466
+ }
467
+ if (this.rerankModel) {
468
+ await this.rerankModel.dispose();
469
+ this.rerankModel = null;
470
+ }
471
+ // Reset load promises so models can be reloaded later
472
+ this.embedModelLoadPromise = null;
473
+ this.generateModelLoadPromise = null;
474
+ this.rerankModelLoadPromise = null;
475
+ }
476
+
477
+ // Note: We keep llama instance alive - it's lightweight
478
+ }
479
+
480
+ /**
481
+ * Ensure model cache directory exists
482
+ */
483
+ private ensureModelCacheDir(): void {
484
+ if (!existsSync(this.modelCacheDir)) {
485
+ mkdirSync(this.modelCacheDir, { recursive: true });
486
+ }
487
+ }
488
+
489
+ /**
490
+ * Initialize the llama instance (lazy)
491
+ */
492
+ private async ensureLlama(): Promise<Llama> {
493
+ if (!this.llama) {
494
+ this.llama = await getLlama({ logLevel: LlamaLogLevel.error });
495
+ }
496
+ return this.llama;
497
+ }
498
+
499
+ /**
500
+ * Resolve a model URI to a local path, downloading if needed
501
+ */
502
+ private async resolveModel(modelUri: string): Promise<string> {
503
+ this.ensureModelCacheDir();
504
+ // resolveModelFile handles HF URIs and downloads to the cache dir
505
+ return await resolveModelFile(modelUri, this.modelCacheDir);
506
+ }
507
+
508
+ /**
509
+ * Load embedding model (lazy)
510
+ */
511
+ private async ensureEmbedModel(): Promise<LlamaModel> {
512
+ if (this.embedModel) {
513
+ return this.embedModel;
514
+ }
515
+ if (this.embedModelLoadPromise) {
516
+ return await this.embedModelLoadPromise;
517
+ }
518
+
519
+ this.embedModelLoadPromise = (async () => {
520
+ const llama = await this.ensureLlama();
521
+ const modelPath = await this.resolveModel(this.embedModelUri);
522
+ const model = await llama.loadModel({ modelPath });
523
+ this.embedModel = model;
524
+ // Model loading counts as activity - ping to keep alive
525
+ this.touchActivity();
526
+ return model;
527
+ })();
528
+
529
+ try {
530
+ return await this.embedModelLoadPromise;
531
+ } finally {
532
+ // Keep the resolved model cached; clear only the in-flight promise.
533
+ this.embedModelLoadPromise = null;
534
+ }
535
+ }
536
+
537
+ /**
538
+ * Load embedding context (lazy). Context can be disposed and recreated without reloading the model.
539
+ * Uses promise guard to prevent concurrent context creation race condition.
540
+ */
541
+ private async ensureEmbedContext(): Promise<LlamaEmbeddingContext> {
542
+ if (!this.embedContext) {
543
+ // If context creation is already in progress, wait for it
544
+ if (this.embedContextCreatePromise) {
545
+ return await this.embedContextCreatePromise;
546
+ }
547
+
548
+ // Start context creation and store promise so concurrent calls wait
549
+ this.embedContextCreatePromise = (async () => {
550
+ const model = await this.ensureEmbedModel();
551
+ const context = await model.createEmbeddingContext();
552
+ this.embedContext = context;
553
+ return context;
554
+ })();
555
+
556
+ try {
557
+ const context = await this.embedContextCreatePromise;
558
+ this.touchActivity();
559
+ return context;
560
+ } finally {
561
+ this.embedContextCreatePromise = null;
562
+ }
563
+ }
564
+ this.touchActivity();
565
+ return this.embedContext;
566
+ }
567
+
568
+ /**
569
+ * Load generation model (lazy) - context is created fresh per call
570
+ */
571
+ private async ensureGenerateModel(): Promise<LlamaModel> {
572
+ if (!this.generateModel) {
573
+ if (this.generateModelLoadPromise) {
574
+ return await this.generateModelLoadPromise;
575
+ }
576
+
577
+ this.generateModelLoadPromise = (async () => {
578
+ const llama = await this.ensureLlama();
579
+ const modelPath = await this.resolveModel(this.generateModelUri);
580
+ const model = await llama.loadModel({ modelPath });
581
+ this.generateModel = model;
582
+ return model;
583
+ })();
584
+
585
+ try {
586
+ await this.generateModelLoadPromise;
587
+ } finally {
588
+ this.generateModelLoadPromise = null;
589
+ }
590
+ }
591
+ this.touchActivity();
592
+ if (!this.generateModel) {
593
+ throw new Error("Generate model not loaded");
594
+ }
595
+ return this.generateModel;
596
+ }
597
+
598
+ /**
599
+ * Load rerank model (lazy)
600
+ */
601
+ private async ensureRerankModel(): Promise<LlamaModel> {
602
+ if (this.rerankModel) {
603
+ return this.rerankModel;
604
+ }
605
+ if (this.rerankModelLoadPromise) {
606
+ return await this.rerankModelLoadPromise;
607
+ }
608
+
609
+ this.rerankModelLoadPromise = (async () => {
610
+ const llama = await this.ensureLlama();
611
+ const modelPath = await this.resolveModel(this.rerankModelUri);
612
+ const model = await llama.loadModel({ modelPath });
613
+ this.rerankModel = model;
614
+ // Model loading counts as activity - ping to keep alive
615
+ this.touchActivity();
616
+ return model;
617
+ })();
618
+
619
+ try {
620
+ return await this.rerankModelLoadPromise;
621
+ } finally {
622
+ this.rerankModelLoadPromise = null;
623
+ }
624
+ }
625
+
626
+ /**
627
+ * Load rerank context (lazy). Context can be disposed and recreated without reloading the model.
628
+ */
629
+ private async ensureRerankContext(): Promise<Awaited<ReturnType<LlamaModel["createRankingContext"]>>> {
630
+ if (!this.rerankContext) {
631
+ const model = await this.ensureRerankModel();
632
+ this.rerankContext = await model.createRankingContext();
633
+ }
634
+ this.touchActivity();
635
+ return this.rerankContext;
636
+ }
637
+
638
+ // ==========================================================================
639
+ // Tokenization
640
+ // ==========================================================================
641
+
642
+ /**
643
+ * Tokenize text using the embedding model's tokenizer
644
+ * Returns tokenizer tokens (opaque type from node-llama-cpp)
645
+ */
646
+ async tokenize(text: string): Promise<readonly LlamaToken[]> {
647
+ await this.ensureEmbedContext(); // Ensure model is loaded
648
+ if (!this.embedModel) {
649
+ throw new Error("Embed model not loaded");
650
+ }
651
+ return this.embedModel.tokenize(text);
652
+ }
653
+
654
+ /**
655
+ * Count tokens in text using the embedding model's tokenizer
656
+ */
657
+ async countTokens(text: string): Promise<number> {
658
+ const tokens = await this.tokenize(text);
659
+ return tokens.length;
660
+ }
661
+
662
+ /**
663
+ * Detokenize token IDs back to text
664
+ */
665
+ async detokenize(tokens: readonly LlamaToken[]): Promise<string> {
666
+ await this.ensureEmbedContext();
667
+ if (!this.embedModel) {
668
+ throw new Error("Embed model not loaded");
669
+ }
670
+ return this.embedModel.detokenize(tokens);
671
+ }
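+
+ // Token accounting sketch (assumes callers budget chunk sizes with the embed model's tokenizer):
+ //
+ //   const llm = getDefaultLlamaCpp();
+ //   const tokens = await llm.tokenize("## Heading\n\nSome markdown body text");
+ //   const count = await llm.countTokens("## Heading\n\nSome markdown body text"); // === tokens.length
+ //   const roundTrip = await llm.detokenize(tokens); // approximately the original text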
672
+
673
+ // ==========================================================================
674
+ // Core API methods
675
+ // ==========================================================================
676
+
677
+ async embed(text: string, options: EmbedOptions = {}): Promise<EmbeddingResult | null> {
678
+ // Ping activity at start to keep models alive during this operation
679
+ this.touchActivity();
680
+
681
+ try {
682
+ const context = await this.ensureEmbedContext();
683
+ const embedding = await context.getEmbeddingFor(text);
684
+
685
+ return {
686
+ embedding: Array.from(embedding.vector),
687
+ model: this.embedModelUri,
688
+ };
689
+ } catch (error) {
690
+ console.error("Embedding error:", error);
691
+ return null;
692
+ }
693
+ }
694
+
695
+ /**
696
+ * Batch embed multiple texts efficiently
697
+ * Uses Promise.all for parallel embedding - node-llama-cpp handles batching internally
698
+ */
699
+ async embedBatch(texts: string[]): Promise<(EmbeddingResult | null)[]> {
700
+ // Ping activity at start to keep models alive during this operation
701
+ this.touchActivity();
702
+
703
+ if (texts.length === 0) return [];
704
+
705
+ try {
706
+ const context = await this.ensureEmbedContext();
707
+
708
+ // node-llama-cpp handles batching internally when we make parallel requests
709
+ const embeddings = await Promise.all(
710
+ texts.map(async (text) => {
711
+ try {
712
+ const embedding = await context.getEmbeddingFor(text);
713
+ this.touchActivity(); // Keep-alive during slow batches
714
+ return {
715
+ embedding: Array.from(embedding.vector),
716
+ model: this.embedModelUri,
717
+ };
718
+ } catch (err) {
719
+ console.error("Embedding error for text:", err);
720
+ return null;
721
+ }
722
+ })
723
+ );
724
+
725
+ return embeddings;
726
+ } catch (error) {
727
+ console.error("Batch embedding error:", error);
728
+ return texts.map(() => null);
729
+ }
730
+ }
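+
+ // Usage sketch combining the formatters with embed()/embedBatch() (texts are hypothetical;
+ // embed() embeds the string as given, so callers pre-format queries and documents):
+ //
+ //   const llm = getDefaultLlamaCpp();
+ //   const q = await llm.embed(formatQueryForEmbedding("how to rotate tokens"));
+ //   const docs = await llm.embedBatch([
+ //     formatDocForEmbedding("Rotate tokens from the settings page.", "API tokens"),
+ //     formatDocForEmbedding("Unrelated onboarding note."),
+ //   ]);
+ //   // q?.embedding and each docs[i]?.embedding are plain number[] vectors (null on failure)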
731
+
732
+ async generate(prompt: string, options: GenerateOptions = {}): Promise<GenerateResult | null> {
733
+ // Ping activity at start to keep models alive during this operation
734
+ this.touchActivity();
735
+
736
+ // Ensure model is loaded
737
+ await this.ensureGenerateModel();
738
+
739
+ // Create fresh context -> sequence -> session for each call
740
+ const context = await this.generateModel!.createContext();
741
+ const sequence = context.getSequence();
742
+ const session = new LlamaChatSession({ contextSequence: sequence });
743
+
744
+ const maxTokens = options.maxTokens ?? 150;
745
+ // Qwen3 recommends temp=0.7, topP=0.8, topK=20 for non-thinking mode
746
+ // DO NOT use greedy decoding (temp=0) - causes repetition loops
747
+ const temperature = options.temperature ?? 0.7;
748
+
749
+ let result = "";
750
+ try {
751
+ await session.prompt(prompt, {
752
+ maxTokens,
753
+ temperature,
754
+ topK: 20,
755
+ topP: 0.8,
756
+ onTextChunk: (text) => {
757
+ result += text;
758
+ },
759
+ });
760
+
761
+ return {
762
+ text: result,
763
+ model: this.generateModelUri,
764
+ done: true,
765
+ };
766
+ } finally {
767
+ // Dispose context (which disposes dependent sequences/sessions per lifecycle rules)
768
+ await context.dispose();
769
+ }
770
+ }
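+
+ // Generation sketch (the prompt is hypothetical; sampling follows the Qwen3 guidance above):
+ //
+ //   const out = await getDefaultLlamaCpp().generate(
+ //     "/no_think Summarize in one sentence: local GGUF models provide embeddings and reranking.",
+ //     { maxTokens: 100, temperature: 0.7 }
+ //   );
+ //   if (out?.done) console.log(out.text);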
771
+
772
+ async modelExists(modelUri: string): Promise<ModelInfo> {
773
+ // For HuggingFace URIs, we assume they exist
774
+ // For local paths, check if file exists
775
+ if (modelUri.startsWith("hf:")) {
776
+ return { name: modelUri, exists: true };
777
+ }
778
+
779
+ const exists = existsSync(modelUri);
780
+ return {
781
+ name: modelUri,
782
+ exists,
783
+ path: exists ? modelUri : undefined,
784
+ };
785
+ }
786
+
787
+ // ==========================================================================
788
+ // High-level abstractions
789
+ // ==========================================================================
790
+
791
+ async expandQuery(query: string, options: { context?: string, includeLexical?: boolean } = {}): Promise<Queryable[]> {
792
+ // Ping activity at start to keep models alive during this operation
793
+ this.touchActivity();
794
+
795
+ const llama = await this.ensureLlama();
796
+ await this.ensureGenerateModel();
797
+
798
+ const includeLexical = options.includeLexical ?? true;
799
+ const context = options.context; // Note: accepted but currently unused; the prompt below does not incorporate caller context
800
+
801
+ const grammar = await llama.createGrammar({
802
+ grammar: `
803
+ root ::= line+
804
+ line ::= type ": " content "\\n"
805
+ type ::= "lex" | "vec" | "hyde"
806
+ content ::= [^\\n]+
807
+ `
808
+ });
809
+
810
+ const prompt = `/no_think Expand this search query: ${query}`;
811
+
812
+ // Create fresh context for each call
813
+ const genContext = await this.generateModel!.createContext();
814
+ const sequence = genContext.getSequence();
815
+ const session = new LlamaChatSession({ contextSequence: sequence });
816
+
817
+ try {
818
+ // Qwen3 recommended settings for non-thinking mode:
819
+ // temp=0.7, topP=0.8, topK=20, presence_penalty for repetition
820
+ // DO NOT use greedy decoding (temp=0) - causes infinite loops
821
+ const result = await session.prompt(prompt, {
822
+ grammar,
823
+ maxTokens: 600,
824
+ temperature: 0.7,
825
+ topK: 20,
826
+ topP: 0.8,
827
+ repeatPenalty: {
828
+ lastTokens: 64,
829
+ presencePenalty: 0.5,
830
+ },
831
+ });
832
+
833
+ const lines = result.trim().split("\n");
834
+ const queryLower = query.toLowerCase();
835
+ const queryTerms = queryLower.replace(/[^a-z0-9\s]/g, " ").split(/\s+/).filter(Boolean);
836
+
837
+ const hasQueryTerm = (text: string): boolean => {
838
+ const lower = text.toLowerCase();
839
+ if (queryTerms.length === 0) return true;
840
+ return queryTerms.some(term => lower.includes(term));
841
+ };
842
+
843
+ const queryables: Queryable[] = lines.map(line => {
844
+ const colonIdx = line.indexOf(":");
845
+ if (colonIdx === -1) return null;
846
+ const type = line.slice(0, colonIdx).trim();
847
+ if (type !== 'lex' && type !== 'vec' && type !== 'hyde') return null;
848
+ const text = line.slice(colonIdx + 1).trim();
849
+ if (!hasQueryTerm(text)) return null;
850
+ return { type: type as QueryType, text };
851
+ }).filter((q): q is Queryable => q !== null);
852
+
853
+ // Filter out lex entries if not requested
854
+ const filtered = includeLexical ? queryables : queryables.filter(q => q.type !== 'lex');
855
+ if (filtered.length > 0) return filtered;
856
+
857
+ const fallback: Queryable[] = [
858
+ { type: 'hyde', text: `Information about ${query}` },
859
+ { type: 'lex', text: query },
860
+ { type: 'vec', text: query },
861
+ ];
862
+ return includeLexical ? fallback : fallback.filter(q => q.type !== 'lex');
863
+ } catch (error) {
864
+ console.error("Structured query expansion failed:", error);
865
+ // Fallback to original query
866
+ const fallback: Queryable[] = [{ type: 'vec', text: query }];
867
+ if (includeLexical) fallback.unshift({ type: 'lex', text: query });
868
+ return fallback;
869
+ } finally {
870
+ await genContext.dispose();
871
+ }
872
+ }
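+
+ // Sketch of the shape expandQuery() returns (the actual expansions depend on the model;
+ // these lines are illustrative only):
+ //
+ //   await getDefaultLlamaCpp().expandQuery("sqlite fts5 ranking");
+ //   // => [
+ //   //      { type: "lex",  text: "sqlite fts5 bm25 ranking" },
+ //   //      { type: "vec",  text: "how result ranking works in sqlite fts5" },
+ //   //      { type: "hyde", text: "SQLite FTS5 ranks matches with the bm25() auxiliary function..." },
+ //   //    ]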
873
+
874
+ async rerank(
875
+ query: string,
876
+ documents: RerankDocument[],
877
+ options: RerankOptions = {}
878
+ ): Promise<RerankResult> {
879
+ // Ping activity at start to keep models alive during this operation
880
+ this.touchActivity();
881
+
882
+ const context = await this.ensureRerankContext();
883
+
884
+ // Build a map from document text to original indices (for lookup after sorting).
+ // Assumes document texts are unique; duplicate texts collapse onto the last matching document.
885
+ const textToDoc = new Map<string, { file: string; index: number }>();
886
+ documents.forEach((doc, index) => {
887
+ textToDoc.set(doc.text, { file: doc.file, index });
888
+ });
889
+
890
+ // Extract just the text for ranking
891
+ const texts = documents.map((doc) => doc.text);
892
+
893
+ // Use the proper ranking API - returns [{document: string, score: number}] sorted by score
894
+ const ranked = await context.rankAndSort(query, texts);
895
+
896
+ // Map back to our result format using the text-to-doc map
897
+ const results: RerankDocumentResult[] = ranked.map((item) => {
898
+ const docInfo = textToDoc.get(item.document)!;
899
+ return {
900
+ file: docInfo.file,
901
+ score: item.score,
902
+ index: docInfo.index,
903
+ };
904
+ });
905
+
906
+ return {
907
+ results,
908
+ model: this.rerankModelUri,
909
+ };
910
+ }
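+
+ // Reranking sketch (file names and snippets are hypothetical):
+ //
+ //   const { results } = await getDefaultLlamaCpp().rerank("rotate api tokens", [
+ //     { file: "docs/security.md", text: "Tokens can be rotated from the settings page." },
+ //     { file: "docs/changelog.md", text: "Added dark mode support." },
+ //   ]);
+ //   // results are sorted most-relevant first and keep each document's original index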
911
+
912
+ async dispose(): Promise<void> {
913
+ // Prevent double-dispose
914
+ if (this.disposed) {
915
+ return;
916
+ }
917
+ this.disposed = true;
918
+
919
+ // Clear inactivity timer
920
+ if (this.inactivityTimer) {
921
+ clearTimeout(this.inactivityTimer);
922
+ this.inactivityTimer = null;
923
+ }
924
+
925
+ // Disposing llama cascades to models and contexts automatically
926
+ // See: https://node-llama-cpp.withcat.ai/guide/objects-lifecycle
927
+ // Note: llama.dispose() can hang indefinitely, so we use a timeout
928
+ if (this.llama) {
929
+ const disposePromise = this.llama.dispose();
930
+ const timeoutPromise = new Promise<void>((resolve) => setTimeout(resolve, 1000));
931
+ await Promise.race([disposePromise, timeoutPromise]);
932
+ }
933
+
934
+ // Clear references
935
+ this.embedContext = null;
936
+ this.rerankContext = null;
937
+ this.embedModel = null;
938
+ this.generateModel = null;
939
+ this.rerankModel = null;
940
+ this.llama = null;
941
+
942
+ // Clear any in-flight load/create promises
943
+ this.embedModelLoadPromise = null;
944
+ this.embedContextCreatePromise = null;
945
+ this.generateModelLoadPromise = null;
946
+ this.rerankModelLoadPromise = null;
947
+ }
948
+ }
949
+
950
+ // =============================================================================
951
+ // Session Management Layer
952
+ // =============================================================================
953
+
954
+ /**
955
+ * Manages LLM session lifecycle with reference counting.
956
+ * Coordinates with LlamaCpp idle timeout to prevent disposal during active sessions.
957
+ */
958
+ class LLMSessionManager {
959
+ private llm: LlamaCpp;
960
+ private _activeSessionCount = 0;
961
+ private _inFlightOperations = 0;
962
+
963
+ constructor(llm: LlamaCpp) {
964
+ this.llm = llm;
965
+ }
966
+
967
+ get activeSessionCount(): number {
968
+ return this._activeSessionCount;
969
+ }
970
+
971
+ get inFlightOperations(): number {
972
+ return this._inFlightOperations;
973
+ }
974
+
975
+ /**
976
+ * Returns true only when both session count and in-flight operations are 0.
977
+ * Used by LlamaCpp to determine if idle unload is safe.
978
+ */
979
+ canUnload(): boolean {
980
+ return this._activeSessionCount === 0 && this._inFlightOperations === 0;
981
+ }
982
+
983
+ acquire(): void {
984
+ this._activeSessionCount++;
985
+ }
986
+
987
+ release(): void {
988
+ this._activeSessionCount = Math.max(0, this._activeSessionCount - 1);
989
+ }
990
+
991
+ operationStart(): void {
992
+ this._inFlightOperations++;
993
+ }
994
+
995
+ operationEnd(): void {
996
+ this._inFlightOperations = Math.max(0, this._inFlightOperations - 1);
997
+ }
998
+
999
+ getLlamaCpp(): LlamaCpp {
1000
+ return this.llm;
1001
+ }
1002
+ }
1003
+
1004
+ /**
1005
+ * Error thrown when an operation is attempted on a released or aborted session.
1006
+ */
1007
+ export class SessionReleasedError extends Error {
1008
+ constructor(message = "LLM session has been released or aborted") {
1009
+ super(message);
1010
+ this.name = "SessionReleasedError";
1011
+ }
1012
+ }
1013
+
1014
+ /**
1015
+ * Scoped LLM session with automatic lifecycle management.
1016
+ * Wraps LlamaCpp methods with operation tracking and abort handling.
1017
+ */
1018
+ class LLMSession implements ILLMSession {
1019
+ private manager: LLMSessionManager;
1020
+ private released = false;
1021
+ private abortController: AbortController;
1022
+ private maxDurationTimer: ReturnType<typeof setTimeout> | null = null;
1023
+ private name: string;
1024
+
1025
+ constructor(manager: LLMSessionManager, options: LLMSessionOptions = {}) {
1026
+ this.manager = manager;
1027
+ this.name = options.name || "unnamed";
1028
+ this.abortController = new AbortController();
1029
+
1030
+ // Link external abort signal if provided
1031
+ if (options.signal) {
1032
+ if (options.signal.aborted) {
1033
+ this.abortController.abort(options.signal.reason);
1034
+ } else {
1035
+ options.signal.addEventListener("abort", () => {
1036
+ this.abortController.abort(options.signal!.reason);
1037
+ }, { once: true });
1038
+ }
1039
+ }
1040
+
1041
+ // Set up max duration timer
1042
+ const maxDuration = options.maxDuration ?? 10 * 60 * 1000; // Default 10 minutes
1043
+ if (maxDuration > 0) {
1044
+ this.maxDurationTimer = setTimeout(() => {
1045
+ this.abortController.abort(new Error(`Session "${this.name}" exceeded max duration of ${maxDuration}ms`));
1046
+ }, maxDuration);
1047
+ this.maxDurationTimer.unref(); // Don't keep process alive
1048
+ }
1049
+
1050
+ // Acquire session lease
1051
+ this.manager.acquire();
1052
+ }
1053
+
1054
+ get isValid(): boolean {
1055
+ return !this.released && !this.abortController.signal.aborted;
1056
+ }
1057
+
1058
+ get signal(): AbortSignal {
1059
+ return this.abortController.signal;
1060
+ }
1061
+
1062
+ /**
1063
+ * Release the session and decrement ref count.
1064
+ * Called automatically by withLLMSession when the callback completes.
1065
+ */
1066
+ release(): void {
1067
+ if (this.released) return;
1068
+ this.released = true;
1069
+
1070
+ if (this.maxDurationTimer) {
1071
+ clearTimeout(this.maxDurationTimer);
1072
+ this.maxDurationTimer = null;
1073
+ }
1074
+
1075
+ this.abortController.abort(new Error("Session released"));
1076
+ this.manager.release();
1077
+ }
1078
+
1079
+ /**
1080
+ * Wrap an operation with tracking and abort checking.
1081
+ */
1082
+ private async withOperation<T>(fn: () => Promise<T>): Promise<T> {
1083
+ if (!this.isValid) {
1084
+ throw new SessionReleasedError();
1085
+ }
1086
+
1087
+ this.manager.operationStart();
1088
+ try {
1089
+ // Check abort before starting
1090
+ if (this.abortController.signal.aborted) {
1091
+ throw new SessionReleasedError(
1092
+ this.abortController.signal.reason?.message || "Session aborted"
1093
+ );
1094
+ }
1095
+ return await fn();
1096
+ } finally {
1097
+ this.manager.operationEnd();
1098
+ }
1099
+ }
1100
+
1101
+ async embed(text: string, options?: EmbedOptions): Promise<EmbeddingResult | null> {
1102
+ return this.withOperation(() => this.manager.getLlamaCpp().embed(text, options));
1103
+ }
1104
+
1105
+ async embedBatch(texts: string[]): Promise<(EmbeddingResult | null)[]> {
1106
+ return this.withOperation(() => this.manager.getLlamaCpp().embedBatch(texts));
1107
+ }
1108
+
1109
+ async expandQuery(
1110
+ query: string,
1111
+ options?: { context?: string; includeLexical?: boolean }
1112
+ ): Promise<Queryable[]> {
1113
+ return this.withOperation(() => this.manager.getLlamaCpp().expandQuery(query, options));
1114
+ }
1115
+
1116
+ async rerank(
1117
+ query: string,
1118
+ documents: RerankDocument[],
1119
+ options?: RerankOptions
1120
+ ): Promise<RerankResult> {
1121
+ return this.withOperation(() => this.manager.getLlamaCpp().rerank(query, documents, options));
1122
+ }
1123
+ }
1124
+
1125
+ // Session manager for the default LlamaCpp instance
1126
+ let defaultSessionManager: LLMSessionManager | null = null;
1127
+
1128
+ /**
1129
+ * Get the session manager for the default LlamaCpp instance.
1130
+ */
1131
+ function getSessionManager(): LLMSessionManager {
1132
+ const llm = getDefaultLlamaCpp();
1133
+ if (!defaultSessionManager || defaultSessionManager.getLlamaCpp() !== llm) {
1134
+ defaultSessionManager = new LLMSessionManager(llm);
1135
+ }
1136
+ return defaultSessionManager;
1137
+ }
1138
+
1139
+ /**
1140
+ * Execute a function with a scoped LLM session.
1141
+ * The session provides lifecycle guarantees - resources won't be disposed mid-operation.
1142
+ *
1143
+ * @example
1144
+ * ```typescript
1145
+ * await withLLMSession(async (session) => {
1146
+ * const expanded = await session.expandQuery(query);
1147
+ * const embeddings = await session.embedBatch(texts);
1148
+ * const reranked = await session.rerank(query, docs);
1149
+ * return reranked;
1150
+ * }, { maxDuration: 10 * 60 * 1000, name: 'querySearch' });
1151
+ * ```
1152
+ */
1153
+ export async function withLLMSession<T>(
1154
+ fn: (session: ILLMSession) => Promise<T>,
1155
+ options?: LLMSessionOptions
1156
+ ): Promise<T> {
1157
+ const manager = getSessionManager();
1158
+ const session = new LLMSession(manager, options);
1159
+
1160
+ try {
1161
+ return await fn(session);
1162
+ } finally {
1163
+ session.release();
1164
+ }
1165
+ }
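+
+ // Variation on the @example above, with an external abort signal (timeout and session
+ // name are arbitrary). If the signal fires before an operation starts, the session
+ // throws SessionReleasedError.
+ //
+ //   const controller = new AbortController();
+ //   setTimeout(() => controller.abort(new Error("user cancelled")), 30_000).unref();
+ //   try {
+ //     await withLLMSession(
+ //       (session) => session.expandQuery("local vector search"),
+ //       { signal: controller.signal, name: "expandOnly" }
+ //     );
+ //   } catch (err) {
+ //     if (err instanceof SessionReleasedError) {
+ //       // aborted before the operation ran
+ //     }
+ //   }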
1166
+
1167
+ /**
1168
+ * Check if idle unload is safe (no active sessions or operations).
1169
+ * Used internally by LlamaCpp idle timer.
1170
+ */
1171
+ export function canUnloadLLM(): boolean {
1172
+ if (!defaultSessionManager) return true;
1173
+ return defaultSessionManager.canUnload();
1174
+ }
1175
+
1176
+ // =============================================================================
1177
+ // Singleton for default LlamaCpp instance
1178
+ // =============================================================================
1179
+
1180
+ let defaultLlamaCpp: LlamaCpp | null = null;
1181
+
1182
+ /**
1183
+ * Get the default LlamaCpp instance (creates one if needed)
1184
+ */
1185
+ export function getDefaultLlamaCpp(): LlamaCpp {
1186
+ if (!defaultLlamaCpp) {
1187
+ defaultLlamaCpp = new LlamaCpp();
1188
+ }
1189
+ return defaultLlamaCpp;
1190
+ }
1191
+
1192
+ /**
1193
+ * Set a custom default LlamaCpp instance (useful for testing)
1194
+ */
1195
+ export function setDefaultLlamaCpp(llm: LlamaCpp | null): void {
1196
+ defaultLlamaCpp = llm;
1197
+ }
1198
+
1199
+ /**
1200
+ * Dispose the default LlamaCpp instance if it exists.
1201
+ * Call this before process exit to prevent NAPI crashes.
1202
+ */
1203
+ export async function disposeDefaultLlamaCpp(): Promise<void> {
1204
+ if (defaultLlamaCpp) {
1205
+ await defaultLlamaCpp.dispose();
1206
+ defaultLlamaCpp = null;
1207
+ }
1208
+ }
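+
+ // Shutdown sketch (hypothetical CLI entry point): dispose the default instance before the
+ // process exits so native resources are not torn down mid-operation.
+ //
+ //   process.on("beforeExit", () => {
+ //     void disposeDefaultLlamaCpp();
+ //   });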