ts-cachecraft 1.0.0

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between package versions exactly as they appear in the public registry.
@@ -0,0 +1,543 @@
1
+ /** Lifecycle events emitted by all cache implementations. */
2
+ type CacheEvent = 'set' | 'get' | 'delete' | 'evict' | 'expire' | 'clear';
3
+ /** Serializable representation of a single cached entry with metadata. */
4
+ type CacheEntry<V> = {
5
+ key: string;
6
+ value: V;
7
+ createdAt: number;
8
+ lastAccessed: number;
9
+ accessCount: number;
10
+ ttl?: number;
11
+ tier?: 'hot' | 'cold';
12
+ metadata?: Record<string, unknown>;
13
+ };
14
+ /** Payload passed to event listeners on every cache operation. */
15
+ type CacheEventDetail<V> = {
16
+ event: CacheEvent;
17
+ key: string;
18
+ value?: V;
19
+ evictedKey?: string;
20
+ evictedValue?: V;
21
+ timestamp: number;
22
+ };
23
+ /** Callback signature for cache event subscribers. */
24
+ type CacheEventListener<V> = (detail: CacheEventDetail<V>) => void;
25
+ /** Base configuration accepted by all cache constructors. */
26
+ type CacheOptions<V = unknown> = {
27
+ capacity: number;
28
+ ttl?: number;
29
+ onEvict?: (key: string, value: V) => void;
30
+ };
31
+ /** Point-in-time performance counters returned by `stats()`. */
32
+ type CacheStats = {
33
+ hits: number;
34
+ misses: number;
35
+ evictions: number;
36
+ size: number;
37
+ capacity: number;
38
+ hitRate: number;
39
+ };
40
+ /** Full cache state returned by `snapshot()`, including entries and stats. */
41
+ type CacheSnapshot<V> = {
42
+ entries: CacheEntry<V>[];
43
+ stats: CacheStats;
44
+ strategy: string;
45
+ };
46
+ /** Public interface implemented by every cache strategy. */
47
+ type Cache<V = unknown> = {
48
+ get(key: string): V | undefined;
49
+ set(key: string, value: V, ttl?: number): void;
50
+ delete(key: string): boolean;
51
+ has(key: string): boolean;
52
+ clear(): void;
53
+ readonly size: number;
54
+ readonly capacity: number;
55
+ keys(): IterableIterator<string>;
56
+ values(): IterableIterator<V>;
57
+ entries(): IterableIterator<[string, V]>;
58
+ stats(): CacheStats;
59
+ snapshot(): CacheSnapshot<V>;
60
+ on(event: CacheEvent, listener: CacheEventListener<V>): void;
61
+ off(event: CacheEvent, listener: CacheEventListener<V>): void;
62
+ };
63
+ /** Options for {@link TwoTierCache}. Total capacity is `hotCapacity + coldCapacity`. */
64
+ type TwoTierOptions<V = unknown> = CacheOptions<V> & {
65
+ hotCapacity: number;
66
+ coldCapacity: number;
67
+ promoteThreshold?: number;
68
+ };
69
+ /** Options for {@link ARCCache}. Uses the base `CacheOptions` fields. */
70
+ type ARCOptions<V = unknown> = CacheOptions<V>;
71
+ /** Options for {@link SLRUCache}. `protectedRatio` controls the segment split. */
72
+ type SLRUOptions<V = unknown> = CacheOptions<V> & {
73
+ protectedRatio?: number;
74
+ };
75
+ /** Options for {@link WTinyLFUCache}. `windowRatio` sizes the admission window. */
76
+ type WTinyLFUOptions<V = unknown> = CacheOptions<V> & {
77
+ windowRatio?: number;
78
+ };
79
+ /** Options for {@link LRUKCache}. `k` sets the number of accesses to track. */
80
+ type LRUKOptions<V = unknown> = CacheOptions<V> & {
81
+ k?: number;
82
+ };
83
+
84
+ /**
85
+ * Abstract foundation for all cache strategies.
86
+ *
87
+ * Provides shared infrastructure: capacity validation, hit/miss/eviction
88
+ * counters, the event pub-sub system, and `stats()`/`snapshot()` methods.
89
+ * Subclasses implement the storage and eviction logic.
90
+ */
91
+ declare abstract class BaseCache<V> {
92
+ protected _capacity: number;
93
+ protected _hits: number;
94
+ protected _misses: number;
95
+ protected _evictions: number;
96
+ protected onEvict?: (key: string, value: V) => void;
97
+ private listeners;
98
+ constructor(options: CacheOptions<V> | number);
99
+ abstract get(key: string): V | undefined;
100
+ abstract set(key: string, value: V, ttl?: number): void;
101
+ abstract delete(key: string): boolean;
102
+ abstract has(key: string): boolean;
103
+ abstract clear(): void;
104
+ abstract get size(): number;
105
+ abstract keys(): IterableIterator<string>;
106
+ abstract values(): IterableIterator<V>;
107
+ abstract entries(): IterableIterator<[string, V]>;
108
+ abstract get strategy(): string;
109
+ protected abstract collectEntries(): CacheEntry<V>[];
110
+ get capacity(): number;
111
+ stats(): CacheStats;
112
+ snapshot(): CacheSnapshot<V>;
113
+ on(event: CacheEvent, listener: CacheEventListener<V>): void;
114
+ off(event: CacheEvent, listener: CacheEventListener<V>): void;
115
+ protected emit(detail: CacheEventDetail<V>): void;
116
+ protected now(): number;
117
+ }
118
+
119
+ /** Configuration flags that differentiate LRU, MRU, FIFO, and LIFO behavior. */
120
+ type DLLCacheConfig = {
121
+ reorderOnGet: boolean;
122
+ reorderOnUpdate: boolean;
123
+ addPosition: 'front' | 'back';
124
+ evictPosition: 'front' | 'back';
125
+ };
126
+ /**
127
+ * Abstract cache backed by a doubly linked list and a HashMap.
128
+ *
129
+ * Provides O(1) get, set, and delete by combining a `Map` for key lookup
130
+ * with a `DoublyLinkedList` for ordering. The {@link DLLCacheConfig} flags
131
+ * control insertion position, eviction position, and whether accesses
132
+ * reorder entries, allowing LRU, MRU, FIFO, and LIFO to share this
133
+ * single implementation. Also supports optional per-entry TTL.
134
+ */
135
+ declare abstract class DLLCache<V> extends BaseCache<V> {
136
+ private list;
137
+ private map;
138
+ private defaultTTL?;
139
+ private config;
140
+ constructor(options: CacheOptions<V>, config: DLLCacheConfig);
141
+ get(key: string): V | undefined;
142
+ set(key: string, value: V, ttl?: number): void;
143
+ delete(key: string): boolean;
144
+ has(key: string): boolean;
145
+ clear(): void;
146
+ get size(): number;
147
+ keys(): IterableIterator<string>;
148
+ values(): IterableIterator<V>;
149
+ entries(): IterableIterator<[string, V]>;
150
+ protected collectEntries(): CacheEntry<V>[];
151
+ private deleteNode;
152
+ }
153
+
154
+ /**
155
+ * Least Recently Used (LRU) cache.
156
+ *
157
+ * Evicts the entry that has not been accessed for the longest time. Every
158
+ * `get` or `set` promotes the entry to the front of the list, and eviction
159
+ * removes from the back. O(1) for all operations.
160
+ */
161
+ declare class LRUCache<V> extends DLLCache<V> {
162
+ constructor(options: CacheOptions<V>);
163
+ get strategy(): string;
164
+ }
165
+
166
+ /**
167
+ * Most Recently Used (MRU) cache.
168
+ *
169
+ * Evicts the most recently accessed entry. Useful for sequential scan
170
+ * workloads where the item just accessed is unlikely to be needed again.
171
+ * O(1) for all operations.
172
+ */
173
+ declare class MRUCache<V> extends DLLCache<V> {
174
+ constructor(options: CacheOptions<V>);
175
+ get strategy(): string;
176
+ }
177
+
178
+ /**
179
+ * Least Frequently Used (LFU) cache.
180
+ *
181
+ * Evicts the entry with the lowest access count. Groups entries into
182
+ * frequency buckets (each a doubly linked list) and tracks the minimum
183
+ * frequency. On eviction, removes the LRU entry from the min-frequency
184
+ * bucket. On access, promotes the entry to the next frequency bucket.
185
+ * O(1) for all operations.
186
+ */
187
+ declare class LFUCache<V> extends BaseCache<V> {
188
+ private store;
189
+ private freqBuckets;
190
+ private minFrequency;
191
+ constructor(options: CacheOptions<V>);
192
+ private getOrCreateBucket;
193
+ private incrementFrequency;
194
+ get(key: string): V | undefined;
195
+ set(key: string, value: V, _ttl?: number): void;
196
+ private evict;
197
+ delete(key: string): boolean;
198
+ has(key: string): boolean;
199
+ clear(): void;
200
+ get size(): number;
201
+ keys(): IterableIterator<string>;
202
+ values(): IterableIterator<V>;
203
+ entries(): IterableIterator<[string, V]>;
204
+ get strategy(): string;
205
+ protected collectEntries(): CacheEntry<V>[];
206
+ }
207
+
208
+ /**
209
+ * Time-to-Live (TTL) cache.
210
+ *
211
+ * Expires entries after a configurable duration. Supports a global default
212
+ * TTL and per-key overrides via the `ttl` parameter on `set()`. Expiration
213
+ * is lazy (checked on access) with periodic background cleanup. Call
214
+ * `destroy()` to stop the cleanup timer when the cache is no longer needed.
215
+ */
216
+ declare class TTLCache<V> extends BaseCache<V> {
217
+ private store;
218
+ private defaultTTL;
219
+ private cleanupTimer;
220
+ constructor(options: CacheOptions<V>);
221
+ private isExpired;
222
+ private cleanup;
223
+ get(key: string): V | undefined;
224
+ set(key: string, value: V, ttl?: number): void;
225
+ private evict;
226
+ delete(key: string): boolean;
227
+ has(key: string): boolean;
228
+ clear(): void;
229
+ get size(): number;
230
+ keys(): IterableIterator<string>;
231
+ values(): IterableIterator<V>;
232
+ entries(): IterableIterator<[string, V]>;
233
+ get strategy(): string;
234
+ protected collectEntries(): CacheEntry<V>[];
235
+ destroy(): void;
236
+ }
237
+
238
+ /**
239
+ * First In, First Out (FIFO) cache.
240
+ *
241
+ * Evicts the oldest inserted entry regardless of access pattern. New entries
242
+ * are appended to the back and eviction removes from the front. Accessing an
243
+ * entry does not change its position. O(1) for all operations.
244
+ */
245
+ declare class FIFOCache<V> extends DLLCache<V> {
246
+ constructor(options: CacheOptions<V>);
247
+ get strategy(): string;
248
+ }
249
+
250
+ /**
251
+ * Last In, First Out (LIFO) cache.
252
+ *
253
+ * Evicts the most recently inserted entry (not the most recently accessed).
254
+ * New entries are added to the front and eviction also removes from the
255
+ * front. Accessing an entry does not change its position. O(1) for all
256
+ * operations.
257
+ */
258
+ declare class LIFOCache<V> extends DLLCache<V> {
259
+ constructor(options: CacheOptions<V>);
260
+ get strategy(): string;
261
+ }
262
+
263
+ /**
264
+ * Random Replacement (RR) cache.
265
+ *
266
+ * Evicts a randomly chosen entry when the cache is full. Uses a contiguous
267
+ * key array alongside a Map: on eviction, picks a random index, swaps it
268
+ * with the last element, and pops for O(1) random removal. Immune to
269
+ * pathological access patterns that defeat deterministic strategies.
270
+ */
271
+ declare class RRCache<V> extends BaseCache<V> {
272
+ private store;
273
+ private keyList;
274
+ constructor(options: CacheOptions<V>);
275
+ get(key: string): V | undefined;
276
+ set(key: string, value: V, _ttl?: number): void;
277
+ private evict;
278
+ private removeKeyAtIndex;
279
+ delete(key: string): boolean;
280
+ has(key: string): boolean;
281
+ clear(): void;
282
+ get size(): number;
283
+ keys(): IterableIterator<string>;
284
+ values(): IterableIterator<V>;
285
+ entries(): IterableIterator<[string, V]>;
286
+ get strategy(): string;
287
+ protected collectEntries(): CacheEntry<V>[];
288
+ }
289
+
290
+ /**
291
+ * Adaptive Replacement Cache (ARC).
292
+ *
293
+ * Self-tuning algorithm that balances recency and frequency by maintaining
294
+ * four lists: T1 (recent), T2 (frequent), and their ghost lists B1/B2
295
+ * (keys only, capped at capacity). A parameter `p` shifts the target size
296
+ * of T1 vs T2 based on which ghost list sees more hits, continuously
297
+ * adapting to the workload without manual tuning. O(1) for all operations.
298
+ *
299
+ * @see https://www.usenix.org/conference/fast-03/arc-self-tuning-low-overhead-replacement-cache
300
+ */
301
+ declare class ARCCache<V> extends BaseCache<V> {
302
+ /** Adaptation parameter: target size of T1. */
303
+ private p;
304
+ /** T1: recent cache entries (seen once recently). */
305
+ private t1;
306
+ /** T2: frequent cache entries (seen at least twice recently). */
307
+ private t2;
308
+ /** B1: ghost list for recently evicted from T1 (keys only). */
309
+ private b1;
310
+ /** B2: ghost list for recently evicted from T2 (keys only). */
311
+ private b2;
312
+ private t1Map;
313
+ private t2Map;
314
+ private b1Map;
315
+ private b2Map;
316
+ private metaMap;
317
+ constructor(options: ARCOptions<V>);
318
+ get strategy(): string;
319
+ get size(): number;
320
+ get(key: string): V | undefined;
321
+ set(key: string, value: V, _ttl?: number): void;
322
+ delete(key: string): boolean;
323
+ has(key: string): boolean;
324
+ clear(): void;
325
+ keys(): IterableIterator<string>;
326
+ values(): IterableIterator<V>;
327
+ entries(): IterableIterator<[string, V]>;
328
+ protected collectEntries(): CacheEntry<V>[];
329
+ /**
330
+ * Replace: evict an entry from either T1 or T2 to make room.
331
+ * The evicted entry's key is moved to the corresponding ghost list.
332
+ */
333
+ private replace;
334
+ /** Cap a ghost list at capacity to prevent unbounded memory growth. */
335
+ private trimGhostList;
336
+ }
337
+
338
+ /**
339
+ * Two-Tier (Hot/Cold) cache.
340
+ *
341
+ * Separates entries into a hot tier and a cold tier, both internally LRU.
342
+ * New entries start in the cold tier. After `promoteThreshold` accesses an
343
+ * entry is promoted to hot. When the hot tier is full its LRU entry is
344
+ * demoted back to cold. When cold is full its LRU entry is permanently
345
+ * evicted. O(1) for all operations.
346
+ */
347
+ declare class TwoTierCache<V> extends BaseCache<V> {
348
+ private hotList;
349
+ private coldList;
350
+ private hotMap;
351
+ private coldMap;
352
+ private metaMap;
353
+ private hotCapacity;
354
+ private coldCapacity;
355
+ private promoteThreshold;
356
+ constructor(options: TwoTierOptions<V>);
357
+ get strategy(): string;
358
+ get size(): number;
359
+ get(key: string): V | undefined;
360
+ set(key: string, value: V, _ttl?: number): void;
361
+ delete(key: string): boolean;
362
+ has(key: string): boolean;
363
+ clear(): void;
364
+ keys(): IterableIterator<string>;
365
+ values(): IterableIterator<V>;
366
+ entries(): IterableIterator<[string, V]>;
367
+ protected collectEntries(): CacheEntry<V>[];
368
+ /** Evict from hot tier if full, demoting LRU to cold tier. */
369
+ private ensureHotCapacity;
370
+ /** Evict LRU from cold tier if full (permanent eviction). */
371
+ private ensureColdCapacity;
372
+ }
373
+
374
+ /**
375
+ * Segmented LRU (SLRU) cache.
376
+ *
377
+ * Divides capacity into a probation segment and a protected segment
378
+ * (controlled by `protectedRatio`, default 0.8). New entries enter
379
+ * probation. A hit in probation promotes the entry to protected. When
380
+ * protected is full its LRU entry is demoted back to probation. Eviction
381
+ * always removes from probation's LRU end. O(1) for all operations.
382
+ */
383
+ declare class SLRUCache<V> extends BaseCache<V> {
384
+ private probationList;
385
+ private protectedList;
386
+ private probationMap;
387
+ private protectedMap;
388
+ private metaMap;
389
+ private protectedCapacity;
390
+ private probationCapacity;
391
+ constructor(options: SLRUOptions<V>);
392
+ get strategy(): string;
393
+ get size(): number;
394
+ get(key: string): V | undefined;
395
+ set(key: string, value: V, _ttl?: number): void;
396
+ delete(key: string): boolean;
397
+ has(key: string): boolean;
398
+ clear(): void;
399
+ keys(): IterableIterator<string>;
400
+ values(): IterableIterator<V>;
401
+ entries(): IterableIterator<[string, V]>;
402
+ protected collectEntries(): CacheEntry<V>[];
403
+ /** If protected is full, demote LRU from protected to probation. */
404
+ private ensureProtectedCapacity;
405
+ /** Evict LRU from probation if full. */
406
+ private ensureProbationCapacity;
407
+ }
408
+
409
+ /**
410
+ * Clock (Second Chance) cache.
411
+ *
412
+ * Approximates LRU using a circular buffer and a sweeping clock hand.
413
+ * Each entry has a reference bit set to 1 on access. On eviction the hand
414
+ * sweeps the buffer: entries with bit=1 get cleared (second chance),
415
+ * the first entry with bit=0 is evicted. O(1) amortized for all operations.
416
+ */
417
+ declare class ClockCache<V> extends BaseCache<V> {
418
+ private buffer;
419
+ private map;
420
+ private hand;
421
+ constructor(options: CacheOptions<V>);
422
+ get(key: string): V | undefined;
423
+ set(key: string, value: V, _ttl?: number): void;
424
+ private findEmptySlot;
425
+ private evict;
426
+ delete(key: string): boolean;
427
+ has(key: string): boolean;
428
+ clear(): void;
429
+ get size(): number;
430
+ keys(): IterableIterator<string>;
431
+ values(): IterableIterator<V>;
432
+ entries(): IterableIterator<[string, V]>;
433
+ get strategy(): string;
434
+ protected collectEntries(): CacheEntry<V>[];
435
+ }
436
+
437
+ /**
438
+ * Window Tiny Least Frequently Used (W-TinyLFU) cache.
439
+ *
440
+ * Near-optimal admission policy combining a small window LRU with a main
441
+ * segmented LRU (probation + protected) and a Count-Min Sketch for
442
+ * frequency estimation. New entries enter the window. When the window
443
+ * evicts, the candidate competes against the main cache's probation
444
+ * victim by estimated frequency, and the loser is discarded. The sketch
445
+ * periodically halves all counters to age out stale frequencies.
446
+ * O(1) for all operations.
447
+ *
448
+ * @see https://arxiv.org/abs/1512.00727
449
+ */
450
+ declare class WTinyLFUCache<V> extends BaseCache<V> {
451
+ private sketch;
452
+ private windowList;
453
+ private windowMap;
454
+ private windowCapacity;
455
+ private probationList;
456
+ private probationMap;
457
+ private protectedList;
458
+ private protectedMap;
459
+ private protectedCapacity;
460
+ private mainCapacity;
461
+ constructor(options: WTinyLFUOptions<V>);
462
+ get(key: string): V | undefined;
463
+ set(key: string, value: V, _ttl?: number): void;
464
+ private evictFromWindow;
465
+ private ensureProtectedCapacity;
466
+ delete(key: string): boolean;
467
+ has(key: string): boolean;
468
+ clear(): void;
469
+ get size(): number;
470
+ keys(): IterableIterator<string>;
471
+ values(): IterableIterator<V>;
472
+ entries(): IterableIterator<[string, V]>;
473
+ get strategy(): string;
474
+ protected collectEntries(): CacheEntry<V>[];
475
+ }
476
+
477
+ /**
478
+ * LRU-K cache.
479
+ *
480
+ * Generalizes LRU by tracking the K-th most recent access time for each
481
+ * entry. Entries with fewer than K accesses (correlated group) are evicted
482
+ * first by oldest first-access time. Among fully-tracked entries, the one
483
+ * whose K-th access is oldest is evicted. With K=2, this effectively
484
+ * filters out one-time scans that would pollute a standard LRU cache.
485
+ * O(1) get/set, O(n) eviction (scans within each group).
486
+ *
487
+ * @see https://www.cs.cmu.edu/~christos/courses/721-resources/p297-o_neil.pdf
488
+ */
489
+ declare class LRUKCache<V> extends BaseCache<V> {
490
+ private store;
491
+ private k;
492
+ /** Keys with < K accesses, in insertion order for FIFO eviction. */
493
+ private correlatedKeys;
494
+ constructor(options: LRUKOptions<V>);
495
+ get(key: string): V | undefined;
496
+ set(key: string, value: V, _ttl?: number): void;
497
+ private evict;
498
+ delete(key: string): boolean;
499
+ has(key: string): boolean;
500
+ clear(): void;
501
+ get size(): number;
502
+ keys(): IterableIterator<string>;
503
+ values(): IterableIterator<V>;
504
+ entries(): IterableIterator<[string, V]>;
505
+ get strategy(): string;
506
+ protected collectEntries(): CacheEntry<V>[];
507
+ }
508
+
509
+ /**
510
+ * SIEVE cache (NSDI'24).
511
+ *
512
+ * Uses lazy promotion and quick demotion for efficient eviction. New
513
+ * entries are inserted at the head of a FIFO queue with visited=false.
514
+ * On access, only a visited bit is set with no reordering. On eviction,
515
+ * a hand pointer sweeps toward the tail: visited entries get their bit
516
+ * cleared (retained), the first unvisited entry is evicted. Achieves
517
+ * competitive hit rates with LRU and W-TinyLFU while requiring fewer
518
+ * metadata updates per operation. O(1) get/set, O(k) amortized eviction.
519
+ *
520
+ * @see https://junchengyang.com/publication/nsdi24-SIEVE.pdf
521
+ */
522
+ declare class SieveCache<V> extends BaseCache<V> {
523
+ private head;
524
+ private tail;
525
+ private hand;
526
+ private map;
527
+ constructor(options: CacheOptions<V>);
528
+ get(key: string): V | undefined;
529
+ set(key: string, value: V, _ttl?: number): void;
530
+ private evict;
531
+ delete(key: string): boolean;
532
+ has(key: string): boolean;
533
+ clear(): void;
534
+ get size(): number;
535
+ keys(): IterableIterator<string>;
536
+ values(): IterableIterator<V>;
537
+ entries(): IterableIterator<[string, V]>;
538
+ get strategy(): string;
539
+ protected collectEntries(): CacheEntry<V>[];
540
+ private removeNode;
541
+ }
542
+
543
+ export { ARCCache, type ARCOptions, BaseCache, type Cache, type CacheEntry, type CacheEvent, type CacheEventDetail, type CacheEventListener, type CacheOptions, type CacheSnapshot, type CacheStats, ClockCache, FIFOCache, LFUCache, LIFOCache, LRUCache, LRUKCache, type LRUKOptions, MRUCache, RRCache, SLRUCache, type SLRUOptions, SieveCache, TTLCache, TwoTierCache, type TwoTierOptions, WTinyLFUCache, type WTinyLFUOptions };