@neezco/cache 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +7 -0
- package/README.md +55 -0
- package/dist/browser/index.d.ts +276 -0
- package/dist/browser/index.js +782 -0
- package/dist/browser/index.js.map +1 -0
- package/dist/node/index.cjs +1153 -0
- package/dist/node/index.cjs.map +1 -0
- package/dist/node/index.d.cts +276 -0
- package/dist/node/index.d.mts +276 -0
- package/dist/node/index.mjs +1124 -0
- package/dist/node/index.mjs.map +1 -0
- package/docs/.gitkeep +0 -0
- package/docs/api-reference.md +285 -0
- package/docs/configuration.md +175 -0
- package/docs/examples.md +145 -0
- package/docs/getting-started.md +86 -0
- package/package.json +93 -0
package/dist/node/index.cjs
@@ -0,0 +1,1153 @@
//#region rolldown:runtime
var __create = Object.create;
var __defProp = Object.defineProperty;
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __getProtoOf = Object.getPrototypeOf;
var __hasOwnProp = Object.prototype.hasOwnProperty;
var __copyProps = (to, from, except, desc) => {
	if (from && typeof from === "object" || typeof from === "function") {
		for (var keys = __getOwnPropNames(from), i = 0, n = keys.length, key; i < n; i++) {
			key = keys[i];
			if (!__hasOwnProp.call(to, key) && key !== except) {
				__defProp(to, key, {
					get: ((k) => from[k]).bind(null, key),
					enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable
				});
			}
		}
	}
	return to;
};
var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", {
	value: mod,
	enumerable: true
}) : target, mod));

//#endregion
let fs = require("fs");
fs = __toESM(fs);
let v8 = require("v8");
v8 = __toESM(v8);
let perf_hooks = require("perf_hooks");

//#region src/cache/clear.ts
/**
 * Clears all entries from the cache without invoking callbacks.
 *
 * @note The `onDelete` callback is NOT invoked during a clear operation.
 * This is intentional to avoid unnecessary overhead when bulk-removing entries.
 *
 * @param state - The cache state.
 * @returns void
 */
const clear = (state) => {
	state.store.clear();
};

//#endregion
//#region src/defaults.ts
const ONE_SECOND = 1e3;
const ONE_MINUTE = 60 * ONE_SECOND;
/**
 * ===================================================================
 * Cache Entry Lifecycle
 * Default TTL and stale window settings for short-lived cache entries.
 * ===================================================================
 */
/**
 * Default Time-To-Live in milliseconds for cache entries.
 * Optimized for short- to medium-lived data (30 minutes by default).
 * @default 1_800_000 (30 minutes)
 */
const DEFAULT_TTL = 30 * ONE_MINUTE;
/**
 * Default stale window in milliseconds after expiration.
 * Allows serving slightly outdated data while fetching fresh data.
 */
const DEFAULT_STALE_WINDOW = 0;
/**
 * Maximum number of entries the cache can hold.
 * Beyond this limit, less-used entries are evicted.
 */
const DEFAULT_MAX_SIZE = Infinity;
/**
 * ===================================================================
 * Sweep & Cleanup Operations
 * Parameters controlling how and when expired entries are removed.
 * ===================================================================
 */
/**
 * Maximum number of keys to process in a single sweep batch.
 * Higher values = more aggressive cleanup per batch, at the cost of more work per sweep cycle.
 */
const MAX_KEYS_PER_BATCH = 1e3;
/**
 * Minimal expired ratio enforced during sweeps.
 * Ensures control sweeps can still run when memory usage is above {@link EXPIRED_RATIO_MEMORY_THRESHOLD}.
 */
const MINIMAL_EXPIRED_RATIO = .05;
/**
 * Memory usage threshold (normalized 0–1) triggering control sweeps.
 * At or above this level, sweeping becomes more aggressive.
 */
const EXPIRED_RATIO_MEMORY_THRESHOLD = .8;
/**
 * Maximum allowed expired ratio when memory usage is low.
 * Upper bound for interpolation with MINIMAL_EXPIRED_RATIO.
 * Recommended range: `0.3 – 0.5`.
 */
const DEFAULT_MAX_EXPIRED_RATIO = .4;
/**
 * ===================================================================
 * Sweep Intervals & Timing
 * Frequency and time budgets for cleanup operations.
 * ===================================================================
 */
/**
 * Optimal interval in milliseconds between sweeps.
 * Used when system load is minimal and metrics are available.
 */
const OPTIMAL_SWEEP_INTERVAL = 2 * ONE_SECOND;
/**
 * Worst-case interval in milliseconds between sweeps.
 * Used when system load is high or metrics unavailable.
 */
const WORST_SWEEP_INTERVAL = 200;
/**
 * Maximum time budget in milliseconds for sweep operations.
 * Prevents sweeping from consuming excessive CPU during high load.
 */
const WORST_SWEEP_TIME_BUDGET = 40;
/**
 * Optimal time budget in milliseconds for each sweep cycle.
 * Used when performance metrics are not available or unreliable.
 */
const OPTIMAL_SWEEP_TIME_BUDGET_IF_NOTE_METRICS_AVAILABLE = 15;
/**
 * ===================================================================
 * Memory Management
 * Process limits and memory-safe thresholds.
 * ===================================================================
 */
/**
 * Default maximum process memory limit in megabytes.
 * Acts as fallback when environment detection is unavailable.
 * NOTE: Overridable via environment detection at runtime.
 */
const DEFAULT_MAX_PROCESS_MEMORY_MB = 1024;
/**
 * ===================================================================
 * System Utilization Weights
 * Balance how memory, CPU, and event-loop pressure influence sweep behavior.
 * Sum of all weights: 10 + 8.5 + 6.5 = 25
 * ===================================================================
 */
/**
 * Weight applied to memory utilization in sweep calculations.
 * Higher weight = memory pressure has more influence on sweep aggressiveness.
 */
const DEFAULT_MEMORY_WEIGHT = 10;
/**
 * Weight applied to CPU utilization in sweep calculations.
 * Combined with event-loop weight to balance CPU-related pressure.
 */
const DEFAULT_CPU_WEIGHT = 8.5;
/**
 * Weight applied to event-loop utilization in sweep calculations.
 * Complements CPU weight to assess overall processing capacity.
 */
const DEFAULT_LOOP_WEIGHT = 6.5;

//#endregion
//#region src/utils/get-process-memory-limit.ts
/**
 * Reads a number from a file.
 * @param path File path to read the number from.
 * @returns The number read from the file, or null if reading fails.
 */
function readNumber(path) {
	try {
		const raw = fs.default.readFileSync(path, "utf8").trim();
		const n = Number(raw);
		return Number.isFinite(n) ? n : null;
	} catch {
		return null;
	}
}
/**
 * Gets the memory limit imposed by cgroups, if any.
 * @return The memory limit in bytes, or null if no limit is found.
 */
function getCgroupLimit() {
	const v2 = readNumber("/sys/fs/cgroup/memory.max");
	if (v2 !== null) return v2;
	const v1 = readNumber("/sys/fs/cgroup/memory/memory.limit_in_bytes");
	if (v1 !== null) return v1;
	return null;
}
/**
 * Gets the effective memory limit for the current process, considering both V8 heap limits and cgroup limits.
 * @returns The effective memory limit in bytes.
 */
function getProcessMemoryLimit() {
	const heapLimit = v8.default.getHeapStatistics().heap_size_limit;
	const cgroupLimit = getCgroupLimit();
	if (cgroupLimit && cgroupLimit > 0 && cgroupLimit < Infinity) return Math.min(heapLimit, cgroupLimit);
	return heapLimit;
}

//#endregion
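/**
 * Illustrative only (not part of the bundle's API surface): how the resolved
 * limit is typically consumed. `getProcessMemoryLimit` returns bytes — the V8
 * heap limit, capped by a cgroup limit when one is readable (an unlimited
 * cgroup v2 value of "max" parses to NaN and is ignored) — so callers convert
 * to megabytes before handing it to the monitor.
 *
 * ```typescript
 * const limitBytes = getProcessMemoryLimit();
 * const limitMb = limitBytes / 1024 / 1024;
 * // startMonitor (further down) passes roughly 90% of this as the monitor's maxMemory
 * const safeLimitMb = limitMb * 0.9;
 * ```
 */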
//#region src/utils/process-monitor.ts
/**
 * Creates a performance monitor that periodically samples memory usage,
 * CPU usage, and event loop utilization for the current Node.js process.
 *
 * The monitor runs on a configurable interval and optionally invokes a
 * callback with the collected metrics on each cycle. It also exposes
 * methods to start and stop monitoring, retrieve the latest metrics,
 * and update configuration dynamically.
 *
 * @param options Configuration options for the monitor, including sampling
 * interval, maximum thresholds for normalization, and an optional callback.
 * @returns An API object that allows controlling the monitor lifecycle.
 */
function createMonitorObserver(options) {
	let intervalId = null;
	let lastMetrics = null;
	let prevHrtime = process.hrtime.bigint();
	let prevMem = process.memoryUsage();
	let prevCpu = process.cpuUsage();
	let prevLoop = perf_hooks.performance.eventLoopUtilization();
	let lastCollectedAt = Date.now();
	const config = {
		interval: options?.interval ?? 500,
		maxMemory: (options?.maxMemory ?? 512) * 1024 * 1024
	};
	function start() {
		if (intervalId) return;
		intervalId = setInterval(() => {
			try {
				const now = Date.now();
				const metrics = collectMetrics({
					prevCpu,
					prevHrtime,
					prevMem,
					prevLoop,
					maxMemory: config.maxMemory,
					collectedAtMs: now,
					previousCollectedAtMs: lastCollectedAt,
					interval: config.interval
				});
				lastMetrics = metrics;
				options?.callback?.(metrics);
				prevCpu = metrics.cpu.total;
				prevLoop = metrics.loop.total;
				prevMem = metrics.memory.total;
				prevHrtime = process.hrtime.bigint();
				lastCollectedAt = now;
			} catch (e) {
				stop();
				throw new Error("MonitorObserver: Not available", { cause: e });
			}
		}, config.interval);
		if (typeof intervalId.unref === "function") intervalId.unref();
	}
	function stop() {
		if (intervalId) {
			clearInterval(intervalId);
			intervalId = null;
		}
	}
	function getMetrics() {
		if (lastMetrics) return lastMetrics;
		return null;
	}
	function updateConfig(newConfig) {
		if (newConfig.maxMemory !== void 0) config.maxMemory = newConfig.maxMemory * 1024 * 1024;
		if (newConfig.interval !== void 0) {
			config.interval = newConfig.interval;
			if (intervalId) {
				stop();
				start();
			}
		}
	}
	return {
		start,
		stop,
		getMetrics,
		updateConfig
	};
}
/**
 * Collects and normalizes performance metrics for the current process,
 * including memory usage, CPU usage, and event loop utilization.
 *
 * CPU and event loop metrics are computed as deltas relative to previously
 * recorded values. All metrics are normalized into a utilization between 0 and 1
 * based on the configured maximum thresholds.
 *
 * @param props Previous metric snapshots and normalization limits.
 * @returns A structured object containing normalized performance metrics.
 */
function collectMetrics(props) {
	const nowHrtime = process.hrtime.bigint();
	const elapsedMs = Number(nowHrtime - props.prevHrtime) / 1e6;
	const actualElapsed = props.collectedAtMs - props.previousCollectedAtMs;
	const mem = process.memoryUsage();
	const deltaMem = {
		rss: mem.rss - props.prevMem.rss,
		heapTotal: mem.heapTotal - props.prevMem.heapTotal,
		heapUsed: mem.heapUsed - props.prevMem.heapUsed,
		external: mem.external - props.prevMem.external,
		arrayBuffers: mem.arrayBuffers - props.prevMem.arrayBuffers
	};
	const memRatio = Math.min(1, mem.rss / props.maxMemory);
	const cpuDelta = process.cpuUsage(props.prevCpu);
	const cpuRatio = (cpuDelta.system + cpuDelta.user) / 1e3 / elapsedMs;
	const loop = perf_hooks.performance.eventLoopUtilization(props.prevLoop);
	return {
		cpu: {
			utilization: cpuRatio,
			delta: cpuDelta,
			total: process.cpuUsage()
		},
		loop: {
			utilization: loop.utilization,
			delta: loop,
			total: perf_hooks.performance.eventLoopUtilization()
		},
		memory: {
			utilization: memRatio,
			delta: deltaMem,
			total: mem
		},
		collectedAt: props.collectedAtMs,
		previousCollectedAt: props.previousCollectedAtMs,
		interval: props.interval,
		actualElapsed
	};
}

//#endregion
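/**
 * Illustrative usage of the observer defined above (not part of the bundle):
 * sample once per second, normalize memory against a 256 MB budget, and read
 * the latest snapshot on demand.
 *
 * ```typescript
 * const monitor = createMonitorObserver({
 *   interval: 1000,
 *   maxMemory: 256, // MB; converted to bytes internally
 *   callback: (metrics) => console.log(metrics.memory.utilization),
 * });
 * monitor.start();
 * // ... later
 * const latest = monitor.getMetrics(); // null until the first interval fires
 * monitor.stop();
 * ```
 */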
//#region src/utils/start-monitor.ts
let _monitorInstance = null;
/** Latest collected metrics from the monitor */
let _metrics;
/** Maximum memory limit for the monitor (in MB) */
let maxMemoryLimit = DEFAULT_MAX_PROCESS_MEMORY_MB;
/** Use 90% of the effective limit */
const SAFE_MEMORY_LIMIT_RATIO = .9;
function startMonitor() {
	if (!_monitorInstance) {
		try {
			const processMemoryLimit = getProcessMemoryLimit();
			if (processMemoryLimit && processMemoryLimit > 0) maxMemoryLimit = processMemoryLimit / 1024 / 1024 * SAFE_MEMORY_LIMIT_RATIO;
		} catch {}
		_monitorInstance = createMonitorObserver({
			callback(metrics) {
				_metrics = metrics;
			},
			interval: WORST_SWEEP_INTERVAL,
			maxMemory: maxMemoryLimit
		});
		_monitorInstance.start();
	}
}

//#endregion
//#region src/sweep/batchUpdateExpiredRatio.ts
/**
 * Updates the expired ratio for each cache instance based on the collected ratios.
 * @param currentExpiredRatios - An array of arrays containing expired ratios for each cache instance.
 * @internal
 */
function _batchUpdateExpiredRatio(currentExpiredRatios) {
	for (const inst of _instancesCache) {
		const ratios = currentExpiredRatios[inst._instanceIndexState];
		if (ratios && ratios.length > 0) {
			const avgRatio = ratios.reduce((sum, val) => sum + val, 0) / ratios.length;
			const alpha = .6;
			inst._expiredRatio = inst._expiredRatio * (1 - alpha) + avgRatio * alpha;
		}
	}
}

//#endregion
//#region src/utils/interpolate.ts
/**
 * Interpolates a value between two numeric ranges.
 *
 * Maps `value` from [fromStart, fromEnd] to [toStart, toEnd].
 * Works with inverted ranges, negative values, and any numeric input.
 */
function interpolate({ value, fromStart, fromEnd, toStart, toEnd }) {
	if (fromStart === fromEnd) return toStart;
	return toStart + (value - fromStart) / (fromEnd - fromStart) * (toEnd - toStart);
}

//#endregion
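/**
 * Worked examples for `interpolate`, hand-computed from the formula above
 * (illustrative only):
 *
 * ```typescript
 * interpolate({ value: 0.5, fromStart: 0, fromEnd: 1, toStart: 2000, toEnd: 200 }); // 1100
 * interpolate({ value: 0.25, fromStart: 0, fromEnd: 1, toStart: 0, toEnd: 40 });    // 10
 * interpolate({ value: 5, fromStart: 0, fromEnd: 10, toStart: 10, toEnd: 0 });      // 5 (inverted range)
 * ```
 */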
//#region src/sweep/calculate-optimal-sweep-params.ts
/**
 * Calculates adaptive sweep parameters based on real-time system utilization.
 *
 * Memory utilization is used as-is: higher memory usage → more aggressive sweeps.
 * CPU and event loop utilization are inverted: lower usage → more aggressive sweeps.
 *
 * This inversion ensures:
 * - When CPU and loop are *free*, sweeping becomes more aggressive (worst-case behavior).
 * - When CPU and loop are *busy*, sweeping becomes more conservative (optimal behavior).
 *
 * The final ratio is a weighted average of the three metrics, clamped to [0, 1].
 * This ratio is then used to interpolate between optimal and worst-case sweep settings.
 *
 * @param options - Optional configuration for weights and sweep bounds.
 * @returns Interpolated sweep interval and time budget.
 */
const calculateOptimalSweepParams = (options) => {
	const { metrics, weights = {}, optimalSweepIntervalMs = OPTIMAL_SWEEP_INTERVAL, worstSweepIntervalMs = WORST_SWEEP_INTERVAL, worstSweepTimeBudgetMs = WORST_SWEEP_TIME_BUDGET } = options;
	const memoryWeight = weights.memory ?? DEFAULT_MEMORY_WEIGHT;
	const cpuWeight = weights.cpu ?? DEFAULT_CPU_WEIGHT;
	const loopWeight = weights.loop ?? DEFAULT_LOOP_WEIGHT;
	const memoryUtilization = metrics?.memory.utilization ?? 0;
	const cpuUtilizationRaw = metrics?.cpu.utilization ?? 0;
	const loopUtilizationRaw = metrics?.loop.utilization ?? 0;
	const cpuUtilization = 1 - cpuUtilizationRaw;
	const loopUtilization = 1 - loopUtilizationRaw;
	const weightedSum = memoryUtilization * memoryWeight + cpuUtilization * cpuWeight + loopUtilization * loopWeight;
	const totalWeight = memoryWeight + cpuWeight + loopWeight;
	const ratio = Math.min(1, Math.max(0, weightedSum / totalWeight));
	return {
		sweepIntervalMs: interpolate({
			value: ratio,
			fromStart: 0,
			fromEnd: 1,
			toStart: optimalSweepIntervalMs,
			toEnd: worstSweepIntervalMs
		}),
		sweepTimeBudgetMs: interpolate({
			value: ratio,
			fromStart: 0,
			fromEnd: 1,
			toStart: 0,
			toEnd: worstSweepTimeBudgetMs
		})
	};
};

//#endregion
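/**
 * Hand-computed example with the default weights (10 / 8.5 / 6.5, total 25) —
 * illustrative only. With memory at 50%, CPU at 20% and the event loop at 10%:
 *
 * ```typescript
 * // weightedSum = 0.5 * 10 + (1 - 0.2) * 8.5 + (1 - 0.1) * 6.5 = 17.65
 * // ratio       = 17.65 / 25 ≈ 0.706
 * // sweepIntervalMs   ≈ 2000 + 0.706 * (200 - 2000) ≈ 729 ms
 * // sweepTimeBudgetMs ≈ 0 + 0.706 * 40 ≈ 28 ms
 * ```
 * A mostly idle process therefore sweeps often with a generous budget, while a
 * busy one backs off toward the 2-second interval with a near-zero budget.
 */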
//#region src/sweep/select-instance-to-sweep.ts
/**
 * Selects a cache instance to sweep based on sweep weights or round‑robin order.
 *
 * Two selection modes are supported:
 * - **Round‑robin mode**: If `totalSweepWeight` ≤ 0, instances are selected
 *   deterministically in sequence using `batchSweep`. Once all instances
 *   have been processed, returns `null`.
 * - **Weighted mode**: If sweep weights are available, performs a probabilistic
 *   selection. Each instance’s `_sweepWeight` contributes proportionally to its
 *   chance of being chosen.
 *
 * This function depends on `_updateWeightSweep` to maintain accurate sweep weights.
 *
 * @param totalSweepWeight - Sum of all sweep weights across instances.
 * @param batchSweep - Current batch index used for round‑robin selection.
 * @returns The selected `CacheState` instance, `null` if no instance remains,
 * or `undefined` if the cache is empty.
 */
function _selectInstanceToSweep({ totalSweepWeight, batchSweep }) {
	let instanceToSweep = _instancesCache[0];
	if (totalSweepWeight <= 0) {
		if (batchSweep > _instancesCache.length) return null;
		instanceToSweep = _instancesCache[batchSweep - 1];
	} else {
		let threshold = Math.random() * totalSweepWeight;
		for (const inst of _instancesCache) {
			threshold -= inst._sweepWeight;
			if (threshold <= 0) {
				instanceToSweep = inst;
				break;
			}
		}
	}
	return instanceToSweep;
}

//#endregion
//#region src/cache/delete.ts
let DELETE_REASON = /* @__PURE__ */ function(DELETE_REASON$1) {
	DELETE_REASON$1["MANUAL"] = "manual";
	DELETE_REASON$1["EXPIRED"] = "expired";
	DELETE_REASON$1["STALE"] = "stale";
	return DELETE_REASON$1;
}({});
/**
 * Deletes a key from the cache.
 * @param state - The cache state.
 * @param key - The key.
 * @returns A boolean indicating whether the key was successfully deleted.
 */
const deleteKey = (state, key, reason = DELETE_REASON.MANUAL) => {
	const onDelete = state.onDelete;
	const onExpire = state.onExpire;
	if (!onDelete && !onExpire) return state.store.delete(key);
	const entry = state.store.get(key);
	if (!entry) return false;
	state.store.delete(key);
	state.onDelete?.(key, entry[1], reason);
	if (reason !== DELETE_REASON.MANUAL) state.onExpire?.(key, entry[1], reason);
	return true;
};

//#endregion
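/**
 * Callback behaviour of `deleteKey`, illustrated (assumes a hypothetical
 * `state` that has both `onDelete` and `onExpire` configured):
 *
 * ```typescript
 * deleteKey(state, "user:1");                         // onDelete(key, value, "manual")
 * deleteKey(state, "user:2", DELETE_REASON.EXPIRED);  // onDelete(..., "expired") + onExpire(..., "expired")
 * deleteKey(state, "user:3", DELETE_REASON.STALE);    // onDelete(..., "stale")   + onExpire(..., "stale")
 * ```
 * `onExpire` is only invoked for non-manual reasons; when neither callback is
 * set, the function falls back to a plain `Map#delete`.
 */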
//#region src/types.ts
/**
 * Status of a cache entry.
 */
let ENTRY_STATUS = /* @__PURE__ */ function(ENTRY_STATUS$1) {
	/** The entry is fresh and valid. */
	ENTRY_STATUS$1["FRESH"] = "fresh";
	/** The entry is stale but can still be served. */
	ENTRY_STATUS$1["STALE"] = "stale";
	/** The entry has expired and is no longer valid. */
	ENTRY_STATUS$1["EXPIRED"] = "expired";
	return ENTRY_STATUS$1;
}({});

//#endregion
//#region src/utils/status-from-tags.ts
/**
 * Computes the derived status of a cache entry based on its associated tags.
 *
 * Tags may impose stricter expiration or stale rules on the entry. Only tag
 * invalidations recorded at or after the entry's creation timestamp are considered relevant.
 *
 * Resolution rules:
 * - If any applicable tag marks the entry as expired, the status becomes `EXPIRED`.
 * - Otherwise, if any applicable tag marks it as stale, the status becomes `STALE`.
 * - If no tag imposes stricter rules, the entry remains `FRESH`.
 *
 * @param state - The cache state containing tag metadata.
 * @param entry - The cache entry whose status is being evaluated.
 * @returns A tuple containing:
 * - The final {@link ENTRY_STATUS} imposed by tags.
 * - The earliest timestamp at which a tag marked the entry as stale
 *   (or 0 if no tag imposed a stale rule).
 */
function _statusFromTags(state, entry) {
	const entryCreatedAt = entry[0][0];
	let earliestTagStaleInvalidation = Infinity;
	let status = ENTRY_STATUS.FRESH;
	const tags = entry[2];
	if (tags) for (const tag of tags) {
		const ts = state._tags.get(tag);
		if (!ts) continue;
		const [tagExpiredAt, tagStaleSinceAt] = ts;
		if (tagExpiredAt >= entryCreatedAt) {
			status = ENTRY_STATUS.EXPIRED;
			break;
		}
		if (tagStaleSinceAt >= entryCreatedAt) {
			if (tagStaleSinceAt < earliestTagStaleInvalidation) earliestTagStaleInvalidation = tagStaleSinceAt;
			status = ENTRY_STATUS.STALE;
		}
	}
	return [status, status === ENTRY_STATUS.STALE ? earliestTagStaleInvalidation : 0];
}

//#endregion
//#region src/cache/validators.ts
/**
 * Computes the final derived status of a cache entry by combining:
 *
 * - The entry's own expiration timestamps (TTL and stale TTL).
 * - Any stricter expiration or stale rules imposed by its associated tags.
 *
 * Precedence rules:
 * - `EXPIRED` overrides everything.
 * - `STALE` overrides `FRESH`.
 * - If neither the entry nor its tags impose stricter rules, the entry is `FRESH`.
 *
 * @param state - The cache state containing tag metadata.
 * @param entry - The cache entry being evaluated.
 * @returns The final {@link ENTRY_STATUS} for the entry.
 */
function computeEntryStatus(state, entry, now) {
	const [__createdAt, expiresAt, staleExpiresAt] = entry[0];
	const [tagStatus, earliestTagStaleInvalidation] = _statusFromTags(state, entry);
	if (tagStatus === ENTRY_STATUS.EXPIRED) return ENTRY_STATUS.EXPIRED;
	const windowStale = staleExpiresAt - expiresAt;
	if (tagStatus === ENTRY_STATUS.STALE && staleExpiresAt > 0 && now < earliestTagStaleInvalidation + windowStale) return ENTRY_STATUS.STALE;
	if (now < expiresAt) return ENTRY_STATUS.FRESH;
	if (staleExpiresAt > 0 && now < staleExpiresAt) return ENTRY_STATUS.STALE;
	return ENTRY_STATUS.EXPIRED;
}
/**
 * Determines whether a cache entry is fresh.
 *
 * A fresh entry is one whose final derived status is `FRESH`, meaning:
 * - It has not expired according to its own timestamps, and
 * - No associated tag imposes a stricter stale or expired rule.
 *
 * @param state - The cache state containing tag metadata.
 * @param entry - The cache entry being evaluated.
 * @returns True if the entry is fresh.
 */
const isFresh = (state, entry, now) => computeEntryStatus(state, entry, now) === ENTRY_STATUS.FRESH;
/**
 * Determines whether a cache entry is stale.
 *
 * A stale entry is one whose final derived status is `STALE`, meaning:
 * - It has passed its TTL but is still within its stale window, or
 * - A tag imposes a stale rule that applies to this entry.
 *
 * @param state - The cache state containing tag metadata.
 * @param entry - The cache entry being evaluated.
 * @returns True if the entry is stale.
 */
const isStale = (state, entry, now) => computeEntryStatus(state, entry, now) === ENTRY_STATUS.STALE;
/**
 * Determines whether a cache entry is expired.
 *
 * An expired entry is one whose final derived status is `EXPIRED`, meaning:
 * - It has exceeded both its TTL and stale TTL, or
 * - A tag imposes an expiration rule that applies to this entry.
 *
 * @param state - The cache state containing tag metadata.
 * @param entry - The cache entry being evaluated.
 * @returns True if the entry is expired.
 */
const isExpired = (state, entry, now) => computeEntryStatus(state, entry, now) === ENTRY_STATUS.EXPIRED;

//#endregion
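/**
 * Status timeline, illustrated with a hand-built entry. The tuple layout
 * `[[createdAt, expiresAt, staleExpiresAt], value, tags]` is the one produced
 * by `setOrUpdate` further down; since `tags` is null here, any cache state
 * gives the same result (illustrative only).
 *
 * ```typescript
 * // ttl = 1000 ms, staleWindow = 500 ms, created at t = 0
 * const entry = [[0, 1000, 1500], "value", null];
 * computeEntryStatus(state, entry, 400);  // "fresh"   (t < expiresAt)
 * computeEntryStatus(state, entry, 1200); // "stale"   (expiresAt <= t < staleExpiresAt)
 * computeEntryStatus(state, entry, 1600); // "expired" (t >= staleExpiresAt)
 * ```
 * Tag rules can only tighten this result: a tag invalidation never makes an
 * entry fresher than its own timestamps allow.
 */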
//#region src/sweep/sweep-once.ts
/**
 * Performs a single sweep operation on the cache to remove expired and optionally stale entries.
 * Uses a linear scan with a saved pointer to resume from the last processed key.
 * @param state - The cache state.
 * @param _maxKeysPerBatch - Maximum number of keys to process in this sweep.
 * @returns An object containing statistics about the sweep operation.
 */
function _sweepOnce(state, _maxKeysPerBatch = MAX_KEYS_PER_BATCH) {
	if (!state._sweepIter) state._sweepIter = state.store.entries();
	let processed = 0;
	let expiredCount = 0;
	let staleCount = 0;
	for (let i = 0; i < _maxKeysPerBatch; i++) {
		const next = state._sweepIter.next();
		if (next.done) {
			state._sweepIter = state.store.entries();
			break;
		}
		processed += 1;
		const [key, entry] = next.value;
		const now = Date.now();
		if (isExpired(state, entry, now)) {
			deleteKey(state, key, DELETE_REASON.EXPIRED);
			expiredCount += 1;
		} else if (isStale(state, entry, now)) {
			staleCount += 1;
			if (state.purgeStaleOnSweep) deleteKey(state, key, DELETE_REASON.STALE);
		}
	}
	const expiredStaleCount = state.purgeStaleOnSweep ? staleCount : 0;
	return {
		processed,
		expiredCount,
		staleCount,
		ratio: processed > 0 ? (expiredCount + expiredStaleCount) / processed : 0
	};
}

//#endregion
//#region src/sweep/calculate-optimal-max-expired-ratio.ts
/**
 * Calculates the optimal maximum expired ratio based on current memory utilization.
 *
 * This function interpolates between `maxAllowExpiredRatio` and `MINIMAL_EXPIRED_RATIO`
 * depending on the memory usage reported by `_metrics`. At low memory usage (0%),
 * the optimal ratio equals `maxAllowExpiredRatio`. As memory usage approaches or exceeds
 * 80% of the memory limit, the optimal ratio decreases toward `MINIMAL_EXPIRED_RATIO`.
 *
 * @param maxAllowExpiredRatio - The maximum allowed expired ratio at minimal memory usage.
 * Defaults to `DEFAULT_MAX_EXPIRED_RATIO`.
 * @returns A normalized value between 0 and 1 representing the optimal expired ratio.
 */
function calculateOptimalMaxExpiredRatio(maxAllowExpiredRatio = DEFAULT_MAX_EXPIRED_RATIO) {
	const EFFECTIVE_MEMORY_THRESHOLD = EXPIRED_RATIO_MEMORY_THRESHOLD / SAFE_MEMORY_LIMIT_RATIO;
	const optimalExpiredRatio = interpolate({
		value: _metrics?.memory.utilization ?? 0,
		fromStart: 0,
		fromEnd: EFFECTIVE_MEMORY_THRESHOLD,
		toStart: maxAllowExpiredRatio,
		toEnd: MINIMAL_EXPIRED_RATIO
	});
	return Math.min(1, Math.max(0, optimalExpiredRatio));
}

//#endregion
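/**
 * Hand-computed values for the defaults (`maxAllowExpiredRatio` = 0.4,
 * EFFECTIVE_MEMORY_THRESHOLD = 0.8 / 0.9 ≈ 0.89) — illustrative only:
 *
 * ```typescript
 * // memory utilization 0    → ratio 0.40 (tolerate a large expired backlog)
 * // memory utilization 0.44 → ratio ≈ 0.23
 * // memory utilization 0.89 → ratio 0.05 (sweep almost any expired backlog)
 * // beyond 0.89 the interpolation keeps falling but is clamped at >= 0
 * ```
 */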
//#region src/sweep/update-weight.ts
/**
 * Updates the sweep weight (`_sweepWeight`) for each cache instance.
 *
 * The sweep weight determines the probability that an instance will be selected
 * for a cleanup (sweep) process. It is calculated based on the store size and
 * the ratio of expired keys.
 *
 * This function complements `_selectInstanceToSweep`, which is responsible
 * for selecting the correct instance based on the weights assigned here.
 *
 * ---
 *
 * ### Sweep systems:
 * 1. **Normal sweep**
 *    - Runs whenever the percentage of expired keys exceeds the allowed threshold
 *      calculated by `calculateOptimalMaxExpiredRatio`.
 *    - It is the main cleanup mechanism and is applied proportionally to the
 *      store size and the expired‑key ratio.
 *
 * 2. **Memory‑conditioned sweep (control)**
 *    - Works exactly like the normal sweep, except it may run even when it
 *      normally wouldn’t.
 *    - Only activates under **high memory pressure**.
 *    - Serves as an additional control mechanism to adjust weights, keep the
 *      system updated, and help prevent memory overflows.
 *
 * 3. **Round‑robin sweep (minimal control)**
 *    - Always runs, even if the expired ratio is low or memory usage does not
 *      require it.
 *    - Processes a very small number of keys per instance, much smaller than
 *      the normal sweep.
 *    - Its main purpose is to ensure that all instances receive at least a
 *      periodic weight update and minimal expired‑key control.
 *
 * ---
 * #### Important notes:
 * - A minimum `MINIMAL_EXPIRED_RATIO` (e.g., 5%) is assumed to ensure that
 *   control sweeps can always run under high‑memory scenarios.
 * - Even with a minimum ratio, the normal sweep and the memory‑conditioned sweep
 *   may **skip execution** if memory usage allows it and the expired ratio is
 *   below the optimal maximum.
 * - The round‑robin sweep is never skipped: it always runs with a very small,
 *   almost imperceptible cost.
 *
 * @returns The total accumulated sweep weight across all cache instances.
 */
function _updateWeightSweep() {
	let totalSweepWeight = 0;
	for (const instCache of _instancesCache) {
		if (instCache.store.size <= 0) {
			instCache._sweepWeight = 0;
			continue;
		}
		let expiredRatio = MINIMAL_EXPIRED_RATIO;
		if (instCache._expiredRatio > MINIMAL_EXPIRED_RATIO) expiredRatio = instCache._expiredRatio;
		{
			const optimalMaxExpiredRatio = calculateOptimalMaxExpiredRatio(instCache._maxAllowExpiredRatio);
			if (expiredRatio <= optimalMaxExpiredRatio) {
				instCache._sweepWeight = 0;
				continue;
			}
		}
		instCache._sweepWeight = instCache.store.size * expiredRatio;
		totalSweepWeight += instCache._sweepWeight;
	}
	return totalSweepWeight;
}

//#endregion
//#region src/sweep/sweep.ts
/**
 * Performs a sweep operation on the cache to remove expired and optionally stale entries.
 * Uses a linear scan with a saved pointer to resume from the last processed key.
 * @param state - The cache state.
 */
const sweep = async (state, utilities = {}) => {
	const { schedule = defaultSchedule, yieldFn = defaultYieldFn, now = Date.now(), runOnlyOne = false } = utilities;
	const startTime = now;
	let sweepIntervalMs = OPTIMAL_SWEEP_INTERVAL;
	let sweepTimeBudgetMs = OPTIMAL_SWEEP_TIME_BUDGET_IF_NOTE_METRICS_AVAILABLE;
	if (_metrics) ({sweepIntervalMs, sweepTimeBudgetMs} = calculateOptimalSweepParams({ metrics: _metrics }));
	const totalSweepWeight = _updateWeightSweep();
	const currentExpiredRatios = [];
	const maxKeysPerBatch = totalSweepWeight <= 0 ? MAX_KEYS_PER_BATCH / _instancesCache.length : MAX_KEYS_PER_BATCH;
	let batchSweep = 0;
	while (true) {
		batchSweep += 1;
		const instanceToSweep = _selectInstanceToSweep({
			batchSweep,
			totalSweepWeight
		});
		if (!instanceToSweep) break;
		const { ratio } = _sweepOnce(instanceToSweep, maxKeysPerBatch);
		(currentExpiredRatios[instanceToSweep._instanceIndexState] ??= []).push(ratio);
		if (Date.now() - startTime > sweepTimeBudgetMs) break;
		await yieldFn();
	}
	_batchUpdateExpiredRatio(currentExpiredRatios);
	if (!runOnlyOne) schedule(() => void sweep(state, utilities), sweepIntervalMs);
};
const defaultSchedule = (fn, ms) => {
	const t = setTimeout(fn, ms);
	if (typeof t.unref === "function") t.unref();
};
const defaultYieldFn = () => new Promise((resolve) => setImmediate(resolve));

//#endregion
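/**
 * The `utilities` parameter makes the loop testable — a single, deterministic
 * pass with no rescheduling and no yielding between batches (a sketch only,
 * not an endorsed testing API):
 *
 * ```typescript
 * await sweep(state, {
 *   runOnlyOne: true,        // do not schedule the next cycle
 *   yieldFn: async () => {}, // skip the setImmediate hop between batches
 *   schedule: () => {},      // ignored when runOnlyOne is true
 *   now: Date.now(),
 * });
 * ```
 * In production, `createCache` starts this loop once for the first auto-sweeping
 * instance and lets `defaultSchedule`/`defaultYieldFn` drive it.
 */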
//#region src/cache/create-cache.ts
let _instanceCount = 0;
const INSTANCE_WARNING_THRESHOLD = 99;
const _instancesCache = [];
let _initSweepScheduled = false;
/**
 * Creates the initial state for the TTL cache.
 * @param options - Configuration options for the cache.
 * @returns The initial cache state.
 */
const createCache = (options = {}) => {
	const { onExpire, onDelete, defaultTtl = DEFAULT_TTL, maxSize = DEFAULT_MAX_SIZE, _maxAllowExpiredRatio = DEFAULT_MAX_EXPIRED_RATIO, defaultStaleWindow = DEFAULT_STALE_WINDOW, purgeStaleOnGet = false, purgeStaleOnSweep = false, _autoStartSweep = true } = options;
	_instanceCount++;
	if (_instanceCount > INSTANCE_WARNING_THRESHOLD) console.warn(`Too many instances detected (${_instanceCount}). This may indicate a configuration issue; consider minimizing instance creation or grouping keys by expected expiration ranges. See the documentation: https://github.com/neezco/cache/docs/getting-started.md`);
	const state = {
		store: /* @__PURE__ */ new Map(),
		_sweepIter: null,
		get size() {
			return state.store.size;
		},
		onExpire,
		onDelete,
		maxSize,
		defaultTtl,
		defaultStaleWindow,
		purgeStaleOnGet,
		purgeStaleOnSweep,
		_maxAllowExpiredRatio,
		_autoStartSweep,
		_instanceIndexState: -1,
		_expiredRatio: 0,
		_sweepWeight: 0,
		_tags: /* @__PURE__ */ new Map()
	};
	state._instanceIndexState = _instancesCache.push(state) - 1;
	if (_autoStartSweep) {
		if (_initSweepScheduled) return state;
		_initSweepScheduled = true;
		sweep(state);
	}
	startMonitor();
	return state;
};

//#endregion
//#region src/cache/get.ts
/**
 * Retrieves a value from the cache if the entry is valid.
 * @param state - The cache state.
 * @param key - The key to retrieve.
 * @param now - Optional timestamp override (defaults to Date.now()).
 * @returns The cached value if valid, undefined otherwise.
 */
const get = (state, key, now = Date.now()) => {
	const entry = state.store.get(key);
	if (!entry) return void 0;
	if (isFresh(state, entry, now)) return entry[1];
	if (isStale(state, entry, now)) {
		if (state.purgeStaleOnGet) deleteKey(state, key, DELETE_REASON.STALE);
		return entry[1];
	}
	deleteKey(state, key, DELETE_REASON.EXPIRED);
};

//#endregion
//#region src/cache/has.ts
/**
 * Checks if a key exists in the cache and is not expired.
 * @param state - The cache state.
 * @param key - The key to check.
 * @param now - Optional timestamp override (defaults to Date.now()).
 * @returns True if the key exists and is valid, false otherwise.
 */
const has = (state, key, now = Date.now()) => {
	return get(state, key, now) !== void 0;
};

//#endregion
//#region src/cache/invalidate-tag.ts
/**
 * Invalidates one or more tags so that entries associated with them
 * become expired or stale from this moment onward.
 *
 * Semantics:
 * - Each tag maintains two timestamps in `state._tags`:
 *   [expiredAt, staleSinceAt].
 * - Calling this function updates one of those timestamps to `_now`,
 *   depending on whether the tag should force expiration or staleness.
 *
 * Rules:
 * - If `asStale` is false (default), the tag forces expiration:
 *   entries created before `_now` will be considered expired.
 * - If `asStale` is true, the tag forces staleness:
 *   entries created before `_now` will be considered stale,
 *   but only if they support a stale window.
 *
 * Behavior:
 * - Each call replaces any previous invalidation timestamp for the tag.
 * - Entries created after `_now` are unaffected.
 *
 * @param state - The cache state containing tag metadata.
 * @param tags - A tag or list of tags to invalidate.
 * @param options.asStale - Whether the tag should mark entries as stale.
 */
function invalidateTag(state, tags, options = {}, _now = Date.now()) {
	const tagList = Array.isArray(tags) ? tags : [tags];
	const asStale = options.asStale ?? false;
	for (const tag of tagList) {
		const currentTag = state._tags.get(tag);
		if (currentTag) if (asStale) currentTag[1] = _now;
		else currentTag[0] = _now;
		else state._tags.set(tag, [asStale ? 0 : _now, asStale ? _now : 0]);
	}
}

//#endregion
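/**
 * Tag timestamps in practice (illustrative; the `now` values are whatever the
 * clock returns at call time):
 *
 * ```typescript
 * invalidateTag(state, "users");                               // _tags: "users" -> [now, 0]  (force expiration)
 * invalidateTag(state, ["users", "posts"], { asStale: true }); // "users" -> [prev, now], "posts" -> [0, now]
 * // An entry tagged "users" and created BEFORE these calls is now treated as
 * // expired/stale; an entry created AFTER them is unaffected until the tag is
 * // invalidated again.
 * ```
 */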
//#region src/cache/set.ts
/**
 * Sets or updates a value in the cache with TTL and an optional stale window.
 *
 * @param state - The cache state.
 * @param input - Cache entry definition (key, value, ttl, staleWindow, tags).
 * @param now - Optional timestamp override used as the base time (defaults to Date.now()).
 *
 * @remarks
 * - `ttl` defines when the entry becomes expired.
 * - `staleWindow` defines how long the entry may still be served as stale
 *   after the expiration moment (`now + ttl`).
 */
const setOrUpdate = (state, input, now = Date.now()) => {
	const { key, value, ttl: ttlInput, staleWindow: staleWindowInput, tags } = input;
	if (value === void 0) return;
	if (key == null) throw new Error("Missing key.");
	const ttl = ttlInput ?? state.defaultTtl;
	const staleWindow = staleWindowInput ?? state.defaultStaleWindow;
	const expiresAt = ttl > 0 ? now + ttl : Infinity;
	const entry = [
		[
			now,
			expiresAt,
			staleWindow > 0 ? expiresAt + staleWindow : 0
		],
		value,
		typeof tags === "string" ? [tags] : Array.isArray(tags) ? tags : null
	];
	state.store.set(key, entry);
};

//#endregion
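/**
 * The entry layout produced above, shown with the low-level functional API
 * (illustrative; a fixed `now` of 0 is passed to keep the numbers readable):
 *
 * ```typescript
 * const state = createCache({ _autoStartSweep: false });
 * setOrUpdate(state, { key: "a", value: 1, ttl: 1000, staleWindow: 500, tags: "nums" }, 0);
 * state.store.get("a"); // => [[0, 1000, 1500], 1, ["nums"]]
 *                       //    [[createdAt, expiresAt, staleExpiresAt], value, tags]
 * setOrUpdate(state, { key: "b", value: 2, ttl: 0 }, 0);
 * state.store.get("b"); // => [[0, Infinity, 0], 2, null]  (never expires)
 * ```
 */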
//#region src/index.ts
/**
 * A TTL (Time-To-Live) cache implementation with support for expiration,
 * stale windows, tag-based invalidation, and automatic sweeping.
 *
 * Provides O(1) constant-time operations for all core methods.
 *
 * @example
 * ```typescript
 * const cache = new LocalTtlCache();
 * cache.set("user:123", { name: "Alice" }, { ttl: 5 * 60 * 1000 });
 * const user = cache.get("user:123"); // { name: "Alice" }
 * ```
 */
var LocalTtlCache = class {
	state;
	/**
	 * Creates a new cache instance.
	 *
	 * @param options - Configuration options for the cache (defaultTtl, defaultStaleWindow, maxSize, etc.)
	 *
	 * @example
	 * ```typescript
	 * const cache = new LocalTtlCache({
	 *   defaultTtl: 30 * 60 * 1000, // 30 minutes
	 *   defaultStaleWindow: 5 * 60 * 1000, // 5 minutes
	 *   maxSize: 500_000, // Maximum 500_000 entries
	 *   onExpire: (key, value) => console.log(`Expired: ${key}`),
	 *   onDelete: (key, value, reason) => console.log(`Deleted: ${key}, reason: ${reason}`),
	 * });
	 * ```
	 */
	constructor(options) {
		this.state = createCache(options);
	}
	/**
	 * Gets the current number of entries tracked by the cache.
	 *
	 * This value may include entries that are already expired but have not yet been
	 * removed by the lazy cleanup system. Expired keys are cleaned only when it is
	 * efficient to do so, so the count can temporarily be higher than the number of
	 * actually valid (non‑expired) entries.
	 *
	 * @returns The number of entries currently stored (including entries pending cleanup)
	 *
	 * @example
	 * ```typescript
	 * console.log(cache.size); // e.g., 42
	 * ```
	 */
	get size() {
		return this.state.size;
	}
	/**
	 * Retrieves a value from the cache.
	 *
	 * Returns the value if it exists and is not fully expired. If an entry is in the
	 * stale window (expired but still within staleWindow), the stale value is returned.
	 *
	 * @param key - The key to retrieve
	 * @returns The cached value if valid, undefined otherwise
	 *
	 * @example
	 * ```typescript
	 * const user = cache.get<{ name: string }>("user:123");
	 * ```
	 *
	 * @edge-cases
	 * - Returns `undefined` if the key doesn't exist
	 * - Returns `undefined` if the key has expired beyond the stale window
	 * - Returns the stale value if within the stale window
	 * - If `purgeStaleOnGet` is enabled, stale entries are deleted after being returned
	 */
	get(key) {
		return get(this.state, key);
	}
	/**
	 * Sets or updates a value in the cache.
	 *
	 * If the key already exists, it will be completely replaced.
	 *
	 * @param key - The key under which to store the value
	 * @param value - The value to cache (any type)
	 * @param options - Optional configuration for this specific entry
	 * @param options.ttl - Time-To-Live in milliseconds. Defaults to `defaultTtl`
	 * @param options.staleWindow - How long to serve stale data after expiration (milliseconds)
	 * @param options.tags - One or more tags for group invalidation
	 *
	 * @example
	 * ```typescript
	 * cache.set("user:123", { name: "Alice" }, {
	 *   ttl: 5 * 60 * 1000,
	 *   staleWindow: 1 * 60 * 1000,
	 *   tags: "user:123",
	 * });
	 * ```
	 *
	 * @edge-cases
	 * - Overwriting an existing key replaces it completely
	 * - If `ttl` is 0 or Infinite, the entry never expires
	 * - If `staleWindow` is larger than `ttl`, the entry can be served as stale longer than it was fresh
	 * - Tags are optional; only necessary for group invalidation via `invalidateTag()`
	 */
	set(key, value, options) {
		setOrUpdate(this.state, {
			key,
			value,
			ttl: options?.ttl,
			staleWindow: options?.staleWindow,
			tags: options?.tags
		});
	}
	/**
	 * Deletes a specific key from the cache.
	 *
	 * @param key - The key to delete
	 * @returns True if the key was deleted, false if it didn't exist
	 *
	 * @example
	 * ```typescript
	 * const wasDeleted = cache.delete("user:123");
	 * ```
	 *
	 * @edge-cases
	 * - Triggers the `onDelete` callback with reason `'manual'`
	 * - Does not trigger the `onExpire` callback
	 * - Returns `false` if the key was already expired
	 * - Deleting a non-existent key returns `false` without error
	 */
	delete(key) {
		return deleteKey(this.state, key);
	}
	/**
	 * Checks if a key exists in the cache and is not fully expired.
	 *
	 * Returns true if the key exists and is either fresh or within the stale window.
	 * Use this when you only need to check existence without retrieving the value.
	 *
	 * @param key - The key to check
	 * @returns True if the key exists and is valid, false otherwise
	 *
	 * @example
	 * ```typescript
	 * if (cache.has("user:123")) {
	 *   // Key exists (either fresh or stale)
	 * }
	 * ```
	 *
	 * @edge-cases
	 * - Returns `false` if the key doesn't exist
	 * - Returns `false` if the key has expired beyond the stale window
	 * - Returns `true` if the key is in the stale window (still being served)
	 * - Both `has()` and `get()` have O(1) complexity; prefer `get()` if you need the value
	 */
	has(key) {
		return has(this.state, key);
	}
	/**
	 * Removes all entries from the cache at once.
	 *
	 * This is useful for resetting the cache or freeing memory when needed.
	 * The `onDelete` callback is NOT invoked during clear (intentional optimization).
	 *
	 * @example
	 * ```typescript
	 * cache.clear(); // cache.size is now 0
	 * ```
	 *
	 * @edge-cases
	 * - The `onDelete` callback is NOT triggered during clear
	 * - Clears both expired and fresh entries
	 * - Resets `cache.size` to 0
	 */
	clear() {
		clear(this.state);
	}
	/**
	 * Marks all entries with one or more tags as expired (or stale, if requested).
	 *
	 * If an entry has multiple tags, invalidating ANY of those tags will invalidate the entry.
	 *
	 * @param tags - A single tag (string) or array of tags to invalidate
	 * @param asStale - If true, marks entries as stale instead of fully expired (still served from stale window)
	 *
	 * @example
	 * ```typescript
	 * // Invalidate a single tag
	 * cache.invalidateTag("user:123");
	 *
	 * // Invalidate multiple tags
	 * cache.invalidateTag(["user:123", "posts:456"]);
	 * ```
	 *
	 * @edge-cases
	 * - Does not throw errors if a tag has no associated entries
	 * - Invalidating a tag doesn't prevent new entries from being tagged with it later
	 * - The `onDelete` callback fires later, when the invalidated entry is actually purged,
	 *   with reason `'expired'` (or `'stale'` if `asStale` was used and stale purging applies)
	 */
	invalidateTag(tags, asStale) {
		invalidateTag(this.state, tags, { asStale });
	}
};

//#endregion
exports.LocalTtlCache = LocalTtlCache;
//# sourceMappingURL=index.cjs.map