@neezco/cache 0.1.1 → 0.2.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -67,10 +67,16 @@ const DEFAULT_TTL = 30 * ONE_MINUTE;
  const DEFAULT_STALE_WINDOW = 0;
  /**
  * Maximum number of entries the cache can hold.
- * Beyond this limit, less-used entries are evicted.
+ * Beyond this limit, new entries are ignored.
  */
  const DEFAULT_MAX_SIZE = Infinity;
  /**
+ * Default maximum memory size in MB the cache can use.
+ * Beyond this limit, new entries are ignored.
+ * @default Infinite (unlimited)
+ */
+ const DEFAULT_MAX_MEMORY_SIZE = Infinity;
+ /**
  * ===================================================================
  * Sweep & Cleanup Operations
  * Parameters controlling how and when expired entries are removed.
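0.2.1 adds a memory ceiling next to the entry-count ceiling: `DEFAULT_MAX_MEMORY_SIZE` backs a new `maxMemorySize` option (in MB) that is checked against process RSS when new keys are written (see the `setOrUpdate` hunk below). A minimal configuration sketch, assuming `createCache` is exposed from the package entry point; the 256 MB and 10_000 values are illustrative:

```typescript
// Hedged sketch: configuring the new maxMemorySize option introduced in 0.2.1.
// The option names come from this diff; the import path and the numeric values
// are assumptions for illustration only.
import { createCache } from "@neezco/cache";

const state = createCache({
  maxMemorySize: 256, // MB of process RSS; beyond this, brand-new keys are ignored
  maxSize: 10_000,    // entry-count limit; same "ignore new entries" semantics
});
```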
@@ -589,11 +595,18 @@ function computeEntryStatus(state, entry, now) {
  * - It has not expired according to its own timestamps, and
  * - No associated tag imposes a stricter stale or expired rule.
  *
+ * `entry` can be either a {@link CacheEntry} or a pre-computed {@link ENTRY_STATUS}.
+ * Passing a pre-computed status avoids recalculating the entry status.
+ *
  * @param state - The cache state containing tag metadata.
- * @param entry - The cache entry being evaluated.
+ * @param entry - The cache entry or pre-computed status being evaluated.
+ * @param now - The current timestamp.
  * @returns True if the entry is fresh.
  */
- const isFresh = (state, entry, now) => computeEntryStatus(state, entry, now) === ENTRY_STATUS.FRESH;
+ const isFresh = (state, entry, now) => {
+ if (typeof entry === "string") return entry === ENTRY_STATUS.FRESH;
+ return computeEntryStatus(state, entry, now) === ENTRY_STATUS.FRESH;
+ };
  /**
  * Determines whether a cache entry is stale.
  *
@@ -601,11 +614,18 @@ const isFresh = (state, entry, now) => computeEntryStatus(state, entry, now) ===
  * - It has passed its TTL but is still within its stale window, or
  * - A tag imposes a stale rule that applies to this entry.
  *
+ * `entry` can be either a {@link CacheEntry} or a pre-computed {@link ENTRY_STATUS}.
+ * Passing a pre-computed status avoids recalculating the entry status.
+ *
  * @param state - The cache state containing tag metadata.
- * @param entry - The cache entry being evaluated.
+ * @param entry - The cache entry or pre-computed status being evaluated.
+ * @param now - The current timestamp.
  * @returns True if the entry is stale.
  */
- const isStale = (state, entry, now) => computeEntryStatus(state, entry, now) === ENTRY_STATUS.STALE;
+ const isStale = (state, entry, now) => {
+ if (typeof entry === "string") return entry === ENTRY_STATUS.STALE;
+ return computeEntryStatus(state, entry, now) === ENTRY_STATUS.STALE;
+ };
  /**
  * Determines whether a cache entry is expired.
  *
@@ -613,11 +633,18 @@ const isStale = (state, entry, now) => computeEntryStatus(state, entry, now) ===
  * - It has exceeded both its TTL and stale TTL, or
  * - A tag imposes an expiration rule that applies to this entry.
  *
+ * `entry` can be either a {@link CacheEntry} or a pre-computed {@link ENTRY_STATUS}.
+ * Passing a pre-computed status avoids recalculating the entry status.
+ *
  * @param state - The cache state containing tag metadata.
- * @param entry - The cache entry being evaluated.
+ * @param entry - The cache entry or pre-computed status being evaluated.
+ * @param now - The current timestamp.
  * @returns True if the entry is expired.
  */
- const isExpired = (state, entry, now) => computeEntryStatus(state, entry, now) === ENTRY_STATUS.EXPIRED;
+ const isExpired = (state, entry, now) => {
+ if (typeof entry === "string") return entry === ENTRY_STATUS.EXPIRED;
+ return computeEntryStatus(state, entry, now) === ENTRY_STATUS.EXPIRED;
+ };

  //#endregion
  //#region src/sweep/sweep-once.ts
@@ -642,10 +669,11 @@ function _sweepOnce(state, _maxKeysPerBatch = MAX_KEYS_PER_BATCH) {
  processed += 1;
  const [key, entry] = next.value;
  const now = Date.now();
- if (isExpired(state, entry, now)) {
+ const status = computeEntryStatus(state, entry, now);
+ if (isExpired(state, status, now)) {
  deleteKey(state, key, DELETE_REASON.EXPIRED);
  expiredCount += 1;
- } else if (isStale(state, entry, now)) {
+ } else if (isStale(state, status, now)) {
  staleCount += 1;
  if (state.purgeStaleOnSweep) deleteKey(state, key, DELETE_REASON.STALE);
  }
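The predicates above now accept either a `CacheEntry` or a pre-computed `ENTRY_STATUS` string, which is how `_sweepOnce` (this hunk) and `get` (below) avoid evaluating `computeEntryStatus` more than once per key. A sketch of the call pattern using the internal names from this diff; `state` and `entry` stand in for a real cache state and entry:

```typescript
// Sketch of the 0.2.1 pattern: compute the status once, reuse it in each predicate.
// These are internal helpers from the bundle above, not a documented public API;
// `state` and `entry` are placeholders.
const now = Date.now();
const status = computeEntryStatus(state, entry, now); // evaluated a single time

if (isExpired(state, status, now)) {
  // `status` is an ENTRY_STATUS string, so the predicate only compares it
} else if (isStale(state, status, now)) {
  // no second computeEntryStatus call happens here
} else {
  // isFresh(state, status, now) would be true on this branch
}
```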
@@ -805,7 +833,7 @@ let _initSweepScheduled = false;
  * @returns The initial cache state.
  */
  const createCache = (options = {}) => {
- const { onExpire, onDelete, defaultTtl = DEFAULT_TTL, maxSize = DEFAULT_MAX_SIZE, _maxAllowExpiredRatio = DEFAULT_MAX_EXPIRED_RATIO, defaultStaleWindow = DEFAULT_STALE_WINDOW, purgeStaleOnGet = false, purgeStaleOnSweep = false, _autoStartSweep = true } = options;
+ const { onExpire, onDelete, defaultTtl = DEFAULT_TTL, maxSize = DEFAULT_MAX_SIZE, maxMemorySize = DEFAULT_MAX_MEMORY_SIZE, _maxAllowExpiredRatio = DEFAULT_MAX_EXPIRED_RATIO, defaultStaleWindow = DEFAULT_STALE_WINDOW, purgeStaleOnGet = false, purgeStaleOnSweep = false, _autoStartSweep = true } = options;
  _instanceCount++;
  if (_instanceCount > INSTANCE_WARNING_THRESHOLD) console.warn(`Too many instances detected (${_instanceCount}). This may indicate a configuration issue; consider minimizing instance creation or grouping keys by expected expiration ranges. See the documentation: https://github.com/neezco/cache/docs/getting-started.md`);
  const state = {
@@ -817,6 +845,7 @@ const createCache = (options = {}) => {
  onExpire,
  onDelete,
  maxSize,
+ maxMemorySize,
  defaultTtl,
  defaultStaleWindow,
  purgeStaleOnGet,
850
879
  const get = (state, key, now = Date.now()) => {
851
880
  const entry = state.store.get(key);
852
881
  if (!entry) return void 0;
853
- if (isFresh(state, entry, now)) return entry[1];
854
- if (isStale(state, entry, now)) {
882
+ const status = computeEntryStatus(state, entry, now);
883
+ if (isFresh(state, status, now)) return entry[1];
884
+ if (isStale(state, status, now)) {
855
885
  if (state.purgeStaleOnGet) deleteKey(state, key, DELETE_REASON.STALE);
856
886
  return entry[1];
857
887
  }
@@ -917,16 +947,23 @@ function invalidateTag(state, tags, options = {}, _now = Date.now()) {
  * @param state - The cache state.
  * @param input - Cache entry definition (key, value, ttl, staleWindow, tags).
  * @param now - Optional timestamp override used as the base time (defaults to Date.now()).
+ * @returns True if the entry was created or updated, false if rejected due to limits or invalid input.
  *
  * @remarks
  * - `ttl` defines when the entry becomes expired.
  * - `staleWindow` defines how long the entry may still be served as stale
  * after the expiration moment (`now + ttl`).
+ * - Returns false if value is `undefined` (entry ignored, existing value untouched).
+ * - Returns false if new entry would exceed `maxSize` limit (existing keys always allowed).
+ * - Returns false if new entry would exceed `maxMemorySize` limit (existing keys always allowed).
+ * - Returns true if entry was set or updated (or if existing key was updated at limit).
  */
  const setOrUpdate = (state, input, now = Date.now()) => {
  const { key, value, ttl: ttlInput, staleWindow: staleWindowInput, tags } = input;
- if (value === void 0) return;
+ if (value === void 0) return false;
  if (key == null) throw new Error("Missing key.");
+ if (state.size >= state.maxSize && !state.store.has(key)) return false;
+ if (_metrics?.memory.total.rss && _metrics?.memory.total.rss >= state.maxMemorySize * 1024 * 1024 && !state.store.has(key)) return false;
  const ttl = ttlInput ?? state.defaultTtl;
  const staleWindow = staleWindowInput ?? state.defaultStaleWindow;
  const expiresAt = ttl > 0 ? now + ttl : Infinity;
@@ -940,6 +977,7 @@ const setOrUpdate = (state, input, now = Date.now()) => {
  typeof tags === "string" ? [tags] : Array.isArray(tags) ? tags : null
  ];
  state.store.set(key, entry);
+ return true;
  };

  //#endregion
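Because of the new limit checks, `setOrUpdate` now reports whether the write was accepted. A hedged sketch of a caller reacting to that boolean; `state`, the key, and the value are illustrative, and `setOrUpdate` is the internal helper shown in this hunk rather than a documented public API:

```typescript
// Hedged sketch: checking the boolean now returned by the internal setOrUpdate helper.
// `state` would come from createCache(); the key and value are illustrative.
const accepted = setOrUpdate(state, { key: "report:2024", value: { rows: 1200 } });

if (!accepted) {
  // Rejected: the value was undefined, or a brand-new key would exceed maxSize,
  // or process RSS is already above maxMemorySize (compared in bytes, MB * 1024 * 1024).
}
```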
@@ -1031,14 +1069,19 @@ var LocalTtlCache = class {
  * @param options.ttl - Time-To-Live in milliseconds. Defaults to `defaultTtl`
  * @param options.staleWindow - How long to serve stale data after expiration (milliseconds)
  * @param options.tags - One or more tags for group invalidation
+ * @returns True if the entry was set or updated, false if rejected due to limits or invalid input
  *
  * @example
  * ```typescript
- * cache.set("user:123", { name: "Alice" }, {
+ * const success = cache.set("user:123", { name: "Alice" }, {
  * ttl: 5 * 60 * 1000,
  * staleWindow: 1 * 60 * 1000,
  * tags: "user:123",
  * });
+ *
+ * if (!success) {
+ * console.log("Entry was rejected due to size or memory limits");
+ * }
  * ```
  *
  * @edge-cases
@@ -1046,9 +1089,13 @@ var LocalTtlCache = class {
  * - If `ttl` is 0 or Infinite, the entry never expires
  * - If `staleWindow` is larger than `ttl`, the entry can be served as stale longer than it was fresh
  * - Tags are optional; only necessary for group invalidation via `invalidateTag()`
+ * - Returns `false` if value is `undefined` (existing value remains untouched)
+ * - Returns `false` if new key would exceed [`maxSize`](./docs/configuration.md#maxsize-number) limit
+ * - Returns `false` if new key would exceed [`maxMemorySize`](./docs/configuration.md#maxmemorysize-number) limit
+ * - Updating existing keys always succeeds, even at limit
  */
  set(key, value, options) {
- setOrUpdate(this.state, {
+ return setOrUpdate(this.state, {
  key,
  value,
  ttl: options?.ttl,
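The edge cases above mean rejection only applies to brand-new keys; updates to existing keys succeed even at the limit. A small sketch of that behavior, assuming the `LocalTtlCache` constructor forwards options such as `maxSize` to `createCache` as the state wiring earlier in this diff suggests:

```typescript
// Hedged sketch of the limit semantics described in the JSDoc above.
// Assumes the LocalTtlCache constructor accepts the same options as createCache.
const cache = new LocalTtlCache({ maxSize: 1 });

cache.set("a", 1);  // true  - the first entry fits
cache.set("b", 2);  // false - a brand-new key is rejected once the limit is reached
cache.set("a", 42); // true  - updating an existing key always succeeds
```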
@@ -1 +1 @@
- {"version":3,"file":"index.cjs","names":["performance"],"sources":["../../src/cache/clear.ts","../../src/defaults.ts","../../src/utils/get-process-memory-limit.ts","../../src/utils/process-monitor.ts","../../src/utils/start-monitor.ts","../../src/sweep/batchUpdateExpiredRatio.ts","../../src/utils/interpolate.ts","../../src/sweep/calculate-optimal-sweep-params.ts","../../src/sweep/select-instance-to-sweep.ts","../../src/cache/delete.ts","../../src/types.ts","../../src/utils/status-from-tags.ts","../../src/cache/validators.ts","../../src/sweep/sweep-once.ts","../../src/sweep/calculate-optimal-max-expired-ratio.ts","../../src/sweep/update-weight.ts","../../src/sweep/sweep.ts","../../src/cache/create-cache.ts","../../src/cache/get.ts","../../src/cache/has.ts","../../src/cache/invalidate-tag.ts","../../src/cache/set.ts","../../src/index.ts"],"sourcesContent":["import type { CacheState } from \"../types\";\n\n/**\n * Clears all entries from the cache without invoking callbacks.\n *\n * @note The `onDelete` callback is NOT invoked during a clear operation.\n * This is intentional to avoid unnecessary overhead when bulk-removing entries.\n *\n * @param state - The cache state.\n * @returns void\n */\nexport const clear = (state: CacheState): void => {\n state.store.clear();\n};\n","// Time Unit Constants\n// Base temporal units used throughout the caching system.\nconst ONE_SECOND: number = 1000;\nconst ONE_MINUTE: number = 60 * ONE_SECOND;\n\n/**\n * ===================================================================\n * Cache Entry Lifecycle\n * Default TTL and stale window settings for short-lived cache entries.\n * ===================================================================\n */\n\n/**\n * Default Time-To-Live in milliseconds for cache entries.\n * @default 1_800_000 (30 minutes)\n */\nexport const DEFAULT_TTL: number = 30 * ONE_MINUTE;\n\n/**\n * Default stale window in milliseconds after expiration.\n * Allows serving slightly outdated data while fetching fresh data.\n */\nexport const DEFAULT_STALE_WINDOW: number = 0 as const;\n\n/**\n * Maximum number of entries the cache can hold.\n * Beyond this limit, less-used entries are evicted.\n */\nexport const DEFAULT_MAX_SIZE: number = Infinity;\n\n/**\n * ===================================================================\n * Sweep & Cleanup Operations\n * Parameters controlling how and when expired entries are removed.\n * ===================================================================\n */\n\n/**\n * Maximum number of keys to process in a single sweep batch.\n * Higher values = more aggressive cleanup, lower latency overhead.\n */\nexport const MAX_KEYS_PER_BATCH: number = 1000;\n\n/**\n * Minimal expired ratio enforced during sweeps.\n * Ensures control sweeps run above {@link EXPIRED_RATIO_MEMORY_THRESHOLD}.\n */\nexport const MINIMAL_EXPIRED_RATIO: number = 0.05;\n\n/**\n * Memory usage threshold (normalized 0–1) triggering control sweeps.\n * At or above this level, sweeping becomes more aggressive.\n */\nexport const EXPIRED_RATIO_MEMORY_THRESHOLD: number = 0.8;\n\n/**\n * Maximum allowed expired ratio when memory usage is low.\n * Upper bound for interpolation with MINIMAL_EXPIRED_RATIO.\n * Recommended range: `0.3 – 0.5` .\n */\nexport const DEFAULT_MAX_EXPIRED_RATIO: number = 0.4;\n\n/**\n * ===================================================================\n * Sweep Intervals & Timing\n * Frequency and time budgets for cleanup operations.\n * ===================================================================\n 
*/\n\n/**\n * Optimal interval in milliseconds between sweeps.\n * Used when system load is minimal and metrics are available.\n */\nexport const OPTIMAL_SWEEP_INTERVAL: number = 2 * ONE_SECOND;\n\n/**\n * Worst-case interval in milliseconds between sweeps.\n * Used when system load is high or metrics unavailable.\n */\nexport const WORST_SWEEP_INTERVAL: number = 200;\n\n/**\n * Maximum time budget in milliseconds for sweep operations.\n * Prevents sweeping from consuming excessive CPU during high load.\n */\nexport const WORST_SWEEP_TIME_BUDGET: number = 40;\n\n/**\n * Optimal time budget in milliseconds for each sweep cycle.\n * Used when performance metrics are not available or unreliable.\n */\nexport const OPTIMAL_SWEEP_TIME_BUDGET_IF_NOTE_METRICS_AVAILABLE: number = 15;\n\n/**\n * ===================================================================\n * Memory Management\n * Process limits and memory-safe thresholds.\n * ===================================================================\n */\n\n/**\n * Default maximum process memory limit in megabytes.\n * Acts as fallback when environment detection is unavailable.\n * NOTE: Overridable via environment detection at runtime.\n */\nexport const DEFAULT_MAX_PROCESS_MEMORY_MB: number = 1024;\n\n/**\n * ===================================================================\n * System Utilization Weights\n * Balance how memory, CPU, and event-loop pressure influence sweep behavior.\n * Sum of all weights: 10 + 8.5 + 6.5 = 25\n * ===================================================================\n */\n\n/**\n * Weight applied to memory utilization in sweep calculations.\n * Higher weight = memory pressure has more influence on sweep aggressiveness.\n */\nexport const DEFAULT_MEMORY_WEIGHT: number = 10;\n\n/**\n * Weight applied to CPU utilization in sweep calculations.\n * Combined with event-loop weight to balance CPU-related pressure.\n */\nexport const DEFAULT_CPU_WEIGHT: number = 8.5;\n\n/**\n * Weight applied to event-loop utilization in sweep calculations.\n * Complements CPU weight to assess overall processing capacity.\n */\nexport const DEFAULT_LOOP_WEIGHT: number = 6.5;\n","import fs from \"fs\";\nimport v8 from \"v8\";\n\n/**\n * Reads a number from a file.\n * @param path File path to read the number from.\n * @returns The number read from the file, or null if reading fails.\n */\nfunction readNumber(path: string): number | null {\n try {\n const raw = fs.readFileSync(path, \"utf8\").trim();\n const n = Number(raw);\n return Number.isFinite(n) ? 
n : null;\n } catch {\n return null;\n }\n}\n\n/**\n * Gets the memory limit imposed by cgroups, if any.\n * @return The memory limit in bytes, or null if no limit is found.\n */\nfunction getCgroupLimit(): number | null {\n // cgroup v2\n const v2 = readNumber(\"/sys/fs/cgroup/memory.max\");\n if (v2 !== null) return v2;\n\n // cgroup v1\n const v1 = readNumber(\"/sys/fs/cgroup/memory/memory.limit_in_bytes\");\n if (v1 !== null) return v1;\n\n return null;\n}\n\n/**\n * Gets the effective memory limit for the current process, considering both V8 heap limits and cgroup limits.\n * @returns The effective memory limit in bytes.\n */\nexport function getProcessMemoryLimit(): number {\n const heapLimit = v8.getHeapStatistics().heap_size_limit;\n const cgroupLimit = getCgroupLimit();\n\n if (cgroupLimit && cgroupLimit > 0 && cgroupLimit < Infinity) {\n return Math.min(heapLimit, cgroupLimit);\n }\n\n return heapLimit;\n}\n","import { performance, type EventLoopUtilization } from \"perf_hooks\";\n\n/**\n * Creates a performance monitor that periodically samples memory usage,\n * CPU usage, and event loop utilization for the current Node.js process.\n *\n * The monitor runs on a configurable interval and optionally invokes a\n * callback with the collected metrics on each cycle. It also exposes\n * methods to start and stop monitoring, retrieve the latest metrics,\n * and update configuration dynamically.\n *\n * @param options Configuration options for the monitor, including sampling\n * interval, maximum thresholds for normalization, and an optional callback.\n * @returns An API object that allows controlling the monitor lifecycle.\n */\nexport function createMonitorObserver(\n options?: Partial<CreateMonitorObserverOptions>,\n): ReturnCreateMonitor {\n let intervalId: NodeJS.Timeout | null = null;\n\n let lastMetrics: PerformanceMetrics | null = null;\n\n let prevHrtime = process.hrtime.bigint();\n\n let prevMem = process.memoryUsage();\n let prevCpu = process.cpuUsage();\n let prevLoop = performance.eventLoopUtilization();\n let lastCollectedAt = Date.now();\n\n const config = {\n interval: options?.interval ?? 500,\n // options.maxMemory is expected in MB; store bytes internally\n maxMemory: (options?.maxMemory ?? 
512) * 1024 * 1024,\n };\n\n function start(): void {\n if (intervalId) return; // already running\n\n intervalId = setInterval(() => {\n try {\n const now = Date.now();\n\n const metrics = collectMetrics({\n prevCpu,\n prevHrtime,\n prevMem,\n prevLoop,\n maxMemory: config.maxMemory,\n collectedAtMs: now,\n previousCollectedAtMs: lastCollectedAt,\n interval: config.interval,\n });\n\n lastMetrics = metrics;\n options?.callback?.(metrics);\n\n prevCpu = metrics.cpu.total;\n prevLoop = metrics.loop.total;\n prevMem = metrics.memory.total;\n\n prevHrtime = process.hrtime.bigint();\n lastCollectedAt = now;\n } catch (e: unknown) {\n stop();\n throw new Error(\"MonitorObserver: Not available\", { cause: e });\n }\n }, config.interval);\n\n if (typeof intervalId.unref === \"function\") {\n intervalId.unref();\n }\n }\n\n function stop(): void {\n if (intervalId) {\n clearInterval(intervalId);\n intervalId = null;\n }\n }\n\n function getMetrics(): PerformanceMetrics | null {\n if (lastMetrics) {\n return lastMetrics;\n }\n return null;\n }\n\n function updateConfig(newConfig: Partial<CreateMonitorObserverOptions>): void {\n if (newConfig.maxMemory !== undefined) {\n // convert MB -> bytes\n config.maxMemory = newConfig.maxMemory * 1024 * 1024;\n }\n\n if (newConfig.interval !== undefined) {\n config.interval = newConfig.interval;\n\n // restart if active to apply new interval\n if (intervalId) {\n stop();\n start();\n }\n }\n }\n\n return {\n start,\n stop,\n getMetrics,\n updateConfig,\n };\n}\n\n/**\n * Collects and normalizes performance metrics for the current process,\n * including memory usage, CPU usage, and event loop utilization.\n *\n * CPU and event loop metrics are computed as deltas relative to previously\n * recorded values. All metrics are normalized into a utilization between 0 and 1\n * based on the configured maximum thresholds.\n *\n * @param props Previous metric snapshots and normalization limits.\n * @returns A structured object containing normalized performance metrics.\n */\nexport function collectMetrics(props: {\n prevMem: NodeJS.MemoryUsage;\n prevCpu: NodeJS.CpuUsage;\n prevHrtime: bigint;\n prevLoop: EventLoopUtilization;\n maxMemory: number; // bytes\n collectedAtMs: number;\n previousCollectedAtMs: number;\n interval: number;\n}): PerformanceMetrics {\n const nowHrtime = process.hrtime.bigint();\n\n const elapsedNs = Number(nowHrtime - props.prevHrtime);\n const elapsedMs = elapsedNs / 1e6;\n const actualElapsed = props.collectedAtMs - props.previousCollectedAtMs;\n\n const mem = process.memoryUsage();\n const deltaMem: NodeJS.MemoryUsage = {\n rss: mem.rss - props.prevMem.rss,\n heapTotal: mem.heapTotal - props.prevMem.heapTotal,\n heapUsed: mem.heapUsed - props.prevMem.heapUsed,\n external: mem.external - props.prevMem.external,\n arrayBuffers: mem.arrayBuffers - props.prevMem.arrayBuffers,\n };\n const memRatio = Math.min(1, mem.rss / props.maxMemory);\n\n const cpuDelta = process.cpuUsage(props.prevCpu);\n const cpuMs = (cpuDelta.system + cpuDelta.user) / 1e3;\n const cpuRatio = cpuMs / elapsedMs;\n\n const loop = performance.eventLoopUtilization(props.prevLoop);\n\n return {\n cpu: {\n // deltaMs: cpuMs, // remove to avoid confusion with different unit type\n utilization: cpuRatio,\n delta: cpuDelta,\n total: process.cpuUsage(),\n },\n\n loop: {\n utilization: loop.utilization,\n delta: loop,\n total: performance.eventLoopUtilization(),\n },\n\n memory: {\n utilization: memRatio,\n delta: deltaMem,\n total: mem,\n },\n\n collectedAt: props.collectedAtMs,\n 
previousCollectedAt: props.previousCollectedAtMs,\n interval: props.interval,\n actualElapsed,\n };\n}\n\n// -----------------------------------------------------------------\n\n/**\n * Represents a metric extended with a normalized utilization between 0 and 1.\n *\n * The utilization indicates how close the metric is to its configured maximum\n * threshold, where 0 means minimal usage and 1 means the limit has been reached.\n *\n * @typeParam T The underlying metric type being normalized.\n */\nexport type NormalizedMetric<T> = T & {\n /** Normalized value between 0 and 1 */\n utilization: number;\n};\n\n/**\n * PerformanceMetrics describes the actual shape returned by collectMetrics.\n * All metric groups include raw `delta` and `total` objects plus a normalized utilization.\n */\nexport interface PerformanceMetrics {\n memory: NormalizedMetric<{\n delta: NodeJS.MemoryUsage;\n total: NodeJS.MemoryUsage;\n }>;\n\n cpu: NormalizedMetric<{\n delta: NodeJS.CpuUsage;\n total: NodeJS.CpuUsage;\n }>;\n\n loop: NormalizedMetric<{\n delta: EventLoopUtilization;\n total: EventLoopUtilization;\n }>;\n\n /** Timestamp in milliseconds when this metric was collected */\n collectedAt: number;\n\n /** Timestamp in milliseconds of the previous metric collection */\n previousCollectedAt: number;\n\n /** Interval in milliseconds at which the monitor is running */\n interval: number;\n\n /** Actual elapsed time in milliseconds since the last collection */\n actualElapsed: number;\n}\n\n/**\n * Options for createMonitorObserver.\n */\nexport interface CreateMonitorObserverOptions {\n /** Interval between samples in ms. Default: 500 */\n interval?: number;\n\n /** Maximum RSS memory in megabytes (MB) used for normalization. */\n maxMemory?: number;\n\n /** Optional callback invoked on each metrics sample. 
*/\n callback?: (metrics: PerformanceMetrics) => void;\n}\n\n/**\n * Public API returned by `createMonitorObserver`.\n *\n * Provides methods to start and stop monitoring, retrieve the latest metrics,\n * and update the monitor configuration at runtime.\n */\nexport interface ReturnCreateMonitor {\n /** Stops the monitoring interval */\n stop: () => void;\n\n /** Starts the monitoring interval */\n start: () => void;\n\n /** Returns the last collected metrics or null if none have been collected yet */\n getMetrics: () => PerformanceMetrics | null;\n\n /** Allows updating the monitor configuration on the fly */\n updateConfig: (newConfig: Partial<CreateMonitorObserverOptions>) => void;\n}\n","import { DEFAULT_MAX_PROCESS_MEMORY_MB, WORST_SWEEP_INTERVAL } from \"../defaults\";\n\nimport { getProcessMemoryLimit } from \"./get-process-memory-limit\";\nimport {\n createMonitorObserver,\n type PerformanceMetrics,\n type ReturnCreateMonitor,\n} from \"./process-monitor\";\n\nlet _monitorInstance: ReturnCreateMonitor | null = null;\n\n/** Latest collected metrics from the monitor */\nexport let _metrics: PerformanceMetrics | null;\n\n/** Maximum memory limit for the monitor (in MB) */\nexport let maxMemoryLimit: number = DEFAULT_MAX_PROCESS_MEMORY_MB;\n\n/** Use 90% of the effective limit */\nexport const SAFE_MEMORY_LIMIT_RATIO = 0.9;\n\nexport function startMonitor(): void {\n if (__BROWSER__) {\n // Ignore monitor in browser environments\n return;\n }\n\n if (!_monitorInstance) {\n try {\n const processMemoryLimit = getProcessMemoryLimit();\n\n if (processMemoryLimit && processMemoryLimit > 0) {\n maxMemoryLimit = (processMemoryLimit / 1024 / 1024) * SAFE_MEMORY_LIMIT_RATIO;\n }\n } catch {\n // TODO: proper logger\n // Ignore errors and use default\n // console.log(\"error getProcessMemoryLimit:\", e);\n }\n\n _monitorInstance = createMonitorObserver({\n callback(metrics) {\n _metrics = metrics;\n },\n interval: WORST_SWEEP_INTERVAL,\n maxMemory: maxMemoryLimit, // 1 GB\n });\n\n _monitorInstance.start();\n }\n}\n","import { _instancesCache } from \"../cache/create-cache\";\n\n/**\n * Updates the expired ratio for each cache instance based on the collected ratios.\n * @param currentExpiredRatios - An array of arrays containing expired ratios for each cache instance.\n * @internal\n */\nexport function _batchUpdateExpiredRatio(currentExpiredRatios: number[][]): void {\n for (const inst of _instancesCache) {\n const ratios = currentExpiredRatios[inst._instanceIndexState];\n if (ratios && ratios.length > 0) {\n const avgRatio = ratios.reduce((sum, val) => sum + val, 0) / ratios.length;\n\n const alpha = 0.6; // NOTE: this must be alway higher than 0.5 to prioritize recent avgRatio\n inst._expiredRatio = inst._expiredRatio * (1 - alpha) + avgRatio * alpha;\n }\n }\n}\n","/**\n * Interpolates a value between two numeric ranges.\n *\n * Maps `value` from [fromStart, fromEnd] to [toStart, toEnd].\n * Works with inverted ranges, negative values, and any numeric input.\n */\nexport function interpolate({\n value,\n fromStart,\n fromEnd,\n toStart,\n toEnd,\n}: {\n value: number;\n fromStart: number;\n fromEnd: number;\n toStart: number;\n toEnd: number;\n}): number {\n // Explicit and predictable: avoid division by zero.\n if (fromStart === fromEnd) return toStart;\n\n const t = (value - fromStart) / (fromEnd - fromStart);\n return toStart + t * (toEnd - toStart);\n}\n","import {\n DEFAULT_CPU_WEIGHT,\n DEFAULT_LOOP_WEIGHT,\n DEFAULT_MEMORY_WEIGHT,\n OPTIMAL_SWEEP_INTERVAL,\n WORST_SWEEP_INTERVAL,\n 
WORST_SWEEP_TIME_BUDGET,\n} from \"../defaults\";\nimport { interpolate } from \"../utils/interpolate\";\nimport type { PerformanceMetrics } from \"../utils/process-monitor\";\n\n/**\n * Weights for calculating the weighted utilization ratio.\n * Each weight determines how strongly each metric influences the final ratio.\n */\nexport interface UtilizationWeights {\n /** Weight applied to memory utilization (non-inverted). Default: 1 */\n memory?: number;\n\n /** Weight applied to CPU utilization (inverted). Default: 1 */\n cpu?: number;\n\n /** Weight applied to event loop utilization (inverted). Default: 1 */\n loop?: number;\n}\n\n/**\n * Represents the calculated optimal sweep parameters based on system metrics.\n */\nexport interface OptimalSweepParams {\n /** The optimal interval in milliseconds between sweep operations. */\n sweepIntervalMs: number;\n\n /** The optimal maximum time budget in milliseconds for a sweep cycle. */\n sweepTimeBudgetMs: number;\n}\n\n/**\n * Options for customizing the sweep parameter calculation.\n */\ninterface CalculateOptimalSweepParamsOptions {\n /** System performance metrics to base the calculations on. */\n metrics: PerformanceMetrics;\n\n /** Optional custom weights for each utilization metric. */\n weights?: UtilizationWeights;\n\n /** Interval (ms) used when system load is minimal. */\n optimalSweepIntervalMs?: number;\n\n /** Interval (ms) used when system load is maximal. */\n worstSweepIntervalMs?: number;\n\n /** Maximum sweep time budget (ms) under worst-case load. */\n worstSweepTimeBudgetMs?: number;\n}\n\n/**\n * Calculates adaptive sweep parameters based on real-time system utilization.\n *\n * Memory utilization is used as-is: higher memory usage → more conservative sweeps.\n * CPU and event loop utilization are inverted: lower usage → more conservative sweeps.\n *\n * This inversion ensures:\n * - When CPU and loop are *free*, sweeping becomes more aggressive (worst-case behavior).\n * - When CPU and loop are *busy*, sweeping becomes more conservative (optimal behavior).\n *\n * The final ratio is a weighted average of the three metrics, clamped to [0, 1].\n * This ratio is then used to interpolate between optimal and worst-case sweep settings.\n *\n * @param options - Optional configuration for weights and sweep bounds.\n * @returns Interpolated sweep interval, time budget, and the ratio used.\n */\nexport const calculateOptimalSweepParams = (\n options: CalculateOptimalSweepParamsOptions,\n): OptimalSweepParams => {\n const {\n metrics,\n weights = {},\n optimalSweepIntervalMs = OPTIMAL_SWEEP_INTERVAL,\n worstSweepIntervalMs = WORST_SWEEP_INTERVAL,\n worstSweepTimeBudgetMs = WORST_SWEEP_TIME_BUDGET,\n } = options;\n\n // Resolve metric weights (default = 1)\n const memoryWeight = weights.memory ?? DEFAULT_MEMORY_WEIGHT;\n const cpuWeight = weights.cpu ?? DEFAULT_CPU_WEIGHT;\n const loopWeight = weights.loop ?? DEFAULT_LOOP_WEIGHT;\n\n // Memory utilization is used directly (0–1)\n const memoryUtilization = metrics?.memory.utilization ?? 0;\n\n // Raw CPU and loop utilization (0–1)\n const cpuUtilizationRaw = metrics?.cpu.utilization ?? 0;\n const loopUtilizationRaw = metrics?.loop.utilization ?? 
0;\n\n // Invert CPU and loop utilization:\n // - Low CPU/loop usage → high inverted value → pushes toward worst-case behavior\n // - High CPU/loop usage → low inverted value → pushes toward optimal behavior\n const cpuUtilization = 1 - cpuUtilizationRaw;\n const loopUtilization = 1 - loopUtilizationRaw;\n\n // Weighted average of all metrics\n const weightedSum =\n memoryUtilization * memoryWeight + cpuUtilization * cpuWeight + loopUtilization * loopWeight;\n\n const totalWeight = memoryWeight + cpuWeight + loopWeight;\n\n // Final utilization ratio clamped to [0, 1]\n const ratio = Math.min(1, Math.max(0, weightedSum / totalWeight));\n\n // Interpolate sweep interval based on the ratio\n const sweepIntervalMs = interpolate({\n value: ratio,\n fromStart: 0,\n fromEnd: 1,\n toStart: optimalSweepIntervalMs,\n toEnd: worstSweepIntervalMs,\n });\n\n // Interpolate sweep time budget based on the ratio\n const sweepTimeBudgetMs = interpolate({\n value: ratio,\n fromStart: 0,\n fromEnd: 1,\n toStart: 0,\n toEnd: worstSweepTimeBudgetMs,\n });\n\n return {\n sweepIntervalMs,\n sweepTimeBudgetMs,\n };\n};\n","import { _instancesCache } from \"../cache/create-cache\";\nimport type { CacheState } from \"../types\";\n\n/**\n * Selects a cache instance to sweep based on sweep weights or round‑robin order.\n *\n * Two selection modes are supported:\n * - **Round‑robin mode**: If `totalSweepWeight` ≤ 0, instances are selected\n * deterministically in sequence using `batchSweep`. Once all instances\n * have been processed, returns `null`.\n * - **Weighted mode**: If sweep weights are available, performs a probabilistic\n * selection. Each instance’s `_sweepWeight` contributes proportionally to its\n * chance of being chosen.\n *\n * This function depends on `_updateWeightSweep` to maintain accurate sweep weights.\n *\n * @param totalSweepWeight - Sum of all sweep weights across instances.\n * @param batchSweep - Current batch index used for round‑robin selection.\n * @returns The selected `CacheState` instance, `null` if no instance remains,\n * or `undefined` if the cache is empty.\n */\nexport function _selectInstanceToSweep({\n totalSweepWeight,\n batchSweep,\n}: {\n totalSweepWeight: number;\n batchSweep: number;\n}): CacheState | null | undefined {\n // Default selection: initialize with the first instance in the cache list.\n // This acts as a fallback in case no weighted selection occurs.\n let instanceToSweep: CacheState | null | undefined = _instancesCache[0];\n\n if (totalSweepWeight <= 0) {\n // Case 1: No sweep weight assigned (all instances skipped or empty).\n // → Perform a deterministic round‑robin minimal sweep across all instances.\n // Each batch iteration selects the next instance in order.\n if (batchSweep > _instancesCache.length) {\n // If all instances have been processed in this cycle, no instance to sweep.\n instanceToSweep = null;\n }\n instanceToSweep = _instancesCache[batchSweep - 1] as CacheState;\n } else {\n // Case 2: Sweep weights are available.\n // → Perform a probabilistic selection based on relative sweep weights.\n // A random threshold is drawn in [0, totalSweepWeight].\n let threshold = Math.random() * totalSweepWeight;\n\n // Iterate through instances, subtracting each instance’s weight.\n // The first instance that reduces the threshold to ≤ 0 is selected.\n // This ensures that instances with higher weights have proportionally\n // higher probability of being chosen for sweeping.\n for (const inst of _instancesCache) {\n threshold -= inst._sweepWeight;\n if 
(threshold <= 0) {\n instanceToSweep = inst;\n break;\n }\n }\n }\n\n return instanceToSweep;\n}\n","import type { CacheState } from \"../types\";\n\nexport const enum DELETE_REASON {\n MANUAL = \"manual\",\n EXPIRED = \"expired\",\n STALE = \"stale\",\n}\n\n/**\n * Deletes a key from the cache.\n * @param state - The cache state.\n * @param key - The key.\n * @returns A boolean indicating whether the key was successfully deleted.\n */\nexport const deleteKey = (\n state: CacheState,\n key: string,\n reason: DELETE_REASON = DELETE_REASON.MANUAL,\n): boolean => {\n const onDelete = state.onDelete;\n const onExpire = state.onExpire;\n\n if (!onDelete && !onExpire) {\n return state.store.delete(key);\n }\n\n const entry = state.store.get(key);\n if (!entry) return false;\n\n state.store.delete(key);\n state.onDelete?.(key, entry[1], reason);\n if (reason !== DELETE_REASON.MANUAL) {\n state.onExpire?.(key, entry[1], reason);\n }\n\n return true;\n};\n","import type { DELETE_REASON } from \"./cache/delete\";\n\n/**\n * Base configuration shared between CacheOptions and CacheState.\n */\nexport interface CacheConfigBase {\n /**\n * Callback invoked when a key expires naturally.\n * @param key - The expired key.\n * @param value - The value associated with the expired key.\n * @param reason - The reason for deletion ('expired', or 'stale').\n */\n onExpire?: (\n key: string,\n value: unknown,\n reason: Exclude<DELETE_REASON, DELETE_REASON.MANUAL>,\n ) => void;\n\n /**\n * Callback invoked when a key is deleted, either manually or due to expiration.\n * @param key - The deleted key.\n * @param value - The value of the deleted key.\n * @param reason - The reason for deletion ('manual', 'expired', or 'stale').\n */\n onDelete?: (key: string, value: unknown, reason: DELETE_REASON) => void;\n\n /**\n * Default TTL (Time-To-Live) in milliseconds for entries without explicit TTL.\n * @default 1_800_000 (30 minutes)\n */\n defaultTtl: number;\n\n /**\n * Default stale window in milliseconds for entries that do not\n * specify their own `staleWindowMs`.\n *\n * This window determines how long an entry may continue to be\n * served as stale after it reaches its expiration time.\n *\n * The window is always relative to the entry’s own expiration\n * moment, regardless of whether that expiration comes from an\n * explicit `ttl` or from the cache’s default TTL.\n * @default null (No stale window)\n */\n defaultStaleWindow: number;\n\n /**\n * Maximum number of entries the cache can hold.\n * Beyond this limit, new entries are ignored.\n * @default null (unlimited)\n */\n maxSize?: number;\n\n /**\n * Controls how stale entries are handled when read from the cache.\n *\n * - true → stale entries are purged immediately after being returned.\n * - false → stale entries are retained after being returned.\n *\n * @default false\n */\n purgeStaleOnGet: boolean;\n\n /**\n * Controls how stale entries are handled during sweep operations.\n *\n * - true → stale entries are purged during sweeps.\n * - false → stale entries are retained during sweeps.\n *\n * @default false\n */\n purgeStaleOnSweep: boolean;\n\n /**\n * Whether to automatically start the sweep process when the cache is created.\n *\n * - true → sweep starts automatically.\n * - false → sweep does not start automatically, allowing manual control.\n *\n * @internal\n * @default true\n */\n _autoStartSweep: boolean;\n\n /**\n * Allowed expired ratio for the cache instance.\n */\n _maxAllowExpiredRatio: number;\n}\n\n/**\n * Public configuration options for 
the TTL cache.\n */\nexport type CacheOptions = Partial<CacheConfigBase>;\n\n/**\n * Options for `invalidateTag` operation. Kept intentionally extensible so\n * future flags can be added without breaking callers.\n */\nexport interface InvalidateTagOptions {\n /** If true, mark affected entries as stale instead of fully expired. */\n asStale?: boolean;\n\n // Allow additional option fields for forward-compatibility.\n [key: string]: unknown;\n}\n\n/**\n * Lifecycle timestamps stored in a Tuple:\n * - 0 → createdAt\n * - 1 → expiresAt\n * - 2 → staleExpiresAt\n */\nexport type EntryTimestamp = [\n /** createdAt: Absolute timestamp the entry was created (Date.now()). */\n number,\n\n /** expiresAt: Absolute timestamp when the entry becomes invalid (Date.now() + TTL). */\n number,\n\n /** staleExpiresAt: Absolute timestamp when the entry stops being stale (Date.now() + staleTTL). */\n number,\n];\n\n/**\n * Represents a single cache entry.\n */\nexport type CacheEntry = [\n EntryTimestamp,\n\n /** The stored value. */\n unknown,\n\n (\n /**\n * Optional list of tags associated with this entry.\n * Tags can be used for:\n * - Group invalidation (e.g., clearing all entries with a given tag)\n * - Namespacing or categorization\n * - Tracking dependencies\n *\n * If no tags are associated, this field is `null`.\n */\n string[] | null\n ),\n];\n\n/**\n * Status of a cache entry.\n */\nexport enum ENTRY_STATUS {\n /** The entry is fresh and valid. */\n FRESH = \"fresh\",\n /** The entry is stale but can still be served. */\n STALE = \"stale\",\n /** The entry has expired and is no longer valid. */\n EXPIRED = \"expired\",\n}\n\n/**\n * Internal state of the TTL cache.\n */\nexport interface CacheState extends CacheConfigBase {\n /** Map storing key-value entries. */\n store: Map<string, CacheEntry>;\n\n /** Current size */\n size: number;\n\n /** Iterator for sweeping keys. */\n _sweepIter: MapIterator<[string, CacheEntry]> | null;\n\n /** Index of this instance for sweep all. */\n _instanceIndexState: number;\n\n /** Expire ratio avg for instance */\n _expiredRatio: number;\n\n /** Sweep weight for instance, calculate based on size and _expiredRatio */\n _sweepWeight: number;\n\n /**\n * Tag invalidation state.\n * Each tag stores:\n * - 0 → moment when the tag was marked as expired (0 if never)\n * - 1 → moment when the tag was marked as stale (0 if never)\n *\n * These timestamps define whether a tag affects an entry based on\n * the entry's creation time. */\n _tags: Map<string, [number, number]>;\n}\n","import { ENTRY_STATUS, type CacheEntry, type CacheState } from \"../types\";\n\n/**\n * Computes the derived status of a cache entry based on its associated tags.\n *\n * Tags may impose stricter expiration or stale rules on the entry. 
Only tags\n * created at or after the entry's creation timestamp are considered relevant.\n *\n * Resolution rules:\n * - If any applicable tag marks the entry as expired, the status becomes `EXPIRED`.\n * - Otherwise, if any applicable tag marks it as stale, the status becomes `STALE`.\n * - If no tag imposes stricter rules, the entry remains `FRESH`.\n *\n * @param state - The cache state containing tag metadata.\n * @param entry - The cache entry whose status is being evaluated.\n * @returns A tuple containing:\n * - The final {@link ENTRY_STATUS} imposed by tags.\n * - The earliest timestamp at which a tag marked the entry as stale\n * (or 0 if no tag imposed a stale rule).\n */\nexport function _statusFromTags(state: CacheState, entry: CacheEntry): [ENTRY_STATUS, number] {\n const entryCreatedAt = entry[0][0];\n\n // Tracks the earliest point in time when any tag marked this entry as stale.\n // Initialized to Infinity so that comparisons always pick the minimum.\n let earliestTagStaleInvalidation = Infinity;\n\n // Default assumption: entry is fresh unless tags override.\n let status = ENTRY_STATUS.FRESH;\n\n const tags = entry[2];\n if (tags) {\n for (const tag of tags) {\n const ts = state._tags.get(tag);\n if (!ts) continue;\n\n // Each tag provides two timestamps:\n // - tagExpiredAt: when the tag forces expiration\n // - tagStaleSinceAt: when the tag forces stale status\n const [tagExpiredAt, tagStaleSinceAt] = ts;\n\n // A tag can only override if it was created after the entry itself.\n if (tagExpiredAt >= entryCreatedAt) {\n status = ENTRY_STATUS.EXPIRED;\n break; // Expired overrides everything, no need to check further.\n }\n\n if (tagStaleSinceAt >= entryCreatedAt) {\n // Keep track of the earliest stale timestamp across all tags.\n if (tagStaleSinceAt < earliestTagStaleInvalidation) {\n earliestTagStaleInvalidation = tagStaleSinceAt;\n }\n status = ENTRY_STATUS.STALE;\n }\n }\n }\n\n // If no tag imposed stale, return 0 for the timestamp.\n return [status, status === ENTRY_STATUS.STALE ? earliestTagStaleInvalidation : 0];\n}\n","import { ENTRY_STATUS, type CacheEntry, type CacheState } from \"../types\";\nimport { _statusFromTags } from \"../utils/status-from-tags\";\n\n/**\n * Computes the final derived status of a cache entry by combining:\n *\n * - The entry's own expiration timestamps (TTL and stale TTL).\n * - Any stricter expiration or stale rules imposed by its associated tags.\n *\n * Precedence rules:\n * - `EXPIRED` overrides everything.\n * - `STALE` overrides `FRESH`.\n * - If neither the entry nor its tags impose stricter rules, the entry is `FRESH`.\n *\n * @param state - The cache state containing tag metadata.\n * @param entry - The cache entry being evaluated.\n * @returns The final {@link ENTRY_STATUS} for the entry.\n */\nexport function computeEntryStatus(\n state: CacheState,\n entry: CacheEntry,\n\n /** @internal */\n now: number,\n): ENTRY_STATUS {\n const [__createdAt, expiresAt, staleExpiresAt] = entry[0];\n\n // 1. 
Status derived from tags\n const [tagStatus, earliestTagStaleInvalidation] = _statusFromTags(state, entry);\n if (tagStatus === ENTRY_STATUS.EXPIRED) return ENTRY_STATUS.EXPIRED;\n const windowStale = staleExpiresAt - expiresAt;\n if (\n tagStatus === ENTRY_STATUS.STALE &&\n staleExpiresAt > 0 &&\n now < earliestTagStaleInvalidation + windowStale\n ) {\n // A tag can mark the entry as stale only if the entry itself supports a stale window.\n // The tag's stale invalidation time is extended by the entry's stale window duration.\n // If \"now\" is still within that extended window, the entry is considered stale.\n return ENTRY_STATUS.STALE;\n }\n\n // 2. Status derived from entry timestamps\n if (now < expiresAt) {\n return ENTRY_STATUS.FRESH;\n }\n if (staleExpiresAt > 0 && now < staleExpiresAt) {\n return ENTRY_STATUS.STALE;\n }\n\n return ENTRY_STATUS.EXPIRED;\n}\n\n// ---------------------------------------------------------------------------\n// Entry status wrappers (semantic helpers built on top of computeEntryStatus)\n// ---------------------------------------------------------------------------\n/**\n * Determines whether a cache entry is fresh.\n *\n * A fresh entry is one whose final derived status is `FRESH`, meaning:\n * - It has not expired according to its own timestamps, and\n * - No associated tag imposes a stricter stale or expired rule.\n *\n * @param state - The cache state containing tag metadata.\n * @param entry - The cache entry being evaluated.\n * @returns True if the entry is fresh.\n */\nexport const isFresh = (state: CacheState, entry: CacheEntry, now: number): boolean =>\n computeEntryStatus(state, entry, now) === ENTRY_STATUS.FRESH;\n\n/**\n * Determines whether a cache entry is stale.\n *\n * A stale entry is one whose final derived status is `STALE`, meaning:\n * - It has passed its TTL but is still within its stale window, or\n * - A tag imposes a stale rule that applies to this entry.\n *\n * @param state - The cache state containing tag metadata.\n * @param entry - The cache entry being evaluated.\n * @returns True if the entry is stale.\n */\nexport const isStale = (\n state: CacheState,\n entry: CacheEntry,\n\n /** @internal */\n now: number,\n): boolean => computeEntryStatus(state, entry, now) === ENTRY_STATUS.STALE;\n\n/**\n * Determines whether a cache entry is expired.\n *\n * An expired entry is one whose final derived status is `EXPIRED`, meaning:\n * - It has exceeded both its TTL and stale TTL, or\n * - A tag imposes an expiration rule that applies to this entry.\n *\n * @param state - The cache state containing tag metadata.\n * @param entry - The cache entry being evaluated.\n * @returns True if the entry is expired.\n */\nexport const isExpired = (\n state: CacheState,\n entry: CacheEntry,\n\n /** @internal */\n now: number,\n): boolean => computeEntryStatus(state, entry, now) === ENTRY_STATUS.EXPIRED;\n\n/**\n * Determines whether a cache entry is valid.\n *\n * A valid entry is one whose final derived status is either:\n * - `FRESH`, or\n * - `STALE` (still within its stale window).\n *\n * Expired entries are considered invalid.\n *\n * @param state - The cache state containing tag metadata.\n * @param entry - The cache entry, or undefined/null if not found.\n * @returns True if the entry exists and is fresh or stale.\n */\nexport const isValid = (\n state: CacheState,\n entry?: CacheEntry | null,\n\n /** @internal */\n now: number = Date.now(),\n): boolean => {\n if (!entry) return false;\n const status = computeEntryStatus(state, entry, 
now);\n return status === ENTRY_STATUS.FRESH || status === ENTRY_STATUS.STALE;\n};\n","import { DELETE_REASON, deleteKey } from \"../cache/delete\";\nimport { isExpired, isStale } from \"../cache/validators\";\nimport { MAX_KEYS_PER_BATCH } from \"../defaults\";\nimport { type CacheState } from \"../types\";\n\n/**\n * Performs a single sweep operation on the cache to remove expired and optionally stale entries.\n * Uses a linear scan with a saved pointer to resume from the last processed key.\n * @param state - The cache state.\n * @param _maxKeysPerBatch - Maximum number of keys to process in this sweep.\n * @returns An object containing statistics about the sweep operation.\n */\nexport function _sweepOnce(\n state: CacheState,\n\n /**\n * Maximum number of keys to process in this sweep.\n * @default 1000\n */\n _maxKeysPerBatch: number = MAX_KEYS_PER_BATCH,\n): { processed: number; expiredCount: number; staleCount: number; ratio: number } {\n if (!state._sweepIter) {\n state._sweepIter = state.store.entries();\n }\n\n let processed = 0;\n let expiredCount = 0;\n let staleCount = 0;\n\n for (let i = 0; i < _maxKeysPerBatch; i++) {\n const next = state._sweepIter.next();\n\n if (next.done) {\n state._sweepIter = state.store.entries();\n break;\n }\n\n processed += 1;\n const [key, entry] = next.value;\n\n const now = Date.now();\n\n if (isExpired(state, entry, now)) {\n deleteKey(state, key, DELETE_REASON.EXPIRED);\n expiredCount += 1;\n } else if (isStale(state, entry, now)) {\n staleCount += 1;\n\n if (state.purgeStaleOnSweep) {\n deleteKey(state, key, DELETE_REASON.STALE);\n }\n }\n }\n\n const expiredStaleCount = state.purgeStaleOnSweep ? staleCount : 0;\n return {\n processed,\n expiredCount,\n staleCount,\n ratio: processed > 0 ? (expiredCount + expiredStaleCount) / processed : 0,\n };\n}\n","import {\n DEFAULT_MAX_EXPIRED_RATIO,\n EXPIRED_RATIO_MEMORY_THRESHOLD,\n MINIMAL_EXPIRED_RATIO,\n} from \"../defaults\";\nimport { interpolate } from \"../utils/interpolate\";\nimport { _metrics, SAFE_MEMORY_LIMIT_RATIO } from \"../utils/start-monitor\";\n\n/**\n * Calculates the optimal maximum expired ratio based on current memory utilization.\n *\n * This function interpolates between `maxAllowExpiredRatio` and `MINIMAL_EXPIRED_RATIO`\n * depending on the memory usage reported by `_metrics`. At low memory usage (0%),\n * the optimal ratio equals `maxAllowExpiredRatio`. As memory usage approaches or exceeds\n * 80% of the memory limit, the optimal ratio decreases toward `MINIMAL_EXPIRED_RATIO`.\n *\n * @param maxAllowExpiredRatio - The maximum allowed expired ratio at minimal memory usage.\n * Defaults to `DEFAULT_MAX_EXPIRED_RATIO`.\n * @returns A normalized value between 0 and 1 representing the optimal expired ratio.\n */\nexport function calculateOptimalMaxExpiredRatio(\n maxAllowExpiredRatio: number = DEFAULT_MAX_EXPIRED_RATIO,\n): number {\n const EFFECTIVE_MEMORY_THRESHOLD = EXPIRED_RATIO_MEMORY_THRESHOLD / SAFE_MEMORY_LIMIT_RATIO;\n\n const optimalExpiredRatio = interpolate({\n value: _metrics?.memory.utilization ?? 
0,\n\n fromStart: 0, // baseline: memory usage ratio at 0%\n fromEnd: EFFECTIVE_MEMORY_THRESHOLD, // threshold: memory usage ratio at 80% of safe limit\n\n toStart: maxAllowExpiredRatio, // allowed ratio at minimal memory usage\n toEnd: MINIMAL_EXPIRED_RATIO, // allowed ratio at high memory usage (≥80%)\n });\n\n // At 0% memory usage, the optimalExpiredRatio equals maxAllowExpiredRatio.\n // At or above 80% memory usage, the optimalExpiredRatio approaches or falls below MINIMAL_EXPIRED_RATIO.\n\n return Math.min(1, Math.max(0, optimalExpiredRatio));\n}\n","import { _instancesCache } from \"../cache/create-cache\";\nimport { MINIMAL_EXPIRED_RATIO } from \"../defaults\";\n\nimport { calculateOptimalMaxExpiredRatio } from \"./calculate-optimal-max-expired-ratio\";\n\n/**\n * Updates the sweep weight (`_sweepWeight`) for each cache instance.\n *\n * The sweep weight determines the probability that an instance will be selected\n * for a cleanup (sweep) process. It is calculated based on the store size and\n * the ratio of expired keys.\n *\n * This function complements (`_selectInstanceToSweep`), which is responsible\n * for selecting the correct instance based on the weights assigned here.\n *\n * ---\n *\n * ### Sweep systems:\n * 1. **Normal sweep**\n * - Runs whenever the percentage of expired keys exceeds the allowed threshold\n * calculated by `calculateOptimalMaxExpiredRatio`.\n * - It is the main cleanup mechanism and is applied proportionally to the\n * store size and the expired‑key ratio.\n *\n * 2. **Memory‑conditioned sweep (control)**\n * - Works exactly like the normal sweep, except it may run even when it\n * normally wouldn’t.\n * - Only activates under **high memory pressure**.\n * - Serves as an additional control mechanism to adjust weights, keep the\n * system updated, and help prevent memory overflows.\n *\n * 3. 
**Round‑robin sweep (minimal control)**\n * - Always runs, even if the expired ratio is low or memory usage does not\n * require it.\n * - Processes a very small number of keys per instance, much smaller than\n * the normal sweep.\n * - Its main purpose is to ensure that all instances receive at least a\n * periodic weight update and minimal expired‑key control.\n *\n * ---\n * #### Important notes:\n * - A minimum `MINIMAL_EXPIRED_RATIO` (e.g., 5%) is assumed to ensure that\n * control sweeps can always run under high‑memory scenarios.\n * - Even with a minimum ratio, the normal sweep and the memory‑conditioned sweep\n * may **skip execution** if memory usage allows it and the expired ratio is\n * below the optimal maximum.\n * - The round‑robin sweep is never skipped: it always runs with a very small,\n * almost imperceptible cost.\n *\n * @returns The total accumulated sweep weight across all cache instances.\n */\nexport function _updateWeightSweep(): number {\n let totalSweepWeight = 0;\n\n for (const instCache of _instancesCache) {\n if (instCache.store.size <= 0) {\n // Empty instance → no sweep weight needed, skip sweep for this instance.\n instCache._sweepWeight = 0;\n continue;\n }\n\n // Ensure a minimum expired ratio to allow control sweeps.\n // If the real ratio is higher than the minimum, use the real ratio.\n let expiredRatio = MINIMAL_EXPIRED_RATIO;\n if (instCache._expiredRatio > MINIMAL_EXPIRED_RATIO) {\n expiredRatio = instCache._expiredRatio;\n }\n\n if (!__BROWSER__) {\n // In non‑browser environments, compute an optimal maximum allowed ratio.\n const optimalMaxExpiredRatio = calculateOptimalMaxExpiredRatio(\n instCache._maxAllowExpiredRatio,\n );\n\n if (expiredRatio <= optimalMaxExpiredRatio) {\n // If memory usage allows it and the expired ratio is low,\n // this sweep can be skipped. 
The reduced round‑robin sweep will still run.\n instCache._sweepWeight = 0;\n continue;\n }\n }\n\n // Normal sweep: weight proportional to store size and expired ratio.\n instCache._sweepWeight = instCache.store.size * expiredRatio;\n totalSweepWeight += instCache._sweepWeight;\n }\n\n return totalSweepWeight;\n}\n","import { _instancesCache } from \"../cache/create-cache\";\nimport {\n MAX_KEYS_PER_BATCH,\n OPTIMAL_SWEEP_INTERVAL,\n OPTIMAL_SWEEP_TIME_BUDGET_IF_NOTE_METRICS_AVAILABLE,\n} from \"../defaults\";\nimport type { CacheState } from \"../types\";\nimport { _metrics } from \"../utils/start-monitor\";\n\nimport { _batchUpdateExpiredRatio } from \"./batchUpdateExpiredRatio\";\nimport { calculateOptimalSweepParams } from \"./calculate-optimal-sweep-params\";\nimport { _selectInstanceToSweep } from \"./select-instance-to-sweep\";\nimport { _sweepOnce } from \"./sweep-once\";\nimport { _updateWeightSweep } from \"./update-weight\";\n\n/**\n * Performs a sweep operation on the cache to remove expired and optionally stale entries.\n * Uses a linear scan with a saved pointer to resume from the last processed key.\n * @param state - The cache state.\n */\nexport const sweep = async (\n state: CacheState,\n\n /** @internal */\n utilities: SweepUtilities = {},\n): Promise<void> => {\n const {\n schedule = defaultSchedule,\n yieldFn = defaultYieldFn,\n now = Date.now(),\n runOnlyOne = false,\n } = utilities;\n const startTime = now;\n\n let sweepIntervalMs = OPTIMAL_SWEEP_INTERVAL;\n let sweepTimeBudgetMs = OPTIMAL_SWEEP_TIME_BUDGET_IF_NOTE_METRICS_AVAILABLE;\n if (!__BROWSER__ && _metrics) {\n ({ sweepIntervalMs, sweepTimeBudgetMs } = calculateOptimalSweepParams({ metrics: _metrics }));\n }\n\n const totalSweepWeight = _updateWeightSweep();\n const currentExpiredRatios: number[][] = [];\n\n // Reduce the maximum number of keys per batch only when no instance weights are available\n // and the sweep is running in minimal round‑robin control mode. In this case, execute the\n // smallest possible sweep (equivalent to one batch, but divided across instances).\n const maxKeysPerBatch =\n totalSweepWeight <= 0 ? 
MAX_KEYS_PER_BATCH / _instancesCache.length : MAX_KEYS_PER_BATCH;\n\n let batchSweep = 0;\n while (true) {\n batchSweep += 1;\n\n const instanceToSweep = _selectInstanceToSweep({ batchSweep, totalSweepWeight });\n if (!instanceToSweep) {\n // No instance to sweep\n break;\n }\n\n const { ratio } = _sweepOnce(instanceToSweep, maxKeysPerBatch);\n // Initialize or update `currentExpiredRatios` array for current ratios\n (currentExpiredRatios[instanceToSweep._instanceIndexState] ??= []).push(ratio);\n\n if (Date.now() - startTime > sweepTimeBudgetMs) {\n break;\n }\n\n await yieldFn();\n }\n\n _batchUpdateExpiredRatio(currentExpiredRatios);\n\n // Schedule next sweep\n if (!runOnlyOne) {\n schedule(() => void sweep(state, utilities), sweepIntervalMs);\n }\n};\n\n// Default utilities for scheduling and yielding --------------------------------\nconst defaultSchedule: scheduleType = (fn, ms) => {\n const t = setTimeout(fn, ms);\n if (typeof t.unref === \"function\") t.unref();\n};\nexport const defaultYieldFn: yieldFnType = () => new Promise(resolve => setImmediate(resolve));\n\n// Types for internal utilities -----------------------------------------------\ntype scheduleType = (fn: () => void, ms: number) => void;\ntype yieldFnType = () => Promise<void>;\ninterface SweepUtilities {\n /**\n * Default scheduling function using setTimeout.\n * This can be overridden for testing.\n * @internal\n */\n schedule?: scheduleType;\n\n /**\n * Default yielding function using setImmediate.\n * This can be overridden for testing.\n * @internal\n */\n yieldFn?: yieldFnType;\n\n /** Current timestamp for testing purposes. */\n now?: number;\n\n /**\n * If true, only run one sweep cycle.\n * @internal\n */\n runOnlyOne?: boolean;\n}\n","import {\n DEFAULT_MAX_EXPIRED_RATIO,\n DEFAULT_MAX_SIZE,\n DEFAULT_STALE_WINDOW,\n DEFAULT_TTL,\n} from \"../defaults\";\nimport { sweep } from \"../sweep/sweep\";\nimport type { CacheOptions, CacheState } from \"../types\";\nimport { startMonitor } from \"../utils/start-monitor\";\n\nlet _instanceCount = 0;\nconst INSTANCE_WARNING_THRESHOLD = 99;\nexport const _instancesCache: CacheState[] = [];\n\n/**\n * Resets the instance count for testing purposes.\n * This function is intended for use in tests to avoid instance limits.\n */\nexport const _resetInstanceCount = (): void => {\n _instanceCount = 0;\n};\n\nlet _initSweepScheduled = false;\n\n/**\n * Creates the initial state for the TTL cache.\n * @param options - Configuration options for the cache.\n * @returns The initial cache state.\n */\nexport const createCache = (options: CacheOptions = {}): CacheState => {\n const {\n onExpire,\n onDelete,\n defaultTtl = DEFAULT_TTL,\n maxSize = DEFAULT_MAX_SIZE,\n _maxAllowExpiredRatio = DEFAULT_MAX_EXPIRED_RATIO,\n defaultStaleWindow = DEFAULT_STALE_WINDOW,\n purgeStaleOnGet = false,\n purgeStaleOnSweep = false,\n _autoStartSweep = true,\n } = options;\n\n _instanceCount++;\n\n // NEXT: warn if internal parameters are touch by user\n\n if (_instanceCount > INSTANCE_WARNING_THRESHOLD) {\n // NEXT: Use a proper logging mechanism\n // NEXT: Create documentation for this\n console.warn(\n `Too many instances detected (${_instanceCount}). This may indicate a configuration issue; consider minimizing instance creation or grouping keys by expected expiration ranges. 
See the documentation: https://github.com/neezco/cache/docs/getting-started.md`,\n );\n }\n\n const state: CacheState = {\n store: new Map(),\n _sweepIter: null,\n get size() {\n return state.store.size;\n },\n onExpire,\n onDelete,\n maxSize,\n defaultTtl,\n defaultStaleWindow,\n purgeStaleOnGet,\n purgeStaleOnSweep,\n _maxAllowExpiredRatio,\n _autoStartSweep,\n _instanceIndexState: -1,\n _expiredRatio: 0,\n _sweepWeight: 0,\n _tags: new Map(),\n };\n\n state._instanceIndexState = _instancesCache.push(state) - 1;\n\n // Start the sweep process\n if (_autoStartSweep) {\n if (_initSweepScheduled) return state;\n _initSweepScheduled = true;\n void sweep(state);\n }\n\n startMonitor();\n\n return state;\n};\n","import type { CacheState } from \"../types\";\n\nimport { DELETE_REASON, deleteKey } from \"./delete\";\nimport { isFresh, isStale } from \"./validators\";\n\n/**\n * Retrieves a value from the cache if the entry is valid.\n * @param state - The cache state.\n * @param key - The key to retrieve.\n * @param now - Optional timestamp override (defaults to Date.now()).\n * @returns The cached value if valid, null otherwise.\n */\nexport const get = (state: CacheState, key: string, now: number = Date.now()): unknown => {\n const entry = state.store.get(key);\n\n if (!entry) return undefined;\n\n if (isFresh(state, entry, now)) return entry[1];\n\n if (isStale(state, entry, now)) {\n if (state.purgeStaleOnGet) {\n deleteKey(state, key, DELETE_REASON.STALE);\n }\n return entry[1];\n }\n\n // If it expired, always delete it\n deleteKey(state, key, DELETE_REASON.EXPIRED);\n\n return undefined;\n};\n","import type { CacheState } from \"../types\";\n\nimport { get } from \"./get\";\n\n/**\n * Checks if a key exists in the cache and is not expired.\n * @param state - The cache state.\n * @param key - The key to check.\n * @param now - Optional timestamp override (defaults to Date.now()).\n * @returns True if the key exists and is valid, false otherwise.\n */\nexport const has = (state: CacheState, key: string, now: number = Date.now()): boolean => {\n return get(state, key, now) !== undefined;\n};\n","import type { CacheState, InvalidateTagOptions } from \"../types\";\n\n/**\n * Invalidates one or more tags so that entries associated with them\n * become expired or stale from this moment onward.\n *\n * Semantics:\n * - Each tag maintains two timestamps in `state._tags`:\n * [expiredAt, staleSinceAt].\n * - Calling this function updates one of those timestamps to `_now`,\n * depending on whether the tag should force expiration or staleness.\n *\n * Rules:\n * - If `asStale` is false (default), the tag forces expiration:\n * entries created before `_now` will be considered expired.\n * - If `asStale` is true, the tag forces staleness:\n * entries created before `_now` will be considered stale,\n * but only if they support a stale window.\n *\n * Behavior:\n * - Each call replaces any previous invalidation timestamp for the tag.\n * - Entries created after `_now` are unaffected.\n *\n * @param state - The cache state containing tag metadata.\n * @param tags - A tag or list of tags to invalidate.\n * @param options.asStale - Whether the tag should mark entries as stale.\n */\nexport function invalidateTag(\n state: CacheState,\n tags: string | string[],\n options: InvalidateTagOptions = {},\n\n /** @internal */\n _now: number = Date.now(),\n): void {\n const tagList = Array.isArray(tags) ? tags : [tags];\n const asStale = options.asStale ?? 
false;\n\n for (const tag of tagList) {\n const currentTag = state._tags.get(tag);\n\n if (currentTag) {\n // Update existing tag timestamps:\n // index 0 = expiredAt, index 1 = staleSinceAt\n if (asStale) {\n currentTag[1] = _now;\n } else {\n currentTag[0] = _now;\n }\n } else {\n // Initialize new tag entry with appropriate timestamp.\n // If marking as stale, expiredAt = 0 and staleSinceAt = _now.\n // If marking as expired, expiredAt = _now and staleSinceAt = 0.\n state._tags.set(tag, [asStale ? 0 : _now, asStale ? _now : 0]);\n }\n }\n}\n","import type { CacheState, CacheEntry } from \"../types\";\n\n/**\n * Sets or updates a value in the cache with TTL and an optional stale window.\n *\n * @param state - The cache state.\n * @param input - Cache entry definition (key, value, ttl, staleWindow, tags).\n * @param now - Optional timestamp override used as the base time (defaults to Date.now()).\n *\n * @remarks\n * - `ttl` defines when the entry becomes expired.\n * - `staleWindow` defines how long the entry may still be served as stale\n * after the expiration moment (`now + ttl`).\n */\nexport const setOrUpdate = (\n state: CacheState,\n input: CacheSetOrUpdateInput,\n\n /** @internal */\n now: number = Date.now(),\n): void => {\n const { key, value, ttl: ttlInput, staleWindow: staleWindowInput, tags } = input;\n\n if (value === undefined) return;\n if (key == null) throw new Error(\"Missing key.\");\n\n const ttl = ttlInput ?? state.defaultTtl;\n const staleWindow = staleWindowInput ?? state.defaultStaleWindow;\n\n const expiresAt = ttl > 0 ? now + ttl : Infinity;\n const entry: CacheEntry = [\n [\n now, // createdAt\n expiresAt, // expiresAt\n staleWindow > 0 ? expiresAt + staleWindow : 0, // staleExpiresAt (relative to expiration)\n ],\n value,\n typeof tags === \"string\" ? [tags] : Array.isArray(tags) ? 
tags : null,\n ];\n\n state.store.set(key, entry);\n};\n\n/**\n * Input parameters for setting or updating a cache entry.\n */\nexport interface CacheSetOrUpdateInput {\n /**\n * Key under which the value will be stored.\n */\n key: string;\n\n /**\n * Value to be written to the cache.\n *\n * Considerations:\n * - Always overwrites any previous value, if one exists.\n * - `undefined` is ignored, leaving any previous value intact, if one exists.\n * - `null` is explicitly stored as a null value, replacing any previous value, if one exists.\n */\n value: unknown;\n\n /**\n * TTL (Time-To-Live) in milliseconds for this entry.\n */\n ttl?: number;\n\n /**\n * Optional stale window in milliseconds.\n *\n * Defines how long the entry may continue to be served as stale\n * after it has reached its expiration time.\n *\n * The window is always relative to the entry’s own expiration moment,\n * whether that expiration comes from an explicit `ttl` or from the\n * cache’s default TTL.\n *\n * If omitted, the cache-level default stale window is used.\n */\n staleWindow?: number;\n\n /**\n * Optional tags associated with this entry.\n */\n tags?: string | string[];\n}\n","import { clear } from \"./cache/clear\";\nimport { createCache } from \"./cache/create-cache\";\nimport { deleteKey } from \"./cache/delete\";\nimport { get } from \"./cache/get\";\nimport { has } from \"./cache/has\";\nimport { invalidateTag } from \"./cache/invalidate-tag\";\nimport { setOrUpdate } from \"./cache/set\";\nimport type { CacheOptions, CacheState, InvalidateTagOptions } from \"./types\";\n\nexport type { CacheOptions, InvalidateTagOptions } from \"./types\";\n\n/**\n * A TTL (Time-To-Live) cache implementation with support for expiration,\n * stale windows, tag-based invalidation, and automatic sweeping.\n *\n * Provides O(1) constant-time operations for all core methods.\n *\n * @example\n * ```typescript\n * const cache = new LocalTtlCache();\n * cache.set(\"user:123\", { name: \"Alice\" }, { ttl: 5 * 60 * 1000 });\n * const user = cache.get(\"user:123\"); // { name: \"Alice\" }\n * ```\n */\nexport class LocalTtlCache {\n private state: CacheState;\n\n /**\n * Creates a new cache instance.\n *\n * @param options - Configuration options for the cache (defaultTtl, defaultStaleWindow, maxSize, etc.)\n *\n * @example\n * ```typescript\n * const cache = new LocalTtlCache({\n * defaultTtl: 30 * 60 * 1000, // 30 minutes\n * defaultStaleWindow: 5 * 60 * 1000, // 5 minutes\n * maxSize: 500_000, // Maximum 500_000 entries\n * onExpire: (key, value) => console.log(`Expired: ${key}`),\n * onDelete: (key, value, reason) => console.log(`Deleted: ${key}, reason: ${reason}`),\n * });\n * ```\n */\n constructor(options?: CacheOptions) {\n this.state = createCache(options);\n }\n\n /**\n * Gets the current number of entries tracked by the cache.\n *\n * This value may include entries that are already expired but have not yet been\n * removed by the lazy cleanup system. Expired keys are cleaned only when it is\n * efficient to do so, so the count can temporarily be higher than the number of\n * actually valid (non‑expired) entries.\n *\n * @returns The number of entries currently stored (including entries pending cleanup)\n *\n * @example\n * ```typescript\n * console.log(cache.size); // e.g., 42\n * ```\n */\n get size(): number {\n return this.state.size;\n }\n\n /**\n * Retrieves a value from the cache.\n *\n * Returns the value if it exists and is not fully expired. 
If an entry is in the\n * stale window (expired but still within staleWindow), the stale value is returned.\n *\n\n * @param key - The key to retrieve\n * @returns The cached value if valid, undefined otherwise\n *\n * @example\n * ```typescript\n * const user = cache.get<{ name: string }>(\"user:123\");\n * ```\n *\n * @edge-cases\n * - Returns `undefined` if the key doesn't exist\n * - Returns `undefined` if the key has expired beyond the stale window\n * - Returns the stale value if within the stale window\n * - If `purgeStaleOnGet` is enabled, stale entries are deleted after being returned\n */\n get<T = unknown>(key: string): T | undefined {\n return get(this.state, key) as T | undefined;\n }\n\n /**\n * Sets or updates a value in the cache.\n *\n * If the key already exists, it will be completely replaced.\n *\n * @param key - The key under which to store the value\n * @param value - The value to cache (any type)\n * @param options - Optional configuration for this specific entry\n * @param options.ttl - Time-To-Live in milliseconds. Defaults to `defaultTtl`\n * @param options.staleWindow - How long to serve stale data after expiration (milliseconds)\n * @param options.tags - One or more tags for group invalidation\n *\n * @example\n * ```typescript\n * cache.set(\"user:123\", { name: \"Alice\" }, {\n * ttl: 5 * 60 * 1000,\n * staleWindow: 1 * 60 * 1000,\n * tags: \"user:123\",\n * });\n * ```\n *\n * @edge-cases\n * - Overwriting an existing key replaces it completely\n * - If `ttl` is 0 or Infinite, the entry never expires\n * - If `staleWindow` is larger than `ttl`, the entry can be served as stale longer than it was fresh\n * - Tags are optional; only necessary for group invalidation via `invalidateTag()`\n */\n set(\n key: string,\n value: unknown,\n options?: {\n ttl?: number;\n staleWindow?: number;\n tags?: string | string[];\n },\n ): void {\n setOrUpdate(this.state, {\n key,\n value,\n ttl: options?.ttl,\n staleWindow: options?.staleWindow,\n tags: options?.tags,\n });\n }\n\n /**\n * Deletes a specific key from the cache.\n *\n * @param key - The key to delete\n * @returns True if the key was deleted, false if it didn't exist\n *\n * @example\n * ```typescript\n * const wasDeleted = cache.delete(\"user:123\");\n * ```\n *\n * @edge-cases\n * - Triggers the `onDelete` callback with reason `'manual'`\n * - Does not trigger the `onExpire` callback\n * - Returns `false` if the key was already expired\n * - Deleting a non-existent key returns `false` without error\n */\n delete(key: string): boolean {\n return deleteKey(this.state, key);\n }\n\n /**\n * Checks if a key exists in the cache and is not fully expired.\n *\n * Returns true if the key exists and is either fresh or within the stale window.\n * Use this when you only need to check existence without retrieving the value.\n *\n * @param key - The key to check\n * @returns True if the key exists and is valid, false otherwise\n *\n * @example\n * ```typescript\n * if (cache.has(\"user:123\")) {\n * // Key exists (either fresh or stale)\n * }\n * ```\n *\n * @edge-cases\n * - Returns `false` if the key doesn't exist\n * - Returns `false` if the key has expired beyond the stale window\n * - Returns `true` if the key is in the stale window (still being served)\n * - Both `has()` and `get()` have O(1) complexity; prefer `get()` if you need the value\n */\n has(key: string): boolean {\n return has(this.state, key);\n }\n\n /**\n * Removes all entries from the cache at once.\n *\n * This is useful for resetting the cache or 
freeing memory when needed.\n * The `onDelete` callback is NOT invoked during clear (intentional optimization).\n *\n * @example\n * ```typescript\n * cache.clear(); // cache.size is now 0\n * ```\n *\n * @edge-cases\n * - The `onDelete` callback is NOT triggered during clear\n * - Clears both expired and fresh entries\n * - Resets `cache.size` to 0\n */\n clear(): void {\n // NEXT: optional supor for onClear callback?\n clear(this.state);\n }\n\n /**\n * Marks all entries with one or more tags as expired (or stale, if requested).\n *\n * If an entry has multiple tags, invalidating ANY of those tags will invalidate the entry.\n *\n * @param tags - A single tag (string) or array of tags to invalidate\n * @param asStale - If true, marks entries as stale instead of fully expired (still served from stale window)\n *\n * @example\n * ```typescript\n * // Invalidate a single tag\n * cache.invalidateTag(\"user:123\");\n *\n * // Invalidate multiple tags\n * cache.invalidateTag([\"user:123\", \"posts:456\"]);\n * ```\n *\n * @edge-cases\n * - Does not throw errors if a tag has no associated entries\n * - Invalidating a tag doesn't prevent new entries from being tagged with it later\n * - The `onDelete` callback is triggered with reason `'expired'` (even if `asStale` is true)\n */\n invalidateTag(tags: string | string[], options?: InvalidateTagOptions): void {\n invalidateTag(this.state, tags, options ?? {});\n }\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAWA,MAAa,SAAS,UAA4B;AAChD,OAAM,MAAM,OAAO;;;;;ACVrB,MAAM,aAAqB;AAC3B,MAAM,aAAqB,KAAK;;;;;;;;;;;AAahC,MAAa,cAAsB,KAAK;;;;;AAMxC,MAAa,uBAA+B;;;;;AAM5C,MAAa,mBAA2B;;;;;;;;;;;AAaxC,MAAa,qBAA6B;;;;;AAM1C,MAAa,wBAAgC;;;;;AAM7C,MAAa,iCAAyC;;;;;;AAOtD,MAAa,4BAAoC;;;;;;;;;;;AAajD,MAAa,yBAAiC,IAAI;;;;;AAMlD,MAAa,uBAA+B;;;;;AAM5C,MAAa,0BAAkC;;;;;AAM/C,MAAa,sDAA8D;;;;;;;;;;;;AAc3E,MAAa,gCAAwC;;;;;;;;;;;;AAcrD,MAAa,wBAAgC;;;;;AAM7C,MAAa,qBAA6B;;;;;AAM1C,MAAa,sBAA8B;;;;;;;;;AC3H3C,SAAS,WAAW,MAA6B;AAC/C,KAAI;EACF,MAAM,MAAM,WAAG,aAAa,MAAM,OAAO,CAAC,MAAM;EAChD,MAAM,IAAI,OAAO,IAAI;AACrB,SAAO,OAAO,SAAS,EAAE,GAAG,IAAI;SAC1B;AACN,SAAO;;;;;;;AAQX,SAAS,iBAAgC;CAEvC,MAAM,KAAK,WAAW,4BAA4B;AAClD,KAAI,OAAO,KAAM,QAAO;CAGxB,MAAM,KAAK,WAAW,8CAA8C;AACpE,KAAI,OAAO,KAAM,QAAO;AAExB,QAAO;;;;;;AAOT,SAAgB,wBAAgC;CAC9C,MAAM,YAAY,WAAG,mBAAmB,CAAC;CACzC,MAAM,cAAc,gBAAgB;AAEpC,KAAI,eAAe,cAAc,KAAK,cAAc,SAClD,QAAO,KAAK,IAAI,WAAW,YAAY;AAGzC,QAAO;;;;;;;;;;;;;;;;;;AC/BT,SAAgB,sBACd,SACqB;CACrB,IAAI,aAAoC;CAExC,IAAI,cAAyC;CAE7C,IAAI,aAAa,QAAQ,OAAO,QAAQ;CAExC,IAAI,UAAU,QAAQ,aAAa;CACnC,IAAI,UAAU,QAAQ,UAAU;CAChC,IAAI,WAAWA,uBAAY,sBAAsB;CACjD,IAAI,kBAAkB,KAAK,KAAK;CAEhC,MAAM,SAAS;EACb,UAAU,SAAS,YAAY;EAE/B,YAAY,SAAS,aAAa,OAAO,OAAO;EACjD;CAED,SAAS,QAAc;AACrB,MAAI,WAAY;AAEhB,eAAa,kBAAkB;AAC7B,OAAI;IACF,MAAM,MAAM,KAAK,KAAK;IAEtB,MAAM,UAAU,eAAe;KAC7B;KACA;KACA;KACA;KACA,WAAW,OAAO;KAClB,eAAe;KACf,uBAAuB;KACvB,UAAU,OAAO;KAClB,CAAC;AAEF,kBAAc;AACd,aAAS,WAAW,QAAQ;AAE5B,cAAU,QAAQ,IAAI;AACtB,eAAW,QAAQ,KAAK;AACxB,cAAU,QAAQ,OAAO;AAEzB,iBAAa,QAAQ,OAAO,QAAQ;AACpC,sBAAkB;YACX,GAAY;AACnB,UAAM;AACN,UAAM,IAAI,MAAM,kCAAkC,EAAE,OAAO,GAAG,CAAC;;KAEhE,OAAO,SAAS;AAEnB,MAAI,OAAO,WAAW,UAAU,WAC9B,YAAW,OAAO;;CAItB,SAAS,OAAa;AACpB,MAAI,YAAY;AACd,iBAAc,WAAW;AACzB,gBAAa;;;CAIjB,SAAS,aAAwC;AAC/C,MAAI,YACF,QAAO;AAET,SAAO;;CAGT,SAAS,aAAa,WAAwD;AAC5E,MAAI,UAAU,cAAc,OAE1B,QAAO,YAAY,UAAU,YAAY,OAAO;AAGlD,MAAI,UAAU,aAAa,QAAW;AACpC,UAAO,WAAW,UAAU;AAG5B,OAAI,YAAY;AACd,UAAM;AACN,WAAO;;;;AAKb,QAAO;EACL;EACA;EACA;EACA;EACD;;;;;;;;;;;;;AAcH,SAAgB,eAAe,OASR;CACrB,MAAM,YAAY,QAAQ,OAAO,QAAQ;CAGzC,MAAM,YADY,OAAO,YAAY,MAAM,WAAW,G
ACxB;CAC9B,MAAM,gBAAgB,MAAM,gBAAgB,MAAM;CAElD,MAAM,MAAM,QAAQ,aAAa;CACjC,MAAM,WAA+B;EACnC,KAAK,IAAI,MAAM,MAAM,QAAQ;EAC7B,WAAW,IAAI,YAAY,MAAM,QAAQ;EACzC,UAAU,IAAI,WAAW,MAAM,QAAQ;EACvC,UAAU,IAAI,WAAW,MAAM,QAAQ;EACvC,cAAc,IAAI,eAAe,MAAM,QAAQ;EAChD;CACD,MAAM,WAAW,KAAK,IAAI,GAAG,IAAI,MAAM,MAAM,UAAU;CAEvD,MAAM,WAAW,QAAQ,SAAS,MAAM,QAAQ;CAEhD,MAAM,YADS,SAAS,SAAS,SAAS,QAAQ,MACzB;CAEzB,MAAM,OAAOA,uBAAY,qBAAqB,MAAM,SAAS;AAE7D,QAAO;EACL,KAAK;GAEH,aAAa;GACb,OAAO;GACP,OAAO,QAAQ,UAAU;GAC1B;EAED,MAAM;GACJ,aAAa,KAAK;GAClB,OAAO;GACP,OAAOA,uBAAY,sBAAsB;GAC1C;EAED,QAAQ;GACN,aAAa;GACb,OAAO;GACP,OAAO;GACR;EAED,aAAa,MAAM;EACnB,qBAAqB,MAAM;EAC3B,UAAU,MAAM;EAChB;EACD;;;;;AC1KH,IAAI,mBAA+C;;AAGnD,IAAW;;AAGX,IAAW,iBAAyB;;AAGpC,MAAa,0BAA0B;AAEvC,SAAgB,eAAqB;AAMnC,KAAI,CAAC,kBAAkB;AACrB,MAAI;GACF,MAAM,qBAAqB,uBAAuB;AAElD,OAAI,sBAAsB,qBAAqB,EAC7C,kBAAkB,qBAAqB,OAAO,OAAQ;UAElD;AAMR,qBAAmB,sBAAsB;GACvC,SAAS,SAAS;AAChB,eAAW;;GAEb,UAAU;GACV,WAAW;GACZ,CAAC;AAEF,mBAAiB,OAAO;;;;;;;;;;;ACxC5B,SAAgB,yBAAyB,sBAAwC;AAC/E,MAAK,MAAM,QAAQ,iBAAiB;EAClC,MAAM,SAAS,qBAAqB,KAAK;AACzC,MAAI,UAAU,OAAO,SAAS,GAAG;GAC/B,MAAM,WAAW,OAAO,QAAQ,KAAK,QAAQ,MAAM,KAAK,EAAE,GAAG,OAAO;GAEpE,MAAM,QAAQ;AACd,QAAK,gBAAgB,KAAK,iBAAiB,IAAI,SAAS,WAAW;;;;;;;;;;;;;ACRzE,SAAgB,YAAY,EAC1B,OACA,WACA,SACA,SACA,SAOS;AAET,KAAI,cAAc,QAAS,QAAO;AAGlC,QAAO,WADI,QAAQ,cAAc,UAAU,cACrB,QAAQ;;;;;;;;;;;;;;;;;;;;;ACkDhC,MAAa,+BACX,YACuB;CACvB,MAAM,EACJ,SACA,UAAU,EAAE,EACZ,yBAAyB,wBACzB,uBAAuB,sBACvB,yBAAyB,4BACvB;CAGJ,MAAM,eAAe,QAAQ,UAAU;CACvC,MAAM,YAAY,QAAQ,OAAO;CACjC,MAAM,aAAa,QAAQ,QAAQ;CAGnC,MAAM,oBAAoB,SAAS,OAAO,eAAe;CAGzD,MAAM,oBAAoB,SAAS,IAAI,eAAe;CACtD,MAAM,qBAAqB,SAAS,KAAK,eAAe;CAKxD,MAAM,iBAAiB,IAAI;CAC3B,MAAM,kBAAkB,IAAI;CAG5B,MAAM,cACJ,oBAAoB,eAAe,iBAAiB,YAAY,kBAAkB;CAEpF,MAAM,cAAc,eAAe,YAAY;CAG/C,MAAM,QAAQ,KAAK,IAAI,GAAG,KAAK,IAAI,GAAG,cAAc,YAAY,CAAC;AAoBjE,QAAO;EACL,iBAlBsB,YAAY;GAClC,OAAO;GACP,WAAW;GACX,SAAS;GACT,SAAS;GACT,OAAO;GACR,CAAC;EAaA,mBAVwB,YAAY;GACpC,OAAO;GACP,WAAW;GACX,SAAS;GACT,SAAS;GACT,OAAO;GACR,CAAC;EAKD;;;;;;;;;;;;;;;;;;;;;;;AC/GH,SAAgB,uBAAuB,EACrC,kBACA,cAIgC;CAGhC,IAAI,kBAAiD,gBAAgB;AAErE,KAAI,oBAAoB,GAAG;AAIzB,MAAI,aAAa,gBAAgB,OAE/B,mBAAkB;AAEpB,oBAAkB,gBAAgB,aAAa;QAC1C;EAIL,IAAI,YAAY,KAAK,QAAQ,GAAG;AAMhC,OAAK,MAAM,QAAQ,iBAAiB;AAClC,gBAAa,KAAK;AAClB,OAAI,aAAa,GAAG;AAClB,sBAAkB;AAClB;;;;AAKN,QAAO;;;;;AC1DT,IAAkB,0DAAX;AACL;AACA;AACA;;;;;;;;;AASF,MAAa,aACX,OACA,KACA,SAAwB,cAAc,WAC1B;CACZ,MAAM,WAAW,MAAM;CACvB,MAAM,WAAW,MAAM;AAEvB,KAAI,CAAC,YAAY,CAAC,SAChB,QAAO,MAAM,MAAM,OAAO,IAAI;CAGhC,MAAM,QAAQ,MAAM,MAAM,IAAI,IAAI;AAClC,KAAI,CAAC,MAAO,QAAO;AAEnB,OAAM,MAAM,OAAO,IAAI;AACvB,OAAM,WAAW,KAAK,MAAM,IAAI,OAAO;AACvC,KAAI,WAAW,cAAc,OAC3B,OAAM,WAAW,KAAK,MAAM,IAAI,OAAO;AAGzC,QAAO;;;;;;;;ACmHT,IAAY,wDAAL;;AAEL;;AAEA;;AAEA;;;;;;;;;;;;;;;;;;;;;;;;ACxIF,SAAgB,gBAAgB,OAAmB,OAA2C;CAC5F,MAAM,iBAAiB,MAAM,GAAG;CAIhC,IAAI,+BAA+B;CAGnC,IAAI,SAAS,aAAa;CAE1B,MAAM,OAAO,MAAM;AACnB,KAAI,KACF,MAAK,MAAM,OAAO,MAAM;EACtB,MAAM,KAAK,MAAM,MAAM,IAAI,IAAI;AAC/B,MAAI,CAAC,GAAI;EAKT,MAAM,CAAC,cAAc,mBAAmB;AAGxC,MAAI,gBAAgB,gBAAgB;AAClC,YAAS,aAAa;AACtB;;AAGF,MAAI,mBAAmB,gBAAgB;AAErC,OAAI,kBAAkB,6BACpB,gCAA+B;AAEjC,YAAS,aAAa;;;AAM5B,QAAO,CAAC,QAAQ,WAAW,aAAa,QAAQ,+BAA+B,EAAE;;;;;;;;;;;;;;;;;;;;ACxCnF,SAAgB,mBACd,OACA,OAGA,KACc;CACd,MAAM,CAAC,aAAa,WAAW,kBAAkB,MAAM;CAGvD,MAAM,CAAC,WAAW,gCAAgC,gBAAgB,OAAO,MAAM;AAC/E,KAAI,cAAc,aAAa,QAAS,QAAO,aAAa;CAC5D,MAAM,cAAc,iBAAiB;AACrC,KACE,cAAc,aAAa,SAC3B,iBAAiB,KACjB,MAAM,+BAA+B,YAKrC,QAAO,aAAa;AAItB,KAAI,MAAM,UACR,QAAO,aAAa;AAEtB,KAAI,iBAAiB,KAAK,MAAM,eAC9B,QAAO,aAAa;AAGtB,QAAO,aAAa;;;;;;;;;;;;;AAiBtB,MAAa,WAAW,OAAmB,OAAmB,QAC5D,mBAAmB,OAAO,OAAO,IAAI,KAAK
,aAAa;;;;;;;;;;;;AAazD,MAAa,WACX,OACA,OAGA,QACY,mBAAmB,OAAO,OAAO,IAAI,KAAK,aAAa;;;;;;;;;;;;AAarE,MAAa,aACX,OACA,OAGA,QACY,mBAAmB,OAAO,OAAO,IAAI,KAAK,aAAa;;;;;;;;;;;AC9FrE,SAAgB,WACd,OAMA,mBAA2B,oBACqD;AAChF,KAAI,CAAC,MAAM,WACT,OAAM,aAAa,MAAM,MAAM,SAAS;CAG1C,IAAI,YAAY;CAChB,IAAI,eAAe;CACnB,IAAI,aAAa;AAEjB,MAAK,IAAI,IAAI,GAAG,IAAI,kBAAkB,KAAK;EACzC,MAAM,OAAO,MAAM,WAAW,MAAM;AAEpC,MAAI,KAAK,MAAM;AACb,SAAM,aAAa,MAAM,MAAM,SAAS;AACxC;;AAGF,eAAa;EACb,MAAM,CAAC,KAAK,SAAS,KAAK;EAE1B,MAAM,MAAM,KAAK,KAAK;AAEtB,MAAI,UAAU,OAAO,OAAO,IAAI,EAAE;AAChC,aAAU,OAAO,KAAK,cAAc,QAAQ;AAC5C,mBAAgB;aACP,QAAQ,OAAO,OAAO,IAAI,EAAE;AACrC,iBAAc;AAEd,OAAI,MAAM,kBACR,WAAU,OAAO,KAAK,cAAc,MAAM;;;CAKhD,MAAM,oBAAoB,MAAM,oBAAoB,aAAa;AACjE,QAAO;EACL;EACA;EACA;EACA,OAAO,YAAY,KAAK,eAAe,qBAAqB,YAAY;EACzE;;;;;;;;;;;;;;;;;ACxCH,SAAgB,gCACd,uBAA+B,2BACvB;CACR,MAAM,6BAA6B,iCAAiC;CAEpE,MAAM,sBAAsB,YAAY;EACtC,OAAO,UAAU,OAAO,eAAe;EAEvC,WAAW;EACX,SAAS;EAET,SAAS;EACT,OAAO;EACR,CAAC;AAKF,QAAO,KAAK,IAAI,GAAG,KAAK,IAAI,GAAG,oBAAoB,CAAC;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;ACatD,SAAgB,qBAA6B;CAC3C,IAAI,mBAAmB;AAEvB,MAAK,MAAM,aAAa,iBAAiB;AACvC,MAAI,UAAU,MAAM,QAAQ,GAAG;AAE7B,aAAU,eAAe;AACzB;;EAKF,IAAI,eAAe;AACnB,MAAI,UAAU,gBAAgB,sBAC5B,gBAAe,UAAU;EAGT;GAEhB,MAAM,yBAAyB,gCAC7B,UAAU,sBACX;AAED,OAAI,gBAAgB,wBAAwB;AAG1C,cAAU,eAAe;AACzB;;;AAKJ,YAAU,eAAe,UAAU,MAAM,OAAO;AAChD,sBAAoB,UAAU;;AAGhC,QAAO;;;;;;;;;;ACnET,MAAa,QAAQ,OACnB,OAGA,YAA4B,EAAE,KACZ;CAClB,MAAM,EACJ,WAAW,iBACX,UAAU,gBACV,MAAM,KAAK,KAAK,EAChB,aAAa,UACX;CACJ,MAAM,YAAY;CAElB,IAAI,kBAAkB;CACtB,IAAI,oBAAoB;AACxB,KAAoB,SAClB,EAAC,CAAE,iBAAiB,qBAAsB,4BAA4B,EAAE,SAAS,UAAU,CAAC;CAG9F,MAAM,mBAAmB,oBAAoB;CAC7C,MAAM,uBAAmC,EAAE;CAK3C,MAAM,kBACJ,oBAAoB,IAAI,qBAAqB,gBAAgB,SAAS;CAExE,IAAI,aAAa;AACjB,QAAO,MAAM;AACX,gBAAc;EAEd,MAAM,kBAAkB,uBAAuB;GAAE;GAAY;GAAkB,CAAC;AAChF,MAAI,CAAC,gBAEH;EAGF,MAAM,EAAE,UAAU,WAAW,iBAAiB,gBAAgB;AAE9D,GAAC,qBAAqB,gBAAgB,yBAAyB,EAAE,EAAE,KAAK,MAAM;AAE9E,MAAI,KAAK,KAAK,GAAG,YAAY,kBAC3B;AAGF,QAAM,SAAS;;AAGjB,0BAAyB,qBAAqB;AAG9C,KAAI,CAAC,WACH,gBAAe,KAAK,MAAM,OAAO,UAAU,EAAE,gBAAgB;;AAKjE,MAAM,mBAAiC,IAAI,OAAO;CAChD,MAAM,IAAI,WAAW,IAAI,GAAG;AAC5B,KAAI,OAAO,EAAE,UAAU,WAAY,GAAE,OAAO;;AAE9C,MAAa,uBAAoC,IAAI,SAAQ,YAAW,aAAa,QAAQ,CAAC;;;;ACzE9F,IAAI,iBAAiB;AACrB,MAAM,6BAA6B;AACnC,MAAa,kBAAgC,EAAE;AAU/C,IAAI,sBAAsB;;;;;;AAO1B,MAAa,eAAe,UAAwB,EAAE,KAAiB;CACrE,MAAM,EACJ,UACA,UACA,aAAa,aACb,UAAU,kBACV,wBAAwB,2BACxB,qBAAqB,sBACrB,kBAAkB,OAClB,oBAAoB,OACpB,kBAAkB,SAChB;AAEJ;AAIA,KAAI,iBAAiB,2BAGnB,SAAQ,KACN,gCAAgC,eAAe,kNAChD;CAGH,MAAM,QAAoB;EACxB,uBAAO,IAAI,KAAK;EAChB,YAAY;EACZ,IAAI,OAAO;AACT,UAAO,MAAM,MAAM;;EAErB;EACA;EACA;EACA;EACA;EACA;EACA;EACA;EACA;EACA,qBAAqB;EACrB,eAAe;EACf,cAAc;EACd,uBAAO,IAAI,KAAK;EACjB;AAED,OAAM,sBAAsB,gBAAgB,KAAK,MAAM,GAAG;AAG1D,KAAI,iBAAiB;AACnB,MAAI,oBAAqB,QAAO;AAChC,wBAAsB;AACtB,EAAK,MAAM,MAAM;;AAGnB,eAAc;AAEd,QAAO;;;;;;;;;;;;AC1ET,MAAa,OAAO,OAAmB,KAAa,MAAc,KAAK,KAAK,KAAc;CACxF,MAAM,QAAQ,MAAM,MAAM,IAAI,IAAI;AAElC,KAAI,CAAC,MAAO,QAAO;AAEnB,KAAI,QAAQ,OAAO,OAAO,IAAI,CAAE,QAAO,MAAM;AAE7C,KAAI,QAAQ,OAAO,OAAO,IAAI,EAAE;AAC9B,MAAI,MAAM,gBACR,WAAU,OAAO,KAAK,cAAc,MAAM;AAE5C,SAAO,MAAM;;AAIf,WAAU,OAAO,KAAK,cAAc,QAAQ;;;;;;;;;;;;AChB9C,MAAa,OAAO,OAAmB,KAAa,MAAc,KAAK,KAAK,KAAc;AACxF,QAAO,IAAI,OAAO,KAAK,IAAI,KAAK;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;ACelC,SAAgB,cACd,OACA,MACA,UAAgC,EAAE,EAGlC,OAAe,KAAK,KAAK,EACnB;CACN,MAAM,UAAU,MAAM,QAAQ,KAAK,GAAG,OAAO,CAAC,KAAK;CACnD,MAAM,UAAU,QAAQ,WAAW;AAEnC,MAAK,MAAM,OAAO,SAAS;EACzB,MAAM,aAAa,MAAM,MAAM,IAAI,IAAI;AAEvC,MAAI,WAGF,KAAI,QACF,YAAW,KAAK;MAEhB,YAAW,KAAK;MAMlB,OAAM,MAAM,IAAI,KAAK,CAAC,UAAU,IAAI,MAAM,UAAU,OAAO,EAAE,CAAC;;;;;;
;;;;;;;;;;;;ACvCpE,MAAa,eACX,OACA,OAGA,MAAc,KAAK,KAAK,KACf;CACT,MAAM,EAAE,KAAK,OAAO,KAAK,UAAU,aAAa,kBAAkB,SAAS;AAE3E,KAAI,UAAU,OAAW;AACzB,KAAI,OAAO,KAAM,OAAM,IAAI,MAAM,eAAe;CAEhD,MAAM,MAAM,YAAY,MAAM;CAC9B,MAAM,cAAc,oBAAoB,MAAM;CAE9C,MAAM,YAAY,MAAM,IAAI,MAAM,MAAM;CACxC,MAAM,QAAoB;EACxB;GACE;GACA;GACA,cAAc,IAAI,YAAY,cAAc;GAC7C;EACD;EACA,OAAO,SAAS,WAAW,CAAC,KAAK,GAAG,MAAM,QAAQ,KAAK,GAAG,OAAO;EAClE;AAED,OAAM,MAAM,IAAI,KAAK,MAAM;;;;;;;;;;;;;;;;;;AChB7B,IAAa,gBAAb,MAA2B;CACzB,AAAQ;;;;;;;;;;;;;;;;;CAkBR,YAAY,SAAwB;AAClC,OAAK,QAAQ,YAAY,QAAQ;;;;;;;;;;;;;;;;;CAkBnC,IAAI,OAAe;AACjB,SAAO,KAAK,MAAM;;;;;;;;;;;;;;;;;;;;;;;CAwBpB,IAAiB,KAA4B;AAC3C,SAAO,IAAI,KAAK,OAAO,IAAI;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CA8B7B,IACE,KACA,OACA,SAKM;AACN,cAAY,KAAK,OAAO;GACtB;GACA;GACA,KAAK,SAAS;GACd,aAAa,SAAS;GACtB,MAAM,SAAS;GAChB,CAAC;;;;;;;;;;;;;;;;;;;CAoBJ,OAAO,KAAsB;AAC3B,SAAO,UAAU,KAAK,OAAO,IAAI;;;;;;;;;;;;;;;;;;;;;;;;CAyBnC,IAAI,KAAsB;AACxB,SAAO,IAAI,KAAK,OAAO,IAAI;;;;;;;;;;;;;;;;;;CAmB7B,QAAc;AAEZ,QAAM,KAAK,MAAM;;;;;;;;;;;;;;;;;;;;;;;;CAyBnB,cAAc,MAAyB,SAAsC;AAC3E,gBAAc,KAAK,OAAO,MAAM,WAAW,EAAE,CAAC"}
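The sourcemap above embeds the full TypeScript sources for the bundle, including the public `LocalTtlCache` class and its JSDoc. As a quick orientation to the API those embedded sources describe, here is a minimal usage sketch; importing `LocalTtlCache` from `@neezco/cache` is an assumption about the package's root export, and the keys and values are illustrative only.

```typescript
import { LocalTtlCache } from "@neezco/cache"; // assumed root export

// Options mirror the embedded CacheOptions: defaultTtl, defaultStaleWindow, onDelete, etc.
const cache = new LocalTtlCache({
  defaultTtl: 30 * 60 * 1000,     // entries live 30 minutes by default
  defaultStaleWindow: 60 * 1000,  // may be served stale for 1 minute after expiry
  onDelete: (key, _value, reason) => console.log(`deleted ${key} (${reason})`),
});

// Per-entry TTL, stale window, and tags override the cache-level defaults.
cache.set("user:123", { name: "Alice" }, {
  ttl: 5 * 60 * 1000,
  staleWindow: 60 * 1000,
  tags: "user:123",
});

const user = cache.get<{ name: string }>("user:123"); // { name: "Alice" } while fresh or stale
const exists = cache.has("user:123");                 // true while fresh or within the stale window

// Invalidating a tag with `asStale: true` marks matching entries stale instead of expired,
// so they keep being served until their own stale window runs out.
cache.invalidateTag("user:123", { asStale: true });

cache.delete("user:123"); // manual delete; triggers onDelete with reason "manual"
```

Design note, per the embedded `computeEntryStatus` logic: a stale window is always relative to the entry's own expiration moment, so a tag invalidated `asStale` only affects entries that actually carry a stale window.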
1
+ {"version":3,"file":"index.cjs","names":["performance"],"sources":["../../src/cache/clear.ts","../../src/defaults.ts","../../src/utils/get-process-memory-limit.ts","../../src/utils/process-monitor.ts","../../src/utils/start-monitor.ts","../../src/sweep/batchUpdateExpiredRatio.ts","../../src/utils/interpolate.ts","../../src/sweep/calculate-optimal-sweep-params.ts","../../src/sweep/select-instance-to-sweep.ts","../../src/cache/delete.ts","../../src/types.ts","../../src/utils/status-from-tags.ts","../../src/cache/validators.ts","../../src/sweep/sweep-once.ts","../../src/sweep/calculate-optimal-max-expired-ratio.ts","../../src/sweep/update-weight.ts","../../src/sweep/sweep.ts","../../src/cache/create-cache.ts","../../src/cache/get.ts","../../src/cache/has.ts","../../src/cache/invalidate-tag.ts","../../src/cache/set.ts","../../src/index.ts"],"sourcesContent":["import type { CacheState } from \"../types\";\n\n/**\n * Clears all entries from the cache without invoking callbacks.\n *\n * @note The `onDelete` callback is NOT invoked during a clear operation.\n * This is intentional to avoid unnecessary overhead when bulk-removing entries.\n *\n * @param state - The cache state.\n * @returns void\n */\nexport const clear = (state: CacheState): void => {\n state.store.clear();\n};\n","// Time Unit Constants\n// Base temporal units used throughout the caching system.\nconst ONE_SECOND: number = 1000;\nconst ONE_MINUTE: number = 60 * ONE_SECOND;\n\n/**\n * ===================================================================\n * Cache Entry Lifecycle\n * Default TTL and stale window settings for short-lived cache entries.\n * ===================================================================\n */\n\n/**\n * Default Time-To-Live in milliseconds for cache entries.\n * @default 1_800_000 (30 minutes)\n */\nexport const DEFAULT_TTL: number = 30 * ONE_MINUTE;\n\n/**\n * Default stale window in milliseconds after expiration.\n * Allows serving slightly outdated data while fetching fresh data.\n */\nexport const DEFAULT_STALE_WINDOW: number = 0 as const;\n\n/**\n * Maximum number of entries the cache can hold.\n * Beyond this limit, new entries are ignored.\n */\nexport const DEFAULT_MAX_SIZE: number = Infinity;\n\n/**\n * Default maximum memory size in MB the cache can use.\n * Beyond this limit, new entries are ignored.\n * @default Infinite (unlimited)\n */\nexport const DEFAULT_MAX_MEMORY_SIZE: number = Infinity;\n\n/**\n * ===================================================================\n * Sweep & Cleanup Operations\n * Parameters controlling how and when expired entries are removed.\n * ===================================================================\n */\n\n/**\n * Maximum number of keys to process in a single sweep batch.\n * Higher values = more aggressive cleanup, lower latency overhead.\n */\nexport const MAX_KEYS_PER_BATCH: number = 1000;\n\n/**\n * Minimal expired ratio enforced during sweeps.\n * Ensures control sweeps run above {@link EXPIRED_RATIO_MEMORY_THRESHOLD}.\n */\nexport const MINIMAL_EXPIRED_RATIO: number = 0.05;\n\n/**\n * Memory usage threshold (normalized 0–1) triggering control sweeps.\n * At or above this level, sweeping becomes more aggressive.\n */\nexport const EXPIRED_RATIO_MEMORY_THRESHOLD: number = 0.8;\n\n/**\n * Maximum allowed expired ratio when memory usage is low.\n * Upper bound for interpolation with MINIMAL_EXPIRED_RATIO.\n * Recommended range: `0.3 – 0.5` .\n */\nexport const DEFAULT_MAX_EXPIRED_RATIO: number = 0.4;\n\n/**\n * 
===================================================================\n * Sweep Intervals & Timing\n * Frequency and time budgets for cleanup operations.\n * ===================================================================\n */\n\n/**\n * Optimal interval in milliseconds between sweeps.\n * Used when system load is minimal and metrics are available.\n */\nexport const OPTIMAL_SWEEP_INTERVAL: number = 2 * ONE_SECOND;\n\n/**\n * Worst-case interval in milliseconds between sweeps.\n * Used when system load is high or metrics unavailable.\n */\nexport const WORST_SWEEP_INTERVAL: number = 200;\n\n/**\n * Maximum time budget in milliseconds for sweep operations.\n * Prevents sweeping from consuming excessive CPU during high load.\n */\nexport const WORST_SWEEP_TIME_BUDGET: number = 40;\n\n/**\n * Optimal time budget in milliseconds for each sweep cycle.\n * Used when performance metrics are not available or unreliable.\n */\nexport const OPTIMAL_SWEEP_TIME_BUDGET_IF_NOTE_METRICS_AVAILABLE: number = 15;\n\n/**\n * ===================================================================\n * Memory Management\n * Process limits and memory-safe thresholds.\n * ===================================================================\n */\n\n/**\n * Default maximum process memory limit in megabytes.\n * Acts as fallback when environment detection is unavailable.\n * NOTE: Overridable via environment detection at runtime.\n */\nexport const DEFAULT_MAX_PROCESS_MEMORY_MB: number = 1024;\n\n/**\n * ===================================================================\n * System Utilization Weights\n * Balance how memory, CPU, and event-loop pressure influence sweep behavior.\n * Sum of all weights: 10 + 8.5 + 6.5 = 25\n * ===================================================================\n */\n\n/**\n * Weight applied to memory utilization in sweep calculations.\n * Higher weight = memory pressure has more influence on sweep aggressiveness.\n */\nexport const DEFAULT_MEMORY_WEIGHT: number = 10;\n\n/**\n * Weight applied to CPU utilization in sweep calculations.\n * Combined with event-loop weight to balance CPU-related pressure.\n */\nexport const DEFAULT_CPU_WEIGHT: number = 8.5;\n\n/**\n * Weight applied to event-loop utilization in sweep calculations.\n * Complements CPU weight to assess overall processing capacity.\n */\nexport const DEFAULT_LOOP_WEIGHT: number = 6.5;\n","import fs from \"fs\";\nimport v8 from \"v8\";\n\n/**\n * Reads a number from a file.\n * @param path File path to read the number from.\n * @returns The number read from the file, or null if reading fails.\n */\nfunction readNumber(path: string): number | null {\n try {\n const raw = fs.readFileSync(path, \"utf8\").trim();\n const n = Number(raw);\n return Number.isFinite(n) ? 
n : null;\n } catch {\n return null;\n }\n}\n\n/**\n * Gets the memory limit imposed by cgroups, if any.\n * @return The memory limit in bytes, or null if no limit is found.\n */\nfunction getCgroupLimit(): number | null {\n // cgroup v2\n const v2 = readNumber(\"/sys/fs/cgroup/memory.max\");\n if (v2 !== null) return v2;\n\n // cgroup v1\n const v1 = readNumber(\"/sys/fs/cgroup/memory/memory.limit_in_bytes\");\n if (v1 !== null) return v1;\n\n return null;\n}\n\n/**\n * Gets the effective memory limit for the current process, considering both V8 heap limits and cgroup limits.\n * @returns The effective memory limit in bytes.\n */\nexport function getProcessMemoryLimit(): number {\n const heapLimit = v8.getHeapStatistics().heap_size_limit;\n const cgroupLimit = getCgroupLimit();\n\n if (cgroupLimit && cgroupLimit > 0 && cgroupLimit < Infinity) {\n return Math.min(heapLimit, cgroupLimit);\n }\n\n return heapLimit;\n}\n","import { performance, type EventLoopUtilization } from \"perf_hooks\";\n\n/**\n * Creates a performance monitor that periodically samples memory usage,\n * CPU usage, and event loop utilization for the current Node.js process.\n *\n * The monitor runs on a configurable interval and optionally invokes a\n * callback with the collected metrics on each cycle. It also exposes\n * methods to start and stop monitoring, retrieve the latest metrics,\n * and update configuration dynamically.\n *\n * @param options Configuration options for the monitor, including sampling\n * interval, maximum thresholds for normalization, and an optional callback.\n * @returns An API object that allows controlling the monitor lifecycle.\n */\nexport function createMonitorObserver(\n options?: Partial<CreateMonitorObserverOptions>,\n): ReturnCreateMonitor {\n let intervalId: NodeJS.Timeout | null = null;\n\n let lastMetrics: PerformanceMetrics | null = null;\n\n let prevHrtime = process.hrtime.bigint();\n\n let prevMem = process.memoryUsage();\n let prevCpu = process.cpuUsage();\n let prevLoop = performance.eventLoopUtilization();\n let lastCollectedAt = Date.now();\n\n const config = {\n interval: options?.interval ?? 500,\n // options.maxMemory is expected in MB; store bytes internally\n maxMemory: (options?.maxMemory ?? 
512) * 1024 * 1024,\n };\n\n function start(): void {\n if (intervalId) return; // already running\n\n intervalId = setInterval(() => {\n try {\n const now = Date.now();\n\n const metrics = collectMetrics({\n prevCpu,\n prevHrtime,\n prevMem,\n prevLoop,\n maxMemory: config.maxMemory,\n collectedAtMs: now,\n previousCollectedAtMs: lastCollectedAt,\n interval: config.interval,\n });\n\n lastMetrics = metrics;\n options?.callback?.(metrics);\n\n prevCpu = metrics.cpu.total;\n prevLoop = metrics.loop.total;\n prevMem = metrics.memory.total;\n\n prevHrtime = process.hrtime.bigint();\n lastCollectedAt = now;\n } catch (e: unknown) {\n stop();\n throw new Error(\"MonitorObserver: Not available\", { cause: e });\n }\n }, config.interval);\n\n if (typeof intervalId.unref === \"function\") {\n intervalId.unref();\n }\n }\n\n function stop(): void {\n if (intervalId) {\n clearInterval(intervalId);\n intervalId = null;\n }\n }\n\n function getMetrics(): PerformanceMetrics | null {\n if (lastMetrics) {\n return lastMetrics;\n }\n return null;\n }\n\n function updateConfig(newConfig: Partial<CreateMonitorObserverOptions>): void {\n if (newConfig.maxMemory !== undefined) {\n // convert MB -> bytes\n config.maxMemory = newConfig.maxMemory * 1024 * 1024;\n }\n\n if (newConfig.interval !== undefined) {\n config.interval = newConfig.interval;\n\n // restart if active to apply new interval\n if (intervalId) {\n stop();\n start();\n }\n }\n }\n\n return {\n start,\n stop,\n getMetrics,\n updateConfig,\n };\n}\n\n/**\n * Collects and normalizes performance metrics for the current process,\n * including memory usage, CPU usage, and event loop utilization.\n *\n * CPU and event loop metrics are computed as deltas relative to previously\n * recorded values. All metrics are normalized into a utilization between 0 and 1\n * based on the configured maximum thresholds.\n *\n * @param props Previous metric snapshots and normalization limits.\n * @returns A structured object containing normalized performance metrics.\n */\nexport function collectMetrics(props: {\n prevMem: NodeJS.MemoryUsage;\n prevCpu: NodeJS.CpuUsage;\n prevHrtime: bigint;\n prevLoop: EventLoopUtilization;\n maxMemory: number; // bytes\n collectedAtMs: number;\n previousCollectedAtMs: number;\n interval: number;\n}): PerformanceMetrics {\n const nowHrtime = process.hrtime.bigint();\n\n const elapsedNs = Number(nowHrtime - props.prevHrtime);\n const elapsedMs = elapsedNs / 1e6;\n const actualElapsed = props.collectedAtMs - props.previousCollectedAtMs;\n\n const mem = process.memoryUsage();\n const deltaMem: NodeJS.MemoryUsage = {\n rss: mem.rss - props.prevMem.rss,\n heapTotal: mem.heapTotal - props.prevMem.heapTotal,\n heapUsed: mem.heapUsed - props.prevMem.heapUsed,\n external: mem.external - props.prevMem.external,\n arrayBuffers: mem.arrayBuffers - props.prevMem.arrayBuffers,\n };\n const memRatio = Math.min(1, mem.rss / props.maxMemory);\n\n const cpuDelta = process.cpuUsage(props.prevCpu);\n const cpuMs = (cpuDelta.system + cpuDelta.user) / 1e3;\n const cpuRatio = cpuMs / elapsedMs;\n\n const loop = performance.eventLoopUtilization(props.prevLoop);\n\n return {\n cpu: {\n // deltaMs: cpuMs, // remove to avoid confusion with different unit type\n utilization: cpuRatio,\n delta: cpuDelta,\n total: process.cpuUsage(),\n },\n\n loop: {\n utilization: loop.utilization,\n delta: loop,\n total: performance.eventLoopUtilization(),\n },\n\n memory: {\n utilization: memRatio,\n delta: deltaMem,\n total: mem,\n },\n\n collectedAt: props.collectedAtMs,\n 
previousCollectedAt: props.previousCollectedAtMs,\n interval: props.interval,\n actualElapsed,\n };\n}\n\n// -----------------------------------------------------------------\n\n/**\n * Represents a metric extended with a normalized utilization between 0 and 1.\n *\n * The utilization indicates how close the metric is to its configured maximum\n * threshold, where 0 means minimal usage and 1 means the limit has been reached.\n *\n * @typeParam T The underlying metric type being normalized.\n */\nexport type NormalizedMetric<T> = T & {\n /** Normalized value between 0 and 1 */\n utilization: number;\n};\n\n/**\n * PerformanceMetrics describes the actual shape returned by collectMetrics.\n * All metric groups include raw `delta` and `total` objects plus a normalized utilization.\n */\nexport interface PerformanceMetrics {\n memory: NormalizedMetric<{\n delta: NodeJS.MemoryUsage;\n total: NodeJS.MemoryUsage;\n }>;\n\n cpu: NormalizedMetric<{\n delta: NodeJS.CpuUsage;\n total: NodeJS.CpuUsage;\n }>;\n\n loop: NormalizedMetric<{\n delta: EventLoopUtilization;\n total: EventLoopUtilization;\n }>;\n\n /** Timestamp in milliseconds when this metric was collected */\n collectedAt: number;\n\n /** Timestamp in milliseconds of the previous metric collection */\n previousCollectedAt: number;\n\n /** Interval in milliseconds at which the monitor is running */\n interval: number;\n\n /** Actual elapsed time in milliseconds since the last collection */\n actualElapsed: number;\n}\n\n/**\n * Options for createMonitorObserver.\n */\nexport interface CreateMonitorObserverOptions {\n /** Interval between samples in ms. Default: 500 */\n interval?: number;\n\n /** Maximum RSS memory in megabytes (MB) used for normalization. */\n maxMemory?: number;\n\n /** Optional callback invoked on each metrics sample. 
*/\n callback?: (metrics: PerformanceMetrics) => void;\n}\n\n/**\n * Public API returned by `createMonitorObserver`.\n *\n * Provides methods to start and stop monitoring, retrieve the latest metrics,\n * and update the monitor configuration at runtime.\n */\nexport interface ReturnCreateMonitor {\n /** Stops the monitoring interval */\n stop: () => void;\n\n /** Starts the monitoring interval */\n start: () => void;\n\n /** Returns the last collected metrics or null if none have been collected yet */\n getMetrics: () => PerformanceMetrics | null;\n\n /** Allows updating the monitor configuration on the fly */\n updateConfig: (newConfig: Partial<CreateMonitorObserverOptions>) => void;\n}\n","import { DEFAULT_MAX_PROCESS_MEMORY_MB, WORST_SWEEP_INTERVAL } from \"../defaults\";\n\nimport { getProcessMemoryLimit } from \"./get-process-memory-limit\";\nimport {\n createMonitorObserver,\n type PerformanceMetrics,\n type ReturnCreateMonitor,\n} from \"./process-monitor\";\n\nlet _monitorInstance: ReturnCreateMonitor | null = null;\n\n/** Latest collected metrics from the monitor */\nexport let _metrics: PerformanceMetrics | null;\n\n/** Maximum memory limit for the monitor (in MB) */\nexport let maxMemoryLimit: number = DEFAULT_MAX_PROCESS_MEMORY_MB;\n\n/** Use 90% of the effective limit */\nexport const SAFE_MEMORY_LIMIT_RATIO = 0.9;\n\nexport function startMonitor(): void {\n if (__BROWSER__) {\n // Ignore monitor in browser environments\n return;\n }\n\n if (!_monitorInstance) {\n try {\n const processMemoryLimit = getProcessMemoryLimit();\n\n if (processMemoryLimit && processMemoryLimit > 0) {\n maxMemoryLimit = (processMemoryLimit / 1024 / 1024) * SAFE_MEMORY_LIMIT_RATIO;\n }\n } catch {\n // TODO: proper logger\n // Ignore errors and use default\n // console.log(\"error getProcessMemoryLimit:\", e);\n }\n\n _monitorInstance = createMonitorObserver({\n callback(metrics) {\n _metrics = metrics;\n },\n interval: WORST_SWEEP_INTERVAL,\n maxMemory: maxMemoryLimit, // 1 GB\n });\n\n _monitorInstance.start();\n }\n}\n","import { _instancesCache } from \"../cache/create-cache\";\n\n/**\n * Updates the expired ratio for each cache instance based on the collected ratios.\n * @param currentExpiredRatios - An array of arrays containing expired ratios for each cache instance.\n * @internal\n */\nexport function _batchUpdateExpiredRatio(currentExpiredRatios: number[][]): void {\n for (const inst of _instancesCache) {\n const ratios = currentExpiredRatios[inst._instanceIndexState];\n if (ratios && ratios.length > 0) {\n const avgRatio = ratios.reduce((sum, val) => sum + val, 0) / ratios.length;\n\n const alpha = 0.6; // NOTE: this must be alway higher than 0.5 to prioritize recent avgRatio\n inst._expiredRatio = inst._expiredRatio * (1 - alpha) + avgRatio * alpha;\n }\n }\n}\n","/**\n * Interpolates a value between two numeric ranges.\n *\n * Maps `value` from [fromStart, fromEnd] to [toStart, toEnd].\n * Works with inverted ranges, negative values, and any numeric input.\n */\nexport function interpolate({\n value,\n fromStart,\n fromEnd,\n toStart,\n toEnd,\n}: {\n value: number;\n fromStart: number;\n fromEnd: number;\n toStart: number;\n toEnd: number;\n}): number {\n // Explicit and predictable: avoid division by zero.\n if (fromStart === fromEnd) return toStart;\n\n const t = (value - fromStart) / (fromEnd - fromStart);\n return toStart + t * (toEnd - toStart);\n}\n","import {\n DEFAULT_CPU_WEIGHT,\n DEFAULT_LOOP_WEIGHT,\n DEFAULT_MEMORY_WEIGHT,\n OPTIMAL_SWEEP_INTERVAL,\n WORST_SWEEP_INTERVAL,\n 
WORST_SWEEP_TIME_BUDGET,\n} from \"../defaults\";\nimport { interpolate } from \"../utils/interpolate\";\nimport type { PerformanceMetrics } from \"../utils/process-monitor\";\n\n/**\n * Weights for calculating the weighted utilization ratio.\n * Each weight determines how strongly each metric influences the final ratio.\n */\nexport interface UtilizationWeights {\n /** Weight applied to memory utilization (non-inverted). Default: 1 */\n memory?: number;\n\n /** Weight applied to CPU utilization (inverted). Default: 1 */\n cpu?: number;\n\n /** Weight applied to event loop utilization (inverted). Default: 1 */\n loop?: number;\n}\n\n/**\n * Represents the calculated optimal sweep parameters based on system metrics.\n */\nexport interface OptimalSweepParams {\n /** The optimal interval in milliseconds between sweep operations. */\n sweepIntervalMs: number;\n\n /** The optimal maximum time budget in milliseconds for a sweep cycle. */\n sweepTimeBudgetMs: number;\n}\n\n/**\n * Options for customizing the sweep parameter calculation.\n */\ninterface CalculateOptimalSweepParamsOptions {\n /** System performance metrics to base the calculations on. */\n metrics: PerformanceMetrics;\n\n /** Optional custom weights for each utilization metric. */\n weights?: UtilizationWeights;\n\n /** Interval (ms) used when system load is minimal. */\n optimalSweepIntervalMs?: number;\n\n /** Interval (ms) used when system load is maximal. */\n worstSweepIntervalMs?: number;\n\n /** Maximum sweep time budget (ms) under worst-case load. */\n worstSweepTimeBudgetMs?: number;\n}\n\n/**\n * Calculates adaptive sweep parameters based on real-time system utilization.\n *\n * Memory utilization is used as-is: higher memory usage → more conservative sweeps.\n * CPU and event loop utilization are inverted: lower usage → more conservative sweeps.\n *\n * This inversion ensures:\n * - When CPU and loop are *free*, sweeping becomes more aggressive (worst-case behavior).\n * - When CPU and loop are *busy*, sweeping becomes more conservative (optimal behavior).\n *\n * The final ratio is a weighted average of the three metrics, clamped to [0, 1].\n * This ratio is then used to interpolate between optimal and worst-case sweep settings.\n *\n * @param options - Optional configuration for weights and sweep bounds.\n * @returns Interpolated sweep interval, time budget, and the ratio used.\n */\nexport const calculateOptimalSweepParams = (\n options: CalculateOptimalSweepParamsOptions,\n): OptimalSweepParams => {\n const {\n metrics,\n weights = {},\n optimalSweepIntervalMs = OPTIMAL_SWEEP_INTERVAL,\n worstSweepIntervalMs = WORST_SWEEP_INTERVAL,\n worstSweepTimeBudgetMs = WORST_SWEEP_TIME_BUDGET,\n } = options;\n\n // Resolve metric weights (default = 1)\n const memoryWeight = weights.memory ?? DEFAULT_MEMORY_WEIGHT;\n const cpuWeight = weights.cpu ?? DEFAULT_CPU_WEIGHT;\n const loopWeight = weights.loop ?? DEFAULT_LOOP_WEIGHT;\n\n // Memory utilization is used directly (0–1)\n const memoryUtilization = metrics?.memory.utilization ?? 0;\n\n // Raw CPU and loop utilization (0–1)\n const cpuUtilizationRaw = metrics?.cpu.utilization ?? 0;\n const loopUtilizationRaw = metrics?.loop.utilization ?? 
0;\n\n // Invert CPU and loop utilization:\n // - Low CPU/loop usage → high inverted value → pushes toward worst-case behavior\n // - High CPU/loop usage → low inverted value → pushes toward optimal behavior\n const cpuUtilization = 1 - cpuUtilizationRaw;\n const loopUtilization = 1 - loopUtilizationRaw;\n\n // Weighted average of all metrics\n const weightedSum =\n memoryUtilization * memoryWeight + cpuUtilization * cpuWeight + loopUtilization * loopWeight;\n\n const totalWeight = memoryWeight + cpuWeight + loopWeight;\n\n // Final utilization ratio clamped to [0, 1]\n const ratio = Math.min(1, Math.max(0, weightedSum / totalWeight));\n\n // Interpolate sweep interval based on the ratio\n const sweepIntervalMs = interpolate({\n value: ratio,\n fromStart: 0,\n fromEnd: 1,\n toStart: optimalSweepIntervalMs,\n toEnd: worstSweepIntervalMs,\n });\n\n // Interpolate sweep time budget based on the ratio\n const sweepTimeBudgetMs = interpolate({\n value: ratio,\n fromStart: 0,\n fromEnd: 1,\n toStart: 0,\n toEnd: worstSweepTimeBudgetMs,\n });\n\n return {\n sweepIntervalMs,\n sweepTimeBudgetMs,\n };\n};\n","import { _instancesCache } from \"../cache/create-cache\";\nimport type { CacheState } from \"../types\";\n\n/**\n * Selects a cache instance to sweep based on sweep weights or round‑robin order.\n *\n * Two selection modes are supported:\n * - **Round‑robin mode**: If `totalSweepWeight` ≤ 0, instances are selected\n * deterministically in sequence using `batchSweep`. Once all instances\n * have been processed, returns `null`.\n * - **Weighted mode**: If sweep weights are available, performs a probabilistic\n * selection. Each instance’s `_sweepWeight` contributes proportionally to its\n * chance of being chosen.\n *\n * This function depends on `_updateWeightSweep` to maintain accurate sweep weights.\n *\n * @param totalSweepWeight - Sum of all sweep weights across instances.\n * @param batchSweep - Current batch index used for round‑robin selection.\n * @returns The selected `CacheState` instance, `null` if no instance remains,\n * or `undefined` if the cache is empty.\n */\nexport function _selectInstanceToSweep({\n totalSweepWeight,\n batchSweep,\n}: {\n totalSweepWeight: number;\n batchSweep: number;\n}): CacheState | null | undefined {\n // Default selection: initialize with the first instance in the cache list.\n // This acts as a fallback in case no weighted selection occurs.\n let instanceToSweep: CacheState | null | undefined = _instancesCache[0];\n\n if (totalSweepWeight <= 0) {\n // Case 1: No sweep weight assigned (all instances skipped or empty).\n // → Perform a deterministic round‑robin minimal sweep across all instances.\n // Each batch iteration selects the next instance in order.\n if (batchSweep > _instancesCache.length) {\n // If all instances have been processed in this cycle, no instance to sweep.\n instanceToSweep = null;\n }\n instanceToSweep = _instancesCache[batchSweep - 1] as CacheState;\n } else {\n // Case 2: Sweep weights are available.\n // → Perform a probabilistic selection based on relative sweep weights.\n // A random threshold is drawn in [0, totalSweepWeight].\n let threshold = Math.random() * totalSweepWeight;\n\n // Iterate through instances, subtracting each instance’s weight.\n // The first instance that reduces the threshold to ≤ 0 is selected.\n // This ensures that instances with higher weights have proportionally\n // higher probability of being chosen for sweeping.\n for (const inst of _instancesCache) {\n threshold -= inst._sweepWeight;\n if 
(threshold <= 0) {\n instanceToSweep = inst;\n break;\n }\n }\n }\n\n return instanceToSweep;\n}\n","import type { CacheState } from \"../types\";\n\nexport const enum DELETE_REASON {\n MANUAL = \"manual\",\n EXPIRED = \"expired\",\n STALE = \"stale\",\n}\n\n/**\n * Deletes a key from the cache.\n * @param state - The cache state.\n * @param key - The key.\n * @returns A boolean indicating whether the key was successfully deleted.\n */\nexport const deleteKey = (\n state: CacheState,\n key: string,\n reason: DELETE_REASON = DELETE_REASON.MANUAL,\n): boolean => {\n const onDelete = state.onDelete;\n const onExpire = state.onExpire;\n\n if (!onDelete && !onExpire) {\n return state.store.delete(key);\n }\n\n const entry = state.store.get(key);\n if (!entry) return false;\n\n state.store.delete(key);\n state.onDelete?.(key, entry[1], reason);\n if (reason !== DELETE_REASON.MANUAL) {\n state.onExpire?.(key, entry[1], reason);\n }\n\n return true;\n};\n","import type { DELETE_REASON } from \"./cache/delete\";\n\n/**\n * Base configuration shared between CacheOptions and CacheState.\n */\nexport interface CacheConfigBase {\n /**\n * Callback invoked when a key expires naturally.\n * @param key - The expired key.\n * @param value - The value associated with the expired key.\n * @param reason - The reason for deletion ('expired', or 'stale').\n */\n onExpire?: (\n key: string,\n value: unknown,\n reason: Exclude<DELETE_REASON, DELETE_REASON.MANUAL>,\n ) => void;\n\n /**\n * Callback invoked when a key is deleted, either manually or due to expiration.\n * @param key - The deleted key.\n * @param value - The value of the deleted key.\n * @param reason - The reason for deletion ('manual', 'expired', or 'stale').\n */\n onDelete?: (key: string, value: unknown, reason: DELETE_REASON) => void;\n\n /**\n * Default TTL (Time-To-Live) in milliseconds for entries without explicit TTL.\n * @default 1_800_000 (30 minutes)\n */\n defaultTtl: number;\n\n /**\n * Default stale window in milliseconds for entries that do not\n * specify their own `staleWindowMs`.\n *\n * This window determines how long an entry may continue to be\n * served as stale after it reaches its expiration time.\n *\n * The window is always relative to the entry’s own expiration\n * moment, regardless of whether that expiration comes from an\n * explicit `ttl` or from the cache’s default TTL.\n * @default null (No stale window)\n */\n defaultStaleWindow: number;\n\n /**\n * Maximum number of entries the cache can hold.\n * Beyond this limit, new entries are ignored.\n * @default Infinite (unlimited)\n */\n maxSize: number;\n\n /**\n * Maximum memory size in MB the cache can use.\n * Beyond this limit, new entries are ignored.\n * @default Infinite (unlimited)\n */\n maxMemorySize: number;\n\n /**\n * Controls how stale entries are handled when read from the cache.\n *\n * - true → stale entries are purged immediately after being returned.\n * - false → stale entries are retained after being returned.\n *\n * @default false\n */\n purgeStaleOnGet: boolean;\n\n /**\n * Controls how stale entries are handled during sweep operations.\n *\n * - true → stale entries are purged during sweeps.\n * - false → stale entries are retained during sweeps.\n *\n * @default false\n */\n purgeStaleOnSweep: boolean;\n\n /**\n * Whether to automatically start the sweep process when the cache is created.\n *\n * - true → sweep starts automatically.\n * - false → sweep does not start automatically, allowing manual control.\n *\n * @internal\n * @default true\n 
*/\n _autoStartSweep: boolean;\n\n /**\n * Allowed expired ratio for the cache instance.\n */\n _maxAllowExpiredRatio: number;\n}\n\n/**\n * Public configuration options for the TTL cache.\n */\nexport type CacheOptions = Partial<CacheConfigBase>;\n\n/**\n * Options for `invalidateTag` operation. Kept intentionally extensible so\n * future flags can be added without breaking callers.\n */\nexport interface InvalidateTagOptions {\n /** If true, mark affected entries as stale instead of fully expired. */\n asStale?: boolean;\n\n // Allow additional option fields for forward-compatibility.\n [key: string]: unknown;\n}\n\n/**\n * Lifecycle timestamps stored in a Tuple:\n * - 0 → createdAt\n * - 1 → expiresAt\n * - 2 → staleExpiresAt\n */\nexport type EntryTimestamp = [\n /** createdAt: Absolute timestamp the entry was created (Date.now()). */\n number,\n\n /** expiresAt: Absolute timestamp when the entry becomes invalid (Date.now() + TTL). */\n number,\n\n /** staleExpiresAt: Absolute timestamp when the entry stops being stale (Date.now() + staleTTL). */\n number,\n];\n\n/**\n * Represents a single cache entry.\n */\nexport type CacheEntry = [\n EntryTimestamp,\n\n /** The stored value. */\n unknown,\n\n (\n /**\n * Optional list of tags associated with this entry.\n * Tags can be used for:\n * - Group invalidation (e.g., clearing all entries with a given tag)\n * - Namespacing or categorization\n * - Tracking dependencies\n *\n * If no tags are associated, this field is `null`.\n */\n string[] | null\n ),\n];\n\n/**\n * Status of a cache entry.\n */\nexport enum ENTRY_STATUS {\n /** The entry is fresh and valid. */\n FRESH = \"fresh\",\n /** The entry is stale but can still be served. */\n STALE = \"stale\",\n /** The entry has expired and is no longer valid. */\n EXPIRED = \"expired\",\n}\n\n/**\n * Internal state of the TTL cache.\n */\nexport interface CacheState extends CacheConfigBase {\n /** Map storing key-value entries. */\n store: Map<string, CacheEntry>;\n\n /** Current size */\n size: number;\n\n /** Iterator for sweeping keys. */\n _sweepIter: MapIterator<[string, CacheEntry]> | null;\n\n /** Index of this instance for sweep all. */\n _instanceIndexState: number;\n\n /** Expire ratio avg for instance */\n _expiredRatio: number;\n\n /** Sweep weight for instance, calculate based on size and _expiredRatio */\n _sweepWeight: number;\n\n /**\n * Tag invalidation state.\n * Each tag stores:\n * - 0 → moment when the tag was marked as expired (0 if never)\n * - 1 → moment when the tag was marked as stale (0 if never)\n *\n * These timestamps define whether a tag affects an entry based on\n * the entry's creation time. */\n _tags: Map<string, [number, number]>;\n}\n","import { ENTRY_STATUS, type CacheEntry, type CacheState } from \"../types\";\n\n/**\n * Computes the derived status of a cache entry based on its associated tags.\n *\n * Tags may impose stricter expiration or stale rules on the entry. 
Only tags\n * created at or after the entry's creation timestamp are considered relevant.\n *\n * Resolution rules:\n * - If any applicable tag marks the entry as expired, the status becomes `EXPIRED`.\n * - Otherwise, if any applicable tag marks it as stale, the status becomes `STALE`.\n * - If no tag imposes stricter rules, the entry remains `FRESH`.\n *\n * @param state - The cache state containing tag metadata.\n * @param entry - The cache entry whose status is being evaluated.\n * @returns A tuple containing:\n * - The final {@link ENTRY_STATUS} imposed by tags.\n * - The earliest timestamp at which a tag marked the entry as stale\n * (or 0 if no tag imposed a stale rule).\n */\nexport function _statusFromTags(state: CacheState, entry: CacheEntry): [ENTRY_STATUS, number] {\n const entryCreatedAt = entry[0][0];\n\n // Tracks the earliest point in time when any tag marked this entry as stale.\n // Initialized to Infinity so that comparisons always pick the minimum.\n let earliestTagStaleInvalidation = Infinity;\n\n // Default assumption: entry is fresh unless tags override.\n let status = ENTRY_STATUS.FRESH;\n\n const tags = entry[2];\n if (tags) {\n for (const tag of tags) {\n const ts = state._tags.get(tag);\n if (!ts) continue;\n\n // Each tag provides two timestamps:\n // - tagExpiredAt: when the tag forces expiration\n // - tagStaleSinceAt: when the tag forces stale status\n const [tagExpiredAt, tagStaleSinceAt] = ts;\n\n // A tag can only override if it was created after the entry itself.\n if (tagExpiredAt >= entryCreatedAt) {\n status = ENTRY_STATUS.EXPIRED;\n break; // Expired overrides everything, no need to check further.\n }\n\n if (tagStaleSinceAt >= entryCreatedAt) {\n // Keep track of the earliest stale timestamp across all tags.\n if (tagStaleSinceAt < earliestTagStaleInvalidation) {\n earliestTagStaleInvalidation = tagStaleSinceAt;\n }\n status = ENTRY_STATUS.STALE;\n }\n }\n }\n\n // If no tag imposed stale, return 0 for the timestamp.\n return [status, status === ENTRY_STATUS.STALE ? earliestTagStaleInvalidation : 0];\n}\n","import { ENTRY_STATUS, type CacheEntry, type CacheState } from \"../types\";\nimport { _statusFromTags } from \"../utils/status-from-tags\";\n\n/**\n * Computes the final derived status of a cache entry by combining:\n *\n * - The entry's own expiration timestamps (TTL and stale TTL).\n * - Any stricter expiration or stale rules imposed by its associated tags.\n *\n * Precedence rules:\n * - `EXPIRED` overrides everything.\n * - `STALE` overrides `FRESH`.\n * - If neither the entry nor its tags impose stricter rules, the entry is `FRESH`.\n *\n * @param state - The cache state containing tag metadata.\n * @param entry - The cache entry being evaluated.\n * @returns The final {@link ENTRY_STATUS} for the entry.\n */\nexport function computeEntryStatus(\n state: CacheState,\n entry: CacheEntry,\n\n /** @internal */\n now: number,\n): ENTRY_STATUS {\n const [__createdAt, expiresAt, staleExpiresAt] = entry[0];\n\n // 1. 
Status derived from tags\n const [tagStatus, earliestTagStaleInvalidation] = _statusFromTags(state, entry);\n if (tagStatus === ENTRY_STATUS.EXPIRED) return ENTRY_STATUS.EXPIRED;\n const windowStale = staleExpiresAt - expiresAt;\n if (\n tagStatus === ENTRY_STATUS.STALE &&\n staleExpiresAt > 0 &&\n now < earliestTagStaleInvalidation + windowStale\n ) {\n // A tag can mark the entry as stale only if the entry itself supports a stale window.\n // The tag's stale invalidation time is extended by the entry's stale window duration.\n // If \"now\" is still within that extended window, the entry is considered stale.\n return ENTRY_STATUS.STALE;\n }\n\n // 2. Status derived from entry timestamps\n if (now < expiresAt) {\n return ENTRY_STATUS.FRESH;\n }\n if (staleExpiresAt > 0 && now < staleExpiresAt) {\n return ENTRY_STATUS.STALE;\n }\n\n return ENTRY_STATUS.EXPIRED;\n}\n\n// ---------------------------------------------------------------------------\n// Entry status wrappers (semantic helpers built on top of computeEntryStatus)\n// ---------------------------------------------------------------------------\n/**\n * Determines whether a cache entry is fresh.\n *\n * A fresh entry is one whose final derived status is `FRESH`, meaning:\n * - It has not expired according to its own timestamps, and\n * - No associated tag imposes a stricter stale or expired rule.\n *\n * `entry` can be either a {@link CacheEntry} or a pre-computed {@link ENTRY_STATUS}.\n * Passing a pre-computed status avoids recalculating the entry status.\n *\n * @param state - The cache state containing tag metadata.\n * @param entry - The cache entry or pre-computed status being evaluated.\n * @param now - The current timestamp.\n * @returns True if the entry is fresh.\n */\nexport const isFresh = (\n state: CacheState,\n entry: CacheEntry | ENTRY_STATUS,\n now: number,\n): boolean => {\n if (typeof entry === \"string\") {\n // If entry is already a pre-computed status (from tags), it's fresh only if that status is FRESH.\n return entry === ENTRY_STATUS.FRESH;\n }\n\n return computeEntryStatus(state, entry, now) === ENTRY_STATUS.FRESH;\n};\n/**\n * Determines whether a cache entry is stale.\n *\n * A stale entry is one whose final derived status is `STALE`, meaning:\n * - It has passed its TTL but is still within its stale window, or\n * - A tag imposes a stale rule that applies to this entry.\n *\n * `entry` can be either a {@link CacheEntry} or a pre-computed {@link ENTRY_STATUS}.\n * Passing a pre-computed status avoids recalculating the entry status.\n *\n * @param state - The cache state containing tag metadata.\n * @param entry - The cache entry or pre-computed status being evaluated.\n * @param now - The current timestamp.\n * @returns True if the entry is stale.\n */\nexport const isStale = (\n state: CacheState,\n entry: CacheEntry | ENTRY_STATUS,\n\n /** @internal */\n now: number,\n): boolean => {\n if (typeof entry === \"string\") {\n // If entry is already a pre-computed status (from tags), it's stale only if that status is STALE.\n return entry === ENTRY_STATUS.STALE;\n }\n\n return computeEntryStatus(state, entry, now) === ENTRY_STATUS.STALE;\n};\n\n/**\n * Determines whether a cache entry is expired.\n *\n * An expired entry is one whose final derived status is `EXPIRED`, meaning:\n * - It has exceeded both its TTL and stale TTL, or\n * - A tag imposes an expiration rule that applies to this entry.\n *\n * `entry` can be either a {@link CacheEntry} or a pre-computed {@link ENTRY_STATUS}.\n * Passing a 
pre-computed status avoids recalculating the entry status.\n *\n * @param state - The cache state containing tag metadata.\n * @param entry - The cache entry or pre-computed status being evaluated.\n * @param now - The current timestamp.\n * @returns True if the entry is expired.\n */\nexport const isExpired = (\n state: CacheState,\n entry: CacheEntry | ENTRY_STATUS,\n\n /** @internal */\n now: number,\n): boolean => {\n if (typeof entry === \"string\") {\n // If entry is already a pre-computed status (from tags), it's expired only if that status is EXPIRED.\n return entry === ENTRY_STATUS.EXPIRED;\n }\n\n return computeEntryStatus(state, entry, now) === ENTRY_STATUS.EXPIRED;\n};\n\n/**\n * Determines whether a cache entry is valid.\n *\n * A valid entry is one whose final derived status is either:\n * - `FRESH`, or\n * - `STALE` (still within its stale window).\n *\n * Expired entries are considered invalid.\n *\n * `entry` can be either a {@link CacheEntry} or a pre-computed {@link ENTRY_STATUS},\n * or undefined/null if the entry was not found. Passing a pre-computed status avoids\n * recalculating the entry status.\n *\n * @param state - The cache state containing tag metadata.\n * @param entry - The cache entry, pre-computed status, or undefined/null if not found.\n * @param now - The current timestamp (defaults to {@link Date.now}).\n * @returns True if the entry exists and is fresh or stale.\n */\nexport const isValid = (\n state: CacheState,\n entry?: CacheEntry | ENTRY_STATUS | null,\n\n /** @internal */\n now: number = Date.now(),\n): boolean => {\n if (!entry) return false;\n if (typeof entry === \"string\") {\n // If entry is already a pre-computed status (from tags), it's valid if it's FRESH or STALE.\n return entry === ENTRY_STATUS.FRESH || entry === ENTRY_STATUS.STALE;\n }\n\n const status = computeEntryStatus(state, entry, now);\n return status === ENTRY_STATUS.FRESH || status === ENTRY_STATUS.STALE;\n};\n","import { DELETE_REASON, deleteKey } from \"../cache/delete\";\nimport { computeEntryStatus, isExpired, isStale } from \"../cache/validators\";\nimport { MAX_KEYS_PER_BATCH } from \"../defaults\";\nimport { type CacheState } from \"../types\";\n\n/**\n * Performs a single sweep operation on the cache to remove expired and optionally stale entries.\n * Uses a linear scan with a saved pointer to resume from the last processed key.\n * @param state - The cache state.\n * @param _maxKeysPerBatch - Maximum number of keys to process in this sweep.\n * @returns An object containing statistics about the sweep operation.\n */\nexport function _sweepOnce(\n state: CacheState,\n\n /**\n * Maximum number of keys to process in this sweep.\n * @default 1000\n */\n _maxKeysPerBatch: number = MAX_KEYS_PER_BATCH,\n): { processed: number; expiredCount: number; staleCount: number; ratio: number } {\n if (!state._sweepIter) {\n state._sweepIter = state.store.entries();\n }\n\n let processed = 0;\n let expiredCount = 0;\n let staleCount = 0;\n\n for (let i = 0; i < _maxKeysPerBatch; i++) {\n const next = state._sweepIter.next();\n\n if (next.done) {\n state._sweepIter = state.store.entries();\n break;\n }\n\n processed += 1;\n const [key, entry] = next.value;\n\n const now = Date.now();\n\n const status = computeEntryStatus(state, entry, now);\n if (isExpired(state, status, now)) {\n deleteKey(state, key, DELETE_REASON.EXPIRED);\n expiredCount += 1;\n } else if (isStale(state, status, now)) {\n staleCount += 1;\n\n if (state.purgeStaleOnSweep) {\n deleteKey(state, key, 
DELETE_REASON.STALE);\n }\n }\n }\n\n const expiredStaleCount = state.purgeStaleOnSweep ? staleCount : 0;\n return {\n processed,\n expiredCount,\n staleCount,\n ratio: processed > 0 ? (expiredCount + expiredStaleCount) / processed : 0,\n };\n}\n","import {\n DEFAULT_MAX_EXPIRED_RATIO,\n EXPIRED_RATIO_MEMORY_THRESHOLD,\n MINIMAL_EXPIRED_RATIO,\n} from \"../defaults\";\nimport { interpolate } from \"../utils/interpolate\";\nimport { _metrics, SAFE_MEMORY_LIMIT_RATIO } from \"../utils/start-monitor\";\n\n/**\n * Calculates the optimal maximum expired ratio based on current memory utilization.\n *\n * This function interpolates between `maxAllowExpiredRatio` and `MINIMAL_EXPIRED_RATIO`\n * depending on the memory usage reported by `_metrics`. At low memory usage (0%),\n * the optimal ratio equals `maxAllowExpiredRatio`. As memory usage approaches or exceeds\n * 80% of the memory limit, the optimal ratio decreases toward `MINIMAL_EXPIRED_RATIO`.\n *\n * @param maxAllowExpiredRatio - The maximum allowed expired ratio at minimal memory usage.\n * Defaults to `DEFAULT_MAX_EXPIRED_RATIO`.\n * @returns A normalized value between 0 and 1 representing the optimal expired ratio.\n */\nexport function calculateOptimalMaxExpiredRatio(\n maxAllowExpiredRatio: number = DEFAULT_MAX_EXPIRED_RATIO,\n): number {\n const EFFECTIVE_MEMORY_THRESHOLD = EXPIRED_RATIO_MEMORY_THRESHOLD / SAFE_MEMORY_LIMIT_RATIO;\n\n const optimalExpiredRatio = interpolate({\n value: _metrics?.memory.utilization ?? 0,\n\n fromStart: 0, // baseline: memory usage ratio at 0%\n fromEnd: EFFECTIVE_MEMORY_THRESHOLD, // threshold: memory usage ratio at 80% of safe limit\n\n toStart: maxAllowExpiredRatio, // allowed ratio at minimal memory usage\n toEnd: MINIMAL_EXPIRED_RATIO, // allowed ratio at high memory usage (≥80%)\n });\n\n // At 0% memory usage, the optimalExpiredRatio equals maxAllowExpiredRatio.\n // At or above 80% memory usage, the optimalExpiredRatio approaches or falls below MINIMAL_EXPIRED_RATIO.\n\n return Math.min(1, Math.max(0, optimalExpiredRatio));\n}\n","import { _instancesCache } from \"../cache/create-cache\";\nimport { MINIMAL_EXPIRED_RATIO } from \"../defaults\";\n\nimport { calculateOptimalMaxExpiredRatio } from \"./calculate-optimal-max-expired-ratio\";\n\n/**\n * Updates the sweep weight (`_sweepWeight`) for each cache instance.\n *\n * The sweep weight determines the probability that an instance will be selected\n * for a cleanup (sweep) process. It is calculated based on the store size and\n * the ratio of expired keys.\n *\n * This function complements (`_selectInstanceToSweep`), which is responsible\n * for selecting the correct instance based on the weights assigned here.\n *\n * ---\n *\n * ### Sweep systems:\n * 1. **Normal sweep**\n * - Runs whenever the percentage of expired keys exceeds the allowed threshold\n * calculated by `calculateOptimalMaxExpiredRatio`.\n * - It is the main cleanup mechanism and is applied proportionally to the\n * store size and the expired‑key ratio.\n *\n * 2. **Memory‑conditioned sweep (control)**\n * - Works exactly like the normal sweep, except it may run even when it\n * normally wouldn’t.\n * - Only activates under **high memory pressure**.\n * - Serves as an additional control mechanism to adjust weights, keep the\n * system updated, and help prevent memory overflows.\n *\n * 3. 
**Round‑robin sweep (minimal control)**\n * - Always runs, even if the expired ratio is low or memory usage does not\n * require it.\n * - Processes a very small number of keys per instance, much smaller than\n * the normal sweep.\n * - Its main purpose is to ensure that all instances receive at least a\n * periodic weight update and minimal expired‑key control.\n *\n * ---\n * #### Important notes:\n * - A minimum `MINIMAL_EXPIRED_RATIO` (e.g., 5%) is assumed to ensure that\n * control sweeps can always run under high‑memory scenarios.\n * - Even with a minimum ratio, the normal sweep and the memory‑conditioned sweep\n * may **skip execution** if memory usage allows it and the expired ratio is\n * below the optimal maximum.\n * - The round‑robin sweep is never skipped: it always runs with a very small,\n * almost imperceptible cost.\n *\n * @returns The total accumulated sweep weight across all cache instances.\n */\nexport function _updateWeightSweep(): number {\n let totalSweepWeight = 0;\n\n for (const instCache of _instancesCache) {\n if (instCache.store.size <= 0) {\n // Empty instance → no sweep weight needed, skip sweep for this instance.\n instCache._sweepWeight = 0;\n continue;\n }\n\n // Ensure a minimum expired ratio to allow control sweeps.\n // If the real ratio is higher than the minimum, use the real ratio.\n let expiredRatio = MINIMAL_EXPIRED_RATIO;\n if (instCache._expiredRatio > MINIMAL_EXPIRED_RATIO) {\n expiredRatio = instCache._expiredRatio;\n }\n\n if (!__BROWSER__) {\n // In non‑browser environments, compute an optimal maximum allowed ratio.\n const optimalMaxExpiredRatio = calculateOptimalMaxExpiredRatio(\n instCache._maxAllowExpiredRatio,\n );\n\n if (expiredRatio <= optimalMaxExpiredRatio) {\n // If memory usage allows it and the expired ratio is low,\n // this sweep can be skipped. 
The reduced round‑robin sweep will still run.\n instCache._sweepWeight = 0;\n continue;\n }\n }\n\n // Normal sweep: weight proportional to store size and expired ratio.\n instCache._sweepWeight = instCache.store.size * expiredRatio;\n totalSweepWeight += instCache._sweepWeight;\n }\n\n return totalSweepWeight;\n}\n","import { _instancesCache } from \"../cache/create-cache\";\nimport {\n MAX_KEYS_PER_BATCH,\n OPTIMAL_SWEEP_INTERVAL,\n OPTIMAL_SWEEP_TIME_BUDGET_IF_NOTE_METRICS_AVAILABLE,\n} from \"../defaults\";\nimport type { CacheState } from \"../types\";\nimport { _metrics } from \"../utils/start-monitor\";\n\nimport { _batchUpdateExpiredRatio } from \"./batchUpdateExpiredRatio\";\nimport { calculateOptimalSweepParams } from \"./calculate-optimal-sweep-params\";\nimport { _selectInstanceToSweep } from \"./select-instance-to-sweep\";\nimport { _sweepOnce } from \"./sweep-once\";\nimport { _updateWeightSweep } from \"./update-weight\";\n\n/**\n * Performs a sweep operation on the cache to remove expired and optionally stale entries.\n * Uses a linear scan with a saved pointer to resume from the last processed key.\n * @param state - The cache state.\n */\nexport const sweep = async (\n state: CacheState,\n\n /** @internal */\n utilities: SweepUtilities = {},\n): Promise<void> => {\n const {\n schedule = defaultSchedule,\n yieldFn = defaultYieldFn,\n now = Date.now(),\n runOnlyOne = false,\n } = utilities;\n const startTime = now;\n\n let sweepIntervalMs = OPTIMAL_SWEEP_INTERVAL;\n let sweepTimeBudgetMs = OPTIMAL_SWEEP_TIME_BUDGET_IF_NOTE_METRICS_AVAILABLE;\n if (!__BROWSER__ && _metrics) {\n ({ sweepIntervalMs, sweepTimeBudgetMs } = calculateOptimalSweepParams({ metrics: _metrics }));\n }\n\n const totalSweepWeight = _updateWeightSweep();\n const currentExpiredRatios: number[][] = [];\n\n // Reduce the maximum number of keys per batch only when no instance weights are available\n // and the sweep is running in minimal round‑robin control mode. In this case, execute the\n // smallest possible sweep (equivalent to one batch, but divided across instances).\n const maxKeysPerBatch =\n totalSweepWeight <= 0 ? 
MAX_KEYS_PER_BATCH / _instancesCache.length : MAX_KEYS_PER_BATCH;\n\n  let batchSweep = 0;\n  while (true) {\n    batchSweep += 1;\n\n    const instanceToSweep = _selectInstanceToSweep({ batchSweep, totalSweepWeight });\n    if (!instanceToSweep) {\n      // No instance to sweep\n      break;\n    }\n\n    const { ratio } = _sweepOnce(instanceToSweep, maxKeysPerBatch);\n    // Initialize or update the `currentExpiredRatios` array for this instance\n    (currentExpiredRatios[instanceToSweep._instanceIndexState] ??= []).push(ratio);\n\n    if (Date.now() - startTime > sweepTimeBudgetMs) {\n      break;\n    }\n\n    await yieldFn();\n  }\n\n  _batchUpdateExpiredRatio(currentExpiredRatios);\n\n  // Schedule next sweep\n  if (!runOnlyOne) {\n    schedule(() => void sweep(state, utilities), sweepIntervalMs);\n  }\n};\n\n// Default utilities for scheduling and yielding --------------------------------\nconst defaultSchedule: scheduleType = (fn, ms) => {\n  const t = setTimeout(fn, ms);\n  if (typeof t.unref === \"function\") t.unref();\n};\nexport const defaultYieldFn: yieldFnType = () => new Promise(resolve => setImmediate(resolve));\n\n// Types for internal utilities -----------------------------------------------\ntype scheduleType = (fn: () => void, ms: number) => void;\ntype yieldFnType = () => Promise<void>;\ninterface SweepUtilities {\n  /**\n   * Default scheduling function using setTimeout.\n   * This can be overridden for testing.\n   * @internal\n   */\n  schedule?: scheduleType;\n\n  /**\n   * Default yielding function using setImmediate.\n   * This can be overridden for testing.\n   * @internal\n   */\n  yieldFn?: yieldFnType;\n\n  /** Current timestamp for testing purposes. */\n  now?: number;\n\n  /**\n   * If true, only run one sweep cycle.\n   * @internal\n   */\n  runOnlyOne?: boolean;\n}\n","import {\n  DEFAULT_MAX_EXPIRED_RATIO,\n  DEFAULT_MAX_MEMORY_SIZE,\n  DEFAULT_MAX_SIZE,\n  DEFAULT_STALE_WINDOW,\n  DEFAULT_TTL,\n} from \"../defaults\";\nimport { sweep } from \"../sweep/sweep\";\nimport type { CacheOptions, CacheState } from \"../types\";\nimport { startMonitor } from \"../utils/start-monitor\";\n\nlet _instanceCount = 0;\nconst INSTANCE_WARNING_THRESHOLD = 99;\nexport const _instancesCache: CacheState[] = [];\n\n/**\n * Resets the instance count for testing purposes.\n * This function is intended for use in tests to avoid instance limits.\n */\nexport const _resetInstanceCount = (): void => {\n  _instanceCount = 0;\n};\n\nlet _initSweepScheduled = false;\n\n/**\n * Creates the initial state for the TTL cache.\n * @param options - Configuration options for the cache.\n * @returns The initial cache state.\n */\nexport const createCache = (options: CacheOptions = {}): CacheState => {\n  const {\n    onExpire,\n    onDelete,\n    defaultTtl = DEFAULT_TTL,\n    maxSize = DEFAULT_MAX_SIZE,\n    maxMemorySize = DEFAULT_MAX_MEMORY_SIZE,\n    _maxAllowExpiredRatio = DEFAULT_MAX_EXPIRED_RATIO,\n    defaultStaleWindow = DEFAULT_STALE_WINDOW,\n    purgeStaleOnGet = false,\n    purgeStaleOnSweep = false,\n    _autoStartSweep = true,\n  } = options;\n\n  _instanceCount++;\n\n  // NEXT: warn if internal parameters are touched by the user\n\n  if (_instanceCount > INSTANCE_WARNING_THRESHOLD) {\n    // NEXT: Use a proper logging mechanism\n    // NEXT: Create documentation for this\n    console.warn(\n      `Too many instances detected (${_instanceCount}). This may indicate a configuration issue; consider minimizing instance creation or grouping keys by expected expiration ranges. 
See the documentation: https://github.com/neezco/cache/docs/getting-started.md`,\n    );\n  }\n\n  const state: CacheState = {\n    store: new Map(),\n    _sweepIter: null,\n    get size() {\n      return state.store.size;\n    },\n    onExpire,\n    onDelete,\n    maxSize,\n    maxMemorySize,\n    defaultTtl,\n    defaultStaleWindow,\n    purgeStaleOnGet,\n    purgeStaleOnSweep,\n    _maxAllowExpiredRatio,\n    _autoStartSweep,\n    _instanceIndexState: -1,\n    _expiredRatio: 0,\n    _sweepWeight: 0,\n    _tags: new Map(),\n  };\n\n  state._instanceIndexState = _instancesCache.push(state) - 1;\n\n  // Start the sweep process\n  if (_autoStartSweep) {\n    if (_initSweepScheduled) return state;\n    _initSweepScheduled = true;\n    void sweep(state);\n  }\n\n  startMonitor();\n\n  return state;\n};\n","import type { CacheState } from \"../types\";\n\nimport { DELETE_REASON, deleteKey } from \"./delete\";\nimport { computeEntryStatus, isFresh, isStale } from \"./validators\";\n\n/**\n * Retrieves a value from the cache if the entry is valid.\n * @param state - The cache state.\n * @param key - The key to retrieve.\n * @param now - Optional timestamp override (defaults to Date.now()).\n * @returns The cached value if valid, undefined otherwise.\n */\nexport const get = (state: CacheState, key: string, now: number = Date.now()): unknown => {\n  const entry = state.store.get(key);\n\n  if (!entry) return undefined;\n\n  const status = computeEntryStatus(state, entry, now);\n\n  if (isFresh(state, status, now)) return entry[1];\n\n  if (isStale(state, status, now)) {\n    if (state.purgeStaleOnGet) {\n      deleteKey(state, key, DELETE_REASON.STALE);\n    }\n    return entry[1];\n  }\n\n  // If it expired, always delete it\n  deleteKey(state, key, DELETE_REASON.EXPIRED);\n\n  return undefined;\n};\n","import type { CacheState } from \"../types\";\n\nimport { get } from \"./get\";\n\n/**\n * Checks if a key exists in the cache and is not expired.\n * @param state - The cache state.\n * @param key - The key to check.\n * @param now - Optional timestamp override (defaults to Date.now()).\n * @returns True if the key exists and is valid, false otherwise.\n */\nexport const has = (state: CacheState, key: string, now: number = Date.now()): boolean => {\n  return get(state, key, now) !== undefined;\n};\n","import type { CacheState, InvalidateTagOptions } from \"../types\";\n\n/**\n * Invalidates one or more tags so that entries associated with them\n * become expired or stale from this moment onward.\n *\n * Semantics:\n * - Each tag maintains two timestamps in `state._tags`:\n *   [expiredAt, staleSinceAt].\n * - Calling this function updates one of those timestamps to `_now`,\n *   depending on whether the tag should force expiration or staleness.\n *\n * Rules:\n * - If `asStale` is false (default), the tag forces expiration:\n *   entries created before `_now` will be considered expired.\n * - If `asStale` is true, the tag forces staleness:\n *   entries created before `_now` will be considered stale,\n *   but only if they support a stale window.\n *\n * Behavior:\n * - Each call replaces any previous invalidation timestamp for the tag.\n * - Entries created after `_now` are unaffected.\n *\n * @param state - The cache state containing tag metadata.\n * @param tags - A tag or list of tags to invalidate.\n * @param options.asStale - Whether the tag should mark entries as stale.\n */\nexport function invalidateTag(\n  state: CacheState,\n  tags: string | string[],\n  options: InvalidateTagOptions = {},\n\n  /** @internal */\n  _now: number = Date.now(),\n): void {\n  const tagList = Array.isArray(tags) ? 
tags : [tags];\n const asStale = options.asStale ?? false;\n\n for (const tag of tagList) {\n const currentTag = state._tags.get(tag);\n\n if (currentTag) {\n // Update existing tag timestamps:\n // index 0 = expiredAt, index 1 = staleSinceAt\n if (asStale) {\n currentTag[1] = _now;\n } else {\n currentTag[0] = _now;\n }\n } else {\n // Initialize new tag entry with appropriate timestamp.\n // If marking as stale, expiredAt = 0 and staleSinceAt = _now.\n // If marking as expired, expiredAt = _now and staleSinceAt = 0.\n state._tags.set(tag, [asStale ? 0 : _now, asStale ? _now : 0]);\n }\n }\n}\n","import type { CacheState, CacheEntry } from \"../types\";\nimport { _metrics } from \"../utils/start-monitor\";\n\n/**\n * Sets or updates a value in the cache with TTL and an optional stale window.\n *\n * @param state - The cache state.\n * @param input - Cache entry definition (key, value, ttl, staleWindow, tags).\n * @param now - Optional timestamp override used as the base time (defaults to Date.now()).\n * @returns True if the entry was created or updated, false if rejected due to limits or invalid input.\n *\n * @remarks\n * - `ttl` defines when the entry becomes expired.\n * - `staleWindow` defines how long the entry may still be served as stale\n * after the expiration moment (`now + ttl`).\n * - Returns false if value is `undefined` (entry ignored, existing value untouched).\n * - Returns false if new entry would exceed `maxSize` limit (existing keys always allowed).\n * - Returns false if new entry would exceed `maxMemorySize` limit (existing keys always allowed).\n * - Returns true if entry was set or updated (or if existing key was updated at limit).\n */\nexport const setOrUpdate = (\n state: CacheState,\n input: CacheSetOrUpdateInput,\n\n /** @internal */\n now: number = Date.now(),\n): boolean => {\n const { key, value, ttl: ttlInput, staleWindow: staleWindowInput, tags } = input;\n\n if (value === undefined) return false; // Ignore undefined values, leaving existing entry intact if it exists\n if (key == null) throw new Error(\"Missing key.\");\n if (state.size >= state.maxSize && !state.store.has(key)) {\n // Ignore new entries when max size is reached, but allow updates to existing keys\n return false;\n }\n if (\n !__BROWSER__ &&\n _metrics?.memory.total.rss &&\n _metrics?.memory.total.rss >= state.maxMemorySize * 1024 * 1024 &&\n !state.store.has(key)\n ) {\n // Ignore new entries when max memory size is reached, but allow updates to existing keys\n return false;\n }\n\n const ttl = ttlInput ?? state.defaultTtl;\n const staleWindow = staleWindowInput ?? state.defaultStaleWindow;\n\n const expiresAt = ttl > 0 ? now + ttl : Infinity;\n const entry: CacheEntry = [\n [\n now, // createdAt\n expiresAt, // expiresAt\n staleWindow > 0 ? expiresAt + staleWindow : 0, // staleExpiresAt (relative to expiration)\n ],\n value,\n typeof tags === \"string\" ? [tags] : Array.isArray(tags) ? 
tags : null,\n ];\n\n state.store.set(key, entry);\n return true;\n};\n\n/**\n * Input parameters for setting or updating a cache entry.\n */\nexport interface CacheSetOrUpdateInput {\n /**\n * Key under which the value will be stored.\n */\n key: string;\n\n /**\n * Value to be written to the cache.\n *\n * Considerations:\n * - Always overwrites any previous value, if one exists.\n * - `undefined` is ignored, leaving any previous value intact, if one exists.\n * - `null` is explicitly stored as a null value, replacing any previous value, if one exists.\n */\n value: unknown;\n\n /**\n * TTL (Time-To-Live) in milliseconds for this entry.\n */\n ttl?: number;\n\n /**\n * Optional stale window in milliseconds.\n *\n * Defines how long the entry may continue to be served as stale\n * after it has reached its expiration time.\n *\n * The window is always relative to the entry’s own expiration moment,\n * whether that expiration comes from an explicit `ttl` or from the\n * cache’s default TTL.\n *\n * If omitted, the cache-level default stale window is used.\n */\n staleWindow?: number;\n\n /**\n * Optional tags associated with this entry.\n */\n tags?: string | string[];\n}\n","import { clear } from \"./cache/clear\";\nimport { createCache } from \"./cache/create-cache\";\nimport { deleteKey } from \"./cache/delete\";\nimport { get } from \"./cache/get\";\nimport { has } from \"./cache/has\";\nimport { invalidateTag } from \"./cache/invalidate-tag\";\nimport { setOrUpdate } from \"./cache/set\";\nimport type { CacheOptions, CacheState, InvalidateTagOptions } from \"./types\";\n\nexport type { CacheOptions, InvalidateTagOptions } from \"./types\";\n\n/**\n * A TTL (Time-To-Live) cache implementation with support for expiration,\n * stale windows, tag-based invalidation, and automatic sweeping.\n *\n * Provides O(1) constant-time operations for all core methods.\n *\n * @example\n * ```typescript\n * const cache = new LocalTtlCache();\n * cache.set(\"user:123\", { name: \"Alice\" }, { ttl: 5 * 60 * 1000 });\n * const user = cache.get(\"user:123\"); // { name: \"Alice\" }\n * ```\n */\nexport class LocalTtlCache {\n private state: CacheState;\n\n /**\n * Creates a new cache instance.\n *\n * @param options - Configuration options for the cache (defaultTtl, defaultStaleWindow, maxSize, etc.)\n *\n * @example\n * ```typescript\n * const cache = new LocalTtlCache({\n * defaultTtl: 30 * 60 * 1000, // 30 minutes\n * defaultStaleWindow: 5 * 60 * 1000, // 5 minutes\n * maxSize: 500_000, // Maximum 500_000 entries\n * onExpire: (key, value) => console.log(`Expired: ${key}`),\n * onDelete: (key, value, reason) => console.log(`Deleted: ${key}, reason: ${reason}`),\n * });\n * ```\n */\n constructor(options?: CacheOptions) {\n this.state = createCache(options);\n }\n\n /**\n * Gets the current number of entries tracked by the cache.\n *\n * This value may include entries that are already expired but have not yet been\n * removed by the lazy cleanup system. Expired keys are cleaned only when it is\n * efficient to do so, so the count can temporarily be higher than the number of\n * actually valid (non‑expired) entries.\n *\n * @returns The number of entries currently stored (including entries pending cleanup)\n *\n * @example\n * ```typescript\n * console.log(cache.size); // e.g., 42\n * ```\n */\n get size(): number {\n return this.state.size;\n }\n\n /**\n * Retrieves a value from the cache.\n *\n * Returns the value if it exists and is not fully expired. 
If an entry is in the\n * stale window (expired but still within staleWindow), the stale value is returned.\n *\n\n * @param key - The key to retrieve\n * @returns The cached value if valid, undefined otherwise\n *\n * @example\n * ```typescript\n * const user = cache.get<{ name: string }>(\"user:123\");\n * ```\n *\n * @edge-cases\n * - Returns `undefined` if the key doesn't exist\n * - Returns `undefined` if the key has expired beyond the stale window\n * - Returns the stale value if within the stale window\n * - If `purgeStaleOnGet` is enabled, stale entries are deleted after being returned\n */\n get<T = unknown>(key: string): T | undefined {\n return get(this.state, key) as T | undefined;\n }\n\n /**\n * Sets or updates a value in the cache.\n *\n * If the key already exists, it will be completely replaced.\n *\n * @param key - The key under which to store the value\n * @param value - The value to cache (any type)\n * @param options - Optional configuration for this specific entry\n * @param options.ttl - Time-To-Live in milliseconds. Defaults to `defaultTtl`\n * @param options.staleWindow - How long to serve stale data after expiration (milliseconds)\n * @param options.tags - One or more tags for group invalidation\n * @returns True if the entry was set or updated, false if rejected due to limits or invalid input\n *\n * @example\n * ```typescript\n * const success = cache.set(\"user:123\", { name: \"Alice\" }, {\n * ttl: 5 * 60 * 1000,\n * staleWindow: 1 * 60 * 1000,\n * tags: \"user:123\",\n * });\n *\n * if (!success) {\n * console.log(\"Entry was rejected due to size or memory limits\");\n * }\n * ```\n *\n * @edge-cases\n * - Overwriting an existing key replaces it completely\n * - If `ttl` is 0 or Infinite, the entry never expires\n * - If `staleWindow` is larger than `ttl`, the entry can be served as stale longer than it was fresh\n * - Tags are optional; only necessary for group invalidation via `invalidateTag()`\n * - Returns `false` if value is `undefined` (existing value remains untouched)\n * - Returns `false` if new key would exceed [`maxSize`](./docs/configuration.md#maxsize-number) limit\n * - Returns `false` if new key would exceed [`maxMemorySize`](./docs/configuration.md#maxmemorysize-number) limit\n * - Updating existing keys always succeeds, even at limit\n */\n set(\n key: string,\n value: unknown,\n options?: {\n ttl?: number;\n staleWindow?: number;\n tags?: string | string[];\n },\n ): boolean {\n return setOrUpdate(this.state, {\n key,\n value,\n ttl: options?.ttl,\n staleWindow: options?.staleWindow,\n tags: options?.tags,\n });\n }\n\n /**\n * Deletes a specific key from the cache.\n *\n * @param key - The key to delete\n * @returns True if the key was deleted, false if it didn't exist\n *\n * @example\n * ```typescript\n * const wasDeleted = cache.delete(\"user:123\");\n * ```\n *\n * @edge-cases\n * - Triggers the `onDelete` callback with reason `'manual'`\n * - Does not trigger the `onExpire` callback\n * - Returns `false` if the key was already expired\n * - Deleting a non-existent key returns `false` without error\n */\n delete(key: string): boolean {\n return deleteKey(this.state, key);\n }\n\n /**\n * Checks if a key exists in the cache and is not fully expired.\n *\n * Returns true if the key exists and is either fresh or within the stale window.\n * Use this when you only need to check existence without retrieving the value.\n *\n * @param key - The key to check\n * @returns True if the key exists and is valid, false otherwise\n *\n * @example\n * 
```typescript\n   * if (cache.has(\"user:123\")) {\n   *   // Key exists (either fresh or stale)\n   * }\n   * ```\n   *\n   * @edge-cases\n   * - Returns `false` if the key doesn't exist\n   * - Returns `false` if the key has expired beyond the stale window\n   * - Returns `true` if the key is in the stale window (still being served)\n   * - Both `has()` and `get()` have O(1) complexity; prefer `get()` if you need the value\n   */\n  has(key: string): boolean {\n    return has(this.state, key);\n  }\n\n  /**\n   * Removes all entries from the cache at once.\n   *\n   * This is useful for resetting the cache or freeing memory when needed.\n   * The `onDelete` callback is NOT invoked during clear (intentional optimization).\n   *\n   * @example\n   * ```typescript\n   * cache.clear(); // cache.size is now 0\n   * ```\n   *\n   * @edge-cases\n   * - The `onDelete` callback is NOT triggered during clear\n   * - Clears both expired and fresh entries\n   * - Resets `cache.size` to 0\n   */\n  clear(): void {\n    // NEXT: optional support for an onClear callback?\n    clear(this.state);\n  }\n\n  /**\n   * Marks all entries with one or more tags as expired (or stale, if requested).\n   *\n   * If an entry has multiple tags, invalidating ANY of those tags will invalidate the entry.\n   *\n   * @param tags - A single tag (string) or array of tags to invalidate\n   * @param options.asStale - If true, marks entries as stale instead of fully expired (still served from stale window)\n   *\n   * @example\n   * ```typescript\n   * // Invalidate a single tag\n   * cache.invalidateTag(\"user:123\");\n   *\n   * // Invalidate multiple tags\n   * cache.invalidateTag([\"user:123\", \"posts:456\"]);\n   * ```\n   *\n   * @edge-cases\n   * - Does not throw errors if a tag has no associated entries\n   * - Invalidating a tag doesn't prevent new entries from being tagged with it later\n   * - The `onDelete` callback is triggered with reason `'expired'` (even if `asStale` is true)\n   */\n  invalidateTag(tags: string | string[], options?: InvalidateTagOptions): void {\n    invalidateTag(this.state, tags, options ?? 
{});\n  }\n}\n"]}
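The validator overloads described in the embedded sources above (each accepting either a `CacheEntry` or a pre-computed `ENTRY_STATUS`) can be exercised as in the minimal sketch below. It is illustrative only: the import paths mirror the internal module layout shown in this diff and are assumptions, not part of the package's documented public surface.

```typescript
// Illustrative sketch only; module paths follow the internal layout shown above.
import { createCache } from "./cache/create-cache";
import { setOrUpdate } from "./cache/set";
import { computeEntryStatus, isFresh, isStale, isValid } from "./cache/validators";
import { ENTRY_STATUS } from "./types";

const state = createCache({ defaultTtl: 60_000, defaultStaleWindow: 10_000 });
setOrUpdate(state, { key: "user:123", value: { name: "Alice" }, tags: "users" });

const entry = state.store.get("user:123");
if (entry) {
  const now = Date.now();
  // Compute the status once...
  const status = computeEntryStatus(state, entry, now);
  // ...then reuse it with the wrappers, avoiding a second computation,
  // exactly as `get` and `_sweepOnce` do above.
  if (isFresh(state, status, now)) {
    // serve the fresh value: entry[1]
  } else if (isStale(state, status, now)) {
    // serve the stale value and optionally refresh it in the background
  }
  console.log(isValid(state, status, now), status === ENTRY_STATUS.EXPIRED);
}
```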
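The memory-pressure scaling used by `calculateOptimalMaxExpiredRatio` is a linear interpolation between the instance's `maxAllowExpiredRatio` and `MINIMAL_EXPIRED_RATIO`. The sketch below re-implements that arithmetic in isolation; every constant value in it is an assumption chosen for illustration, since the real defaults live in `../defaults` and are not visible in this part of the diff.

```typescript
// Standalone re-implementation of the interpolation step, for illustration only.
// All constant values here are assumptions, not the package's real defaults.
const MINIMAL_EXPIRED_RATIO = 0.05;     // assumed, "e.g., 5%" per the comments above
const MAX_ALLOW_EXPIRED_RATIO = 0.3;    // assumed default
const EFFECTIVE_MEMORY_THRESHOLD = 0.8; // assumed: ~80% of the safe memory limit

function optimalMaxExpiredRatio(memoryUtilization: number): number {
  // Linear map: 0 utilization -> MAX_ALLOW_EXPIRED_RATIO,
  //             threshold     -> MINIMAL_EXPIRED_RATIO.
  const t = memoryUtilization / EFFECTIVE_MEMORY_THRESHOLD;
  const value =
    MAX_ALLOW_EXPIRED_RATIO + t * (MINIMAL_EXPIRED_RATIO - MAX_ALLOW_EXPIRED_RATIO);
  return Math.min(1, Math.max(0, value)); // clamped to [0, 1], as in the source
}

optimalMaxExpiredRatio(0);   // 0.3   -> relaxed: non-critical sweeps may be skipped
optimalMaxExpiredRatio(0.4); // 0.175
optimalMaxExpiredRatio(0.8); // 0.05  -> under memory pressure, sweep much sooner
```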
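Tag invalidation, as documented on `invalidateTag` and the `LocalTtlCache` wrapper above, can be used roughly as follows. The named import from `@neezco/cache` is an assumption about the package entry point; the rest follows the method signatures shown in this diff.

```typescript
import { LocalTtlCache } from "@neezco/cache"; // assumed entry point

const cache = new LocalTtlCache({ defaultTtl: 5 * 60 * 1000, defaultStaleWindow: 60 * 1000 });

// Tag entries at write time so they can be invalidated as a group later.
cache.set("user:123", { name: "Alice" }, { tags: ["users", "user:123"] });
cache.set("user:456", { name: "Bob" }, { tags: ["users"] });

// Hard invalidation: entries created before this call are treated as expired.
cache.invalidateTag("user:123");

// Soft invalidation: entries that have a stale window are served as stale instead.
cache.invalidateTag("users", { asStale: true });

cache.get("user:123"); // undefined (expired via its tag)
cache.get("user:456"); // still returned, now as a stale value
```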
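The TTL plus stale-window lifecycle described for `set`/`get` can be summarized with a small, hedged example (same assumed entry point as above):

```typescript
import { LocalTtlCache } from "@neezco/cache"; // assumed entry point, as above

const cache = new LocalTtlCache({ purgeStaleOnGet: true });

// Fresh for 1s, then servable as stale for a further 2s, then expired.
cache.set("report", { rows: 42 }, { ttl: 1_000, staleWindow: 2_000 });

cache.get("report"); // before 1s      -> fresh value
                     // between 1s-3s  -> stale value is still returned; with
                     //                   purgeStaleOnGet the entry is deleted after being served
                     // after 3s       -> undefined, and the entry is removed as expired
```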