@neezco/cache 0.2.1 → 0.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1 +1 @@
- {"version":3,"file":"index.cjs","names":["performance"],"sources":["../../src/cache/clear.ts","../../src/defaults.ts","../../src/utils/get-process-memory-limit.ts","../../src/utils/process-monitor.ts","../../src/utils/start-monitor.ts","../../src/sweep/batchUpdateExpiredRatio.ts","../../src/utils/interpolate.ts","../../src/sweep/calculate-optimal-sweep-params.ts","../../src/sweep/select-instance-to-sweep.ts","../../src/cache/delete.ts","../../src/types.ts","../../src/utils/status-from-tags.ts","../../src/cache/validators.ts","../../src/sweep/sweep-once.ts","../../src/sweep/calculate-optimal-max-expired-ratio.ts","../../src/sweep/update-weight.ts","../../src/sweep/sweep.ts","../../src/cache/create-cache.ts","../../src/cache/get.ts","../../src/cache/has.ts","../../src/cache/invalidate-tag.ts","../../src/cache/set.ts","../../src/index.ts"],"sourcesContent":["import type { CacheState } from \"../types\";\n\n/**\n * Clears all entries from the cache without invoking callbacks.\n *\n * @note The `onDelete` callback is NOT invoked during a clear operation.\n * This is intentional to avoid unnecessary overhead when bulk-removing entries.\n *\n * @param state - The cache state.\n * @returns void\n */\nexport const clear = (state: CacheState): void => {\n state.store.clear();\n};\n","// Time Unit Constants\n// Base temporal units used throughout the caching system.\nconst ONE_SECOND: number = 1000;\nconst ONE_MINUTE: number = 60 * ONE_SECOND;\n\n/**\n * ===================================================================\n * Cache Entry Lifecycle\n * Default TTL and stale window settings for short-lived cache entries.\n * ===================================================================\n */\n\n/**\n * Default Time-To-Live in milliseconds for cache entries.\n * @default 1_800_000 (30 minutes)\n */\nexport const DEFAULT_TTL: number = 30 * ONE_MINUTE;\n\n/**\n * Default stale window in milliseconds after expiration.\n * Allows serving slightly outdated data while fetching fresh data.\n */\nexport const DEFAULT_STALE_WINDOW: number = 0 as const;\n\n/**\n * Maximum number of entries the cache can hold.\n * Beyond this limit, new entries are ignored.\n */\nexport const DEFAULT_MAX_SIZE: number = Infinity;\n\n/**\n * Default maximum memory size in MB the cache can use.\n * Beyond this limit, new entries are ignored.\n * @default Infinite (unlimited)\n */\nexport const DEFAULT_MAX_MEMORY_SIZE: number = Infinity;\n\n/**\n * ===================================================================\n * Sweep & Cleanup Operations\n * Parameters controlling how and when expired entries are removed.\n * ===================================================================\n */\n\n/**\n * Maximum number of keys to process in a single sweep batch.\n * Higher values = more aggressive cleanup, lower latency overhead.\n */\nexport const MAX_KEYS_PER_BATCH: number = 1000;\n\n/**\n * Minimal expired ratio enforced during sweeps.\n * Ensures control sweeps run above {@link EXPIRED_RATIO_MEMORY_THRESHOLD}.\n */\nexport const MINIMAL_EXPIRED_RATIO: number = 0.05;\n\n/**\n * Memory usage threshold (normalized 0–1) triggering control sweeps.\n * At or above this level, sweeping becomes more aggressive.\n */\nexport const EXPIRED_RATIO_MEMORY_THRESHOLD: number = 0.8;\n\n/**\n * Maximum allowed expired ratio when memory usage is low.\n * Upper bound for interpolation with MINIMAL_EXPIRED_RATIO.\n * Recommended range: `0.3 – 0.5` .\n */\nexport const DEFAULT_MAX_EXPIRED_RATIO: number = 0.4;\n\n/**\n * 
===================================================================\n * Sweep Intervals & Timing\n * Frequency and time budgets for cleanup operations.\n * ===================================================================\n */\n\n/**\n * Optimal interval in milliseconds between sweeps.\n * Used when system load is minimal and metrics are available.\n */\nexport const OPTIMAL_SWEEP_INTERVAL: number = 2 * ONE_SECOND;\n\n/**\n * Worst-case interval in milliseconds between sweeps.\n * Used when system load is high or metrics unavailable.\n */\nexport const WORST_SWEEP_INTERVAL: number = 200;\n\n/**\n * Maximum time budget in milliseconds for sweep operations.\n * Prevents sweeping from consuming excessive CPU during high load.\n */\nexport const WORST_SWEEP_TIME_BUDGET: number = 40;\n\n/**\n * Optimal time budget in milliseconds for each sweep cycle.\n * Used when performance metrics are not available or unreliable.\n */\nexport const OPTIMAL_SWEEP_TIME_BUDGET_IF_NOTE_METRICS_AVAILABLE: number = 15;\n\n/**\n * ===================================================================\n * Memory Management\n * Process limits and memory-safe thresholds.\n * ===================================================================\n */\n\n/**\n * Default maximum process memory limit in megabytes.\n * Acts as fallback when environment detection is unavailable.\n * NOTE: Overridable via environment detection at runtime.\n */\nexport const DEFAULT_MAX_PROCESS_MEMORY_MB: number = 1024;\n\n/**\n * ===================================================================\n * System Utilization Weights\n * Balance how memory, CPU, and event-loop pressure influence sweep behavior.\n * Sum of all weights: 10 + 8.5 + 6.5 = 25\n * ===================================================================\n */\n\n/**\n * Weight applied to memory utilization in sweep calculations.\n * Higher weight = memory pressure has more influence on sweep aggressiveness.\n */\nexport const DEFAULT_MEMORY_WEIGHT: number = 10;\n\n/**\n * Weight applied to CPU utilization in sweep calculations.\n * Combined with event-loop weight to balance CPU-related pressure.\n */\nexport const DEFAULT_CPU_WEIGHT: number = 8.5;\n\n/**\n * Weight applied to event-loop utilization in sweep calculations.\n * Complements CPU weight to assess overall processing capacity.\n */\nexport const DEFAULT_LOOP_WEIGHT: number = 6.5;\n","import fs from \"fs\";\nimport v8 from \"v8\";\n\n/**\n * Reads a number from a file.\n * @param path File path to read the number from.\n * @returns The number read from the file, or null if reading fails.\n */\nfunction readNumber(path: string): number | null {\n try {\n const raw = fs.readFileSync(path, \"utf8\").trim();\n const n = Number(raw);\n return Number.isFinite(n) ? 
n : null;\n } catch {\n return null;\n }\n}\n\n/**\n * Gets the memory limit imposed by cgroups, if any.\n * @return The memory limit in bytes, or null if no limit is found.\n */\nfunction getCgroupLimit(): number | null {\n // cgroup v2\n const v2 = readNumber(\"/sys/fs/cgroup/memory.max\");\n if (v2 !== null) return v2;\n\n // cgroup v1\n const v1 = readNumber(\"/sys/fs/cgroup/memory/memory.limit_in_bytes\");\n if (v1 !== null) return v1;\n\n return null;\n}\n\n/**\n * Gets the effective memory limit for the current process, considering both V8 heap limits and cgroup limits.\n * @returns The effective memory limit in bytes.\n */\nexport function getProcessMemoryLimit(): number {\n const heapLimit = v8.getHeapStatistics().heap_size_limit;\n const cgroupLimit = getCgroupLimit();\n\n if (cgroupLimit && cgroupLimit > 0 && cgroupLimit < Infinity) {\n return Math.min(heapLimit, cgroupLimit);\n }\n\n return heapLimit;\n}\n","import { performance, type EventLoopUtilization } from \"perf_hooks\";\n\n/**\n * Creates a performance monitor that periodically samples memory usage,\n * CPU usage, and event loop utilization for the current Node.js process.\n *\n * The monitor runs on a configurable interval and optionally invokes a\n * callback with the collected metrics on each cycle. It also exposes\n * methods to start and stop monitoring, retrieve the latest metrics,\n * and update configuration dynamically.\n *\n * @param options Configuration options for the monitor, including sampling\n * interval, maximum thresholds for normalization, and an optional callback.\n * @returns An API object that allows controlling the monitor lifecycle.\n */\nexport function createMonitorObserver(\n options?: Partial<CreateMonitorObserverOptions>,\n): ReturnCreateMonitor {\n let intervalId: NodeJS.Timeout | null = null;\n\n let lastMetrics: PerformanceMetrics | null = null;\n\n let prevHrtime = process.hrtime.bigint();\n\n let prevMem = process.memoryUsage();\n let prevCpu = process.cpuUsage();\n let prevLoop = performance.eventLoopUtilization();\n let lastCollectedAt = Date.now();\n\n const config = {\n interval: options?.interval ?? 500,\n // options.maxMemory is expected in MB; store bytes internally\n maxMemory: (options?.maxMemory ?? 
512) * 1024 * 1024,\n };\n\n function start(): void {\n if (intervalId) return; // already running\n\n intervalId = setInterval(() => {\n try {\n const now = Date.now();\n\n const metrics = collectMetrics({\n prevCpu,\n prevHrtime,\n prevMem,\n prevLoop,\n maxMemory: config.maxMemory,\n collectedAtMs: now,\n previousCollectedAtMs: lastCollectedAt,\n interval: config.interval,\n });\n\n lastMetrics = metrics;\n options?.callback?.(metrics);\n\n prevCpu = metrics.cpu.total;\n prevLoop = metrics.loop.total;\n prevMem = metrics.memory.total;\n\n prevHrtime = process.hrtime.bigint();\n lastCollectedAt = now;\n } catch (e: unknown) {\n stop();\n throw new Error(\"MonitorObserver: Not available\", { cause: e });\n }\n }, config.interval);\n\n if (typeof intervalId.unref === \"function\") {\n intervalId.unref();\n }\n }\n\n function stop(): void {\n if (intervalId) {\n clearInterval(intervalId);\n intervalId = null;\n }\n }\n\n function getMetrics(): PerformanceMetrics | null {\n if (lastMetrics) {\n return lastMetrics;\n }\n return null;\n }\n\n function updateConfig(newConfig: Partial<CreateMonitorObserverOptions>): void {\n if (newConfig.maxMemory !== undefined) {\n // convert MB -> bytes\n config.maxMemory = newConfig.maxMemory * 1024 * 1024;\n }\n\n if (newConfig.interval !== undefined) {\n config.interval = newConfig.interval;\n\n // restart if active to apply new interval\n if (intervalId) {\n stop();\n start();\n }\n }\n }\n\n return {\n start,\n stop,\n getMetrics,\n updateConfig,\n };\n}\n\n/**\n * Collects and normalizes performance metrics for the current process,\n * including memory usage, CPU usage, and event loop utilization.\n *\n * CPU and event loop metrics are computed as deltas relative to previously\n * recorded values. All metrics are normalized into a utilization between 0 and 1\n * based on the configured maximum thresholds.\n *\n * @param props Previous metric snapshots and normalization limits.\n * @returns A structured object containing normalized performance metrics.\n */\nexport function collectMetrics(props: {\n prevMem: NodeJS.MemoryUsage;\n prevCpu: NodeJS.CpuUsage;\n prevHrtime: bigint;\n prevLoop: EventLoopUtilization;\n maxMemory: number; // bytes\n collectedAtMs: number;\n previousCollectedAtMs: number;\n interval: number;\n}): PerformanceMetrics {\n const nowHrtime = process.hrtime.bigint();\n\n const elapsedNs = Number(nowHrtime - props.prevHrtime);\n const elapsedMs = elapsedNs / 1e6;\n const actualElapsed = props.collectedAtMs - props.previousCollectedAtMs;\n\n const mem = process.memoryUsage();\n const deltaMem: NodeJS.MemoryUsage = {\n rss: mem.rss - props.prevMem.rss,\n heapTotal: mem.heapTotal - props.prevMem.heapTotal,\n heapUsed: mem.heapUsed - props.prevMem.heapUsed,\n external: mem.external - props.prevMem.external,\n arrayBuffers: mem.arrayBuffers - props.prevMem.arrayBuffers,\n };\n const memRatio = Math.min(1, mem.rss / props.maxMemory);\n\n const cpuDelta = process.cpuUsage(props.prevCpu);\n const cpuMs = (cpuDelta.system + cpuDelta.user) / 1e3;\n const cpuRatio = cpuMs / elapsedMs;\n\n const loop = performance.eventLoopUtilization(props.prevLoop);\n\n return {\n cpu: {\n // deltaMs: cpuMs, // remove to avoid confusion with different unit type\n utilization: cpuRatio,\n delta: cpuDelta,\n total: process.cpuUsage(),\n },\n\n loop: {\n utilization: loop.utilization,\n delta: loop,\n total: performance.eventLoopUtilization(),\n },\n\n memory: {\n utilization: memRatio,\n delta: deltaMem,\n total: mem,\n },\n\n collectedAt: props.collectedAtMs,\n 
previousCollectedAt: props.previousCollectedAtMs,\n interval: props.interval,\n actualElapsed,\n };\n}\n\n// -----------------------------------------------------------------\n\n/**\n * Represents a metric extended with a normalized utilization between 0 and 1.\n *\n * The utilization indicates how close the metric is to its configured maximum\n * threshold, where 0 means minimal usage and 1 means the limit has been reached.\n *\n * @typeParam T The underlying metric type being normalized.\n */\nexport type NormalizedMetric<T> = T & {\n /** Normalized value between 0 and 1 */\n utilization: number;\n};\n\n/**\n * PerformanceMetrics describes the actual shape returned by collectMetrics.\n * All metric groups include raw `delta` and `total` objects plus a normalized utilization.\n */\nexport interface PerformanceMetrics {\n memory: NormalizedMetric<{\n delta: NodeJS.MemoryUsage;\n total: NodeJS.MemoryUsage;\n }>;\n\n cpu: NormalizedMetric<{\n delta: NodeJS.CpuUsage;\n total: NodeJS.CpuUsage;\n }>;\n\n loop: NormalizedMetric<{\n delta: EventLoopUtilization;\n total: EventLoopUtilization;\n }>;\n\n /** Timestamp in milliseconds when this metric was collected */\n collectedAt: number;\n\n /** Timestamp in milliseconds of the previous metric collection */\n previousCollectedAt: number;\n\n /** Interval in milliseconds at which the monitor is running */\n interval: number;\n\n /** Actual elapsed time in milliseconds since the last collection */\n actualElapsed: number;\n}\n\n/**\n * Options for createMonitorObserver.\n */\nexport interface CreateMonitorObserverOptions {\n /** Interval between samples in ms. Default: 500 */\n interval?: number;\n\n /** Maximum RSS memory in megabytes (MB) used for normalization. */\n maxMemory?: number;\n\n /** Optional callback invoked on each metrics sample. 
*/\n callback?: (metrics: PerformanceMetrics) => void;\n}\n\n/**\n * Public API returned by `createMonitorObserver`.\n *\n * Provides methods to start and stop monitoring, retrieve the latest metrics,\n * and update the monitor configuration at runtime.\n */\nexport interface ReturnCreateMonitor {\n /** Stops the monitoring interval */\n stop: () => void;\n\n /** Starts the monitoring interval */\n start: () => void;\n\n /** Returns the last collected metrics or null if none have been collected yet */\n getMetrics: () => PerformanceMetrics | null;\n\n /** Allows updating the monitor configuration on the fly */\n updateConfig: (newConfig: Partial<CreateMonitorObserverOptions>) => void;\n}\n","import { DEFAULT_MAX_PROCESS_MEMORY_MB, WORST_SWEEP_INTERVAL } from \"../defaults\";\n\nimport { getProcessMemoryLimit } from \"./get-process-memory-limit\";\nimport {\n createMonitorObserver,\n type PerformanceMetrics,\n type ReturnCreateMonitor,\n} from \"./process-monitor\";\n\nlet _monitorInstance: ReturnCreateMonitor | null = null;\n\n/** Latest collected metrics from the monitor */\nexport let _metrics: PerformanceMetrics | null;\n\n/** Maximum memory limit for the monitor (in MB) */\nexport let maxMemoryLimit: number = DEFAULT_MAX_PROCESS_MEMORY_MB;\n\n/** Use 90% of the effective limit */\nexport const SAFE_MEMORY_LIMIT_RATIO = 0.9;\n\nexport function startMonitor(): void {\n if (__BROWSER__) {\n // Ignore monitor in browser environments\n return;\n }\n\n if (!_monitorInstance) {\n try {\n const processMemoryLimit = getProcessMemoryLimit();\n\n if (processMemoryLimit && processMemoryLimit > 0) {\n maxMemoryLimit = (processMemoryLimit / 1024 / 1024) * SAFE_MEMORY_LIMIT_RATIO;\n }\n } catch {\n // TODO: proper logger\n // Ignore errors and use default\n // console.log(\"error getProcessMemoryLimit:\", e);\n }\n\n _monitorInstance = createMonitorObserver({\n callback(metrics) {\n _metrics = metrics;\n },\n interval: WORST_SWEEP_INTERVAL,\n maxMemory: maxMemoryLimit, // 1 GB\n });\n\n _monitorInstance.start();\n }\n}\n","import { _instancesCache } from \"../cache/create-cache\";\n\n/**\n * Updates the expired ratio for each cache instance based on the collected ratios.\n * @param currentExpiredRatios - An array of arrays containing expired ratios for each cache instance.\n * @internal\n */\nexport function _batchUpdateExpiredRatio(currentExpiredRatios: number[][]): void {\n for (const inst of _instancesCache) {\n const ratios = currentExpiredRatios[inst._instanceIndexState];\n if (ratios && ratios.length > 0) {\n const avgRatio = ratios.reduce((sum, val) => sum + val, 0) / ratios.length;\n\n const alpha = 0.6; // NOTE: this must be alway higher than 0.5 to prioritize recent avgRatio\n inst._expiredRatio = inst._expiredRatio * (1 - alpha) + avgRatio * alpha;\n }\n }\n}\n","/**\n * Interpolates a value between two numeric ranges.\n *\n * Maps `value` from [fromStart, fromEnd] to [toStart, toEnd].\n * Works with inverted ranges, negative values, and any numeric input.\n */\nexport function interpolate({\n value,\n fromStart,\n fromEnd,\n toStart,\n toEnd,\n}: {\n value: number;\n fromStart: number;\n fromEnd: number;\n toStart: number;\n toEnd: number;\n}): number {\n // Explicit and predictable: avoid division by zero.\n if (fromStart === fromEnd) return toStart;\n\n const t = (value - fromStart) / (fromEnd - fromStart);\n return toStart + t * (toEnd - toStart);\n}\n","import {\n DEFAULT_CPU_WEIGHT,\n DEFAULT_LOOP_WEIGHT,\n DEFAULT_MEMORY_WEIGHT,\n OPTIMAL_SWEEP_INTERVAL,\n WORST_SWEEP_INTERVAL,\n 
WORST_SWEEP_TIME_BUDGET,\n} from \"../defaults\";\nimport { interpolate } from \"../utils/interpolate\";\nimport type { PerformanceMetrics } from \"../utils/process-monitor\";\n\n/**\n * Weights for calculating the weighted utilization ratio.\n * Each weight determines how strongly each metric influences the final ratio.\n */\nexport interface UtilizationWeights {\n /** Weight applied to memory utilization (non-inverted). Default: 1 */\n memory?: number;\n\n /** Weight applied to CPU utilization (inverted). Default: 1 */\n cpu?: number;\n\n /** Weight applied to event loop utilization (inverted). Default: 1 */\n loop?: number;\n}\n\n/**\n * Represents the calculated optimal sweep parameters based on system metrics.\n */\nexport interface OptimalSweepParams {\n /** The optimal interval in milliseconds between sweep operations. */\n sweepIntervalMs: number;\n\n /** The optimal maximum time budget in milliseconds for a sweep cycle. */\n sweepTimeBudgetMs: number;\n}\n\n/**\n * Options for customizing the sweep parameter calculation.\n */\ninterface CalculateOptimalSweepParamsOptions {\n /** System performance metrics to base the calculations on. */\n metrics: PerformanceMetrics;\n\n /** Optional custom weights for each utilization metric. */\n weights?: UtilizationWeights;\n\n /** Interval (ms) used when system load is minimal. */\n optimalSweepIntervalMs?: number;\n\n /** Interval (ms) used when system load is maximal. */\n worstSweepIntervalMs?: number;\n\n /** Maximum sweep time budget (ms) under worst-case load. */\n worstSweepTimeBudgetMs?: number;\n}\n\n/**\n * Calculates adaptive sweep parameters based on real-time system utilization.\n *\n * Memory utilization is used as-is: higher memory usage → more conservative sweeps.\n * CPU and event loop utilization are inverted: lower usage → more conservative sweeps.\n *\n * This inversion ensures:\n * - When CPU and loop are *free*, sweeping becomes more aggressive (worst-case behavior).\n * - When CPU and loop are *busy*, sweeping becomes more conservative (optimal behavior).\n *\n * The final ratio is a weighted average of the three metrics, clamped to [0, 1].\n * This ratio is then used to interpolate between optimal and worst-case sweep settings.\n *\n * @param options - Optional configuration for weights and sweep bounds.\n * @returns Interpolated sweep interval, time budget, and the ratio used.\n */\nexport const calculateOptimalSweepParams = (\n options: CalculateOptimalSweepParamsOptions,\n): OptimalSweepParams => {\n const {\n metrics,\n weights = {},\n optimalSweepIntervalMs = OPTIMAL_SWEEP_INTERVAL,\n worstSweepIntervalMs = WORST_SWEEP_INTERVAL,\n worstSweepTimeBudgetMs = WORST_SWEEP_TIME_BUDGET,\n } = options;\n\n // Resolve metric weights (default = 1)\n const memoryWeight = weights.memory ?? DEFAULT_MEMORY_WEIGHT;\n const cpuWeight = weights.cpu ?? DEFAULT_CPU_WEIGHT;\n const loopWeight = weights.loop ?? DEFAULT_LOOP_WEIGHT;\n\n // Memory utilization is used directly (0–1)\n const memoryUtilization = metrics?.memory.utilization ?? 0;\n\n // Raw CPU and loop utilization (0–1)\n const cpuUtilizationRaw = metrics?.cpu.utilization ?? 0;\n const loopUtilizationRaw = metrics?.loop.utilization ?? 
0;\n\n // Invert CPU and loop utilization:\n // - Low CPU/loop usage → high inverted value → pushes toward worst-case behavior\n // - High CPU/loop usage → low inverted value → pushes toward optimal behavior\n const cpuUtilization = 1 - cpuUtilizationRaw;\n const loopUtilization = 1 - loopUtilizationRaw;\n\n // Weighted average of all metrics\n const weightedSum =\n memoryUtilization * memoryWeight + cpuUtilization * cpuWeight + loopUtilization * loopWeight;\n\n const totalWeight = memoryWeight + cpuWeight + loopWeight;\n\n // Final utilization ratio clamped to [0, 1]\n const ratio = Math.min(1, Math.max(0, weightedSum / totalWeight));\n\n // Interpolate sweep interval based on the ratio\n const sweepIntervalMs = interpolate({\n value: ratio,\n fromStart: 0,\n fromEnd: 1,\n toStart: optimalSweepIntervalMs,\n toEnd: worstSweepIntervalMs,\n });\n\n // Interpolate sweep time budget based on the ratio\n const sweepTimeBudgetMs = interpolate({\n value: ratio,\n fromStart: 0,\n fromEnd: 1,\n toStart: 0,\n toEnd: worstSweepTimeBudgetMs,\n });\n\n return {\n sweepIntervalMs,\n sweepTimeBudgetMs,\n };\n};\n","import { _instancesCache } from \"../cache/create-cache\";\nimport type { CacheState } from \"../types\";\n\n/**\n * Selects a cache instance to sweep based on sweep weights or round‑robin order.\n *\n * Two selection modes are supported:\n * - **Round‑robin mode**: If `totalSweepWeight` ≤ 0, instances are selected\n * deterministically in sequence using `batchSweep`. Once all instances\n * have been processed, returns `null`.\n * - **Weighted mode**: If sweep weights are available, performs a probabilistic\n * selection. Each instance’s `_sweepWeight` contributes proportionally to its\n * chance of being chosen.\n *\n * This function depends on `_updateWeightSweep` to maintain accurate sweep weights.\n *\n * @param totalSweepWeight - Sum of all sweep weights across instances.\n * @param batchSweep - Current batch index used for round‑robin selection.\n * @returns The selected `CacheState` instance, `null` if no instance remains,\n * or `undefined` if the cache is empty.\n */\nexport function _selectInstanceToSweep({\n totalSweepWeight,\n batchSweep,\n}: {\n totalSweepWeight: number;\n batchSweep: number;\n}): CacheState | null | undefined {\n // Default selection: initialize with the first instance in the cache list.\n // This acts as a fallback in case no weighted selection occurs.\n let instanceToSweep: CacheState | null | undefined = _instancesCache[0];\n\n if (totalSweepWeight <= 0) {\n // Case 1: No sweep weight assigned (all instances skipped or empty).\n // → Perform a deterministic round‑robin minimal sweep across all instances.\n // Each batch iteration selects the next instance in order.\n if (batchSweep > _instancesCache.length) {\n // If all instances have been processed in this cycle, no instance to sweep.\n instanceToSweep = null;\n }\n instanceToSweep = _instancesCache[batchSweep - 1] as CacheState;\n } else {\n // Case 2: Sweep weights are available.\n // → Perform a probabilistic selection based on relative sweep weights.\n // A random threshold is drawn in [0, totalSweepWeight].\n let threshold = Math.random() * totalSweepWeight;\n\n // Iterate through instances, subtracting each instance’s weight.\n // The first instance that reduces the threshold to ≤ 0 is selected.\n // This ensures that instances with higher weights have proportionally\n // higher probability of being chosen for sweeping.\n for (const inst of _instancesCache) {\n threshold -= inst._sweepWeight;\n if 
(threshold <= 0) {\n instanceToSweep = inst;\n break;\n }\n }\n }\n\n return instanceToSweep;\n}\n","import type { CacheState } from \"../types\";\n\nexport const enum DELETE_REASON {\n MANUAL = \"manual\",\n EXPIRED = \"expired\",\n STALE = \"stale\",\n}\n\n/**\n * Deletes a key from the cache.\n * @param state - The cache state.\n * @param key - The key.\n * @returns A boolean indicating whether the key was successfully deleted.\n */\nexport const deleteKey = (\n state: CacheState,\n key: string,\n reason: DELETE_REASON = DELETE_REASON.MANUAL,\n): boolean => {\n const onDelete = state.onDelete;\n const onExpire = state.onExpire;\n\n if (!onDelete && !onExpire) {\n return state.store.delete(key);\n }\n\n const entry = state.store.get(key);\n if (!entry) return false;\n\n state.store.delete(key);\n state.onDelete?.(key, entry[1], reason);\n if (reason !== DELETE_REASON.MANUAL) {\n state.onExpire?.(key, entry[1], reason);\n }\n\n return true;\n};\n","import type { DELETE_REASON } from \"./cache/delete\";\n\n/**\n * Base configuration shared between CacheOptions and CacheState.\n */\nexport interface CacheConfigBase {\n /**\n * Callback invoked when a key expires naturally.\n * @param key - The expired key.\n * @param value - The value associated with the expired key.\n * @param reason - The reason for deletion ('expired', or 'stale').\n */\n onExpire?: (\n key: string,\n value: unknown,\n reason: Exclude<DELETE_REASON, DELETE_REASON.MANUAL>,\n ) => void;\n\n /**\n * Callback invoked when a key is deleted, either manually or due to expiration.\n * @param key - The deleted key.\n * @param value - The value of the deleted key.\n * @param reason - The reason for deletion ('manual', 'expired', or 'stale').\n */\n onDelete?: (key: string, value: unknown, reason: DELETE_REASON) => void;\n\n /**\n * Default TTL (Time-To-Live) in milliseconds for entries without explicit TTL.\n * @default 1_800_000 (30 minutes)\n */\n defaultTtl: number;\n\n /**\n * Default stale window in milliseconds for entries that do not\n * specify their own `staleWindowMs`.\n *\n * This window determines how long an entry may continue to be\n * served as stale after it reaches its expiration time.\n *\n * The window is always relative to the entry’s own expiration\n * moment, regardless of whether that expiration comes from an\n * explicit `ttl` or from the cache’s default TTL.\n * @default null (No stale window)\n */\n defaultStaleWindow: number;\n\n /**\n * Maximum number of entries the cache can hold.\n * Beyond this limit, new entries are ignored.\n * @default Infinite (unlimited)\n */\n maxSize: number;\n\n /**\n * Maximum memory size in MB the cache can use.\n * Beyond this limit, new entries are ignored.\n * @default Infinite (unlimited)\n */\n maxMemorySize: number;\n\n /**\n * Controls how stale entries are handled when read from the cache.\n *\n * - true → stale entries are purged immediately after being returned.\n * - false → stale entries are retained after being returned.\n *\n * @default false\n */\n purgeStaleOnGet: boolean;\n\n /**\n * Controls how stale entries are handled during sweep operations.\n *\n * - true → stale entries are purged during sweeps.\n * - false → stale entries are retained during sweeps.\n *\n * @default false\n */\n purgeStaleOnSweep: boolean;\n\n /**\n * Whether to automatically start the sweep process when the cache is created.\n *\n * - true → sweep starts automatically.\n * - false → sweep does not start automatically, allowing manual control.\n *\n * @internal\n * @default true\n 
*/\n _autoStartSweep: boolean;\n\n /**\n * Allowed expired ratio for the cache instance.\n */\n _maxAllowExpiredRatio: number;\n}\n\n/**\n * Public configuration options for the TTL cache.\n */\nexport type CacheOptions = Partial<CacheConfigBase>;\n\n/**\n * Options for `invalidateTag` operation. Kept intentionally extensible so\n * future flags can be added without breaking callers.\n */\nexport interface InvalidateTagOptions {\n /** If true, mark affected entries as stale instead of fully expired. */\n asStale?: boolean;\n\n // Allow additional option fields for forward-compatibility.\n [key: string]: unknown;\n}\n\n/**\n * Lifecycle timestamps stored in a Tuple:\n * - 0 → createdAt\n * - 1 → expiresAt\n * - 2 → staleExpiresAt\n */\nexport type EntryTimestamp = [\n /** createdAt: Absolute timestamp the entry was created (Date.now()). */\n number,\n\n /** expiresAt: Absolute timestamp when the entry becomes invalid (Date.now() + TTL). */\n number,\n\n /** staleExpiresAt: Absolute timestamp when the entry stops being stale (Date.now() + staleTTL). */\n number,\n];\n\n/**\n * Represents a single cache entry.\n */\nexport type CacheEntry = [\n EntryTimestamp,\n\n /** The stored value. */\n unknown,\n\n (\n /**\n * Optional list of tags associated with this entry.\n * Tags can be used for:\n * - Group invalidation (e.g., clearing all entries with a given tag)\n * - Namespacing or categorization\n * - Tracking dependencies\n *\n * If no tags are associated, this field is `null`.\n */\n string[] | null\n ),\n];\n\n/**\n * Status of a cache entry.\n */\nexport enum ENTRY_STATUS {\n /** The entry is fresh and valid. */\n FRESH = \"fresh\",\n /** The entry is stale but can still be served. */\n STALE = \"stale\",\n /** The entry has expired and is no longer valid. */\n EXPIRED = \"expired\",\n}\n\n/**\n * Internal state of the TTL cache.\n */\nexport interface CacheState extends CacheConfigBase {\n /** Map storing key-value entries. */\n store: Map<string, CacheEntry>;\n\n /** Current size */\n size: number;\n\n /** Iterator for sweeping keys. */\n _sweepIter: MapIterator<[string, CacheEntry]> | null;\n\n /** Index of this instance for sweep all. */\n _instanceIndexState: number;\n\n /** Expire ratio avg for instance */\n _expiredRatio: number;\n\n /** Sweep weight for instance, calculate based on size and _expiredRatio */\n _sweepWeight: number;\n\n /**\n * Tag invalidation state.\n * Each tag stores:\n * - 0 → moment when the tag was marked as expired (0 if never)\n * - 1 → moment when the tag was marked as stale (0 if never)\n *\n * These timestamps define whether a tag affects an entry based on\n * the entry's creation time. */\n _tags: Map<string, [number, number]>;\n}\n","import { ENTRY_STATUS, type CacheEntry, type CacheState } from \"../types\";\n\n/**\n * Computes the derived status of a cache entry based on its associated tags.\n *\n * Tags may impose stricter expiration or stale rules on the entry. 
Only tags\n * created at or after the entry's creation timestamp are considered relevant.\n *\n * Resolution rules:\n * - If any applicable tag marks the entry as expired, the status becomes `EXPIRED`.\n * - Otherwise, if any applicable tag marks it as stale, the status becomes `STALE`.\n * - If no tag imposes stricter rules, the entry remains `FRESH`.\n *\n * @param state - The cache state containing tag metadata.\n * @param entry - The cache entry whose status is being evaluated.\n * @returns A tuple containing:\n * - The final {@link ENTRY_STATUS} imposed by tags.\n * - The earliest timestamp at which a tag marked the entry as stale\n * (or 0 if no tag imposed a stale rule).\n */\nexport function _statusFromTags(state: CacheState, entry: CacheEntry): [ENTRY_STATUS, number] {\n const entryCreatedAt = entry[0][0];\n\n // Tracks the earliest point in time when any tag marked this entry as stale.\n // Initialized to Infinity so that comparisons always pick the minimum.\n let earliestTagStaleInvalidation = Infinity;\n\n // Default assumption: entry is fresh unless tags override.\n let status = ENTRY_STATUS.FRESH;\n\n const tags = entry[2];\n if (tags) {\n for (const tag of tags) {\n const ts = state._tags.get(tag);\n if (!ts) continue;\n\n // Each tag provides two timestamps:\n // - tagExpiredAt: when the tag forces expiration\n // - tagStaleSinceAt: when the tag forces stale status\n const [tagExpiredAt, tagStaleSinceAt] = ts;\n\n // A tag can only override if it was created after the entry itself.\n if (tagExpiredAt >= entryCreatedAt) {\n status = ENTRY_STATUS.EXPIRED;\n break; // Expired overrides everything, no need to check further.\n }\n\n if (tagStaleSinceAt >= entryCreatedAt) {\n // Keep track of the earliest stale timestamp across all tags.\n if (tagStaleSinceAt < earliestTagStaleInvalidation) {\n earliestTagStaleInvalidation = tagStaleSinceAt;\n }\n status = ENTRY_STATUS.STALE;\n }\n }\n }\n\n // If no tag imposed stale, return 0 for the timestamp.\n return [status, status === ENTRY_STATUS.STALE ? earliestTagStaleInvalidation : 0];\n}\n","import { ENTRY_STATUS, type CacheEntry, type CacheState } from \"../types\";\nimport { _statusFromTags } from \"../utils/status-from-tags\";\n\n/**\n * Computes the final derived status of a cache entry by combining:\n *\n * - The entry's own expiration timestamps (TTL and stale TTL).\n * - Any stricter expiration or stale rules imposed by its associated tags.\n *\n * Precedence rules:\n * - `EXPIRED` overrides everything.\n * - `STALE` overrides `FRESH`.\n * - If neither the entry nor its tags impose stricter rules, the entry is `FRESH`.\n *\n * @param state - The cache state containing tag metadata.\n * @param entry - The cache entry being evaluated.\n * @returns The final {@link ENTRY_STATUS} for the entry.\n */\nexport function computeEntryStatus(\n state: CacheState,\n entry: CacheEntry,\n\n /** @internal */\n now: number,\n): ENTRY_STATUS {\n const [__createdAt, expiresAt, staleExpiresAt] = entry[0];\n\n // 1. 
Status derived from tags\n const [tagStatus, earliestTagStaleInvalidation] = _statusFromTags(state, entry);\n if (tagStatus === ENTRY_STATUS.EXPIRED) return ENTRY_STATUS.EXPIRED;\n const windowStale = staleExpiresAt - expiresAt;\n if (\n tagStatus === ENTRY_STATUS.STALE &&\n staleExpiresAt > 0 &&\n now < earliestTagStaleInvalidation + windowStale\n ) {\n // A tag can mark the entry as stale only if the entry itself supports a stale window.\n // The tag's stale invalidation time is extended by the entry's stale window duration.\n // If \"now\" is still within that extended window, the entry is considered stale.\n return ENTRY_STATUS.STALE;\n }\n\n // 2. Status derived from entry timestamps\n if (now < expiresAt) {\n return ENTRY_STATUS.FRESH;\n }\n if (staleExpiresAt > 0 && now < staleExpiresAt) {\n return ENTRY_STATUS.STALE;\n }\n\n return ENTRY_STATUS.EXPIRED;\n}\n\n// ---------------------------------------------------------------------------\n// Entry status wrappers (semantic helpers built on top of computeEntryStatus)\n// ---------------------------------------------------------------------------\n/**\n * Determines whether a cache entry is fresh.\n *\n * A fresh entry is one whose final derived status is `FRESH`, meaning:\n * - It has not expired according to its own timestamps, and\n * - No associated tag imposes a stricter stale or expired rule.\n *\n * `entry` can be either a {@link CacheEntry} or a pre-computed {@link ENTRY_STATUS}.\n * Passing a pre-computed status avoids recalculating the entry status.\n *\n * @param state - The cache state containing tag metadata.\n * @param entry - The cache entry or pre-computed status being evaluated.\n * @param now - The current timestamp.\n * @returns True if the entry is fresh.\n */\nexport const isFresh = (\n state: CacheState,\n entry: CacheEntry | ENTRY_STATUS,\n now: number,\n): boolean => {\n if (typeof entry === \"string\") {\n // If entry is already a pre-computed status (from tags), it's fresh only if that status is FRESH.\n return entry === ENTRY_STATUS.FRESH;\n }\n\n return computeEntryStatus(state, entry, now) === ENTRY_STATUS.FRESH;\n};\n/**\n * Determines whether a cache entry is stale.\n *\n * A stale entry is one whose final derived status is `STALE`, meaning:\n * - It has passed its TTL but is still within its stale window, or\n * - A tag imposes a stale rule that applies to this entry.\n *\n * `entry` can be either a {@link CacheEntry} or a pre-computed {@link ENTRY_STATUS}.\n * Passing a pre-computed status avoids recalculating the entry status.\n *\n * @param state - The cache state containing tag metadata.\n * @param entry - The cache entry or pre-computed status being evaluated.\n * @param now - The current timestamp.\n * @returns True if the entry is stale.\n */\nexport const isStale = (\n state: CacheState,\n entry: CacheEntry | ENTRY_STATUS,\n\n /** @internal */\n now: number,\n): boolean => {\n if (typeof entry === \"string\") {\n // If entry is already a pre-computed status (from tags), it's stale only if that status is STALE.\n return entry === ENTRY_STATUS.STALE;\n }\n\n return computeEntryStatus(state, entry, now) === ENTRY_STATUS.STALE;\n};\n\n/**\n * Determines whether a cache entry is expired.\n *\n * An expired entry is one whose final derived status is `EXPIRED`, meaning:\n * - It has exceeded both its TTL and stale TTL, or\n * - A tag imposes an expiration rule that applies to this entry.\n *\n * `entry` can be either a {@link CacheEntry} or a pre-computed {@link ENTRY_STATUS}.\n * Passing a 
pre-computed status avoids recalculating the entry status.\n *\n * @param state - The cache state containing tag metadata.\n * @param entry - The cache entry or pre-computed status being evaluated.\n * @param now - The current timestamp.\n * @returns True if the entry is expired.\n */\nexport const isExpired = (\n state: CacheState,\n entry: CacheEntry | ENTRY_STATUS,\n\n /** @internal */\n now: number,\n): boolean => {\n if (typeof entry === \"string\") {\n // If entry is already a pre-computed status (from tags), it's expired only if that status is EXPIRED.\n return entry === ENTRY_STATUS.EXPIRED;\n }\n\n return computeEntryStatus(state, entry, now) === ENTRY_STATUS.EXPIRED;\n};\n\n/**\n * Determines whether a cache entry is valid.\n *\n * A valid entry is one whose final derived status is either:\n * - `FRESH`, or\n * - `STALE` (still within its stale window).\n *\n * Expired entries are considered invalid.\n *\n * `entry` can be either a {@link CacheEntry} or a pre-computed {@link ENTRY_STATUS},\n * or undefined/null if the entry was not found. Passing a pre-computed status avoids\n * recalculating the entry status.\n *\n * @param state - The cache state containing tag metadata.\n * @param entry - The cache entry, pre-computed status, or undefined/null if not found.\n * @param now - The current timestamp (defaults to {@link Date.now}).\n * @returns True if the entry exists and is fresh or stale.\n */\nexport const isValid = (\n state: CacheState,\n entry?: CacheEntry | ENTRY_STATUS | null,\n\n /** @internal */\n now: number = Date.now(),\n): boolean => {\n if (!entry) return false;\n if (typeof entry === \"string\") {\n // If entry is already a pre-computed status (from tags), it's valid if it's FRESH or STALE.\n return entry === ENTRY_STATUS.FRESH || entry === ENTRY_STATUS.STALE;\n }\n\n const status = computeEntryStatus(state, entry, now);\n return status === ENTRY_STATUS.FRESH || status === ENTRY_STATUS.STALE;\n};\n","import { DELETE_REASON, deleteKey } from \"../cache/delete\";\nimport { computeEntryStatus, isExpired, isStale } from \"../cache/validators\";\nimport { MAX_KEYS_PER_BATCH } from \"../defaults\";\nimport { type CacheState } from \"../types\";\n\n/**\n * Performs a single sweep operation on the cache to remove expired and optionally stale entries.\n * Uses a linear scan with a saved pointer to resume from the last processed key.\n * @param state - The cache state.\n * @param _maxKeysPerBatch - Maximum number of keys to process in this sweep.\n * @returns An object containing statistics about the sweep operation.\n */\nexport function _sweepOnce(\n state: CacheState,\n\n /**\n * Maximum number of keys to process in this sweep.\n * @default 1000\n */\n _maxKeysPerBatch: number = MAX_KEYS_PER_BATCH,\n): { processed: number; expiredCount: number; staleCount: number; ratio: number } {\n if (!state._sweepIter) {\n state._sweepIter = state.store.entries();\n }\n\n let processed = 0;\n let expiredCount = 0;\n let staleCount = 0;\n\n for (let i = 0; i < _maxKeysPerBatch; i++) {\n const next = state._sweepIter.next();\n\n if (next.done) {\n state._sweepIter = state.store.entries();\n break;\n }\n\n processed += 1;\n const [key, entry] = next.value;\n\n const now = Date.now();\n\n const status = computeEntryStatus(state, entry, now);\n if (isExpired(state, status, now)) {\n deleteKey(state, key, DELETE_REASON.EXPIRED);\n expiredCount += 1;\n } else if (isStale(state, status, now)) {\n staleCount += 1;\n\n if (state.purgeStaleOnSweep) {\n deleteKey(state, key, 
DELETE_REASON.STALE);\n }\n }\n }\n\n const expiredStaleCount = state.purgeStaleOnSweep ? staleCount : 0;\n return {\n processed,\n expiredCount,\n staleCount,\n ratio: processed > 0 ? (expiredCount + expiredStaleCount) / processed : 0,\n };\n}\n","import {\n DEFAULT_MAX_EXPIRED_RATIO,\n EXPIRED_RATIO_MEMORY_THRESHOLD,\n MINIMAL_EXPIRED_RATIO,\n} from \"../defaults\";\nimport { interpolate } from \"../utils/interpolate\";\nimport { _metrics, SAFE_MEMORY_LIMIT_RATIO } from \"../utils/start-monitor\";\n\n/**\n * Calculates the optimal maximum expired ratio based on current memory utilization.\n *\n * This function interpolates between `maxAllowExpiredRatio` and `MINIMAL_EXPIRED_RATIO`\n * depending on the memory usage reported by `_metrics`. At low memory usage (0%),\n * the optimal ratio equals `maxAllowExpiredRatio`. As memory usage approaches or exceeds\n * 80% of the memory limit, the optimal ratio decreases toward `MINIMAL_EXPIRED_RATIO`.\n *\n * @param maxAllowExpiredRatio - The maximum allowed expired ratio at minimal memory usage.\n * Defaults to `DEFAULT_MAX_EXPIRED_RATIO`.\n * @returns A normalized value between 0 and 1 representing the optimal expired ratio.\n */\nexport function calculateOptimalMaxExpiredRatio(\n maxAllowExpiredRatio: number = DEFAULT_MAX_EXPIRED_RATIO,\n): number {\n const EFFECTIVE_MEMORY_THRESHOLD = EXPIRED_RATIO_MEMORY_THRESHOLD / SAFE_MEMORY_LIMIT_RATIO;\n\n const optimalExpiredRatio = interpolate({\n value: _metrics?.memory.utilization ?? 0,\n\n fromStart: 0, // baseline: memory usage ratio at 0%\n fromEnd: EFFECTIVE_MEMORY_THRESHOLD, // threshold: memory usage ratio at 80% of safe limit\n\n toStart: maxAllowExpiredRatio, // allowed ratio at minimal memory usage\n toEnd: MINIMAL_EXPIRED_RATIO, // allowed ratio at high memory usage (≥80%)\n });\n\n // At 0% memory usage, the optimalExpiredRatio equals maxAllowExpiredRatio.\n // At or above 80% memory usage, the optimalExpiredRatio approaches or falls below MINIMAL_EXPIRED_RATIO.\n\n return Math.min(1, Math.max(0, optimalExpiredRatio));\n}\n","import { _instancesCache } from \"../cache/create-cache\";\nimport { MINIMAL_EXPIRED_RATIO } from \"../defaults\";\n\nimport { calculateOptimalMaxExpiredRatio } from \"./calculate-optimal-max-expired-ratio\";\n\n/**\n * Updates the sweep weight (`_sweepWeight`) for each cache instance.\n *\n * The sweep weight determines the probability that an instance will be selected\n * for a cleanup (sweep) process. It is calculated based on the store size and\n * the ratio of expired keys.\n *\n * This function complements (`_selectInstanceToSweep`), which is responsible\n * for selecting the correct instance based on the weights assigned here.\n *\n * ---\n *\n * ### Sweep systems:\n * 1. **Normal sweep**\n * - Runs whenever the percentage of expired keys exceeds the allowed threshold\n * calculated by `calculateOptimalMaxExpiredRatio`.\n * - It is the main cleanup mechanism and is applied proportionally to the\n * store size and the expired‑key ratio.\n *\n * 2. **Memory‑conditioned sweep (control)**\n * - Works exactly like the normal sweep, except it may run even when it\n * normally wouldn’t.\n * - Only activates under **high memory pressure**.\n * - Serves as an additional control mechanism to adjust weights, keep the\n * system updated, and help prevent memory overflows.\n *\n * 3. 
**Round‑robin sweep (minimal control)**\n * - Always runs, even if the expired ratio is low or memory usage does not\n * require it.\n * - Processes a very small number of keys per instance, much smaller than\n * the normal sweep.\n * - Its main purpose is to ensure that all instances receive at least a\n * periodic weight update and minimal expired‑key control.\n *\n * ---\n * #### Important notes:\n * - A minimum `MINIMAL_EXPIRED_RATIO` (e.g., 5%) is assumed to ensure that\n * control sweeps can always run under high‑memory scenarios.\n * - Even with a minimum ratio, the normal sweep and the memory‑conditioned sweep\n * may **skip execution** if memory usage allows it and the expired ratio is\n * below the optimal maximum.\n * - The round‑robin sweep is never skipped: it always runs with a very small,\n * almost imperceptible cost.\n *\n * @returns The total accumulated sweep weight across all cache instances.\n */\nexport function _updateWeightSweep(): number {\n let totalSweepWeight = 0;\n\n for (const instCache of _instancesCache) {\n if (instCache.store.size <= 0) {\n // Empty instance → no sweep weight needed, skip sweep for this instance.\n instCache._sweepWeight = 0;\n continue;\n }\n\n // Ensure a minimum expired ratio to allow control sweeps.\n // If the real ratio is higher than the minimum, use the real ratio.\n let expiredRatio = MINIMAL_EXPIRED_RATIO;\n if (instCache._expiredRatio > MINIMAL_EXPIRED_RATIO) {\n expiredRatio = instCache._expiredRatio;\n }\n\n if (!__BROWSER__) {\n // In non‑browser environments, compute an optimal maximum allowed ratio.\n const optimalMaxExpiredRatio = calculateOptimalMaxExpiredRatio(\n instCache._maxAllowExpiredRatio,\n );\n\n if (expiredRatio <= optimalMaxExpiredRatio) {\n // If memory usage allows it and the expired ratio is low,\n // this sweep can be skipped. 
The reduced round‑robin sweep will still run.\n instCache._sweepWeight = 0;\n continue;\n }\n }\n\n // Normal sweep: weight proportional to store size and expired ratio.\n instCache._sweepWeight = instCache.store.size * expiredRatio;\n totalSweepWeight += instCache._sweepWeight;\n }\n\n return totalSweepWeight;\n}\n","import { _instancesCache } from \"../cache/create-cache\";\nimport {\n MAX_KEYS_PER_BATCH,\n OPTIMAL_SWEEP_INTERVAL,\n OPTIMAL_SWEEP_TIME_BUDGET_IF_NOTE_METRICS_AVAILABLE,\n} from \"../defaults\";\nimport type { CacheState } from \"../types\";\nimport { _metrics } from \"../utils/start-monitor\";\n\nimport { _batchUpdateExpiredRatio } from \"./batchUpdateExpiredRatio\";\nimport { calculateOptimalSweepParams } from \"./calculate-optimal-sweep-params\";\nimport { _selectInstanceToSweep } from \"./select-instance-to-sweep\";\nimport { _sweepOnce } from \"./sweep-once\";\nimport { _updateWeightSweep } from \"./update-weight\";\n\n/**\n * Performs a sweep operation on the cache to remove expired and optionally stale entries.\n * Uses a linear scan with a saved pointer to resume from the last processed key.\n * @param state - The cache state.\n */\nexport const sweep = async (\n state: CacheState,\n\n /** @internal */\n utilities: SweepUtilities = {},\n): Promise<void> => {\n const {\n schedule = defaultSchedule,\n yieldFn = defaultYieldFn,\n now = Date.now(),\n runOnlyOne = false,\n } = utilities;\n const startTime = now;\n\n let sweepIntervalMs = OPTIMAL_SWEEP_INTERVAL;\n let sweepTimeBudgetMs = OPTIMAL_SWEEP_TIME_BUDGET_IF_NOTE_METRICS_AVAILABLE;\n if (!__BROWSER__ && _metrics) {\n ({ sweepIntervalMs, sweepTimeBudgetMs } = calculateOptimalSweepParams({ metrics: _metrics }));\n }\n\n const totalSweepWeight = _updateWeightSweep();\n const currentExpiredRatios: number[][] = [];\n\n // Reduce the maximum number of keys per batch only when no instance weights are available\n // and the sweep is running in minimal round‑robin control mode. In this case, execute the\n // smallest possible sweep (equivalent to one batch, but divided across instances).\n const maxKeysPerBatch =\n totalSweepWeight <= 0 ? 
MAX_KEYS_PER_BATCH / _instancesCache.length : MAX_KEYS_PER_BATCH;\n\n let batchSweep = 0;\n while (true) {\n batchSweep += 1;\n\n const instanceToSweep = _selectInstanceToSweep({ batchSweep, totalSweepWeight });\n if (!instanceToSweep) {\n // No instance to sweep\n break;\n }\n\n const { ratio } = _sweepOnce(instanceToSweep, maxKeysPerBatch);\n // Initialize or update `currentExpiredRatios` array for current ratios\n (currentExpiredRatios[instanceToSweep._instanceIndexState] ??= []).push(ratio);\n\n if (Date.now() - startTime > sweepTimeBudgetMs) {\n break;\n }\n\n await yieldFn();\n }\n\n _batchUpdateExpiredRatio(currentExpiredRatios);\n\n // Schedule next sweep\n if (!runOnlyOne) {\n schedule(() => void sweep(state, utilities), sweepIntervalMs);\n }\n};\n\n// Default utilities for scheduling and yielding --------------------------------\nconst defaultSchedule: scheduleType = (fn, ms) => {\n const t = setTimeout(fn, ms);\n if (typeof t.unref === \"function\") t.unref();\n};\nexport const defaultYieldFn: yieldFnType = () => new Promise(resolve => setImmediate(resolve));\n\n// Types for internal utilities -----------------------------------------------\ntype scheduleType = (fn: () => void, ms: number) => void;\ntype yieldFnType = () => Promise<void>;\ninterface SweepUtilities {\n /**\n * Default scheduling function using setTimeout.\n * This can be overridden for testing.\n * @internal\n */\n schedule?: scheduleType;\n\n /**\n * Default yielding function using setImmediate.\n * This can be overridden for testing.\n * @internal\n */\n yieldFn?: yieldFnType;\n\n /** Current timestamp for testing purposes. */\n now?: number;\n\n /**\n * If true, only run one sweep cycle.\n * @internal\n */\n runOnlyOne?: boolean;\n}\n","import {\n DEFAULT_MAX_EXPIRED_RATIO,\n DEFAULT_MAX_MEMORY_SIZE,\n DEFAULT_MAX_SIZE,\n DEFAULT_STALE_WINDOW,\n DEFAULT_TTL,\n} from \"../defaults\";\nimport { sweep } from \"../sweep/sweep\";\nimport type { CacheOptions, CacheState } from \"../types\";\nimport { startMonitor } from \"../utils/start-monitor\";\n\nlet _instanceCount = 0;\nconst INSTANCE_WARNING_THRESHOLD = 99;\nexport const _instancesCache: CacheState[] = [];\n\n/**\n * Resets the instance count for testing purposes.\n * This function is intended for use in tests to avoid instance limits.\n */\nexport const _resetInstanceCount = (): void => {\n _instanceCount = 0;\n};\n\nlet _initSweepScheduled = false;\n\n/**\n * Creates the initial state for the TTL cache.\n * @param options - Configuration options for the cache.\n * @returns The initial cache state.\n */\nexport const createCache = (options: CacheOptions = {}): CacheState => {\n const {\n onExpire,\n onDelete,\n defaultTtl = DEFAULT_TTL,\n maxSize = DEFAULT_MAX_SIZE,\n maxMemorySize = DEFAULT_MAX_MEMORY_SIZE,\n _maxAllowExpiredRatio = DEFAULT_MAX_EXPIRED_RATIO,\n defaultStaleWindow = DEFAULT_STALE_WINDOW,\n purgeStaleOnGet = false,\n purgeStaleOnSweep = false,\n _autoStartSweep = true,\n } = options;\n\n _instanceCount++;\n\n // NEXT: warn if internal parameters are touch by user\n\n if (_instanceCount > INSTANCE_WARNING_THRESHOLD) {\n // NEXT: Use a proper logging mechanism\n // NEXT: Create documentation for this\n console.warn(\n `Too many instances detected (${_instanceCount}). This may indicate a configuration issue; consider minimizing instance creation or grouping keys by expected expiration ranges. 
See the documentation: https://github.com/neezco/cache/docs/getting-started.md`,\n );\n }\n\n const state: CacheState = {\n store: new Map(),\n _sweepIter: null,\n get size() {\n return state.store.size;\n },\n onExpire,\n onDelete,\n maxSize,\n maxMemorySize,\n defaultTtl,\n defaultStaleWindow,\n purgeStaleOnGet,\n purgeStaleOnSweep,\n _maxAllowExpiredRatio,\n _autoStartSweep,\n _instanceIndexState: -1,\n _expiredRatio: 0,\n _sweepWeight: 0,\n _tags: new Map(),\n };\n\n state._instanceIndexState = _instancesCache.push(state) - 1;\n\n // Start the sweep process\n if (_autoStartSweep) {\n if (_initSweepScheduled) return state;\n _initSweepScheduled = true;\n void sweep(state);\n }\n\n startMonitor();\n\n return state;\n};\n","import type { CacheState } from \"../types\";\n\nimport { DELETE_REASON, deleteKey } from \"./delete\";\nimport { computeEntryStatus, isFresh, isStale } from \"./validators\";\n\n/**\n * Retrieves a value from the cache if the entry is valid.\n * @param state - The cache state.\n * @param key - The key to retrieve.\n * @param now - Optional timestamp override (defaults to Date.now()).\n * @returns The cached value if valid, null otherwise.\n */\nexport const get = (state: CacheState, key: string, now: number = Date.now()): unknown => {\n const entry = state.store.get(key);\n\n if (!entry) return undefined;\n\n const status = computeEntryStatus(state, entry, now);\n\n if (isFresh(state, status, now)) return entry[1];\n\n if (isStale(state, status, now)) {\n if (state.purgeStaleOnGet) {\n deleteKey(state, key, DELETE_REASON.STALE);\n }\n return entry[1];\n }\n\n // If it expired, always delete it\n deleteKey(state, key, DELETE_REASON.EXPIRED);\n\n return undefined;\n};\n","import type { CacheState } from \"../types\";\n\nimport { get } from \"./get\";\n\n/**\n * Checks if a key exists in the cache and is not expired.\n * @param state - The cache state.\n * @param key - The key to check.\n * @param now - Optional timestamp override (defaults to Date.now()).\n * @returns True if the key exists and is valid, false otherwise.\n */\nexport const has = (state: CacheState, key: string, now: number = Date.now()): boolean => {\n return get(state, key, now) !== undefined;\n};\n","import type { CacheState, InvalidateTagOptions } from \"../types\";\n\n/**\n * Invalidates one or more tags so that entries associated with them\n * become expired or stale from this moment onward.\n *\n * Semantics:\n * - Each tag maintains two timestamps in `state._tags`:\n * [expiredAt, staleSinceAt].\n * - Calling this function updates one of those timestamps to `_now`,\n * depending on whether the tag should force expiration or staleness.\n *\n * Rules:\n * - If `asStale` is false (default), the tag forces expiration:\n * entries created before `_now` will be considered expired.\n * - If `asStale` is true, the tag forces staleness:\n * entries created before `_now` will be considered stale,\n * but only if they support a stale window.\n *\n * Behavior:\n * - Each call replaces any previous invalidation timestamp for the tag.\n * - Entries created after `_now` are unaffected.\n *\n * @param state - The cache state containing tag metadata.\n * @param tags - A tag or list of tags to invalidate.\n * @param options.asStale - Whether the tag should mark entries as stale.\n */\nexport function invalidateTag(\n state: CacheState,\n tags: string | string[],\n options: InvalidateTagOptions = {},\n\n /** @internal */\n _now: number = Date.now(),\n): void {\n const tagList = Array.isArray(tags) ? 
tags : [tags];\n const asStale = options.asStale ?? false;\n\n for (const tag of tagList) {\n const currentTag = state._tags.get(tag);\n\n if (currentTag) {\n // Update existing tag timestamps:\n // index 0 = expiredAt, index 1 = staleSinceAt\n if (asStale) {\n currentTag[1] = _now;\n } else {\n currentTag[0] = _now;\n }\n } else {\n // Initialize new tag entry with appropriate timestamp.\n // If marking as stale, expiredAt = 0 and staleSinceAt = _now.\n // If marking as expired, expiredAt = _now and staleSinceAt = 0.\n state._tags.set(tag, [asStale ? 0 : _now, asStale ? _now : 0]);\n }\n }\n}\n","import type { CacheState, CacheEntry } from \"../types\";\nimport { _metrics } from \"../utils/start-monitor\";\n\n/**\n * Sets or updates a value in the cache with TTL and an optional stale window.\n *\n * @param state - The cache state.\n * @param input - Cache entry definition (key, value, ttl, staleWindow, tags).\n * @param now - Optional timestamp override used as the base time (defaults to Date.now()).\n * @returns True if the entry was created or updated, false if rejected due to limits or invalid input.\n *\n * @remarks\n * - `ttl` defines when the entry becomes expired.\n * - `staleWindow` defines how long the entry may still be served as stale\n * after the expiration moment (`now + ttl`).\n * - Returns false if value is `undefined` (entry ignored, existing value untouched).\n * - Returns false if new entry would exceed `maxSize` limit (existing keys always allowed).\n * - Returns false if new entry would exceed `maxMemorySize` limit (existing keys always allowed).\n * - Returns true if entry was set or updated (or if existing key was updated at limit).\n */\nexport const setOrUpdate = (\n state: CacheState,\n input: CacheSetOrUpdateInput,\n\n /** @internal */\n now: number = Date.now(),\n): boolean => {\n const { key, value, ttl: ttlInput, staleWindow: staleWindowInput, tags } = input;\n\n if (value === undefined) return false; // Ignore undefined values, leaving existing entry intact if it exists\n if (key == null) throw new Error(\"Missing key.\");\n if (state.size >= state.maxSize && !state.store.has(key)) {\n // Ignore new entries when max size is reached, but allow updates to existing keys\n return false;\n }\n if (\n !__BROWSER__ &&\n _metrics?.memory.total.rss &&\n _metrics?.memory.total.rss >= state.maxMemorySize * 1024 * 1024 &&\n !state.store.has(key)\n ) {\n // Ignore new entries when max memory size is reached, but allow updates to existing keys\n return false;\n }\n\n const ttl = ttlInput ?? state.defaultTtl;\n const staleWindow = staleWindowInput ?? state.defaultStaleWindow;\n\n const expiresAt = ttl > 0 ? now + ttl : Infinity;\n const entry: CacheEntry = [\n [\n now, // createdAt\n expiresAt, // expiresAt\n staleWindow > 0 ? expiresAt + staleWindow : 0, // staleExpiresAt (relative to expiration)\n ],\n value,\n typeof tags === \"string\" ? [tags] : Array.isArray(tags) ? 
tags : null,\n ];\n\n state.store.set(key, entry);\n return true;\n};\n\n/**\n * Input parameters for setting or updating a cache entry.\n */\nexport interface CacheSetOrUpdateInput {\n /**\n * Key under which the value will be stored.\n */\n key: string;\n\n /**\n * Value to be written to the cache.\n *\n * Considerations:\n * - Always overwrites any previous value, if one exists.\n * - `undefined` is ignored, leaving any previous value intact, if one exists.\n * - `null` is explicitly stored as a null value, replacing any previous value, if one exists.\n */\n value: unknown;\n\n /**\n * TTL (Time-To-Live) in milliseconds for this entry.\n */\n ttl?: number;\n\n /**\n * Optional stale window in milliseconds.\n *\n * Defines how long the entry may continue to be served as stale\n * after it has reached its expiration time.\n *\n * The window is always relative to the entry’s own expiration moment,\n * whether that expiration comes from an explicit `ttl` or from the\n * cache’s default TTL.\n *\n * If omitted, the cache-level default stale window is used.\n */\n staleWindow?: number;\n\n /**\n * Optional tags associated with this entry.\n */\n tags?: string | string[];\n}\n","import { clear } from \"./cache/clear\";\nimport { createCache } from \"./cache/create-cache\";\nimport { deleteKey } from \"./cache/delete\";\nimport { get } from \"./cache/get\";\nimport { has } from \"./cache/has\";\nimport { invalidateTag } from \"./cache/invalidate-tag\";\nimport { setOrUpdate } from \"./cache/set\";\nimport type { CacheOptions, CacheState, InvalidateTagOptions } from \"./types\";\n\nexport type { CacheOptions, InvalidateTagOptions } from \"./types\";\n\n/**\n * A TTL (Time-To-Live) cache implementation with support for expiration,\n * stale windows, tag-based invalidation, and automatic sweeping.\n *\n * Provides O(1) constant-time operations for all core methods.\n *\n * @example\n * ```typescript\n * const cache = new LocalTtlCache();\n * cache.set(\"user:123\", { name: \"Alice\" }, { ttl: 5 * 60 * 1000 });\n * const user = cache.get(\"user:123\"); // { name: \"Alice\" }\n * ```\n */\nexport class LocalTtlCache {\n private state: CacheState;\n\n /**\n * Creates a new cache instance.\n *\n * @param options - Configuration options for the cache (defaultTtl, defaultStaleWindow, maxSize, etc.)\n *\n * @example\n * ```typescript\n * const cache = new LocalTtlCache({\n * defaultTtl: 30 * 60 * 1000, // 30 minutes\n * defaultStaleWindow: 5 * 60 * 1000, // 5 minutes\n * maxSize: 500_000, // Maximum 500_000 entries\n * onExpire: (key, value) => console.log(`Expired: ${key}`),\n * onDelete: (key, value, reason) => console.log(`Deleted: ${key}, reason: ${reason}`),\n * });\n * ```\n */\n constructor(options?: CacheOptions) {\n this.state = createCache(options);\n }\n\n /**\n * Gets the current number of entries tracked by the cache.\n *\n * This value may include entries that are already expired but have not yet been\n * removed by the lazy cleanup system. Expired keys are cleaned only when it is\n * efficient to do so, so the count can temporarily be higher than the number of\n * actually valid (non‑expired) entries.\n *\n * @returns The number of entries currently stored (including entries pending cleanup)\n *\n * @example\n * ```typescript\n * console.log(cache.size); // e.g., 42\n * ```\n */\n get size(): number {\n return this.state.size;\n }\n\n /**\n * Retrieves a value from the cache.\n *\n * Returns the value if it exists and is not fully expired. 
If an entry is in the\n * stale window (expired but still within staleWindow), the stale value is returned.\n *\n\n * @param key - The key to retrieve\n * @returns The cached value if valid, undefined otherwise\n *\n * @example\n * ```typescript\n * const user = cache.get<{ name: string }>(\"user:123\");\n * ```\n *\n * @edge-cases\n * - Returns `undefined` if the key doesn't exist\n * - Returns `undefined` if the key has expired beyond the stale window\n * - Returns the stale value if within the stale window\n * - If `purgeStaleOnGet` is enabled, stale entries are deleted after being returned\n */\n get<T = unknown>(key: string): T | undefined {\n return get(this.state, key) as T | undefined;\n }\n\n /**\n * Sets or updates a value in the cache.\n *\n * If the key already exists, it will be completely replaced.\n *\n * @param key - The key under which to store the value\n * @param value - The value to cache (any type)\n * @param options - Optional configuration for this specific entry\n * @param options.ttl - Time-To-Live in milliseconds. Defaults to `defaultTtl`\n * @param options.staleWindow - How long to serve stale data after expiration (milliseconds)\n * @param options.tags - One or more tags for group invalidation\n * @returns True if the entry was set or updated, false if rejected due to limits or invalid input\n *\n * @example\n * ```typescript\n * const success = cache.set(\"user:123\", { name: \"Alice\" }, {\n * ttl: 5 * 60 * 1000,\n * staleWindow: 1 * 60 * 1000,\n * tags: \"user:123\",\n * });\n *\n * if (!success) {\n * console.log(\"Entry was rejected due to size or memory limits\");\n * }\n * ```\n *\n * @edge-cases\n * - Overwriting an existing key replaces it completely\n * - If `ttl` is 0 or Infinite, the entry never expires\n * - If `staleWindow` is larger than `ttl`, the entry can be served as stale longer than it was fresh\n * - Tags are optional; only necessary for group invalidation via `invalidateTag()`\n * - Returns `false` if value is `undefined` (existing value remains untouched)\n * - Returns `false` if new key would exceed [`maxSize`](./docs/configuration.md#maxsize-number) limit\n * - Returns `false` if new key would exceed [`maxMemorySize`](./docs/configuration.md#maxmemorysize-number) limit\n * - Updating existing keys always succeeds, even at limit\n */\n set(\n key: string,\n value: unknown,\n options?: {\n ttl?: number;\n staleWindow?: number;\n tags?: string | string[];\n },\n ): boolean {\n return setOrUpdate(this.state, {\n key,\n value,\n ttl: options?.ttl,\n staleWindow: options?.staleWindow,\n tags: options?.tags,\n });\n }\n\n /**\n * Deletes a specific key from the cache.\n *\n * @param key - The key to delete\n * @returns True if the key was deleted, false if it didn't exist\n *\n * @example\n * ```typescript\n * const wasDeleted = cache.delete(\"user:123\");\n * ```\n *\n * @edge-cases\n * - Triggers the `onDelete` callback with reason `'manual'`\n * - Does not trigger the `onExpire` callback\n * - Returns `false` if the key was already expired\n * - Deleting a non-existent key returns `false` without error\n */\n delete(key: string): boolean {\n return deleteKey(this.state, key);\n }\n\n /**\n * Checks if a key exists in the cache and is not fully expired.\n *\n * Returns true if the key exists and is either fresh or within the stale window.\n * Use this when you only need to check existence without retrieving the value.\n *\n * @param key - The key to check\n * @returns True if the key exists and is valid, false otherwise\n *\n * @example\n * 
```typescript\n * if (cache.has(\"user:123\")) {\n * // Key exists (either fresh or stale)\n * }\n * ```\n *\n * @edge-cases\n * - Returns `false` if the key doesn't exist\n * - Returns `false` if the key has expired beyond the stale window\n * - Returns `true` if the key is in the stale window (still being served)\n * - Both `has()` and `get()` have O(1) complexity; prefer `get()` if you need the value\n */\n has(key: string): boolean {\n return has(this.state, key);\n }\n\n /**\n * Removes all entries from the cache at once.\n *\n * This is useful for resetting the cache or freeing memory when needed.\n * The `onDelete` callback is NOT invoked during clear (intentional optimization).\n *\n * @example\n * ```typescript\n * cache.clear(); // cache.size is now 0\n * ```\n *\n * @edge-cases\n * - The `onDelete` callback is NOT triggered during clear\n * - Clears both expired and fresh entries\n * - Resets `cache.size` to 0\n */\n clear(): void {\n // NEXT: optional supor for onClear callback?\n clear(this.state);\n }\n\n /**\n * Marks all entries with one or more tags as expired (or stale, if requested).\n *\n * If an entry has multiple tags, invalidating ANY of those tags will invalidate the entry.\n *\n * @param tags - A single tag (string) or array of tags to invalidate\n * @param asStale - If true, marks entries as stale instead of fully expired (still served from stale window)\n *\n * @example\n * ```typescript\n * // Invalidate a single tag\n * cache.invalidateTag(\"user:123\");\n *\n * // Invalidate multiple tags\n * cache.invalidateTag([\"user:123\", \"posts:456\"]);\n * ```\n *\n * @edge-cases\n * - Does not throw errors if a tag has no associated entries\n * - Invalidating a tag doesn't prevent new entries from being tagged with it later\n * - The `onDelete` callback is triggered with reason `'expired'` (even if `asStale` is true)\n */\n invalidateTag(tags: string | string[], options?: InvalidateTagOptions): void {\n invalidateTag(this.state, tags, options ?? 
{});\n }\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAWA,MAAa,SAAS,UAA4B;AAChD,OAAM,MAAM,OAAO;;;;;ACVrB,MAAM,aAAqB;AAC3B,MAAM,aAAqB,KAAK;;;;;;;;;;;AAahC,MAAa,cAAsB,KAAK;;;;;AAMxC,MAAa,uBAA+B;;;;;AAM5C,MAAa,mBAA2B;;;;;;AAOxC,MAAa,0BAAkC;;;;;;;;;;;AAa/C,MAAa,qBAA6B;;;;;AAM1C,MAAa,wBAAgC;;;;;AAM7C,MAAa,iCAAyC;;;;;;AAOtD,MAAa,4BAAoC;;;;;;;;;;;AAajD,MAAa,yBAAiC,IAAI;;;;;AAMlD,MAAa,uBAA+B;;;;;AAM5C,MAAa,0BAAkC;;;;;AAM/C,MAAa,sDAA8D;;;;;;;;;;;;AAc3E,MAAa,gCAAwC;;;;;;;;;;;;AAcrD,MAAa,wBAAgC;;;;;AAM7C,MAAa,qBAA6B;;;;;AAM1C,MAAa,sBAA8B;;;;;;;;;AClI3C,SAAS,WAAW,MAA6B;AAC/C,KAAI;EACF,MAAM,MAAM,WAAG,aAAa,MAAM,OAAO,CAAC,MAAM;EAChD,MAAM,IAAI,OAAO,IAAI;AACrB,SAAO,OAAO,SAAS,EAAE,GAAG,IAAI;SAC1B;AACN,SAAO;;;;;;;AAQX,SAAS,iBAAgC;CAEvC,MAAM,KAAK,WAAW,4BAA4B;AAClD,KAAI,OAAO,KAAM,QAAO;CAGxB,MAAM,KAAK,WAAW,8CAA8C;AACpE,KAAI,OAAO,KAAM,QAAO;AAExB,QAAO;;;;;;AAOT,SAAgB,wBAAgC;CAC9C,MAAM,YAAY,WAAG,mBAAmB,CAAC;CACzC,MAAM,cAAc,gBAAgB;AAEpC,KAAI,eAAe,cAAc,KAAK,cAAc,SAClD,QAAO,KAAK,IAAI,WAAW,YAAY;AAGzC,QAAO;;;;;;;;;;;;;;;;;;AC/BT,SAAgB,sBACd,SACqB;CACrB,IAAI,aAAoC;CAExC,IAAI,cAAyC;CAE7C,IAAI,aAAa,QAAQ,OAAO,QAAQ;CAExC,IAAI,UAAU,QAAQ,aAAa;CACnC,IAAI,UAAU,QAAQ,UAAU;CAChC,IAAI,WAAWA,uBAAY,sBAAsB;CACjD,IAAI,kBAAkB,KAAK,KAAK;CAEhC,MAAM,SAAS;EACb,UAAU,SAAS,YAAY;EAE/B,YAAY,SAAS,aAAa,OAAO,OAAO;EACjD;CAED,SAAS,QAAc;AACrB,MAAI,WAAY;AAEhB,eAAa,kBAAkB;AAC7B,OAAI;IACF,MAAM,MAAM,KAAK,KAAK;IAEtB,MAAM,UAAU,eAAe;KAC7B;KACA;KACA;KACA;KACA,WAAW,OAAO;KAClB,eAAe;KACf,uBAAuB;KACvB,UAAU,OAAO;KAClB,CAAC;AAEF,kBAAc;AACd,aAAS,WAAW,QAAQ;AAE5B,cAAU,QAAQ,IAAI;AACtB,eAAW,QAAQ,KAAK;AACxB,cAAU,QAAQ,OAAO;AAEzB,iBAAa,QAAQ,OAAO,QAAQ;AACpC,sBAAkB;YACX,GAAY;AACnB,UAAM;AACN,UAAM,IAAI,MAAM,kCAAkC,EAAE,OAAO,GAAG,CAAC;;KAEhE,OAAO,SAAS;AAEnB,MAAI,OAAO,WAAW,UAAU,WAC9B,YAAW,OAAO;;CAItB,SAAS,OAAa;AACpB,MAAI,YAAY;AACd,iBAAc,WAAW;AACzB,gBAAa;;;CAIjB,SAAS,aAAwC;AAC/C,MAAI,YACF,QAAO;AAET,SAAO;;CAGT,SAAS,aAAa,WAAwD;AAC5E,MAAI,UAAU,cAAc,OAE1B,QAAO,YAAY,UAAU,YAAY,OAAO;AAGlD,MAAI,UAAU,aAAa,QAAW;AACpC,UAAO,WAAW,UAAU;AAG5B,OAAI,YAAY;AACd,UAAM;AACN,WAAO;;;;AAKb,QAAO;EACL;EACA;EACA;EACA;EACD;;;;;;;;;;;;;AAcH,SAAgB,eAAe,OASR;CACrB,MAAM,YAAY,QAAQ,OAAO,QAAQ;CAGzC,MAAM,YADY,OAAO,YAAY,MAAM,WAAW,GACxB;CAC9B,MAAM,gBAAgB,MAAM,gBAAgB,MAAM;CAElD,MAAM,MAAM,QAAQ,aAAa;CACjC,MAAM,WAA+B;EACnC,KAAK,IAAI,MAAM,MAAM,QAAQ;EAC7B,WAAW,IAAI,YAAY,MAAM,QAAQ;EACzC,UAAU,IAAI,WAAW,MAAM,QAAQ;EACvC,UAAU,IAAI,WAAW,MAAM,QAAQ;EACvC,cAAc,IAAI,eAAe,MAAM,QAAQ;EAChD;CACD,MAAM,WAAW,KAAK,IAAI,GAAG,IAAI,MAAM,MAAM,UAAU;CAEvD,MAAM,WAAW,QAAQ,SAAS,MAAM,QAAQ;CAEhD,MAAM,YADS,SAAS,SAAS,SAAS,QAAQ,MACzB;CAEzB,MAAM,OAAOA,uBAAY,qBAAqB,MAAM,SAAS;AAE7D,QAAO;EACL,KAAK;GAEH,aAAa;GACb,OAAO;GACP,OAAO,QAAQ,UAAU;GAC1B;EAED,MAAM;GACJ,aAAa,KAAK;GAClB,OAAO;GACP,OAAOA,uBAAY,sBAAsB;GAC1C;EAED,QAAQ;GACN,aAAa;GACb,OAAO;GACP,OAAO;GACR;EAED,aAAa,MAAM;EACnB,qBAAqB,MAAM;EAC3B,UAAU,MAAM;EAChB;EACD;;;;;AC1KH,IAAI,mBAA+C;;AAGnD,IAAW;;AAGX,IAAW,iBAAyB;;AAGpC,MAAa,0BAA0B;AAEvC,SAAgB,eAAqB;AAMnC,KAAI,CAAC,kBAAkB;AACrB,MAAI;GACF,MAAM,qBAAqB,uBAAuB;AAElD,OAAI,sBAAsB,qBAAqB,EAC7C,kBAAkB,qBAAqB,OAAO,OAAQ;UAElD;AAMR,qBAAmB,sBAAsB;GACvC,SAAS,SAAS;AAChB,eAAW;;GAEb,UAAU;GACV,WAAW;GACZ,CAAC;AAEF,mBAAiB,OAAO;;;;;;;;;;;ACxC5B,SAAgB,yBAAyB,sBAAwC;AAC/E,MAAK,MAAM,QAAQ,iBAAiB;EAClC,MAAM,SAAS,qBAAqB,KAAK;AACzC,MAAI,UAAU,OAAO,SAAS,GAAG;GAC/B,MAAM,WAAW,OAAO,QAAQ,KAAK,QAAQ,MAAM,KAAK,EAAE,GAAG,OAAO;GAEpE,MAAM,QAAQ;AACd,QAAK,gBAAgB,KAAK,iBAAiB,IAAI,SAAS,WAAW;;;;;;;;;;;;;ACRzE,SAAgB,YAAY,EAC1B,OACA,WACA,SACA,SACA,SAOS;AAET,KAAI,cAAc,QAAS,QAAO;AAGlC,QAAO,WADI,QAAQ,cAAc,UAAU,cACrB,QAAQ;;;;;;;;;;;;;;;;;;;;;ACkDhC,MAAa,+BACX,YACuB;CACvB,MAAM,EACJ,SACA,UAAU,EA
AE,EACZ,yBAAyB,wBACzB,uBAAuB,sBACvB,yBAAyB,4BACvB;CAGJ,MAAM,eAAe,QAAQ,UAAU;CACvC,MAAM,YAAY,QAAQ,OAAO;CACjC,MAAM,aAAa,QAAQ,QAAQ;CAGnC,MAAM,oBAAoB,SAAS,OAAO,eAAe;CAGzD,MAAM,oBAAoB,SAAS,IAAI,eAAe;CACtD,MAAM,qBAAqB,SAAS,KAAK,eAAe;CAKxD,MAAM,iBAAiB,IAAI;CAC3B,MAAM,kBAAkB,IAAI;CAG5B,MAAM,cACJ,oBAAoB,eAAe,iBAAiB,YAAY,kBAAkB;CAEpF,MAAM,cAAc,eAAe,YAAY;CAG/C,MAAM,QAAQ,KAAK,IAAI,GAAG,KAAK,IAAI,GAAG,cAAc,YAAY,CAAC;AAoBjE,QAAO;EACL,iBAlBsB,YAAY;GAClC,OAAO;GACP,WAAW;GACX,SAAS;GACT,SAAS;GACT,OAAO;GACR,CAAC;EAaA,mBAVwB,YAAY;GACpC,OAAO;GACP,WAAW;GACX,SAAS;GACT,SAAS;GACT,OAAO;GACR,CAAC;EAKD;;;;;;;;;;;;;;;;;;;;;;;AC/GH,SAAgB,uBAAuB,EACrC,kBACA,cAIgC;CAGhC,IAAI,kBAAiD,gBAAgB;AAErE,KAAI,oBAAoB,GAAG;AAIzB,MAAI,aAAa,gBAAgB,OAE/B,mBAAkB;AAEpB,oBAAkB,gBAAgB,aAAa;QAC1C;EAIL,IAAI,YAAY,KAAK,QAAQ,GAAG;AAMhC,OAAK,MAAM,QAAQ,iBAAiB;AAClC,gBAAa,KAAK;AAClB,OAAI,aAAa,GAAG;AAClB,sBAAkB;AAClB;;;;AAKN,QAAO;;;;;AC1DT,IAAkB,0DAAX;AACL;AACA;AACA;;;;;;;;;AASF,MAAa,aACX,OACA,KACA,SAAwB,cAAc,WAC1B;CACZ,MAAM,WAAW,MAAM;CACvB,MAAM,WAAW,MAAM;AAEvB,KAAI,CAAC,YAAY,CAAC,SAChB,QAAO,MAAM,MAAM,OAAO,IAAI;CAGhC,MAAM,QAAQ,MAAM,MAAM,IAAI,IAAI;AAClC,KAAI,CAAC,MAAO,QAAO;AAEnB,OAAM,MAAM,OAAO,IAAI;AACvB,OAAM,WAAW,KAAK,MAAM,IAAI,OAAO;AACvC,KAAI,WAAW,cAAc,OAC3B,OAAM,WAAW,KAAK,MAAM,IAAI,OAAO;AAGzC,QAAO;;;;;;;;AC0HT,IAAY,wDAAL;;AAEL;;AAEA;;AAEA;;;;;;;;;;;;;;;;;;;;;;;;AC/IF,SAAgB,gBAAgB,OAAmB,OAA2C;CAC5F,MAAM,iBAAiB,MAAM,GAAG;CAIhC,IAAI,+BAA+B;CAGnC,IAAI,SAAS,aAAa;CAE1B,MAAM,OAAO,MAAM;AACnB,KAAI,KACF,MAAK,MAAM,OAAO,MAAM;EACtB,MAAM,KAAK,MAAM,MAAM,IAAI,IAAI;AAC/B,MAAI,CAAC,GAAI;EAKT,MAAM,CAAC,cAAc,mBAAmB;AAGxC,MAAI,gBAAgB,gBAAgB;AAClC,YAAS,aAAa;AACtB;;AAGF,MAAI,mBAAmB,gBAAgB;AAErC,OAAI,kBAAkB,6BACpB,gCAA+B;AAEjC,YAAS,aAAa;;;AAM5B,QAAO,CAAC,QAAQ,WAAW,aAAa,QAAQ,+BAA+B,EAAE;;;;;;;;;;;;;;;;;;;;ACxCnF,SAAgB,mBACd,OACA,OAGA,KACc;CACd,MAAM,CAAC,aAAa,WAAW,kBAAkB,MAAM;CAGvD,MAAM,CAAC,WAAW,gCAAgC,gBAAgB,OAAO,MAAM;AAC/E,KAAI,cAAc,aAAa,QAAS,QAAO,aAAa;CAC5D,MAAM,cAAc,iBAAiB;AACrC,KACE,cAAc,aAAa,SAC3B,iBAAiB,KACjB,MAAM,+BAA+B,YAKrC,QAAO,aAAa;AAItB,KAAI,MAAM,UACR,QAAO,aAAa;AAEtB,KAAI,iBAAiB,KAAK,MAAM,eAC9B,QAAO,aAAa;AAGtB,QAAO,aAAa;;;;;;;;;;;;;;;;;AAqBtB,MAAa,WACX,OACA,OACA,QACY;AACZ,KAAI,OAAO,UAAU,SAEnB,QAAO,UAAU,aAAa;AAGhC,QAAO,mBAAmB,OAAO,OAAO,IAAI,KAAK,aAAa;;;;;;;;;;;;;;;;;AAiBhE,MAAa,WACX,OACA,OAGA,QACY;AACZ,KAAI,OAAO,UAAU,SAEnB,QAAO,UAAU,aAAa;AAGhC,QAAO,mBAAmB,OAAO,OAAO,IAAI,KAAK,aAAa;;;;;;;;;;;;;;;;;AAkBhE,MAAa,aACX,OACA,OAGA,QACY;AACZ,KAAI,OAAO,UAAU,SAEnB,QAAO,UAAU,aAAa;AAGhC,QAAO,mBAAmB,OAAO,OAAO,IAAI,KAAK,aAAa;;;;;;;;;;;;AChIhE,SAAgB,WACd,OAMA,mBAA2B,oBACqD;AAChF,KAAI,CAAC,MAAM,WACT,OAAM,aAAa,MAAM,MAAM,SAAS;CAG1C,IAAI,YAAY;CAChB,IAAI,eAAe;CACnB,IAAI,aAAa;AAEjB,MAAK,IAAI,IAAI,GAAG,IAAI,kBAAkB,KAAK;EACzC,MAAM,OAAO,MAAM,WAAW,MAAM;AAEpC,MAAI,KAAK,MAAM;AACb,SAAM,aAAa,MAAM,MAAM,SAAS;AACxC;;AAGF,eAAa;EACb,MAAM,CAAC,KAAK,SAAS,KAAK;EAE1B,MAAM,MAAM,KAAK,KAAK;EAEtB,MAAM,SAAS,mBAAmB,OAAO,OAAO,IAAI;AACpD,MAAI,UAAU,OAAO,QAAQ,IAAI,EAAE;AACjC,aAAU,OAAO,KAAK,cAAc,QAAQ;AAC5C,mBAAgB;aACP,QAAQ,OAAO,QAAQ,IAAI,EAAE;AACtC,iBAAc;AAEd,OAAI,MAAM,kBACR,WAAU,OAAO,KAAK,cAAc,MAAM;;;CAKhD,MAAM,oBAAoB,MAAM,oBAAoB,aAAa;AACjE,QAAO;EACL;EACA;EACA;EACA,OAAO,YAAY,KAAK,eAAe,qBAAqB,YAAY;EACzE;;;;;;;;;;;;;;;;;ACzCH,SAAgB,gCACd,uBAA+B,2BACvB;CACR,MAAM,6BAA6B,iCAAiC;CAEpE,MAAM,sBAAsB,YAAY;EACtC,OAAO,UAAU,OAAO,eAAe;EAEvC,WAAW;EACX,SAAS;EAET,SAAS;EACT,OAAO;EACR,CAAC;AAKF,QAAO,KAAK,IAAI,GAAG,KAAK,IAAI,GAAG,oBAAoB,CAAC;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;ACatD,SAAgB,qBAA6B;CAC3C,IAAI,mBAAmB;AAEvB,MAAK,MAAM,aAAa,iBAAiB;AACvC,MAAI,UAAU,MAAM,QAAQ,GAAG;AAE7B,aAAU,eAAe;AACzB;;EAKF,IAAI,eAAe;AACnB,MA
AI,UAAU,gBAAgB,sBAC5B,gBAAe,UAAU;EAGT;GAEhB,MAAM,yBAAyB,gCAC7B,UAAU,sBACX;AAED,OAAI,gBAAgB,wBAAwB;AAG1C,cAAU,eAAe;AACzB;;;AAKJ,YAAU,eAAe,UAAU,MAAM,OAAO;AAChD,sBAAoB,UAAU;;AAGhC,QAAO;;;;;;;;;;ACnET,MAAa,QAAQ,OACnB,OAGA,YAA4B,EAAE,KACZ;CAClB,MAAM,EACJ,WAAW,iBACX,UAAU,gBACV,MAAM,KAAK,KAAK,EAChB,aAAa,UACX;CACJ,MAAM,YAAY;CAElB,IAAI,kBAAkB;CACtB,IAAI,oBAAoB;AACxB,KAAoB,SAClB,EAAC,CAAE,iBAAiB,qBAAsB,4BAA4B,EAAE,SAAS,UAAU,CAAC;CAG9F,MAAM,mBAAmB,oBAAoB;CAC7C,MAAM,uBAAmC,EAAE;CAK3C,MAAM,kBACJ,oBAAoB,IAAI,qBAAqB,gBAAgB,SAAS;CAExE,IAAI,aAAa;AACjB,QAAO,MAAM;AACX,gBAAc;EAEd,MAAM,kBAAkB,uBAAuB;GAAE;GAAY;GAAkB,CAAC;AAChF,MAAI,CAAC,gBAEH;EAGF,MAAM,EAAE,UAAU,WAAW,iBAAiB,gBAAgB;AAE9D,GAAC,qBAAqB,gBAAgB,yBAAyB,EAAE,EAAE,KAAK,MAAM;AAE9E,MAAI,KAAK,KAAK,GAAG,YAAY,kBAC3B;AAGF,QAAM,SAAS;;AAGjB,0BAAyB,qBAAqB;AAG9C,KAAI,CAAC,WACH,gBAAe,KAAK,MAAM,OAAO,UAAU,EAAE,gBAAgB;;AAKjE,MAAM,mBAAiC,IAAI,OAAO;CAChD,MAAM,IAAI,WAAW,IAAI,GAAG;AAC5B,KAAI,OAAO,EAAE,UAAU,WAAY,GAAE,OAAO;;AAE9C,MAAa,uBAAoC,IAAI,SAAQ,YAAW,aAAa,QAAQ,CAAC;;;;ACxE9F,IAAI,iBAAiB;AACrB,MAAM,6BAA6B;AACnC,MAAa,kBAAgC,EAAE;AAU/C,IAAI,sBAAsB;;;;;;AAO1B,MAAa,eAAe,UAAwB,EAAE,KAAiB;CACrE,MAAM,EACJ,UACA,UACA,aAAa,aACb,UAAU,kBACV,gBAAgB,yBAChB,wBAAwB,2BACxB,qBAAqB,sBACrB,kBAAkB,OAClB,oBAAoB,OACpB,kBAAkB,SAChB;AAEJ;AAIA,KAAI,iBAAiB,2BAGnB,SAAQ,KACN,gCAAgC,eAAe,kNAChD;CAGH,MAAM,QAAoB;EACxB,uBAAO,IAAI,KAAK;EAChB,YAAY;EACZ,IAAI,OAAO;AACT,UAAO,MAAM,MAAM;;EAErB;EACA;EACA;EACA;EACA;EACA;EACA;EACA;EACA;EACA;EACA,qBAAqB;EACrB,eAAe;EACf,cAAc;EACd,uBAAO,IAAI,KAAK;EACjB;AAED,OAAM,sBAAsB,gBAAgB,KAAK,MAAM,GAAG;AAG1D,KAAI,iBAAiB;AACnB,MAAI,oBAAqB,QAAO;AAChC,wBAAsB;AACtB,EAAK,MAAM,MAAM;;AAGnB,eAAc;AAEd,QAAO;;;;;;;;;;;;AC7ET,MAAa,OAAO,OAAmB,KAAa,MAAc,KAAK,KAAK,KAAc;CACxF,MAAM,QAAQ,MAAM,MAAM,IAAI,IAAI;AAElC,KAAI,CAAC,MAAO,QAAO;CAEnB,MAAM,SAAS,mBAAmB,OAAO,OAAO,IAAI;AAEpD,KAAI,QAAQ,OAAO,QAAQ,IAAI,CAAE,QAAO,MAAM;AAE9C,KAAI,QAAQ,OAAO,QAAQ,IAAI,EAAE;AAC/B,MAAI,MAAM,gBACR,WAAU,OAAO,KAAK,cAAc,MAAM;AAE5C,SAAO,MAAM;;AAIf,WAAU,OAAO,KAAK,cAAc,QAAQ;;;;;;;;;;;;AClB9C,MAAa,OAAO,OAAmB,KAAa,MAAc,KAAK,KAAK,KAAc;AACxF,QAAO,IAAI,OAAO,KAAK,IAAI,KAAK;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;ACelC,SAAgB,cACd,OACA,MACA,UAAgC,EAAE,EAGlC,OAAe,KAAK,KAAK,EACnB;CACN,MAAM,UAAU,MAAM,QAAQ,KAAK,GAAG,OAAO,CAAC,KAAK;CACnD,MAAM,UAAU,QAAQ,WAAW;AAEnC,MAAK,MAAM,OAAO,SAAS;EACzB,MAAM,aAAa,MAAM,MAAM,IAAI,IAAI;AAEvC,MAAI,WAGF,KAAI,QACF,YAAW,KAAK;MAEhB,YAAW,KAAK;MAMlB,OAAM,MAAM,IAAI,KAAK,CAAC,UAAU,IAAI,MAAM,UAAU,OAAO,EAAE,CAAC;;;;;;;;;;;;;;;;;;;;;;;ACjCpE,MAAa,eACX,OACA,OAGA,MAAc,KAAK,KAAK,KACZ;CACZ,MAAM,EAAE,KAAK,OAAO,KAAK,UAAU,aAAa,kBAAkB,SAAS;AAE3E,KAAI,UAAU,OAAW,QAAO;AAChC,KAAI,OAAO,KAAM,OAAM,IAAI,MAAM,eAAe;AAChD,KAAI,MAAM,QAAQ,MAAM,WAAW,CAAC,MAAM,MAAM,IAAI,IAAI,CAEtD,QAAO;AAET,KAEE,UAAU,OAAO,MAAM,OACvB,UAAU,OAAO,MAAM,OAAO,MAAM,gBAAgB,OAAO,QAC3D,CAAC,MAAM,MAAM,IAAI,IAAI,CAGrB,QAAO;CAGT,MAAM,MAAM,YAAY,MAAM;CAC9B,MAAM,cAAc,oBAAoB,MAAM;CAE9C,MAAM,YAAY,MAAM,IAAI,MAAM,MAAM;CACxC,MAAM,QAAoB;EACxB;GACE;GACA;GACA,cAAc,IAAI,YAAY,cAAc;GAC7C;EACD;EACA,OAAO,SAAS,WAAW,CAAC,KAAK,GAAG,MAAM,QAAQ,KAAK,GAAG,OAAO;EAClE;AAED,OAAM,MAAM,IAAI,KAAK,MAAM;AAC3B,QAAO;;;;;;;;;;;;;;;;;;ACpCT,IAAa,gBAAb,MAA2B;CACzB,AAAQ;;;;;;;;;;;;;;;;;CAkBR,YAAY,SAAwB;AAClC,OAAK,QAAQ,YAAY,QAAQ;;;;;;;;;;;;;;;;;CAkBnC,IAAI,OAAe;AACjB,SAAO,KAAK,MAAM;;;;;;;;;;;;;;;;;;;;;;;CAwBpB,IAAiB,KAA4B;AAC3C,SAAO,IAAI,KAAK,OAAO,IAAI;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CAuC7B,IACE,KACA,OACA,SAKS;AACT,SAAO,YAAY,KAAK,OAAO;GAC7B;GACA;GACA,KAAK,SAAS;GACd,aAAa,SAAS;GACtB,MAAM,SAAS;GAChB,CAAC;;;;;;;;;;;;;;;;;;;CAoBJ,OAAO,KAAsB;AAC3B,SAAO,UAAU,KAAK,OAAO,IAAI;;;;;;;;;;;;;;;;;;;;;;;;CAyBnC,IAAI,KAAsB;AACxB,
SAAO,IAAI,KAAK,OAAO,IAAI;;;;;;;;;;;;;;;;;;CAmB7B,QAAc;AAEZ,QAAM,KAAK,MAAM;;;;;;;;;;;;;;;;;;;;;;;;CAyBnB,cAAc,MAAyB,SAAsC;AAC3E,gBAAc,KAAK,OAAO,MAAM,WAAW,EAAE,CAAC"}
+ {"version":3,"file":"index.cjs","names":["performance"],"sources":["../../src/cache/clear.ts","../../src/defaults.ts","../../src/resolve-purge-config/validators.ts","../../src/resolve-purge-config/formatters.ts","../../src/resolve-purge-config/warnings.ts","../../src/resolve-purge-config/core.ts","../../src/resolve-purge-config/get.ts","../../src/resolve-purge-config/metric.ts","../../src/resolve-purge-config/sweep.ts","../../src/utils/get-process-memory-limit.ts","../../src/utils/process-monitor.ts","../../src/utils/start-monitor.ts","../../src/sweep/batchUpdateExpiredRatio.ts","../../src/utils/interpolate.ts","../../src/sweep/calculate-optimal-sweep-params.ts","../../src/sweep/select-instance-to-sweep.ts","../../src/cache/delete.ts","../../src/types.ts","../../src/utils/status-from-tags.ts","../../src/cache/validators.ts","../../src/utils/purge-eval.ts","../../src/sweep/sweep-once.ts","../../src/sweep/calculate-optimal-max-expired-ratio.ts","../../src/sweep/update-weight.ts","../../src/sweep/sweep.ts","../../src/cache/create-cache.ts","../../src/cache/get.ts","../../src/cache/has.ts","../../src/cache/invalidate-tag.ts","../../src/cache/set.ts","../../src/index.ts"],"sourcesContent":["import type { CacheState } from \"../types\";\n\n/**\n * Clears all entries from the cache without invoking callbacks.\n *\n * @note The `onDelete` callback is NOT invoked during a clear operation.\n * This is intentional to avoid unnecessary overhead when bulk-removing entries.\n *\n * @param state - The cache state.\n * @returns void\n */\nexport const clear = (state: CacheState): void => {\n state.store.clear();\n};\n","// Time Unit Constants\n// Base temporal units used throughout the caching system.\nconst ONE_SECOND: number = 1000;\nconst ONE_MINUTE: number = 60 * ONE_SECOND;\n\n/**\n * ===================================================================\n * Cache Entry Lifecycle\n * Default TTL and stale window settings for short-lived cache entries.\n * ===================================================================\n */\n\n/**\n * Default Time-To-Live in milliseconds for cache entries.\n * @default 1_800_000 (30 minutes)\n */\nexport const DEFAULT_TTL: number = 30 * ONE_MINUTE;\n\n/**\n * Default stale window in milliseconds after expiration.\n * Allows serving slightly outdated data while fetching fresh data.\n */\nexport const DEFAULT_STALE_WINDOW: number = 0 as const;\n\n/**\n * Maximum number of entries the cache can hold.\n * Beyond this limit, new entries are ignored.\n */\nexport const DEFAULT_MAX_SIZE: number = Infinity;\n\n/**\n * Default maximum memory size in MB the cache can use.\n * Beyond this limit, new entries are ignored.\n * @default Infinite (unlimited)\n */\nexport const DEFAULT_MAX_MEMORY_SIZE: number = Infinity;\n\n/**\n * ===================================================================\n * Sweep & Cleanup Operations\n * Parameters controlling how and when expired entries are removed.\n * ===================================================================\n */\n\n/**\n * Maximum number of keys to process in a single sweep batch.\n * Higher values = more aggressive cleanup, lower latency overhead.\n */\nexport const MAX_KEYS_PER_BATCH: number = 1000;\n\n/**\n * Minimal expired ratio enforced during sweeps.\n * Ensures control sweeps run above {@link EXPIRED_RATIO_MEMORY_THRESHOLD}.\n */\nexport const MINIMAL_EXPIRED_RATIO: number = 0.05;\n\n/**\n * Memory usage threshold (normalized 0–1) triggering control sweeps.\n * At or above this level, sweeping becomes more 
aggressive.\n */\nexport const EXPIRED_RATIO_MEMORY_THRESHOLD: number = 0.8;\n\n/**\n * Maximum allowed expired ratio when memory usage is low.\n * Upper bound for interpolation with MINIMAL_EXPIRED_RATIO.\n * Recommended range: `0.3 – 0.5` .\n */\nexport const DEFAULT_MAX_EXPIRED_RATIO: number = 0.4;\n\n/**\n * ===================================================================\n * Sweep Intervals & Timing\n * Frequency and time budgets for cleanup operations.\n * ===================================================================\n */\n\n/**\n * Optimal interval in milliseconds between sweeps.\n * Used when system load is minimal and metrics are available.\n */\nexport const OPTIMAL_SWEEP_INTERVAL: number = 2 * ONE_SECOND;\n\n/**\n * Worst-case interval in milliseconds between sweeps.\n * Used when system load is high or metrics unavailable.\n */\nexport const WORST_SWEEP_INTERVAL: number = 200;\n\n/**\n * Maximum time budget in milliseconds for sweep operations.\n * Prevents sweeping from consuming excessive CPU during high load.\n */\nexport const WORST_SWEEP_TIME_BUDGET: number = 40;\n\n/**\n * Optimal time budget in milliseconds for each sweep cycle.\n * Used when performance metrics are not available or unreliable.\n */\nexport const OPTIMAL_SWEEP_TIME_BUDGET_IF_NOTE_METRICS_AVAILABLE: number = 15;\n\n/**\n * ===================================================================\n * Memory Management\n * Process limits and memory-safe thresholds.\n * ===================================================================\n */\n\n/**\n * Default maximum process memory limit in megabytes.\n * Acts as fallback when environment detection is unavailable.\n * NOTE: Overridable via environment detection at runtime.\n */\nexport const DEFAULT_MAX_PROCESS_MEMORY_MB: number = 1024;\n\n/**\n * ===================================================================\n * System Utilization Weights\n * Balance how memory, CPU, and event-loop pressure influence sweep behavior.\n * Sum of all weights: 10 + 8.5 + 6.5 = 25\n * ===================================================================\n */\n\n/**\n * Weight applied to memory utilization in sweep calculations.\n * Higher weight = memory pressure has more influence on sweep aggressiveness.\n */\nexport const DEFAULT_MEMORY_WEIGHT: number = 10;\n\n/**\n * Weight applied to CPU utilization in sweep calculations.\n * Combined with event-loop weight to balance CPU-related pressure.\n */\nexport const DEFAULT_CPU_WEIGHT: number = 8.5;\n\n/**\n * Weight applied to event-loop utilization in sweep calculations.\n * Complements CPU weight to assess overall processing capacity.\n */\nexport const DEFAULT_LOOP_WEIGHT: number = 6.5;\n\n/**\n * ===================================================================\n * Stale Entry Purging\n * Thresholds and metric selection for stale entry cleanup strategy.\n * ===================================================================\n */\n\n/**\n * Default metric used for resource‑based stale purging\n * when both size and memory limits are available.\n */\nexport const DEFAULT_PURGE_RESOURCE_METRIC: \"higher\" = \"higher\" as const;\n\n/**\n * Fallback behavior for stale purging on GET\n * when no resource limits are defined.\n *\n * In this scenario, threshold-based purging is disabled,\n * so GET operations do NOT purge stale entries.\n */\nexport const DEFAULT_PURGE_STALE_ON_GET_NO_LIMITS: boolean = false;\n\n/**\n * Fallback behavior for stale purging on SWEEP\n * when no resource limits are defined.\n *\n * In this 
scenario, threshold-based purging is disabled,\n * so SWEEP operations DO purge stale entries to prevent buildup.\n */\nexport const DEFAULT_PURGE_STALE_ON_SWEEP_NO_LIMITS: boolean = true;\n\n/**\n * Default threshold for purging stale entries on get operations (backend with limits).\n * Stale entries are purged when resource usage exceeds 80%.\n *\n * Note: This is used when limits are configured.\n * When no limits are defined, purgeStaleOnGet defaults to false.\n */\nexport const DEFAULT_PURGE_STALE_ON_GET_THRESHOLD: number = 0.8;\n\n/**\n * Default threshold for purging stale entries during sweep operations (backend with limits).\n * Stale entries are purged when resource usage exceeds 50%.\n *\n * Note: This is used when limits are configured.\n * When no limits are defined, purgeStaleOnSweep defaults to true.\n */\nexport const DEFAULT_PURGE_STALE_ON_SWEEP_THRESHOLD: number = 0.5;\n","/**\n * Validates if a numeric value is a valid positive limit.\n * @internal\n */\nexport const isValidLimit = (value: number): boolean => Number.isFinite(value) && value > 0;\n\n/**\n * Checks if the required limits are configured for the given metric.\n * @internal\n */\nexport const checkRequiredLimits = (\n metric: \"size\" | \"memory\" | \"higher\" | \"fixed\",\n limitStatus: {\n hasSizeLimit: boolean;\n hasMemoryLimit: boolean;\n },\n): boolean => {\n if (metric === \"fixed\") return false;\n if (metric === \"size\") return limitStatus.hasSizeLimit;\n if (metric === \"memory\") return limitStatus.hasMemoryLimit;\n if (metric === \"higher\") return limitStatus.hasSizeLimit && limitStatus.hasMemoryLimit;\n return false;\n};\n","import type { PurgeMode } from \"../types\";\n\n/**\n * Gets the requirement text for a metric when limits are missing.\n * @internal\n */\nexport const getLimitRequirementText = (metric: \"size\" | \"memory\" | \"higher\" | \"fixed\"): string => {\n if (metric === \"fixed\") return \"Numeric thresholds are not supported (metric is 'fixed')\";\n if (metric === \"size\") return \"'maxSize' must be a valid positive number\";\n if (metric === \"memory\") return \"'maxMemorySize' must be a valid positive number\";\n if (metric === \"higher\")\n return \"both 'maxSize' and 'maxMemorySize' must be valid positive numbers\";\n return \"required configuration\";\n};\n\n/**\n * Formats a purge mode value for display.\n * @internal\n */\nexport const formatPurgeValue = (mode: PurgeMode): string => {\n if (typeof mode === \"number\") return `threshold ${(mode * 100).toFixed(0)}%`;\n return `${mode}`;\n};\n","import type { PurgeMode } from \"../types\";\n\nimport { formatPurgeValue } from \"./formatters\";\nimport { getLimitRequirementText } from \"./formatters\";\n\n/**\n * Warns user about invalid purge configuration.\n * Only called when user-provided threshold value is invalid.\n *\n * @internal\n */\nexport const warnInvalidPurgeMode = (\n config: {\n /** User-provided purge mode value (threshold or boolean). */\n mode: PurgeMode;\n /** Selected purge resource metric. */\n metric: \"size\" | \"memory\" | \"higher\" | \"fixed\";\n /** Operation that triggered validation: purgeStaleOnGet or purgeStaleOnSweep. */\n operation: \"purgeStaleOnGet\" | \"purgeStaleOnSweep\";\n /** Default fallback value when user value is invalid. */\n fallback: PurgeMode;\n },\n invalidConditions: {\n /** Numeric value outside valid threshold range (0 < value ≤ 1). */\n isOutOfRange: boolean;\n /** Numeric threshold used with unsupported metric='fixed'. 
*/\n isIncompatibleWithMetric: boolean;\n /** Numeric threshold without required configuration limits for metric. */\n isMissingLimits: boolean;\n },\n): void => {\n // Threshold range validation: must be 0 < value <= 1\n if (invalidConditions.isOutOfRange) {\n console.warn(\n `[Cache] ${config.operation}: Set to ${formatPurgeValue(config.mode)} with purgeResourceMetric '${config.metric}'.\\n` +\n ` ⚠ Invalid: Numeric threshold must be between 0 (exclusive) and 1 (inclusive).\\n` +\n ` ✓ Fallback: ${config.operation} = ${formatPurgeValue(config.fallback)}, purgeResourceMetric = '${config.metric}'`,\n );\n return;\n }\n\n // Metric compatibility: 'fixed' metric doesn't support threshold values\n if (invalidConditions.isIncompatibleWithMetric) {\n console.warn(\n `[Cache] ${config.operation}: Set to ${formatPurgeValue(config.mode)} with purgeResourceMetric '${config.metric}'.\\n` +\n ` ⚠ Not supported: Numeric thresholds don't work with purgeResourceMetric 'fixed'.\\n` +\n ` ✓ Fallback: ${config.operation} = ${formatPurgeValue(config.fallback)}, purgeResourceMetric = '${config.metric}'`,\n );\n return;\n }\n\n // Configuration validation: metric requires matching limits\n if (invalidConditions.isMissingLimits) {\n const requirement = getLimitRequirementText(config.metric);\n console.warn(\n `[Cache] ${config.operation}: Set to ${formatPurgeValue(config.mode)} with purgeResourceMetric '${config.metric}'.\\n` +\n ` ⚠ Not supported: ${requirement}\\n` +\n ` ✓ Fallback: ${config.operation} = ${formatPurgeValue(config.fallback)}, purgeResourceMetric = '${config.metric}'`,\n );\n }\n};\n","import type { PurgeMode } from \"../types\";\n\nimport { checkRequiredLimits, isValidLimit } from \"./validators\";\nimport { warnInvalidPurgeMode } from \"./warnings\";\n\n/**\n * Generic purge mode resolver that handles both get and sweep operations.\n *\n * Resolves valid user values or returns appropriate defaults based on:\n * - Available configuration limits (maxSize, maxMemorySize)\n * - Purge resource metric support (size, memory, higher, fixed)\n * - User-provided threshold validity (0 < value ≤ 1)\n *\n * Behavior:\n * - Boolean values (true/false): always valid, returns as-is\n * - Numeric thresholds (0-1): validated against 3 conditions:\n * 1. Range validation: must be 0 < value ≤ 1\n * 2. Metric compatibility: metric must support thresholds (not 'fixed')\n * 3. Configuration requirement: metric's required limits must be set\n * - Invalid numerics: logs warning and returns configuration default\n *\n * Defaults:\n * - With required limits: threshold-based (0.80 for get, 0.5 for sweep)\n * - Without required limits: boolean (false for get, true for sweep)\n *\n * @internal\n */\nexport const resolvePurgeMode = (\n limits: {\n maxSize: number;\n maxMemorySize: number;\n },\n config: {\n purgeResourceMetric: \"size\" | \"memory\" | \"higher\" | \"fixed\";\n operation: \"purgeStaleOnGet\" | \"purgeStaleOnSweep\";\n },\n defaults: {\n withLimits: number;\n withoutLimits: boolean;\n },\n userValue?: PurgeMode,\n): PurgeMode => {\n const hasSizeLimit = isValidLimit(limits.maxSize);\n const hasMemoryLimit = isValidLimit(limits.maxMemorySize);\n const hasRequiredLimits = checkRequiredLimits(config.purgeResourceMetric, {\n hasSizeLimit,\n hasMemoryLimit,\n });\n\n const fallback = hasRequiredLimits ? 
defaults.withLimits : defaults.withoutLimits;\n\n if (userValue !== undefined) {\n // Compute validity conditions once\n const isNumeric = typeof userValue === \"number\";\n const isOutOfRange = isNumeric && (userValue <= 0 || userValue > 1);\n const isIncompatibleWithMetric = isNumeric && config.purgeResourceMetric === \"fixed\";\n const isMissingLimits = isNumeric && !hasRequiredLimits;\n\n // Only warn if any condition is invalid\n if (isOutOfRange || isIncompatibleWithMetric || isMissingLimits) {\n warnInvalidPurgeMode(\n {\n mode: userValue,\n metric: config.purgeResourceMetric,\n operation: config.operation,\n fallback,\n },\n {\n isOutOfRange,\n isIncompatibleWithMetric,\n isMissingLimits,\n },\n );\n return fallback;\n }\n\n return userValue;\n }\n\n return fallback;\n};\n","import {\n DEFAULT_PURGE_STALE_ON_GET_NO_LIMITS,\n DEFAULT_PURGE_STALE_ON_GET_THRESHOLD,\n} from \"../defaults\";\nimport type { PurgeMode } from \"../types\";\n\nimport { resolvePurgeMode } from \"./core\";\n\n/**\n * Resolves the purgeStaleOnGet mode based on available configuration.\n *\n * Returns:\n * - User value if valid (boolean always valid; numeric must satisfy all conditions)\n * - Configuration default if user value is invalid\n *\n * Validation for numeric user values (0-1 thresholds):\n * - Must be in range: 0 < value ≤ 1\n * - Metric must support thresholds: not 'fixed'\n * - Metric must have required limits: 'size' needs maxSize, 'memory' needs maxMemorySize, 'higher' needs both\n *\n * Configuration defaults:\n * - With limits matching metric: 0.80 (80% purge threshold)\n * - Without matching limits: false (preserve stale entries)\n *\n * @param config - Configuration with limits, purgeResourceMetric, and optional userValue\n * @returns Valid purgeStaleOnGet value (boolean or threshold 0-1)\n *\n * @internal\n */\nexport const resolvePurgeStaleOnGet = (config: {\n limits: {\n maxSize: number;\n maxMemorySize: number;\n };\n purgeResourceMetric: \"size\" | \"memory\" | \"higher\" | \"fixed\";\n userValue?: PurgeMode;\n}): PurgeMode =>\n resolvePurgeMode(\n config.limits,\n {\n purgeResourceMetric: config.purgeResourceMetric,\n operation: \"purgeStaleOnGet\",\n },\n {\n withLimits: DEFAULT_PURGE_STALE_ON_GET_THRESHOLD,\n withoutLimits: DEFAULT_PURGE_STALE_ON_GET_NO_LIMITS,\n },\n config.userValue,\n );\n","import { isValidLimit } from \"./validators\";\n\n/**\n * Resolves the purge resource metric based on available limits and environment.\n *\n * - Browser: returns \"size\" if maxSize is valid, otherwise \"fixed\"\n * - Backend with both maxSize and maxMemorySize: returns \"higher\"\n * - Backend with only maxMemorySize: returns \"memory\"\n * - Backend with only maxSize: returns \"size\"\n * - Backend with no limits: returns \"fixed\"\n *\n * @param config - Configuration object with maxSize and maxMemorySize limits\n * @returns The appropriate purge resource metric for this configuration\n *\n * @internal\n */\nexport const resolvePurgeResourceMetric = (config: {\n maxSize: number;\n maxMemorySize: number;\n}): \"size\" | \"memory\" | \"higher\" | \"fixed\" => {\n const limitStatus = {\n hasSizeLimit: isValidLimit(config.maxSize),\n hasMemoryLimit: isValidLimit(config.maxMemorySize),\n };\n\n if (__BROWSER__) {\n return limitStatus.hasSizeLimit ? 
\"size\" : \"fixed\";\n }\n\n if (limitStatus.hasSizeLimit && limitStatus.hasMemoryLimit) return \"higher\";\n if (limitStatus.hasMemoryLimit) return \"memory\";\n if (limitStatus.hasSizeLimit) return \"size\";\n\n return \"fixed\";\n};\n","import { DEFAULT_PURGE_STALE_ON_SWEEP_THRESHOLD } from \"../defaults\";\nimport type { PurgeMode } from \"../types\";\n\nimport { resolvePurgeMode } from \"./core\";\n\n/**\n * Resolves the purgeStaleOnSweep mode based on available configuration.\n *\n * Returns:\n * - User value if valid (boolean always valid; numeric must satisfy all conditions)\n * - Configuration default if user value is invalid\n *\n * Validation for numeric user values (0-1 thresholds):\n * - Must be in range: 0 < value ≤ 1\n * - Metric must support thresholds: not 'fixed'\n * - Metric must have required limits: 'size' needs maxSize, 'memory' needs maxMemorySize, 'higher' needs both\n *\n * Configuration defaults:\n * - With limits matching metric: 0.5 (50% purge threshold)\n * - Without matching limits: true (always purge to prevent unbounded growth)\n *\n * @param config - Configuration with limits, purgeResourceMetric, and optional userValue\n * @returns Valid purgeStaleOnSweep value (boolean or threshold 0-1)\n *\n * @internal\n */\nexport const resolvePurgeStaleOnSweep = (config: {\n limits: {\n maxSize: number;\n maxMemorySize: number;\n };\n purgeResourceMetric: \"size\" | \"memory\" | \"higher\" | \"fixed\";\n userValue?: PurgeMode;\n}): PurgeMode =>\n resolvePurgeMode(\n config.limits,\n {\n purgeResourceMetric: config.purgeResourceMetric,\n operation: \"purgeStaleOnSweep\",\n },\n {\n withLimits: DEFAULT_PURGE_STALE_ON_SWEEP_THRESHOLD,\n withoutLimits: true,\n },\n config.userValue,\n );\n","import fs from \"fs\";\nimport v8 from \"v8\";\n\n/**\n * Reads a number from a file.\n * @param path File path to read the number from.\n * @returns The number read from the file, or null if reading fails.\n */\nfunction readNumber(path: string): number | null {\n try {\n const raw = fs.readFileSync(path, \"utf8\").trim();\n const n = Number(raw);\n return Number.isFinite(n) ? n : null;\n } catch {\n return null;\n }\n}\n\n/**\n * Gets the memory limit imposed by cgroups, if any.\n * @return The memory limit in bytes, or null if no limit is found.\n */\nfunction getCgroupLimit(): number | null {\n // cgroup v2\n const v2 = readNumber(\"/sys/fs/cgroup/memory.max\");\n if (v2 !== null) return v2;\n\n // cgroup v1\n const v1 = readNumber(\"/sys/fs/cgroup/memory/memory.limit_in_bytes\");\n if (v1 !== null) return v1;\n\n return null;\n}\n\n/**\n * Gets the effective memory limit for the current process, considering both V8 heap limits and cgroup limits.\n * @returns The effective memory limit in bytes.\n */\nexport function getProcessMemoryLimit(): number {\n const heapLimit = v8.getHeapStatistics().heap_size_limit;\n const cgroupLimit = getCgroupLimit();\n\n if (cgroupLimit && cgroupLimit > 0 && cgroupLimit < Infinity) {\n return Math.min(heapLimit, cgroupLimit);\n }\n\n return heapLimit;\n}\n","import { performance, type EventLoopUtilization } from \"perf_hooks\";\n\n/**\n * Creates a performance monitor that periodically samples memory usage,\n * CPU usage, and event loop utilization for the current Node.js process.\n *\n * The monitor runs on a configurable interval and optionally invokes a\n * callback with the collected metrics on each cycle. 
It also exposes\n * methods to start and stop monitoring, retrieve the latest metrics,\n * and update configuration dynamically.\n *\n * @param options Configuration options for the monitor, including sampling\n * interval, maximum thresholds for normalization, and an optional callback.\n * @returns An API object that allows controlling the monitor lifecycle.\n */\nexport function createMonitorObserver(\n options?: Partial<CreateMonitorObserverOptions>,\n): ReturnCreateMonitor {\n let intervalId: NodeJS.Timeout | null = null;\n\n let lastMetrics: PerformanceMetrics | null = null;\n\n let prevHrtime = process.hrtime.bigint();\n\n let prevMem = process.memoryUsage();\n let prevCpu = process.cpuUsage();\n let prevLoop = performance.eventLoopUtilization();\n let lastCollectedAt = Date.now();\n\n const config = {\n interval: options?.interval ?? 500,\n // options.maxMemory is expected in MB; store bytes internally\n maxMemory: (options?.maxMemory ?? 512) * 1024 * 1024,\n };\n\n function start(): void {\n if (intervalId) return; // already running\n\n intervalId = setInterval(() => {\n try {\n const now = Date.now();\n\n const metrics = collectMetrics({\n prevCpu,\n prevHrtime,\n prevMem,\n prevLoop,\n maxMemory: config.maxMemory,\n collectedAtMs: now,\n previousCollectedAtMs: lastCollectedAt,\n interval: config.interval,\n });\n\n lastMetrics = metrics;\n options?.callback?.(metrics);\n\n prevCpu = metrics.cpu.total;\n prevLoop = metrics.loop.total;\n prevMem = metrics.memory.total;\n\n prevHrtime = process.hrtime.bigint();\n lastCollectedAt = now;\n } catch (e: unknown) {\n stop();\n throw new Error(\"MonitorObserver: Not available\", { cause: e });\n }\n }, config.interval);\n\n if (typeof intervalId.unref === \"function\") {\n intervalId.unref();\n }\n }\n\n function stop(): void {\n if (intervalId) {\n clearInterval(intervalId);\n intervalId = null;\n }\n }\n\n function getMetrics(): PerformanceMetrics | null {\n if (lastMetrics) {\n return lastMetrics;\n }\n return null;\n }\n\n function updateConfig(newConfig: Partial<CreateMonitorObserverOptions>): void {\n if (newConfig.maxMemory !== undefined) {\n // convert MB -> bytes\n config.maxMemory = newConfig.maxMemory * 1024 * 1024;\n }\n\n if (newConfig.interval !== undefined) {\n config.interval = newConfig.interval;\n\n // restart if active to apply new interval\n if (intervalId) {\n stop();\n start();\n }\n }\n }\n\n return {\n start,\n stop,\n getMetrics,\n updateConfig,\n };\n}\n\n/**\n * Collects and normalizes performance metrics for the current process,\n * including memory usage, CPU usage, and event loop utilization.\n *\n * CPU and event loop metrics are computed as deltas relative to previously\n * recorded values. 
All metrics are normalized into a utilization between 0 and 1\n * based on the configured maximum thresholds.\n *\n * @param props Previous metric snapshots and normalization limits.\n * @returns A structured object containing normalized performance metrics.\n */\nexport function collectMetrics(props: {\n prevMem: NodeJS.MemoryUsage;\n prevCpu: NodeJS.CpuUsage;\n prevHrtime: bigint;\n prevLoop: EventLoopUtilization;\n maxMemory: number; // bytes\n collectedAtMs: number;\n previousCollectedAtMs: number;\n interval: number;\n}): PerformanceMetrics {\n const nowHrtime = process.hrtime.bigint();\n\n const elapsedNs = Number(nowHrtime - props.prevHrtime);\n const elapsedMs = elapsedNs / 1e6;\n const actualElapsed = props.collectedAtMs - props.previousCollectedAtMs;\n\n const mem = process.memoryUsage();\n const deltaMem: NodeJS.MemoryUsage = {\n rss: mem.rss - props.prevMem.rss,\n heapTotal: mem.heapTotal - props.prevMem.heapTotal,\n heapUsed: mem.heapUsed - props.prevMem.heapUsed,\n external: mem.external - props.prevMem.external,\n arrayBuffers: mem.arrayBuffers - props.prevMem.arrayBuffers,\n };\n const memRatio = Math.min(1, mem.rss / props.maxMemory);\n\n const cpuDelta = process.cpuUsage(props.prevCpu);\n const cpuMs = (cpuDelta.system + cpuDelta.user) / 1e3;\n const cpuRatio = cpuMs / elapsedMs;\n\n const loop = performance.eventLoopUtilization(props.prevLoop);\n\n return {\n cpu: {\n // deltaMs: cpuMs, // remove to avoid confusion with different unit type\n utilization: cpuRatio,\n delta: cpuDelta,\n total: process.cpuUsage(),\n },\n\n loop: {\n utilization: loop.utilization,\n delta: loop,\n total: performance.eventLoopUtilization(),\n },\n\n memory: {\n utilization: memRatio,\n delta: deltaMem,\n total: mem,\n },\n\n collectedAt: props.collectedAtMs,\n previousCollectedAt: props.previousCollectedAtMs,\n interval: props.interval,\n actualElapsed,\n };\n}\n\n// -----------------------------------------------------------------\n\n/**\n * Represents a metric extended with a normalized utilization between 0 and 1.\n *\n * The utilization indicates how close the metric is to its configured maximum\n * threshold, where 0 means minimal usage and 1 means the limit has been reached.\n *\n * @typeParam T The underlying metric type being normalized.\n */\nexport type NormalizedMetric<T> = T & {\n /** Normalized value between 0 and 1 */\n utilization: number;\n};\n\n/**\n * PerformanceMetrics describes the actual shape returned by collectMetrics.\n * All metric groups include raw `delta` and `total` objects plus a normalized utilization.\n */\nexport interface PerformanceMetrics {\n memory: NormalizedMetric<{\n delta: NodeJS.MemoryUsage;\n total: NodeJS.MemoryUsage;\n }>;\n\n cpu: NormalizedMetric<{\n delta: NodeJS.CpuUsage;\n total: NodeJS.CpuUsage;\n }>;\n\n loop: NormalizedMetric<{\n delta: EventLoopUtilization;\n total: EventLoopUtilization;\n }>;\n\n /** Timestamp in milliseconds when this metric was collected */\n collectedAt: number;\n\n /** Timestamp in milliseconds of the previous metric collection */\n previousCollectedAt: number;\n\n /** Interval in milliseconds at which the monitor is running */\n interval: number;\n\n /** Actual elapsed time in milliseconds since the last collection */\n actualElapsed: number;\n}\n\n/**\n * Options for createMonitorObserver.\n */\nexport interface CreateMonitorObserverOptions {\n /** Interval between samples in ms. Default: 500 */\n interval?: number;\n\n /** Maximum RSS memory in megabytes (MB) used for normalization. 
*/\n maxMemory?: number;\n\n /** Optional callback invoked on each metrics sample. */\n callback?: (metrics: PerformanceMetrics) => void;\n}\n\n/**\n * Public API returned by `createMonitorObserver`.\n *\n * Provides methods to start and stop monitoring, retrieve the latest metrics,\n * and update the monitor configuration at runtime.\n */\nexport interface ReturnCreateMonitor {\n /** Stops the monitoring interval */\n stop: () => void;\n\n /** Starts the monitoring interval */\n start: () => void;\n\n /** Returns the last collected metrics or null if none have been collected yet */\n getMetrics: () => PerformanceMetrics | null;\n\n /** Allows updating the monitor configuration on the fly */\n updateConfig: (newConfig: Partial<CreateMonitorObserverOptions>) => void;\n}\n","import { DEFAULT_MAX_PROCESS_MEMORY_MB, WORST_SWEEP_INTERVAL } from \"../defaults\";\n\nimport { getProcessMemoryLimit } from \"./get-process-memory-limit\";\nimport {\n createMonitorObserver,\n type PerformanceMetrics,\n type ReturnCreateMonitor,\n} from \"./process-monitor\";\n\nexport let _monitorInstance: ReturnCreateMonitor | null = null;\n\n/** Latest collected metrics from the monitor */\nexport let _metrics: PerformanceMetrics | null;\n\n/** Maximum memory limit for the monitor (in MB) */\nexport let maxMemoryLimit: number = DEFAULT_MAX_PROCESS_MEMORY_MB;\n\n/** Use 90% of the effective limit */\nexport const SAFE_MEMORY_LIMIT_RATIO = 0.9;\n\nexport function startMonitor(): void {\n if (__BROWSER__) {\n // Ignore monitor in browser environments\n return;\n }\n\n if (!_monitorInstance) {\n try {\n const processMemoryLimit = getProcessMemoryLimit();\n\n if (processMemoryLimit && processMemoryLimit > 0) {\n maxMemoryLimit = (processMemoryLimit / 1024 / 1024) * SAFE_MEMORY_LIMIT_RATIO;\n }\n } catch {\n // TODO: proper logger\n // Ignore errors and use default\n // console.log(\"error getProcessMemoryLimit:\", e);\n }\n\n _monitorInstance = createMonitorObserver({\n callback(metrics) {\n _metrics = metrics;\n },\n interval: WORST_SWEEP_INTERVAL,\n maxMemory: maxMemoryLimit, // 1 GB\n });\n\n _monitorInstance.start();\n }\n}\n","import { _instancesCache } from \"../cache/create-cache\";\n\n/**\n * Updates the expired ratio for each cache instance based on the collected ratios.\n * @param currentExpiredRatios - An array of arrays containing expired ratios for each cache instance.\n * @internal\n */\nexport function _batchUpdateExpiredRatio(currentExpiredRatios: number[][]): void {\n for (const inst of _instancesCache) {\n const ratios = currentExpiredRatios[inst._instanceIndexState];\n if (ratios && ratios.length > 0) {\n const avgRatio = ratios.reduce((sum, val) => sum + val, 0) / ratios.length;\n\n const alpha = 0.6; // NOTE: this must be alway higher than 0.5 to prioritize recent avgRatio\n inst._expiredRatio = inst._expiredRatio * (1 - alpha) + avgRatio * alpha;\n }\n }\n}\n","/**\n * Interpolates a value between two numeric ranges.\n *\n * Maps `value` from [fromStart, fromEnd] to [toStart, toEnd].\n * Works with inverted ranges, negative values, and any numeric input.\n */\nexport function interpolate({\n value,\n fromStart,\n fromEnd,\n toStart,\n toEnd,\n}: {\n value: number;\n fromStart: number;\n fromEnd: number;\n toStart: number;\n toEnd: number;\n}): number {\n // Explicit and predictable: avoid division by zero.\n if (fromStart === fromEnd) return toStart;\n\n const t = (value - fromStart) / (fromEnd - fromStart);\n return toStart + t * (toEnd - toStart);\n}\n","import {\n DEFAULT_CPU_WEIGHT,\n 
DEFAULT_LOOP_WEIGHT,\n DEFAULT_MEMORY_WEIGHT,\n OPTIMAL_SWEEP_INTERVAL,\n WORST_SWEEP_INTERVAL,\n WORST_SWEEP_TIME_BUDGET,\n} from \"../defaults\";\nimport { interpolate } from \"../utils/interpolate\";\nimport type { PerformanceMetrics } from \"../utils/process-monitor\";\n\n/**\n * Weights for calculating the weighted utilization ratio.\n * Each weight determines how strongly each metric influences the final ratio.\n */\nexport interface UtilizationWeights {\n /** Weight applied to memory utilization (non-inverted). Default: 1 */\n memory?: number;\n\n /** Weight applied to CPU utilization (inverted). Default: 1 */\n cpu?: number;\n\n /** Weight applied to event loop utilization (inverted). Default: 1 */\n loop?: number;\n}\n\n/**\n * Represents the calculated optimal sweep parameters based on system metrics.\n */\nexport interface OptimalSweepParams {\n /** The optimal interval in milliseconds between sweep operations. */\n sweepIntervalMs: number;\n\n /** The optimal maximum time budget in milliseconds for a sweep cycle. */\n sweepTimeBudgetMs: number;\n}\n\n/**\n * Options for customizing the sweep parameter calculation.\n */\ninterface CalculateOptimalSweepParamsOptions {\n /** System performance metrics to base the calculations on. */\n metrics: PerformanceMetrics;\n\n /** Optional custom weights for each utilization metric. */\n weights?: UtilizationWeights;\n\n /** Interval (ms) used when system load is minimal. */\n optimalSweepIntervalMs?: number;\n\n /** Interval (ms) used when system load is maximal. */\n worstSweepIntervalMs?: number;\n\n /** Maximum sweep time budget (ms) under worst-case load. */\n worstSweepTimeBudgetMs?: number;\n}\n\n/**\n * Calculates adaptive sweep parameters based on real-time system utilization.\n *\n * Memory utilization is used as-is: higher memory usage → more conservative sweeps.\n * CPU and event loop utilization are inverted: lower usage → more conservative sweeps.\n *\n * This inversion ensures:\n * - When CPU and loop are *free*, sweeping becomes more aggressive (worst-case behavior).\n * - When CPU and loop are *busy*, sweeping becomes more conservative (optimal behavior).\n *\n * The final ratio is a weighted average of the three metrics, clamped to [0, 1].\n * This ratio is then used to interpolate between optimal and worst-case sweep settings.\n *\n * @param options - Optional configuration for weights and sweep bounds.\n * @returns Interpolated sweep interval, time budget, and the ratio used.\n */\nexport const calculateOptimalSweepParams = (\n options: CalculateOptimalSweepParamsOptions,\n): OptimalSweepParams => {\n const {\n metrics,\n weights = {},\n optimalSweepIntervalMs = OPTIMAL_SWEEP_INTERVAL,\n worstSweepIntervalMs = WORST_SWEEP_INTERVAL,\n worstSweepTimeBudgetMs = WORST_SWEEP_TIME_BUDGET,\n } = options;\n\n // Resolve metric weights (default = 1)\n const memoryWeight = weights.memory ?? DEFAULT_MEMORY_WEIGHT;\n const cpuWeight = weights.cpu ?? DEFAULT_CPU_WEIGHT;\n const loopWeight = weights.loop ?? DEFAULT_LOOP_WEIGHT;\n\n // Memory utilization is used directly (0–1)\n const memoryUtilization = metrics?.memory.utilization ?? 0;\n\n // Raw CPU and loop utilization (0–1)\n const cpuUtilizationRaw = metrics?.cpu.utilization ?? 0;\n const loopUtilizationRaw = metrics?.loop.utilization ?? 
0;\n\n // Invert CPU and loop utilization:\n // - Low CPU/loop usage → high inverted value → pushes toward worst-case behavior\n // - High CPU/loop usage → low inverted value → pushes toward optimal behavior\n const cpuUtilization = 1 - cpuUtilizationRaw;\n const loopUtilization = 1 - loopUtilizationRaw;\n\n // Weighted average of all metrics\n const weightedSum =\n memoryUtilization * memoryWeight + cpuUtilization * cpuWeight + loopUtilization * loopWeight;\n\n const totalWeight = memoryWeight + cpuWeight + loopWeight;\n\n // Final utilization ratio clamped to [0, 1]\n const ratio = Math.min(1, Math.max(0, weightedSum / totalWeight));\n\n // Interpolate sweep interval based on the ratio\n const sweepIntervalMs = interpolate({\n value: ratio,\n fromStart: 0,\n fromEnd: 1,\n toStart: optimalSweepIntervalMs,\n toEnd: worstSweepIntervalMs,\n });\n\n // Interpolate sweep time budget based on the ratio\n const sweepTimeBudgetMs = interpolate({\n value: ratio,\n fromStart: 0,\n fromEnd: 1,\n toStart: 0,\n toEnd: worstSweepTimeBudgetMs,\n });\n\n return {\n sweepIntervalMs,\n sweepTimeBudgetMs,\n };\n};\n","import { _instancesCache } from \"../cache/create-cache\";\nimport type { CacheState } from \"../types\";\n\n/**\n * Selects a cache instance to sweep based on sweep weights or round‑robin order.\n *\n * Two selection modes are supported:\n * - **Round‑robin mode**: If `totalSweepWeight` ≤ 0, instances are selected\n * deterministically in sequence using `batchSweep`. Once all instances\n * have been processed, returns `null`.\n * - **Weighted mode**: If sweep weights are available, performs a probabilistic\n * selection. Each instance’s `_sweepWeight` contributes proportionally to its\n * chance of being chosen.\n *\n * This function depends on `_updateWeightSweep` to maintain accurate sweep weights.\n *\n * @param totalSweepWeight - Sum of all sweep weights across instances.\n * @param batchSweep - Current batch index used for round‑robin selection.\n * @returns The selected `CacheState` instance, `null` if no instance remains,\n * or `undefined` if the cache is empty.\n */\nexport function _selectInstanceToSweep({\n totalSweepWeight,\n batchSweep,\n}: {\n totalSweepWeight: number;\n batchSweep: number;\n}): CacheState | null | undefined {\n // Default selection: initialize with the first instance in the cache list.\n // This acts as a fallback in case no weighted selection occurs.\n let instanceToSweep: CacheState | null | undefined = _instancesCache[0];\n\n if (totalSweepWeight <= 0) {\n // Case 1: No sweep weight assigned (all instances skipped or empty).\n // → Perform a deterministic round‑robin minimal sweep across all instances.\n // Each batch iteration selects the next instance in order.\n if (batchSweep > _instancesCache.length) {\n // If all instances have been processed in this cycle, no instance to sweep.\n instanceToSweep = null;\n }\n instanceToSweep = _instancesCache[batchSweep - 1] as CacheState;\n } else {\n // Case 2: Sweep weights are available.\n // → Perform a probabilistic selection based on relative sweep weights.\n // A random threshold is drawn in [0, totalSweepWeight].\n let threshold = Math.random() * totalSweepWeight;\n\n // Iterate through instances, subtracting each instance’s weight.\n // The first instance that reduces the threshold to ≤ 0 is selected.\n // This ensures that instances with higher weights have proportionally\n // higher probability of being chosen for sweeping.\n for (const inst of _instancesCache) {\n threshold -= inst._sweepWeight;\n if 
(threshold <= 0) {\n instanceToSweep = inst;\n break;\n }\n }\n }\n\n return instanceToSweep;\n}\n","import type { CacheState } from \"../types\";\n\nexport const enum DELETE_REASON {\n MANUAL = \"manual\",\n EXPIRED = \"expired\",\n STALE = \"stale\",\n}\n\n/**\n * Deletes a key from the cache.\n * @param state - The cache state.\n * @param key - The key.\n * @returns A boolean indicating whether the key was successfully deleted.\n */\nexport const deleteKey = (\n state: CacheState,\n key: string,\n reason: DELETE_REASON = DELETE_REASON.MANUAL,\n): boolean => {\n const onDelete = state.onDelete;\n const onExpire = state.onExpire;\n\n if (!onDelete && !onExpire) {\n return state.store.delete(key);\n }\n\n const entry = state.store.get(key);\n if (!entry) return false;\n\n state.store.delete(key);\n state.onDelete?.(key, entry[1], reason);\n if (reason !== DELETE_REASON.MANUAL) {\n state.onExpire?.(key, entry[1], reason);\n }\n\n return true;\n};\n","import type { DELETE_REASON } from \"./cache/delete\";\n\n/**\n * Base configuration shared between CacheOptions and CacheState.\n */\nexport interface CacheConfigBase {\n /**\n * Callback invoked when an entry expires naturally (not manually deleted).\n * @param key - The expired key.\n * @param value - The value associated with the expired key.\n * @param reason - The reason for expiration: 'expired' (fully expired) or 'stale' (stale window expired).\n */\n onExpire?: (\n key: string,\n value: unknown,\n reason: Exclude<DELETE_REASON, DELETE_REASON.MANUAL>,\n ) => void;\n\n /**\n * Callback invoked when a key is deleted, either manually or due to expiration.\n * @param key - The deleted key.\n * @param value - The value of the deleted key.\n * @param reason - The reason for deletion ('manual', 'expired', or 'stale').\n */\n onDelete?: (key: string, value: unknown, reason: DELETE_REASON) => void;\n\n /**\n * Default TTL (Time-To-Live) in milliseconds for entries without explicit TTL.\n * @default 1_800_000 (30 minutes)\n */\n defaultTtl: number;\n\n /**\n * Default stale window in milliseconds for entries without explicit `staleWindow`.\n *\n * Defines how long an entry can be served as stale after expiration.\n * The window is relative to each entry's expiration moment, whether from\n * explicit `ttl` or the cache's `defaultTtl`.\n *\n * @default 0 (no stale window)\n */\n defaultStaleWindow: number;\n\n /**\n * Maximum number of entries the cache can hold.\n * Beyond this limit, new entries are ignored.\n * @default Infinite (unlimited)\n */\n maxSize: number;\n\n /**\n * Maximum memory size in MB the cache can use.\n * Beyond this limit, new entries are ignored.\n * @default Infinite (unlimited)\n */\n maxMemorySize: number;\n\n /**\n * Controls stale entry purging behavior on `get()` operations.\n *\n * Possible values:\n * - `true` → purge stale entries immediately after read.\n * - `false` → retain stale entries after read.\n * - `number (0-1)` → purge when `resourceUsage ≥ threshold` (uses `purgeResourceMetric`).\n *\n * Numeric threshold validation:\n * - Must be 0 < value ≤ 1 (boolean fallback if invalid range)\n * - Requires purgeResourceMetric to support thresholds (not 'fixed')\n * - Requires matching configuration limits for the metric:\n * * 'size' metric requires maxSize\n * * 'memory' metric requires maxMemorySize\n * * 'higher' metric requires both maxSize and maxMemorySize\n * - Invalid numeric values fallback to default: 0.80 (with limits) or false (without)\n *\n * Environment notes:\n * - Backend: `\"memory\"` and 
`\"higher\"` metrics available; frontend: only `\"size\"`.\n * - Can be overridden per-read via `get(key, { purgeStale })`.\n *\n * Defaults:\n * - With matching limits → `0.80` (80% resource usage).\n * - Without matching limits → `false`.\n */\n purgeStaleOnGet: PurgeMode;\n\n /**\n * Controls stale entry purging behavior during sweep operations.\n *\n * Possible values:\n * - `true` → purge stale entries during sweeps.\n * - `false` → retain stale entries during sweeps.\n * - `number (0-1)` → purge when `resourceUsage ≥ threshold` (uses `purgeResourceMetric`).\n *\n * Numeric threshold validation:\n * - Must be 0 < value ≤ 1 (boolean fallback if invalid range)\n * - Requires purgeResourceMetric to support thresholds (not 'fixed')\n * - Requires matching configuration limits for the metric:\n * * 'size' metric requires maxSize\n * * 'memory' metric requires maxMemorySize\n * * 'higher' metric requires both maxSize and maxMemorySize\n * - Invalid numeric values fallback to default: 0.5 (with limits) or true (without)\n *\n * Prevents stale entry accumulation when enabled. Without limits, defaults to `true`\n * to prevent unbounded growth.\n *\n * Environment notes:\n * - Backend: `\"memory\"` and `\"higher\"` metrics available; frontend: only `\"size\"`.\n *\n * Defaults:\n * - With matching limits → `0.5` (50% resource usage).\n * - Without matching limits → `true` (prevent unbounded accumulation).\n */\n purgeStaleOnSweep: PurgeMode;\n\n /**\n * Metric used to evaluate resource usage for threshold-based stale purging.\n *\n * Applies when `purgeStaleOnGet` or `purgeStaleOnSweep` are numeric (0-1).\n *\n * Metric options:\n * - `\"size\"` → normalized entry count (`current / maxSize`).\n * - `\"memory\"` → normalized RAM (`currentMB / maxMemorySize`).\n * - `\"higher\"` → max of both metrics (recommended for dual limits).\n * - `\"fixed\"` → disable threshold purging; only bool values apply.\n *\n * Environment support:\n * - Backend: all metrics available.\n * - Frontend: only `\"size\"`; numeric thresholds fallback to `\"fixed\"`.\n *\n * Auto-selection (if not specified):\n * - Backend: `\"higher\"` (both limits) → `\"memory\"` → `\"size\"` → `\"fixed\"`.\n * - Frontend: `\"size\"` (if valid) → `\"fixed\"`.\n *\n * @default Depends on environment and valid limits.\n */\n purgeResourceMetric?: \"memory\" | \"size\" | \"higher\" | \"fixed\";\n\n /**\n * Auto-start sweep process on cache initialization.\n *\n * @internal\n * @default true\n */\n _autoStartSweep: boolean;\n\n /**\n * @internal Maximum allowed ratio of expired entries before aggressive sweep.\n */\n _maxAllowExpiredRatio: number;\n}\n\n/**\n * Purge mode: boolean for immediate/skip, or 0-1 for threshold-based purging.\n */\nexport type PurgeMode = boolean | number;\n\n/**\n * Public cache configuration (all fields optional).\n */\nexport type CacheOptions = Partial<CacheConfigBase>;\n\n/**\n * Options for tag invalidation. Extensible for forward-compatibility.\n */\nexport interface InvalidateTagOptions {\n /**\n * If true, mark entries as stale instead of fully expired.\n * They remain accessible via stale window if configured.\n */\n asStale?: boolean;\n\n [key: string]: unknown;\n}\n\n/**\n * Cache entry lifecycle timestamps (tuple format).\n *\n * - [0] `createdAt` → Entry creation timestamp (absolute).\n * - [1] `expiresAt` → Expiration timestamp (absolute).\n * - [2] `staleExpiresAt` → Stale window expiration (absolute).\n */\nexport type EntryTimestamp = [\n /** Absolute timestamp when entry was created. 
*/\n number,\n /** Absolute timestamp when entry expires. */\n number,\n /** Absolute timestamp when stale window expires. */\n number,\n];\n\n/**\n * Cache entry tuple structure: `[timestamps, value, tags]`.\n *\n * Tuple indices:\n * - [0] `EntryTimestamp` → Creation, expiration, and stale timestamps.\n * - [1] `value` → The cached data.\n * - [2] `tags` → Associated tags for group invalidation, or null.\n */\nexport type CacheEntry = [\n EntryTimestamp,\n /** Cached value (any type). */\n unknown,\n (\n /**\n * Tags for group invalidation and categorization.\n * Null if no tags are set.\n */\n string[] | null\n ),\n];\n\n/**\n * Entry status: fresh, stale, or expired.\n */\nexport enum ENTRY_STATUS {\n /** Valid and within TTL. */\n FRESH = \"fresh\",\n /** Expired but within stale window; still served. */\n STALE = \"stale\",\n /** Beyond stale window; not served. */\n EXPIRED = \"expired\",\n}\n\n/**\n * Metadata returned from `get()` with `includeMetadata: true`.\n * Provides complete entry state including timing, status, and tags.\n */\nexport interface EntryMetadata<T = unknown> {\n /** The cached value. */\n data: T;\n /** Absolute timestamp (ms) when entry expires. */\n expirationTime: number;\n /** Absolute timestamp (ms) when stale window ends. */\n staleWindowExpiration: number;\n /** Current entry status. */\n status: ENTRY_STATUS;\n /** Associated tags for group invalidation, or null. */\n tags: string[] | null;\n}\n\n/**\n * @internal Runtime state for cache instances.\n */\nexport interface CacheState extends CacheConfigBase {\n /** Key-value store for all entries. */\n store: Map<string, CacheEntry>;\n /** Current entry count. */\n size: number;\n /** Iterator for incremental sweep operations. */\n _sweepIter: IterableIterator<[string, CacheEntry]> | null;\n /** Instance index for multi-instance sweep scheduling. */\n _instanceIndexState: number;\n /** Average ratio of expired entries in this instance. */\n _expiredRatio: number;\n /** Relative weight for sweep operation priority. 
*/\n _sweepWeight: number;\n /**\n * Tag invalidation timestamps.\n * Each tag maps to `[expiredAt, staleAt]` (0 = never set).\n * Used to determine if entries with this tag are invalidated.\n */\n _tags: Map<string, [number, number]>;\n}\n\n/**\n * Options for `get()` without metadata (default).\n * Returns only the cached value.\n */\nexport interface GetOptionsWithoutMetadata {\n /**\n * If false (or omitted), returns value only without metadata.\n * @default false\n */\n includeMetadata?: false;\n\n /**\n * Controls stale entry purging on this read.\n *\n * - `true` → purge immediately after return.\n * - `false` → keep stale entries.\n * - `number (0-1)` → purge at resource usage threshold.\n *\n * Overrides global `purgeStaleOnGet` setting.\n */\n purgeStale?: PurgeMode;\n}\n\n/**\n * Options for `get()` with metadata.\n * Returns value and complete entry state.\n */\nexport interface GetOptionsWithMetadata {\n /**\n * If true, returns `EntryMetadata<T>` object with value, timing, and tags.\n */\n includeMetadata: true;\n\n /**\n * Controls stale entry purging on this read.\n *\n * - `true` → purge immediately after return.\n * - `false` → keep stale entries.\n * - `number (0-1)` → purge at resource usage threshold.\n *\n * Overrides global `purgeStaleOnGet` setting.\n */\n purgeStale?: PurgeMode;\n}\n\n/**\n * Options for `set()` method.\n * Controls TTL, stale window, and tagging per entry.\n */\nexport interface SetOptions {\n /**\n * Time-To-Live in milliseconds.\n * Determines fresh period before expiration.\n *\n * Special values:\n * - `0` | `Infinity` → entry never expires\n *\n * Falls back to cache's `defaultTtl` if omitted.\n */\n ttl?: number;\n\n /**\n * Stale window duration in milliseconds.\n *\n * Determines how long entry serves stale after expiration.\n * Falls back to cache's `defaultStaleWindow` if omitted.\n */\n staleWindow?: number;\n\n /**\n * One or more tags for group-based invalidation.\n *\n * Tags enable batch invalidation via `invalidateTag()`.\n * Invalidating ANY tag on an entry invalidates the whole entry.\n *\n * Falls back to cache's default if omitted.\n */\n tags?: string | string[];\n}\n\n/**\n * TTL cache public interface.\n * Implemented by `LocalTtlCache` class.\n */\nexport interface LocalTtlCacheInterface {\n /**\n * Current number of entries (may include expired entries pending cleanup).\n */\n readonly size: number;\n\n /**\n * Retrieves value from cache.\n * Returns fresh, stale, or undefined (expired or not found).\n *\n * @overload `get<T>(key)` → `T | undefined` (no metadata)\n * @overload `get<T>(key, { includeMetadata: true })` → `EntryMetadata<T> | undefined` (with metadata)\n */\n get<T = unknown>(key: string): T | undefined;\n get<T = unknown>(key: string, options: GetOptionsWithMetadata): EntryMetadata<T> | undefined;\n get<T = unknown>(key: string, options: GetOptionsWithoutMetadata): T | undefined;\n\n /**\n * Sets or replaces a cache entry.\n * @returns true if set/updated, false if rejected (limits/invalid).\n */\n set(key: string, value: unknown, options?: SetOptions): boolean;\n\n /**\n * Deletes a specific key from cache.\n * @returns true if deleted, false if not found.\n */\n delete(key: string): boolean;\n\n /**\n * Checks if key exists (fresh or stale).\n * @returns true if valid, false if not found or fully expired.\n */\n has(key: string): boolean;\n\n /**\n * Removes all entries from cache.\n * Does NOT trigger `onDelete` callbacks (optimization).\n */\n clear(): void;\n\n /**\n * Marks entries with given tags as 
expired (or stale).\n * Invalidating ANY tag on an entry invalidates it.\n */\n invalidateTag(tags: string | string[], options?: InvalidateTagOptions): void;\n}\n","import { ENTRY_STATUS, type CacheEntry, type CacheState } from \"../types\";\n\n/**\n * Computes the derived status of a cache entry based on its associated tags.\n *\n * Tags may impose stricter expiration or stale rules on the entry. Only tags\n * created at or after the entry's creation timestamp are considered relevant.\n *\n * Resolution rules:\n * - If any applicable tag marks the entry as expired, the status becomes `EXPIRED`.\n * - Otherwise, if any applicable tag marks it as stale, the status becomes `STALE`.\n * - If no tag imposes stricter rules, the entry remains `FRESH`.\n *\n * @param state - The cache state containing tag metadata.\n * @param entry - The cache entry whose status is being evaluated.\n * @returns A tuple containing:\n * - The final {@link ENTRY_STATUS} imposed by tags.\n * - The earliest timestamp at which a tag marked the entry as stale\n * (or 0 if no tag imposed a stale rule).\n */\nexport function _statusFromTags(state: CacheState, entry: CacheEntry): [ENTRY_STATUS, number] {\n const entryCreatedAt = entry[0][0];\n\n // Tracks the earliest point in time when any tag marked this entry as stale.\n // Initialized to Infinity so that comparisons always pick the minimum.\n let earliestTagStaleInvalidation = Infinity;\n\n // Default assumption: entry is fresh unless tags override.\n let status = ENTRY_STATUS.FRESH;\n\n const tags = entry[2];\n if (tags) {\n for (const tag of tags) {\n const ts = state._tags.get(tag);\n if (!ts) continue;\n\n // Each tag provides two timestamps:\n // - tagExpiredAt: when the tag forces expiration\n // - tagStaleSinceAt: when the tag forces stale status\n const [tagExpiredAt, tagStaleSinceAt] = ts;\n\n // A tag can only override if it was created after the entry itself.\n if (tagExpiredAt >= entryCreatedAt) {\n status = ENTRY_STATUS.EXPIRED;\n break; // Expired overrides everything, no need to check further.\n }\n\n if (tagStaleSinceAt >= entryCreatedAt) {\n // Keep track of the earliest stale timestamp across all tags.\n if (tagStaleSinceAt < earliestTagStaleInvalidation) {\n earliestTagStaleInvalidation = tagStaleSinceAt;\n }\n status = ENTRY_STATUS.STALE;\n }\n }\n }\n\n // If no tag imposed stale, return 0 for the timestamp.\n return [status, status === ENTRY_STATUS.STALE ? earliestTagStaleInvalidation : 0];\n}\n","import { ENTRY_STATUS, type CacheEntry, type CacheState } from \"../types\";\nimport { _statusFromTags } from \"../utils/status-from-tags\";\n\n/**\n * Computes the final derived status of a cache entry by combining:\n *\n * - The entry's own expiration timestamps (TTL and stale TTL).\n * - Any stricter expiration or stale rules imposed by its associated tags.\n *\n * Precedence rules:\n * - `EXPIRED` overrides everything.\n * - `STALE` overrides `FRESH`.\n * - If neither the entry nor its tags impose stricter rules, the entry is `FRESH`.\n *\n * @param state - The cache state containing tag metadata.\n * @param entry - The cache entry being evaluated.\n * @returns The final {@link ENTRY_STATUS} for the entry.\n */\nexport function computeEntryStatus(\n state: CacheState,\n entry: CacheEntry,\n\n /** @internal */\n now: number,\n): ENTRY_STATUS {\n const [__createdAt, expiresAt, staleExpiresAt] = entry[0];\n\n // 1. 
Status derived from tags\n const [tagStatus, earliestTagStaleInvalidation] = _statusFromTags(state, entry);\n if (tagStatus === ENTRY_STATUS.EXPIRED) return ENTRY_STATUS.EXPIRED;\n const windowStale = staleExpiresAt - expiresAt;\n if (\n tagStatus === ENTRY_STATUS.STALE &&\n staleExpiresAt > 0 &&\n now < earliestTagStaleInvalidation + windowStale &&\n now <= staleExpiresAt\n ) {\n // A tag can mark the entry as stale only if the entry itself supports a stale window.\n // The tag's stale invalidation time is extended by the entry's stale window duration.\n // If \"now\" is still within that extended window, the entry is considered stale.\n return ENTRY_STATUS.STALE;\n }\n\n // 2. Status derived from entry timestamps\n if (now < expiresAt) {\n return ENTRY_STATUS.FRESH;\n }\n if (staleExpiresAt > 0 && now < staleExpiresAt) {\n return ENTRY_STATUS.STALE;\n }\n\n return ENTRY_STATUS.EXPIRED;\n}\n\n// ---------------------------------------------------------------------------\n// Entry status wrappers (semantic helpers built on top of computeEntryStatus)\n// ---------------------------------------------------------------------------\n/**\n * Determines whether a cache entry is fresh.\n *\n * A fresh entry is one whose final derived status is `FRESH`, meaning:\n * - It has not expired according to its own timestamps, and\n * - No associated tag imposes a stricter stale or expired rule.\n *\n * `entry` can be either a {@link CacheEntry} or a pre-computed {@link ENTRY_STATUS}.\n * Passing a pre-computed status avoids recalculating the entry status.\n *\n * @param state - The cache state containing tag metadata.\n * @param entry - The cache entry or pre-computed status being evaluated.\n * @param now - The current timestamp.\n * @returns True if the entry is fresh.\n */\nexport const isFresh = (\n state: CacheState,\n entry: CacheEntry | ENTRY_STATUS,\n now: number,\n): boolean => {\n if (typeof entry === \"string\") {\n // If entry is already a pre-computed status (from tags), it's fresh only if that status is FRESH.\n return entry === ENTRY_STATUS.FRESH;\n }\n\n return computeEntryStatus(state, entry, now) === ENTRY_STATUS.FRESH;\n};\n/**\n * Determines whether a cache entry is stale.\n *\n * A stale entry is one whose final derived status is `STALE`, meaning:\n * - It has passed its TTL but is still within its stale window, or\n * - A tag imposes a stale rule that applies to this entry.\n *\n * `entry` can be either a {@link CacheEntry} or a pre-computed {@link ENTRY_STATUS}.\n * Passing a pre-computed status avoids recalculating the entry status.\n *\n * @param state - The cache state containing tag metadata.\n * @param entry - The cache entry or pre-computed status being evaluated.\n * @param now - The current timestamp.\n * @returns True if the entry is stale.\n */\nexport const isStale = (\n state: CacheState,\n entry: CacheEntry | ENTRY_STATUS,\n\n /** @internal */\n now: number,\n): boolean => {\n if (typeof entry === \"string\") {\n // If entry is already a pre-computed status (from tags), it's stale only if that status is STALE.\n return entry === ENTRY_STATUS.STALE;\n }\n\n return computeEntryStatus(state, entry, now) === ENTRY_STATUS.STALE;\n};\n\n/**\n * Determines whether a cache entry is expired.\n *\n * An expired entry is one whose final derived status is `EXPIRED`, meaning:\n * - It has exceeded both its TTL and stale TTL, or\n * - A tag imposes an expiration rule that applies to this entry.\n *\n * `entry` can be either a {@link CacheEntry} or a pre-computed {@link 
ENTRY_STATUS}.\n * Passing a pre-computed status avoids recalculating the entry status.\n *\n * @param state - The cache state containing tag metadata.\n * @param entry - The cache entry or pre-computed status being evaluated.\n * @param now - The current timestamp.\n * @returns True if the entry is expired.\n */\nexport const isExpired = (\n state: CacheState,\n entry: CacheEntry | ENTRY_STATUS,\n\n /** @internal */\n now: number,\n): boolean => {\n if (typeof entry === \"string\") {\n // If entry is already a pre-computed status (from tags), it's expired only if that status is EXPIRED.\n return entry === ENTRY_STATUS.EXPIRED;\n }\n\n return computeEntryStatus(state, entry, now) === ENTRY_STATUS.EXPIRED;\n};\n\n/**\n * Determines whether a cache entry is valid.\n *\n * A valid entry is one whose final derived status is either:\n * - `FRESH`, or\n * - `STALE` (still within its stale window).\n *\n * Expired entries are considered invalid.\n *\n * `entry` can be either a {@link CacheEntry} or a pre-computed {@link ENTRY_STATUS},\n * or undefined/null if the entry was not found. Passing a pre-computed status avoids\n * recalculating the entry status.\n *\n * @param state - The cache state containing tag metadata.\n * @param entry - The cache entry, pre-computed status, or undefined/null if not found.\n * @param now - The current timestamp (defaults to {@link Date.now}).\n * @returns True if the entry exists and is fresh or stale.\n */\nexport const isValid = (\n state: CacheState,\n entry?: CacheEntry | ENTRY_STATUS | null,\n\n /** @internal */\n now: number = Date.now(),\n): boolean => {\n if (!entry) return false;\n if (typeof entry === \"string\") {\n // If entry is already a pre-computed status (from tags), it's valid if it's FRESH or STALE.\n return entry === ENTRY_STATUS.FRESH || entry === ENTRY_STATUS.STALE;\n }\n\n const status = computeEntryStatus(state, entry, now);\n return status === ENTRY_STATUS.FRESH || status === ENTRY_STATUS.STALE;\n};\n","import {\n DEFAULT_PURGE_STALE_ON_GET_NO_LIMITS,\n DEFAULT_PURGE_STALE_ON_SWEEP_NO_LIMITS,\n} from \"../defaults\";\nimport type { CacheState, PurgeMode } from \"../types\";\n\nimport { _metrics } from \"./start-monitor\";\n\n/**\n * Computes memory utilization as a normalized 0–1 value.\n *\n * In backend environments where metrics are available, returns the actual\n * memory utilization from the monitor. In browser environments or when\n * metrics are unavailable, returns 0.\n *\n * @returns Memory utilization in range [0, 1]\n *\n * @internal\n */\nconst getMemoryUtilization = (): number => {\n if (__BROWSER__ || !_metrics) return 0;\n return _metrics.memory?.utilization ?? 0;\n};\n\n/**\n * Computes size utilization as a normalized 0–1 value.\n *\n * If maxSize is finite, returns `currentSize / maxSize`. 
Otherwise returns 0.\n *\n * @param state - The cache state\n * @returns Size utilization in range [0, 1]\n *\n * @internal\n */\nconst getSizeUtilization = (state: CacheState): number => {\n if (!Number.isFinite(state.maxSize) || state.maxSize <= 0 || state.size <= 0) return 0;\n return Math.min(1, state.size / state.maxSize);\n};\n\n/**\n * Computes a 0–1 resource usage metric based on the configured purge metric.\n *\n * - `\"size\"`: Returns size utilization only.\n * - `\"memory\"`: Returns memory utilization (backend only; returns 0 in browser).\n * - `\"higher\"`: Returns the maximum of memory and size utilization.\n *\n * The result is always clamped to [0, 1].\n *\n * @param state - The cache state\n * @returns Resource usage in range [0, 1]\n *\n * @internal\n */\nexport const computeResourceUsage = (state: CacheState): number | null => {\n const metric = state.purgeResourceMetric;\n if (!metric || metric === \"fixed\") return null;\n\n if (metric === \"size\") {\n return getSizeUtilization(state);\n }\n\n if (metric === \"memory\") {\n return getMemoryUtilization();\n }\n\n if (metric === \"higher\") {\n return Math.min(1, Math.max(getMemoryUtilization(), getSizeUtilization(state)));\n }\n\n return null;\n};\n\n/**\n * Determines whether stale entries should be purged based on the purge mode and current resource usage.\n *\n * @param mode - The purge mode setting\n * - `false` → never purge\n * - `true` → always purge\n * - `number (0–1)` → purge when `resourceUsage >= threshold`\n * @param state - The cache state\n * @returns True if stale entries should be purged, false otherwise\n *\n * @internal\n */\nexport const shouldPurge = (\n mode: PurgeMode,\n state: CacheState,\n purgeContext: \"get\" | \"sweep\",\n): boolean => {\n if (mode === false) return false;\n if (mode === true) return true;\n\n const userThreshold = Number(mode);\n const defaultPurge =\n purgeContext === \"sweep\"\n ? 
DEFAULT_PURGE_STALE_ON_SWEEP_NO_LIMITS\n : DEFAULT_PURGE_STALE_ON_GET_NO_LIMITS;\n\n if (Number.isNaN(userThreshold)) return defaultPurge;\n\n const usage = computeResourceUsage(state);\n if (!usage) {\n return defaultPurge;\n }\n return usage >= Math.max(0, Math.min(1, userThreshold));\n};\n","import { DELETE_REASON, deleteKey } from \"../cache/delete\";\nimport { computeEntryStatus, isExpired, isStale } from \"../cache/validators\";\nimport { MAX_KEYS_PER_BATCH } from \"../defaults\";\nimport { type CacheState } from \"../types\";\nimport { shouldPurge } from \"../utils/purge-eval\";\n\n/**\n * Performs a single sweep operation on the cache to remove expired and optionally stale entries.\n * Uses a linear scan with a saved pointer to resume from the last processed key.\n * @param state - The cache state.\n * @param _maxKeysPerBatch - Maximum number of keys to process in this sweep.\n * @returns An object containing statistics about the sweep operation.\n */\nexport function _sweepOnce(\n state: CacheState,\n\n /**\n * Maximum number of keys to process in this sweep.\n * @default 1000\n */\n _maxKeysPerBatch: number = MAX_KEYS_PER_BATCH,\n): { processed: number; expiredCount: number; staleCount: number; ratio: number } {\n if (!state._sweepIter) {\n state._sweepIter = state.store.entries();\n }\n\n let processed = 0;\n let expiredCount = 0;\n let staleCount = 0;\n\n for (let i = 0; i < _maxKeysPerBatch; i++) {\n const next = state._sweepIter.next();\n\n if (next.done) {\n state._sweepIter = state.store.entries();\n break;\n }\n\n processed += 1;\n const [key, entry] = next.value;\n\n const now = Date.now();\n\n const status = computeEntryStatus(state, entry, now);\n if (isExpired(state, status, now)) {\n deleteKey(state, key, DELETE_REASON.EXPIRED);\n expiredCount += 1;\n } else if (isStale(state, status, now)) {\n staleCount += 1;\n\n if (shouldPurge(state.purgeStaleOnSweep, state, \"sweep\")) {\n deleteKey(state, key, DELETE_REASON.STALE);\n }\n }\n }\n\n const expiredStaleCount = shouldPurge(state.purgeStaleOnSweep, state, \"sweep\") ? staleCount : 0;\n return {\n processed,\n expiredCount,\n staleCount,\n ratio: processed > 0 ? (expiredCount + expiredStaleCount) / processed : 0,\n };\n}\n","import {\n DEFAULT_MAX_EXPIRED_RATIO,\n EXPIRED_RATIO_MEMORY_THRESHOLD,\n MINIMAL_EXPIRED_RATIO,\n} from \"../defaults\";\nimport { interpolate } from \"../utils/interpolate\";\nimport { _metrics, SAFE_MEMORY_LIMIT_RATIO } from \"../utils/start-monitor\";\n\n/**\n * Calculates the optimal maximum expired ratio based on current memory utilization.\n *\n * This function interpolates between `maxAllowExpiredRatio` and `MINIMAL_EXPIRED_RATIO`\n * depending on the memory usage reported by `_metrics`. At low memory usage (0%),\n * the optimal ratio equals `maxAllowExpiredRatio`. As memory usage approaches or exceeds\n * 80% of the memory limit, the optimal ratio decreases toward `MINIMAL_EXPIRED_RATIO`.\n *\n * @param maxAllowExpiredRatio - The maximum allowed expired ratio at minimal memory usage.\n * Defaults to `DEFAULT_MAX_EXPIRED_RATIO`.\n * @returns A normalized value between 0 and 1 representing the optimal expired ratio.\n */\nexport function calculateOptimalMaxExpiredRatio(\n maxAllowExpiredRatio: number = DEFAULT_MAX_EXPIRED_RATIO,\n): number {\n const EFFECTIVE_MEMORY_THRESHOLD = EXPIRED_RATIO_MEMORY_THRESHOLD / SAFE_MEMORY_LIMIT_RATIO;\n\n const optimalExpiredRatio = interpolate({\n value: _metrics?.memory.utilization ?? 
0,\n\n fromStart: 0, // baseline: memory usage ratio at 0%\n fromEnd: EFFECTIVE_MEMORY_THRESHOLD, // threshold: memory usage ratio at 80% of safe limit\n\n toStart: maxAllowExpiredRatio, // allowed ratio at minimal memory usage\n toEnd: MINIMAL_EXPIRED_RATIO, // allowed ratio at high memory usage (≥80%)\n });\n\n // At 0% memory usage, the optimalExpiredRatio equals maxAllowExpiredRatio.\n // At or above 80% memory usage, the optimalExpiredRatio approaches or falls below MINIMAL_EXPIRED_RATIO.\n\n return Math.min(1, Math.max(0, optimalExpiredRatio));\n}\n","import { _instancesCache } from \"../cache/create-cache\";\nimport { MINIMAL_EXPIRED_RATIO } from \"../defaults\";\n\nimport { calculateOptimalMaxExpiredRatio } from \"./calculate-optimal-max-expired-ratio\";\n\n/**\n * Updates the sweep weight (`_sweepWeight`) for each cache instance.\n *\n * The sweep weight determines the probability that an instance will be selected\n * for a cleanup (sweep) process. It is calculated based on the store size and\n * the ratio of expired keys.\n *\n * This function complements (`_selectInstanceToSweep`), which is responsible\n * for selecting the correct instance based on the weights assigned here.\n *\n * ---\n *\n * ### Sweep systems:\n * 1. **Normal sweep**\n * - Runs whenever the percentage of expired keys exceeds the allowed threshold\n * calculated by `calculateOptimalMaxExpiredRatio`.\n * - It is the main cleanup mechanism and is applied proportionally to the\n * store size and the expired‑key ratio.\n *\n * 2. **Memory‑conditioned sweep (control)**\n * - Works exactly like the normal sweep, except it may run even when it\n * normally wouldn’t.\n * - Only activates under **high memory pressure**.\n * - Serves as an additional control mechanism to adjust weights, keep the\n * system updated, and help prevent memory overflows.\n *\n * 3. 
**Round‑robin sweep (minimal control)**\n * - Always runs, even if the expired ratio is low or memory usage does not\n * require it.\n * - Processes a very small number of keys per instance, much smaller than\n * the normal sweep.\n * - Its main purpose is to ensure that all instances receive at least a\n * periodic weight update and minimal expired‑key control.\n *\n * ---\n * #### Important notes:\n * - A minimum `MINIMAL_EXPIRED_RATIO` (e.g., 5%) is assumed to ensure that\n * control sweeps can always run under high‑memory scenarios.\n * - Even with a minimum ratio, the normal sweep and the memory‑conditioned sweep\n * may **skip execution** if memory usage allows it and the expired ratio is\n * below the optimal maximum.\n * - The round‑robin sweep is never skipped: it always runs with a very small,\n * almost imperceptible cost.\n *\n * @returns The total accumulated sweep weight across all cache instances.\n */\nexport function _updateWeightSweep(): number {\n let totalSweepWeight = 0;\n\n for (const instCache of _instancesCache) {\n if (instCache.store.size <= 0) {\n // Empty instance → no sweep weight needed, skip sweep for this instance.\n instCache._sweepWeight = 0;\n continue;\n }\n\n // Ensure a minimum expired ratio to allow control sweeps.\n // If the real ratio is higher than the minimum, use the real ratio.\n let expiredRatio = MINIMAL_EXPIRED_RATIO;\n if (instCache._expiredRatio > MINIMAL_EXPIRED_RATIO) {\n expiredRatio = instCache._expiredRatio;\n }\n\n if (!__BROWSER__) {\n // In non‑browser environments, compute an optimal maximum allowed ratio.\n const optimalMaxExpiredRatio = calculateOptimalMaxExpiredRatio(\n instCache._maxAllowExpiredRatio,\n );\n\n if (expiredRatio <= optimalMaxExpiredRatio) {\n // If memory usage allows it and the expired ratio is low,\n // this sweep can be skipped. 
The reduced round‑robin sweep will still run.\n instCache._sweepWeight = 0;\n continue;\n }\n }\n\n // Normal sweep: weight proportional to store size and expired ratio.\n instCache._sweepWeight = instCache.store.size * expiredRatio;\n totalSweepWeight += instCache._sweepWeight;\n }\n\n return totalSweepWeight;\n}\n","import { _instancesCache } from \"../cache/create-cache\";\nimport {\n MAX_KEYS_PER_BATCH,\n OPTIMAL_SWEEP_INTERVAL,\n OPTIMAL_SWEEP_TIME_BUDGET_IF_NOTE_METRICS_AVAILABLE,\n} from \"../defaults\";\nimport type { CacheState } from \"../types\";\nimport { _metrics, _monitorInstance, startMonitor } from \"../utils/start-monitor\";\n\nimport { _batchUpdateExpiredRatio } from \"./batchUpdateExpiredRatio\";\nimport { calculateOptimalSweepParams } from \"./calculate-optimal-sweep-params\";\nimport { _selectInstanceToSweep } from \"./select-instance-to-sweep\";\nimport { _sweepOnce } from \"./sweep-once\";\nimport { _updateWeightSweep } from \"./update-weight\";\n\nlet _isSweepActive = false;\nlet _pendingSweepTimeout: NodeJS.Timeout | null = null;\n\nexport function startSweep(state: CacheState): void {\n if (_isSweepActive) return;\n _isSweepActive = true;\n startMonitor();\n void sweep(state); // schedule next sweep\n}\n\nexport function stopSweep(): void {\n if (_pendingSweepTimeout) {\n clearTimeout(_pendingSweepTimeout);\n _pendingSweepTimeout = null;\n }\n _monitorInstance?.stop();\n _isSweepActive = false;\n}\n\n/**\n * Performs a sweep operation on the cache to remove expired and optionally stale entries.\n * Uses a linear scan with a saved pointer to resume from the last processed key.\n * @param state - The cache state.\n */\nexport const sweep = async (\n state: CacheState,\n\n /** @internal */\n utilities: SweepUtilities = {},\n): Promise<void> => {\n const {\n schedule = defaultSchedule,\n yieldFn = defaultYieldFn,\n now = Date.now(),\n runOnlyOne = false,\n } = utilities;\n const startTime = now;\n\n let sweepIntervalMs = OPTIMAL_SWEEP_INTERVAL;\n let sweepTimeBudgetMs = OPTIMAL_SWEEP_TIME_BUDGET_IF_NOTE_METRICS_AVAILABLE;\n if (!__BROWSER__ && _metrics) {\n ({ sweepIntervalMs, sweepTimeBudgetMs } = calculateOptimalSweepParams({ metrics: _metrics }));\n }\n\n const totalSweepWeight = _updateWeightSweep();\n const currentExpiredRatios: number[][] = [];\n\n // Reduce the maximum number of keys per batch only when no instance weights are available\n // and the sweep is running in minimal round‑robin control mode. In this case, execute the\n // smallest possible sweep (equivalent to one batch, but divided across instances).\n const maxKeysPerBatch =\n totalSweepWeight <= 0 ? 
MAX_KEYS_PER_BATCH / _instancesCache.length : MAX_KEYS_PER_BATCH;\n\n let batchSweep = 0;\n while (true) {\n batchSweep += 1;\n\n const instanceToSweep = _selectInstanceToSweep({ batchSweep, totalSweepWeight });\n if (!instanceToSweep) {\n // No instance to sweep\n break;\n }\n\n const { ratio } = _sweepOnce(instanceToSweep, maxKeysPerBatch);\n // Initialize or update `currentExpiredRatios` array for current ratios\n (currentExpiredRatios[instanceToSweep._instanceIndexState] ??= []).push(ratio);\n\n if (Date.now() - startTime > sweepTimeBudgetMs) {\n break;\n }\n\n await yieldFn();\n }\n\n _batchUpdateExpiredRatio(currentExpiredRatios);\n\n // Schedule next sweep\n if (!runOnlyOne) {\n schedule(() => void sweep(state, utilities), sweepIntervalMs);\n }\n};\n\n// Default utilities for scheduling and yielding --------------------------------\nconst defaultSchedule: scheduleType = (fn, ms) => {\n _pendingSweepTimeout = setTimeout(fn, ms);\n if (typeof _pendingSweepTimeout.unref === \"function\") _pendingSweepTimeout.unref();\n};\nexport const defaultYieldFn: yieldFnType = () => new Promise(resolve => setImmediate(resolve));\n\n// Types for internal utilities -----------------------------------------------\ntype scheduleType = (fn: () => void, ms: number) => void;\ntype yieldFnType = () => Promise<void>;\ninterface SweepUtilities {\n /**\n * Default scheduling function using setTimeout.\n * This can be overridden for testing.\n * @internal\n */\n schedule?: scheduleType;\n\n /**\n * Default yielding function using setImmediate.\n * This can be overridden for testing.\n * @internal\n */\n yieldFn?: yieldFnType;\n\n /** Current timestamp for testing purposes. */\n now?: number;\n\n /**\n * If true, only run one sweep cycle.\n * @internal\n */\n runOnlyOne?: boolean;\n}\n","import {\n DEFAULT_MAX_EXPIRED_RATIO,\n DEFAULT_MAX_MEMORY_SIZE,\n DEFAULT_MAX_SIZE,\n DEFAULT_STALE_WINDOW,\n DEFAULT_TTL,\n} from \"../defaults\";\nimport { resolvePurgeStaleOnGet } from \"../resolve-purge-config/get\";\nimport { resolvePurgeResourceMetric } from \"../resolve-purge-config/metric\";\nimport { resolvePurgeStaleOnSweep } from \"../resolve-purge-config/sweep\";\nimport { startSweep } from \"../sweep/sweep\";\nimport type { CacheOptions, CacheState } from \"../types\";\n\nlet _instanceCount = 0;\nconst INSTANCE_WARNING_THRESHOLD = 99;\nexport const _instancesCache: CacheState[] = [];\n\n/**\n * Resets the instance count for testing purposes.\n * This function is intended for use in tests to avoid instance limits.\n */\nexport const _resetInstanceCount = (): void => {\n _instanceCount = 0;\n};\n\n/**\n * Creates the initial state for the TTL cache.\n * @param options - Configuration options for the cache.\n * @returns The initial cache state.\n */\nexport const createCache = (options: CacheOptions = {}): CacheState => {\n const {\n onExpire,\n onDelete,\n defaultTtl = DEFAULT_TTL,\n maxSize = DEFAULT_MAX_SIZE,\n maxMemorySize = DEFAULT_MAX_MEMORY_SIZE,\n _maxAllowExpiredRatio = DEFAULT_MAX_EXPIRED_RATIO,\n defaultStaleWindow = DEFAULT_STALE_WINDOW,\n purgeStaleOnGet,\n purgeStaleOnSweep,\n purgeResourceMetric,\n _autoStartSweep = true,\n } = options;\n\n _instanceCount++;\n\n // NEXT: warn if internal parameters are touch by user\n\n if (_instanceCount > INSTANCE_WARNING_THRESHOLD) {\n // NEXT: Use a proper logging mechanism\n // NEXT: Create documentation for this\n console.warn(\n `Too many instances detected (${_instanceCount}). 
This may indicate a configuration issue; consider minimizing instance creation or grouping keys by expected expiration ranges. See the documentation: https://github.com/neezco/cache/docs/getting-started.md`,\n );\n }\n\n const resolvedPurgeResourceMetric =\n purgeResourceMetric ??\n resolvePurgeResourceMetric({\n maxSize,\n maxMemorySize,\n });\n\n const resolvedPurgeStaleOnGet = resolvePurgeStaleOnGet({\n limits: {\n maxSize,\n maxMemorySize,\n },\n purgeResourceMetric: resolvedPurgeResourceMetric,\n userValue: purgeStaleOnGet,\n });\n const resolvedPurgeStaleOnSweep = resolvePurgeStaleOnSweep({\n limits: {\n maxSize,\n maxMemorySize,\n },\n purgeResourceMetric: resolvedPurgeResourceMetric,\n userValue: purgeStaleOnSweep,\n });\n\n const state: CacheState = {\n store: new Map(),\n _sweepIter: null,\n get size() {\n return state.store.size;\n },\n onExpire,\n onDelete,\n maxSize,\n maxMemorySize,\n defaultTtl,\n defaultStaleWindow,\n purgeStaleOnGet: resolvedPurgeStaleOnGet,\n purgeStaleOnSweep: resolvedPurgeStaleOnSweep,\n purgeResourceMetric: resolvedPurgeResourceMetric,\n _maxAllowExpiredRatio,\n _autoStartSweep,\n _instanceIndexState: -1,\n _expiredRatio: 0,\n _sweepWeight: 0,\n _tags: new Map(),\n };\n\n state._instanceIndexState = _instancesCache.push(state) - 1;\n\n startSweep(state);\n\n return state;\n};\n","import type { CacheEntry, CacheState, ENTRY_STATUS, PurgeMode } from \"../types\";\nimport { shouldPurge } from \"../utils/purge-eval\";\n\nimport { DELETE_REASON, deleteKey } from \"./delete\";\nimport { computeEntryStatus, isFresh, isStale } from \"./validators\";\n\n/**\n * Internal function that retrieves a value from the cache with its status information.\n * Returns a tuple containing the entry status and the complete cache entry.\n *\n * @param state - The cache state.\n * @param key - The key to retrieve.\n * @param now - Optional timestamp override (defaults to Date.now()).\n * @returns A tuple of [status, entry] if the entry is valid, or [null, undefined] if not found or expired.\n *\n * @internal\n */\nexport const getWithStatus = (\n state: CacheState,\n key: string,\n purgeMode?: PurgeMode,\n now: number = Date.now(),\n): [ENTRY_STATUS | null, CacheEntry | undefined] => {\n const entry = state.store.get(key);\n\n if (!entry) return [null, undefined];\n\n const status = computeEntryStatus(state, entry, now);\n\n if (isFresh(state, status, now)) return [status, entry];\n\n if (isStale(state, status, now)) {\n const purgeModeToUse = purgeMode ?? state.purgeStaleOnGet;\n if (shouldPurge(purgeModeToUse, state, \"get\")) {\n deleteKey(state, key, DELETE_REASON.STALE);\n }\n return [status, entry];\n }\n\n // If it expired, always delete it\n deleteKey(state, key, DELETE_REASON.EXPIRED);\n\n return [status, undefined];\n};\n\n/**\n * Retrieves a value from the cache if the entry is valid.\n * @param state - The cache state.\n * @param key - The key to retrieve.\n * @param now - Optional timestamp override (defaults to Date.now()).\n * @returns The cached value if valid, undefined otherwise.\n *\n * @internal\n */\nexport const get = (\n state: CacheState,\n key: string,\n purgeMode?: PurgeMode,\n now: number = Date.now(),\n): unknown => {\n const [, entry] = getWithStatus(state, key, purgeMode, now);\n return entry ? 
entry[1] : undefined;\n};\n","import type { CacheState } from \"../types\";\n\nimport { get } from \"./get\";\n\n/**\n * Checks if a key exists in the cache and is not expired.\n * @param state - The cache state.\n * @param key - The key to check.\n * @param now - Optional timestamp override (defaults to Date.now()).\n * @returns True if the key exists and is valid, false otherwise.\n */\nexport const has = (state: CacheState, key: string, now: number = Date.now()): boolean => {\n return get(state, key, now) !== undefined;\n};\n","import type { CacheState, InvalidateTagOptions } from \"../types\";\n\n/**\n * Invalidates one or more tags so that entries associated with them\n * become expired or stale from this moment onward.\n *\n * Semantics:\n * - Each tag maintains two timestamps in `state._tags`:\n * [expiredAt, staleSinceAt].\n * - Calling this function updates one of those timestamps to `_now`,\n * depending on whether the tag should force expiration or staleness.\n *\n * Rules:\n * - If `asStale` is false (default), the tag forces expiration:\n * entries created before `_now` will be considered expired.\n * - If `asStale` is true, the tag forces staleness:\n * entries created before `_now` will be considered stale,\n * but only if they support a stale window.\n *\n * Behavior:\n * - Each call replaces any previous invalidation timestamp for the tag.\n * - Entries created after `_now` are unaffected.\n *\n * @param state - The cache state containing tag metadata.\n * @param tags - A tag or list of tags to invalidate.\n * @param options.asStale - Whether the tag should mark entries as stale.\n */\nexport function invalidateTag(\n state: CacheState,\n tags: string | string[],\n options: InvalidateTagOptions = {},\n\n /** @internal */\n _now: number = Date.now(),\n): void {\n const tagList = Array.isArray(tags) ? tags : [tags];\n const asStale = options.asStale ?? false;\n\n for (const tag of tagList) {\n const currentTag = state._tags.get(tag);\n\n if (currentTag) {\n // Update existing tag timestamps:\n // index 0 = expiredAt, index 1 = staleSinceAt\n if (asStale) {\n currentTag[1] = _now;\n } else {\n currentTag[0] = _now;\n }\n } else {\n // Initialize new tag entry with appropriate timestamp.\n // If marking as stale, expiredAt = 0 and staleSinceAt = _now.\n // If marking as expired, expiredAt = _now and staleSinceAt = 0.\n state._tags.set(tag, [asStale ? 0 : _now, asStale ? 
_now : 0]);\n }\n }\n}\n","import type { CacheState, CacheEntry } from \"../types\";\nimport { _metrics } from \"../utils/start-monitor\";\n\n/**\n * Sets or updates a value in the cache with TTL and an optional stale window.\n *\n * @param state - The cache state.\n * @param input - Cache entry definition (key, value, ttl, staleWindow, tags).\n * @param now - Optional timestamp override used as the base time (defaults to Date.now()).\n * @returns True if the entry was created or updated, false if rejected due to limits or invalid input.\n *\n * @remarks\n * - `ttl` defines when the entry becomes expired.\n * - `staleWindow` defines how long the entry may still be served as stale\n * after the expiration moment (`now + ttl`).\n * - Returns false if value is `undefined` (entry ignored, existing value untouched).\n * - Returns false if new entry would exceed `maxSize` limit (existing keys always allowed).\n * - Returns false if new entry would exceed `maxMemorySize` limit (existing keys always allowed).\n * - Returns true if entry was set or updated (or if existing key was updated at limit).\n */\nexport const setOrUpdate = (\n state: CacheState,\n input: CacheSetOrUpdateInput,\n\n /** @internal */\n now: number = Date.now(),\n): boolean => {\n const { key, value, ttl: ttlInput, staleWindow: staleWindowInput, tags } = input;\n\n if (value === undefined) return false; // Ignore undefined values, leaving existing entry intact if it exists\n if (key == null) throw new Error(\"Missing key.\");\n if (state.size >= state.maxSize && !state.store.has(key)) {\n // Ignore new entries when max size is reached, but allow updates to existing keys\n return false;\n }\n if (\n !__BROWSER__ &&\n _metrics?.memory.total.rss &&\n _metrics?.memory.total.rss >= state.maxMemorySize * 1024 * 1024 &&\n !state.store.has(key)\n ) {\n // Ignore new entries when max memory size is reached, but allow updates to existing keys\n return false;\n }\n\n const ttl = ttlInput ?? state.defaultTtl;\n const staleWindow = staleWindowInput ?? state.defaultStaleWindow;\n\n const expiresAt = ttl > 0 ? now + ttl : Infinity;\n const entry: CacheEntry = [\n [\n now, // createdAt\n expiresAt, // expiresAt\n staleWindow > 0 ? expiresAt + staleWindow : 0, // staleExpiresAt (relative to expiration)\n ],\n value,\n typeof tags === \"string\" ? [tags] : Array.isArray(tags) ? 
tags : null,\n ];\n\n state.store.set(key, entry);\n return true;\n};\n\n/**\n * Input parameters for setting or updating a cache entry.\n */\nexport interface CacheSetOrUpdateInput {\n /**\n * Key under which the value will be stored.\n */\n key: string;\n\n /**\n * Value to be written to the cache.\n *\n * Considerations:\n * - Always overwrites any previous value, if one exists.\n * - `undefined` is ignored, leaving any previous value intact, if one exists.\n * - `null` is explicitly stored as a null value, replacing any previous value, if one exists.\n */\n value: unknown;\n\n /**\n * TTL (Time-To-Live) in milliseconds for this entry.\n */\n ttl?: number;\n\n /**\n * Optional stale window in milliseconds.\n *\n * Defines how long the entry may continue to be served as stale\n * after it has reached its expiration time.\n *\n * The window is always relative to the entry’s own expiration moment,\n * whether that expiration comes from an explicit `ttl` or from the\n * cache’s default TTL.\n *\n * If omitted, the cache-level default stale window is used.\n */\n staleWindow?: number;\n\n /**\n * Optional tags associated with this entry.\n */\n tags?: string | string[];\n}\n","import { clear } from \"./cache/clear\";\nimport { createCache } from \"./cache/create-cache\";\nimport { deleteKey } from \"./cache/delete\";\nimport { get, getWithStatus } from \"./cache/get\";\nimport { has } from \"./cache/has\";\nimport { invalidateTag } from \"./cache/invalidate-tag\";\nimport { setOrUpdate } from \"./cache/set\";\nimport type {\n GetOptionsWithMetadata,\n GetOptionsWithoutMetadata,\n SetOptions,\n LocalTtlCacheInterface,\n CacheOptions,\n CacheState,\n EntryMetadata,\n InvalidateTagOptions,\n} from \"./types\";\n\n// Re-export public types\nexport type {\n CacheOptions,\n InvalidateTagOptions,\n EntryMetadata,\n GetOptionsWithMetadata,\n GetOptionsWithoutMetadata,\n SetOptions,\n LocalTtlCacheInterface,\n PurgeMode,\n} from \"./types\";\n\n// Re-export public enum and values\nexport { ENTRY_STATUS } from \"./types\";\n\n/**\n * A TTL (Time-To-Live) cache implementation with support for expiration,\n * stale windows, tag-based invalidation, and smart automatic sweeping.\n *\n * Provides O(1) constant-time operations for all core methods with support for:\n * - Expiration and stale windows\n * - Tag-based invalidation\n * - Automatic sweeping\n */\nexport class LocalTtlCache implements LocalTtlCacheInterface {\n private state: CacheState;\n\n /**\n * Creates a new cache instance.\n *\n * @param options - Configuration options for the cache (defaultTtl, defaultStaleWindow, maxSize, etc.)\n *\n */\n constructor(options?: CacheOptions) {\n this.state = createCache(options);\n }\n\n get size(): number {\n return this.state.size;\n }\n\n get<T = unknown>(key: string): T | undefined;\n get<T = unknown>(key: string, options: GetOptionsWithMetadata): EntryMetadata<T>;\n get<T = unknown>(key: string, options: GetOptionsWithoutMetadata): T | undefined;\n get<T = unknown>(\n key: string,\n options?: GetOptionsWithoutMetadata | GetOptionsWithMetadata,\n ): T | undefined | EntryMetadata<T> {\n if (options?.includeMetadata === true) {\n const [status, entry] = getWithStatus(this.state, key, options.purgeStale);\n if (!entry) return undefined;\n\n const [timestamps, value, tags] = entry;\n const [, expiresAt, staleExpiresAt] = timestamps;\n\n return {\n data: value as T,\n expirationTime: expiresAt,\n staleWindowExpiration: staleExpiresAt,\n status,\n tags,\n } as EntryMetadata<T>;\n }\n\n return get(this.state, 
key, options?.purgeStale) as T | undefined;\n }\n\n set(key: string, value: unknown, options?: SetOptions): boolean {\n return setOrUpdate(this.state, {\n key,\n value,\n ttl: options?.ttl,\n staleWindow: options?.staleWindow,\n tags: options?.tags,\n });\n }\n\n delete(key: string): boolean {\n return deleteKey(this.state, key);\n }\n\n has(key: string): boolean {\n return has(this.state, key);\n }\n\n clear(): void {\n clear(this.state);\n }\n\n invalidateTag(tags: string | string[], options?: InvalidateTagOptions): void {\n invalidateTag(this.state, tags, options ?? {});\n }\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAWA,MAAa,SAAS,UAA4B;AAChD,OAAM,MAAM,OAAO;;;;;ACVrB,MAAM,aAAqB;AAC3B,MAAM,aAAqB,KAAK;;;;;;;;;;;AAahC,MAAa,cAAsB,KAAK;;;;;AAMxC,MAAa,uBAA+B;;;;;AAM5C,MAAa,mBAA2B;;;;;;AAOxC,MAAa,0BAAkC;;;;;;;;;;;AAa/C,MAAa,qBAA6B;;;;;AAM1C,MAAa,wBAAgC;;;;;AAM7C,MAAa,iCAAyC;;;;;;AAOtD,MAAa,4BAAoC;;;;;;;;;;;AAajD,MAAa,yBAAiC,IAAI;;;;;AAMlD,MAAa,uBAA+B;;;;;AAM5C,MAAa,0BAAkC;;;;;AAM/C,MAAa,sDAA8D;;;;;;;;;;;;AAc3E,MAAa,gCAAwC;;;;;;;;;;;;AAcrD,MAAa,wBAAgC;;;;;AAM7C,MAAa,qBAA6B;;;;;AAM1C,MAAa,sBAA8B;;;;;;;;AAsB3C,MAAa,uCAAgD;;;;;;;;AAS7D,MAAa,yCAAkD;;;;;;;;AAS/D,MAAa,uCAA+C;;;;;;;;AAS5D,MAAa,yCAAiD;;;;;;;;ACvL9D,MAAa,gBAAgB,UAA2B,OAAO,SAAS,MAAM,IAAI,QAAQ;;;;;AAM1F,MAAa,uBACX,QACA,gBAIY;AACZ,KAAI,WAAW,QAAS,QAAO;AAC/B,KAAI,WAAW,OAAQ,QAAO,YAAY;AAC1C,KAAI,WAAW,SAAU,QAAO,YAAY;AAC5C,KAAI,WAAW,SAAU,QAAO,YAAY,gBAAgB,YAAY;AACxE,QAAO;;;;;;;;;ACfT,MAAa,2BAA2B,WAA2D;AACjG,KAAI,WAAW,QAAS,QAAO;AAC/B,KAAI,WAAW,OAAQ,QAAO;AAC9B,KAAI,WAAW,SAAU,QAAO;AAChC,KAAI,WAAW,SACb,QAAO;AACT,QAAO;;;;;;AAOT,MAAa,oBAAoB,SAA4B;AAC3D,KAAI,OAAO,SAAS,SAAU,QAAO,cAAc,OAAO,KAAK,QAAQ,EAAE,CAAC;AAC1E,QAAO,GAAG;;;;;;;;;;;ACVZ,MAAa,wBACX,QAUA,sBAQS;AAET,KAAI,kBAAkB,cAAc;AAClC,UAAQ,KACN,WAAW,OAAO,UAAU,WAAW,iBAAiB,OAAO,KAAK,CAAC,6BAA6B,OAAO,OAAO,qGAE7F,OAAO,UAAU,KAAK,iBAAiB,OAAO,SAAS,CAAC,2BAA2B,OAAO,OAAO,GACrH;AACD;;AAIF,KAAI,kBAAkB,0BAA0B;AAC9C,UAAQ,KACN,WAAW,OAAO,UAAU,WAAW,iBAAiB,OAAO,KAAK,CAAC,6BAA6B,OAAO,OAAO,wGAE7F,OAAO,UAAU,KAAK,iBAAiB,OAAO,SAAS,CAAC,2BAA2B,OAAO,OAAO,GACrH;AACD;;AAIF,KAAI,kBAAkB,iBAAiB;EACrC,MAAM,cAAc,wBAAwB,OAAO,OAAO;AAC1D,UAAQ,KACN,WAAW,OAAO,UAAU,WAAW,iBAAiB,OAAO,KAAK,CAAC,6BAA6B,OAAO,OAAO,yBACxF,YAAY,kBACjB,OAAO,UAAU,KAAK,iBAAiB,OAAO,SAAS,CAAC,2BAA2B,OAAO,OAAO,GACrH;;;;;;;;;;;;;;;;;;;;;;;;;;;;AC/BL,MAAa,oBACX,QAIA,QAIA,UAIA,cACc;CACd,MAAM,eAAe,aAAa,OAAO,QAAQ;CACjD,MAAM,iBAAiB,aAAa,OAAO,cAAc;CACzD,MAAM,oBAAoB,oBAAoB,OAAO,qBAAqB;EACxE;EACA;EACD,CAAC;CAEF,MAAM,WAAW,oBAAoB,SAAS,aAAa,SAAS;AAEpE,KAAI,cAAc,QAAW;EAE3B,MAAM,YAAY,OAAO,cAAc;EACvC,MAAM,eAAe,cAAc,aAAa,KAAK,YAAY;EACjE,MAAM,2BAA2B,aAAa,OAAO,wBAAwB;EAC7E,MAAM,kBAAkB,aAAa,CAAC;AAGtC,MAAI,gBAAgB,4BAA4B,iBAAiB;AAC/D,wBACE;IACE,MAAM;IACN,QAAQ,OAAO;IACf,WAAW,OAAO;IAClB;IACD,EACD;IACE;IACA;IACA;IACD,CACF;AACD,UAAO;;AAGT,SAAO;;AAGT,QAAO;;;;;;;;;;;;;;;;;;;;;;;;;;AClDT,MAAa,0BAA0B,WAQrC,iBACE,OAAO,QACP;CACE,qBAAqB,OAAO;CAC5B,WAAW;CACZ,EACD;CACE,YAAY;CACZ,eAAe;CAChB,EACD,OAAO,UACR;;;;;;;;;;;;;;;;;;AChCH,MAAa,8BAA8B,WAGG;CAC5C,MAAM,cAAc;EAClB,cAAc,aAAa,OAAO,QAAQ;EAC1C,gBAAgB,aAAa,OAAO,cAAc;EACnD;AAMD,KAAI,YAAY,gBAAgB,YAAY,eAAgB,QAAO;AACnE,KAAI,YAAY,eAAgB,QAAO;AACvC,KAAI,YAAY,aAAc,QAAO;AAErC,QAAO;;;;;;;;;;;;;;;;;;;;;;;;;;ACPT,MAAa,4BAA4B,WAQvC,iBACE,OAAO,QACP;CACE,qBAAqB,OAAO;CAC5B,WAAW;CACZ,EACD;CACE,YAAY;CACZ,eAAe;CAChB,EACD,OAAO,UACR;;;;;;;;;ACrCH,SAAS,WAAW,MAA6B;AAC/C,KAAI;EACF,MAAM,MAAM,WAAG,aAAa,MAAM,OAAO,CAAC,MAAM;EAChD,MAAM,IAAI,OAAO,IAAI;AACrB,SAAO,OAAO,SAAS,EAAE,GAAG,IAAI;SAC1B;AACN,SAAO;;;;;;;AAQX,SAAS,iBAAgC;CAEvC,MAAM,KAAK,WAAW,4BAA4B;AAClD,KAAI,O
AAO,KAAM,QAAO;CAGxB,MAAM,KAAK,WAAW,8CAA8C;AACpE,KAAI,OAAO,KAAM,QAAO;AAExB,QAAO;;;;;;AAOT,SAAgB,wBAAgC;CAC9C,MAAM,YAAY,WAAG,mBAAmB,CAAC;CACzC,MAAM,cAAc,gBAAgB;AAEpC,KAAI,eAAe,cAAc,KAAK,cAAc,SAClD,QAAO,KAAK,IAAI,WAAW,YAAY;AAGzC,QAAO;;;;;;;;;;;;;;;;;;AC/BT,SAAgB,sBACd,SACqB;CACrB,IAAI,aAAoC;CAExC,IAAI,cAAyC;CAE7C,IAAI,aAAa,QAAQ,OAAO,QAAQ;CAExC,IAAI,UAAU,QAAQ,aAAa;CACnC,IAAI,UAAU,QAAQ,UAAU;CAChC,IAAI,WAAWA,uBAAY,sBAAsB;CACjD,IAAI,kBAAkB,KAAK,KAAK;CAEhC,MAAM,SAAS;EACb,UAAU,SAAS,YAAY;EAE/B,YAAY,SAAS,aAAa,OAAO,OAAO;EACjD;CAED,SAAS,QAAc;AACrB,MAAI,WAAY;AAEhB,eAAa,kBAAkB;AAC7B,OAAI;IACF,MAAM,MAAM,KAAK,KAAK;IAEtB,MAAM,UAAU,eAAe;KAC7B;KACA;KACA;KACA;KACA,WAAW,OAAO;KAClB,eAAe;KACf,uBAAuB;KACvB,UAAU,OAAO;KAClB,CAAC;AAEF,kBAAc;AACd,aAAS,WAAW,QAAQ;AAE5B,cAAU,QAAQ,IAAI;AACtB,eAAW,QAAQ,KAAK;AACxB,cAAU,QAAQ,OAAO;AAEzB,iBAAa,QAAQ,OAAO,QAAQ;AACpC,sBAAkB;YACX,GAAY;AACnB,UAAM;AACN,UAAM,IAAI,MAAM,kCAAkC,EAAE,OAAO,GAAG,CAAC;;KAEhE,OAAO,SAAS;AAEnB,MAAI,OAAO,WAAW,UAAU,WAC9B,YAAW,OAAO;;CAItB,SAAS,OAAa;AACpB,MAAI,YAAY;AACd,iBAAc,WAAW;AACzB,gBAAa;;;CAIjB,SAAS,aAAwC;AAC/C,MAAI,YACF,QAAO;AAET,SAAO;;CAGT,SAAS,aAAa,WAAwD;AAC5E,MAAI,UAAU,cAAc,OAE1B,QAAO,YAAY,UAAU,YAAY,OAAO;AAGlD,MAAI,UAAU,aAAa,QAAW;AACpC,UAAO,WAAW,UAAU;AAG5B,OAAI,YAAY;AACd,UAAM;AACN,WAAO;;;;AAKb,QAAO;EACL;EACA;EACA;EACA;EACD;;;;;;;;;;;;;AAcH,SAAgB,eAAe,OASR;CACrB,MAAM,YAAY,QAAQ,OAAO,QAAQ;CAGzC,MAAM,YADY,OAAO,YAAY,MAAM,WAAW,GACxB;CAC9B,MAAM,gBAAgB,MAAM,gBAAgB,MAAM;CAElD,MAAM,MAAM,QAAQ,aAAa;CACjC,MAAM,WAA+B;EACnC,KAAK,IAAI,MAAM,MAAM,QAAQ;EAC7B,WAAW,IAAI,YAAY,MAAM,QAAQ;EACzC,UAAU,IAAI,WAAW,MAAM,QAAQ;EACvC,UAAU,IAAI,WAAW,MAAM,QAAQ;EACvC,cAAc,IAAI,eAAe,MAAM,QAAQ;EAChD;CACD,MAAM,WAAW,KAAK,IAAI,GAAG,IAAI,MAAM,MAAM,UAAU;CAEvD,MAAM,WAAW,QAAQ,SAAS,MAAM,QAAQ;CAEhD,MAAM,YADS,SAAS,SAAS,SAAS,QAAQ,MACzB;CAEzB,MAAM,OAAOA,uBAAY,qBAAqB,MAAM,SAAS;AAE7D,QAAO;EACL,KAAK;GAEH,aAAa;GACb,OAAO;GACP,OAAO,QAAQ,UAAU;GAC1B;EAED,MAAM;GACJ,aAAa,KAAK;GAClB,OAAO;GACP,OAAOA,uBAAY,sBAAsB;GAC1C;EAED,QAAQ;GACN,aAAa;GACb,OAAO;GACP,OAAO;GACR;EAED,aAAa,MAAM;EACnB,qBAAqB,MAAM;EAC3B,UAAU,MAAM;EAChB;EACD;;;;;AC1KH,IAAW,mBAA+C;;AAG1D,IAAW;;AAGX,IAAW,iBAAyB;;AAGpC,MAAa,0BAA0B;AAEvC,SAAgB,eAAqB;AAMnC,KAAI,CAAC,kBAAkB;AACrB,MAAI;GACF,MAAM,qBAAqB,uBAAuB;AAElD,OAAI,sBAAsB,qBAAqB,EAC7C,kBAAkB,qBAAqB,OAAO,OAAQ;UAElD;AAMR,qBAAmB,sBAAsB;GACvC,SAAS,SAAS;AAChB,eAAW;;GAEb,UAAU;GACV,WAAW;GACZ,CAAC;AAEF,mBAAiB,OAAO;;;;;;;;;;;ACxC5B,SAAgB,yBAAyB,sBAAwC;AAC/E,MAAK,MAAM,QAAQ,iBAAiB;EAClC,MAAM,SAAS,qBAAqB,KAAK;AACzC,MAAI,UAAU,OAAO,SAAS,GAAG;GAC/B,MAAM,WAAW,OAAO,QAAQ,KAAK,QAAQ,MAAM,KAAK,EAAE,GAAG,OAAO;GAEpE,MAAM,QAAQ;AACd,QAAK,gBAAgB,KAAK,iBAAiB,IAAI,SAAS,WAAW;;;;;;;;;;;;;ACRzE,SAAgB,YAAY,EAC1B,OACA,WACA,SACA,SACA,SAOS;AAET,KAAI,cAAc,QAAS,QAAO;AAGlC,QAAO,WADI,QAAQ,cAAc,UAAU,cACrB,QAAQ;;;;;;;;;;;;;;;;;;;;;ACkDhC,MAAa,+BACX,YACuB;CACvB,MAAM,EACJ,SACA,UAAU,EAAE,EACZ,yBAAyB,wBACzB,uBAAuB,sBACvB,yBAAyB,4BACvB;CAGJ,MAAM,eAAe,QAAQ,UAAU;CACvC,MAAM,YAAY,QAAQ,OAAO;CACjC,MAAM,aAAa,QAAQ,QAAQ;CAGnC,MAAM,oBAAoB,SAAS,OAAO,eAAe;CAGzD,MAAM,oBAAoB,SAAS,IAAI,eAAe;CACtD,MAAM,qBAAqB,SAAS,KAAK,eAAe;CAKxD,MAAM,iBAAiB,IAAI;CAC3B,MAAM,kBAAkB,IAAI;CAG5B,MAAM,cACJ,oBAAoB,eAAe,iBAAiB,YAAY,kBAAkB;CAEpF,MAAM,cAAc,eAAe,YAAY;CAG/C,MAAM,QAAQ,KAAK,IAAI,GAAG,KAAK,IAAI,GAAG,cAAc,YAAY,CAAC;AAoBjE,QAAO;EACL,iBAlBsB,YAAY;GAClC,OAAO;GACP,WAAW;GACX,SAAS;GACT,SAAS;GACT,OAAO;GACR,CAAC;EAaA,mBAVwB,YAAY;GACpC,OAAO;GACP,WAAW;GACX,SAAS;GACT,SAAS;GACT,OAAO;GACR,CAAC;EAKD;;;;;;;;;;;;;;;;;;;;;;;AC/GH,SAAgB,uBAAuB,EACrC,kBACA,cAIgC;CAGhC,IAAI,kBAAiD,gBAAgB;AAErE,KAAI,oBAAoB,GAAG;AAIzB,MAAI,aAAa,gBAAgB,OAE/B,mBAAkB;AAEpB,oBAAkB,gBAAgB,aAAa;QAC1C;EAIL,IAAI,YAAY,KAAK,QAAQ,
GAAG;AAMhC,OAAK,MAAM,QAAQ,iBAAiB;AAClC,gBAAa,KAAK;AAClB,OAAI,aAAa,GAAG;AAClB,sBAAkB;AAClB;;;;AAKN,QAAO;;;;;AC1DT,IAAkB,0DAAX;AACL;AACA;AACA;;;;;;;;;AASF,MAAa,aACX,OACA,KACA,SAAwB,cAAc,WAC1B;CACZ,MAAM,WAAW,MAAM;CACvB,MAAM,WAAW,MAAM;AAEvB,KAAI,CAAC,YAAY,CAAC,SAChB,QAAO,MAAM,MAAM,OAAO,IAAI;CAGhC,MAAM,QAAQ,MAAM,MAAM,IAAI,IAAI;AAClC,KAAI,CAAC,MAAO,QAAO;AAEnB,OAAM,MAAM,OAAO,IAAI;AACvB,OAAM,WAAW,KAAK,MAAM,IAAI,OAAO;AACvC,KAAI,WAAW,cAAc,OAC3B,OAAM,WAAW,KAAK,MAAM,IAAI,OAAO;AAGzC,QAAO;;;;;;;;ACkLT,IAAY,wDAAL;;AAEL;;AAEA;;AAEA;;;;;;;;;;;;;;;;;;;;;;;;ACvMF,SAAgB,gBAAgB,OAAmB,OAA2C;CAC5F,MAAM,iBAAiB,MAAM,GAAG;CAIhC,IAAI,+BAA+B;CAGnC,IAAI,SAAS,aAAa;CAE1B,MAAM,OAAO,MAAM;AACnB,KAAI,KACF,MAAK,MAAM,OAAO,MAAM;EACtB,MAAM,KAAK,MAAM,MAAM,IAAI,IAAI;AAC/B,MAAI,CAAC,GAAI;EAKT,MAAM,CAAC,cAAc,mBAAmB;AAGxC,MAAI,gBAAgB,gBAAgB;AAClC,YAAS,aAAa;AACtB;;AAGF,MAAI,mBAAmB,gBAAgB;AAErC,OAAI,kBAAkB,6BACpB,gCAA+B;AAEjC,YAAS,aAAa;;;AAM5B,QAAO,CAAC,QAAQ,WAAW,aAAa,QAAQ,+BAA+B,EAAE;;;;;;;;;;;;;;;;;;;;ACxCnF,SAAgB,mBACd,OACA,OAGA,KACc;CACd,MAAM,CAAC,aAAa,WAAW,kBAAkB,MAAM;CAGvD,MAAM,CAAC,WAAW,gCAAgC,gBAAgB,OAAO,MAAM;AAC/E,KAAI,cAAc,aAAa,QAAS,QAAO,aAAa;CAC5D,MAAM,cAAc,iBAAiB;AACrC,KACE,cAAc,aAAa,SAC3B,iBAAiB,KACjB,MAAM,+BAA+B,eACrC,OAAO,eAKP,QAAO,aAAa;AAItB,KAAI,MAAM,UACR,QAAO,aAAa;AAEtB,KAAI,iBAAiB,KAAK,MAAM,eAC9B,QAAO,aAAa;AAGtB,QAAO,aAAa;;;;;;;;;;;;;;;;;AAqBtB,MAAa,WACX,OACA,OACA,QACY;AACZ,KAAI,OAAO,UAAU,SAEnB,QAAO,UAAU,aAAa;AAGhC,QAAO,mBAAmB,OAAO,OAAO,IAAI,KAAK,aAAa;;;;;;;;;;;;;;;;;AAiBhE,MAAa,WACX,OACA,OAGA,QACY;AACZ,KAAI,OAAO,UAAU,SAEnB,QAAO,UAAU,aAAa;AAGhC,QAAO,mBAAmB,OAAO,OAAO,IAAI,KAAK,aAAa;;;;;;;;;;;;;;;;;AAkBhE,MAAa,aACX,OACA,OAGA,QACY;AACZ,KAAI,OAAO,UAAU,SAEnB,QAAO,UAAU,aAAa;AAGhC,QAAO,mBAAmB,OAAO,OAAO,IAAI,KAAK,aAAa;;;;;;;;;;;;;;;;AC1HhE,MAAM,6BAAqC;AACzC,KAAmB,CAAC,SAAU,QAAO;AACrC,QAAO,SAAS,QAAQ,eAAe;;;;;;;;;;;;AAazC,MAAM,sBAAsB,UAA8B;AACxD,KAAI,CAAC,OAAO,SAAS,MAAM,QAAQ,IAAI,MAAM,WAAW,KAAK,MAAM,QAAQ,EAAG,QAAO;AACrF,QAAO,KAAK,IAAI,GAAG,MAAM,OAAO,MAAM,QAAQ;;;;;;;;;;;;;;;;AAiBhD,MAAa,wBAAwB,UAAqC;CACxE,MAAM,SAAS,MAAM;AACrB,KAAI,CAAC,UAAU,WAAW,QAAS,QAAO;AAE1C,KAAI,WAAW,OACb,QAAO,mBAAmB,MAAM;AAGlC,KAAI,WAAW,SACb,QAAO,sBAAsB;AAG/B,KAAI,WAAW,SACb,QAAO,KAAK,IAAI,GAAG,KAAK,IAAI,sBAAsB,EAAE,mBAAmB,MAAM,CAAC,CAAC;AAGjF,QAAO;;;;;;;;;;;;;;AAeT,MAAa,eACX,MACA,OACA,iBACY;AACZ,KAAI,SAAS,MAAO,QAAO;AAC3B,KAAI,SAAS,KAAM,QAAO;CAE1B,MAAM,gBAAgB,OAAO,KAAK;CAClC,MAAM,eACJ,iBAAiB,UACb,yCACA;AAEN,KAAI,OAAO,MAAM,cAAc,CAAE,QAAO;CAExC,MAAM,QAAQ,qBAAqB,MAAM;AACzC,KAAI,CAAC,MACH,QAAO;AAET,QAAO,SAAS,KAAK,IAAI,GAAG,KAAK,IAAI,GAAG,cAAc,CAAC;;;;;;;;;;;;AC3FzD,SAAgB,WACd,OAMA,mBAA2B,oBACqD;AAChF,KAAI,CAAC,MAAM,WACT,OAAM,aAAa,MAAM,MAAM,SAAS;CAG1C,IAAI,YAAY;CAChB,IAAI,eAAe;CACnB,IAAI,aAAa;AAEjB,MAAK,IAAI,IAAI,GAAG,IAAI,kBAAkB,KAAK;EACzC,MAAM,OAAO,MAAM,WAAW,MAAM;AAEpC,MAAI,KAAK,MAAM;AACb,SAAM,aAAa,MAAM,MAAM,SAAS;AACxC;;AAGF,eAAa;EACb,MAAM,CAAC,KAAK,SAAS,KAAK;EAE1B,MAAM,MAAM,KAAK,KAAK;EAEtB,MAAM,SAAS,mBAAmB,OAAO,OAAO,IAAI;AACpD,MAAI,UAAU,OAAO,QAAQ,IAAI,EAAE;AACjC,aAAU,OAAO,KAAK,cAAc,QAAQ;AAC5C,mBAAgB;aACP,QAAQ,OAAO,QAAQ,IAAI,EAAE;AACtC,iBAAc;AAEd,OAAI,YAAY,MAAM,mBAAmB,OAAO,QAAQ,CACtD,WAAU,OAAO,KAAK,cAAc,MAAM;;;CAKhD,MAAM,oBAAoB,YAAY,MAAM,mBAAmB,OAAO,QAAQ,GAAG,aAAa;AAC9F,QAAO;EACL;EACA;EACA;EACA,OAAO,YAAY,KAAK,eAAe,qBAAqB,YAAY;EACzE;;;;;;;;;;;;;;;;;AC1CH,SAAgB,gCACd,uBAA+B,2BACvB;CACR,MAAM,6BAA6B,iCAAiC;CAEpE,MAAM,sBAAsB,YAAY;EACtC,OAAO,UAAU,OAAO,eAAe;EAEvC,WAAW;EACX,SAAS;EAET,SAAS;EACT,OAAO;EACR,CAAC;AAKF,QAAO,KAAK,IAAI,GAAG,KAAK,IAAI,GAAG,oBAAoB,CAAC;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;ACatD,SAAgB,qBAA6B;CAC3C,IAAI,mBAAmB;AAEvB,MAAK,MAAM,aAAa,iB
AAiB;AACvC,MAAI,UAAU,MAAM,QAAQ,GAAG;AAE7B,aAAU,eAAe;AACzB;;EAKF,IAAI,eAAe;AACnB,MAAI,UAAU,gBAAgB,sBAC5B,gBAAe,UAAU;EAGT;GAEhB,MAAM,yBAAyB,gCAC7B,UAAU,sBACX;AAED,OAAI,gBAAgB,wBAAwB;AAG1C,cAAU,eAAe;AACzB;;;AAKJ,YAAU,eAAe,UAAU,MAAM,OAAO;AAChD,sBAAoB,UAAU;;AAGhC,QAAO;;;;;ACxET,IAAI,iBAAiB;AACrB,IAAI,uBAA8C;AAElD,SAAgB,WAAW,OAAyB;AAClD,KAAI,eAAgB;AACpB,kBAAiB;AACjB,eAAc;AACd,CAAK,MAAM,MAAM;;;;;;;AAiBnB,MAAa,QAAQ,OACnB,OAGA,YAA4B,EAAE,KACZ;CAClB,MAAM,EACJ,WAAW,iBACX,UAAU,gBACV,MAAM,KAAK,KAAK,EAChB,aAAa,UACX;CACJ,MAAM,YAAY;CAElB,IAAI,kBAAkB;CACtB,IAAI,oBAAoB;AACxB,KAAoB,SAClB,EAAC,CAAE,iBAAiB,qBAAsB,4BAA4B,EAAE,SAAS,UAAU,CAAC;CAG9F,MAAM,mBAAmB,oBAAoB;CAC7C,MAAM,uBAAmC,EAAE;CAK3C,MAAM,kBACJ,oBAAoB,IAAI,qBAAqB,gBAAgB,SAAS;CAExE,IAAI,aAAa;AACjB,QAAO,MAAM;AACX,gBAAc;EAEd,MAAM,kBAAkB,uBAAuB;GAAE;GAAY;GAAkB,CAAC;AAChF,MAAI,CAAC,gBAEH;EAGF,MAAM,EAAE,UAAU,WAAW,iBAAiB,gBAAgB;AAE9D,GAAC,qBAAqB,gBAAgB,yBAAyB,EAAE,EAAE,KAAK,MAAM;AAE9E,MAAI,KAAK,KAAK,GAAG,YAAY,kBAC3B;AAGF,QAAM,SAAS;;AAGjB,0BAAyB,qBAAqB;AAG9C,KAAI,CAAC,WACH,gBAAe,KAAK,MAAM,OAAO,UAAU,EAAE,gBAAgB;;AAKjE,MAAM,mBAAiC,IAAI,OAAO;AAChD,wBAAuB,WAAW,IAAI,GAAG;AACzC,KAAI,OAAO,qBAAqB,UAAU,WAAY,sBAAqB,OAAO;;AAEpF,MAAa,uBAAoC,IAAI,SAAQ,YAAW,aAAa,QAAQ,CAAC;;;;ACzF9F,IAAI,iBAAiB;AACrB,MAAM,6BAA6B;AACnC,MAAa,kBAAgC,EAAE;;;;;;AAe/C,MAAa,eAAe,UAAwB,EAAE,KAAiB;CACrE,MAAM,EACJ,UACA,UACA,aAAa,aACb,UAAU,kBACV,gBAAgB,yBAChB,wBAAwB,2BACxB,qBAAqB,sBACrB,iBACA,mBACA,qBACA,kBAAkB,SAChB;AAEJ;AAIA,KAAI,iBAAiB,2BAGnB,SAAQ,KACN,gCAAgC,eAAe,kNAChD;CAGH,MAAM,8BACJ,uBACA,2BAA2B;EACzB;EACA;EACD,CAAC;CAEJ,MAAM,0BAA0B,uBAAuB;EACrD,QAAQ;GACN;GACA;GACD;EACD,qBAAqB;EACrB,WAAW;EACZ,CAAC;CACF,MAAM,4BAA4B,yBAAyB;EACzD,QAAQ;GACN;GACA;GACD;EACD,qBAAqB;EACrB,WAAW;EACZ,CAAC;CAEF,MAAM,QAAoB;EACxB,uBAAO,IAAI,KAAK;EAChB,YAAY;EACZ,IAAI,OAAO;AACT,UAAO,MAAM,MAAM;;EAErB;EACA;EACA;EACA;EACA;EACA;EACA,iBAAiB;EACjB,mBAAmB;EACnB,qBAAqB;EACrB;EACA;EACA,qBAAqB;EACrB,eAAe;EACf,cAAc;EACd,uBAAO,IAAI,KAAK;EACjB;AAED,OAAM,sBAAsB,gBAAgB,KAAK,MAAM,GAAG;AAE1D,YAAW,MAAM;AAEjB,QAAO;;;;;;;;;;;;;;;;AC3FT,MAAa,iBACX,OACA,KACA,WACA,MAAc,KAAK,KAAK,KAC0B;CAClD,MAAM,QAAQ,MAAM,MAAM,IAAI,IAAI;AAElC,KAAI,CAAC,MAAO,QAAO,CAAC,MAAM,OAAU;CAEpC,MAAM,SAAS,mBAAmB,OAAO,OAAO,IAAI;AAEpD,KAAI,QAAQ,OAAO,QAAQ,IAAI,CAAE,QAAO,CAAC,QAAQ,MAAM;AAEvD,KAAI,QAAQ,OAAO,QAAQ,IAAI,EAAE;AAE/B,MAAI,YADmB,aAAa,MAAM,iBACV,OAAO,MAAM,CAC3C,WAAU,OAAO,KAAK,cAAc,MAAM;AAE5C,SAAO,CAAC,QAAQ,MAAM;;AAIxB,WAAU,OAAO,KAAK,cAAc,QAAQ;AAE5C,QAAO,CAAC,QAAQ,OAAU;;;;;;;;;;;AAY5B,MAAa,OACX,OACA,KACA,WACA,MAAc,KAAK,KAAK,KACZ;CACZ,MAAM,GAAG,SAAS,cAAc,OAAO,KAAK,WAAW,IAAI;AAC3D,QAAO,QAAQ,MAAM,KAAK;;;;;;;;;;;;AClD5B,MAAa,OAAO,OAAmB,KAAa,MAAc,KAAK,KAAK,KAAc;AACxF,QAAO,IAAI,OAAO,KAAK,IAAI,KAAK;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;ACelC,SAAgB,cACd,OACA,MACA,UAAgC,EAAE,EAGlC,OAAe,KAAK,KAAK,EACnB;CACN,MAAM,UAAU,MAAM,QAAQ,KAAK,GAAG,OAAO,CAAC,KAAK;CACnD,MAAM,UAAU,QAAQ,WAAW;AAEnC,MAAK,MAAM,OAAO,SAAS;EACzB,MAAM,aAAa,MAAM,MAAM,IAAI,IAAI;AAEvC,MAAI,WAGF,KAAI,QACF,YAAW,KAAK;MAEhB,YAAW,KAAK;MAMlB,OAAM,MAAM,IAAI,KAAK,CAAC,UAAU,IAAI,MAAM,UAAU,OAAO,EAAE,CAAC;;;;;;;;;;;;;;;;;;;;;;;ACjCpE,MAAa,eACX,OACA,OAGA,MAAc,KAAK,KAAK,KACZ;CACZ,MAAM,EAAE,KAAK,OAAO,KAAK,UAAU,aAAa,kBAAkB,SAAS;AAE3E,KAAI,UAAU,OAAW,QAAO;AAChC,KAAI,OAAO,KAAM,OAAM,IAAI,MAAM,eAAe;AAChD,KAAI,MAAM,QAAQ,MAAM,WAAW,CAAC,MAAM,MAAM,IAAI,IAAI,CAEtD,QAAO;AAET,KAEE,UAAU,OAAO,MAAM,OACvB,UAAU,OAAO,MAAM,OAAO,MAAM,gBAAgB,OAAO,QAC3D,CAAC,MAAM,MAAM,IAAI,IAAI,CAGrB,QAAO;CAGT,MAAM,MAAM,YAAY,MAAM;CAC9B,MAAM,cAAc,oBAAoB,MAAM;CAE9C,MAAM,YAAY,MAAM,IAAI,MAAM,MAAM;CACxC,MAAM,QAAoB;EACxB;GACE;GACA;GACA,cAAc,IAAI,YAAY,cAAc;GAC7C;EACD;EACA,OAAO,SAAS,WAAW,CAAC,KAA
K,GAAG,MAAM,QAAQ,KAAK,GAAG,OAAO;EAClE;AAED,OAAM,MAAM,IAAI,KAAK,MAAM;AAC3B,QAAO;;;;;;;;;;;;;;AClBT,IAAa,gBAAb,MAA6D;CAC3D,AAAQ;;;;;;;CAQR,YAAY,SAAwB;AAClC,OAAK,QAAQ,YAAY,QAAQ;;CAGnC,IAAI,OAAe;AACjB,SAAO,KAAK,MAAM;;CAMpB,IACE,KACA,SACkC;AAClC,MAAI,SAAS,oBAAoB,MAAM;GACrC,MAAM,CAAC,QAAQ,SAAS,cAAc,KAAK,OAAO,KAAK,QAAQ,WAAW;AAC1E,OAAI,CAAC,MAAO,QAAO;GAEnB,MAAM,CAAC,YAAY,OAAO,QAAQ;GAClC,MAAM,GAAG,WAAW,kBAAkB;AAEtC,UAAO;IACL,MAAM;IACN,gBAAgB;IAChB,uBAAuB;IACvB;IACA;IACD;;AAGH,SAAO,IAAI,KAAK,OAAO,KAAK,SAAS,WAAW;;CAGlD,IAAI,KAAa,OAAgB,SAA+B;AAC9D,SAAO,YAAY,KAAK,OAAO;GAC7B;GACA;GACA,KAAK,SAAS;GACd,aAAa,SAAS;GACtB,MAAM,SAAS;GAChB,CAAC;;CAGJ,OAAO,KAAsB;AAC3B,SAAO,UAAU,KAAK,OAAO,IAAI;;CAGnC,IAAI,KAAsB;AACxB,SAAO,IAAI,KAAK,OAAO,IAAI;;CAG7B,QAAc;AACZ,QAAM,KAAK,MAAM;;CAGnB,cAAc,MAAyB,SAAsC;AAC3E,gBAAc,KAAK,OAAO,MAAM,WAAW,EAAE,CAAC"}