layercache 1.2.0 → 1.2.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -1,12 +1,12 @@
1
1
  # layercache
2
2
 
3
- **Multi-layer caching for Node.js — memory Redis your DB, unified in one API.**
3
+ **Production-ready multi-layer caching for Node.js — memory, Redis, persistence, invalidation, and resilience in one API.**
4
4
 
5
5
  [![npm version](https://img.shields.io/npm/v/layercache)](https://www.npmjs.com/package/layercache)
6
6
  [![npm downloads](https://img.shields.io/npm/dw/layercache)](https://www.npmjs.com/package/layercache)
7
7
  [![license](https://img.shields.io/npm/l/layercache)](LICENSE)
8
8
  [![TypeScript](https://img.shields.io/badge/TypeScript-first-blue)](https://www.typescriptlang.org/)
9
- [![test coverage](https://img.shields.io/badge/tests-132%20passing-brightgreen)](https://github.com/flyingsquirrel0419/layercache)
9
+ [![test coverage](https://img.shields.io/badge/tests-164%20passing-brightgreen)](https://github.com/flyingsquirrel0419/layercache)
10
10
 
11
11
  ```
12
12
  L1 hit ~0.01 ms ← served from memory, zero network
@@ -26,6 +26,8 @@ Most Node.js services end up with the same problem:
26
26
 
27
27
  layercache solves all three. You declare your layers once and call `get`. Everything else is handled.
28
28
 
29
+ It is designed for production services that need predictable cache behavior under load: stampede prevention, cross-instance invalidation, layered TTL control, operational metrics, and safer persistence defaults.
30
+
29
31
  ```ts
30
32
  const user = await cache.get('user:123', () => db.findUser(123))
31
33
  // ↑ only called on a full miss
@@ -40,15 +42,22 @@ On a hit, the value is returned from the fastest layer that has it, and automati
40
42
  - **Layered reads & automatic backfill** — hits in slower layers propagate up
41
43
  - **Cache stampede prevention** — mutex-based deduplication per key
42
44
  - **Tag-based invalidation** — `set('user:123:posts', posts, { tags: ['user:123'] })` then `invalidateByTag('user:123')`
45
+ - **Batch tag invalidation** — `invalidateByTags(['tenant:a', 'users'], 'all')` for OR/AND invalidation in one call
43
46
  - **Pattern invalidation** — `invalidateByPattern('user:*')`
47
+ - **Prefix invalidation** — efficient `invalidateByPrefix('user:123:')` for hierarchical keys
48
+ - **Generation-based invalidation** — `generation` prefixes keys with `vN:` and `bumpGeneration()` rotates the namespace instantly
44
49
  - **Per-layer TTL overrides** — different TTLs for memory vs. Redis in one call
50
+ - **TTL policies** — align TTLs to time boundaries (`until-midnight`, `next-hour`, `{ alignTo }`, or a function)
45
51
  - **Negative caching** — cache known misses for a short TTL to protect the database
46
52
  - **Stale strategies** — `staleWhileRevalidate` and `staleIfError` as opt-in read behavior
47
53
  - **TTL jitter** — spread expirations to avoid synchronized stampedes
48
54
  - **Sliding & adaptive TTL** — extend TTL on every read or ramp it up for hot keys
49
55
  - **Refresh-ahead** — trigger background refresh when TTL drops below a threshold
56
+ - **Fetcher rate limiting** — cap concurrent fetchers or requests per interval
50
57
  - **Best-effort writes** — tolerate partial layer write failures when desired
58
+ - **Write-behind mode** — write local layers immediately and flush slower remote layers asynchronously
51
59
  - **Bulk reads** — `mget` uses layer-level `getMany()` when available
60
+ - **Bulk writes** — `mset` uses layer-level `setMany()` when available
52
61
  - **Distributed tag index** — `RedisTagIndex` keeps tag state consistent across multiple servers
53
62
  - **Optional distributed single-flight** — plug in a coordinator to dedupe misses across instances
54
63
  - **Cross-server L1 invalidation** — Redis pub/sub bus flushes stale memory on other instances when you write or delete
@@ -59,17 +68,22 @@ On a hit, the value is returned from the fastest layer that has it, and automati
59
68
  - **Graceful degradation** — skip a failing layer for a configurable retry window
60
69
  - **Circuit breaker** — per-key or global; opens after N failures, recovers after cooldown
61
70
  - **Compression** — transparent async gzip/brotli in `RedisLayer` (non-blocking) with a byte threshold
71
+ - **Serializer fallback chains** — transparently read legacy payloads (for example JSON) and rewrite them with the primary serializer
62
72
  - **Metrics & stats** — per-layer hit/miss counters, **per-layer latency tracking**, circuit-breaker trips, degraded operations; HTTP stats handler
73
+ - **Health checks** — `cache.healthCheck()` returns per-layer health and latency
63
74
  - **Persistence** — `exportState` / `importState` for in-process snapshots; `persistToFile` / `restoreFromFile` for disk
64
- - **Admin CLI** — `layercache stats | keys | invalidate` against any Redis URL
65
- - **Framework integrations** — Express middleware, Fastify plugin, tRPC middleware, GraphQL resolver wrapper
75
+ - **Admin CLI** — `layercache stats | keys | inspect | invalidate` against any Redis URL
76
+ - **Framework integrations** — Express middleware, Fastify plugin, Hono middleware, tRPC middleware, GraphQL resolver wrapper
77
+ - **OpenTelemetry plugin** — instrument `get` / `set` / invalidation flows with spans
66
78
  - **MessagePack serializer** — drop-in replacement for lower Redis memory usage
67
79
  - **NestJS module** — `CacheStackModule.forRoot(...)` and `forRootAsync(...)` with `@InjectCacheStack()`
68
80
  - **`getOrThrow()`** — throws `CacheMissError` instead of returning `null`, for strict use cases
69
81
  - **`inspect()`** — debug a key: see which layers hold it, remaining TTLs, tags, and staleness state
82
+ - **MemoryLayer cleanup hooks** — periodic TTL cleanup and `onEvict` callbacks
70
83
  - **Conditional caching** — `shouldCache` predicate to skip caching specific fetcher results
71
- - **Nested namespaces** — `namespace('a').namespace('b')` for composable key prefixes
84
+ - **Nested namespaces** — `namespace('a').namespace('b')` for composable key prefixes with namespace-scoped metrics
72
85
  - **Custom layers** — implement the 5-method `CacheLayer` interface to plug in Memcached, DynamoDB, or anything else
86
+ - **Edge-safe entry point** — `layercache/edge` exports the non-Node helpers for Worker-style runtimes
73
87
  - **ESM + CJS** — works with both module systems, Node.js ≥ 18
74
88
 
75
89
  ---
@@ -168,6 +182,15 @@ await cache.set('user:123:posts', posts, { tags: ['user:123'] })
168
182
  await cache.invalidateByTag('user:123') // both keys gone
169
183
  ```
170
184
 
185
+ ### `cache.invalidateByTags(tags, mode?): Promise<void>`
186
+
187
+ Delete keys that match any or all of a set of tags.
188
+
189
+ ```ts
190
+ await cache.invalidateByTags(['tenant:a', 'users'], 'all') // keys tagged with both
191
+ await cache.invalidateByTags(['users', 'posts'], 'any') // keys tagged with either
192
+ ```
193
+
171
194
  ### `cache.invalidateByPattern(pattern): Promise<void>`
172
195
 
173
196
  Glob-style deletion against the tracked key set.
@@ -176,6 +199,16 @@ Glob-style deletion against the tracked key set.
176
199
  await cache.invalidateByPattern('user:*') // deletes user:1, user:2, …
177
200
  ```
178
201
 
202
+ ### `cache.invalidateByPrefix(prefix): Promise<void>`
203
+
204
+ Prefer this over glob invalidation when your keys are hierarchical.
205
+
206
+ ```ts
207
+ await cache.invalidateByPrefix('user:123:') // deletes user:123:profile, user:123:posts, ...
208
+ ```
209
+
210
+ The prefix is matched as-is. You do not need to append `*`, and namespace helpers pass their namespace prefix directly.
211
+
179
212
  ### `cache.mget<T>(entries): Promise<Array<T | null>>`
180
213
 
181
214
  Concurrent multi-key fetch, each with its own optional fetcher.
@@ -195,6 +228,13 @@ const [user1, user2] = await cache.mget([
195
228
  const { hits, misses, fetches, staleHits, refreshes, writeFailures } = cache.getMetrics()
196
229
  ```
197
230
 
231
+ ### `cache.healthCheck(): Promise<CacheHealthCheckResult[]>`
232
+
233
+ ```ts
234
+ const health = await cache.healthCheck()
235
+ // [{ layer: 'memory', healthy: true, latencyMs: 0.03 }, ...]
236
+ ```
237
+
198
238
  ### `cache.resetMetrics(): void`
199
239
 
200
240
  Resets all counters to zero — useful for per-interval reporting.
@@ -229,6 +269,21 @@ const getUser = cache.wrap(
229
269
  )
230
270
  ```
231
271
 
272
+ ### Generation-based invalidation
273
+
274
+ Add a generation prefix to every key and rotate it when you want to invalidate the whole cache namespace without scanning:
275
+
276
+ ```ts
277
+ const cache = new CacheStack([...], { generation: 1 })
278
+
279
+ await cache.set('user:123', user)
280
+ cache.bumpGeneration() // now reads use v2:user:123
281
+ ```
282
+
283
+ ### OpenTelemetry note
284
+
285
+ `createOpenTelemetryPlugin()` currently wraps a `CacheStack` instance's methods directly. Use one OpenTelemetry plugin per cache instance; if you need to compose multiple wrappers, install them in a fixed order and uninstall them in reverse order.
286
+
232
287
  ### `cache.warm(entries, options?)`
233
288
 
234
289
  Pre-populate layers at startup from a prioritised list. Higher `priority` values run first.
@@ -246,7 +301,7 @@ await cache.warm(
246
301
 
247
302
  ### `cache.namespace(prefix): CacheNamespace`
248
303
 
249
- Returns a scoped view with the same full API (`get`, `set`, `delete`, `clear`, `mget`, `wrap`, `warm`, `invalidateByTag`, `invalidateByPattern`, `getMetrics`). `clear()` only touches `prefix:*` keys.
304
+ Returns a scoped view with the same full API (`get`, `set`, `delete`, `clear`, `mget`, `wrap`, `warm`, `invalidateByTag`, `invalidateByPattern`, `getMetrics`). `clear()` only touches `prefix:*` keys. Metrics collection for namespaces is serialized per `CacheStack` instance (one metrics snapshot at a time per stack), so unrelated caches do not block each other while metrics are collected.
250
305
 
251
306
  ```ts
252
307
  const users = cache.namespace('users')
@@ -305,6 +360,19 @@ const data = await cache.get('api:response', fetchFromApi, {
305
360
  // If fetchFromApi returns { status: 500 }, the value is returned but NOT cached
306
361
  ```
307
362
 
363
+ ### TTL policies
364
+
365
+ Align expirations to calendar or boundary-based schedules:
366
+
367
+ ```ts
368
+ await cache.set('daily-report', report, { ttlPolicy: 'until-midnight' })
369
+ await cache.set('hourly-rollup', rollup, { ttlPolicy: 'next-hour' })
370
+ await cache.set('aligned', value, { ttlPolicy: { alignTo: 300 } }) // next 5-minute boundary
371
+ await cache.set('custom', value, {
372
+ ttlPolicy: ({ key, value }) => key.startsWith('hot:') ? 30 : 300
373
+ })
374
+ ```
375
+
308
376
  ---
309
377
 
310
378
  ## Negative + stale caching
@@ -386,13 +454,14 @@ const cache = new CacheStack(
386
454
  {
387
455
  singleFlightCoordinator: coordinator,
388
456
  singleFlightLeaseMs: 30_000,
457
+ singleFlightRenewIntervalMs: 10_000,
389
458
  singleFlightTimeoutMs: 5_000,
390
459
  singleFlightPollMs: 50
391
460
  }
392
461
  )
393
462
  ```
394
463
 
395
- When another instance already owns the miss, the current process waits for the value to appear in the shared layer instead of running the fetcher again.
464
+ When another instance already owns the miss, the current process waits for the value to appear in the shared layer instead of running the fetcher again. `RedisSingleFlightCoordinator` also renews its Redis lease while the worker is still running, so long fetches are less likely to expire their lock mid-flight. Keep `singleFlightLeaseMs` comfortably above your expected fetch latency, and use `singleFlightRenewIntervalMs` if you need tighter control over renewal cadence.
396
465
 
397
466
  ### Cross-server L1 invalidation
398
467
 
@@ -428,7 +497,8 @@ import { RedisTagIndex } from 'layercache'
428
497
 
429
498
  const sharedTagIndex = new RedisTagIndex({
430
499
  client: redis,
431
- prefix: 'myapp:tag-index' // namespaced so it doesn't collide with other data
500
+ prefix: 'myapp:tag-index', // namespaced so it doesn't collide with other data
501
+ knownKeysShards: 8
432
502
  })
433
503
 
434
504
  // Every CacheStack instance should use the same Redis-backed tag index config
@@ -462,6 +532,38 @@ new RedisLayer({
462
532
  })
463
533
  ```
464
534
 
535
+ For production Redis, also set an explicit `prefix`, enforce Redis authentication/network isolation, and configure Redis `maxmemory` / eviction policy so cache growth cannot starve unrelated workloads.
536
+
537
+ ### DiskLayer safety
538
+
539
+ `DiskLayer` is best used with an application-controlled directory and an explicit `maxFiles` bound.
540
+
541
+ ```ts
542
+ import { resolve } from 'node:path'
543
+
544
+ const disk = new DiskLayer({
545
+ directory: resolve('./var/cache/layercache'),
546
+ maxFiles: 10_000
547
+ })
548
+ ```
549
+
550
+ The library hashes cache keys before turning them into filenames, validates the configured directory, uses atomic temp-file writes, and removes malformed on-disk entries. You should still keep the directory outside any user-controlled path and set filesystem permissions so only your app can read or write it.
551
+
552
+ ### Scoped fetcher rate limiting
553
+
554
+ Rate limits are global by default, but you can scope them per cache key or per fetcher function when different backends should not throttle each other.
555
+
556
+ ```ts
557
+ await cache.get('user:123', fetchUser, {
558
+ fetcherRateLimit: {
559
+ maxConcurrent: 1,
560
+ scope: 'key'
561
+ }
562
+ })
563
+ ```
564
+
565
+ Use `scope: 'fetcher'` to share a bucket across calls using the same fetcher function reference, or `bucketKey: 'billing-api'` for a custom named bucket.
566
+
465
567
  ---
466
568
 
467
569
  ## Per-layer TTL overrides
@@ -0,0 +1,312 @@
1
+ import {
2
+ PatternMatcher,
3
+ unwrapStoredValue
4
+ } from "./chunk-ZMDB5KOK.js";
5
+
6
// src/layers/MemoryLayer.ts
// In-process cache layer backed by a single Map. Supports per-entry TTL,
// a hard entry-count bound (maxSize), and three eviction policies:
// "lru" (default), "lfu", and "fifo". LRU/FIFO ordering piggybacks on the
// Map's insertion-order iteration: reads under "lru" delete + re-insert
// the entry so the front of the iteration order is always the victim.
var MemoryLayer = class {
  // Layer name reported to the stack/metrics (default "memory").
  name;
  // Default TTL in seconds, applied by set() when no per-call ttl is given.
  defaultTtl;
  // Marks this layer as in-process (no network hop) for the stack.
  isLocal = true;
  // Hard cap on entry count; exceeding it triggers evict() (default 1000).
  maxSize;
  // "lru" | "lfu" | "fifo".
  evictionPolicy;
  // Optional (key, unwrappedValue) callback fired on capacity eviction.
  // NOTE(review): it is NOT fired for TTL expiry (lazy expiry/pruneExpired).
  onEvict;
  // key -> { value, expiresAt (ms epoch or null), accessCount, insertedAt }.
  entries = /* @__PURE__ */ new Map();
  // Interval handle for periodic TTL pruning (only set when
  // options.cleanupIntervalMs > 0).
  cleanupTimer;
  constructor(options = {}) {
    this.name = options.name ?? "memory";
    this.defaultTtl = options.ttl;
    this.maxSize = options.maxSize ?? 1e3;
    this.evictionPolicy = options.evictionPolicy ?? "lru";
    this.onEvict = options.onEvict;
    if (options.cleanupIntervalMs && options.cleanupIntervalMs > 0) {
      this.cleanupTimer = setInterval(() => {
        this.pruneExpired();
      }, options.cleanupIntervalMs);
      // unref so the cleanup timer does not keep the Node process alive.
      this.cleanupTimer.unref?.();
    }
  }
  // Read a key; unwraps the stored wrapper into the caller-facing value.
  // Returns null on a miss or when the entry has expired.
  async get(key) {
    const value = await this.getEntry(key);
    return unwrapStoredValue(value);
  }
  // Read the raw stored value (still wrapped); null on miss or expiry.
  // Lazily deletes expired entries and updates recency/frequency state.
  async getEntry(key) {
    const entry = this.entries.get(key);
    if (!entry) {
      return null;
    }
    if (this.isExpired(entry)) {
      // Lazy expiry: drop the stale entry on read.
      this.entries.delete(key);
      return null;
    }
    if (this.evictionPolicy === "lru") {
      // Delete + re-insert moves the entry to the back of the Map's
      // iteration order, so the front stays the least recently used.
      this.entries.delete(key);
      entry.accessCount += 1;
      this.entries.set(key, entry);
    } else if (this.evictionPolicy === "lfu") {
      entry.accessCount += 1;
    }
    return entry.value;
  }
  // Bulk read, preserving input order.
  // NOTE(review): returns raw stored values (no unwrapStoredValue here) —
  // presumably the CacheStack unwraps getMany() results; confirm.
  async getMany(keys) {
    const values = [];
    for (const key of keys) {
      values.push(await this.getEntry(key));
    }
    return values;
  }
  // Bulk write; each entry may carry its own ttl (seconds).
  async setMany(entries) {
    for (const entry of entries) {
      await this.set(entry.key, entry.value, entry.ttl);
    }
  }
  // Store a value. ttl is in seconds; a missing/zero/negative ttl means
  // the entry never expires (expiresAt = null).
  async set(key, value, ttl = this.defaultTtl) {
    // Delete first so an overwrite moves the key to the back of the
    // iteration order (counts as fresh for LRU/FIFO purposes).
    this.entries.delete(key);
    this.entries.set(key, {
      value,
      expiresAt: ttl && ttl > 0 ? Date.now() + ttl * 1e3 : null,
      accessCount: 0,
      insertedAt: Date.now()
    });
    // Enforce the size bound immediately after the insert.
    while (this.entries.size > this.maxSize) {
      this.evict();
    }
  }
  // True when the key exists and is not expired (expired entries are
  // dropped as a side effect).
  async has(key) {
    const entry = this.entries.get(key);
    if (!entry) {
      return false;
    }
    if (this.isExpired(entry)) {
      this.entries.delete(key);
      return false;
    }
    return true;
  }
  // Remaining TTL in whole seconds (rounded up). Returns null when the
  // key is missing, already expired, or has no expiry at all.
  async ttl(key) {
    const entry = this.entries.get(key);
    if (!entry) {
      return null;
    }
    if (this.isExpired(entry)) {
      this.entries.delete(key);
      return null;
    }
    if (entry.expiresAt === null) {
      return null;
    }
    return Math.max(0, Math.ceil((entry.expiresAt - Date.now()) / 1e3));
  }
  // Entry count after pruning expired entries.
  async size() {
    this.pruneExpired();
    return this.entries.size;
  }
  // Remove a single key (no-op when absent; onEvict is not fired).
  async delete(key) {
    this.entries.delete(key);
  }
  // Remove several keys (onEvict is not fired).
  async deleteMany(keys) {
    for (const key of keys) {
      this.entries.delete(key);
    }
  }
  // Drop every entry.
  async clear() {
    this.entries.clear();
  }
  // Health probe — an in-memory layer is always reachable.
  async ping() {
    return true;
  }
  // Stop the periodic cleanup timer, if one was started (idempotent).
  async dispose() {
    if (this.cleanupTimer) {
      clearInterval(this.cleanupTimer);
      this.cleanupTimer = void 0;
    }
  }
  // All live (non-expired) keys, in Map iteration order.
  async keys() {
    this.pruneExpired();
    return [...this.entries.keys()];
  }
  // Snapshot live entries for persistence. Access counters are not
  // exported, so import resets recency/frequency state.
  exportState() {
    this.pruneExpired();
    return [...this.entries.entries()].map(([key, entry]) => ({
      key,
      value: entry.value,
      expiresAt: entry.expiresAt
    }));
  }
  // Restore entries from exportState() output. Already-expired entries
  // are skipped, access counters reset, and the size bound re-enforced.
  importState(entries) {
    for (const entry of entries) {
      if (entry.expiresAt !== null && entry.expiresAt <= Date.now()) {
        continue;
      }
      this.entries.set(entry.key, {
        value: entry.value,
        expiresAt: entry.expiresAt,
        accessCount: 0,
        insertedAt: Date.now()
      });
    }
    while (this.entries.size > this.maxSize) {
      this.evict();
    }
  }
  // Remove one entry according to the eviction policy, firing onEvict
  // with the unwrapped value.
  evict() {
    if (this.evictionPolicy === "lru" || this.evictionPolicy === "fifo") {
      // Front of the iteration order is the oldest insert (FIFO) or the
      // least recently used entry (LRU, via getEntry's re-insert).
      const oldestKey = this.entries.keys().next().value;
      if (oldestKey !== void 0) {
        const entry = this.entries.get(oldestKey);
        this.entries.delete(oldestKey);
        if (entry) {
          this.onEvict?.(oldestKey, unwrapStoredValue(entry.value));
        }
      }
      return;
    }
    // LFU: linear scan for the lowest accessCount; ties are broken by
    // the earliest insertedAt (oldest entry loses).
    let victimKey;
    let minCount = Number.POSITIVE_INFINITY;
    let minInsertedAt = Number.POSITIVE_INFINITY;
    for (const [key, entry] of this.entries.entries()) {
      if (entry.accessCount < minCount || entry.accessCount === minCount && entry.insertedAt < minInsertedAt) {
        minCount = entry.accessCount;
        minInsertedAt = entry.insertedAt;
        victimKey = key;
      }
    }
    if (victimKey !== void 0) {
      const victim = this.entries.get(victimKey);
      this.entries.delete(victimKey);
      if (victim) {
        this.onEvict?.(victimKey, unwrapStoredValue(victim.value));
      }
    }
  }
  // Drop every expired entry (onEvict is not fired for expiry).
  pruneExpired() {
    for (const [key, entry] of this.entries.entries()) {
      if (this.isExpired(entry)) {
        this.entries.delete(key);
      }
    }
  }
  // An entry with expiresAt === null never expires.
  isExpired(entry) {
    return entry.expiresAt !== null && entry.expiresAt <= Date.now();
  }
};
193
+
194
// src/invalidation/TagIndex.ts
// In-memory index mapping tags -> keys and keys -> tags, plus the full
// set of known keys used for prefix and pattern invalidation. Methods
// are async to match the distributed (Redis-backed) tag index interface.
var TagIndex = class {
  tagToKeys = /* @__PURE__ */ new Map();
  keyToTags = /* @__PURE__ */ new Map();
  knownKeys = /* @__PURE__ */ new Set();
  maxKnownKeys;
  constructor(options = {}) {
    // Optional cap on the known-key set; once exceeded, roughly the
    // oldest 10% of keys are pruned.
    this.maxKnownKeys = options.maxKnownKeys;
  }
  // Record that a key exists without attaching any tags.
  async touch(key) {
    this.knownKeys.add(key);
    this.pruneKnownKeysIfNeeded();
  }
  // Associate `key` with `tags`, replacing any previous tag set.
  // An empty tag list only registers the key.
  async track(key, tags) {
    this.knownKeys.add(key);
    this.pruneKnownKeysIfNeeded();
    if (tags.length === 0) {
      return;
    }
    const previousTags = this.keyToTags.get(key);
    if (previousTags) {
      // Detach the key from its old tags before re-tagging.
      for (const oldTag of previousTags) {
        this.tagToKeys.get(oldTag)?.delete(key);
      }
    }
    const nextTags = new Set(tags);
    this.keyToTags.set(key, nextTags);
    for (const tag of nextTags) {
      let bucket = this.tagToKeys.get(tag);
      if (!bucket) {
        bucket = new Set();
        this.tagToKeys.set(tag, bucket);
      }
      bucket.add(key);
    }
  }
  // Fully forget a key (known-key set, tag buckets, reverse mapping).
  async remove(key) {
    this.removeKey(key);
  }
  // All keys currently carrying `tag` ([] when the tag is unknown).
  async keysForTag(tag) {
    const bucket = this.tagToKeys.get(tag);
    return bucket ? [...bucket] : [];
  }
  // All known keys starting with `prefix`.
  async keysForPrefix(prefix) {
    const matched = [];
    for (const key of this.knownKeys) {
      if (key.startsWith(prefix)) {
        matched.push(key);
      }
    }
    return matched;
  }
  // Tags attached to `key` ([] when untagged or unknown).
  async tagsForKey(key) {
    const tags = this.keyToTags.get(key);
    return tags ? [...tags] : [];
  }
  // All known keys matching a glob-style pattern.
  async matchPattern(pattern) {
    const matched = [];
    for (const key of this.knownKeys) {
      if (PatternMatcher.matches(pattern, key)) {
        matched.push(key);
      }
    }
    return matched;
  }
  // Reset the whole index.
  async clear() {
    this.tagToKeys.clear();
    this.keyToTags.clear();
    this.knownKeys.clear();
  }
  // Drop ~10% of the oldest known keys once the configured cap is
  // exceeded (Set iteration order is insertion order, oldest first).
  pruneKnownKeysIfNeeded() {
    if (this.maxKnownKeys === void 0) {
      return;
    }
    if (this.knownKeys.size <= this.maxKnownKeys) {
      return;
    }
    const budget = Math.ceil(this.maxKnownKeys * 0.1);
    let dropped = 0;
    for (const candidate of this.knownKeys) {
      if (dropped >= budget) {
        break;
      }
      this.removeKey(candidate);
      dropped += 1;
    }
  }
  // Detach a key everywhere; empty tag buckets are removed entirely.
  removeKey(key) {
    this.knownKeys.delete(key);
    const tags = this.keyToTags.get(key);
    if (!tags) {
      return;
    }
    for (const tag of tags) {
      const bucket = this.tagToKeys.get(tag);
      if (!bucket) {
        continue;
      }
      bucket.delete(key);
      if (bucket.size === 0) {
        this.tagToKeys.delete(tag);
      }
    }
    this.keyToTags.delete(key);
  }
};
280
+
281
// src/integrations/hono.ts
/**
 * Hono middleware that serves JSON responses from the cache.
 *
 * On a cache hit, it sets `x-cache: HIT` and returns the cached body as
 * JSON without invoking the downstream handler. On a miss, it patches
 * `context.json` so that the handler's response body is written to the
 * cache (fire-and-forget) before being sent with `x-cache: MISS`.
 *
 * @param cache   CacheStack-like object with async get(key, fetcher, opts)
 *                and set(key, value, opts).
 * @param options { methods?, keyResolver?, ...cacheOptions } — methods
 *                defaults to ["GET"]; the whole options object is also
 *                forwarded to cache.get / cache.set.
 * @returns async Hono middleware (context, next).
 */
function createHonoCacheMiddleware(cache, options = {}) {
  // Only cache the configured HTTP methods, compared case-insensitively.
  const allowedMethods = new Set((options.methods ?? ["GET"]).map((method) => method.toUpperCase()));
  return async (context, next) => {
    const method = (context.req.method ?? "GET").toUpperCase();
    if (!allowedMethods.has(method)) {
      await next();
      return;
    }
    // Cache key: custom resolver, or "METHOD:path" (URL as a fallback).
    const key = options.keyResolver ? options.keyResolver(context.req) : `${method}:${context.req.path ?? context.req.url ?? "/"}`;
    const cached = await cache.get(key, void 0, options);
    // NOTE(review): a cached literal `null` is indistinguishable from a
    // miss here — confirm CacheStack never stores raw null for hits.
    if (cached !== null) {
      context.header?.("x-cache", "HIT");
      context.header?.("content-type", "application/json; charset=utf-8");
      // BUG FIX: return the Response so Hono uses it as the short-circuit
      // response. Previously the Response from context.json() was
      // discarded and the middleware returned undefined, so cache hits
      // produced no response at all.
      return context.json(cached);
    }
    // Miss: wrap context.json so the handler's body is cached as it is
    // serialized, then delegate to the original implementation.
    const originalJson = context.json.bind(context);
    context.json = (body, status) => {
      context.header?.("x-cache", "MISS");
      void cache.set(key, body, options); // fire-and-forget cache write
      return originalJson(body, status);
    };
    await next();
  };
}
307
+
308
// Public exports of this chunk: the in-process cache layer, the
// in-memory tag index, and the Hono integration middleware.
export {
  MemoryLayer,
  TagIndex,
  createHonoCacheMiddleware
};