layercache 1.0.0 → 1.0.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +371 -13
- package/dist/chunk-IILH5XTS.js +103 -0
- package/dist/cli.cjs +228 -0
- package/dist/cli.d.cts +4 -0
- package/dist/cli.d.ts +4 -0
- package/dist/cli.js +96 -0
- package/dist/index.cjs +1214 -98
- package/dist/index.d.cts +245 -7
- package/dist/index.d.ts +245 -7
- package/dist/index.js +1200 -185
- package/package.json +9 -2
- package/packages/nestjs/dist/index.cjs +971 -89
- package/packages/nestjs/dist/index.d.cts +227 -2
- package/packages/nestjs/dist/index.d.ts +227 -2
- package/packages/nestjs/dist/index.js +970 -89
package/README.md
CHANGED
|
@@ -6,6 +6,7 @@
|
|
|
6
6
|
[](https://www.npmjs.com/package/layercache)
|
|
7
7
|
[](LICENSE)
|
|
8
8
|
[](https://www.typescriptlang.org/)
|
|
9
|
+
[](https://github.com/flyingsquirrel0419/layercache)
|
|
9
10
|
|
|
10
11
|
```
|
|
11
12
|
L1 hit ~0.01 ms ← served from memory, zero network
|
|
@@ -41,9 +42,27 @@ On a hit, the value is returned from the fastest layer that has it, and automati
|
|
|
41
42
|
- **Tag-based invalidation** — `set('user:123:posts', posts, { tags: ['user:123'] })` then `invalidateByTag('user:123')`
|
|
42
43
|
- **Pattern invalidation** — `invalidateByPattern('user:*')`
|
|
43
44
|
- **Per-layer TTL overrides** — different TTLs for memory vs. Redis in one call
|
|
45
|
+
- **Negative caching** — cache known misses for a short TTL to protect the database
|
|
46
|
+
- **Stale strategies** — `staleWhileRevalidate` and `staleIfError` as opt-in read behavior
|
|
47
|
+
- **TTL jitter** — spread expirations to avoid synchronized stampedes
|
|
48
|
+
- **Sliding & adaptive TTL** — extend TTL on every read or ramp it up for hot keys
|
|
49
|
+
- **Refresh-ahead** — trigger background refresh when TTL drops below a threshold
|
|
50
|
+
- **Best-effort writes** — tolerate partial layer write failures when desired
|
|
51
|
+
- **Bulk reads** — `mget` uses layer-level `getMany()` when available
|
|
44
52
|
- **Distributed tag index** — `RedisTagIndex` keeps tag state consistent across multiple servers
|
|
53
|
+
- **Optional distributed single-flight** — plug in a coordinator to dedupe misses across instances
|
|
45
54
|
- **Cross-server L1 invalidation** — Redis pub/sub bus flushes stale memory on other instances when you write or delete
|
|
46
|
-
- **
|
|
55
|
+
- **`wrap()` decorator API** — turn any async function into a cached version with auto-generated keys
|
|
56
|
+
- **Cache warming** — pre-populate layers with a prioritised list of entries at startup
|
|
57
|
+
- **Namespaces** — scope a `CacheStack` to a key prefix for multi-tenant or module isolation
|
|
58
|
+
- **Event hooks** — `EventEmitter`-based events for hits, misses, stale serves, errors, and more
|
|
59
|
+
- **Graceful degradation** — skip a failing layer for a configurable retry window
|
|
60
|
+
- **Circuit breaker** — per-key or global; opens after N failures, recovers after cooldown
|
|
61
|
+
- **Compression** — transparent gzip/brotli in `RedisLayer` with a byte threshold
|
|
62
|
+
- **Metrics & stats** — per-layer hit/miss counters, circuit-breaker trips, degraded operations; HTTP stats handler
|
|
63
|
+
- **Persistence** — `exportState` / `importState` for in-process snapshots; `persistToFile` / `restoreFromFile` for disk
|
|
64
|
+
- **Admin CLI** — `layercache stats | keys | invalidate` against any Redis URL
|
|
65
|
+
- **Framework integrations** — Fastify plugin, tRPC middleware, GraphQL resolver wrapper
|
|
47
66
|
- **MessagePack serializer** — drop-in replacement for lower Redis memory usage
|
|
48
67
|
- **NestJS module** — `CacheStackModule.forRoot(...)` with `@InjectCacheStack()`
|
|
49
68
|
- **Custom layers** — implement the 5-method `CacheLayer` interface to plug in Memcached, DynamoDB, or anything else
|
|
@@ -106,7 +125,12 @@ const user = await cache.get<User>('user:123', () => db.findUser(123))
|
|
|
106
125
|
// With options
|
|
107
126
|
const user = await cache.get<User>('user:123', () => db.findUser(123), {
|
|
108
127
|
ttl: { memory: 30, redis: 600 }, // per-layer TTL
|
|
109
|
-
tags: ['user', 'user:123']
|
|
128
|
+
tags: ['user', 'user:123'], // tag this key for bulk invalidation
|
|
129
|
+
negativeCache: true, // cache null fetches
|
|
130
|
+
negativeTtl: 15, // short TTL for misses
|
|
131
|
+
staleWhileRevalidate: 30, // serve stale and refresh in background
|
|
132
|
+
staleIfError: 300, // serve stale if refresh fails
|
|
133
|
+
ttlJitter: 5 // +/- 5s expiry spread
|
|
110
134
|
})
|
|
111
135
|
```
|
|
112
136
|
|
|
@@ -117,7 +141,10 @@ Writes to all layers simultaneously.
|
|
|
117
141
|
```ts
|
|
118
142
|
await cache.set('user:123', user, {
|
|
119
143
|
ttl: { memory: 60, redis: 600 }, // per-layer TTL (seconds)
|
|
120
|
-
tags: ['user', 'user:123']
|
|
144
|
+
tags: ['user', 'user:123'],
|
|
145
|
+
staleWhileRevalidate: { redis: 30 },
|
|
146
|
+
staleIfError: { redis: 120 },
|
|
147
|
+
ttlJitter: { redis: 5 }
|
|
121
148
|
})
|
|
122
149
|
|
|
123
150
|
await cache.set('user:123', user, {
|
|
@@ -149,6 +176,8 @@ await cache.invalidateByPattern('user:*') // deletes user:1, user:2, …
|
|
|
149
176
|
|
|
150
177
|
Concurrent multi-key fetch, each with its own optional fetcher.
|
|
151
178
|
|
|
179
|
+
If every entry is a simple read (`{ key }` only), `CacheStack` will use layer-level `getMany()` fast paths when the layer implements one.
|
|
180
|
+
|
|
152
181
|
```ts
|
|
153
182
|
const [user1, user2] = await cache.mget([
|
|
154
183
|
{ key: 'user:1', fetch: () => db.findUser(1) },
|
|
@@ -159,9 +188,109 @@ const [user1, user2] = await cache.mget([
|
|
|
159
188
|
### `cache.getMetrics(): CacheMetricsSnapshot`
|
|
160
189
|
|
|
161
190
|
```ts
|
|
162
|
-
const { hits, misses, fetches,
|
|
191
|
+
const { hits, misses, fetches, staleHits, refreshes, writeFailures } = cache.getMetrics()
|
|
192
|
+
```
|
|
193
|
+
|
|
194
|
+
### `cache.resetMetrics(): void`
|
|
195
|
+
|
|
196
|
+
Resets all counters to zero — useful for per-interval reporting.
|
|
197
|
+
|
|
198
|
+
```ts
|
|
199
|
+
cache.resetMetrics()
|
|
200
|
+
```
|
|
201
|
+
|
|
202
|
+
### `cache.getStats(): CacheStatsSnapshot`
|
|
203
|
+
|
|
204
|
+
Returns metrics, per-layer degradation state, and the number of in-flight background refreshes.
|
|
205
|
+
|
|
206
|
+
```ts
|
|
207
|
+
const { metrics, layers, backgroundRefreshes } = cache.getStats()
|
|
208
|
+
// layers: [{ name, isLocal, degradedUntil }]
|
|
209
|
+
```
|
|
210
|
+
|
|
211
|
+
### `cache.wrap(prefix, fetcher, options?)`
|
|
212
|
+
|
|
213
|
+
Wraps an async function so every call is transparently cached. The key is derived from the function arguments unless you supply a `keyResolver`.
|
|
214
|
+
|
|
215
|
+
```ts
|
|
216
|
+
const getUser = cache.wrap('user', (id: number) => db.findUser(id))
|
|
217
|
+
|
|
218
|
+
const user = await getUser(123) // key → "user:123"
|
|
219
|
+
|
|
220
|
+
// Custom key resolver
|
|
221
|
+
const getUser = cache.wrap(
|
|
222
|
+
'user',
|
|
223
|
+
(id: number) => db.findUser(id),
|
|
224
|
+
{ keyResolver: (id) => String(id), ttl: 300 }
|
|
225
|
+
)
|
|
226
|
+
```
|
|
227
|
+
|
|
228
|
+
### `cache.warm(entries, options?)`
|
|
229
|
+
|
|
230
|
+
Pre-populate layers at startup from a prioritised list. Higher `priority` values run first.
|
|
231
|
+
|
|
232
|
+
```ts
|
|
233
|
+
await cache.warm(
|
|
234
|
+
[
|
|
235
|
+
{ key: 'config', fetcher: () => db.getConfig(), priority: 10 },
|
|
236
|
+
{ key: 'user:1', fetcher: () => db.findUser(1), priority: 5 },
|
|
237
|
+
{ key: 'user:2', fetcher: () => db.findUser(2), priority: 5 },
|
|
238
|
+
],
|
|
239
|
+
{ concurrency: 4, continueOnError: true }
|
|
240
|
+
)
|
|
241
|
+
```
|
|
242
|
+
|
|
243
|
+
### `cache.namespace(prefix): CacheNamespace`
|
|
244
|
+
|
|
245
|
+
Returns a scoped view with the same full API (`get`, `set`, `delete`, `clear`, `mget`, `wrap`, `warm`, `invalidateByTag`, `invalidateByPattern`, `getMetrics`). `clear()` only touches `prefix:*` keys.
|
|
246
|
+
|
|
247
|
+
```ts
|
|
248
|
+
const users = cache.namespace('users')
|
|
249
|
+
const posts = cache.namespace('posts')
|
|
250
|
+
|
|
251
|
+
await users.set('123', userData) // stored as "users:123"
|
|
252
|
+
await users.clear() // only deletes "users:*"
|
|
253
|
+
```
|
|
254
|
+
|
|
255
|
+
---
|
|
256
|
+
|
|
257
|
+
## Negative + stale caching
|
|
258
|
+
|
|
259
|
+
`negativeCache` stores fetcher misses for a short TTL, which is useful for "user not found" or "feature flag absent" style lookups.
|
|
260
|
+
|
|
261
|
+
```ts
|
|
262
|
+
const user = await cache.get(`user:${id}`, () => db.findUser(id), {
|
|
263
|
+
negativeCache: true,
|
|
264
|
+
negativeTtl: 15
|
|
265
|
+
})
|
|
266
|
+
```
|
|
267
|
+
|
|
268
|
+
`staleWhileRevalidate` returns the last cached value immediately after expiry and refreshes it in the background. `staleIfError` keeps serving the stale value if the refresh fails.
|
|
269
|
+
|
|
270
|
+
```ts
|
|
271
|
+
await cache.set('config', currentConfig, {
|
|
272
|
+
ttl: 60,
|
|
273
|
+
staleWhileRevalidate: 30,
|
|
274
|
+
staleIfError: 300
|
|
275
|
+
})
|
|
276
|
+
```
|
|
277
|
+
|
|
278
|
+
---
|
|
279
|
+
|
|
280
|
+
## Write failure policy
|
|
281
|
+
|
|
282
|
+
Default writes are strict: if any layer write fails, the operation throws.
|
|
283
|
+
|
|
284
|
+
If you prefer "at least one layer succeeds", enable best-effort mode:
|
|
285
|
+
|
|
286
|
+
```ts
|
|
287
|
+
const cache = new CacheStack([...], {
|
|
288
|
+
writePolicy: 'best-effort'
|
|
289
|
+
})
|
|
163
290
|
```
|
|
164
291
|
|
|
292
|
+
`best-effort` logs the failed layers, increments `writeFailures`, and only throws if *every* layer failed.
|
|
293
|
+
|
|
165
294
|
---
|
|
166
295
|
|
|
167
296
|
## Cache stampede prevention
|
|
@@ -190,6 +319,28 @@ new CacheStack([...], { stampedePrevention: false })
|
|
|
190
319
|
|
|
191
320
|
## Distributed deployments
|
|
192
321
|
|
|
322
|
+
### Distributed single-flight
|
|
323
|
+
|
|
324
|
+
Local stampede prevention only deduplicates requests inside one Node.js process. To dedupe cross-instance misses, configure a shared coordinator.
|
|
325
|
+
|
|
326
|
+
```ts
|
|
327
|
+
import { RedisSingleFlightCoordinator } from 'layercache'
|
|
328
|
+
|
|
329
|
+
const coordinator = new RedisSingleFlightCoordinator({ client: redis })
|
|
330
|
+
|
|
331
|
+
const cache = new CacheStack(
|
|
332
|
+
[new MemoryLayer({ ttl: 60 }), new RedisLayer({ client: redis, ttl: 300 })],
|
|
333
|
+
{
|
|
334
|
+
singleFlightCoordinator: coordinator,
|
|
335
|
+
singleFlightLeaseMs: 30_000,
|
|
336
|
+
singleFlightTimeoutMs: 5_000,
|
|
337
|
+
singleFlightPollMs: 50
|
|
338
|
+
}
|
|
339
|
+
)
|
|
340
|
+
```
|
|
341
|
+
|
|
342
|
+
When another instance already owns the miss, the current process waits for the value to appear in the shared layer instead of running the fetcher again.
|
|
343
|
+
|
|
193
344
|
### Cross-server L1 invalidation
|
|
194
345
|
|
|
195
346
|
When one server writes or deletes a key, other servers' memory layers go stale. The `RedisInvalidationBus` propagates invalidation events over Redis pub/sub so every instance stays consistent.
|
|
@@ -287,6 +438,200 @@ await cache.set('key', value, { ttl: { local: 15, shared: 600 } })
|
|
|
287
438
|
|
|
288
439
|
---
|
|
289
440
|
|
|
441
|
+
## Sliding & adaptive TTL
|
|
442
|
+
|
|
443
|
+
**Sliding TTL** resets the TTL on every read so frequently-accessed keys never expire.
|
|
444
|
+
|
|
445
|
+
```ts
|
|
446
|
+
const value = await cache.get('session:abc', fetchSession, { slidingTtl: true })
|
|
447
|
+
```
|
|
448
|
+
|
|
449
|
+
**Adaptive TTL** automatically increases the TTL of hot keys up to a ceiling.
|
|
450
|
+
|
|
451
|
+
```ts
|
|
452
|
+
await cache.get('popular-post', fetchPost, {
|
|
453
|
+
adaptiveTtl: {
|
|
454
|
+
hotAfter: 5, // ramp up after 5 hits
|
|
455
|
+
step: 60, // add 60s per hit
|
|
456
|
+
maxTtl: 3600 // cap at 1h
|
|
457
|
+
}
|
|
458
|
+
})
|
|
459
|
+
```
|
|
460
|
+
|
|
461
|
+
**Refresh-ahead** triggers a background refresh when the remaining TTL drops below a threshold, so callers never see a miss.
|
|
462
|
+
|
|
463
|
+
```ts
|
|
464
|
+
await cache.get('leaderboard', fetchLeaderboard, {
|
|
465
|
+
ttl: 120,
|
|
466
|
+
refreshAhead: 30 // start refreshing when ≤30s remain
|
|
467
|
+
})
|
|
468
|
+
```
|
|
469
|
+
|
|
470
|
+
---
|
|
471
|
+
|
|
472
|
+
## Graceful degradation & circuit breaker
|
|
473
|
+
|
|
474
|
+
**Graceful degradation** marks a layer as degraded on failure and skips it for a retry window, keeping the cache available even if Redis is briefly unreachable.
|
|
475
|
+
|
|
476
|
+
```ts
|
|
477
|
+
new CacheStack([...], {
|
|
478
|
+
gracefulDegradation: { retryAfterMs: 10_000 }
|
|
479
|
+
})
|
|
480
|
+
```
|
|
481
|
+
|
|
482
|
+
**Circuit breaker** opens after repeated fetcher failures for a key, returning `null` instead of hammering a broken downstream.
|
|
483
|
+
|
|
484
|
+
```ts
|
|
485
|
+
new CacheStack([...], {
|
|
486
|
+
circuitBreaker: {
|
|
487
|
+
failureThreshold: 5, // open after 5 consecutive failures
|
|
488
|
+
cooldownMs: 30_000 // retry after 30s
|
|
489
|
+
}
|
|
490
|
+
})
|
|
491
|
+
|
|
492
|
+
// Or per-operation
|
|
493
|
+
await cache.get('fragile-key', fetch, {
|
|
494
|
+
circuitBreaker: { failureThreshold: 3, cooldownMs: 10_000 }
|
|
495
|
+
})
|
|
496
|
+
```
|
|
497
|
+
|
|
498
|
+
---
|
|
499
|
+
|
|
500
|
+
## Compression
|
|
501
|
+
|
|
502
|
+
`RedisLayer` can transparently compress values before writing. Values smaller than `compressionThreshold` are stored as-is.
|
|
503
|
+
|
|
504
|
+
```ts
|
|
505
|
+
new RedisLayer({
|
|
506
|
+
client: redis,
|
|
507
|
+
ttl: 300,
|
|
508
|
+
compression: 'gzip', // or 'brotli'
|
|
509
|
+
compressionThreshold: 1_024 // bytes — skip compression for small values
|
|
510
|
+
})
|
|
511
|
+
```
|
|
512
|
+
|
|
513
|
+
---
|
|
514
|
+
|
|
515
|
+
## Stats & HTTP endpoint
|
|
516
|
+
|
|
517
|
+
`cache.getStats()` returns a full snapshot suitable for dashboards or health checks.
|
|
518
|
+
|
|
519
|
+
```ts
|
|
520
|
+
const stats = cache.getStats()
|
|
521
|
+
// {
|
|
522
|
+
// metrics: { hits, misses, fetches, circuitBreakerTrips, ... },
|
|
523
|
+
// layers: [{ name, isLocal, degradedUntil }],
|
|
524
|
+
// backgroundRefreshes: 2
|
|
525
|
+
// }
|
|
526
|
+
```
|
|
527
|
+
|
|
528
|
+
Mount a JSON endpoint with the built-in HTTP handler (works with Express, Fastify, Next.js):
|
|
529
|
+
|
|
530
|
+
```ts
|
|
531
|
+
import { createCacheStatsHandler } from 'layercache'
|
|
532
|
+
import http from 'node:http'
|
|
533
|
+
|
|
534
|
+
const statsHandler = createCacheStatsHandler(cache)
|
|
535
|
+
http.createServer(statsHandler).listen(9090)
|
|
536
|
+
// GET / → JSON stats
|
|
537
|
+
```
|
|
538
|
+
|
|
539
|
+
Or use the Fastify plugin:
|
|
540
|
+
|
|
541
|
+
```ts
|
|
542
|
+
import { createFastifyLayercachePlugin } from 'layercache/integrations/fastify'
|
|
543
|
+
|
|
544
|
+
await fastify.register(createFastifyLayercachePlugin(cache, {
|
|
545
|
+
statsPath: '/cache/stats' // default; set exposeStatsRoute: false to disable
|
|
546
|
+
}))
|
|
547
|
+
// fastify.cache is now available in all handlers
|
|
548
|
+
```
|
|
549
|
+
|
|
550
|
+
---
|
|
551
|
+
|
|
552
|
+
## Persistence & snapshots
|
|
553
|
+
|
|
554
|
+
Transfer cache state between `CacheStack` instances or survive a restart.
|
|
555
|
+
|
|
556
|
+
```ts
|
|
557
|
+
// In-memory snapshot
|
|
558
|
+
const snapshot = await cache.exportState()
|
|
559
|
+
await anotherCache.importState(snapshot)
|
|
560
|
+
|
|
561
|
+
// Disk snapshot
|
|
562
|
+
await cache.persistToFile('./cache-snapshot.json')
|
|
563
|
+
await cache.restoreFromFile('./cache-snapshot.json')
|
|
564
|
+
```
|
|
565
|
+
|
|
566
|
+
---
|
|
567
|
+
|
|
568
|
+
## Event hooks
|
|
569
|
+
|
|
570
|
+
`CacheStack` extends `EventEmitter`. Subscribe to events for monitoring or custom side-effects.
|
|
571
|
+
|
|
572
|
+
| Event | Payload |
|
|
573
|
+
|-------|---------|
|
|
574
|
+
| `hit` | `{ key, layer }` |
|
|
575
|
+
| `miss` | `{ key }` |
|
|
576
|
+
| `set` | `{ key }` |
|
|
577
|
+
| `delete` | `{ key }` |
|
|
578
|
+
| `stale-serve` | `{ key, state, layer }` |
|
|
579
|
+
| `stampede-dedupe` | `{ key }` |
|
|
580
|
+
| `backfill` | `{ key, fromLayer, toLayer }` |
|
|
581
|
+
| `warm` | `{ key }` |
|
|
582
|
+
| `error` | `{ event, context }` |
|
|
583
|
+
|
|
584
|
+
```ts
|
|
585
|
+
cache.on('hit', ({ key, layer }) => metrics.inc('cache.hit', { layer }))
|
|
586
|
+
cache.on('miss', ({ key }) => metrics.inc('cache.miss'))
|
|
587
|
+
cache.on('error', ({ event, context }) => logger.error(event, context))
|
|
588
|
+
```
|
|
589
|
+
|
|
590
|
+
---
|
|
591
|
+
|
|
592
|
+
## Framework integrations
|
|
593
|
+
|
|
594
|
+
### tRPC
|
|
595
|
+
|
|
596
|
+
```ts
|
|
597
|
+
import { createTrpcCacheMiddleware } from 'layercache/integrations/trpc'
|
|
598
|
+
|
|
599
|
+
const cacheMiddleware = createTrpcCacheMiddleware(cache, 'trpc', { ttl: 60 })
|
|
600
|
+
|
|
601
|
+
export const cachedProcedure = t.procedure.use(cacheMiddleware)
|
|
602
|
+
```
|
|
603
|
+
|
|
604
|
+
### GraphQL
|
|
605
|
+
|
|
606
|
+
```ts
|
|
607
|
+
import { cacheGraphqlResolver } from 'layercache/integrations/graphql'
|
|
608
|
+
|
|
609
|
+
const resolvers = {
|
|
610
|
+
Query: {
|
|
611
|
+
user: cacheGraphqlResolver(cache, 'user', (_root, { id }) => db.findUser(id), {
|
|
612
|
+
keyResolver: (_root, { id }) => id,
|
|
613
|
+
ttl: 300
|
|
614
|
+
})
|
|
615
|
+
}
|
|
616
|
+
}
|
|
617
|
+
```
|
|
618
|
+
|
|
619
|
+
---
|
|
620
|
+
|
|
621
|
+
## Admin CLI
|
|
622
|
+
|
|
623
|
+
Inspect and manage a Redis-backed cache without writing code.
|
|
624
|
+
|
|
625
|
+
```bash
|
|
626
|
+
# Requires ioredis
|
|
627
|
+
npx layercache stats --redis redis://localhost:6379
|
|
628
|
+
npx layercache keys --redis redis://localhost:6379 --pattern "user:*"
|
|
629
|
+
npx layercache invalidate --redis redis://localhost:6379 --tag user:123
|
|
630
|
+
npx layercache invalidate --redis redis://localhost:6379 --pattern "session:*"
|
|
631
|
+
```
|
|
632
|
+
|
|
633
|
+
---
|
|
634
|
+
|
|
290
635
|
## MessagePack serialization
|
|
291
636
|
|
|
292
637
|
Reduces Redis memory usage and speeds up serialization for large values:
|
|
@@ -316,6 +661,8 @@ class MemcachedLayer implements CacheLayer {
|
|
|
316
661
|
readonly isLocal = false
|
|
317
662
|
|
|
318
663
|
async get<T>(key: string): Promise<T | null> { /* … */ }
|
|
664
|
+
async getEntry?(key: string): Promise<unknown | null> { /* optional raw access */ }
|
|
665
|
+
async getMany?(keys: string[]): Promise<Array<unknown | null>> { /* optional bulk read */ }
|
|
319
666
|
async set(key: string, value: unknown, ttl?: number): Promise<void> { /* … */ }
|
|
320
667
|
async delete(key: string): Promise<void> { /* … */ }
|
|
321
668
|
async clear(): Promise<void> { /* … */ }
|
|
@@ -433,24 +780,35 @@ Example output from a local run:
|
|
|
433
780
|
|
|
434
781
|
## Comparison
|
|
435
782
|
|
|
436
|
-
| | node-cache |
|
|
783
|
+
| | node-cache-manager | keyv | cacheable | **layercache** |
|
|
437
784
|
|---|:---:|:---:|:---:|:---:|
|
|
438
|
-
| Multi-layer |
|
|
785
|
+
| Multi-layer | △ | Plugin | ❌ | ✅ |
|
|
439
786
|
| Auto backfill | ❌ | ❌ | ❌ | ✅ |
|
|
440
787
|
| Stampede prevention | ❌ | ❌ | ❌ | ✅ |
|
|
441
|
-
| Tag invalidation | ❌ | ❌ |
|
|
788
|
+
| Tag invalidation | ❌ | ❌ | ✅ | ✅ |
|
|
442
789
|
| Distributed tags | ❌ | ❌ | ❌ | ✅ |
|
|
443
790
|
| Cross-server L1 flush | ❌ | ❌ | ❌ | ✅ |
|
|
444
|
-
| TypeScript-first |
|
|
445
|
-
|
|
|
446
|
-
|
|
|
791
|
+
| TypeScript-first | △ | ✅ | ✅ | ✅ |
|
|
792
|
+
| Wrap / decorator API | ✅ | ❌ | ❌ | ✅ |
|
|
793
|
+
| Cache warming | ❌ | ❌ | ❌ | ✅ |
|
|
794
|
+
| Namespaces | ❌ | ✅ | ✅ | ✅ |
|
|
795
|
+
| Sliding / adaptive TTL | ❌ | ❌ | ❌ | ✅ |
|
|
796
|
+
| Event hooks | ✅ | ✅ | ✅ | ✅ |
|
|
797
|
+
| Circuit breaker | ❌ | ❌ | ❌ | ✅ |
|
|
798
|
+
| Graceful degradation | ❌ | ❌ | ❌ | ✅ |
|
|
799
|
+
| Compression | ❌ | ❌ | ✅ | ✅ |
|
|
800
|
+
| Persistence / snapshots | ❌ | ❌ | ❌ | ✅ |
|
|
801
|
+
| Admin CLI | ❌ | ❌ | ❌ | ✅ |
|
|
802
|
+
| Pluggable logger | ❌ | ❌ | ✅ | ✅ |
|
|
803
|
+
| NestJS module | ❌ | ❌ | ❌ | ✅ |
|
|
804
|
+
| Custom layers | △ | ❌ | ❌ | ✅ |
|
|
447
805
|
|
|
448
806
|
---
|
|
449
807
|
|
|
450
808
|
## Debug logging
|
|
451
809
|
|
|
452
810
|
```bash
|
|
453
|
-
DEBUG=
|
|
811
|
+
DEBUG=layercache:debug node server.js
|
|
454
812
|
```
|
|
455
813
|
|
|
456
814
|
Or pass a logger instance:
|
|
@@ -476,8 +834,8 @@ new CacheStack([...], {
|
|
|
476
834
|
## Contributing
|
|
477
835
|
|
|
478
836
|
```bash
|
|
479
|
-
git clone https://github.com/flyingsquirrel0419/
|
|
480
|
-
cd
|
|
837
|
+
git clone https://github.com/flyingsquirrel0419/layercache
|
|
838
|
+
cd layercache
|
|
481
839
|
npm install
|
|
482
840
|
npm test # vitest
|
|
483
841
|
npm run build:all # esm + cjs + nestjs package
|
|
@@ -0,0 +1,103 @@
|
|
|
1
|
+
// src/invalidation/PatternMatcher.ts
var PatternMatcher = class {
  /**
   * Glob-style match: `*` matches any run of characters and `?` matches
   * exactly one. Every other character in `pattern` is taken literally
   * (regex metacharacters are escaped before compilation).
   *
   * @param {string} pattern - Glob pattern, e.g. "user:*".
   * @param {string} value - Candidate string to test against the pattern.
   * @returns {boolean} Whether `value` matches `pattern` in full.
   */
  static matches(pattern, value) {
    let source = "^";
    for (const ch of pattern) {
      if (ch === "*") {
        source += ".*";
      } else if (ch === "?") {
        source += ".";
      } else {
        // Neutralize any regex metacharacter so it matches literally.
        source += ch.replace(/[.+^${}()|[\]\\]/g, "\\$&");
      }
    }
    return new RegExp(`${source}$`).test(value);
  }
};
|
|
9
|
+
|
|
10
|
+
// src/invalidation/RedisTagIndex.ts
var RedisTagIndex = class {
  client;
  prefix;
  scanCount;
  /**
   * Redis-backed index mapping cache keys to their tags, enabling tag- and
   * pattern-based invalidation shared across server instances.
   *
   * Storage layout (all under `prefix`):
   *   `<prefix>:keys`            — set of every known cache key
   *   `<prefix>:key:<enc(key)>`  — set of tags attached to one key
   *   `<prefix>:tag:<enc(tag)>`  — set of keys carrying one tag
   *
   * @param {{ client: object, prefix?: string, scanCount?: number }} options
   *   `client` is an ioredis-compatible client; `scanCount` is the COUNT
   *   hint used for SCAN/SSCAN iteration.
   */
  constructor(options) {
    const { client, prefix, scanCount } = options;
    this.client = client;
    this.prefix = prefix ?? "layercache:tag-index";
    this.scanCount = scanCount ?? 100;
  }
  /** Record `key` in the known-keys set without changing its tags. */
  async touch(key) {
    await this.client.sadd(this.knownKeysKey(), key);
  }
  /**
   * Replace the tag set of `key` with `tags`, keeping the reverse
   * (tag → keys) index consistent. All mutations run in one pipeline.
   */
  async track(key, tags) {
    const tagSetKey = this.keyTagsKey(key);
    const previousTags = await this.client.smembers(tagSetKey);
    const batch = this.client.pipeline();
    batch.sadd(this.knownKeysKey(), key);
    // Detach the key from every tag it carried before.
    previousTags.forEach((tag) => batch.srem(this.tagKeysKey(tag), key));
    batch.del(tagSetKey);
    if (tags.length > 0) {
      batch.sadd(tagSetKey, ...tags);
      for (const tag of new Set(tags)) {
        batch.sadd(this.tagKeysKey(tag), key);
      }
    }
    await batch.exec();
  }
  /** Drop `key` entirely: known-keys set, its tag set, and reverse sets. */
  async remove(key) {
    const tagSetKey = this.keyTagsKey(key);
    const taggedWith = await this.client.smembers(tagSetKey);
    const batch = this.client.pipeline();
    batch.srem(this.knownKeysKey(), key);
    batch.del(tagSetKey);
    taggedWith.forEach((tag) => batch.srem(this.tagKeysKey(tag), key));
    await batch.exec();
  }
  /** All cache keys currently tagged with `tag`. */
  async keysForTag(tag) {
    return this.client.smembers(this.tagKeysKey(tag));
  }
  /** Known cache keys matching the glob `pattern` (`*` / `?`). */
  async matchPattern(pattern) {
    const found = [];
    let cursor = "0";
    while (true) {
      const [next, page] = await this.client.sscan(
        this.knownKeysKey(),
        cursor,
        "MATCH",
        pattern,
        "COUNT",
        this.scanCount
      );
      // Redis MATCH semantics differ slightly from ours, so re-verify
      // every candidate client-side before accepting it.
      for (const candidate of page) {
        if (PatternMatcher.matches(pattern, candidate)) {
          found.push(candidate);
        }
      }
      cursor = next;
      if (cursor === "0") {
        break;
      }
    }
    return found;
  }
  /** Delete every index structure stored under this prefix. */
  async clear() {
    const keysToDelete = await this.scanIndexKeys();
    if (keysToDelete.length > 0) {
      await this.client.del(...keysToDelete);
    }
  }
  /** SCAN the keyspace for all Redis keys belonging to this index. */
  async scanIndexKeys() {
    const pattern = `${this.prefix}:*`;
    const collected = [];
    let cursor = "0";
    while (true) {
      const [next, page] = await this.client.scan(cursor, "MATCH", pattern, "COUNT", this.scanCount);
      collected.push(...page);
      cursor = next;
      if (cursor === "0") {
        break;
      }
    }
    return collected;
  }
  knownKeysKey() {
    return `${this.prefix}:keys`;
  }
  keyTagsKey(key) {
    // Keys are URI-encoded so arbitrary characters cannot collide with the
    // `:`-delimited index layout.
    return `${this.prefix}:key:${encodeURIComponent(key)}`;
  }
  tagKeysKey(tag) {
    return `${this.prefix}:tag:${encodeURIComponent(tag)}`;
  }
};
|
|
99
|
+
|
|
100
|
+
export {
|
|
101
|
+
PatternMatcher,
|
|
102
|
+
RedisTagIndex
|
|
103
|
+
};
|