layercache 1.0.1 → 1.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +286 -7
- package/benchmarks/latency.ts +1 -1
- package/benchmarks/stampede.ts +1 -4
- package/dist/chunk-QUB5VZFZ.js +132 -0
- package/dist/cli.cjs +296 -0
- package/dist/cli.d.cts +4 -0
- package/dist/cli.d.ts +4 -0
- package/dist/cli.js +135 -0
- package/dist/index.cjs +1576 -184
- package/dist/index.d.cts +465 -7
- package/dist/index.d.ts +465 -7
- package/dist/index.js +1526 -266
- package/examples/express-api/index.ts +12 -8
- package/examples/nestjs-module/app.module.ts +2 -5
- package/examples/nextjs-api-routes/route.ts +1 -4
- package/package.json +10 -2
- package/packages/nestjs/dist/index.cjs +1058 -155
- package/packages/nestjs/dist/index.d.cts +345 -2
- package/packages/nestjs/dist/index.d.ts +345 -2
- package/packages/nestjs/dist/index.js +1057 -155
package/README.md
CHANGED
|
@@ -6,6 +6,7 @@
|
|
|
6
6
|
[](https://www.npmjs.com/package/layercache)
|
|
7
7
|
[](LICENSE)
|
|
8
8
|
[](https://www.typescriptlang.org/)
|
|
9
|
+
[](https://github.com/flyingsquirrel0419/layercache)
|
|
9
10
|
|
|
10
11
|
```
|
|
11
12
|
L1 hit ~0.01 ms ← served from memory, zero network
|
|
@@ -44,12 +45,24 @@ On a hit, the value is returned from the fastest layer that has it, and automati
|
|
|
44
45
|
- **Negative caching** — cache known misses for a short TTL to protect the database
|
|
45
46
|
- **Stale strategies** — `staleWhileRevalidate` and `staleIfError` as opt-in read behavior
|
|
46
47
|
- **TTL jitter** — spread expirations to avoid synchronized stampedes
|
|
48
|
+
- **Sliding & adaptive TTL** — extend TTL on every read or ramp it up for hot keys
|
|
49
|
+
- **Refresh-ahead** — trigger background refresh when TTL drops below a threshold
|
|
47
50
|
- **Best-effort writes** — tolerate partial layer write failures when desired
|
|
48
51
|
- **Bulk reads** — `mget` uses layer-level `getMany()` when available
|
|
49
52
|
- **Distributed tag index** — `RedisTagIndex` keeps tag state consistent across multiple servers
|
|
50
53
|
- **Optional distributed single-flight** — plug in a coordinator to dedupe misses across instances
|
|
51
54
|
- **Cross-server L1 invalidation** — Redis pub/sub bus flushes stale memory on other instances when you write or delete
|
|
52
|
-
- **
|
|
55
|
+
- **`wrap()` decorator API** — turn any async function into a cached version with auto-generated keys
|
|
56
|
+
- **Cache warming** — pre-populate layers with a prioritised list of entries at startup
|
|
57
|
+
- **Namespaces** — scope a `CacheStack` to a key prefix for multi-tenant or module isolation
|
|
58
|
+
- **Event hooks** — `EventEmitter`-based events for hits, misses, stale serves, errors, and more
|
|
59
|
+
- **Graceful degradation** — skip a failing layer for a configurable retry window
|
|
60
|
+
- **Circuit breaker** — per-key or global; opens after N failures, recovers after cooldown
|
|
61
|
+
- **Compression** — transparent gzip/brotli in `RedisLayer` with a byte threshold
|
|
62
|
+
- **Metrics & stats** — per-layer hit/miss counters, circuit-breaker trips, degraded operations; HTTP stats handler
|
|
63
|
+
- **Persistence** — `exportState` / `importState` for in-process snapshots; `persistToFile` / `restoreFromFile` for disk
|
|
64
|
+
- **Admin CLI** — `layercache stats | keys | invalidate` against any Redis URL
|
|
65
|
+
- **Framework integrations** — Fastify plugin, tRPC middleware, GraphQL resolver wrapper
|
|
53
66
|
- **MessagePack serializer** — drop-in replacement for lower Redis memory usage
|
|
54
67
|
- **NestJS module** — `CacheStackModule.forRoot(...)` with `@InjectCacheStack()`
|
|
55
68
|
- **Custom layers** — implement the 5-method `CacheLayer` interface to plug in Memcached, DynamoDB, or anything else
|
|
@@ -178,6 +191,67 @@ const [user1, user2] = await cache.mget([
|
|
|
178
191
|
const { hits, misses, fetches, staleHits, refreshes, writeFailures } = cache.getMetrics()
|
|
179
192
|
```
|
|
180
193
|
|
|
194
|
+
### `cache.resetMetrics(): void`
|
|
195
|
+
|
|
196
|
+
Resets all counters to zero — useful for per-interval reporting.
|
|
197
|
+
|
|
198
|
+
```ts
|
|
199
|
+
cache.resetMetrics()
|
|
200
|
+
```
|
|
201
|
+
|
|
202
|
+
### `cache.getStats(): CacheStatsSnapshot`
|
|
203
|
+
|
|
204
|
+
Returns metrics, per-layer degradation state, and the number of in-flight background refreshes.
|
|
205
|
+
|
|
206
|
+
```ts
|
|
207
|
+
const { metrics, layers, backgroundRefreshes } = cache.getStats()
|
|
208
|
+
// layers: [{ name, isLocal, degradedUntil }]
|
|
209
|
+
```
|
|
210
|
+
|
|
211
|
+
### `cache.wrap(prefix, fetcher, options?)`
|
|
212
|
+
|
|
213
|
+
Wraps an async function so every call is transparently cached. The key is derived from the function arguments unless you supply a `keyResolver`.
|
|
214
|
+
|
|
215
|
+
```ts
|
|
216
|
+
const getUser = cache.wrap('user', (id: number) => db.findUser(id))
|
|
217
|
+
|
|
218
|
+
const user = await getUser(123) // key → "user:123"
|
|
219
|
+
|
|
220
|
+
// Custom key resolver
|
|
221
|
+
const getUser = cache.wrap(
|
|
222
|
+
'user',
|
|
223
|
+
(id: number) => db.findUser(id),
|
|
224
|
+
{ keyResolver: (id) => String(id), ttl: 300 }
|
|
225
|
+
)
|
|
226
|
+
```
|
|
227
|
+
|
|
228
|
+
### `cache.warm(entries, options?)`
|
|
229
|
+
|
|
230
|
+
Pre-populate layers at startup from a prioritised list. Higher `priority` values run first.
|
|
231
|
+
|
|
232
|
+
```ts
|
|
233
|
+
await cache.warm(
|
|
234
|
+
[
|
|
235
|
+
{ key: 'config', fetcher: () => db.getConfig(), priority: 10 },
|
|
236
|
+
{ key: 'user:1', fetcher: () => db.findUser(1), priority: 5 },
|
|
237
|
+
{ key: 'user:2', fetcher: () => db.findUser(2), priority: 5 },
|
|
238
|
+
],
|
|
239
|
+
{ concurrency: 4, continueOnError: true }
|
|
240
|
+
)
|
|
241
|
+
```
|
|
242
|
+
|
|
243
|
+
### `cache.namespace(prefix): CacheNamespace`
|
|
244
|
+
|
|
245
|
+
Returns a scoped view with the same full API (`get`, `set`, `delete`, `clear`, `mget`, `wrap`, `warm`, `invalidateByTag`, `invalidateByPattern`, `getMetrics`). `clear()` only touches `prefix:*` keys.
|
|
246
|
+
|
|
247
|
+
```ts
|
|
248
|
+
const users = cache.namespace('users')
|
|
249
|
+
const posts = cache.namespace('posts')
|
|
250
|
+
|
|
251
|
+
await users.set('123', userData) // stored as "users:123"
|
|
252
|
+
await users.clear() // only deletes "users:*"
|
|
253
|
+
```
|
|
254
|
+
|
|
181
255
|
---
|
|
182
256
|
|
|
183
257
|
## Negative + stale caching
|
|
@@ -364,6 +438,200 @@ await cache.set('key', value, { ttl: { local: 15, shared: 600 } })
|
|
|
364
438
|
|
|
365
439
|
---
|
|
366
440
|
|
|
441
|
+
## Sliding & adaptive TTL
|
|
442
|
+
|
|
443
|
+
**Sliding TTL** resets the TTL on every read so frequently-accessed keys never expire.
|
|
444
|
+
|
|
445
|
+
```ts
|
|
446
|
+
const value = await cache.get('session:abc', fetchSession, { slidingTtl: true })
|
|
447
|
+
```
|
|
448
|
+
|
|
449
|
+
**Adaptive TTL** automatically increases the TTL of hot keys up to a ceiling.
|
|
450
|
+
|
|
451
|
+
```ts
|
|
452
|
+
await cache.get('popular-post', fetchPost, {
|
|
453
|
+
adaptiveTtl: {
|
|
454
|
+
hotAfter: 5, // ramp up after 5 hits
|
|
455
|
+
step: 60, // add 60s per hit
|
|
456
|
+
maxTtl: 3600 // cap at 1h
|
|
457
|
+
}
|
|
458
|
+
})
|
|
459
|
+
```
|
|
460
|
+
|
|
461
|
+
**Refresh-ahead** triggers a background refresh when the remaining TTL drops below a threshold, so callers never see a miss.
|
|
462
|
+
|
|
463
|
+
```ts
|
|
464
|
+
await cache.get('leaderboard', fetchLeaderboard, {
|
|
465
|
+
ttl: 120,
|
|
466
|
+
refreshAhead: 30 // start refreshing when ≤30s remain
|
|
467
|
+
})
|
|
468
|
+
```
|
|
469
|
+
|
|
470
|
+
---
|
|
471
|
+
|
|
472
|
+
## Graceful degradation & circuit breaker
|
|
473
|
+
|
|
474
|
+
**Graceful degradation** marks a layer as degraded on failure and skips it for a retry window, keeping the cache available even if Redis is briefly unreachable.
|
|
475
|
+
|
|
476
|
+
```ts
|
|
477
|
+
new CacheStack([...], {
|
|
478
|
+
gracefulDegradation: { retryAfterMs: 10_000 }
|
|
479
|
+
})
|
|
480
|
+
```
|
|
481
|
+
|
|
482
|
+
**Circuit breaker** opens after repeated fetcher failures for a key, returning `null` instead of hammering a broken downstream.
|
|
483
|
+
|
|
484
|
+
```ts
|
|
485
|
+
new CacheStack([...], {
|
|
486
|
+
circuitBreaker: {
|
|
487
|
+
failureThreshold: 5, // open after 5 consecutive failures
|
|
488
|
+
cooldownMs: 30_000 // retry after 30s
|
|
489
|
+
}
|
|
490
|
+
})
|
|
491
|
+
|
|
492
|
+
// Or per-operation
|
|
493
|
+
await cache.get('fragile-key', fetch, {
|
|
494
|
+
circuitBreaker: { failureThreshold: 3, cooldownMs: 10_000 }
|
|
495
|
+
})
|
|
496
|
+
```
|
|
497
|
+
|
|
498
|
+
---
|
|
499
|
+
|
|
500
|
+
## Compression
|
|
501
|
+
|
|
502
|
+
`RedisLayer` can transparently compress values before writing. Values smaller than `compressionThreshold` are stored as-is.
|
|
503
|
+
|
|
504
|
+
```ts
|
|
505
|
+
new RedisLayer({
|
|
506
|
+
client: redis,
|
|
507
|
+
ttl: 300,
|
|
508
|
+
compression: 'gzip', // or 'brotli'
|
|
509
|
+
compressionThreshold: 1_024 // bytes — skip compression for small values
|
|
510
|
+
})
|
|
511
|
+
```
|
|
512
|
+
|
|
513
|
+
---
|
|
514
|
+
|
|
515
|
+
## Stats & HTTP endpoint
|
|
516
|
+
|
|
517
|
+
`cache.getStats()` returns a full snapshot suitable for dashboards or health checks.
|
|
518
|
+
|
|
519
|
+
```ts
|
|
520
|
+
const stats = cache.getStats()
|
|
521
|
+
// {
|
|
522
|
+
// metrics: { hits, misses, fetches, circuitBreakerTrips, ... },
|
|
523
|
+
// layers: [{ name, isLocal, degradedUntil }],
|
|
524
|
+
// backgroundRefreshes: 2
|
|
525
|
+
// }
|
|
526
|
+
```
|
|
527
|
+
|
|
528
|
+
Mount a JSON endpoint with the built-in HTTP handler (works with Express, Fastify, Next.js):
|
|
529
|
+
|
|
530
|
+
```ts
|
|
531
|
+
import { createCacheStatsHandler } from 'layercache'
|
|
532
|
+
import http from 'node:http'
|
|
533
|
+
|
|
534
|
+
const statsHandler = createCacheStatsHandler(cache)
|
|
535
|
+
http.createServer(statsHandler).listen(9090)
|
|
536
|
+
// GET / → JSON stats
|
|
537
|
+
```
|
|
538
|
+
|
|
539
|
+
Or use the Fastify plugin:
|
|
540
|
+
|
|
541
|
+
```ts
|
|
542
|
+
import { createFastifyLayercachePlugin } from 'layercache/integrations/fastify'
|
|
543
|
+
|
|
544
|
+
await fastify.register(createFastifyLayercachePlugin(cache, {
|
|
545
|
+
statsPath: '/cache/stats' // default; set exposeStatsRoute: false to disable
|
|
546
|
+
}))
|
|
547
|
+
// fastify.cache is now available in all handlers
|
|
548
|
+
```
|
|
549
|
+
|
|
550
|
+
---
|
|
551
|
+
|
|
552
|
+
## Persistence & snapshots
|
|
553
|
+
|
|
554
|
+
Transfer cache state between `CacheStack` instances or survive a restart.
|
|
555
|
+
|
|
556
|
+
```ts
|
|
557
|
+
// In-memory snapshot
|
|
558
|
+
const snapshot = await cache.exportState()
|
|
559
|
+
await anotherCache.importState(snapshot)
|
|
560
|
+
|
|
561
|
+
// Disk snapshot
|
|
562
|
+
await cache.persistToFile('./cache-snapshot.json')
|
|
563
|
+
await cache.restoreFromFile('./cache-snapshot.json')
|
|
564
|
+
```
|
|
565
|
+
|
|
566
|
+
---
|
|
567
|
+
|
|
568
|
+
## Event hooks
|
|
569
|
+
|
|
570
|
+
`CacheStack` extends `EventEmitter`. Subscribe to events for monitoring or custom side-effects.
|
|
571
|
+
|
|
572
|
+
| Event | Payload |
|
|
573
|
+
|-------|---------|
|
|
574
|
+
| `hit` | `{ key, layer }` |
|
|
575
|
+
| `miss` | `{ key }` |
|
|
576
|
+
| `set` | `{ key }` |
|
|
577
|
+
| `delete` | `{ key }` |
|
|
578
|
+
| `stale-serve` | `{ key, state, layer }` |
|
|
579
|
+
| `stampede-dedupe` | `{ key }` |
|
|
580
|
+
| `backfill` | `{ key, fromLayer, toLayer }` |
|
|
581
|
+
| `warm` | `{ key }` |
|
|
582
|
+
| `error` | `{ event, context }` |
|
|
583
|
+
|
|
584
|
+
```ts
|
|
585
|
+
cache.on('hit', ({ key, layer }) => metrics.inc('cache.hit', { layer }))
|
|
586
|
+
cache.on('miss', ({ key }) => metrics.inc('cache.miss'))
|
|
587
|
+
cache.on('error', ({ event, context }) => logger.error(event, context))
|
|
588
|
+
```
|
|
589
|
+
|
|
590
|
+
---
|
|
591
|
+
|
|
592
|
+
## Framework integrations
|
|
593
|
+
|
|
594
|
+
### tRPC
|
|
595
|
+
|
|
596
|
+
```ts
|
|
597
|
+
import { createTrpcCacheMiddleware } from 'layercache/integrations/trpc'
|
|
598
|
+
|
|
599
|
+
const cacheMiddleware = createTrpcCacheMiddleware(cache, 'trpc', { ttl: 60 })
|
|
600
|
+
|
|
601
|
+
export const cachedProcedure = t.procedure.use(cacheMiddleware)
|
|
602
|
+
```
|
|
603
|
+
|
|
604
|
+
### GraphQL
|
|
605
|
+
|
|
606
|
+
```ts
|
|
607
|
+
import { cacheGraphqlResolver } from 'layercache/integrations/graphql'
|
|
608
|
+
|
|
609
|
+
const resolvers = {
|
|
610
|
+
Query: {
|
|
611
|
+
user: cacheGraphqlResolver(cache, 'user', (_root, { id }) => db.findUser(id), {
|
|
612
|
+
keyResolver: (_root, { id }) => id,
|
|
613
|
+
ttl: 300
|
|
614
|
+
})
|
|
615
|
+
}
|
|
616
|
+
}
|
|
617
|
+
```
|
|
618
|
+
|
|
619
|
+
---
|
|
620
|
+
|
|
621
|
+
## Admin CLI
|
|
622
|
+
|
|
623
|
+
Inspect and manage a Redis-backed cache without writing code.
|
|
624
|
+
|
|
625
|
+
```bash
|
|
626
|
+
# Requires ioredis
|
|
627
|
+
npx layercache stats --redis redis://localhost:6379
|
|
628
|
+
npx layercache keys --redis redis://localhost:6379 --pattern "user:*"
|
|
629
|
+
npx layercache invalidate --redis redis://localhost:6379 --tag user:123
|
|
630
|
+
npx layercache invalidate --redis redis://localhost:6379 --pattern "session:*"
|
|
631
|
+
```
|
|
632
|
+
|
|
633
|
+
---
|
|
634
|
+
|
|
367
635
|
## MessagePack serialization
|
|
368
636
|
|
|
369
637
|
Reduces Redis memory usage and speeds up serialization for large values:
|
|
@@ -512,17 +780,28 @@ Example output from a local run:
|
|
|
512
780
|
|
|
513
781
|
## Comparison
|
|
514
782
|
|
|
515
|
-
| | node-cache |
|
|
783
|
+
| | node-cache-manager | keyv | cacheable | **layercache** |
|
|
516
784
|
|---|:---:|:---:|:---:|:---:|
|
|
517
|
-
| Multi-layer |
|
|
785
|
+
| Multi-layer | △ | Plugin | ❌ | ✅ |
|
|
518
786
|
| Auto backfill | ❌ | ❌ | ❌ | ✅ |
|
|
519
787
|
| Stampede prevention | ❌ | ❌ | ❌ | ✅ |
|
|
520
|
-
| Tag invalidation | ❌ | ❌ |
|
|
788
|
+
| Tag invalidation | ❌ | ❌ | ✅ | ✅ |
|
|
521
789
|
| Distributed tags | ❌ | ❌ | ❌ | ✅ |
|
|
522
790
|
| Cross-server L1 flush | ❌ | ❌ | ❌ | ✅ |
|
|
523
|
-
| TypeScript-first |
|
|
524
|
-
|
|
|
525
|
-
|
|
|
791
|
+
| TypeScript-first | △ | ✅ | ✅ | ✅ |
|
|
792
|
+
| Wrap / decorator API | ✅ | ❌ | ❌ | ✅ |
|
|
793
|
+
| Cache warming | ❌ | ❌ | ❌ | ✅ |
|
|
794
|
+
| Namespaces | ❌ | ✅ | ✅ | ✅ |
|
|
795
|
+
| Sliding / adaptive TTL | ❌ | ❌ | ❌ | ✅ |
|
|
796
|
+
| Event hooks | ✅ | ✅ | ✅ | ✅ |
|
|
797
|
+
| Circuit breaker | ❌ | ❌ | ❌ | ✅ |
|
|
798
|
+
| Graceful degradation | ❌ | ❌ | ❌ | ✅ |
|
|
799
|
+
| Compression | ❌ | ❌ | ✅ | ✅ |
|
|
800
|
+
| Persistence / snapshots | ❌ | ❌ | ❌ | ✅ |
|
|
801
|
+
| Admin CLI | ❌ | ❌ | ❌ | ✅ |
|
|
802
|
+
| Pluggable logger | ❌ | ❌ | ✅ | ✅ |
|
|
803
|
+
| NestJS module | ❌ | ❌ | ❌ | ✅ |
|
|
804
|
+
| Custom layers | △ | ❌ | ❌ | ✅ |
|
|
526
805
|
|
|
527
806
|
---
|
|
528
807
|
|
package/benchmarks/latency.ts
CHANGED
package/benchmarks/stampede.ts
CHANGED
|
@@ -3,10 +3,7 @@ import { CacheStack, MemoryLayer, RedisLayer } from '../src'
|
|
|
3
3
|
|
|
4
4
|
async function main(): Promise<void> {
|
|
5
5
|
const redis = new Redis()
|
|
6
|
-
const cache = new CacheStack([
|
|
7
|
-
new MemoryLayer({ ttl: 60 }),
|
|
8
|
-
new RedisLayer({ client: redis, ttl: 300 })
|
|
9
|
-
])
|
|
6
|
+
const cache = new CacheStack([new MemoryLayer({ ttl: 60 }), new RedisLayer({ client: redis, ttl: 300 })])
|
|
10
7
|
|
|
11
8
|
let executions = 0
|
|
12
9
|
|
|
@@ -0,0 +1,132 @@
|
|
|
1
|
+
// src/invalidation/PatternMatcher.ts
var PatternMatcher = class _PatternMatcher {
  /**
   * Tests whether a glob-style pattern matches a value.
   * Supports `*` (any sequence of characters) and `?` (any single character).
   *
   * Implemented with dynamic programming — O(pattern.length * value.length)
   * worst case, with no backtracking — so it is immune to the catastrophic
   * backtracking (ReDoS) blowups that RegExp-based glob matching can suffer.
   * (The previous doc claimed "linear-time"; the DP is quadratic, not linear.)
   *
   * Returns a real boolean for every input (the previous implementation
   * could leak `undefined` through optional chaining).
   */
  static matches(pattern, value) {
    return _PatternMatcher.matchLinear(pattern, value);
  }
  /**
   * DP table walk: dp[i][j] is true when the first i pattern characters
   * match the first j value characters.
   */
  static matchLinear(pattern, value) {
    const m = pattern.length;
    const n = value.length;
    // dp[i][j] — does pattern[0..i) match value[0..j)? All cells start false.
    const dp = Array.from({ length: m + 1 }, () => new Array(n + 1).fill(false));
    dp[0][0] = true;
    // Only a leading run of `*` can match the empty value.
    for (let i = 1; i <= m; i++) {
      if (pattern[i - 1] !== "*") {
        break; // first non-* pattern char can never match empty input
      }
      dp[i][0] = dp[i - 1][0];
    }
    for (let i = 1; i <= m; i++) {
      const pc = pattern[i - 1]; // hoisted: invariant over the inner loop
      for (let j = 1; j <= n; j++) {
        if (pc === "*") {
          // `*` matches nothing (row above) or absorbs one more char (cell to the left).
          dp[i][j] = dp[i - 1][j] || dp[i][j - 1];
        } else if (pc === "?" || pc === value[j - 1]) {
          dp[i][j] = dp[i - 1][j - 1];
        }
        // otherwise dp[i][j] stays false
      }
    }
    // Indices m and n are always in range; coerce defensively to boolean.
    return dp[m][n] === true;
  }
};
|
|
38
|
+
|
|
39
|
+
// src/invalidation/RedisTagIndex.ts
var RedisTagIndex = class {
  client;
  prefix;
  scanCount;
  /**
   * Redis-backed tag index: records which cache keys carry which tags so
   * tag-based invalidation stays consistent across server instances.
   *
   * Redis layout (all under `prefix`, default "layercache:tag-index"):
   *   <prefix>:keys        — set of every known cache key
   *   <prefix>:key:<key>   — set of tags attached to one key
   *   <prefix>:tag:<tag>   — set of keys carrying one tag
   */
  constructor(options) {
    this.client = options.client;
    this.prefix = options.prefix ?? "layercache:tag-index";
    this.scanCount = options.scanCount ?? 100;
  }
  /** Registers a key in the known-keys set without touching its tag mappings. */
  async touch(key) {
    await this.client.sadd(this.knownKeysKey(), key);
  }
  /** Replaces the full tag set of a key in a single pipeline. */
  async track(key, tags) {
    const tagSetKey = this.keyTagsKey(key);
    const previousTags = await this.client.smembers(tagSetKey);
    const batch = this.client.pipeline();
    batch.sadd(this.knownKeysKey(), key);
    // Detach the key from every tag it used to carry before re-attaching.
    previousTags.forEach((tag) => batch.srem(this.tagKeysKey(tag), key));
    batch.del(tagSetKey);
    if (tags.length > 0) {
      batch.sadd(tagSetKey, ...tags);
      for (const tag of new Set(tags)) {
        batch.sadd(this.tagKeysKey(tag), key);
      }
    }
    await batch.exec();
  }
  /** Removes a key entirely: known-keys set, its tag set, and every reverse mapping. */
  async remove(key) {
    const tagSetKey = this.keyTagsKey(key);
    const previousTags = await this.client.smembers(tagSetKey);
    const batch = this.client.pipeline();
    batch.srem(this.knownKeysKey(), key);
    batch.del(tagSetKey);
    previousTags.forEach((tag) => batch.srem(this.tagKeysKey(tag), key));
    await batch.exec();
  }
  /** Lists every key currently carrying the given tag. */
  async keysForTag(tag) {
    return this.client.smembers(this.tagKeysKey(tag));
  }
  /**
   * Finds known keys matching a glob pattern. SSCAN's server-side MATCH acts
   * as a first pass; each returned candidate is re-checked with
   * PatternMatcher before it is accepted.
   */
  async matchPattern(pattern) {
    const found = [];
    let scanCursor = "0";
    while (true) {
      const [next, page] = await this.client.sscan(
        this.knownKeysKey(),
        scanCursor,
        "MATCH",
        pattern,
        "COUNT",
        this.scanCount
      );
      scanCursor = next;
      for (const candidate of page) {
        if (PatternMatcher.matches(pattern, candidate)) {
          found.push(candidate);
        }
      }
      if (scanCursor === "0") {
        break;
      }
    }
    return found;
  }
  /** Deletes every Redis key belonging to this index. */
  async clear() {
    const toDelete = await this.scanIndexKeys();
    if (toDelete.length === 0) {
      return;
    }
    await this.client.del(...toDelete);
  }
  /** Enumerates all `<prefix>:*` keys via cursor-based SCAN. */
  async scanIndexKeys() {
    const found = [];
    const pattern = `${this.prefix}:*`;
    let scanCursor = "0";
    while (true) {
      const [next, page] = await this.client.scan(scanCursor, "MATCH", pattern, "COUNT", this.scanCount);
      scanCursor = next;
      for (const indexKey of page) {
        found.push(indexKey);
      }
      if (scanCursor === "0") {
        break;
      }
    }
    return found;
  }
  /** Name of the set holding all known cache keys. */
  knownKeysKey() {
    return `${this.prefix}:keys`;
  }
  /** Name of the per-key tag set; the key is URI-encoded to keep it Redis-safe. */
  keyTagsKey(key) {
    return `${this.prefix}:key:${encodeURIComponent(key)}`;
  }
  /** Name of the per-tag key set; the tag is URI-encoded. */
  tagKeysKey(tag) {
    return `${this.prefix}:tag:${encodeURIComponent(tag)}`;
  }
};
|
|
128
|
+
|
|
129
|
+
// Public exports of this generated chunk: the glob matcher and the
// Redis-backed distributed tag index.
export {
  PatternMatcher,
  RedisTagIndex
};
|