@upstash/ratelimit 0.2.0 → 0.3.0-canary.0

This diff shows the content of publicly available package versions as published to their respective public registries. It is provided for informational purposes only.
package/dist/index.mjs ADDED
@@ -0,0 +1,707 @@
+ // src/duration.ts
+ function ms(d) {
+   const [timeString, duration] = d.split(" ");
+   const time = parseFloat(timeString);
+   switch (duration) {
+     case "ms":
+       return time;
+     case "s":
+       return time * 1e3;
+     case "m":
+       return time * 1e3 * 60;
+     case "h":
+       return time * 1e3 * 60 * 60;
+     case "d":
+       return time * 1e3 * 60 * 60 * 24;
+     default:
+       throw new Error(`Unable to parse window size: ${d}`);
+   }
+ }
+
+ // src/analytics.ts
+ var Analytics = class {
+   redis;
+   prefix;
+   constructor(config) {
+     this.redis = config.redis;
+     this.prefix = config.prefix ?? "@upstash/ratelimit";
+   }
+   extractGeo(req) {
+     if (typeof req.geo !== "undefined") {
+       return req.geo;
+     }
+     if (typeof req.cf !== "undefined") {
+       return req.cf;
+     }
+     return {};
+   }
+   async record(event) {
+     const bucket = new Date().setUTCHours(0, 0, 0, 0).toFixed(0);
+     const key = [this.prefix, "events", bucket].join(":");
+     await this.redis.hincrby(
+       key,
+       JSON.stringify({
+         ...event,
+         time: void 0
+       }),
+       1
+     );
+   }
+   async aggregate(aggregateBy, cutoff = 0) {
+     const keys = [];
+     let cursor = 0;
+     do {
+       const [nextCursor, found] = await this.redis.scan(cursor, {
+         match: [this.prefix, "events", "*"].join(":"),
+         count: 1e3
+       });
+       cursor = nextCursor;
+       for (const key of found) {
+         const timestamp = parseInt(key.split(":").pop());
+         if (timestamp >= cutoff) {
+           keys.push(key);
+         }
+       }
+     } while (cursor !== 0);
+     const days = {};
+     await Promise.all(
+       keys.sort().map(async (key) => {
+         const fields = await this.redis.hgetall(key);
+         if (!fields) {
+           return;
+         }
+         const day = {};
+         for (const [field, count] of Object.entries(fields)) {
+           const r = JSON.parse(field);
+           for (const [k, v] of Object.entries(r)) {
+             if (k !== aggregateBy) {
+               continue;
+             }
+             if (!day[v]) {
+               day[v] = {
+                 success: 0,
+                 blocked: 0
+               };
+             }
+             if (r.success) {
+               day[v].success += count;
+             } else {
+               day[v].blocked += count;
+             }
+           }
+         }
+         days[key.split(":")[2]] = day;
+       })
+     );
+     return days;
+   }
+   async series(aggregateBy, cutoff = 0) {
+     const keys = [];
+     let cursor = 0;
+     do {
+       const [nextCursor, found] = await this.redis.scan(cursor, {
+         match: [this.prefix, "events", "*"].join(":"),
+         count: 1e3
+       });
+       cursor = nextCursor;
+       for (const key of found) {
+         const timestamp = parseInt(key.split(":").pop());
+         if (timestamp >= cutoff) {
+           keys.push(key);
+         }
+       }
+     } while (cursor !== 0);
+     const days = await Promise.all(
+       keys.sort().map(async (key) => {
+         const fields = await this.redis.hgetall(key);
+         const day = { time: parseInt(key.split(":")[2]) };
+         if (!fields) {
+           return day;
+         }
+         for (const [field, count] of Object.entries(fields)) {
+           const r = JSON.parse(field);
+           for (const [k, v] of Object.entries(r)) {
+             console.log({ k, v });
+             if (k !== aggregateBy) {
+               continue;
+             }
+             if (!day[v]) {
+               day[v] = 0;
+             }
+             day[v] += count;
+           }
+         }
+         return day;
+       })
+     );
+     return days;
+   }
+   async getUsage(cutoff = 0) {
+     const records = await this.aggregate("identifier", cutoff);
+     const usage = {};
+     for (const day of Object.values(records)) {
+       for (const [k, v] of Object.entries(day)) {
+         if (!usage[k]) {
+           usage[k] = { success: 0, blocked: 0 };
+         }
+         usage[k].success += v.success;
+         usage[k].blocked += v.blocked;
+       }
+     }
+     return usage;
+   }
+ };
+
+ // src/cache.ts
+ var Cache = class {
+   cache;
+   constructor(cache) {
+     this.cache = cache;
+   }
+   isBlocked(identifier) {
+     if (!this.cache.has(identifier)) {
+       return { blocked: false, reset: 0 };
+     }
+     const reset = this.cache.get(identifier);
+     if (reset < Date.now()) {
+       this.cache.delete(identifier);
+       return { blocked: false, reset: 0 };
+     }
+     return { blocked: true, reset };
+   }
+   blockUntil(identifier, reset) {
+     this.cache.set(identifier, reset);
+   }
+   set(key, value) {
+     this.cache.set(key, value);
+   }
+   get(key) {
+     return this.cache.get(key) || null;
+   }
+   incr(key) {
+     let value = this.cache.get(key) ?? 0;
+     value += 1;
+     this.cache.set(key, value);
+     return value;
+   }
+ };
+
+ // src/ratelimit.ts
+ var Ratelimit = class {
+   limiter;
+   ctx;
+   prefix;
+   timeout;
+   analytics;
+   constructor(config) {
+     this.ctx = config.ctx;
+     this.limiter = config.limiter;
+     this.timeout = config.timeout;
+     this.prefix = config.prefix ?? "@upstash/ratelimit";
+     this.analytics = config.analytics !== false ? new Analytics({
+       redis: Array.isArray(this.ctx.redis) ? this.ctx.redis[0] : this.ctx.redis,
+       prefix: this.prefix
+     }) : void 0;
+     if (config.ephemeralCache instanceof Map) {
+       this.ctx.cache = new Cache(config.ephemeralCache);
+     } else if (typeof config.ephemeralCache === "undefined") {
+       this.ctx.cache = new Cache(/* @__PURE__ */ new Map());
+     }
+   }
+   limit = async (identifier, req) => {
+     const key = [this.prefix, identifier].join(":");
+     let timeoutId = null;
+     try {
+       const arr = [this.limiter(this.ctx, key)];
+       if (this.timeout) {
+         arr.push(
+           new Promise((resolve) => {
+             timeoutId = setTimeout(() => {
+               resolve({
+                 success: true,
+                 limit: 0,
+                 remaining: 0,
+                 reset: 0,
+                 pending: Promise.resolve()
+               });
+             }, this.timeout);
+           })
+         );
+       }
+       const res = await Promise.race(arr);
+       if (this.analytics) {
+         const geo = req ? this.analytics.extractGeo(req) : void 0;
+         const analyticsP = this.analytics.record({
+           identifier,
+           time: Date.now(),
+           success: res.success,
+           ...geo
+         });
+         res.pending = Promise.all([res.pending, analyticsP]);
+       }
+       return res;
+     } finally {
+       if (timeoutId) {
+         clearTimeout(timeoutId);
+       }
+     }
+   };
+   blockUntilReady = async (identifier, timeout) => {
+     if (timeout <= 0) {
+       throw new Error("timeout must be positive");
+     }
+     let res;
+     const deadline = Date.now() + timeout;
+     while (true) {
+       res = await this.limit(identifier);
+       if (res.success) {
+         break;
+       }
+       if (res.reset === 0) {
+         throw new Error("This should not happen");
+       }
+       const wait = Math.min(res.reset, deadline) - Date.now();
+       await new Promise((r) => setTimeout(r, wait));
+       if (Date.now() > deadline) {
+         break;
+       }
+     }
+     return res;
+   };
+ };
+
+ // src/single.ts
+ var RegionRatelimit = class extends Ratelimit {
+   constructor(config) {
+     super({
+       prefix: config.prefix,
+       limiter: config.limiter,
+       timeout: config.timeout,
+       analytics: config.analytics,
+       ctx: {
+         redis: config.redis
+       },
+       ephemeralCache: config.ephemeralCache
+     });
+   }
+   static fixedWindow(tokens, window) {
+     const windowDuration = ms(window);
+     const script = `
+     local key = KEYS[1]
+     local window = ARGV[1]
+
+     local r = redis.call("INCR", key)
+     if r == 1 then
+       -- The first time this key is set, the value will be 1.
+       -- So we only need the expire command once
+       redis.call("PEXPIRE", key, window)
+     end
+
+     return r
+     `;
+     return async function(ctx, identifier) {
+       const bucket = Math.floor(Date.now() / windowDuration);
+       const key = [identifier, bucket].join(":");
+       if (ctx.cache) {
+         const { blocked, reset: reset2 } = ctx.cache.isBlocked(identifier);
+         if (blocked) {
+           return {
+             success: false,
+             limit: tokens,
+             remaining: 0,
+             reset: reset2,
+             pending: Promise.resolve()
+           };
+         }
+       }
+       const usedTokensAfterUpdate = await ctx.redis.eval(script, [key], [windowDuration]);
+       const success = usedTokensAfterUpdate <= tokens;
+       const reset = (bucket + 1) * windowDuration;
+       if (ctx.cache && !success) {
+         ctx.cache.blockUntil(identifier, reset);
+       }
+       return {
+         success,
+         limit: tokens,
+         remaining: tokens - usedTokensAfterUpdate,
+         reset,
+         pending: Promise.resolve()
+       };
+     };
+   }
+   static slidingWindow(tokens, window) {
+     const script = `
+     local currentKey = KEYS[1] -- identifier including prefixes
+     local previousKey = KEYS[2] -- key of the previous bucket
+     local tokens = tonumber(ARGV[1]) -- tokens per window
+     local now = ARGV[2] -- current timestamp in milliseconds
+     local window = ARGV[3] -- interval in milliseconds
+
+     local requestsInCurrentWindow = redis.call("GET", currentKey)
+     if requestsInCurrentWindow == false then
+       requestsInCurrentWindow = 0
+     end
+
+
+     local requestsInPreviousWindow = redis.call("GET", previousKey)
+     if requestsInPreviousWindow == false then
+       requestsInPreviousWindow = 0
+     end
+     local percentageInCurrent = ( now % window) / window
+     if requestsInPreviousWindow * ( 1 - percentageInCurrent ) + requestsInCurrentWindow >= tokens then
+       return 0
+     end
+
+     local newValue = redis.call("INCR", currentKey)
+     if newValue == 1 then
+       -- The first time this key is set, the value will be 1.
+       -- So we only need the expire command once
+       redis.call("PEXPIRE", currentKey, window * 2 + 1000) -- Enough time to overlap with a new window + 1 second
+     end
+     return tokens - newValue
+     `;
+     const windowSize = ms(window);
+     return async function(ctx, identifier) {
+       const now = Date.now();
+       const currentWindow = Math.floor(now / windowSize);
+       const currentKey = [identifier, currentWindow].join(":");
+       const previousWindow = currentWindow - windowSize;
+       const previousKey = [identifier, previousWindow].join(":");
+       if (ctx.cache) {
+         const { blocked, reset: reset2 } = ctx.cache.isBlocked(identifier);
+         if (blocked) {
+           return {
+             success: false,
+             limit: tokens,
+             remaining: 0,
+             reset: reset2,
+             pending: Promise.resolve()
+           };
+         }
+       }
+       const remaining = await ctx.redis.eval(script, [currentKey, previousKey], [tokens, now, windowSize]);
+       const success = remaining > 0;
+       const reset = (currentWindow + 1) * windowSize;
+       if (ctx.cache && !success) {
+         ctx.cache.blockUntil(identifier, reset);
+       }
+       return {
+         success,
+         limit: tokens,
+         remaining,
+         reset,
+         pending: Promise.resolve()
+       };
+     };
+   }
+   static tokenBucket(refillRate, interval, maxTokens) {
+     const script = `
+     local key = KEYS[1] -- identifier including prefixes
+     local maxTokens = tonumber(ARGV[1]) -- maximum number of tokens
+     local interval = tonumber(ARGV[2]) -- size of the window in milliseconds
+     local refillRate = tonumber(ARGV[3]) -- how many tokens are refilled after each interval
+     local now = tonumber(ARGV[4]) -- current timestamp in milliseconds
+     local remaining = 0
+
+     local bucket = redis.call("HMGET", key, "updatedAt", "tokens")
+
+     if bucket[1] == false then
+       -- The bucket does not exist yet, so we create it and add a ttl.
+       remaining = maxTokens - 1
+
+       redis.call("HMSET", key, "updatedAt", now, "tokens", remaining)
+       redis.call("PEXPIRE", key, interval)
+
+       return {remaining, now + interval}
+     end
+
+     -- The bucket does exist
+
+     local updatedAt = tonumber(bucket[1])
+     local tokens = tonumber(bucket[2])
+
+     if now >= updatedAt + interval then
+       remaining = math.min(maxTokens, tokens + refillRate) - 1
+
+       redis.call("HMSET", key, "updatedAt", now, "tokens", remaining)
+       return {remaining, now + interval}
+     end
+
+     if tokens > 0 then
+       remaining = tokens - 1
+       redis.call("HMSET", key, "updatedAt", now, "tokens", remaining)
+     end
+
+     return {remaining, updatedAt + interval}
+     `;
+     const intervalDuration = ms(interval);
+     return async function(ctx, identifier) {
+       if (ctx.cache) {
+         const { blocked, reset: reset2 } = ctx.cache.isBlocked(identifier);
+         if (blocked) {
+           return {
+             success: false,
+             limit: maxTokens,
+             remaining: 0,
+             reset: reset2,
+             pending: Promise.resolve()
+           };
+         }
+       }
+       const now = Date.now();
+       const key = [identifier, Math.floor(now / intervalDuration)].join(":");
+       const [remaining, reset] = await ctx.redis.eval(
+         script,
+         [key],
+         [maxTokens, intervalDuration, refillRate, now]
+       );
+       const success = remaining > 0;
+       if (ctx.cache && !success) {
+         ctx.cache.blockUntil(identifier, reset);
+       }
+       return {
+         success,
+         limit: maxTokens,
+         remaining,
+         reset,
+         pending: Promise.resolve()
+       };
+     };
+   }
+   static cachedFixedWindow(tokens, window) {
+     const windowDuration = ms(window);
+     const script = `
+     local key = KEYS[1]
+     local window = ARGV[1]
+
+     local r = redis.call("INCR", key)
+     if r == 1 then
+       -- The first time this key is set, the value will be 1.
+       -- So we only need the expire command once
+       redis.call("PEXPIRE", key, window)
+     end
+
+     return r
+     `;
+     return async function(ctx, identifier) {
+       if (!ctx.cache) {
+         throw new Error("This algorithm requires a cache");
+       }
+       const bucket = Math.floor(Date.now() / windowDuration);
+       const key = [identifier, bucket].join(":");
+       const reset = (bucket + 1) * windowDuration;
+       const hit = typeof ctx.cache.get(key) === "number";
+       if (hit) {
+         const cachedTokensAfterUpdate = ctx.cache.incr(key);
+         const success = cachedTokensAfterUpdate < tokens;
+         const pending = success ? ctx.redis.eval(script, [key], [windowDuration]).then((t) => {
+           ctx.cache.set(key, t);
+         }) : Promise.resolve();
+         return {
+           success,
+           limit: tokens,
+           remaining: tokens - cachedTokensAfterUpdate,
+           reset,
+           pending
+         };
+       }
+       const usedTokensAfterUpdate = await ctx.redis.eval(script, [key], [windowDuration]);
+       ctx.cache.set(key, usedTokensAfterUpdate);
+       const remaining = tokens - usedTokensAfterUpdate;
+       return {
+         success: remaining >= 0,
+         limit: tokens,
+         remaining,
+         reset,
+         pending: Promise.resolve()
+       };
+     };
+   }
+ };
+
+ // src/multi.ts
+ function randomId() {
+   let result = "";
+   const characters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789";
+   const charactersLength = characters.length;
+   for (let i = 0; i < 16; i++) {
+     result += characters.charAt(Math.floor(Math.random() * charactersLength));
+   }
+   return result;
+ }
+ var MultiRegionRatelimit = class extends Ratelimit {
+   constructor(config) {
+     super({
+       prefix: config.prefix,
+       limiter: config.limiter,
+       timeout: config.timeout,
+       analytics: config.analytics,
+       ctx: {
+         redis: config.redis,
+         cache: config.ephemeralCache ? new Cache(config.ephemeralCache) : void 0
+       }
+     });
+   }
+   static fixedWindow(tokens, window) {
+     const windowDuration = ms(window);
+     const script = `
+     local key = KEYS[1]
+     local id = ARGV[1]
+     local window = ARGV[2]
+
+     redis.call("SADD", key, id)
+     local members = redis.call("SMEMBERS", key)
+     if #members == 1 then
+       -- The first time this key is set, the value will be 1.
+       -- So we only need the expire command once
+       redis.call("PEXPIRE", key, window)
+     end
+
+     return members
+     `;
+     return async function(ctx, identifier) {
+       if (ctx.cache) {
+         const { blocked, reset: reset2 } = ctx.cache.isBlocked(identifier);
+         if (blocked) {
+           return {
+             success: false,
+             limit: tokens,
+             remaining: 0,
+             reset: reset2,
+             pending: Promise.resolve()
+           };
+         }
+       }
+       const requestID = randomId();
+       const bucket = Math.floor(Date.now() / windowDuration);
+       const key = [identifier, bucket].join(":");
+       const dbs = ctx.redis.map((redis) => ({
+         redis,
+         request: redis.eval(script, [key], [requestID, windowDuration])
+       }));
+       const firstResponse = await Promise.any(dbs.map((s) => s.request));
+       const usedTokens = firstResponse.length;
+       const remaining = tokens - usedTokens - 1;
+       async function sync() {
+         const individualIDs = await Promise.all(dbs.map((s) => s.request));
+         const allIDs = Array.from(new Set(individualIDs.flatMap((_) => _)).values());
+         for (const db of dbs) {
+           const ids = await db.request;
+           if (ids.length >= tokens) {
+             continue;
+           }
+           const diff = allIDs.filter((id) => !ids.includes(id));
+           if (diff.length === 0) {
+             continue;
+           }
+           await db.redis.sadd(key, ...allIDs);
+         }
+       }
+       const success = remaining > 0;
+       const reset = (bucket + 1) * windowDuration;
+       if (ctx.cache && !success) {
+         ctx.cache.blockUntil(identifier, reset);
+       }
+       return {
+         success,
+         limit: tokens,
+         remaining,
+         reset,
+         pending: sync()
+       };
+     };
+   }
+   static slidingWindow(tokens, window) {
+     const windowSize = ms(window);
+     const script = `
+     local currentKey = KEYS[1] -- identifier including prefixes
+     local previousKey = KEYS[2] -- key of the previous bucket
+     local tokens = tonumber(ARGV[1]) -- tokens per window
+     local now = ARGV[2] -- current timestamp in milliseconds
+     local window = ARGV[3] -- interval in milliseconds
+     local requestID = ARGV[4] -- uuid for this request
+
+
+     local currentMembers = redis.call("SMEMBERS", currentKey)
+     local requestsInCurrentWindow = #currentMembers
+     local previousMembers = redis.call("SMEMBERS", previousKey)
+     local requestsInPreviousWindow = #previousMembers
+
+     local percentageInCurrent = ( now % window) / window
+     if requestsInPreviousWindow * ( 1 - percentageInCurrent ) + requestsInCurrentWindow >= tokens then
+       return {currentMembers, previousMembers}
+     end
+
+     redis.call("SADD", currentKey, requestID)
+     table.insert(currentMembers, requestID)
+     if requestsInCurrentWindow == 0 then
+       -- The first time this key is set, the value will be 1.
+       -- So we only need the expire command once
+       redis.call("PEXPIRE", currentKey, window * 2 + 1000) -- Enough time to overlap with a new window + 1 second
+     end
+     return {currentMembers, previousMembers}
+     `;
+     const windowDuration = ms(window);
+     return async function(ctx, identifier) {
+       if (ctx.cache) {
+         const { blocked, reset: reset2 } = ctx.cache.isBlocked(identifier);
+         if (blocked) {
+           return {
+             success: false,
+             limit: tokens,
+             remaining: 0,
+             reset: reset2,
+             pending: Promise.resolve()
+           };
+         }
+       }
+       const requestID = randomId();
+       const now = Date.now();
+       const currentWindow = Math.floor(now / windowSize);
+       const currentKey = [identifier, currentWindow].join(":");
+       const previousWindow = currentWindow - windowSize;
+       const previousKey = [identifier, previousWindow].join(":");
+       const dbs = ctx.redis.map((redis) => ({
+         redis,
+         request: redis.eval(script, [currentKey, previousKey], [tokens, now, windowDuration, requestID])
+       }));
+       const percentageInCurrent = now % windowDuration / windowDuration;
+       const [current, previous] = await Promise.any(dbs.map((s) => s.request));
+       const usedTokens = previous.length * (1 - percentageInCurrent) + current.length;
+       const remaining = tokens - usedTokens;
+       async function sync() {
+         const [individualIDs] = await Promise.all(dbs.map((s) => s.request));
+         const allIDs = Array.from(new Set(individualIDs.flatMap((_) => _)).values());
+         for (const db of dbs) {
+           const [ids] = await db.request;
+           if (ids.length >= tokens) {
+             continue;
+           }
+           const diff = allIDs.filter((id) => !ids.includes(id));
+           if (diff.length === 0) {
+             continue;
+           }
+           await db.redis.sadd(currentKey, ...allIDs);
+         }
+       }
+       const success = remaining > 0;
+       const reset = (currentWindow + 1) * windowDuration;
+       if (ctx.cache && !success) {
+         ctx.cache.blockUntil(identifier, reset);
+       }
+       return {
+         success,
+         limit: tokens,
+         remaining,
+         reset,
+         pending: sync()
+       };
+     };
+   }
+ };
+ export {
+   Analytics,
+   MultiRegionRatelimit,
+   RegionRatelimit as Ratelimit
+ };
+ //# sourceMappingURL=index.mjs.map
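
For orientation, a minimal usage sketch of the API surface published in this version (RegionRatelimit re-exported as Ratelimit, the fixedWindow limiter, and the new Analytics recording). It assumes the companion @upstash/redis client and its UPSTASH_REDIS_REST_URL / UPSTASH_REDIS_REST_TOKEN environment variables; the identifier and limits below are illustrative and not part of the diff.

// usage-sketch.ts — illustrative only, not part of the published package
import { Ratelimit } from "@upstash/ratelimit"; // RegionRatelimit, re-exported as Ratelimit above
import { Redis } from "@upstash/redis";         // assumed companion client (not in this diff)

const ratelimit = new Ratelimit({
  redis: Redis.fromEnv(),                     // reads UPSTASH_REDIS_REST_URL / _TOKEN
  limiter: Ratelimit.fixedWindow(10, "10 s"), // 10 requests per 10-second window; ms() parses "10 s"
  analytics: true,                            // record() increments daily counts under "<prefix>:events:<day>"
});

// `identifier` is whatever the caller rate limits by (IP, user id, API key, ...).
const { success, remaining, reset, pending } = await ratelimit.limit("user_123");
await pending; // analytics write (and, for MultiRegionRatelimit, cross-region sync) finish here
if (!success) {
  // `reset` is a unix timestamp in milliseconds marking the start of the next window
  console.log(`blocked until ${new Date(reset).toISOString()}, ${remaining} remaining`);
}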