@upstash/ratelimit 0.2.0 → 0.3.0-canary.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js ADDED
@@ -0,0 +1,736 @@
1
// esbuild-generated CommonJS interop helpers (machine-generated boilerplate).
"use strict";
// Aliases for the Object reflection primitives used below.
var __defProp = Object.defineProperty;
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __hasOwnProp = Object.prototype.hasOwnProperty;
// Defines every entry of `all` on `target` as an enumerable getter, so exports
// behave like live bindings (each read re-evaluates the getter).
var __export = (target, all) => {
  for (var name in all)
    __defProp(target, name, { get: all[name], enumerable: true });
};
// Copies own properties of `from` onto `to` as getters, skipping `except` and
// keys already present on `to`; each copied property keeps the source
// property's enumerability (the `desc` parameter is just a scratch variable).
var __copyProps = (to, from, except, desc) => {
  if (from && typeof from === "object" || typeof from === "function") {
    for (let key of __getOwnPropNames(from))
      if (!__hasOwnProp.call(to, key) && key !== except)
        __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
  }
  return to;
};
// Tags the exports object as an ES module and mirrors `mod` onto it.
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
19
+
20
// src/index.ts
// Public package surface. Note that the single-region implementation
// (`RegionRatelimit`) is exported under the generic name `Ratelimit`.
var src_exports = {};
__export(src_exports, {
  Analytics: () => Analytics,
  MultiRegionRatelimit: () => MultiRegionRatelimit,
  Ratelimit: () => RegionRatelimit
});
module.exports = __toCommonJS(src_exports);
28
+
29
// src/duration.ts
/**
 * Converts a human-readable window such as "10 s", "2 m" or "1 d" into
 * milliseconds.
 * @param {string} d - "<amount> <unit>" where unit is ms | s | m | h | d.
 * @returns {number} the duration in milliseconds
 * @throws {Error} when the unit is not one of the supported suffixes
 */
function ms(d) {
  const [amountText, unit] = d.split(" ");
  const amount = parseFloat(amountText);
  // Milliseconds per supported unit.
  const unitToMillis = new Map([
    ["ms", 1],
    ["s", 1e3],
    ["m", 60 * 1e3],
    ["h", 60 * 60 * 1e3],
    ["d", 24 * 60 * 60 * 1e3]
  ]);
  if (!unitToMillis.has(unit)) {
    throw new Error(`Unable to parse window size: ${d}`);
  }
  return amount * unitToMillis.get(unit);
}
48
+
49
// src/analytics.ts
/**
 * Persists rate-limit outcomes to Redis (one hash per UTC day, one hash field
 * per distinct event payload) and aggregates them for reporting.
 *
 * Fix: removed a stray debug `console.log({ k, v })` from `series()` that
 * printed every hash field to stdout on each call.
 */
var Analytics = class {
  redis;   // Redis client used for all reads/writes
  prefix;  // key prefix for event hashes, defaults to "@upstash/ratelimit"
  /**
   * @param {{redis: object, prefix?: string}} config
   */
  constructor(config) {
    this.redis = config.redis;
    this.prefix = config.prefix ?? "@upstash/ratelimit";
  }
  /**
   * Extracts geo information from a request object: Vercel exposes it as
   * `req.geo`, Cloudflare as `req.cf`. Returns {} when neither is present.
   */
  extractGeo(req) {
    if (typeof req.geo !== "undefined") {
      return req.geo;
    }
    if (typeof req.cf !== "undefined") {
      return req.cf;
    }
    return {};
  }
  /**
   * Increments this event's counter in the hash bucket of the current UTC
   * day. The hash field is the event serialized without its `time` property
   * (JSON.stringify drops `undefined` values), so identical events collapse
   * into a single counter.
   */
  async record(event) {
    // Midnight UTC of today, in epoch milliseconds, as a string.
    const bucket = new Date().setUTCHours(0, 0, 0, 0).toFixed(0);
    const key = [this.prefix, "events", bucket].join(":");
    await this.redis.hincrby(
      key,
      JSON.stringify({
        ...event,
        time: void 0
      }),
      1
    );
  }
  /**
   * Sums success/blocked counts per distinct value of `aggregateBy`
   * (e.g. "identifier") for every daily bucket at or after `cutoff`.
   * @param {string} aggregateBy - event property to group by
   * @param {number} [cutoff=0] - only buckets with timestamp >= cutoff (ms)
   * @returns day -> value -> { success, blocked }
   */
  async aggregate(aggregateBy, cutoff = 0) {
    const keys = [];
    let cursor = 0;
    // SCAN the keyspace for daily event hashes newer than the cutoff.
    do {
      const [nextCursor, found] = await this.redis.scan(cursor, {
        match: [this.prefix, "events", "*"].join(":"),
        count: 1e3
      });
      cursor = nextCursor;
      for (const key of found) {
        const timestamp = parseInt(key.split(":").pop());
        if (timestamp >= cutoff) {
          keys.push(key);
        }
      }
    } while (cursor !== 0);
    const days = {};
    await Promise.all(
      keys.sort().map(async (key) => {
        const fields = await this.redis.hgetall(key);
        if (!fields) {
          return;
        }
        const day = {};
        for (const [field, count] of Object.entries(fields)) {
          const r = JSON.parse(field);
          for (const [k, v] of Object.entries(r)) {
            if (k !== aggregateBy) {
              continue;
            }
            if (!day[v]) {
              day[v] = {
                success: 0,
                blocked: 0
              };
            }
            if (r.success) {
              day[v].success += count;
            } else {
              day[v].blocked += count;
            }
          }
        }
        days[key.split(":")[2]] = day;
      })
    );
    return days;
  }
  /**
   * Returns one data point per daily bucket at or after `cutoff`, each of
   * shape { time, [value]: totalCount } for every value of `aggregateBy`.
   */
  async series(aggregateBy, cutoff = 0) {
    const keys = [];
    let cursor = 0;
    do {
      const [nextCursor, found] = await this.redis.scan(cursor, {
        match: [this.prefix, "events", "*"].join(":"),
        count: 1e3
      });
      cursor = nextCursor;
      for (const key of found) {
        const timestamp = parseInt(key.split(":").pop());
        if (timestamp >= cutoff) {
          keys.push(key);
        }
      }
    } while (cursor !== 0);
    const days = await Promise.all(
      keys.sort().map(async (key) => {
        const fields = await this.redis.hgetall(key);
        const day = { time: parseInt(key.split(":")[2]) };
        if (!fields) {
          return day;
        }
        for (const [field, count] of Object.entries(fields)) {
          const r = JSON.parse(field);
          for (const [k, v] of Object.entries(r)) {
            // (stray debug console.log removed here)
            if (k !== aggregateBy) {
              continue;
            }
            if (!day[v]) {
              day[v] = 0;
            }
            day[v] += count;
          }
        }
        return day;
      })
    );
    return days;
  }
  /**
   * Total success/blocked counts per identifier across all daily buckets at
   * or after `cutoff`.
   */
  async getUsage(cutoff = 0) {
    const records = await this.aggregate("identifier", cutoff);
    const usage = {};
    for (const day of Object.values(records)) {
      for (const [k, v] of Object.entries(day)) {
        if (!usage[k]) {
          usage[k] = { success: 0, blocked: 0 };
        }
        usage[k].success += v.success;
        usage[k].blocked += v.blocked;
      }
    }
    return usage;
  }
};
182
+
183
// src/cache.ts
/**
 * Thin wrapper around a Map used as a per-process ephemeral cache. It serves
 * two roles: remembering identifiers that are currently blocked (value is the
 * reset timestamp in ms) and acting as a plain counter store for the cached
 * fixed-window algorithm.
 */
var Cache = class {
  cache; // backing Map instance supplied by the caller
  constructor(cache) {
    this.cache = cache;
  }
  /**
   * Reports whether `identifier` is still blocked. Expired entries are
   * evicted lazily on read.
   */
  isBlocked(identifier) {
    if (this.cache.has(identifier)) {
      const reset = this.cache.get(identifier);
      if (reset < Date.now()) {
        // The block has expired: drop it and report unblocked.
        this.cache.delete(identifier);
        return { blocked: false, reset: 0 };
      }
      return { blocked: true, reset };
    }
    return { blocked: false, reset: 0 };
  }
  /** Marks `identifier` as blocked until the given reset timestamp (ms). */
  blockUntil(identifier, reset) {
    this.cache.set(identifier, reset);
  }
  /** Stores an arbitrary value under `key`. */
  set(key, value) {
    this.cache.set(key, value);
  }
  /** Returns the stored value, or null when absent (or falsy). */
  get(key) {
    const value = this.cache.get(key);
    return value || null;
  }
  /** Increments the counter at `key` (missing counts start at 0). */
  incr(key) {
    const next = (this.cache.get(key) ?? 0) + 1;
    this.cache.set(key, next);
    return next;
  }
};
216
+
217
// src/ratelimit.ts
/**
 * Base rate limiter shared by the single- and multi-region variants. Holds
 * the algorithm callback (`limiter`), the Redis context, and the optional
 * analytics / timeout / ephemeral-cache configuration.
 */
var Ratelimit = class {
  limiter;   // async (ctx, key) => { success, limit, remaining, reset, pending }
  ctx;       // { redis, cache? } — redis is an array in the multi-region variant
  prefix;    // key prefix, defaults to "@upstash/ratelimit"
  timeout;   // optional ms budget after which the request is let through
  analytics; // Analytics instance, unless disabled via config.analytics === false
  constructor(config) {
    this.ctx = config.ctx;
    this.limiter = config.limiter;
    this.timeout = config.timeout;
    this.prefix = config.prefix ?? "@upstash/ratelimit";
    // Analytics is on by default; in multi-region setups only the first
    // redis instance receives analytics writes.
    this.analytics = config.analytics !== false ? new Analytics({
      redis: Array.isArray(this.ctx.redis) ? this.ctx.redis[0] : this.ctx.redis,
      prefix: this.prefix
    }) : void 0;
    // A caller-supplied Map becomes the ephemeral cache; omitting the option
    // creates a fresh one. Any other value (e.g. `false`) disables caching.
    if (config.ephemeralCache instanceof Map) {
      this.ctx.cache = new Cache(config.ephemeralCache);
    } else if (typeof config.ephemeralCache === "undefined") {
      this.ctx.cache = new Cache(/* @__PURE__ */ new Map());
    }
  }
  /**
   * Checks whether `identifier` may proceed. `pending` on the result should
   * be awaited (or handed to waitUntil) so background analytics/sync work can
   * finish. NOTE: when `timeout` is configured and Redis answers slower than
   * the timeout, the request is ALLOWED (success: true) rather than blocked.
   */
  limit = async (identifier, req) => {
    const key = [this.prefix, identifier].join(":");
    let timeoutId = null;
    try {
      const arr = [this.limiter(this.ctx, key)];
      if (this.timeout) {
        // Race the limiter against a timer that resolves with an allow-all
        // response after `this.timeout` ms.
        arr.push(
          new Promise((resolve) => {
            timeoutId = setTimeout(() => {
              resolve({
                success: true,
                limit: 0,
                remaining: 0,
                reset: 0,
                pending: Promise.resolve()
              });
            }, this.timeout);
          })
        );
      }
      const res = await Promise.race(arr);
      if (this.analytics) {
        const geo = req ? this.analytics.extractGeo(req) : void 0;
        const analyticsP = this.analytics.record({
          identifier,
          time: Date.now(),
          success: res.success,
          ...geo
        });
        // Chain the analytics write onto `pending` so callers can await it.
        res.pending = Promise.all([res.pending, analyticsP]);
      }
      return res;
    } finally {
      // Always clear the timer so a resolved limiter doesn't leave it running.
      if (timeoutId) {
        clearTimeout(timeoutId);
      }
    }
  };
  /**
   * Polls `limit` until it succeeds or `timeout` (ms) elapses, sleeping until
   * each window reset between attempts. Resolves with the last response
   * either way — callers must still check `res.success`.
   * @throws {Error} when `timeout` is not positive, or when a blocked
   *   response carries reset === 0 (waiting would be meaningless).
   */
  blockUntilReady = async (identifier, timeout) => {
    if (timeout <= 0) {
      throw new Error("timeout must be positive");
    }
    let res;
    const deadline = Date.now() + timeout;
    while (true) {
      res = await this.limit(identifier);
      if (res.success) {
        break;
      }
      if (res.reset === 0) {
        throw new Error("This should not happen");
      }
      // Sleep until the window resets or the deadline, whichever comes first.
      const wait = Math.min(res.reset, deadline) - Date.now();
      await new Promise((r) => setTimeout(r, wait));
      if (Date.now() > deadline) {
        break;
      }
    }
    return res;
  };
};
300
+
301
// src/single.ts
/**
 * Single-region rate limiter: every algorithm runs against one Redis
 * instance. Each static factory returns an async limiter callback
 * `(ctx, identifier) => { success, limit, remaining, reset, pending }`
 * consumed by the base `Ratelimit` class.
 *
 * Fixes in this revision:
 *  - slidingWindow: the previous bucket is the adjacent window index
 *    (`currentWindow - 1`); the old code subtracted the window size in
 *    milliseconds from the window index, addressing a bucket far in the past
 *    so the previous window's traffic was never counted.
 *  - cachedFixedWindow: the cache-hit path used `< tokens` while the miss
 *    path (`remaining >= 0`) and `fixedWindow` admit exactly `tokens`
 *    requests; aligned to `<=`.
 */
var RegionRatelimit = class extends Ratelimit {
  /**
   * @param config - { redis, limiter, prefix?, timeout?, analytics?, ephemeralCache? }
   */
  constructor(config) {
    super({
      prefix: config.prefix,
      limiter: config.limiter,
      timeout: config.timeout,
      analytics: config.analytics,
      ctx: {
        redis: config.redis
      },
      ephemeralCache: config.ephemeralCache
    });
  }
  /**
   * Fixed window: at most `tokens` requests per `window`. Cheap (one INCR),
   * but bursty around window boundaries.
   */
  static fixedWindow(tokens, window) {
    const windowDuration = ms(window);
    const script = `
    local key = KEYS[1]
    local window = ARGV[1]

    local r = redis.call("INCR", key)
    if r == 1 then
    -- The first time this key is set, the value will be 1.
    -- So we only need the expire command once
    redis.call("PEXPIRE", key, window)
    end

    return r
    `;
    return async function(ctx, identifier) {
      const bucket = Math.floor(Date.now() / windowDuration);
      const key = [identifier, bucket].join(":");
      // Answer blocked identifiers from the ephemeral cache, skipping Redis.
      if (ctx.cache) {
        const { blocked, reset: reset2 } = ctx.cache.isBlocked(identifier);
        if (blocked) {
          return {
            success: false,
            limit: tokens,
            remaining: 0,
            reset: reset2,
            pending: Promise.resolve()
          };
        }
      }
      const usedTokensAfterUpdate = await ctx.redis.eval(script, [key], [windowDuration]);
      const success = usedTokensAfterUpdate <= tokens;
      const reset = (bucket + 1) * windowDuration;
      if (ctx.cache && !success) {
        ctx.cache.blockUntil(identifier, reset);
      }
      return {
        success,
        limit: tokens,
        remaining: tokens - usedTokensAfterUpdate,
        reset,
        pending: Promise.resolve()
      };
    };
  }
  /**
   * Sliding window: weights the previous window's count by the fraction of
   * it still inside the sliding interval, smoothing boundary bursts.
   */
  static slidingWindow(tokens, window) {
    const script = `
      local currentKey = KEYS[1] -- identifier including prefixes
      local previousKey = KEYS[2] -- key of the previous bucket
      local tokens = tonumber(ARGV[1]) -- tokens per window
      local now = ARGV[2] -- current timestamp in milliseconds
      local window = ARGV[3] -- interval in milliseconds

      local requestsInCurrentWindow = redis.call("GET", currentKey)
      if requestsInCurrentWindow == false then
        requestsInCurrentWindow = 0
      end

      local requestsInPreviousWindow = redis.call("GET", previousKey)
      if requestsInPreviousWindow == false then
        requestsInPreviousWindow = 0
      end
      local percentageInCurrent = ( now % window) / window
      if requestsInPreviousWindow * ( 1 - percentageInCurrent ) + requestsInCurrentWindow >= tokens then
        return 0
      end

      local newValue = redis.call("INCR", currentKey)
      if newValue == 1 then
        -- The first time this key is set, the value will be 1.
        -- So we only need the expire command once
        redis.call("PEXPIRE", currentKey, window * 2 + 1000) -- Enough time to overlap with a new window + 1 second
      end
      return tokens - newValue
      `;
    const windowSize = ms(window);
    return async function(ctx, identifier) {
      const now = Date.now();
      const currentWindow = Math.floor(now / windowSize);
      const currentKey = [identifier, currentWindow].join(":");
      // BUGFIX: the previous bucket is the adjacent window index, not
      // `currentWindow - windowSize` (an index minus milliseconds).
      const previousWindow = currentWindow - 1;
      const previousKey = [identifier, previousWindow].join(":");
      if (ctx.cache) {
        const { blocked, reset: reset2 } = ctx.cache.isBlocked(identifier);
        if (blocked) {
          return {
            success: false,
            limit: tokens,
            remaining: 0,
            reset: reset2,
            pending: Promise.resolve()
          };
        }
      }
      const remaining = await ctx.redis.eval(script, [currentKey, previousKey], [tokens, now, windowSize]);
      const success = remaining > 0;
      const reset = (currentWindow + 1) * windowSize;
      if (ctx.cache && !success) {
        ctx.cache.blockUntil(identifier, reset);
      }
      return {
        success,
        limit: tokens,
        remaining,
        reset,
        pending: Promise.resolve()
      };
    };
  }
  /**
   * Token bucket: a bucket of `maxTokens` refilled by `refillRate` tokens
   * every `interval`; each request consumes one token.
   */
  static tokenBucket(refillRate, interval, maxTokens) {
    const script = `
      local key = KEYS[1] -- identifier including prefixes
      local maxTokens = tonumber(ARGV[1]) -- maximum number of tokens
      local interval = tonumber(ARGV[2]) -- size of the window in milliseconds
      local refillRate = tonumber(ARGV[3]) -- how many tokens are refilled after each interval
      local now = tonumber(ARGV[4]) -- current timestamp in milliseconds
      local remaining = 0

      local bucket = redis.call("HMGET", key, "updatedAt", "tokens")

      if bucket[1] == false then
        -- The bucket does not exist yet, so we create it and add a ttl.
        remaining = maxTokens - 1

        redis.call("HMSET", key, "updatedAt", now, "tokens", remaining)
        redis.call("PEXPIRE", key, interval)

        return {remaining, now + interval}
      end

      -- The bucket does exist

      local updatedAt = tonumber(bucket[1])
      local tokens = tonumber(bucket[2])

      if now >= updatedAt + interval then
        remaining = math.min(maxTokens, tokens + refillRate) - 1

        redis.call("HMSET", key, "updatedAt", now, "tokens", remaining)
        return {remaining, now + interval}
      end

      if tokens > 0 then
        remaining = tokens - 1
        redis.call("HMSET", key, "updatedAt", now, "tokens", remaining)
      end

      return {remaining, updatedAt + interval}
      `;
    const intervalDuration = ms(interval);
    return async function(ctx, identifier) {
      if (ctx.cache) {
        const { blocked, reset: reset2 } = ctx.cache.isBlocked(identifier);
        if (blocked) {
          return {
            success: false,
            limit: maxTokens,
            remaining: 0,
            reset: reset2,
            pending: Promise.resolve()
          };
        }
      }
      const now = Date.now();
      const key = [identifier, Math.floor(now / intervalDuration)].join(":");
      const [remaining, reset] = await ctx.redis.eval(
        script,
        [key],
        [maxTokens, intervalDuration, refillRate, now]
      );
      const success = remaining > 0;
      if (ctx.cache && !success) {
        ctx.cache.blockUntil(identifier, reset);
      }
      return {
        success,
        limit: maxTokens,
        remaining,
        reset,
        pending: Promise.resolve()
      };
    };
  }
  /**
   * Fixed window with a local counter cache: once the window's count is
   * cached, requests are decided locally and the Redis INCR happens in the
   * background (via `pending`), trading strictness for latency.
   */
  static cachedFixedWindow(tokens, window) {
    const windowDuration = ms(window);
    const script = `
    local key = KEYS[1]
    local window = ARGV[1]

    local r = redis.call("INCR", key)
    if r == 1 then
    -- The first time this key is set, the value will be 1.
    -- So we only need the expire command once
    redis.call("PEXPIRE", key, window)
    end

    return r
    `;
    return async function(ctx, identifier) {
      if (!ctx.cache) {
        throw new Error("This algorithm requires a cache");
      }
      const bucket = Math.floor(Date.now() / windowDuration);
      const key = [identifier, bucket].join(":");
      const reset = (bucket + 1) * windowDuration;
      const hit = typeof ctx.cache.get(key) === "number";
      if (hit) {
        const cachedTokensAfterUpdate = ctx.cache.incr(key);
        // BUGFIX(consistency): admit exactly `tokens` requests, matching the
        // uncached path below (`remaining >= 0`) and `fixedWindow`.
        const success = cachedTokensAfterUpdate <= tokens;
        // On success, refresh the cached counter from Redis in the background.
        const pending = success ? ctx.redis.eval(script, [key], [windowDuration]).then((t) => {
          ctx.cache.set(key, t);
        }) : Promise.resolve();
        return {
          success,
          limit: tokens,
          remaining: tokens - cachedTokensAfterUpdate,
          reset,
          pending
        };
      }
      const usedTokensAfterUpdate = await ctx.redis.eval(script, [key], [windowDuration]);
      ctx.cache.set(key, usedTokensAfterUpdate);
      const remaining = tokens - usedTokensAfterUpdate;
      return {
        success: remaining >= 0,
        limit: tokens,
        remaining,
        reset,
        pending: Promise.resolve()
      };
    };
  }
};
549
+
550
// src/multi.ts
/**
 * Generates a 16-character alphanumeric request id. Not cryptographically
 * secure (Math.random) — only used to tag requests within a rate-limit
 * window.
 * @returns {string} a 16-character id drawn from [A-Za-z0-9]
 */
function randomId() {
  const alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789";
  return Array.from(
    { length: 16 },
    () => alphabet.charAt(Math.floor(Math.random() * alphabet.length))
  ).join("");
}
560
/**
 * Multi-region rate limiter: every request is applied to all configured
 * Redis instances in parallel; the fastest region's answer decides, and the
 * regions are reconciled asynchronously via the returned `pending` promise.
 *
 * Fixes in this revision:
 *  - slidingWindow: previous bucket index is `currentWindow - 1`, not
 *    `currentWindow - windowSize` (an index minus milliseconds).
 *  - slidingWindow sync(): union current-window ids across ALL regions; the
 *    old code destructured only the first region's response and mixed
 *    previous-window ids into the union (compare the `fixedWindow` sync).
 */
var MultiRegionRatelimit = class extends Ratelimit {
  /**
   * @param config - { redis: Redis[], limiter, prefix?, timeout?, analytics?, ephemeralCache? }
   */
  constructor(config) {
    super({
      prefix: config.prefix,
      limiter: config.limiter,
      timeout: config.timeout,
      analytics: config.analytics,
      ctx: {
        redis: config.redis,
        cache: config.ephemeralCache ? new Cache(config.ephemeralCache) : void 0
      }
    });
  }
  /**
   * Fixed window across regions: each request's random id is SADDed to the
   * window's set in every region; the earliest response wins and the others
   * are reconciled by sync() (exposed through `pending`).
   */
  static fixedWindow(tokens, window) {
    const windowDuration = ms(window);
    const script = `
    local key = KEYS[1]
    local id = ARGV[1]
    local window = ARGV[2]

    redis.call("SADD", key, id)
    local members = redis.call("SMEMBERS", key)
    if #members == 1 then
    -- The first time this key is set, the value will be 1.
    -- So we only need the expire command once
    redis.call("PEXPIRE", key, window)
    end

    return members
    `;
    return async function(ctx, identifier) {
      if (ctx.cache) {
        const { blocked, reset: reset2 } = ctx.cache.isBlocked(identifier);
        if (blocked) {
          return {
            success: false,
            limit: tokens,
            remaining: 0,
            reset: reset2,
            pending: Promise.resolve()
          };
        }
      }
      const requestID = randomId();
      const bucket = Math.floor(Date.now() / windowDuration);
      const key = [identifier, bucket].join(":");
      // Fire the script at every region in parallel; keep the promises so
      // sync() can reconcile the sets afterwards.
      const dbs = ctx.redis.map((redis) => ({
        redis,
        request: redis.eval(script, [key], [requestID, windowDuration])
      }));
      const firstResponse = await Promise.any(dbs.map((s) => s.request));
      const usedTokens = firstResponse.length;
      // NOTE(review): one extra token is subtracted, presumably as a safety
      // margin for regions that have not synced yet — confirm upstream intent.
      const remaining = tokens - usedTokens - 1;
      // Propagate ids seen by any region to the regions still missing them.
      async function sync() {
        const individualIDs = await Promise.all(dbs.map((s) => s.request));
        const allIDs = Array.from(new Set(individualIDs.flatMap((_) => _)).values());
        for (const db of dbs) {
          const ids = await db.request;
          if (ids.length >= tokens) {
            continue;
          }
          const diff = allIDs.filter((id) => !ids.includes(id));
          if (diff.length === 0) {
            continue;
          }
          await db.redis.sadd(key, ...allIDs);
        }
      }
      const success = remaining > 0;
      const reset = (bucket + 1) * windowDuration;
      if (ctx.cache && !success) {
        ctx.cache.blockUntil(identifier, reset);
      }
      return {
        success,
        limit: tokens,
        remaining,
        reset,
        pending: sync()
      };
    };
  }
  /**
   * Sliding window across regions, weighting the previous window's ids by
   * the elapsed fraction of the current window.
   */
  static slidingWindow(tokens, window) {
    const windowSize = ms(window);
    const script = `
      local currentKey = KEYS[1] -- identifier including prefixes
      local previousKey = KEYS[2] -- key of the previous bucket
      local tokens = tonumber(ARGV[1]) -- tokens per window
      local now = ARGV[2] -- current timestamp in milliseconds
      local window = ARGV[3] -- interval in milliseconds
      local requestID = ARGV[4] -- uuid for this request

      local currentMembers = redis.call("SMEMBERS", currentKey)
      local requestsInCurrentWindow = #currentMembers
      local previousMembers = redis.call("SMEMBERS", previousKey)
      local requestsInPreviousWindow = #previousMembers

      local percentageInCurrent = ( now % window) / window
      if requestsInPreviousWindow * ( 1 - percentageInCurrent ) + requestsInCurrentWindow >= tokens then
        return {currentMembers, previousMembers}
      end

      redis.call("SADD", currentKey, requestID)
      table.insert(currentMembers, requestID)
      if requestsInCurrentWindow == 0 then
      -- The first time this key is set, the value will be 1.
      -- So we only need the expire command once
      redis.call("PEXPIRE", currentKey, window * 2 + 1000) -- Enough time to overlap with a new window + 1 second
      end
      return {currentMembers, previousMembers}
      `;
    return async function(ctx, identifier) {
      if (ctx.cache) {
        const { blocked, reset: reset2 } = ctx.cache.isBlocked(identifier);
        if (blocked) {
          return {
            success: false,
            limit: tokens,
            remaining: 0,
            reset: reset2,
            pending: Promise.resolve()
          };
        }
      }
      const requestID = randomId();
      const now = Date.now();
      const currentWindow = Math.floor(now / windowSize);
      const currentKey = [identifier, currentWindow].join(":");
      // BUGFIX: the previous bucket is the adjacent window index, not
      // `currentWindow - windowSize`.
      const previousWindow = currentWindow - 1;
      const previousKey = [identifier, previousWindow].join(":");
      const dbs = ctx.redis.map((redis) => ({
        redis,
        request: redis.eval(script, [currentKey, previousKey], [tokens, now, windowSize, requestID])
      }));
      const percentageInCurrent = now % windowSize / windowSize;
      const [current, previous] = await Promise.any(dbs.map((s) => s.request));
      const usedTokens = previous.length * (1 - percentageInCurrent) + current.length;
      const remaining = tokens - usedTokens;
      // Propagate current-window ids seen by any region to regions missing them.
      async function sync() {
        // BUGFIX: gather every region's response and union only the
        // current-window member lists.
        const allResults = await Promise.all(dbs.map((s) => s.request));
        const allIDs = Array.from(new Set(allResults.flatMap(([members]) => members)).values());
        for (const db of dbs) {
          const [ids] = await db.request;
          if (ids.length >= tokens) {
            continue;
          }
          const diff = allIDs.filter((id) => !ids.includes(id));
          if (diff.length === 0) {
            continue;
          }
          await db.redis.sadd(currentKey, ...allIDs);
        }
      }
      const success = remaining > 0;
      const reset = (currentWindow + 1) * windowSize;
      if (ctx.cache && !success) {
        ctx.cache.blockUntil(identifier, reset);
      }
      return {
        success,
        limit: tokens,
        remaining,
        reset,
        pending: sync()
      };
    };
  }
};
730
// Annotate the CommonJS export names for ESM import in node:
// (dead code by design: the `0 &&` guard means this never executes; Node's
// CJS named-exports detection parses it statically to discover the names)
0 && (module.exports = {
  Analytics,
  MultiRegionRatelimit,
  Ratelimit
});
//# sourceMappingURL=index.js.map