@upstash/ratelimit 0.4.0 → 0.4.2
This diff shows the contents of publicly released package versions as they appear in their respective public registries and is provided for informational purposes only.
- package/README.md +44 -3
- package/dist/index.d.ts +137 -137
- package/dist/index.js +248 -248
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +248 -248
- package/dist/index.mjs.map +1 -1
- package/package.json +12 -12
package/dist/index.mjs
CHANGED
@@ -1,27 +1,3 @@
-// src/duration.ts
-function ms(d) {
-  const match = d.match(/^(\d+)\s?(ms|s|m|h|d)$/);
-  if (!match) {
-    throw new Error(`Unable to parse window size: ${d}`);
-  }
-  const time = parseInt(match[1]);
-  const unit = match[2];
-  switch (unit) {
-    case "ms":
-      return time;
-    case "s":
-      return time * 1e3;
-    case "m":
-      return time * 1e3 * 60;
-    case "h":
-      return time * 1e3 * 60 * 60;
-    case "d":
-      return time * 1e3 * 60 * 60 * 24;
-    default:
-      throw new Error(`Unable to parse window size: ${d}`);
-  }
-}
-
 // src/analytics.ts
 import { Analytics as CoreAnalytics } from "@upstash/core-analytics";
 var Analytics = class {
@@ -119,6 +95,30 @@ var Cache = class {
   }
 };
 
+// src/duration.ts
+function ms(d) {
+  const match = d.match(/^(\d+)\s?(ms|s|m|h|d)$/);
+  if (!match) {
+    throw new Error(`Unable to parse window size: ${d}`);
+  }
+  const time = parseInt(match[1]);
+  const unit = match[2];
+  switch (unit) {
+    case "ms":
+      return time;
+    case "s":
+      return time * 1e3;
+    case "m":
+      return time * 1e3 * 60;
+    case "h":
+      return time * 1e3 * 60 * 60;
+    case "d":
+      return time * 1e3 * 60 * 60 * 24;
+    default:
+      throw new Error(`Unable to parse window size: ${d}`);
+  }
+}
+
 // src/ratelimit.ts
 var Ratelimit = class {
   limiter;
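The `ms` helper removed at the top of the bundle reappears here, after the Cache class; it is what converts the human-readable window strings into milliseconds. A hedged usage sketch, assuming the standard single-region setup from the README (the 20-requests-per-"10 s" values are illustrative):

```ts
import { Ratelimit } from "@upstash/ratelimit";
import { Redis } from "@upstash/redis";

// "10 s" is parsed by ms() via the regex above into 10 * 1e3 = 10000 ms,
// so this limiter allows at most 20 requests per identifier per 10-second window.
const ratelimit = new Ratelimit({
  redis: Redis.fromEnv(), // reads UPSTASH_REDIS_REST_URL / UPSTASH_REDIS_REST_TOKEN
  limiter: Ratelimit.slidingWindow(20, "10 s"),
});
```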
@@ -131,7 +131,7 @@ var Ratelimit = class {
     this.limiter = config.limiter;
     this.timeout = config.timeout ?? 5e3;
     this.prefix = config.prefix ?? "@upstash/ratelimit";
-    this.analytics = config.analytics
+    this.analytics = config.analytics ? new Analytics({
       redis: Array.isArray(this.ctx.redis) ? this.ctx.redis[0] : this.ctx.redis,
       prefix: this.prefix
     }) : void 0;
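This hunk completes the constructor's analytics branch, wiring `config.analytics` to the `Analytics` wrapper (backed by the first Redis instance when an array is passed). From user code this is a single flag; a hedged sketch, reusing the limiter shape from above:

```ts
const ratelimitWithAnalytics = new Ratelimit({
  redis: Redis.fromEnv(),
  limiter: Ratelimit.fixedWindow(10, "60 s"),
  // When true, hits are recorded via @upstash/core-analytics under the
  // key prefix ("@upstash/ratelimit" unless `prefix` is overridden).
  analytics: true,
});
```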
@@ -207,7 +207,7 @@ var Ratelimit = class {
   /**
    * Block until the request may pass or timeout is reached.
    *
-   * This method returns a
+   * This method returns a promise that resolves as soon as the request may be processed
    * or after the timeoue has been reached.
    *
    * Use this if you want to delay the request until it is ready to get processed.
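The corrected doc comment describes `blockUntilReady`, which resolves once the request may proceed or the deadline expires, rather than rejecting immediately. A hedged usage sketch, reusing the `ratelimit` instance from the sketch above (the identifier and the 30-second deadline are placeholders):

```ts
// Wait up to 30 s for capacity instead of failing fast.
const { success } = await ratelimit.blockUntilReady("user-123", 30_000);

if (!success) {
  // Still over the limit after the deadline elapsed.
  throw new Error("Unable to process request, please try again later");
}
```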
@@ -250,6 +250,224 @@ var Ratelimit = class {
   };
 };
 
+// src/multi.ts
+function randomId() {
+  let result = "";
+  const characters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789";
+  const charactersLength = characters.length;
+  for (let i = 0; i < 16; i++) {
+    result += characters.charAt(Math.floor(Math.random() * charactersLength));
+  }
+  return result;
+}
+var MultiRegionRatelimit = class extends Ratelimit {
+  /**
+   * Create a new Ratelimit instance by providing a `@upstash/redis` instance and the algorithn of your choice.
+   */
+  constructor(config) {
+    super({
+      prefix: config.prefix,
+      limiter: config.limiter,
+      timeout: config.timeout,
+      analytics: config.analytics,
+      ctx: {
+        redis: config.redis,
+        cache: config.ephemeralCache ? new Cache(config.ephemeralCache) : void 0
+      }
+    });
+  }
+  /**
+   * Each requests inside a fixed time increases a counter.
+   * Once the counter reaches a maxmimum allowed number, all further requests are
+   * rejected.
+   *
+   * **Pro:**
+   *
+   * - Newer requests are not starved by old ones.
+   * - Low storage cost.
+   *
+   * **Con:**
+   *
+   * A burst of requests near the boundary of a window can result in a very
+   * high request rate because two windows will be filled with requests quickly.
+   *
+   * @param tokens - How many requests a user can make in each time window.
+   * @param window - A fixed timeframe
+   */
+  static fixedWindow(tokens, window) {
+    const windowDuration = ms(window);
+    const script = `
+      local key    = KEYS[1]
+      local id     = ARGV[1]
+      local window = ARGV[2]
+
+      redis.call("SADD", key, id)
+      local members = redis.call("SMEMBERS", key)
+      if #members == 1 then
+      -- The first time this key is set, the value will be 1.
+      -- So we only need the expire command once
+        redis.call("PEXPIRE", key, window)
+      end
+
+      return members
+      `;
+    return async function(ctx, identifier) {
+      if (ctx.cache) {
+        const { blocked, reset: reset2 } = ctx.cache.isBlocked(identifier);
+        if (blocked) {
+          return {
+            success: false,
+            limit: tokens,
+            remaining: 0,
+            reset: reset2,
+            pending: Promise.resolve()
+          };
+        }
+      }
+      const requestID = randomId();
+      const bucket = Math.floor(Date.now() / windowDuration);
+      const key = [identifier, bucket].join(":");
+      const dbs = ctx.redis.map((redis) => ({
+        redis,
+        request: redis.eval(script, [key], [requestID, windowDuration])
+      }));
+      const firstResponse = await Promise.any(dbs.map((s) => s.request));
+      const usedTokens = firstResponse.length;
+      const remaining = tokens - usedTokens - 1;
+      async function sync() {
+        const individualIDs = await Promise.all(dbs.map((s) => s.request));
+        const allIDs = Array.from(new Set(individualIDs.flatMap((_) => _)).values());
+        for (const db of dbs) {
+          const ids = await db.request;
+          if (ids.length >= tokens) {
+            continue;
+          }
+          const diff = allIDs.filter((id) => !ids.includes(id));
+          if (diff.length === 0) {
+            continue;
+          }
+          await db.redis.sadd(key, ...allIDs);
+        }
+      }
+      const success = remaining > 0;
+      const reset = (bucket + 1) * windowDuration;
+      if (ctx.cache && !success) {
+        ctx.cache.blockUntil(identifier, reset);
+      }
+      return {
+        success,
+        limit: tokens,
+        remaining,
+        reset,
+        pending: sync()
+      };
+    };
+  }
+  /**
+   * Combined approach of `slidingLogs` and `fixedWindow` with lower storage
+   * costs than `slidingLogs` and improved boundary behavior by calcualting a
+   * weighted score between two windows.
+   *
+   * **Pro:**
+   *
+   * Good performance allows this to scale to very high loads.
+   *
+   * **Con:**
+   *
+   * Nothing major.
+   *
+   * @param tokens - How many requests a user can make in each time window.
+   * @param window - The duration in which the user can max X requests.
+   */
+  static slidingWindow(tokens, window) {
+    const windowSize = ms(window);
+    const script = `
+      local currentKey  = KEYS[1]           -- identifier including prefixes
+      local previousKey = KEYS[2]           -- key of the previous bucket
+      local tokens      = tonumber(ARGV[1]) -- tokens per window
+      local now         = ARGV[2]           -- current timestamp in milliseconds
+      local window      = ARGV[3]           -- interval in milliseconds
+      local requestID   = ARGV[4]           -- uuid for this request
+
+
+      local currentMembers = redis.call("SMEMBERS", currentKey)
+      local requestsInCurrentWindow = #currentMembers
+      local previousMembers = redis.call("SMEMBERS", previousKey)
+      local requestsInPreviousWindow = #previousMembers
+
+      local percentageInCurrent = ( now % window) / window
+      if requestsInPreviousWindow * ( 1 - percentageInCurrent ) + requestsInCurrentWindow >= tokens then
+        return {currentMembers, previousMembers}
+      end
+
+      redis.call("SADD", currentKey, requestID)
+      table.insert(currentMembers, requestID)
+      if requestsInCurrentWindow == 0 then
+      -- The first time this key is set, the value will be 1.
+      -- So we only need the expire command once
+        redis.call("PEXPIRE", currentKey, window * 2 + 1000) -- Enough time to overlap with a new window + 1 second
+      end
+      return {currentMembers, previousMembers}
+      `;
+    const windowDuration = ms(window);
+    return async function(ctx, identifier) {
+      if (ctx.cache) {
+        const { blocked, reset: reset2 } = ctx.cache.isBlocked(identifier);
+        if (blocked) {
+          return {
+            success: false,
+            limit: tokens,
+            remaining: 0,
+            reset: reset2,
+            pending: Promise.resolve()
+          };
+        }
+      }
+      const requestID = randomId();
+      const now = Date.now();
+      const currentWindow = Math.floor(now / windowSize);
+      const currentKey = [identifier, currentWindow].join(":");
+      const previousWindow = currentWindow - windowSize;
+      const previousKey = [identifier, previousWindow].join(":");
+      const dbs = ctx.redis.map((redis) => ({
+        redis,
+        request: redis.eval(script, [currentKey, previousKey], [tokens, now, windowDuration, requestID])
+      }));
+      const percentageInCurrent = now % windowDuration / windowDuration;
+      const [current, previous] = await Promise.any(dbs.map((s) => s.request));
+      const usedTokens = previous.length * (1 - percentageInCurrent) + current.length;
+      const remaining = tokens - usedTokens;
+      async function sync() {
+        const [individualIDs] = await Promise.all(dbs.map((s) => s.request));
+        const allIDs = Array.from(new Set(individualIDs.flatMap((_) => _)).values());
+        for (const db of dbs) {
+          const [ids] = await db.request;
+          if (ids.length >= tokens) {
+            continue;
+          }
+          const diff = allIDs.filter((id) => !ids.includes(id));
+          if (diff.length === 0) {
+            continue;
+          }
+          await db.redis.sadd(currentKey, ...allIDs);
+        }
+      }
+      const success = remaining > 0;
+      const reset = (currentWindow + 1) * windowDuration;
+      if (ctx.cache && !success) {
+        ctx.cache.blockUntil(identifier, reset);
+      }
+      return {
+        success,
+        limit: tokens,
+        remaining,
+        reset,
+        pending: sync()
+      };
+    };
+  }
+};
+
 // src/single.ts
 var RegionRatelimit = class extends Ratelimit {
   /**
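The block above moves `MultiRegionRatelimit` ahead of the single-region class in the bundle; its shape is unchanged: one Redis client per region, `Promise.any` for the fastest answer, and a `sync()` step that reconciles the member sets afterwards. A hedged construction sketch; the env-var names for the regional credentials are placeholders:

```ts
import { MultiRegionRatelimit } from "@upstash/ratelimit";
import { Redis } from "@upstash/redis";

const multiLimit = new MultiRegionRatelimit({
  redis: [
    new Redis({ url: process.env.EU_REDIS_URL!, token: process.env.EU_REDIS_TOKEN! }),
    new Redis({ url: process.env.US_REDIS_URL!, token: process.env.US_REDIS_TOKEN! }),
  ],
  limiter: MultiRegionRatelimit.fixedWindow(10, "60 s"),
  ephemeralCache: new Map(), // optional in-memory cache for already-blocked identifiers
});

const { success, remaining, pending } = await multiLimit.limit("user-123");
// `pending` is the cross-region sync; keep the runtime alive until it settles,
// e.g. event.waitUntil(pending) on edge runtimes.
await pending;
```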
@@ -356,7 +574,7 @@ var RegionRatelimit = class extends Ratelimit {
 
       local requestsInCurrentWindow = redis.call("GET", currentKey)
       if requestsInCurrentWindow == false then
-        requestsInCurrentWindow =
+        requestsInCurrentWindow = -1
       end
 
 
@@ -366,7 +584,7 @@ var RegionRatelimit = class extends Ratelimit {
       end
       local percentageInCurrent = ( now % window) / window
       if requestsInPreviousWindow * ( 1 - percentageInCurrent ) + requestsInCurrentWindow >= tokens then
-        return
+        return -1
       end
 
       local newValue = redis.call("INCR", currentKey)
@@ -397,7 +615,7 @@ var RegionRatelimit = class extends Ratelimit {
         }
       }
       const remaining = await ctx.redis.eval(script, [currentKey, previousKey], [tokens, now, windowSize]);
-      const success = remaining
+      const success = remaining >= 0;
       const reset = (currentWindow + 1) * windowSize;
       if (ctx.cache && !success) {
         ctx.cache.blockUntil(identifier, reset);
@@ -405,7 +623,7 @@ var RegionRatelimit = class extends Ratelimit {
       return {
         success,
         limit: tokens,
-        remaining,
+        remaining: Math.max(0, remaining),
        reset,
         pending: Promise.resolve()
       };
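The last few hunks tighten the single-region sliding window: the Lua script now treats a missing counter as -1 and returns -1 when the weighted count is over the limit, while the TypeScript side derives success from `remaining >= 0` and clamps the reported remaining to zero. A small worked example of the weighted check, using the formula from the script (the numbers are illustrative, not package behavior):

```ts
// Weighted sliding-window check from the script above:
//   requestsInPreviousWindow * (1 - percentageInCurrent) + requestsInCurrentWindow >= tokens
// Example: tokens = 10, 6 hits in the previous window, 5 in the current one,
// and 40% of the current window elapsed.
const tokens = 10;
const previous = 6;
const current = 5;
const percentageInCurrent = 0.4;

const weighted = previous * (1 - percentageInCurrent) + current; // 6 * 0.6 + 5 = 8.6
const success = tokens - weighted >= 0;                          // 1.4 >= 0 -> still allowed
const remaining = Math.max(0, tokens - weighted);                // clamped at 0 when over the limit
```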
@@ -572,224 +790,6 @@ var RegionRatelimit = class extends Ratelimit {
     };
   }
 };
-
-// src/multi.ts
-function randomId() {
-  let result = "";
-  const characters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789";
-  const charactersLength = characters.length;
-  for (let i = 0; i < 16; i++) {
-    result += characters.charAt(Math.floor(Math.random() * charactersLength));
-  }
-  return result;
-}
-var MultiRegionRatelimit = class extends Ratelimit {
-  /**
-   * Create a new Ratelimit instance by providing a `@upstash/redis` instance and the algorithn of your choice.
-   */
-  constructor(config) {
-    super({
-      prefix: config.prefix,
-      limiter: config.limiter,
-      timeout: config.timeout,
-      analytics: config.analytics,
-      ctx: {
-        redis: config.redis,
-        cache: config.ephemeralCache ? new Cache(config.ephemeralCache) : void 0
-      }
-    });
-  }
-  /**
-   * Each requests inside a fixed time increases a counter.
-   * Once the counter reaches a maxmimum allowed number, all further requests are
-   * rejected.
-   *
-   * **Pro:**
-   *
-   * - Newer requests are not starved by old ones.
-   * - Low storage cost.
-   *
-   * **Con:**
-   *
-   * A burst of requests near the boundary of a window can result in a very
-   * high request rate because two windows will be filled with requests quickly.
-   *
-   * @param tokens - How many requests a user can make in each time window.
-   * @param window - A fixed timeframe
-   */
-  static fixedWindow(tokens, window) {
-    const windowDuration = ms(window);
-    const script = `
-      local key    = KEYS[1]
-      local id     = ARGV[1]
-      local window = ARGV[2]
-
-      redis.call("SADD", key, id)
-      local members = redis.call("SMEMBERS", key)
-      if #members == 1 then
-      -- The first time this key is set, the value will be 1.
-      -- So we only need the expire command once
-        redis.call("PEXPIRE", key, window)
-      end
-
-      return members
-      `;
-    return async function(ctx, identifier) {
-      if (ctx.cache) {
-        const { blocked, reset: reset2 } = ctx.cache.isBlocked(identifier);
-        if (blocked) {
-          return {
-            success: false,
-            limit: tokens,
-            remaining: 0,
-            reset: reset2,
-            pending: Promise.resolve()
-          };
-        }
-      }
-      const requestID = randomId();
-      const bucket = Math.floor(Date.now() / windowDuration);
-      const key = [identifier, bucket].join(":");
-      const dbs = ctx.redis.map((redis) => ({
-        redis,
-        request: redis.eval(script, [key], [requestID, windowDuration])
-      }));
-      const firstResponse = await Promise.any(dbs.map((s) => s.request));
-      const usedTokens = firstResponse.length;
-      const remaining = tokens - usedTokens - 1;
-      async function sync() {
-        const individualIDs = await Promise.all(dbs.map((s) => s.request));
-        const allIDs = Array.from(new Set(individualIDs.flatMap((_) => _)).values());
-        for (const db of dbs) {
-          const ids = await db.request;
-          if (ids.length >= tokens) {
-            continue;
-          }
-          const diff = allIDs.filter((id) => !ids.includes(id));
-          if (diff.length === 0) {
-            continue;
-          }
-          await db.redis.sadd(key, ...allIDs);
-        }
-      }
-      const success = remaining > 0;
-      const reset = (bucket + 1) * windowDuration;
-      if (ctx.cache && !success) {
-        ctx.cache.blockUntil(identifier, reset);
-      }
-      return {
-        success,
-        limit: tokens,
-        remaining,
-        reset,
-        pending: sync()
-      };
-    };
-  }
-  /**
-   * Combined approach of `slidingLogs` and `fixedWindow` with lower storage
-   * costs than `slidingLogs` and improved boundary behavior by calcualting a
-   * weighted score between two windows.
-   *
-   * **Pro:**
-   *
-   * Good performance allows this to scale to very high loads.
-   *
-   * **Con:**
-   *
-   * Nothing major.
-   *
-   * @param tokens - How many requests a user can make in each time window.
-   * @param window - The duration in which the user can max X requests.
-   */
-  static slidingWindow(tokens, window) {
-    const windowSize = ms(window);
-    const script = `
-      local currentKey  = KEYS[1]           -- identifier including prefixes
-      local previousKey = KEYS[2]           -- key of the previous bucket
-      local tokens      = tonumber(ARGV[1]) -- tokens per window
-      local now         = ARGV[2]           -- current timestamp in milliseconds
-      local window      = ARGV[3]           -- interval in milliseconds
-      local requestID   = ARGV[4]           -- uuid for this request
-
-
-      local currentMembers = redis.call("SMEMBERS", currentKey)
-      local requestsInCurrentWindow = #currentMembers
-      local previousMembers = redis.call("SMEMBERS", previousKey)
-      local requestsInPreviousWindow = #previousMembers
-
-      local percentageInCurrent = ( now % window) / window
-      if requestsInPreviousWindow * ( 1 - percentageInCurrent ) + requestsInCurrentWindow >= tokens then
-        return {currentMembers, previousMembers}
-      end
-
-      redis.call("SADD", currentKey, requestID)
-      table.insert(currentMembers, requestID)
-      if requestsInCurrentWindow == 0 then
-      -- The first time this key is set, the value will be 1.
-      -- So we only need the expire command once
-        redis.call("PEXPIRE", currentKey, window * 2 + 1000) -- Enough time to overlap with a new window + 1 second
-      end
-      return {currentMembers, previousMembers}
-      `;
-    const windowDuration = ms(window);
-    return async function(ctx, identifier) {
-      if (ctx.cache) {
-        const { blocked, reset: reset2 } = ctx.cache.isBlocked(identifier);
-        if (blocked) {
-          return {
-            success: false,
-            limit: tokens,
-            remaining: 0,
-            reset: reset2,
-            pending: Promise.resolve()
-          };
-        }
-      }
-      const requestID = randomId();
-      const now = Date.now();
-      const currentWindow = Math.floor(now / windowSize);
-      const currentKey = [identifier, currentWindow].join(":");
-      const previousWindow = currentWindow - windowSize;
-      const previousKey = [identifier, previousWindow].join(":");
-      const dbs = ctx.redis.map((redis) => ({
-        redis,
-        request: redis.eval(script, [currentKey, previousKey], [tokens, now, windowDuration, requestID])
-      }));
-      const percentageInCurrent = now % windowDuration / windowDuration;
-      const [current, previous] = await Promise.any(dbs.map((s) => s.request));
-      const usedTokens = previous.length * (1 - percentageInCurrent) + current.length;
-      const remaining = tokens - usedTokens;
-      async function sync() {
-        const [individualIDs] = await Promise.all(dbs.map((s) => s.request));
-        const allIDs = Array.from(new Set(individualIDs.flatMap((_) => _)).values());
-        for (const db of dbs) {
-          const [ids] = await db.request;
-          if (ids.length >= tokens) {
-            continue;
-          }
-          const diff = allIDs.filter((id) => !ids.includes(id));
-          if (diff.length === 0) {
-            continue;
-          }
-          await db.redis.sadd(currentKey, ...allIDs);
-        }
-      }
-      const success = remaining > 0;
-      const reset = (currentWindow + 1) * windowDuration;
-      if (ctx.cache && !success) {
-        ctx.cache.blockUntil(identifier, reset);
-      }
-      return {
-        success,
-        limit: tokens,
-        remaining,
-        reset,
-        pending: sync()
-      };
-    };
-  }
-};
 export {
   Analytics,
   MultiRegionRatelimit,