@upstash/ratelimit 0.3.2 → 0.3.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +27 -0
- package/dist/index.d.ts +3 -21
- package/dist/index.js +188 -107
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +188 -107
- package/dist/index.mjs.map +1 -1
- package/package.json +5 -2
package/README.md
CHANGED

@@ -87,6 +87,7 @@ import { Redis } from "@upstash/redis";
const ratelimit = new Ratelimit({
  redis: Redis.fromEnv(),
  limiter: Ratelimit.slidingWindow(10, "10 s"),
+  analytics: true
});

// Use a constant string to limit all requests with a single ratelimit

@@ -161,6 +162,7 @@ const ratelimit = new Ratelimit({
  redis: Redis.fromEnv(),
  limiter: Ratelimit.slidingWindow(10, "10 s"),
  timeout: 1000, // 1 second
+  analytics: true
});
```

@@ -184,6 +186,7 @@ in a finite amount of time.
const ratelimit = new Ratelimit({
  redis: Redis.fromEnv(),
  limiter: Ratelimit.slidingWindow(10, "10 s"),
+  analytics: true
});

// `blockUntilReady` returns a promise that resolves as soon as the request is allowed to be processed, or after 30 seconds

@@ -259,6 +262,7 @@ const ratelimit = new MultiRegionRatelimit({
    }),
  ],
  limiter: MultiRegionRatelimit.slidingWindow(10, "10 s"),
+  analytics: true
});

// Use a constant string to limit all requests with a single ratelimit

@@ -326,6 +330,7 @@ Create a new ratelimiter, that allows 10 requests per 10 seconds.
const ratelimit = new Ratelimit({
  redis: Redis.fromEnv(),
  limiter: Ratelimit.fixedWindow(10, "10 s"),
+  analytics: true
});
```

@@ -366,6 +371,7 @@ Create a new ratelimiter, that allows 10 requests per 10 seconds.
const ratelimit = new Ratelimit({
  redis: Redis.fromEnv(),
  limiter: Ratelimit.slidingWindow(10, "10 s"),
+  analytics: true
});
```

@@ -397,9 +403,30 @@ size of 10.
const ratelimit = new Ratelimit({
  redis: Redis.fromEnv(),
  limiter: Ratelimit.tokenBucket(5, "10 s", 10),
+  analytics: true
});
```

+## Analytics
+
+You can enable analytics to get a better understanding of how your ratelimiting
+is performing. This is done by setting `analytics: true` in the options.
+
+All data is stored in the same Redis database.
+
+```ts
+const ratelimit = new Ratelimit({
+  redis: Redis.fromEnv(),
+  limiter: Ratelimit.tokenBucket(5, "10 s", 10),
+  analytics: true // <- Enable analytics
+});
+```
+
+Go to the [Ratelimit Dashboard](https://console.upstash.com/rate-limit) and select the database you are using.
+
+
+
+
## Contributing

### [Install Deno](https://deno.land/#installation)
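Taken together, the README changes only add `analytics: true` to the existing snippets and introduce the new Analytics section. For orientation, here is a minimal end-to-end sketch assembled from the APIs shown in this diff (`Ratelimit`, `Redis.fromEnv()`, `limit()` returning `success`); the handler function and the identifier are illustrative assumptions, not part of the package:

```ts
import { Ratelimit } from "@upstash/ratelimit";
import { Redis } from "@upstash/redis";

// Ratelimiter with analytics enabled, as documented in the new README section.
const ratelimit = new Ratelimit({
  redis: Redis.fromEnv(),
  limiter: Ratelimit.slidingWindow(10, "10 s"),
  analytics: true, // events are recorded in the same Redis database
});

// Hypothetical request handler; the identifier could be an IP address, user id, or API key.
export async function handler(identifier: string): Promise<string> {
  const { success } = await ratelimit.limit(identifier);
  return success ? "OK" : "Too many requests";
}
```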
package/dist/index.d.ts
CHANGED

@@ -89,8 +89,8 @@ type AnalyticsConfig = {
 * The Analytics package is experimental and can change at any time.
 */
declare class Analytics {
-    private readonly
-    private readonly
+    private readonly analytics;
+    private readonly table;
    constructor(config: AnalyticsConfig);
    /**
     * Try to extract the geo information from the request

@@ -104,25 +104,7 @@ declare class Analytics {
        cf?: Geo;
    }): Geo;
    record(event: Event): Promise<void>;
-    /**
-     * Aggregates the events by the given field and returns the number of successes and failures per value
-     *
-     * @param aggregateBy - The field to aggregate by
-     * @param cutoff - Timestamp in milliseconds to limit the aggregation to `cutoff` until now
-     * @returns
-     */
-    aggregate<TAggregateBy extends keyof Omit<Event, "time">>(aggregateBy: TAggregateBy, cutoff?: number): Promise<Record<string, Record<string, {
-        success: number;
-        blocked: number;
-    }>>>;
-    /**
-     * Builds a timeseries of the aggreagated value
-     *
-     * @param aggregateBy - The field to aggregate by
-     * @param cutoff - Timestamp in milliseconds to limit the aggregation to `cutoff` until now
-     * @returns
-     */
-    series<TAggregateBy extends keyof Omit<Event, "time">>(aggregateBy: TAggregateBy, cutoff?: number): Promise<({
+    series<TFilter extends keyof Omit<Event, "time">>(filter: TFilter, cutoff?: number): Promise<({
        time: number;
    } & Record<string, number>)[]>;
    getUsage(cutoff?: number): Promise<Record<string, {
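The declaration changes shrink the public surface of the experimental `Analytics` class: `aggregate` is removed and `series` now takes a `filter` key instead of `aggregateBy`. A hedged sketch of how the 0.3.4 declarations above could be consumed; how the instance is obtained, the cutoff value, and the `"identifier"` key are assumptions for illustration (the `success`/`blocked` shape of `getUsage` comes from the implementation later in this diff):

```ts
// Hypothetical consumer of the 0.3.4 Analytics declarations shown above.
declare const analytics: Analytics; // constructed elsewhere with an AnalyticsConfig

async function report(): Promise<void> {
  const cutoff = Date.now() - 24 * 60 * 60 * 1000; // last 24 hours

  // series(): one bucket per time slot, counts keyed by the values of the filtered field.
  const buckets = await analytics.series("identifier", cutoff);
  for (const bucket of buckets) {
    console.log(new Date(bucket.time).toISOString(), bucket);
  }

  // getUsage(): success/blocked totals per identifier since the cutoff.
  const usage = await analytics.getUsage(cutoff);
  for (const [identifier, counts] of Object.entries(usage)) {
    console.log(identifier, counts.success, counts.blocked);
  }
}
```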
package/dist/index.js
CHANGED

@@ -47,13 +47,25 @@ function ms(d) {
}

// src/analytics.ts
+var import_core_analytics = require("@upstash/core-analytics");
var Analytics = class {
-
-
+  analytics;
+  table = "events";
  constructor(config) {
-    this.
-
+    this.analytics = new import_core_analytics.Analytics({
+      redis: config.redis,
+      window: "1h",
+      prefix: config.prefix ?? "@upstash/ratelimit",
+      retention: "90d"
+    });
  }
+  /**
+   * Try to extract the geo information from the request
+   *
+   * This handles Vercel's `req.geo` and and Cloudflare's `request.cf` properties
+   * @param req
+   * @returns
+   */
  extractGeo(req) {
    if (typeof req.geo !== "undefined") {
      return req.geo;
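The constructor no longer stores a raw Redis client and prefix; it wraps everything in `@upstash/core-analytics` with a fixed one-hour window and 90-day retention. A rough sketch of what that wiring amounts to, using only the constructor options visible in the hunk above (standalone construction like this is an illustration, not something the package asks you to do):

```ts
import { Redis } from "@upstash/redis";
import { Analytics as CoreAnalytics } from "@upstash/core-analytics";

// Mirrors the compiled constructor above: the ratelimiter's Redis client is reused
// for analytics, with the options hard-coded in 0.3.4.
const redis = Redis.fromEnv();
const analytics = new CoreAnalytics({
  redis,                          // the same Redis client the ratelimiter uses
  window: "1h",                   // values hard-coded in the 0.3.4 constructor
  prefix: "@upstash/ratelimit",   // default prefix unless config.prefix is provided
  retention: "90d",
});
```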
@@ -64,116 +76,30 @@ var Analytics = class {
    return {};
  }
  async record(event) {
-
-    const key = [this.prefix, "events", bucket].join(":");
-    await this.redis.hincrby(
-      key,
-      JSON.stringify({
-        ...event,
-        time: void 0
-      }),
-      1
-    );
-  }
-  async aggregate(aggregateBy, cutoff = 0) {
-    const keys = [];
-    let cursor = 0;
-    do {
-      const [nextCursor, found] = await this.redis.scan(cursor, {
-        match: [this.prefix, "events", "*"].join(":"),
-        count: 1e3
-      });
-      cursor = nextCursor;
-      for (const key of found) {
-        const timestamp = parseInt(key.split(":").pop());
-        if (timestamp >= cutoff) {
-          keys.push(key);
-        }
-      }
-    } while (cursor !== 0);
-    const days = {};
-    await Promise.all(
-      keys.sort().map(async (key) => {
-        const fields = await this.redis.hgetall(key);
-        if (!fields) {
-          return;
-        }
-        const day = {};
-        for (const [field, count] of Object.entries(fields)) {
-          const r = JSON.parse(field);
-          for (const [k, v] of Object.entries(r)) {
-            if (k !== aggregateBy) {
-              continue;
-            }
-            if (!day[v]) {
-              day[v] = {
-                success: 0,
-                blocked: 0
-              };
-            }
-            if (r.success) {
-              day[v].success += count;
-            } else {
-              day[v].blocked += count;
-            }
-          }
-        }
-        days[key.split(":")[2]] = day;
-      })
-    );
-    return days;
+    await this.analytics.ingest(this.table, event);
  }
-  async series(
-    const
-
-
-
-
-        count: 1e3
-      });
-      cursor = nextCursor;
-      for (const key of found) {
-        const timestamp = parseInt(key.split(":").pop());
-        if (timestamp >= cutoff) {
-          keys.push(key);
-        }
-      }
-    } while (cursor !== 0);
-    const days = await Promise.all(
-      keys.sort().map(async (key) => {
-        const fields = await this.redis.hgetall(key);
-        const day = { time: parseInt(key.split(":")[2]) };
-        if (!fields) {
-          return day;
-        }
-        for (const [field, count] of Object.entries(fields)) {
-          const r = JSON.parse(field);
-          for (const [k, v] of Object.entries(r)) {
-            console.log({ k, v });
-            if (k !== aggregateBy) {
-              continue;
-            }
-            if (!day[v]) {
-              day[v] = 0;
-            }
-            day[v] += count;
-          }
-        }
-        return day;
-      })
-    );
-    return days;
+  async series(filter, cutoff = 0) {
+    const records = await this.analytics.query(this.table, {
+      filter: [filter],
+      range: cutoff ? [cutoff] : void 0
+    });
+    return records;
  }
  async getUsage(cutoff = 0) {
-    const records = await this.
+    const records = await this.analytics.aggregateBy(this.table, "identifier", {
+      range: cutoff ? [cutoff] : void 0
+    });
    const usage = {};
-    for (const
-      for (const [k, v] of Object.entries(
+    for (const bucket of records) {
+      for (const [k, v] of Object.entries(bucket)) {
+        if (k === "time") {
+          continue;
+        }
        if (!usage[k]) {
          usage[k] = { success: 0, blocked: 0 };
        }
-        usage[k].success += v
-        usage[k].blocked += v
+        usage[k].success += v;
+        usage[k].blocked += v;
      }
    }
    return usage;
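With the hand-rolled SCAN/HGETALL aggregation removed, `record`, `series`, and `getUsage` reduce to the three core-analytics calls visible in this hunk: `ingest`, `query`, and `aggregateBy`. A condensed sketch of that data flow, reusing the `analytics` client from the earlier sketch; the event fields are illustrative (the code above only shows that events carry `time` and `success` and are aggregated by `identifier`):

```ts
// Hypothetical event, shaped like what the ratelimiter records per request.
const event = { identifier: "user_123", success: false, time: Date.now() };

// record(event) now boils down to a single ingest into the "events" table.
await analytics.ingest("events", event);

// series(filter, cutoff): query the table, filtered by a field, since the cutoff.
const series = await analytics.query("events", {
  filter: ["identifier"],
  range: [Date.now() - 60 * 60 * 1000],
});

// getUsage(cutoff): aggregate the table by identifier; the caller then sums the
// per-bucket counts into { success, blocked } totals, as shown above.
const buckets = await analytics.aggregateBy("events", "identifier", {
  range: [Date.now() - 60 * 60 * 1000],
});
```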
@@ -182,6 +108,9 @@ var Analytics = class {

// src/cache.ts
var Cache = class {
+  /**
+   * Stores identifier -> reset (in milliseconds)
+   */
  cache;
  constructor(cache) {
    this.cache = cache;

@@ -236,6 +165,25 @@ var Ratelimit = class {
      this.ctx.cache = new Cache(/* @__PURE__ */ new Map());
    }
  }
+  /**
+   * Determine if a request should pass or be rejected based on the identifier and previously chosen ratelimit.
+   *
+   * Use this if you want to reject all requests that you can not handle right now.
+   *
+   * @example
+   * ```ts
+   *  const ratelimit = new Ratelimit({
+   *    redis: Redis.fromEnv(),
+   *    limiter: Ratelimit.slidingWindow(10, "10 s")
+   *  })
+   *
+   *  const { success } = await ratelimit.limit(id)
+   *  if (!success){
+   *    return "Nope"
+   *  }
+   *  return "Yes"
+   * ```
+   */
  limit = async (identifier, req) => {
    const key = [this.prefix, identifier].join(":");
    let timeoutId = null;

@@ -274,6 +222,28 @@
      }
    }
  };
+  /**
+   * Block until the request may pass or timeout is reached.
+   *
+   * This method returns a promsie that resolves as soon as the request may be processed
+   * or after the timeoue has been reached.
+   *
+   * Use this if you want to delay the request until it is ready to get processed.
+   *
+   * @example
+   * ```ts
+   *  const ratelimit = new Ratelimit({
+   *    redis: Redis.fromEnv(),
+   *    limiter: Ratelimit.slidingWindow(10, "10 s")
+   *  })
+   *
+   *  const { success } = await ratelimit.blockUntilReady(id, 60_000)
+   *  if (!success){
+   *    return "Nope"
+   *  }
+   *  return "Yes"
+   * ```
+   */
  blockUntilReady = async (identifier, timeout) => {
    if (timeout <= 0) {
      throw new Error("timeout must be positive");

@@ -300,6 +270,9 @@

// src/single.ts
var RegionRatelimit = class extends Ratelimit {
+  /**
+   * Create a new Ratelimit instance by providing a `@upstash/redis` instance and the algorithn of your choice.
+   */
  constructor(config) {
    super({
      prefix: config.prefix,

@@ -312,6 +285,24 @@ var RegionRatelimit = class extends Ratelimit {
      ephemeralCache: config.ephemeralCache
    });
  }
+  /**
+   * Each requests inside a fixed time increases a counter.
+   * Once the counter reaches a maxmimum allowed number, all further requests are
+   * rejected.
+   *
+   * **Pro:**
+   *
+   * - Newer requests are not starved by old ones.
+   * - Low storage cost.
+   *
+   * **Con:**
+   *
+   * A burst of requests near the boundary of a window can result in a very
+   * high request rate because two windows will be filled with requests quickly.
+   *
+   * @param tokens - How many requests a user can make in each time window.
+   * @param window - A fixed timeframe
+   */
  static fixedWindow(tokens, window) {
    const windowDuration = ms(window);
    const script = `
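The new `fixedWindow` JSDoc describes the model in prose: a counter per fixed time window, rejection once it exceeds the limit, and a weakness at window boundaries. A small in-memory sketch of that model, purely to make the documented behavior concrete (this is not the package's Lua/Redis implementation, and the names are mine):

```ts
// Illustrative fixed-window counter: one counter per (identifier, window start).
function makeFixedWindow(tokens: number, windowMs: number) {
  const counters = new Map<string, number>();
  return (identifier: string, now = Date.now()) => {
    const windowStart = Math.floor(now / windowMs) * windowMs;
    const key = `${identifier}:${windowStart}`;
    const used = (counters.get(key) ?? 0) + 1;
    counters.set(key, used);
    return { success: used <= tokens, remaining: Math.max(0, tokens - used) };
  };
}

// The boundary caveat from the JSDoc: 10 requests just before a window ends and
// 10 more just after it starts are all accepted, roughly 20 requests in a short burst.
const limit = makeFixedWindow(10, 10_000);
limit("user_123"); // -> { success: true, remaining: 9 }
```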
@@ -357,6 +348,22 @@
      };
    };
  }
+  /**
+   * Combined approach of `slidingLogs` and `fixedWindow` with lower storage
+   * costs than `slidingLogs` and improved boundary behavior by calcualting a
+   * weighted score between two windows.
+   *
+   * **Pro:**
+   *
+   * Good performance allows this to scale to very high loads.
+   *
+   * **Con:**
+   *
+   * Nothing major.
+   *
+   * @param tokens - How many requests a user can make in each time window.
+   * @param window - The duration in which the user can max X requests.
+   */
  static slidingWindow(tokens, window) {
    const script = `
      local currentKey = KEYS[1] -- identifier including prefixes
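The sliding-window JSDoc mentions a "weighted score between two windows" without spelling out the arithmetic. The usual weighting for this family of limiters looks roughly like the sketch below; this is my reconstruction for illustration, not a transcription of the Lua script in the package:

```ts
// Estimate the request count in a sliding window from two fixed-window counters:
// previousCount = hits in the last full window, currentCount = hits in the current one.
function slidingWindowEstimate(
  previousCount: number,
  currentCount: number,
  windowMs: number,
  now = Date.now(),
): number {
  const elapsedInCurrent = now % windowMs;               // time already spent in the current window
  const previousWeight = (windowMs - elapsedInCurrent) / windowMs;
  return previousCount * previousWeight + currentCount;  // the weighted score
}

// Example: 10 s window, 8 hits in the previous window, 4 so far, 2.5 s into the
// current window -> 8 * 0.75 + 4 = 10, so an 11th request would exceed a limit of 10.
```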
@@ -422,6 +429,19 @@
      };
    };
  }
+  /**
+   * You have a bucket filled with `{maxTokens}` tokens that refills constantly
+   * at `{refillRate}` per `{interval}`.
+   * Every request will remove one token from the bucket and if there is no
+   * token to take, the request is rejected.
+   *
+   * **Pro:**
+   *
+   * - Bursts of requests are smoothed out and you can process them at a constant
+   * rate.
+   * - Allows to set a higher initial burst limit by setting `maxTokens` higher
+   * than `refillRate`
+   */
  static tokenBucket(refillRate, interval, maxTokens) {
    const script = `
      local key = KEYS[1] -- identifier including prefixes
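The token-bucket JSDoc describes refill and consumption in prose. A minimal in-memory sketch of that arithmetic (illustrative only; the package evaluates this as a Lua script against Redis):

```ts
// Illustrative token bucket: refillRate tokens are added every intervalMs, capped at maxTokens.
function makeTokenBucket(refillRate: number, intervalMs: number, maxTokens: number) {
  let tokens = maxTokens;
  let lastRefill = Date.now();
  return (now = Date.now()) => {
    const intervalsPassed = Math.floor((now - lastRefill) / intervalMs);
    if (intervalsPassed > 0) {
      tokens = Math.min(maxTokens, tokens + intervalsPassed * refillRate);
      lastRefill += intervalsPassed * intervalMs;
    }
    if (tokens === 0) {
      return { success: false, remaining: 0 }; // no token to take -> rejected
    }
    tokens -= 1;                               // each request consumes one token
    return { success: true, remaining: tokens };
  };
}

// Matches the README example: tokenBucket(5, "10 s", 10) -> refill 5 per 10 s, burst of up to 10.
const take = makeTokenBucket(5, 10_000, 10);
take(); // -> { success: true, remaining: 9 }
```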
@@ -496,6 +516,30 @@
      };
    };
  }
+  /**
+   * cachedFixedWindow first uses the local cache to decide if a request may pass and then updates
+   * it asynchronously.
+   * This is experimental and not yet recommended for production use.
+   *
+   * @experimental
+   *
+   * Each requests inside a fixed time increases a counter.
+   * Once the counter reaches a maxmimum allowed number, all further requests are
+   * rejected.
+   *
+   * **Pro:**
+   *
+   * - Newer requests are not starved by old ones.
+   * - Low storage cost.
+   *
+   * **Con:**
+   *
+   * A burst of requests near the boundary of a window can result in a very
+   * high request rate because two windows will be filled with requests quickly.
+   *
+   * @param tokens - How many requests a user can make in each time window.
+   * @param window - A fixed timeframe
+   */
  static cachedFixedWindow(tokens, window) {
    const windowDuration = ms(window);
    const script = `
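`cachedFixedWindow` is documented as deciding from the local cache first and updating the counter asynchronously, and is explicitly marked experimental. One way such a cache-first check can work is sketched below; this is an illustration of the idea, not the package's internals (which combine the `Cache` documented earlier, storing identifier -> reset in milliseconds, with the fixed-window Lua script):

```ts
// Illustrative cache-first flow: answer from the local cache, let Redis catch up later.
const blockedUntil = new Map<string, number>(); // identifier -> reset timestamp (ms)

function cachedLimit(
  identifier: string,
  syncWithRedis: (id: string) => Promise<{ success: boolean; reset: number }>,
) {
  const reset = blockedUntil.get(identifier) ?? 0;
  if (reset > Date.now()) {
    return { success: false, reset }; // the cache already knows this identifier is blocked
  }
  // Update the shared counter in the background; remember the reset time on rejection.
  void syncWithRedis(identifier).then((result) => {
    if (!result.success) {
      blockedUntil.set(identifier, result.reset);
    }
  });
  return { success: true, reset };
}
```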
@@ -558,6 +602,9 @@ function randomId() {
  return result;
}
var MultiRegionRatelimit = class extends Ratelimit {
+  /**
+   * Create a new Ratelimit instance by providing a `@upstash/redis` instance and the algorithn of your choice.
+   */
  constructor(config) {
    super({
      prefix: config.prefix,

@@ -570,6 +617,24 @@ var MultiRegionRatelimit = class extends Ratelimit {
      }
    });
  }
+  /**
+   * Each requests inside a fixed time increases a counter.
+   * Once the counter reaches a maxmimum allowed number, all further requests are
+   * rejected.
+   *
+   * **Pro:**
+   *
+   * - Newer requests are not starved by old ones.
+   * - Low storage cost.
+   *
+   * **Con:**
+   *
+   * A burst of requests near the boundary of a window can result in a very
+   * high request rate because two windows will be filled with requests quickly.
+   *
+   * @param tokens - How many requests a user can make in each time window.
+   * @param window - A fixed timeframe
+   */
  static fixedWindow(tokens, window) {
    const windowDuration = ms(window);
    const script = `

@@ -639,6 +704,22 @@ var MultiRegionRatelimit = class extends Ratelimit {
      };
    };
  }
+  /**
+   * Combined approach of `slidingLogs` and `fixedWindow` with lower storage
+   * costs than `slidingLogs` and improved boundary behavior by calcualting a
+   * weighted score between two windows.
+   *
+   * **Pro:**
+   *
+   * Good performance allows this to scale to very high loads.
+   *
+   * **Con:**
+   *
+   * Nothing major.
+   *
+   * @param tokens - How many requests a user can make in each time window.
+   * @param window - The duration in which the user can max X requests.
+   */
  static slidingWindow(tokens, window) {
    const windowSize = ms(window);
    const script = `
|