@upstash/ratelimit 0.4.0 → 0.4.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +44 -3
- package/dist/index.d.ts +137 -137
- package/dist/index.js +248 -248
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +248 -248
- package/dist/index.mjs.map +1 -1
- package/package.json +12 -12
package/README.md
CHANGED
@@ -1,4 +1,4 @@
-# Upstash
+# Upstash Rate Limit
 
 [](https://github.com/upstash/ratelimit/actions/workflows/tests.yaml)
 
@@ -51,7 +51,7 @@ for:
 
 ## Docs
 
-[doc.deno.land](https://
+[doc.deno.land](https://deno.land/x/upstash_ratelimit/packages/sdk/src/index.ts)
 
 ## Quick Start
 
@@ -86,7 +86,13 @@ import { Redis } from "@upstash/redis";
 const ratelimit = new Ratelimit({
   redis: Redis.fromEnv(),
   limiter: Ratelimit.slidingWindow(10, "10 s"),
-  analytics: true
+  analytics: true,
+  /**
+   * Optional prefix for the keys used in redis. This is useful if you want to share a redis
+   * instance with other applications and want to avoid key collisions. The default prefix is
+   * "@upstash/ratelimit"
+   */
+  prefix: "@upstash/ratelimit",
 });
 
 // Use a constant string to limit all requests with a single ratelimit
@@ -224,6 +230,41 @@ instance outside of your handler function. While the function is still hot, the
 ratelimiter can block requests without having to request data from redis, thus
 saving time and money.
 
+
+## Using multiple limits
+
+Sometimes you might want to apply different limits to different users. For example you might want to allow 10 requests per 10 seconds for free users, but 60 requests per 10 seconds for paid users.
+
+Here's how you could do that:
+
+```ts
+import { Redis } from "@upstash/redis"
+import { Ratelimit } from "@upstash/ratelimit"
+
+const redis = Redis.fromEnv()
+
+const ratelimit = {
+  free: new Ratelimit({
+    redis,
+    analytics: true,
+    prefix: "ratelimit:free",
+    limiter: Ratelimit.slidingWindow(10, "10s"),
+  }),
+  paid: new Ratelimit({
+    redis,
+    analytics: true,
+    prefix: "ratelimit:paid",
+    limiter: Ratelimit.slidingWindow(60, "10s"),
+  })
+}
+
+
+await ratelimit.free.limit(ip)
+// or for a paid user you might have an email or userId available:
+await ratelimit.paid.limit(userId)
+
+```
+
 ## MultiRegion replicated ratelimiting
 
 Using a single redis instance has the downside of providing low latencies only
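The snippet the release adds constructs both limiters and then calls each one unconditionally; in a real handler you would pick one per request and act on the result. A minimal sketch of that wiring follows. The `getUserPlan` helper and the handler shape are illustrative assumptions; only `Ratelimit`, `Redis`, `slidingWindow`, `prefix`, and `limit()` come from the package.

```ts
import { Redis } from "@upstash/redis";
import { Ratelimit } from "@upstash/ratelimit";

const redis = Redis.fromEnv();

const ratelimit = {
  free: new Ratelimit({
    redis,
    prefix: "ratelimit:free",
    limiter: Ratelimit.slidingWindow(10, "10 s"),
  }),
  paid: new Ratelimit({
    redis,
    prefix: "ratelimit:paid",
    limiter: Ratelimit.slidingWindow(60, "10 s"),
  }),
};

// Hypothetical lookup; replace with however your app resolves the user's plan.
declare function getUserPlan(userId: string): Promise<"free" | "paid">;

export async function handle(userId: string): Promise<Response> {
  const plan = await getUserPlan(userId);
  // Pick the limiter that matches the plan and check this request against it.
  const { success, remaining } = await ratelimit[plan].limit(userId);
  if (!success) {
    return new Response("Too many requests", { status: 429 });
  }
  return new Response(`ok, ${remaining} requests left in this window`);
}
```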
package/dist/index.d.ts
CHANGED
@@ -1,5 +1,48 @@
 import { Redis } from '@upstash/redis';
 
+type Geo = {
+    country?: string;
+    city?: string;
+    region?: string;
+    ip?: string;
+};
+type Event = Geo & {
+    identifier: string;
+    time: number;
+    success: boolean;
+};
+type AnalyticsConfig = {
+    redis: Redis;
+    prefix?: string;
+};
+/**
+ * The Analytics package is experimental and can change at any time.
+ */
+declare class Analytics {
+    private readonly analytics;
+    private readonly table;
+    constructor(config: AnalyticsConfig);
+    /**
+     * Try to extract the geo information from the request
+     *
+     * This handles Vercel's `req.geo` and and Cloudflare's `request.cf` properties
+     * @param req
+     * @returns
+     */
+    extractGeo(req: {
+        geo?: Geo;
+        cf?: Geo;
+    }): Geo;
+    record(event: Event): Promise<void>;
+    series<TFilter extends keyof Omit<Event, "time">>(filter: TFilter, cutoff: number): Promise<({
+        time: number;
+    } & Record<string, number>)[]>;
+    getUsage(cutoff?: number): Promise<Record<string, {
+        success: number;
+        blocked: number;
+    }>>;
+}
+
 type Unit = "ms" | "s" | "m" | "h" | "d";
 type Duration = `${number} ${Unit}` | `${number}${Unit}`;
 
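The `Analytics` class above is normally fed by the ratelimiter itself when `analytics: true` is set, but it is also exported from the package. A rough sketch of direct use, based only on the declarations in this hunk; the event values and the 24-hour cutoff are illustrative, and `time` is assumed to be a unix timestamp in milliseconds (the declaration only requires a number):

```ts
import { Redis } from "@upstash/redis";
import { Analytics } from "@upstash/ratelimit";

const analytics = new Analytics({ redis: Redis.fromEnv() });

// Record a single event; the geo fields (country, city, region, ip) are optional.
await analytics.record({
  identifier: "user_123",
  time: Date.now(),
  success: true,
  country: "DE",
});

// Aggregate success/blocked counts per identifier since a cutoff timestamp.
const usage = await analytics.getUsage(Date.now() - 24 * 60 * 60 * 1000);
console.log(usage);
```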
@@ -70,49 +113,6 @@ type Algorithm<TContext> = (ctx: TContext, identifier: string, opts?: {
     cache?: EphemeralCache;
 }) => Promise<RatelimitResponse>;
 
-type Geo = {
-    country?: string;
-    city?: string;
-    region?: string;
-    ip?: string;
-};
-type Event = Geo & {
-    identifier: string;
-    time: number;
-    success: boolean;
-};
-type AnalyticsConfig = {
-    redis: Redis;
-    prefix?: string;
-};
-/**
- * The Analytics package is experimental and can change at any time.
- */
-declare class Analytics {
-    private readonly analytics;
-    private readonly table;
-    constructor(config: AnalyticsConfig);
-    /**
-     * Try to extract the geo information from the request
-     *
-     * This handles Vercel's `req.geo` and and Cloudflare's `request.cf` properties
-     * @param req
-     * @returns
-     */
-    extractGeo(req: {
-        geo?: Geo;
-        cf?: Geo;
-    }): Geo;
-    record(event: Event): Promise<void>;
-    series<TFilter extends keyof Omit<Event, "time">>(filter: TFilter, cutoff: number): Promise<({
-        time: number;
-    } & Record<string, number>)[]>;
-    getUsage(cutoff?: number): Promise<Record<string, {
-        success: number;
-        blocked: number;
-    }>>;
-}
-
 type RatelimitConfig<TContext> = {
     /**
      * The ratelimiter function to use.
@@ -161,7 +161,7 @@ type RatelimitConfig<TContext> = {
     * If enabled, the ratelimiter will store analytics data in redis, which you can check out at
     * https://upstash.com/ratelimit
     *
-    * @default
+    * @default false
     */
    analytics?: boolean;
 };
@@ -212,7 +212,7 @@ declare abstract class Ratelimit<TContext extends Context> {
    /**
     * Block until the request may pass or timeout is reached.
     *
-    * This method returns a
+    * This method returns a promise that resolves as soon as the request may be processed
     * or after the timeoue has been reached.
     *
     * Use this if you want to delay the request until it is ready to get processed.
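For reference, a minimal sketch of the `blockUntilReady` flow documented above. The declaration only types `timeout` as a number; it is treated as milliseconds here, which is an assumption, and the 30-second value is arbitrary.

```ts
import { Redis } from "@upstash/redis";
import { Ratelimit } from "@upstash/ratelimit";

const ratelimit = new Ratelimit({
  redis: Redis.fromEnv(),
  limiter: Ratelimit.slidingWindow(10, "10 s"),
});

async function sendWhenAllowed(identifier: string) {
  // Resolves as soon as the request may be processed, or once the timeout is reached.
  const { success } = await ratelimit.blockUntilReady(identifier, 30_000);
  if (!success) {
    throw new Error("still rate limited after waiting 30 s");
  }
  // ...proceed with the delayed work here
}
```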
@@ -234,23 +234,20 @@ declare abstract class Ratelimit<TContext extends Context> {
     blockUntilReady: (identifier: string, timeout: number) => Promise<RatelimitResponse>;
 }
 
-type
+type MultiRegionRatelimitConfig = {
     /**
-     *
+     * Instances of `@upstash/redis`
      * @see https://github.com/upstash/upstash-redis#quick-start
      */
-    redis: Redis;
+    redis: Redis[];
     /**
      * The ratelimiter function to use.
      *
      * Choose one of the predefined ones or implement your own.
      * Available algorithms are exposed via static methods:
-     * -
-     * - Ratelimiter.slidingLogs
-     * - Ratelimiter.slidingWindow
-     * - Ratelimiter.tokenBucket
+     * - MultiRegionRatelimit.fixedWindow
      */
-    limiter: Algorithm<
+    limiter: Algorithm<MultiRegionContext>;
     /**
      * All keys in redis are prefixed with this.
      *
@@ -271,7 +268,7 @@ type RegionRatelimitConfig = {
     * Set to `false` to disable.
     *
     * If left undefined, a map is created automatically, but it can only work
-    * if the map or
+    * if the map or th ratelimit instance is created outside your serverless function handler.
     */
    ephemeralCache?: Map<string, number> | false;
    /**
@@ -293,21 +290,21 @@ type RegionRatelimitConfig = {
  *
  * @example
  * ```ts
- * const { limit } = new
+ * const { limit } = new MultiRegionRatelimit({
  *    redis: Redis.fromEnv(),
- * limiter:
- *    "30 m", // interval of 30 minutes
+ *    limiter: MultiRegionRatelimit.fixedWindow(
  *      10, // Allow 10 requests per window of 30 minutes
+ *      "30 m", // interval of 30 minutes
  *    )
  * })
  *
  * ```
  */
-declare class
+declare class MultiRegionRatelimit extends Ratelimit<MultiRegionContext> {
    /**
     * Create a new Ratelimit instance by providing a `@upstash/redis` instance and the algorithn of your choice.
     */
-   constructor(config:
+   constructor(config: MultiRegionRatelimitConfig);
    /**
     * Each requests inside a fixed time increases a counter.
     * Once the counter reaches a maxmimum allowed number, all further requests are
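Putting the multi-region declarations together, a construction sketch might look like the following. Only `MultiRegionRatelimit`, the `redis` array, `fixedWindow`, and `limit()` come from the package; the region-specific environment variable names are placeholders.

```ts
import { Redis } from "@upstash/redis";
import { MultiRegionRatelimit } from "@upstash/ratelimit";

const ratelimit = new MultiRegionRatelimit({
  // One @upstash/redis client per region; MultiRegionRatelimitConfig takes Redis[].
  redis: [
    new Redis({ url: process.env.EU_REDIS_URL!, token: process.env.EU_REDIS_TOKEN! }),
    new Redis({ url: process.env.US_REDIS_URL!, token: process.env.US_REDIS_TOKEN! }),
  ],
  // fixedWindow is the algorithm exposed for the multi-region limiter.
  limiter: MultiRegionRatelimit.fixedWindow(10, "30 m"),
});

const { success } = await ratelimit.limit("user_123");
```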
@@ -334,7 +331,7 @@ declare class RegionRatelimit extends Ratelimit<RegionContext> {
    /**
     * The duration in which `tokens` requests are allowed.
     */
-   window: Duration): Algorithm<
+   window: Duration): Algorithm<MultiRegionContext>;
    /**
     * Combined approach of `slidingLogs` and `fixedWindow` with lower storage
     * costs than `slidingLogs` and improved boundary behavior by calcualting a
@@ -359,86 +356,26 @@ declare class RegionRatelimit extends Ratelimit<RegionContext> {
    /**
     * The duration in which `tokens` requests are allowed.
     */
-   window: Duration): Algorithm<
-   /**
-    * You have a bucket filled with `{maxTokens}` tokens that refills constantly
-    * at `{refillRate}` per `{interval}`.
-    * Every request will remove one token from the bucket and if there is no
-    * token to take, the request is rejected.
-    *
-    * **Pro:**
-    *
-    * - Bursts of requests are smoothed out and you can process them at a constant
-    * rate.
-    * - Allows to set a higher initial burst limit by setting `maxTokens` higher
-    * than `refillRate`
-    */
-   static tokenBucket(
-   /**
-    * How many tokens are refilled per `interval`
-    *
-    * An interval of `10s` and refillRate of 5 will cause a new token to be added every 2 seconds.
-    */
-   refillRate: number,
-   /**
-    * The interval for the `refillRate`
-    */
-   interval: Duration,
-   /**
-    * Maximum number of tokens.
-    * A newly created bucket starts with this many tokens.
-    * Useful to allow higher burst limits.
-    */
-   maxTokens: number): Algorithm<RegionContext>;
-   /**
-    * cachedFixedWindow first uses the local cache to decide if a request may pass and then updates
-    * it asynchronously.
-    * This is experimental and not yet recommended for production use.
-    *
-    * @experimental
-    *
-    * Each requests inside a fixed time increases a counter.
-    * Once the counter reaches a maxmimum allowed number, all further requests are
-    * rejected.
-    *
-    * **Pro:**
-    *
-    * - Newer requests are not starved by old ones.
-    * - Low storage cost.
-    *
-    * **Con:**
-    *
-    * A burst of requests near the boundary of a window can result in a very
-    * high request rate because two windows will be filled with requests quickly.
-    *
-    * @param tokens - How many requests a user can make in each time window.
-    * @param window - A fixed timeframe
-    */
-   static cachedFixedWindow(
-   /**
-    * How many requests are allowed per window.
-    */
-   tokens: number,
-   /**
-    * The duration in which `tokens` requests are allowed.
-    */
-   window: Duration): Algorithm<RegionContext>;
+   window: Duration): Algorithm<MultiRegionContext>;
 }
 
-type
+type RegionRatelimitConfig = {
    /**
-    *
+    * Instance of `@upstash/redis`
     * @see https://github.com/upstash/upstash-redis#quick-start
     */
-   redis: Redis
+   redis: Redis;
    /**
     * The ratelimiter function to use.
     *
     * Choose one of the predefined ones or implement your own.
     * Available algorithms are exposed via static methods:
-    * -
+    * - Ratelimiter.fixedWindow
+    * - Ratelimiter.slidingLogs
+    * - Ratelimiter.slidingWindow
+    * - Ratelimiter.tokenBucket
     */
-   limiter: Algorithm<
+   limiter: Algorithm<RegionContext>;
    /**
     * All keys in redis are prefixed with this.
     *
@@ -459,7 +396,7 @@ type MultiRegionRatelimitConfig = {
     * Set to `false` to disable.
     *
     * If left undefined, a map is created automatically, but it can only work
-    * if the map or
+    * if the map or the ratelimit instance is created outside your serverless function handler.
     */
    ephemeralCache?: Map<string, number> | false;
    /**
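The `ephemeralCache` option documented above expects the map (or the ratelimit instance) to be created outside the handler so it survives across invocations while the function stays warm. A small sketch of passing an explicit cache, under that assumption:

```ts
import { Redis } from "@upstash/redis";
import { Ratelimit } from "@upstash/ratelimit";

// Created once per runtime instance, outside the request handler, so it can be reused
// across warm invocations. Passing `false` instead would disable the ephemeral cache.
const cache = new Map<string, number>();

const ratelimit = new Ratelimit({
  redis: Redis.fromEnv(),
  limiter: Ratelimit.slidingWindow(10, "10 s"),
  ephemeralCache: cache,
});
```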
@@ -481,21 +418,21 @@ type MultiRegionRatelimitConfig = {
  *
  * @example
  * ```ts
- * const { limit } = new
+ * const { limit } = new Ratelimit({
  *    redis: Redis.fromEnv(),
- * limiter:
- *    10, // Allow 10 requests per window of 30 minutes
+ *    limiter: Ratelimit.slidingWindow(
  *      "30 m", // interval of 30 minutes
+ *      10, // Allow 10 requests per window of 30 minutes
  *    )
  * })
  *
  * ```
  */
-declare class
+declare class RegionRatelimit extends Ratelimit<RegionContext> {
    /**
     * Create a new Ratelimit instance by providing a `@upstash/redis` instance and the algorithn of your choice.
     */
-   constructor(config:
+   constructor(config: RegionRatelimitConfig);
    /**
     * Each requests inside a fixed time increases a counter.
     * Once the counter reaches a maxmimum allowed number, all further requests are
@@ -522,7 +459,7 @@ declare class MultiRegionRatelimit extends Ratelimit<MultiRegionContext> {
    /**
     * The duration in which `tokens` requests are allowed.
     */
-   window: Duration): Algorithm<
+   window: Duration): Algorithm<RegionContext>;
    /**
     * Combined approach of `slidingLogs` and `fixedWindow` with lower storage
     * costs than `slidingLogs` and improved boundary behavior by calcualting a
@@ -547,7 +484,70 @@ declare class MultiRegionRatelimit extends Ratelimit<MultiRegionContext> {
    /**
     * The duration in which `tokens` requests are allowed.
     */
-   window: Duration): Algorithm<
+   window: Duration): Algorithm<RegionContext>;
+   /**
+    * You have a bucket filled with `{maxTokens}` tokens that refills constantly
+    * at `{refillRate}` per `{interval}`.
+    * Every request will remove one token from the bucket and if there is no
+    * token to take, the request is rejected.
+    *
+    * **Pro:**
+    *
+    * - Bursts of requests are smoothed out and you can process them at a constant
+    * rate.
+    * - Allows to set a higher initial burst limit by setting `maxTokens` higher
+    * than `refillRate`
+    */
+   static tokenBucket(
+   /**
+    * How many tokens are refilled per `interval`
+    *
+    * An interval of `10s` and refillRate of 5 will cause a new token to be added every 2 seconds.
+    */
+   refillRate: number,
+   /**
+    * The interval for the `refillRate`
+    */
+   interval: Duration,
+   /**
+    * Maximum number of tokens.
+    * A newly created bucket starts with this many tokens.
+    * Useful to allow higher burst limits.
+    */
+   maxTokens: number): Algorithm<RegionContext>;
+   /**
+    * cachedFixedWindow first uses the local cache to decide if a request may pass and then updates
+    * it asynchronously.
+    * This is experimental and not yet recommended for production use.
+    *
+    * @experimental
+    *
+    * Each requests inside a fixed time increases a counter.
+    * Once the counter reaches a maxmimum allowed number, all further requests are
+    * rejected.
+    *
+    * **Pro:**
+    *
+    * - Newer requests are not starved by old ones.
+    * - Low storage cost.
+    *
+    * **Con:**
+    *
+    * A burst of requests near the boundary of a window can result in a very
+    * high request rate because two windows will be filled with requests quickly.
+    *
+    * @param tokens - How many requests a user can make in each time window.
+    * @param window - A fixed timeframe
+    */
+   static cachedFixedWindow(
+   /**
+    * How many requests are allowed per window.
+    */
+   tokens: number,
+   /**
+    * The duration in which `tokens` requests are allowed.
+    */
+   window: Duration): Algorithm<RegionContext>;
 }
 
 export { Algorithm, Analytics, AnalyticsConfig, MultiRegionRatelimit, MultiRegionRatelimitConfig, RegionRatelimit as Ratelimit, RegionRatelimitConfig as RatelimitConfig };
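As the final hunk shows, the `tokenBucket` and `cachedFixedWindow` declarations now live on `RegionRatelimit` (exported as `Ratelimit`). A short usage sketch of the token bucket, with arbitrary example numbers; only the constructor, `tokenBucket(refillRate, interval, maxTokens)`, and `limit()` come from the package:

```ts
import { Redis } from "@upstash/redis";
import { Ratelimit } from "@upstash/ratelimit";

const ratelimit = new Ratelimit({
  redis: Redis.fromEnv(),
  // Refill 5 tokens every 10 seconds, with the bucket capped at 20 tokens,
  // matching the tokenBucket(refillRate, interval, maxTokens) parameter order above.
  limiter: Ratelimit.tokenBucket(5, "10 s", 20),
});

const { success, remaining } = await ratelimit.limit("user_123");
console.log(success, remaining);
```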