@upstash/ratelimit 0.0.0-ci.e36c7a4668d76e92b9a8650ce5d3967a8d0a7e36-20241006173732
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +106 -0
- package/dist/index.d.mts +770 -0
- package/dist/index.d.ts +770 -0
- package/dist/index.js +1665 -0
- package/dist/index.js.map +1 -0
- package/dist/index.mjs +1641 -0
- package/dist/index.mjs.map +1 -0
- package/package.json +1 -0
package/dist/index.d.mts
ADDED
|
@@ -0,0 +1,770 @@
|
|
|
1
|
+
import { Aggregate } from '@upstash/core-analytics';
|
|
2
|
+
import { Pipeline } from '@upstash/redis';
|
|
3
|
+
|
|
4
|
+
/**
|
|
5
|
+
* EphemeralCache is used to block certain identifiers right away in case they have already exceeded the ratelimit.
|
|
6
|
+
*/
|
|
7
|
+
type EphemeralCache = {
|
|
8
|
+
isBlocked: (identifier: string) => {
|
|
9
|
+
blocked: boolean;
|
|
10
|
+
reset: number;
|
|
11
|
+
};
|
|
12
|
+
blockUntil: (identifier: string, reset: number) => void;
|
|
13
|
+
set: (key: string, value: number) => void;
|
|
14
|
+
get: (key: string) => number | null;
|
|
15
|
+
incr: (key: string) => number;
|
|
16
|
+
pop: (key: string) => void;
|
|
17
|
+
empty: () => void;
|
|
18
|
+
size: () => number;
|
|
19
|
+
};
|
|
20
|
+
type RegionContext = {
|
|
21
|
+
redis: Redis;
|
|
22
|
+
cache?: EphemeralCache;
|
|
23
|
+
};
|
|
24
|
+
type MultiRegionContext = {
|
|
25
|
+
regionContexts: Omit<RegionContext[], "cache">;
|
|
26
|
+
cache?: EphemeralCache;
|
|
27
|
+
};
|
|
28
|
+
type RatelimitResponseType = "timeout" | "cacheBlock" | "denyList";
|
|
29
|
+
type Context = RegionContext | MultiRegionContext;
|
|
30
|
+
type RatelimitResponse = {
|
|
31
|
+
/**
|
|
32
|
+
* Whether the request may pass(true) or exceeded the limit(false)
|
|
33
|
+
*/
|
|
34
|
+
success: boolean;
|
|
35
|
+
/**
|
|
36
|
+
* Maximum number of requests allowed within a window.
|
|
37
|
+
*/
|
|
38
|
+
limit: number;
|
|
39
|
+
/**
|
|
40
|
+
* How many requests the user has left within the current window.
|
|
41
|
+
*/
|
|
42
|
+
remaining: number;
|
|
43
|
+
/**
|
|
44
|
+
* Unix timestamp in milliseconds when the limits are reset.
|
|
45
|
+
*/
|
|
46
|
+
reset: number;
|
|
47
|
+
/**
|
|
48
|
+
* For the MultiRegion setup we do some synchronizing in the background, after returning the current limit.
|
|
49
|
+
* Or when analytics is enabled, we send the analytics asynchronously after returning the limit.
|
|
50
|
+
* In most cases you can simply ignore this.
|
|
51
|
+
*
|
|
52
|
+
* On Vercel Edge or Cloudflare workers, you need to explicitly handle the pending Promise like this:
|
|
53
|
+
*
|
|
54
|
+
* ```ts
|
|
55
|
+
* const { pending } = await ratelimit.limit("id")
|
|
56
|
+
* context.waitUntil(pending)
|
|
57
|
+
* ```
|
|
58
|
+
*
|
|
59
|
+
* See `waitUntil` documentation in
|
|
60
|
+
* [Cloudflare](https://developers.cloudflare.com/workers/runtime-apis/handlers/fetch/#contextwaituntil)
|
|
61
|
+
* and [Vercel](https://vercel.com/docs/functions/edge-middleware/middleware-api#waituntil)
|
|
62
|
+
* for more details.
|
|
63
|
+
* ```
|
|
64
|
+
*/
|
|
65
|
+
pending: Promise<unknown>;
|
|
66
|
+
/**
|
|
67
|
+
* Reason behind the result in `success` field.
|
|
68
|
+
* - Is set to "timeout" when request times out
|
|
69
|
+
* - Is set to "cacheBlock" when an identifier is blocked through cache without calling redis because it was
|
|
70
|
+
* rate limited previously.
|
|
71
|
+
* - Is set to "denyList" when identifier or one of ip/user-agent/country parameters is in deny list. To enable
|
|
72
|
+
* deny list, see `enableProtection` parameter. To edit the deny list, see the Upstash Ratelimit Dashboard
|
|
73
|
+
* at https://console.upstash.com/ratelimit.
|
|
74
|
+
* - Is set to undefined if rate limit check had to use Redis. This happens in cases when `success` field in
|
|
75
|
+
* the response is true. It can also happen the first time success is false.
|
|
76
|
+
*/
|
|
77
|
+
reason?: RatelimitResponseType;
|
|
78
|
+
/**
|
|
79
|
+
* The value which was in the deny list if reason: "denyList"
|
|
80
|
+
*/
|
|
81
|
+
deniedValue?: DeniedValue;
|
|
82
|
+
};
|
|
83
|
+
type Algorithm<TContext> = () => {
|
|
84
|
+
limit: (ctx: TContext, identifier: string, rate?: number, opts?: {
|
|
85
|
+
cache?: EphemeralCache;
|
|
86
|
+
}) => Promise<RatelimitResponse>;
|
|
87
|
+
getRemaining: (ctx: TContext, identifier: string) => Promise<{
|
|
88
|
+
remaining: number;
|
|
89
|
+
reset: number;
|
|
90
|
+
}>;
|
|
91
|
+
resetTokens: (ctx: TContext, identifier: string) => Promise<void>;
|
|
92
|
+
};
|
|
93
|
+
type IsDenied = 0 | 1;
|
|
94
|
+
type DeniedValue = string | undefined;
|
|
95
|
+
type LimitOptions = {
|
|
96
|
+
geo?: Geo;
|
|
97
|
+
rate?: number;
|
|
98
|
+
ip?: string;
|
|
99
|
+
userAgent?: string;
|
|
100
|
+
country?: string;
|
|
101
|
+
};
|
|
102
|
+
/**
|
|
103
|
+
* This is all we need from the redis sdk.
|
|
104
|
+
*/
|
|
105
|
+
type Redis = {
|
|
106
|
+
sadd: <TData>(key: string, ...members: TData[]) => Promise<number>;
|
|
107
|
+
hset: <TValue>(key: string, obj: {
|
|
108
|
+
[key: string]: TValue;
|
|
109
|
+
}) => Promise<number>;
|
|
110
|
+
eval: <TArgs extends unknown[], TData = unknown>(...args: [script: string, keys: string[], args: TArgs]) => Promise<TData>;
|
|
111
|
+
evalsha: <TArgs extends unknown[], TData = unknown>(...args: [sha1: string, keys: string[], args: TArgs]) => Promise<TData>;
|
|
112
|
+
scriptLoad: (...args: [script: string]) => Promise<string>;
|
|
113
|
+
smismember: (key: string, members: string[]) => Promise<IsDenied[]>;
|
|
114
|
+
multi: () => Pipeline;
|
|
115
|
+
};
|
|
116
|
+
|
|
117
|
+
type Geo = {
|
|
118
|
+
country?: string;
|
|
119
|
+
city?: string;
|
|
120
|
+
region?: string;
|
|
121
|
+
ip?: string;
|
|
122
|
+
};
|
|
123
|
+
/**
|
|
124
|
+
* denotes the success field in the analytics submission.
|
|
125
|
+
* Set to true when ratelimit check passes. False when request is ratelimited.
|
|
126
|
+
* Set to "denied" when some request value is in deny list.
|
|
127
|
+
*/
|
|
128
|
+
type EventSuccess = boolean | "denied";
|
|
129
|
+
type Event = Geo & {
|
|
130
|
+
identifier: string;
|
|
131
|
+
time: number;
|
|
132
|
+
success: EventSuccess;
|
|
133
|
+
};
|
|
134
|
+
type AnalyticsConfig = {
|
|
135
|
+
redis: Redis;
|
|
136
|
+
prefix?: string;
|
|
137
|
+
};
|
|
138
|
+
/**
|
|
139
|
+
* The Analytics package is experimental and can change at any time.
|
|
140
|
+
*/
|
|
141
|
+
declare class Analytics {
|
|
142
|
+
private readonly analytics;
|
|
143
|
+
private readonly table;
|
|
144
|
+
constructor(config: AnalyticsConfig);
|
|
145
|
+
/**
|
|
146
|
+
* Try to extract the geo information from the request
|
|
147
|
+
*
|
|
148
|
+
* This handles Vercel's `req.geo` and Cloudflare's `request.cf` properties
|
|
149
|
+
* @param req
|
|
150
|
+
* @returns
|
|
151
|
+
*/
|
|
152
|
+
extractGeo(req: {
|
|
153
|
+
geo?: Geo;
|
|
154
|
+
cf?: Geo;
|
|
155
|
+
}): Geo;
|
|
156
|
+
record(event: Event): Promise<void>;
|
|
157
|
+
series<TFilter extends keyof Omit<Event, "time">>(filter: TFilter, cutoff: number): Promise<Aggregate[]>;
|
|
158
|
+
getUsage(cutoff?: number): Promise<Record<string, {
|
|
159
|
+
success: number;
|
|
160
|
+
blocked: number;
|
|
161
|
+
}>>;
|
|
162
|
+
getUsageOverTime<TFilter extends keyof Omit<Event, "time">>(timestampCount: number, groupby: TFilter): Promise<Aggregate[]>;
|
|
163
|
+
getMostAllowedBlocked(timestampCount: number, getTop?: number, checkAtMost?: number): Promise<{
|
|
164
|
+
allowed: {
|
|
165
|
+
identifier: string;
|
|
166
|
+
count: number;
|
|
167
|
+
}[];
|
|
168
|
+
ratelimited: {
|
|
169
|
+
identifier: string;
|
|
170
|
+
count: number;
|
|
171
|
+
}[];
|
|
172
|
+
denied: {
|
|
173
|
+
identifier: string;
|
|
174
|
+
count: number;
|
|
175
|
+
}[];
|
|
176
|
+
}>;
|
|
177
|
+
}
|
|
178
|
+
|
|
179
|
+
type Unit = "ms" | "s" | "m" | "h" | "d";
|
|
180
|
+
type Duration = `${number} ${Unit}` | `${number}${Unit}`;
|
|
181
|
+
|
|
182
|
+
type RatelimitConfig<TContext> = {
|
|
183
|
+
/**
|
|
184
|
+
* The ratelimiter function to use.
|
|
185
|
+
*
|
|
186
|
+
* Choose one of the predefined ones or implement your own.
|
|
187
|
+
* Available algorithms are exposed via static methods:
|
|
188
|
+
* - Ratelimiter.fixedWindow
|
|
189
|
+
* - Ratelimiter.slidingWindow
|
|
190
|
+
* - Ratelimiter.tokenBucket
|
|
191
|
+
*/
|
|
192
|
+
limiter: Algorithm<TContext>;
|
|
193
|
+
ctx: TContext;
|
|
194
|
+
/**
|
|
195
|
+
* All keys in redis are prefixed with this.
|
|
196
|
+
*
|
|
197
|
+
* @default `@upstash/ratelimit`
|
|
198
|
+
*/
|
|
199
|
+
prefix?: string;
|
|
200
|
+
/**
|
|
201
|
+
* If enabled, the ratelimiter will keep a global cache of identifiers, that have
|
|
202
|
+
* exhausted their ratelimit. In serverless environments this is only possible if
|
|
203
|
+
* you create the ratelimiter instance outside of your handler function. While the
|
|
204
|
+
* function is still hot, the ratelimiter can block requests without having to
|
|
205
|
+
* request data from redis, thus saving time and money.
|
|
206
|
+
*
|
|
207
|
+
* Whenever an identifier has exceeded its limit, the ratelimiter will add it to an
|
|
208
|
+
* internal list together with its reset timestamp. If the same identifier makes a
|
|
209
|
+
* new request before it is reset, we can immediately reject it.
|
|
210
|
+
*
|
|
211
|
+
* Set to `false` to disable.
|
|
212
|
+
*
|
|
213
|
+
* If left undefined, a map is created automatically, but it can only work
|
|
214
|
+
* if the map or the ratelimit instance is created outside your serverless function handler.
|
|
215
|
+
*/
|
|
216
|
+
ephemeralCache?: Map<string, number> | false;
|
|
217
|
+
/**
|
|
218
|
+
* If set, the ratelimiter will allow requests to pass after this many milliseconds.
|
|
219
|
+
*
|
|
220
|
+
* Use this if you want to allow requests in case of network problems
|
|
221
|
+
*
|
|
222
|
+
* @default 5000
|
|
223
|
+
*/
|
|
224
|
+
timeout?: number;
|
|
225
|
+
/**
|
|
226
|
+
* If enabled, the ratelimiter will store analytics data in redis, which you can check out at
|
|
227
|
+
* https://console.upstash.com/ratelimit
|
|
228
|
+
*
|
|
229
|
+
* @default false
|
|
230
|
+
*/
|
|
231
|
+
analytics?: boolean;
|
|
232
|
+
/**
|
|
233
|
+
* Enables deny list. If set to true, requests with identifier or ip/user-agent/country
|
|
234
|
+
* in the deny list will be rejected automatically. To edit the deny list, check out the
|
|
235
|
+
* ratelimit dashboard at https://console.upstash.com/ratelimit
|
|
236
|
+
*
|
|
237
|
+
* @default false
|
|
238
|
+
*/
|
|
239
|
+
enableProtection?: boolean;
|
|
240
|
+
denyListThreshold?: number;
|
|
241
|
+
};
|
|
242
|
+
/**
|
|
243
|
+
* Ratelimiter using serverless redis from https://upstash.com/
|
|
244
|
+
*
|
|
245
|
+
* @example
|
|
246
|
+
* ```ts
|
|
247
|
+
* const { limit } = new Ratelimit({
|
|
248
|
+
* redis: Redis.fromEnv(),
|
|
249
|
+
* limiter: Ratelimit.slidingWindow(
|
|
250
|
+
* 10, // Allow 10 requests per window of 30 minutes
|
|
251
|
+
* "30 m", // interval of 30 minutes
|
|
252
|
+
* ),
|
|
253
|
+
* })
|
|
254
|
+
*
|
|
255
|
+
* ```
|
|
256
|
+
*/
|
|
257
|
+
declare abstract class Ratelimit<TContext extends Context> {
|
|
258
|
+
protected readonly limiter: Algorithm<TContext>;
|
|
259
|
+
protected readonly ctx: TContext;
|
|
260
|
+
protected readonly prefix: string;
|
|
261
|
+
protected readonly timeout: number;
|
|
262
|
+
protected readonly primaryRedis: Redis;
|
|
263
|
+
protected readonly analytics?: Analytics;
|
|
264
|
+
protected readonly enableProtection: boolean;
|
|
265
|
+
protected readonly denyListThreshold: number;
|
|
266
|
+
constructor(config: RatelimitConfig<TContext>);
|
|
267
|
+
/**
|
|
268
|
+
* Determine if a request should pass or be rejected based on the identifier and previously chosen ratelimit.
|
|
269
|
+
*
|
|
270
|
+
* Use this if you want to reject all requests that you can not handle right now.
|
|
271
|
+
*
|
|
272
|
+
* @example
|
|
273
|
+
* ```ts
|
|
274
|
+
* const ratelimit = new Ratelimit({
|
|
275
|
+
* redis: Redis.fromEnv(),
|
|
276
|
+
* limiter: Ratelimit.slidingWindow(10, "10 s")
|
|
277
|
+
* })
|
|
278
|
+
*
|
|
279
|
+
* const { success } = await ratelimit.limit(id)
|
|
280
|
+
* if (!success){
|
|
281
|
+
* return "Nope"
|
|
282
|
+
* }
|
|
283
|
+
* return "Yes"
|
|
284
|
+
* ```
|
|
285
|
+
*
|
|
286
|
+
* @param req.rate - The rate at which tokens will be added or consumed from the token bucket. A higher rate allows for more requests to be processed. Defaults to 1 token per interval if not specified.
|
|
287
|
+
*
|
|
288
|
+
* Usage with `req.rate`
|
|
289
|
+
* @example
|
|
290
|
+
* ```ts
|
|
291
|
+
* const ratelimit = new Ratelimit({
|
|
292
|
+
* redis: Redis.fromEnv(),
|
|
293
|
+
* limiter: Ratelimit.slidingWindow(100, "10 s")
|
|
294
|
+
* })
|
|
295
|
+
*
|
|
296
|
+
* const { success } = await ratelimit.limit(id, {rate: 10})
|
|
297
|
+
* if (!success){
|
|
298
|
+
* return "Nope"
|
|
299
|
+
* }
|
|
300
|
+
* return "Yes"
|
|
301
|
+
* ```
|
|
302
|
+
*/
|
|
303
|
+
limit: (identifier: string, req?: LimitOptions) => Promise<RatelimitResponse>;
|
|
304
|
+
/**
|
|
305
|
+
* Block until the request may pass or timeout is reached.
|
|
306
|
+
*
|
|
307
|
+
* This method returns a promise that resolves as soon as the request may be processed
|
|
308
|
+
* or after the timeout has been reached.
|
|
309
|
+
*
|
|
310
|
+
* Use this if you want to delay the request until it is ready to get processed.
|
|
311
|
+
*
|
|
312
|
+
* @example
|
|
313
|
+
* ```ts
|
|
314
|
+
* const ratelimit = new Ratelimit({
|
|
315
|
+
* redis: Redis.fromEnv(),
|
|
316
|
+
* limiter: Ratelimit.slidingWindow(10, "10 s")
|
|
317
|
+
* })
|
|
318
|
+
*
|
|
319
|
+
* const { success } = await ratelimit.blockUntilReady(id, 60_000)
|
|
320
|
+
* if (!success){
|
|
321
|
+
* return "Nope"
|
|
322
|
+
* }
|
|
323
|
+
* return "Yes"
|
|
324
|
+
* ```
|
|
325
|
+
*/
|
|
326
|
+
blockUntilReady: (identifier: string, timeout: number) => Promise<RatelimitResponse>;
|
|
327
|
+
resetUsedTokens: (identifier: string) => Promise<void>;
|
|
328
|
+
/**
|
|
329
|
+
* Returns the remaining token count together with a reset timestamp
|
|
330
|
+
*
|
|
331
|
+
* @param identifier identifier to check
|
|
332
|
+
* @returns object with `remaining` and reset fields. `remaining` denotes
|
|
333
|
+
* the remaining tokens and reset denotes the timestamp when the
|
|
334
|
+
* tokens reset.
|
|
335
|
+
*/
|
|
336
|
+
getRemaining: (identifier: string) => Promise<{
|
|
337
|
+
remaining: number;
|
|
338
|
+
reset: number;
|
|
339
|
+
}>;
|
|
340
|
+
/**
|
|
341
|
+
* Checks if the identifier or the values in req are in the deny list cache.
|
|
342
|
+
* If so, returns the default denied response.
|
|
343
|
+
*
|
|
344
|
+
* Otherwise, calls redis to check the rate limit and deny list. Returns after
|
|
345
|
+
* resolving the result. Resolving is overriding the rate limit result if
|
|
346
|
+
* some value is in the deny list.
|
|
347
|
+
*
|
|
348
|
+
* @param identifier identifier to block
|
|
349
|
+
* @param req options with ip, user agent, country, rate and geo info
|
|
350
|
+
* @returns rate limit response
|
|
351
|
+
*/
|
|
352
|
+
private getRatelimitResponse;
|
|
353
|
+
/**
|
|
354
|
+
* Creates an array with the original response promise and a timeout promise
|
|
355
|
+
* if this.timeout > 0.
|
|
356
|
+
*
|
|
357
|
+
* @param response Ratelimit response promise
|
|
358
|
+
* @returns array with the response and timeout promise. also includes the timeout id
|
|
359
|
+
*/
|
|
360
|
+
private applyTimeout;
|
|
361
|
+
/**
|
|
362
|
+
* submits analytics if this.analytics is set
|
|
363
|
+
*
|
|
364
|
+
* @param ratelimitResponse final rate limit response
|
|
365
|
+
* @param identifier identifier to submit
|
|
366
|
+
* @param req limit options
|
|
367
|
+
* @returns rate limit response after updating the .pending field
|
|
368
|
+
*/
|
|
369
|
+
private submitAnalytics;
|
|
370
|
+
private getKey;
|
|
371
|
+
/**
|
|
372
|
+
* returns a list of defined values from
|
|
373
|
+
* [identifier, req.ip, req.userAgent, req.country]
|
|
374
|
+
*
|
|
375
|
+
* @param identifier identifier
|
|
376
|
+
* @param req limit options
|
|
377
|
+
* @returns list of defined values
|
|
378
|
+
*/
|
|
379
|
+
private getDefinedMembers;
|
|
380
|
+
}
|
|
381
|
+
|
|
382
|
+
type MultiRegionRatelimitConfig = {
|
|
383
|
+
/**
|
|
384
|
+
* Instances of `@upstash/redis`
|
|
385
|
+
* @see https://github.com/upstash/upstash-redis#quick-start
|
|
386
|
+
*/
|
|
387
|
+
redis: Redis[];
|
|
388
|
+
/**
|
|
389
|
+
* The ratelimiter function to use.
|
|
390
|
+
*
|
|
391
|
+
* Choose one of the predefined ones or implement your own.
|
|
392
|
+
* Available algorithms are exposed via static methods:
|
|
393
|
+
* - MultiRegionRatelimit.fixedWindow
|
|
394
|
+
*/
|
|
395
|
+
limiter: Algorithm<MultiRegionContext>;
|
|
396
|
+
/**
|
|
397
|
+
* All keys in redis are prefixed with this.
|
|
398
|
+
*
|
|
399
|
+
* @default `@upstash/ratelimit`
|
|
400
|
+
*/
|
|
401
|
+
prefix?: string;
|
|
402
|
+
/**
|
|
403
|
+
* If enabled, the ratelimiter will keep a global cache of identifiers, that have
|
|
404
|
+
* exhausted their ratelimit. In serverless environments this is only possible if
|
|
405
|
+
* you create the ratelimiter instance outside of your handler function. While the
|
|
406
|
+
* function is still hot, the ratelimiter can block requests without having to
|
|
407
|
+
* request data from redis, thus saving time and money.
|
|
408
|
+
*
|
|
409
|
+
* Whenever an identifier has exceeded its limit, the ratelimiter will add it to an
|
|
410
|
+
* internal list together with its reset timestamp. If the same identifier makes a
|
|
411
|
+
* new request before it is reset, we can immediately reject it.
|
|
412
|
+
*
|
|
413
|
+
* Set to `false` to disable.
|
|
414
|
+
*
|
|
415
|
+
* If left undefined, a map is created automatically, but it can only work
|
|
416
|
+
* if the map or the ratelimit instance is created outside your serverless function handler.
|
|
417
|
+
*/
|
|
418
|
+
ephemeralCache?: Map<string, number> | false;
|
|
419
|
+
/**
|
|
420
|
+
* If set, the ratelimiter will allow requests to pass after this many milliseconds.
|
|
421
|
+
*
|
|
422
|
+
* Use this if you want to allow requests in case of network problems
|
|
423
|
+
*/
|
|
424
|
+
timeout?: number;
|
|
425
|
+
/**
|
|
426
|
+
* If enabled, the ratelimiter will store analytics data in redis, which you can check out at
|
|
427
|
+
* https://console.upstash.com/ratelimit
|
|
428
|
+
*
|
|
429
|
+
* @default false
|
|
430
|
+
*/
|
|
431
|
+
analytics?: boolean;
|
|
432
|
+
/**
|
|
433
|
+
* If enabled, lua scripts will be sent to Redis with SCRIPT LOAD during the first request.
|
|
434
|
+
* In the subsequent requests, hash of the script will be used to invoke it
|
|
435
|
+
*
|
|
436
|
+
* @default true
|
|
437
|
+
*/
|
|
438
|
+
cacheScripts?: boolean;
|
|
439
|
+
};
|
|
440
|
+
/**
|
|
441
|
+
* Ratelimiter using serverless redis from https://upstash.com/
|
|
442
|
+
*
|
|
443
|
+
* @example
|
|
444
|
+
* ```ts
|
|
445
|
+
* const { limit } = new MultiRegionRatelimit({
|
|
446
|
+
* redis: Redis.fromEnv(),
|
|
447
|
+
* limiter: MultiRegionRatelimit.fixedWindow(
|
|
448
|
+
* 10, // Allow 10 requests per window of 30 minutes
|
|
449
|
+
* "30 m", // interval of 30 minutes
|
|
450
|
+
* )
|
|
451
|
+
* })
|
|
452
|
+
*
|
|
453
|
+
* ```
|
|
454
|
+
*/
|
|
455
|
+
declare class MultiRegionRatelimit extends Ratelimit<MultiRegionContext> {
|
|
456
|
+
/**
|
|
457
|
+
* Create a new Ratelimit instance by providing a `@upstash/redis` instance and the algorithm of your choice.
|
|
458
|
+
*/
|
|
459
|
+
constructor(config: MultiRegionRatelimitConfig);
|
|
460
|
+
/**
|
|
461
|
+
* Each request inside a fixed time increases a counter.
|
|
462
|
+
* Once the counter reaches the maximum allowed number, all further requests are
|
|
463
|
+
* rejected.
|
|
464
|
+
*
|
|
465
|
+
* **Pro:**
|
|
466
|
+
*
|
|
467
|
+
* - Newer requests are not starved by old ones.
|
|
468
|
+
* - Low storage cost.
|
|
469
|
+
*
|
|
470
|
+
* **Con:**
|
|
471
|
+
*
|
|
472
|
+
* A burst of requests near the boundary of a window can result in a very
|
|
473
|
+
* high request rate because two windows will be filled with requests quickly.
|
|
474
|
+
*
|
|
475
|
+
* @param tokens - How many requests a user can make in each time window.
|
|
476
|
+
* @param window - A fixed timeframe
|
|
477
|
+
*/
|
|
478
|
+
static fixedWindow(
|
|
479
|
+
/**
|
|
480
|
+
* How many requests are allowed per window.
|
|
481
|
+
*/
|
|
482
|
+
tokens: number,
|
|
483
|
+
/**
|
|
484
|
+
* The duration in which `tokens` requests are allowed.
|
|
485
|
+
*/
|
|
486
|
+
window: Duration): Algorithm<MultiRegionContext>;
|
|
487
|
+
/**
|
|
488
|
+
* Combined approach of `slidingLogs` and `fixedWindow` with lower storage
|
|
489
|
+
* costs than `slidingLogs` and improved boundary behavior by calculating a
|
|
490
|
+
* weighted score between two windows.
|
|
491
|
+
*
|
|
492
|
+
* **Pro:**
|
|
493
|
+
*
|
|
494
|
+
* Good performance allows this to scale to very high loads.
|
|
495
|
+
*
|
|
496
|
+
* **Con:**
|
|
497
|
+
*
|
|
498
|
+
* Nothing major.
|
|
499
|
+
*
|
|
500
|
+
* @param tokens - How many requests a user can make in each time window.
|
|
501
|
+
* @param window - The duration in which the user can max X requests.
|
|
502
|
+
*/
|
|
503
|
+
static slidingWindow(
|
|
504
|
+
/**
|
|
505
|
+
* How many requests are allowed per window.
|
|
506
|
+
*/
|
|
507
|
+
tokens: number,
|
|
508
|
+
/**
|
|
509
|
+
* The duration in which `tokens` requests are allowed.
|
|
510
|
+
*/
|
|
511
|
+
window: Duration): Algorithm<MultiRegionContext>;
|
|
512
|
+
}
|
|
513
|
+
|
|
514
|
+
type RegionRatelimitConfig = {
|
|
515
|
+
/**
|
|
516
|
+
* Instance of `@upstash/redis`
|
|
517
|
+
* @see https://github.com/upstash/upstash-redis#quick-start
|
|
518
|
+
*/
|
|
519
|
+
redis: Redis;
|
|
520
|
+
/**
|
|
521
|
+
* The ratelimiter function to use.
|
|
522
|
+
*
|
|
523
|
+
* Choose one of the predefined ones or implement your own.
|
|
524
|
+
* Available algorithms are exposed via static methods:
|
|
525
|
+
* - Ratelimiter.fixedWindow
|
|
526
|
+
* - Ratelimiter.slidingWindow
|
|
527
|
+
* - Ratelimiter.tokenBucket
|
|
528
|
+
*/
|
|
529
|
+
limiter: Algorithm<RegionContext>;
|
|
530
|
+
/**
|
|
531
|
+
* All keys in redis are prefixed with this.
|
|
532
|
+
*
|
|
533
|
+
* @default `@upstash/ratelimit`
|
|
534
|
+
*/
|
|
535
|
+
prefix?: string;
|
|
536
|
+
/**
|
|
537
|
+
* If enabled, the ratelimiter will keep a global cache of identifiers, that have
|
|
538
|
+
* exhausted their ratelimit. In serverless environments this is only possible if
|
|
539
|
+
* you create the ratelimiter instance outside of your handler function. While the
|
|
540
|
+
* function is still hot, the ratelimiter can block requests without having to
|
|
541
|
+
* request data from redis, thus saving time and money.
|
|
542
|
+
*
|
|
543
|
+
* Whenever an identifier has exceeded its limit, the ratelimiter will add it to an
|
|
544
|
+
* internal list together with its reset timestamp. If the same identifier makes a
|
|
545
|
+
* new request before it is reset, we can immediately reject it.
|
|
546
|
+
*
|
|
547
|
+
* Set to `false` to disable.
|
|
548
|
+
*
|
|
549
|
+
* If left undefined, a map is created automatically, but it can only work
|
|
550
|
+
* if the map or the ratelimit instance is created outside your serverless function handler.
|
|
551
|
+
*/
|
|
552
|
+
ephemeralCache?: Map<string, number> | false;
|
|
553
|
+
/**
|
|
554
|
+
* If set, the ratelimiter will allow requests to pass after this many milliseconds.
|
|
555
|
+
*
|
|
556
|
+
* Use this if you want to allow requests in case of network problems
|
|
557
|
+
*/
|
|
558
|
+
timeout?: number;
|
|
559
|
+
/**
|
|
560
|
+
* If enabled, the ratelimiter will store analytics data in redis, which you can check out at
|
|
561
|
+
* https://console.upstash.com/ratelimit
|
|
562
|
+
*
|
|
563
|
+
* @default false
|
|
564
|
+
*/
|
|
565
|
+
analytics?: boolean;
|
|
566
|
+
/**
|
|
567
|
+
* @deprecated Has no effect since v2.0.3. Instead, hash values of scripts are
|
|
568
|
+
* hardcoded in the sdk and it attempts to run the script using EVALSHA (with the hash).
|
|
569
|
+
* If it fails, runs script load.
|
|
570
|
+
*
|
|
571
|
+
* Previously, if enabled, lua scripts were sent to Redis with SCRIPT LOAD during the first request.
|
|
572
|
+
* In the subsequent requests, hash of the script would be used to invoke the scripts
|
|
573
|
+
*
|
|
574
|
+
* @default true
|
|
575
|
+
*/
|
|
576
|
+
cacheScripts?: boolean;
|
|
577
|
+
/**
|
|
578
|
+
* @default false
|
|
579
|
+
*/
|
|
580
|
+
enableProtection?: boolean;
|
|
581
|
+
/**
|
|
582
|
+
* @default 6
|
|
583
|
+
*/
|
|
584
|
+
denyListThreshold?: number;
|
|
585
|
+
};
|
|
586
|
+
/**
|
|
587
|
+
* Ratelimiter using serverless redis from https://upstash.com/
|
|
588
|
+
*
|
|
589
|
+
* @example
|
|
590
|
+
* ```ts
|
|
591
|
+
* const { limit } = new Ratelimit({
|
|
592
|
+
* redis: Redis.fromEnv(),
|
|
593
|
+
* limiter: Ratelimit.slidingWindow(
|
|
594
|
+
* 10, // Allow 10 requests per window of 30 minutes
|
|
595
|
+
* "30 m", // interval of 30 minutes
|
|
596
|
+
* )
|
|
597
|
+
* })
|
|
598
|
+
*
|
|
599
|
+
* ```
|
|
600
|
+
*/
|
|
601
|
+
declare class RegionRatelimit extends Ratelimit<RegionContext> {
|
|
602
|
+
/**
|
|
603
|
+
* Create a new Ratelimit instance by providing a `@upstash/redis` instance and the algorithm of your choice.
|
|
604
|
+
*/
|
|
605
|
+
constructor(config: RegionRatelimitConfig);
|
|
606
|
+
/**
|
|
607
|
+
* Each request inside a fixed time increases a counter.
|
|
608
|
+
* Once the counter reaches the maximum allowed number, all further requests are
|
|
609
|
+
* rejected.
|
|
610
|
+
*
|
|
611
|
+
* **Pro:**
|
|
612
|
+
*
|
|
613
|
+
* - Newer requests are not starved by old ones.
|
|
614
|
+
* - Low storage cost.
|
|
615
|
+
*
|
|
616
|
+
* **Con:**
|
|
617
|
+
*
|
|
618
|
+
* A burst of requests near the boundary of a window can result in a very
|
|
619
|
+
* high request rate because two windows will be filled with requests quickly.
|
|
620
|
+
*
|
|
621
|
+
* @param tokens - How many requests a user can make in each time window.
|
|
622
|
+
* @param window - A fixed timeframe
|
|
623
|
+
*/
|
|
624
|
+
static fixedWindow(
|
|
625
|
+
/**
|
|
626
|
+
* How many requests are allowed per window.
|
|
627
|
+
*/
|
|
628
|
+
tokens: number,
|
|
629
|
+
/**
|
|
630
|
+
* The duration in which `tokens` requests are allowed.
|
|
631
|
+
*/
|
|
632
|
+
window: Duration): Algorithm<RegionContext>;
|
|
633
|
+
/**
|
|
634
|
+
* Combined approach of `slidingLogs` and `fixedWindow` with lower storage
|
|
635
|
+
* costs than `slidingLogs` and improved boundary behavior by calculating a
|
|
636
|
+
* weighted score between two windows.
|
|
637
|
+
*
|
|
638
|
+
* **Pro:**
|
|
639
|
+
*
|
|
640
|
+
* Good performance allows this to scale to very high loads.
|
|
641
|
+
*
|
|
642
|
+
* **Con:**
|
|
643
|
+
*
|
|
644
|
+
* Nothing major.
|
|
645
|
+
*
|
|
646
|
+
* @param tokens - How many requests a user can make in each time window.
|
|
647
|
+
* @param window - The duration in which the user can max X requests.
|
|
648
|
+
*/
|
|
649
|
+
static slidingWindow(
|
|
650
|
+
/**
|
|
651
|
+
* How many requests are allowed per window.
|
|
652
|
+
*/
|
|
653
|
+
tokens: number,
|
|
654
|
+
/**
|
|
655
|
+
* The duration in which `tokens` requests are allowed.
|
|
656
|
+
*/
|
|
657
|
+
window: Duration): Algorithm<RegionContext>;
|
|
658
|
+
/**
|
|
659
|
+
* You have a bucket filled with `{maxTokens}` tokens that refills constantly
|
|
660
|
+
* at `{refillRate}` per `{interval}`.
|
|
661
|
+
* Every request will remove one token from the bucket and if there is no
|
|
662
|
+
* token to take, the request is rejected.
|
|
663
|
+
*
|
|
664
|
+
* **Pro:**
|
|
665
|
+
*
|
|
666
|
+
* - Bursts of requests are smoothed out and you can process them at a constant
|
|
667
|
+
* rate.
|
|
668
|
+
* - Allows to set a higher initial burst limit by setting `maxTokens` higher
|
|
669
|
+
* than `refillRate`
|
|
670
|
+
*/
|
|
671
|
+
static tokenBucket(
|
|
672
|
+
/**
|
|
673
|
+
* How many tokens are refilled per `interval`
|
|
674
|
+
*
|
|
675
|
+
* An interval of `10s` and refillRate of 5 will cause a new token to be added every 2 seconds.
|
|
676
|
+
*/
|
|
677
|
+
refillRate: number,
|
|
678
|
+
/**
|
|
679
|
+
* The interval for the `refillRate`
|
|
680
|
+
*/
|
|
681
|
+
interval: Duration,
|
|
682
|
+
/**
|
|
683
|
+
* Maximum number of tokens.
|
|
684
|
+
* A newly created bucket starts with this many tokens.
|
|
685
|
+
* Useful to allow higher burst limits.
|
|
686
|
+
*/
|
|
687
|
+
maxTokens: number): Algorithm<RegionContext>;
|
|
688
|
+
/**
|
|
689
|
+
* cachedFixedWindow first uses the local cache to decide if a request may pass and then updates
|
|
690
|
+
* it asynchronously.
|
|
691
|
+
* This is experimental and not yet recommended for production use.
|
|
692
|
+
*
|
|
693
|
+
* @experimental
|
|
694
|
+
*
|
|
695
|
+
* Each request inside a fixed time increases a counter.
|
|
696
|
+
* Once the counter reaches the maximum allowed number, all further requests are
|
|
697
|
+
* rejected.
|
|
698
|
+
*
|
|
699
|
+
* **Pro:**
|
|
700
|
+
*
|
|
701
|
+
* - Newer requests are not starved by old ones.
|
|
702
|
+
* - Low storage cost.
|
|
703
|
+
*
|
|
704
|
+
* **Con:**
|
|
705
|
+
*
|
|
706
|
+
* A burst of requests near the boundary of a window can result in a very
|
|
707
|
+
* high request rate because two windows will be filled with requests quickly.
|
|
708
|
+
*
|
|
709
|
+
* @param tokens - How many requests a user can make in each time window.
|
|
710
|
+
* @param window - A fixed timeframe
|
|
711
|
+
*/
|
|
712
|
+
static cachedFixedWindow(
|
|
713
|
+
/**
|
|
714
|
+
* How many requests are allowed per window.
|
|
715
|
+
*/
|
|
716
|
+
tokens: number,
|
|
717
|
+
/**
|
|
718
|
+
* The duration in which `tokens` requests are allowed.
|
|
719
|
+
*/
|
|
720
|
+
window: Duration): Algorithm<RegionContext>;
|
|
721
|
+
}
|
|
722
|
+
|
|
723
|
+
declare class ThresholdError extends Error {
|
|
724
|
+
constructor(threshold: number);
|
|
725
|
+
}
|
|
726
|
+
/**
|
|
727
|
+
* Gets the list of ips from the github source which are not in the
|
|
728
|
+
* deny list already
|
|
729
|
+
*
|
|
730
|
+
* First, gets the ip list from github using the threshold. Then, calls redis with
|
|
731
|
+
* a transaction which does the following:
|
|
732
|
+
* - subtract the current ip deny list from all
|
|
733
|
+
* - delete current ip deny list
|
|
734
|
+
* - recreate ip deny list with the ips from github. Ips already in the users own lists
|
|
735
|
+
* are excluded.
|
|
736
|
+
* - status key is set to valid with ttl until next 2 AM UTC, which is a bit later than
|
|
737
|
+
* when the list is updated on github.
|
|
738
|
+
*
|
|
739
|
+
* @param redis redis instance
|
|
740
|
+
* @param prefix ratelimit prefix
|
|
741
|
+
* @param threshold ips with less than or equal to the threshold are not included
|
|
742
|
+
* @param ttl time to live in milliseconds for the status flag. Optional. If not
|
|
743
|
+
* passed, ttl is inferred from current time.
|
|
744
|
+
* @returns list of ips which are not in the deny list
|
|
745
|
+
*/
|
|
746
|
+
declare const updateIpDenyList: (redis: Redis, prefix: string, threshold: number, ttl?: number) => Promise<unknown[]>;
|
|
747
|
+
/**
|
|
748
|
+
* Disables the ip deny list by removing the ip deny list from the all
|
|
749
|
+
* set and removing the ip deny list. Also sets the status key to disabled
|
|
750
|
+
* with no ttl.
|
|
751
|
+
*
|
|
752
|
+
* @param redis redis instance
|
|
753
|
+
* @param prefix ratelimit prefix
|
|
754
|
+
* @returns
|
|
755
|
+
*/
|
|
756
|
+
declare const disableIpDenyList: (redis: Redis, prefix: string) => Promise<unknown[]>;
|
|
757
|
+
|
|
758
|
+
type ipDenyList_ThresholdError = ThresholdError;
|
|
759
|
+
declare const ipDenyList_ThresholdError: typeof ThresholdError;
|
|
760
|
+
declare const ipDenyList_disableIpDenyList: typeof disableIpDenyList;
|
|
761
|
+
declare const ipDenyList_updateIpDenyList: typeof updateIpDenyList;
|
|
762
|
+
declare namespace ipDenyList {
|
|
763
|
+
export {
|
|
764
|
+
ipDenyList_ThresholdError as ThresholdError,
|
|
765
|
+
ipDenyList_disableIpDenyList as disableIpDenyList,
|
|
766
|
+
ipDenyList_updateIpDenyList as updateIpDenyList,
|
|
767
|
+
};
|
|
768
|
+
}
|
|
769
|
+
|
|
770
|
+
export { Algorithm, Analytics, AnalyticsConfig, Duration, ipDenyList as IpDenyList, MultiRegionRatelimit, MultiRegionRatelimitConfig, RegionRatelimit as Ratelimit, RegionRatelimitConfig as RatelimitConfig };
|