@upstash/ratelimit 0.4.5-canary.0 → 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +2 -2
- package/dist/index.d.mts +556 -0
- package/dist/index.d.ts +556 -0
- package/dist/index.js +832 -0
- package/dist/index.js.map +1 -0
- package/dist/index.mjs +803 -0
- package/dist/index.mjs.map +1 -0
- package/package.json +1 -22
- package/.github/actions/redis/action.yaml +0 -58
- package/.github/img/dashboard.png +0 -0
- package/.github/workflows/release.yml +0 -46
- package/.github/workflows/stale.yaml +0 -31
- package/.github/workflows/tests.yaml +0 -79
- package/biome.json +0 -37
- package/bun.lockb +0 -0
- package/cmd/set-version.js +0 -14
- package/examples/cloudflare-workers/package.json +0 -18
- package/examples/cloudflare-workers/src/index.ts +0 -35
- package/examples/cloudflare-workers/tsconfig.json +0 -105
- package/examples/cloudflare-workers/wrangler.toml +0 -3
- package/examples/nextjs/LICENSE +0 -21
- package/examples/nextjs/README.md +0 -17
- package/examples/nextjs/components/Breadcrumb.tsx +0 -67
- package/examples/nextjs/components/Header.tsx +0 -18
- package/examples/nextjs/components/ReadBlogPost.tsx +0 -9
- package/examples/nextjs/components/StarButton.tsx +0 -27
- package/examples/nextjs/middleware.ts +0 -35
- package/examples/nextjs/next-env.d.ts +0 -5
- package/examples/nextjs/package.json +0 -27
- package/examples/nextjs/pages/_app.tsx +0 -47
- package/examples/nextjs/pages/api/blocked.ts +0 -6
- package/examples/nextjs/pages/api/hello.ts +0 -5
- package/examples/nextjs/pages/index.tsx +0 -62
- package/examples/nextjs/postcss.config.js +0 -6
- package/examples/nextjs/public/favicon.ico +0 -0
- package/examples/nextjs/public/github.svg +0 -11
- package/examples/nextjs/public/upstash.svg +0 -27
- package/examples/nextjs/styles/globals.css +0 -76
- package/examples/nextjs/tailwind.config.js +0 -19
- package/examples/nextjs/tsconfig.json +0 -21
- package/examples/nextjs13/README.md +0 -38
- package/examples/nextjs13/app/favicon.ico +0 -0
- package/examples/nextjs13/app/globals.css +0 -107
- package/examples/nextjs13/app/layout.tsx +0 -18
- package/examples/nextjs13/app/page.module.css +0 -271
- package/examples/nextjs13/app/route.tsx +0 -14
- package/examples/nextjs13/next.config.js +0 -8
- package/examples/nextjs13/package.json +0 -22
- package/examples/nextjs13/public/next.svg +0 -1
- package/examples/nextjs13/public/thirteen.svg +0 -1
- package/examples/nextjs13/public/vercel.svg +0 -1
- package/examples/nextjs13/tsconfig.json +0 -28
- package/examples/remix/.env.example +0 -2
- package/examples/remix/.eslintrc.js +0 -4
- package/examples/remix/README.md +0 -59
- package/examples/remix/app/root.tsx +0 -25
- package/examples/remix/app/routes/index.tsx +0 -47
- package/examples/remix/package.json +0 -32
- package/examples/remix/public/favicon.ico +0 -0
- package/examples/remix/remix.config.js +0 -12
- package/examples/remix/remix.env.d.ts +0 -2
- package/examples/remix/server.js +0 -4
- package/examples/remix/tsconfig.json +0 -22
- package/examples/with-vercel-kv/README.md +0 -51
- package/examples/with-vercel-kv/app/favicon.ico +0 -0
- package/examples/with-vercel-kv/app/globals.css +0 -27
- package/examples/with-vercel-kv/app/layout.tsx +0 -21
- package/examples/with-vercel-kv/app/page.tsx +0 -71
- package/examples/with-vercel-kv/next.config.js +0 -8
- package/examples/with-vercel-kv/package.json +0 -25
- package/examples/with-vercel-kv/postcss.config.js +0 -6
- package/examples/with-vercel-kv/public/next.svg +0 -1
- package/examples/with-vercel-kv/public/vercel.svg +0 -1
- package/examples/with-vercel-kv/tailwind.config.js +0 -17
- package/examples/with-vercel-kv/tsconfig.json +0 -28
- package/src/analytics.test.ts +0 -23
- package/src/analytics.ts +0 -92
- package/src/blockUntilReady.test.ts +0 -56
- package/src/cache.test.ts +0 -41
- package/src/cache.ts +0 -43
- package/src/duration.test.ts +0 -23
- package/src/duration.ts +0 -30
- package/src/index.ts +0 -17
- package/src/multi.ts +0 -365
- package/src/ratelimit.test.ts +0 -155
- package/src/ratelimit.ts +0 -238
- package/src/single.ts +0 -487
- package/src/test_utils.ts +0 -65
- package/src/tools/seed.ts +0 -37
- package/src/types.ts +0 -78
- package/src/version.ts +0 -1
- package/tsconfig.json +0 -103
- package/tsup.config.js +0 -11
- package/turbo.json +0 -16
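The listing shows that the published 1.0.0 tarball ships only the prebuilt `dist/` bundle (CJS, ESM, and type declarations) alongside the README and package.json, while the TypeScript sources, tests, examples, and CI configuration were dropped from the package. For orientation, a hedged sketch of typical consumer usage follows; it is based on the package README rather than on anything in this diff, and the limiter values are illustrative.

```ts
// Hedged sketch based on the @upstash/ratelimit README, not on this diff:
// typical consumer usage of the published package (values are illustrative).
import { Ratelimit } from "@upstash/ratelimit";
import { Redis } from "@upstash/redis";

const ratelimit = new Ratelimit({
  redis: Redis.fromEnv(), // reads UPSTASH_REDIS_REST_URL / UPSTASH_REDIS_REST_TOKEN
  limiter: Ratelimit.slidingWindow(10, "10 s"), // 10 requests per 10 seconds
  prefix: "@upstash/ratelimit",
});

export async function handler(identifier: string) {
  const { success, remaining, reset } = await ratelimit.limit(identifier);
  return success
    ? { status: 200, remaining }
    : { status: 429, retryAfterMs: reset - Date.now() };
}
```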
package/src/multi.ts
DELETED
@@ -1,365 +0,0 @@
import { Cache } from "./cache";
import type { Duration } from "./duration";
import { ms } from "./duration";
import { Ratelimit } from "./ratelimit";
import type { Algorithm, MultiRegionContext } from "./types";
import type { Redis } from "./types";

function randomId(): string {
  let result = "";
  const characters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789";
  const charactersLength = characters.length;
  for (let i = 0; i < 16; i++) {
    result += characters.charAt(Math.floor(Math.random() * charactersLength));
  }
  return result;
}

export type MultiRegionRatelimitConfig = {
  /**
   * Instances of `@upstash/redis`
   * @see https://github.com/upstash/upstash-redis#quick-start
   */
  redis: Redis[];
  /**
   * The ratelimiter function to use.
   *
   * Choose one of the predefined ones or implement your own.
   * Available algorithms are exposed via static methods:
   * - MultiRegionRatelimit.fixedWindow
   */
  limiter: Algorithm<MultiRegionContext>;
  /**
   * All keys in redis are prefixed with this.
   *
   * @default `@upstash/ratelimit`
   */
  prefix?: string;

  /**
   * If enabled, the ratelimiter will keep a global cache of identifiers, that have
   * exhausted their ratelimit. In serverless environments this is only possible if
   * you create the ratelimiter instance outside of your handler function. While the
   * function is still hot, the ratelimiter can block requests without having to
   * request data from redis, thus saving time and money.
   *
   * Whenever an identifier has exceeded its limit, the ratelimiter will add it to an
   * internal list together with its reset timestamp. If the same identifier makes a
   * new request before it is reset, we can immediately reject it.
   *
   * Set to `false` to disable.
   *
   * If left undefined, a map is created automatically, but it can only work
   * if the map or the ratelimit instance is created outside your serverless function handler.
   */
  ephemeralCache?: Map<string, number> | false;

  /**
   * If set, the ratelimiter will allow requests to pass after this many milliseconds.
   *
   * Use this if you want to allow requests in case of network problems
   */
  timeout?: number;

  /**
   * If enabled, the ratelimiter will store analytics data in redis, which you can check out at
   * https://console.upstash.com/ratelimit
   *
   * @default true
   */
  analytics?: boolean;
};

/**
 * Ratelimiter using serverless redis from https://upstash.com/
 *
 * @example
 * ```ts
 * const { limit } = new MultiRegionRatelimit({
 *    redis: Redis.fromEnv(),
 *    limiter: MultiRegionRatelimit.fixedWindow(
 *      10, // Allow 10 requests per window of 30 minutes
 *      "30 m", // interval of 30 minutes
 *    )
 *  })
 *
 * ```
 */
export class MultiRegionRatelimit extends Ratelimit<MultiRegionContext> {
  /**
   * Create a new Ratelimit instance by providing a `@upstash/redis` instance and the algorithn of your choice.
   */
  constructor(config: MultiRegionRatelimitConfig) {
    super({
      prefix: config.prefix,
      limiter: config.limiter,
      timeout: config.timeout,
      analytics: config.analytics,
      ctx: {
        redis: config.redis,
        cache: config.ephemeralCache ? new Cache(config.ephemeralCache) : undefined,
      },
    });
  }

  /**
   * Each request inside a fixed time increases a counter.
   * Once the counter reaches the maximum allowed number, all further requests are
   * rejected.
   *
   * **Pro:**
   *
   * - Newer requests are not starved by old ones.
   * - Low storage cost.
   *
   * **Con:**
   *
   * A burst of requests near the boundary of a window can result in a very
   * high request rate because two windows will be filled with requests quickly.
   *
   * @param tokens - How many requests a user can make in each time window.
   * @param window - A fixed timeframe
   */
  static fixedWindow(
    /**
     * How many requests are allowed per window.
     */
    tokens: number,
    /**
     * The duration in which `tokens` requests are allowed.
     */
    window: Duration,
  ): Algorithm<MultiRegionContext> {
    const windowDuration = ms(window);
    const script = `
      local key    = KEYS[1]
      local id     = ARGV[1]
      local window = ARGV[2]

      redis.call("SADD", key, id)
      local members = redis.call("SMEMBERS", key)
      if #members == 1 then
        -- The first time this key is set, the value will be 1.
        -- So we only need the expire command once
        redis.call("PEXPIRE", key, window)
      end

      return members
      `;

    return async function (ctx: MultiRegionContext, identifier: string) {
      if (ctx.cache) {
        const { blocked, reset } = ctx.cache.isBlocked(identifier);
        if (blocked) {
          return {
            success: false,
            limit: tokens,
            remaining: 0,
            reset: reset,
            pending: Promise.resolve(),
          };
        }
      }

      const requestId = randomId();
      const bucket = Math.floor(Date.now() / windowDuration);
      const key = [identifier, bucket].join(":");

      const dbs: { redis: Redis; request: Promise<string[]> }[] = ctx.redis.map((redis) => ({
        redis,
        request: redis.eval(script, [key], [requestId, windowDuration]) as Promise<string[]>,
      }));

      const firstResponse = await Promise.any(dbs.map((s) => s.request));

      const usedTokens = firstResponse.length;

      const remaining = tokens - usedTokens - 1;

      /**
       * If the length between two databases does not match, we sync the two databases
       */
      async function sync() {
        const individualIDs = await Promise.all(dbs.map((s) => s.request));
        const allIDs = Array.from(new Set(individualIDs.flatMap((_) => _)).values());

        for (const db of dbs) {
          const ids = await db.request;
          /**
           * If the bucket in this db is already full, it doesn't matter which ids it contains.
           * So we do not have to sync.
           */
          if (ids.length >= tokens) {
            continue;
          }
          const diff = allIDs.filter((id) => !ids.includes(id));
          /**
           * Don't waste a request if there is nothing to send
           */
          if (diff.length === 0) {
            continue;
          }

          await db.redis.sadd(key, ...allIDs);
        }
      }

      /**
       * Do not await sync. This should not run in the critical path.
       */

      const success = remaining > 0;
      const reset = (bucket + 1) * windowDuration;

      if (ctx.cache && !success) {
        ctx.cache.blockUntil(identifier, reset);
      }
      return {
        success,
        limit: tokens,
        remaining,
        reset,
        pending: sync(),
      };
    };
  }

  /**
   * Combined approach of `slidingLogs` and `fixedWindow` with lower storage
   * costs than `slidingLogs` and improved boundary behavior by calculating a
   * weighted score between two windows.
   *
   * **Pro:**
   *
   * Good performance allows this to scale to very high loads.
   *
   * **Con:**
   *
   * Nothing major.
   *
   * @param tokens - How many requests a user can make in each time window.
   * @param window - The duration in which the user can max X requests.
   */
  static slidingWindow(
    /**
     * How many requests are allowed per window.
     */
    tokens: number,
    /**
     * The duration in which `tokens` requests are allowed.
     */
    window: Duration,
  ): Algorithm<MultiRegionContext> {
    const windowSize = ms(window);
    const script = `
      local currentKey  = KEYS[1]           -- identifier including prefixes
      local previousKey = KEYS[2]           -- key of the previous bucket
      local tokens      = tonumber(ARGV[1]) -- tokens per window
      local now         = ARGV[2]           -- current timestamp in milliseconds
      local window      = ARGV[3]           -- interval in milliseconds
      local requestId   = ARGV[4]           -- uuid for this request


      local currentMembers = redis.call("SMEMBERS", currentKey)
      local requestsInCurrentWindow = #currentMembers
      local previousMembers = redis.call("SMEMBERS", previousKey)
      local requestsInPreviousWindow = #previousMembers

      local percentageInCurrent = ( now % window) / window
      if requestsInPreviousWindow * ( 1 - percentageInCurrent ) + requestsInCurrentWindow >= tokens then
        return {currentMembers, previousMembers, false}
      end

      redis.call("SADD", currentKey, requestId)
      table.insert(currentMembers, requestId)
      if requestsInCurrentWindow == 0 then
        -- The first time this key is set, the value will be 1.
        -- So we only need the expire command once
        redis.call("PEXPIRE", currentKey, window * 2 + 1000) -- Enough time to overlap with a new window + 1 second
      end
      return {currentMembers, previousMembers, true}
      `;
    const windowDuration = ms(window);

    return async function (ctx: MultiRegionContext, identifier: string) {
      // if (ctx.cache) {
      //   const { blocked, reset } = ctx.cache.isBlocked(identifier);
      //   if (blocked) {
      //     return {
      //       success: false,
      //       limit: tokens,
      //       remaining: 0,
      //       reset: reset,
      //       pending: Promise.resolve(),
      //     };
      //   }
      // }

      const requestId = randomId();
      const now = Date.now();

      const currentWindow = Math.floor(now / windowSize);
      const currentKey = [identifier, currentWindow].join(":");
      const previousWindow = currentWindow - 1;
      const previousKey = [identifier, previousWindow].join(":");

      const dbs = ctx.redis.map((redis) => ({
        redis,
        request: redis.eval(
          script,
          [currentKey, previousKey],
          [tokens, now, windowDuration, requestId],
          // lua seems to return `1` for true and `null` for false
        ) as Promise<[string[], string[], 1 | null]>,
      }));

      const percentageInCurrent = (now % windowDuration) / windowDuration;
      const [current, previous, success] = await Promise.any(dbs.map((s) => s.request));

      const previousPartialUsed = previous.length * (1 - percentageInCurrent);
      const usedTokens = previousPartialUsed + current.length;

      const remaining = tokens - usedTokens;

      /**
       * If a database differs from the consensus, we sync it
       */
      async function sync() {
        const res = await Promise.all(dbs.map((s) => s.request));
        const allCurrentIds = res.flatMap(([current]) => current);
        for (const db of dbs) {
          const [ids] = await db.request;
          /**
           * If the bucket in this db is already full, it doesn't matter which ids it contains.
           * So we do not have to sync.
           */
          if (ids.length >= tokens) {
            continue;
          }
          const diff = allCurrentIds.filter((id) => !ids.includes(id));
          /**
           * Don't waste a request if there is nothing to send
           */
          if (diff.length === 0) {
            continue;
          }

          await db.redis.sadd(currentKey, ...diff);
        }
      }

      // const success = remaining >= 0;
      const reset = (currentWindow + 1) * windowDuration;
      if (ctx.cache && !success) {
        ctx.cache.blockUntil(identifier, reset);
      }
      return {
        success: Boolean(success),
        limit: tokens,
        remaining,
        reset,
        pending: sync(),
      };
    };
  }
}
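For orientation, here is a hedged sketch of how the deleted `MultiRegionRatelimit` was meant to be constructed, following the `MultiRegionRatelimitConfig` type above rather than the JSDoc `@example` (which passes a single `Redis.fromEnv()` even though the config requires an array of regional `@upstash/redis` clients). The env-var names and limiter values are placeholders, and the import assumes the package entry point re-exported the class.

```ts
// Hedged sketch, not part of the diff: constructing the deleted MultiRegionRatelimit
// per MultiRegionRatelimitConfig. Env-var names below are placeholders.
import { Redis } from "@upstash/redis";
import { MultiRegionRatelimit } from "@upstash/ratelimit";

const ratelimit = new MultiRegionRatelimit({
  redis: [
    new Redis({ url: process.env.EU_REDIS_URL!, token: process.env.EU_REDIS_TOKEN! }),
    new Redis({ url: process.env.US_REDIS_URL!, token: process.env.US_REDIS_TOKEN! }),
  ],
  limiter: MultiRegionRatelimit.fixedWindow(10, "30 m"), // 10 requests per 30-minute window
});

export async function check(identifier: string) {
  // limit() resolves with the fastest region's answer; `pending` carries the
  // cross-region set sync done by sync() and should not block the hot path.
  const { success, remaining, reset, pending } = await ratelimit.limit(identifier);
  void pending; // e.g. hand this to your platform's waitUntil()
  return { success, remaining, reset };
}
```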
package/src/ratelimit.test.ts
DELETED
@@ -1,155 +0,0 @@
import { describe, expect, test } from "bun:test";
import { log } from "console";
import crypto from "node:crypto";
import { Redis } from "@upstash/redis";
import { Algorithm } from ".";
import type { Duration } from "./duration";
import { MultiRegionRatelimit } from "./multi";
import { Ratelimit } from "./ratelimit";
import { RegionRatelimit } from "./single";
import { TestHarness } from "./test_utils";
import type { Context, MultiRegionContext, RegionContext } from "./types";

type TestCase = {
  // requests per second
  rps: number;
  /**
   * Multiplier for rate
   *
   * rate = 10, load = 0.5 -> attack rate will be 5
   */
  load: number;
};
const attackDuration = 60;
const window = 5;
const windowString: Duration = `${window} s`;

const testcases: TestCase[] = [];

for (const rps of [10, 100]) {
  for (const load of [0.5, 1, 1.5]) {
    testcases.push({ load, rps });
  }
}

function run<TContext extends Context>(builder: (tc: TestCase) => Ratelimit<TContext>) {
  for (const tc of testcases) {
    const name = `${tc.rps.toString().padStart(4, " ")}/s - Load: ${(tc.load * 100)
      .toString()
      .padStart(3, " ")}% -> Sending ${(tc.rps * tc.load).toString().padStart(4, " ")}req/s`;
    const ratelimit = builder(tc);

    const isMultiRegion = ratelimit instanceof MultiRegionRatelimit;
    const limits = {
      lte: ((attackDuration * tc.rps) / window) * (isMultiRegion ? 1.5 : 1.2),
      gte: ((attackDuration * tc.rps) / window) * (isMultiRegion ? 0.5 : 0.8),
    };
    describe(name, () => {
      test(
        `should be within ${limits.gte} - ${limits.lte}`,
        async () => {
          log(name);
          const harness = new TestHarness(ratelimit);
          await harness.attack(tc.rps * tc.load, attackDuration).catch((e) => {
            console.error(e);
          });
          log(
            "success:",
            harness.metrics.success,
            ", blocked:",
            harness.metrics.rejected,
            "out of:",
            harness.metrics.requests,
          );

          expect(harness.metrics.success).toBeLessThanOrEqual(limits.lte);
          expect(harness.metrics.success).toBeGreaterThanOrEqual(limits.gte);
        },
        attackDuration * 1000 * 2,
      );
    });
  }
}

function newMultiRegion(limiter: Algorithm<MultiRegionContext>): Ratelimit<MultiRegionContext> {
  function ensureEnv(key: string): string {
    const value = process.env[key];
    if (!value) {
      throw new Error(`Environment variable ${key} not found`);
    }
    return value;
  }

  return new MultiRegionRatelimit({
    prefix: crypto.randomUUID(),
    redis: [
      new Redis({
        url: ensureEnv("EU2_UPSTASH_REDIS_REST_URL"),
        token: ensureEnv("EU2_UPSTASH_REDIS_REST_TOKEN"),
      }),
      new Redis({
        url: ensureEnv("APN_UPSTASH_REDIS_REST_URL"),
        token: ensureEnv("APN_UPSTASH_REDIS_REST_TOKEN"),
      }),
      new Redis({
        url: ensureEnv("US1_UPSTASH_REDIS_REST_URL"),
        token: ensureEnv("US1_UPSTASH_REDIS_REST_TOKEN"),
      }),
    ],
    limiter,
  });
}

function newRegion(limiter: Algorithm<RegionContext>): Ratelimit<RegionContext> {
  return new RegionRatelimit({
    prefix: crypto.randomUUID(),
    redis: Redis.fromEnv(),
    limiter,
  });
}

describe("timeout", () => {
  test("pass after timeout", async () => {
    const r = new RegionRatelimit({
      prefix: crypto.randomUUID(),
      // @ts-ignore - I just want to test the timeout
      redis: {
        ...Redis.fromEnv(),
        eval: () => new Promise((r) => setTimeout(r, 2000)),
      },
      limiter: RegionRatelimit.fixedWindow(1, "1 s"),
      timeout: 1000,
    });
    const start = Date.now();
    const res = await r.limit("id");
    const duration = Date.now() - start;
    expect(res.success).toBe(true);
    expect(res.limit).toBe(0);
    expect(res.remaining).toBe(0);
    expect(res.reset).toBe(0);
    expect(duration).toBeGreaterThanOrEqual(900);
    expect(duration).toBeLessThanOrEqual(1100);

    // stop the test from leaking
    await new Promise((r) => setTimeout(r, 5000));
  }, 10000);
});

describe("fixedWindow", () => {
  describe("region", () =>
    run((tc) => newRegion(RegionRatelimit.fixedWindow(tc.rps, windowString))));

  describe("multiRegion", () =>
    run((tc) => newMultiRegion(MultiRegionRatelimit.fixedWindow(tc.rps, windowString))));
});
describe("slidingWindow", () => {
  describe("region", () =>
    run((tc) => newRegion(RegionRatelimit.slidingWindow(tc.rps, windowString))));
  describe("multiRegion", () =>
    run((tc) => newMultiRegion(MultiRegionRatelimit.slidingWindow(tc.rps, windowString))));
});

describe("tokenBucket", () => {
  describe("region", () =>
    run((tc) => newRegion(RegionRatelimit.tokenBucket(tc.rps, windowString, tc.rps))));
});
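As a quick sanity check on the acceptance bounds used in `run()` above: the expected number of successful requests is one full token allowance per window over the attack, i.e. `attackDuration * rps / window`, widened by a tolerance factor (0.8x to 1.2x for single-region, 0.5x to 1.5x for multi-region). A small arithmetic sketch for one of the test cases:

```ts
// Hedged arithmetic sketch (not part of the diff): the bounds run() computes,
// evaluated for the rps = 10 test case from the deleted file.
const attackDuration = 60; // seconds of sustained traffic
const window = 5;          // window size in seconds
const rps = 10;            // tokens allowed per window in this test case

const ideal = (attackDuration * rps) / window; // 120 expected successes

const regionBounds = { gte: ideal * 0.8, lte: ideal * 1.2 };      // 96 .. 144
const multiRegionBounds = { gte: ideal * 0.5, lte: ideal * 1.5 }; // 60 .. 180

console.log({ ideal, regionBounds, multiRegionBounds });
```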