@upstash/ratelimit 0.4.5-canary.0 → 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +2 -2
- package/dist/index.d.mts +556 -0
- package/dist/index.d.ts +556 -0
- package/dist/index.js +832 -0
- package/dist/index.js.map +1 -0
- package/dist/index.mjs +803 -0
- package/dist/index.mjs.map +1 -0
- package/package.json +1 -22
- package/.github/actions/redis/action.yaml +0 -58
- package/.github/img/dashboard.png +0 -0
- package/.github/workflows/release.yml +0 -46
- package/.github/workflows/stale.yaml +0 -31
- package/.github/workflows/tests.yaml +0 -79
- package/biome.json +0 -37
- package/bun.lockb +0 -0
- package/cmd/set-version.js +0 -14
- package/examples/cloudflare-workers/package.json +0 -18
- package/examples/cloudflare-workers/src/index.ts +0 -35
- package/examples/cloudflare-workers/tsconfig.json +0 -105
- package/examples/cloudflare-workers/wrangler.toml +0 -3
- package/examples/nextjs/LICENSE +0 -21
- package/examples/nextjs/README.md +0 -17
- package/examples/nextjs/components/Breadcrumb.tsx +0 -67
- package/examples/nextjs/components/Header.tsx +0 -18
- package/examples/nextjs/components/ReadBlogPost.tsx +0 -9
- package/examples/nextjs/components/StarButton.tsx +0 -27
- package/examples/nextjs/middleware.ts +0 -35
- package/examples/nextjs/next-env.d.ts +0 -5
- package/examples/nextjs/package.json +0 -27
- package/examples/nextjs/pages/_app.tsx +0 -47
- package/examples/nextjs/pages/api/blocked.ts +0 -6
- package/examples/nextjs/pages/api/hello.ts +0 -5
- package/examples/nextjs/pages/index.tsx +0 -62
- package/examples/nextjs/postcss.config.js +0 -6
- package/examples/nextjs/public/favicon.ico +0 -0
- package/examples/nextjs/public/github.svg +0 -11
- package/examples/nextjs/public/upstash.svg +0 -27
- package/examples/nextjs/styles/globals.css +0 -76
- package/examples/nextjs/tailwind.config.js +0 -19
- package/examples/nextjs/tsconfig.json +0 -21
- package/examples/nextjs13/README.md +0 -38
- package/examples/nextjs13/app/favicon.ico +0 -0
- package/examples/nextjs13/app/globals.css +0 -107
- package/examples/nextjs13/app/layout.tsx +0 -18
- package/examples/nextjs13/app/page.module.css +0 -271
- package/examples/nextjs13/app/route.tsx +0 -14
- package/examples/nextjs13/next.config.js +0 -8
- package/examples/nextjs13/package.json +0 -22
- package/examples/nextjs13/public/next.svg +0 -1
- package/examples/nextjs13/public/thirteen.svg +0 -1
- package/examples/nextjs13/public/vercel.svg +0 -1
- package/examples/nextjs13/tsconfig.json +0 -28
- package/examples/remix/.env.example +0 -2
- package/examples/remix/.eslintrc.js +0 -4
- package/examples/remix/README.md +0 -59
- package/examples/remix/app/root.tsx +0 -25
- package/examples/remix/app/routes/index.tsx +0 -47
- package/examples/remix/package.json +0 -32
- package/examples/remix/public/favicon.ico +0 -0
- package/examples/remix/remix.config.js +0 -12
- package/examples/remix/remix.env.d.ts +0 -2
- package/examples/remix/server.js +0 -4
- package/examples/remix/tsconfig.json +0 -22
- package/examples/with-vercel-kv/README.md +0 -51
- package/examples/with-vercel-kv/app/favicon.ico +0 -0
- package/examples/with-vercel-kv/app/globals.css +0 -27
- package/examples/with-vercel-kv/app/layout.tsx +0 -21
- package/examples/with-vercel-kv/app/page.tsx +0 -71
- package/examples/with-vercel-kv/next.config.js +0 -8
- package/examples/with-vercel-kv/package.json +0 -25
- package/examples/with-vercel-kv/postcss.config.js +0 -6
- package/examples/with-vercel-kv/public/next.svg +0 -1
- package/examples/with-vercel-kv/public/vercel.svg +0 -1
- package/examples/with-vercel-kv/tailwind.config.js +0 -17
- package/examples/with-vercel-kv/tsconfig.json +0 -28
- package/src/analytics.test.ts +0 -23
- package/src/analytics.ts +0 -92
- package/src/blockUntilReady.test.ts +0 -56
- package/src/cache.test.ts +0 -41
- package/src/cache.ts +0 -43
- package/src/duration.test.ts +0 -23
- package/src/duration.ts +0 -30
- package/src/index.ts +0 -17
- package/src/multi.ts +0 -365
- package/src/ratelimit.test.ts +0 -155
- package/src/ratelimit.ts +0 -238
- package/src/single.ts +0 -487
- package/src/test_utils.ts +0 -65
- package/src/tools/seed.ts +0 -37
- package/src/types.ts +0 -78
- package/src/version.ts +0 -1
- package/tsconfig.json +0 -103
- package/tsup.config.js +0 -11
- package/turbo.json +0 -16
package/dist/index.js
ADDED
|
@@ -0,0 +1,832 @@
|
|
|
1
|
+
"use strict";
// ---- esbuild-generated CommonJS interop helpers ----
var __defProp = Object.defineProperty;
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __hasOwnProp = Object.prototype.hasOwnProperty;
// Defines a lazy, enumerable getter on `target` for every key in `all`.
var __export = (target, all) => {
  for (var name in all)
    __defProp(target, name, { get: all[name], enumerable: true });
};
// Copies own properties from `from` onto `to` (skipping `except` and keys that
// already exist on `to`), preserving enumerability via getter indirection.
var __copyProps = (to, from, except, desc) => {
  if (from && typeof from === "object" || typeof from === "function") {
    for (let key of __getOwnPropNames(from))
      if (!__hasOwnProp.call(to, key) && key !== except)
        __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
  }
  return to;
};
// Marks the exports object as an ES module, then copies the exports onto it.
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);

// src/index.ts
var src_exports = {};
// Public API of the package. Note that the single-region `RegionRatelimit`
// class is exported under the name `Ratelimit`.
__export(src_exports, {
  Analytics: () => Analytics,
  MultiRegionRatelimit: () => MultiRegionRatelimit,
  Ratelimit: () => RegionRatelimit
});
module.exports = __toCommonJS(src_exports);
|
|
28
|
+
|
|
29
|
+
// src/analytics.ts
|
|
30
|
+
var import_core_analytics = require("@upstash/core-analytics");
|
|
31
|
+
var Analytics = class {
  analytics;
  table = "events";
  /**
   * Wraps `@upstash/core-analytics` with settings suited for rate limit
   * events: hourly windows and a 90 day retention.
   *
   * @param config - requires a `redis` client; `prefix` defaults to
   *                 "@upstash/ratelimit".
   */
  constructor(config) {
    this.analytics = new import_core_analytics.Analytics({
      // @ts-expect-error we need to fix the types in core-analytics, it should only require the methods it needs, not the whole sdk
      redis: config.redis,
      window: "1h",
      prefix: config.prefix ?? "@upstash/ratelimit",
      retention: "90d"
    });
  }
  /**
   * Try to extract the geo information from the request
   *
   * This handles Vercel's `req.geo` and and Cloudflare's `request.cf` properties
   * @param req
   * @returns
   */
  extractGeo(req) {
    if (req.geo !== void 0) {
      return req.geo;
    }
    if (req.cf !== void 0) {
      return req.cf;
    }
    return {};
  }
  /** Persists a single rate limit event into the analytics table. */
  async record(event) {
    await this.analytics.ingest(this.table, event);
  }
  /** Queries raw event records matching `filter` from `cutoff` until now. */
  async series(filter, cutoff) {
    return await this.analytics.query(this.table, {
      filter: [filter],
      range: [cutoff, Date.now()]
    });
  }
  /**
   * Aggregates success/blocked counts per identifier from `cutoff` until now.
   *
   * @returns an object mapping identifier -> { success, blocked }
   */
  async getUsage(cutoff = 0) {
    const buckets = await this.analytics.aggregateBy(this.table, "identifier", {
      range: [cutoff, Date.now()]
    });
    const usage = {};
    for (const bucket of buckets) {
      for (const [identifier, counts] of Object.entries(bucket)) {
        // Each bucket carries a `time` field alongside the identifiers.
        if (identifier === "time") {
          continue;
        }
        const entry = usage[identifier] ?? (usage[identifier] = { success: 0, blocked: 0 });
        entry.success += counts.true ?? 0;
        entry.blocked += counts.false ?? 0;
      }
    }
    return usage;
  }
};
|
|
89
|
+
|
|
90
|
+
// src/cache.ts
|
|
91
|
+
var Cache = class {
  /**
   * Stores identifier -> reset (in milliseconds)
   */
  cache;
  /** @param cache - the backing Map (or Map-like) storage */
  constructor(cache) {
    this.cache = cache;
  }
  /**
   * Returns whether `identifier` is currently blocked and, if so, the
   * timestamp (ms) at which the block expires.
   * Expired entries are evicted on read so the map does not grow unbounded.
   */
  isBlocked(identifier) {
    if (!this.cache.has(identifier)) {
      return { blocked: false, reset: 0 };
    }
    const reset = this.cache.get(identifier);
    if (reset < Date.now()) {
      this.cache.delete(identifier);
      return { blocked: false, reset: 0 };
    }
    return { blocked: true, reset };
  }
  /** Marks `identifier` as blocked until the `reset` timestamp (ms). */
  blockUntil(identifier, reset) {
    this.cache.set(identifier, reset);
  }
  set(key, value) {
    this.cache.set(key, value);
  }
  get(key) {
    // `??` instead of `||`: a cached value of 0 is a real value, not a miss.
    // (`|| null` would make callers that probe with `typeof get(key) === "number"`
    // treat a stored 0 as absent.)
    return this.cache.get(key) ?? null;
  }
  /**
   * Increments the counter stored under `key` (missing keys start at 0)
   * and returns the new value.
   */
  incr(key) {
    let value = this.cache.get(key) ?? 0;
    value += 1;
    this.cache.set(key, value);
    return value;
  }
};
|
|
126
|
+
|
|
127
|
+
// src/duration.ts
|
|
128
|
+
/**
 * Converts a human readable duration such as "10 s", "5m" or "2 h" into
 * milliseconds.
 *
 * @param d - a string of the form `<number><unit>` with an optional single
 *            space in between, where unit is one of ms | s | m | h | d
 * @returns the duration in milliseconds
 * @throws if `d` does not match the expected format
 */
function ms(d) {
  const match = d.match(/^(\d+)\s?(ms|s|m|h|d)$/);
  if (!match) {
    throw new Error(`Unable to parse window size: ${d}`);
  }
  // Always pass an explicit radix so inputs with leading zeros are decimal.
  const time = Number.parseInt(match[1], 10);
  const unit = match[2];
  switch (unit) {
    case "ms":
      return time;
    case "s":
      return time * 1e3;
    case "m":
      return time * 1e3 * 60;
    case "h":
      return time * 1e3 * 60 * 60;
    case "d":
      return time * 1e3 * 60 * 60 * 24;
    default:
      // Unreachable: the regex only admits the units handled above.
      throw new Error(`Unable to parse window size: ${d}`);
  }
}
|
|
150
|
+
|
|
151
|
+
// src/ratelimit.ts
|
|
152
|
+
var Ratelimit = class {
  // The algorithm function (e.g. fixedWindow / slidingWindow) invoked by `limit`.
  limiter;
  // Holds the redis client(s) and, optionally, the ephemeral in-memory cache.
  ctx;
  // Key prefix; limit keys are built as `<prefix>:<identifier>`.
  prefix;
  // Milliseconds after which `limit` resolves successfully even if redis has
  // not answered yet. Defaults to 5000; values <= 0 disable the escape hatch.
  timeout;
  // Optional Analytics sink; undefined unless `config.analytics` is truthy.
  analytics;
  constructor(config) {
    this.ctx = config.ctx;
    this.limiter = config.limiter;
    this.timeout = config.timeout ?? 5e3;
    this.prefix = config.prefix ?? "@upstash/ratelimit";
    this.analytics = config.analytics ? new Analytics({
      // Multi-region setups pass an array of clients; analytics only needs one.
      redis: Array.isArray(this.ctx.redis) ? this.ctx.redis[0] : this.ctx.redis,
      prefix: this.prefix
    }) : void 0;
    if (config.ephemeralCache instanceof Map) {
      this.ctx.cache = new Cache(config.ephemeralCache);
    } else if (typeof config.ephemeralCache === "undefined") {
      // No cache supplied: default to a fresh in-memory Map. Passing any other
      // non-undefined value (e.g. `false`) leaves the cache disabled.
      this.ctx.cache = new Cache(/* @__PURE__ */ new Map());
    }
  }
  /**
   * Determine if a request should pass or be rejected based on the identifier and previously chosen ratelimit.
   *
   * Use this if you want to reject all requests that you can not handle right now.
   *
   * @example
   * ```ts
   * const ratelimit = new Ratelimit({
   *    redis: Redis.fromEnv(),
   *    limiter: Ratelimit.slidingWindow(10, "10 s")
   * })
   *
   * const { success } = await ratelimit.limit(id)
   * if (!success){
   *   return "Nope"
   * }
   * return "Yes"
   * ```
   */
  limit = async (identifier, req) => {
    const key = [this.prefix, identifier].join(":");
    let timeoutId = null;
    try {
      const arr = [this.limiter(this.ctx, key)];
      if (this.timeout > 0) {
        // Race the limiter against a timer that "fails open": if redis is too
        // slow, the request is allowed with zeroed limit metadata.
        arr.push(
          new Promise((resolve) => {
            timeoutId = setTimeout(() => {
              resolve({
                success: true,
                limit: 0,
                remaining: 0,
                reset: 0,
                pending: Promise.resolve()
              });
            }, this.timeout);
          })
        );
      }
      const res = await Promise.race(arr);
      if (this.analytics) {
        try {
          const geo = req ? this.analytics.extractGeo(req) : void 0;
          // Analytics failures are logged but must never fail the limit call.
          const analyticsP = this.analytics.record({
            identifier,
            time: Date.now(),
            success: res.success,
            ...geo
          }).catch((err) => {
            console.warn("Failed to record analytics", err);
          });
          // Callers that `await res.pending` also wait for the analytics write.
          res.pending = Promise.all([res.pending, analyticsP]);
        } catch (err) {
          console.warn("Failed to record analytics", err);
        }
      }
      return res;
    } finally {
      // Always clear the fail-open timer so it cannot keep the process alive.
      if (timeoutId) {
        clearTimeout(timeoutId);
      }
    }
  };
  /**
   * Block until the request may pass or timeout is reached.
   *
   * This method returns a promise that resolves as soon as the request may be processed
   * or after the timeout has been reached.
   *
   * Use this if you want to delay the request until it is ready to get processed.
   *
   * @example
   * ```ts
   * const ratelimit = new Ratelimit({
   *    redis: Redis.fromEnv(),
   *    limiter: Ratelimit.slidingWindow(10, "10 s")
   * })
   *
   * const { success } = await ratelimit.blockUntilReady(id, 60_000)
   * if (!success){
   *   return "Nope"
   * }
   * return "Yes"
   * ```
   */
  blockUntilReady = async (identifier, timeout) => {
    if (timeout <= 0) {
      throw new Error("timeout must be positive");
    }
    let res;
    const deadline = Date.now() + timeout;
    while (true) {
      res = await this.limit(identifier);
      if (res.success) {
        break;
      }
      if (res.reset === 0) {
        // NOTE(review): every algorithm in this file returns a non-zero reset
        // on rejection, so this branch should be unreachable -- confirm.
        throw new Error("This should not happen");
      }
      // Sleep until the window resets, but never past the caller's deadline.
      const wait = Math.min(res.reset, deadline) - Date.now();
      await new Promise((r) => setTimeout(r, wait));
      if (Date.now() > deadline) {
        break;
      }
    }
    return res;
  };
};
|
|
281
|
+
|
|
282
|
+
// src/multi.ts
|
|
283
|
+
/**
 * Generates a 16 character alphanumeric id used to tag individual requests.
 * Not cryptographically secure (uses Math.random).
 */
function randomId() {
  const alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789";
  const picks = Array.from(
    { length: 16 },
    () => alphabet[Math.floor(Math.random() * alphabet.length)]
  );
  return picks.join("");
}
|
|
292
|
+
var MultiRegionRatelimit = class extends Ratelimit {
  /**
   * Create a new Ratelimit instance by providing a `@upstash/redis` instance and the algorithn of your choice.
   */
  constructor(config) {
    super({
      prefix: config.prefix,
      limiter: config.limiter,
      timeout: config.timeout,
      analytics: config.analytics,
      ctx: {
        redis: config.redis,
        cache: config.ephemeralCache ? new Cache(config.ephemeralCache) : void 0
      }
    });
  }
  /**
   * Each request inside a fixed time increases a counter.
   * Once the counter reaches the maximum allowed number, all further requests are
   * rejected.
   *
   * **Pro:**
   *
   * - Newer requests are not starved by old ones.
   * - Low storage cost.
   *
   * **Con:**
   *
   * A burst of requests near the boundary of a window can result in a very
   * high request rate because two windows will be filled with requests quickly.
   *
   * @param tokens - How many requests a user can make in each time window.
   * @param window - A fixed timeframe
   */
  static fixedWindow(tokens, window) {
    const windowDuration = ms(window);
    // Adds this request's id to the window's set and returns all members so
    // the caller can count used tokens. Expiry is set only on set creation.
    const script = `
      local key = KEYS[1]
      local id = ARGV[1]
      local window = ARGV[2]

      redis.call("SADD", key, id)
      local members = redis.call("SMEMBERS", key)
      if #members == 1 then
        -- The first time this key is set, the value will be 1.
        -- So we only need the expire command once
        redis.call("PEXPIRE", key, window)
      end

      return members
      `;
    return async function(ctx, identifier) {
      if (ctx.cache) {
        // Short-circuit: a locally cached rejection avoids the redis round trip.
        const { blocked, reset: reset2 } = ctx.cache.isBlocked(identifier);
        if (blocked) {
          return {
            success: false,
            limit: tokens,
            remaining: 0,
            reset: reset2,
            pending: Promise.resolve()
          };
        }
      }
      const requestId = randomId();
      const bucket = Math.floor(Date.now() / windowDuration);
      const key = [identifier, bucket].join(":");
      // Fire the script at every region in parallel and act on the fastest answer.
      const dbs = ctx.redis.map((redis) => ({
        redis,
        request: redis.eval(script, [key], [requestId, windowDuration])
      }));
      const firstResponse = await Promise.any(dbs.map((s) => s.request));
      const usedTokens = firstResponse.length;
      const remaining = tokens - usedTokens - 1;
      /**
       * Propagates request ids between regions so every replica converges on
       * the same member set for this window.
       */
      async function sync() {
        const individualIDs = await Promise.all(dbs.map((s) => s.request));
        const allIDs = Array.from(new Set(individualIDs.flatMap((_) => _)).values());
        for (const db of dbs) {
          const ids = await db.request;
          if (ids.length >= tokens) {
            continue;
          }
          const diff = allIDs.filter((id) => !ids.includes(id));
          if (diff.length === 0) {
            continue;
          }
          // Fix: only send the ids this region is missing. The previous code
          // computed `diff` but then sent the full `allIDs` list, which was
          // redundant (SADD ignores already-present members) and inconsistent
          // with the slidingWindow sync below. Behavior is unchanged.
          await db.redis.sadd(key, ...diff);
        }
      }
      const success = remaining > 0;
      const reset = (bucket + 1) * windowDuration;
      if (ctx.cache && !success) {
        ctx.cache.blockUntil(identifier, reset);
      }
      return {
        success,
        limit: tokens,
        remaining,
        reset,
        // Replication continues in the background; await `pending` to flush it.
        pending: sync()
      };
    };
  }
  /**
   * Combined approach of `slidingLogs` and `fixedWindow` with lower storage
   * costs than `slidingLogs` and improved boundary behavior by calculating a
   * weighted score between two windows.
   *
   * **Pro:**
   *
   * Good performance allows this to scale to very high loads.
   *
   * **Con:**
   *
   * Nothing major.
   *
   * @param tokens - How many requests a user can make in each time window.
   * @param window - The duration in which the user can max X requests.
   */
  static slidingWindow(tokens, window) {
    const windowSize = ms(window);
    // Weighs the previous window's members against the elapsed fraction of the
    // current window; only records the request when the weighted sum is below
    // the limit.
    const script = `
      local currentKey = KEYS[1] -- identifier including prefixes
      local previousKey = KEYS[2] -- key of the previous bucket
      local tokens = tonumber(ARGV[1]) -- tokens per window
      local now = ARGV[2] -- current timestamp in milliseconds
      local window = ARGV[3] -- interval in milliseconds
      local requestId = ARGV[4] -- uuid for this request

      local currentMembers = redis.call("SMEMBERS", currentKey)
      local requestsInCurrentWindow = #currentMembers
      local previousMembers = redis.call("SMEMBERS", previousKey)
      local requestsInPreviousWindow = #previousMembers

      local percentageInCurrent = ( now % window) / window
      if requestsInPreviousWindow * ( 1 - percentageInCurrent ) + requestsInCurrentWindow >= tokens then
        return {currentMembers, previousMembers, false}
      end

      redis.call("SADD", currentKey, requestId)
      table.insert(currentMembers, requestId)
      if requestsInCurrentWindow == 0 then
        -- The first time this key is set, the value will be 1.
        -- So we only need the expire command once
        redis.call("PEXPIRE", currentKey, window * 2 + 1000) -- Enough time to overlap with a new window + 1 second
      end
      return {currentMembers, previousMembers, true}
      `;
    const windowDuration = ms(window);
    return async function(ctx, identifier) {
      const requestId = randomId();
      const now = Date.now();
      const currentWindow = Math.floor(now / windowSize);
      const currentKey = [identifier, currentWindow].join(":");
      const previousWindow = currentWindow - 1;
      const previousKey = [identifier, previousWindow].join(":");
      // Fire the script at every region in parallel and act on the fastest answer.
      const dbs = ctx.redis.map((redis) => ({
        redis,
        request: redis.eval(
          script,
          [currentKey, previousKey],
          [tokens, now, windowDuration, requestId]
          // lua seems to return `1` for true and `null` for false
        )
      }));
      const percentageInCurrent = now % windowDuration / windowDuration;
      const [current, previous, success] = await Promise.any(dbs.map((s) => s.request));
      const previousPartialUsed = previous.length * (1 - percentageInCurrent);
      const usedTokens = previousPartialUsed + current.length;
      const remaining = tokens - usedTokens;
      /**
       * Propagates request ids of the current window between regions so every
       * replica converges on the same member set.
       */
      async function sync() {
        const res = await Promise.all(dbs.map((s) => s.request));
        const allCurrentIds = res.flatMap(([current2]) => current2);
        for (const db of dbs) {
          const [ids] = await db.request;
          if (ids.length >= tokens) {
            continue;
          }
          const diff = allCurrentIds.filter((id) => !ids.includes(id));
          if (diff.length === 0) {
            continue;
          }
          await db.redis.sadd(currentKey, ...diff);
        }
      }
      const reset = (currentWindow + 1) * windowDuration;
      if (ctx.cache && !success) {
        ctx.cache.blockUntil(identifier, reset);
      }
      return {
        success: Boolean(success),
        limit: tokens,
        remaining,
        reset,
        // Replication continues in the background; await `pending` to flush it.
        pending: sync()
      };
    };
  }
};
|
|
492
|
+
|
|
493
|
+
// src/single.ts
|
|
494
|
+
var RegionRatelimit = class extends Ratelimit {
  /**
   * Create a new Ratelimit instance by providing a `@upstash/redis` instance and the algorithm of your choice.
   */
  constructor(config) {
    super({
      prefix: config.prefix,
      limiter: config.limiter,
      timeout: config.timeout,
      analytics: config.analytics,
      ctx: {
        redis: config.redis
      },
      ephemeralCache: config.ephemeralCache
    });
  }
  /**
   * Each request inside a fixed time increases a counter.
   * Once the counter reaches the maximum allowed number, all further requests are
   * rejected.
   *
   * **Pro:**
   *
   * - Newer requests are not starved by old ones.
   * - Low storage cost.
   *
   * **Con:**
   *
   * A burst of requests near the boundary of a window can result in a very
   * high request rate because two windows will be filled with requests quickly.
   *
   * @param tokens - How many requests a user can make in each time window.
   * @param window - A fixed timeframe
   */
  static fixedWindow(tokens, window) {
    const windowDuration = ms(window);
    // Increments a per-window counter; the expiry is set only on the first
    // increment so the key disappears after the window passes.
    const script = `
      local key = KEYS[1]
      local window = ARGV[1]

      local r = redis.call("INCR", key)
      if r == 1 then
      -- The first time this key is set, the value will be 1.
      -- So we only need the expire command once
      redis.call("PEXPIRE", key, window)
      end

      return r
      `;
    return async function(ctx, identifier) {
      const bucket = Math.floor(Date.now() / windowDuration);
      const key = [identifier, bucket].join(":");
      if (ctx.cache) {
        // Short-circuit: a locally cached rejection avoids the redis round trip.
        const { blocked, reset: reset2 } = ctx.cache.isBlocked(identifier);
        if (blocked) {
          return {
            success: false,
            limit: tokens,
            remaining: 0,
            reset: reset2,
            pending: Promise.resolve()
          };
        }
      }
      const usedTokensAfterUpdate = await ctx.redis.eval(
        script,
        [key],
        [windowDuration]
      );
      const success = usedTokensAfterUpdate <= tokens;
      const reset = (bucket + 1) * windowDuration;
      if (ctx.cache && !success) {
        // Remember the rejection until the window resets.
        ctx.cache.blockUntil(identifier, reset);
      }
      return {
        success,
        limit: tokens,
        remaining: Math.max(0, tokens - usedTokensAfterUpdate),
        reset,
        pending: Promise.resolve()
      };
    };
  }
  /**
   * Combined approach of `slidingLogs` and `fixedWindow` with lower storage
   * costs than `slidingLogs` and improved boundary behavior by calculating a
   * weighted score between two windows.
   *
   * **Pro:**
   *
   * Good performance allows this to scale to very high loads.
   *
   * **Con:**
   *
   * Nothing major.
   *
   * @param tokens - How many requests a user can make in each time window.
   * @param window - The duration in which the user can max X requests.
   */
  static slidingWindow(tokens, window) {
    // Weighs the previous window's counter against the elapsed fraction of the
    // current window. Returns -1 on rejection, else the remaining tokens.
    const script = `
      local currentKey = KEYS[1] -- identifier including prefixes
      local previousKey = KEYS[2] -- key of the previous bucket
      local tokens = tonumber(ARGV[1]) -- tokens per window
      local now = ARGV[2] -- current timestamp in milliseconds
      local window = ARGV[3] -- interval in milliseconds

      local requestsInCurrentWindow = redis.call("GET", currentKey)
      if requestsInCurrentWindow == false then
        requestsInCurrentWindow = 0
      end

      local requestsInPreviousWindow = redis.call("GET", previousKey)
      if requestsInPreviousWindow == false then
        requestsInPreviousWindow = 0
      end
      local percentageInCurrent = ( now % window ) / window
      -- weighted requests to consider from the previous window
      requestsInPreviousWindow = math.floor(( 1 - percentageInCurrent ) * requestsInPreviousWindow)
      if requestsInPreviousWindow + requestsInCurrentWindow >= tokens then
        return -1
      end

      local newValue = redis.call("INCR", currentKey)
      if newValue == 1 then
        -- The first time this key is set, the value will be 1.
        -- So we only need the expire command once
        redis.call("PEXPIRE", currentKey, window * 2 + 1000) -- Enough time to overlap with a new window + 1 second
      end
      return tokens - ( newValue + requestsInPreviousWindow )
      `;
    const windowSize = ms(window);
    return async function(ctx, identifier) {
      const now = Date.now();
      const currentWindow = Math.floor(now / windowSize);
      const currentKey = [identifier, currentWindow].join(":");
      const previousWindow = currentWindow - 1;
      const previousKey = [identifier, previousWindow].join(":");
      if (ctx.cache) {
        // Short-circuit: a locally cached rejection avoids the redis round trip.
        const { blocked, reset: reset2 } = ctx.cache.isBlocked(identifier);
        if (blocked) {
          return {
            success: false,
            limit: tokens,
            remaining: 0,
            reset: reset2,
            pending: Promise.resolve()
          };
        }
      }
      const remaining = await ctx.redis.eval(
        script,
        [currentKey, previousKey],
        [tokens, now, windowSize]
      );
      // -1 signals rejection; any value >= 0 means the request was recorded.
      const success = remaining >= 0;
      const reset = (currentWindow + 1) * windowSize;
      if (ctx.cache && !success) {
        ctx.cache.blockUntil(identifier, reset);
      }
      return {
        success,
        limit: tokens,
        remaining: Math.max(0, remaining),
        reset,
        pending: Promise.resolve()
      };
    };
  }
  /**
   * You have a bucket filled with `{maxTokens}` tokens that refills constantly
   * at `{refillRate}` per `{interval}`.
   * Every request will remove one token from the bucket and if there is no
   * token to take, the request is rejected.
   *
   * **Pro:**
   *
   * - Bursts of requests are smoothed out and you can process them at a constant
   * rate.
   * - Allows to set a higher initial burst limit by setting `maxTokens` higher
   * than `refillRate`
   */
  static tokenBucket(refillRate, interval, maxTokens) {
    // Lazily refills the bucket based on elapsed intervals, then takes one
    // token. Returns {-1, nextRefill} when the bucket is empty.
    const script = `
      local key = KEYS[1] -- identifier including prefixes
      local maxTokens = tonumber(ARGV[1]) -- maximum number of tokens
      local interval = tonumber(ARGV[2]) -- size of the window in milliseconds
      local refillRate = tonumber(ARGV[3]) -- how many tokens are refilled after each interval
      local now = tonumber(ARGV[4]) -- current timestamp in milliseconds

      local bucket = redis.call("HMGET", key, "refilledAt", "tokens")

      local refilledAt
      local tokens

      if bucket[1] == false then
        refilledAt = now
        tokens = maxTokens
      else
        refilledAt = tonumber(bucket[1])
        tokens = tonumber(bucket[2])
      end

      if now >= refilledAt + interval then
        local numRefills = math.floor((now - refilledAt) / interval)
        tokens = math.min(maxTokens, tokens + numRefills * refillRate)

        refilledAt = refilledAt + numRefills * interval
      end

      if tokens == 0 then
        return {-1, refilledAt + interval}
      end

      local remaining = tokens - 1
      local expireAt = math.ceil(((maxTokens - remaining) / refillRate)) * interval

      redis.call("HSET", key, "refilledAt", refilledAt, "tokens", remaining)
      redis.call("PEXPIRE", key, expireAt)
      return {remaining, refilledAt + interval}
      `;
    const intervalDuration = ms(interval);
    return async function(ctx, identifier) {
      if (ctx.cache) {
        // Short-circuit: a locally cached rejection avoids the redis round trip.
        const { blocked, reset: reset2 } = ctx.cache.isBlocked(identifier);
        if (blocked) {
          return {
            success: false,
            limit: maxTokens,
            remaining: 0,
            reset: reset2,
            pending: Promise.resolve()
          };
        }
      }
      const now = Date.now();
      const [remaining, reset] = await ctx.redis.eval(
        script,
        [identifier],
        [maxTokens, intervalDuration, refillRate, now]
      );
      // NOTE(review): the script returns -1 on rejection, yet `> 0` also marks
      // a request that consumed the LAST token (remaining === 0) as failed,
      // even though the script already deducted it -- confirm whether
      // `remaining >= 0` was intended.
      const success = remaining > 0;
      if (ctx.cache && !success) {
        ctx.cache.blockUntil(identifier, reset);
      }
      return {
        success,
        limit: maxTokens,
        remaining,
        reset,
        pending: Promise.resolve()
      };
    };
  }
  /**
   * cachedFixedWindow first uses the local cache to decide if a request may pass and then updates
   * it asynchronously.
   * This is experimental and not yet recommended for production use.
   *
   * @experimental
   *
   * Each request inside a fixed time increases a counter.
   * Once the counter reaches the maximum allowed number, all further requests are
   * rejected.
   *
   * **Pro:**
   *
   * - Newer requests are not starved by old ones.
   * - Low storage cost.
   *
   * **Con:**
   *
   * A burst of requests near the boundary of a window can result in a very
   * high request rate because two windows will be filled with requests quickly.
   *
   * @param tokens - How many requests a user can make in each time window.
   * @param window - A fixed timeframe
   */
  static cachedFixedWindow(tokens, window) {
    const windowDuration = ms(window);
    // Same counter script as `fixedWindow`.
    const script = `
      local key = KEYS[1]
      local window = ARGV[1]

      local r = redis.call("INCR", key)
      if r == 1 then
      -- The first time this key is set, the value will be 1.
      -- So we only need the expire command once
      redis.call("PEXPIRE", key, window)
      end

      return r
      `;
    return async function(ctx, identifier) {
      if (!ctx.cache) {
        throw new Error("This algorithm requires a cache");
      }
      const bucket = Math.floor(Date.now() / windowDuration);
      const key = [identifier, bucket].join(":");
      const reset = (bucket + 1) * windowDuration;
      const hit = typeof ctx.cache.get(key) === "number";
      if (hit) {
        // Cache hit: decide locally and reconcile with redis in the background.
        const cachedTokensAfterUpdate = ctx.cache.incr(key);
        const success = cachedTokensAfterUpdate < tokens;
        // On success, sync the authoritative redis counter back into the cache.
        const pending = success ? ctx.redis.eval(script, [key], [windowDuration]).then((t) => {
          ctx.cache.set(key, t);
        }) : Promise.resolve();
        return {
          success,
          limit: tokens,
          remaining: tokens - cachedTokensAfterUpdate,
          reset,
          pending
        };
      }
      // Cache miss: consult redis synchronously and seed the local cache.
      const usedTokensAfterUpdate = await ctx.redis.eval(
        script,
        [key],
        [windowDuration]
      );
      ctx.cache.set(key, usedTokensAfterUpdate);
      const remaining = tokens - usedTokensAfterUpdate;
      return {
        success: remaining >= 0,
        limit: tokens,
        remaining,
        reset,
        pending: Promise.resolve()
      };
    };
  }
};
|
|
826
|
+
// Annotate the CommonJS export names for ESM import in node:
// (Dead code by construction: the `0 &&` guard means it never executes.
// Node's static cjs-module-lexer parses it to discover the named exports.)
0 && (module.exports = {
  Analytics,
  MultiRegionRatelimit,
  Ratelimit
});
|
|
832
|
+
//# sourceMappingURL=index.js.map
|