@upstash/ratelimit 0.2.0 → 0.3.0-canary.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/esm/ratelimit.js DELETED
@@ -1,171 +0,0 @@
- import { Cache } from "./cache.js";
- export class TimeoutError extends Error {
- constructor() {
- super("Timeout");
- this.name = "TimeoutError";
- }
- }
- /**
- * Ratelimiter using serverless redis from https://upstash.com/
- *
- * @example
- * ```ts
- * const { limit } = new Ratelimit({
- * redis: Redis.fromEnv(),
- * limiter: Ratelimit.slidingWindow(
- * 10, // Allow 10 requests per window of 30 minutes
- * "30 m", // interval of 30 minutes
- * ),
- * })
- *
- * ```
- */
- export class Ratelimit {
- constructor(config) {
- Object.defineProperty(this, "limiter", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: void 0
- });
- Object.defineProperty(this, "ctx", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: void 0
- });
- Object.defineProperty(this, "prefix", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: void 0
- });
- Object.defineProperty(this, "timeout", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: void 0
- });
- /**
- * Determine if a request should pass or be rejected based on the identifier and previously chosen ratelimit.
- *
- * Use this if you want to reject all requests that you can not handle right now.
- *
- * @example
- * ```ts
- * const ratelimit = new Ratelimit({
- * redis: Redis.fromEnv(),
- * limiter: Ratelimit.slidingWindow(10, "10 s")
- * })
- *
- * const { success } = await ratelimit.limit(id)
- * if (!success){
- * return "Nope"
- * }
- * return "Yes"
- * ```
- */
- Object.defineProperty(this, "limit", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: async (identifier) => {
- const key = [this.prefix, identifier].join(":");
- let timeoutId = null;
- try {
- const arr = [this.limiter(this.ctx, key)];
- if (this.timeout) {
- arr.push(new Promise((resolve) => {
- timeoutId = setTimeout(() => {
- resolve({
- success: true,
- limit: 0,
- remaining: 0,
- reset: 0,
- pending: Promise.resolve(),
- });
- }, this.timeout);
- }));
- }
- return await Promise.race(arr);
- }
- finally {
- if (timeoutId) {
- clearTimeout(timeoutId);
- }
- }
- }
- });
- /**
- * Block until the request may pass or timeout is reached.
- *
- * This method returns a promsie that resolves as soon as the request may be processed
- * or after the timeoue has been reached.
- *
- * Use this if you want to delay the request until it is ready to get processed.
- *
- * @example
- * ```ts
- * const ratelimit = new Ratelimit({
- * redis: Redis.fromEnv(),
- * limiter: Ratelimit.slidingWindow(10, "10 s")
- * })
- *
- * const { success } = await ratelimit.blockUntilReady(id, 60_000)
- * if (!success){
- * return "Nope"
- * }
- * return "Yes"
- * ```
- */
- Object.defineProperty(this, "blockUntilReady", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: async (
- /**
- * An identifier per user or api.
- * Choose a userID, or api token, or ip address.
- *
- * If you want to limit your api across all users, you can set a constant string.
- */
- identifier,
- /**
- * Maximum duration to wait in milliseconds.
- * After this time the request will be denied.
- */
- timeout) => {
- if (timeout <= 0) {
- throw new Error("timeout must be positive");
- }
- let res;
- const deadline = Date.now() + timeout;
- while (true) {
- res = await this.limit(identifier);
- if (res.success) {
- break;
- }
- if (res.reset === 0) {
- throw new Error("This should not happen");
- }
- const wait = Math.min(res.reset, deadline) - Date.now();
- await new Promise((r) => setTimeout(r, wait));
- if (Date.now() > deadline) {
- break;
- }
- }
- return res;
- }
- });
- this.ctx = config.ctx;
- this.limiter = config.limiter;
- this.timeout = config.timeout;
- this.prefix = config.prefix ?? "@upstash/ratelimit";
- if (config.ephemeralCache instanceof Map) {
- this.ctx.cache = new Cache(config.ephemeralCache);
- }
- else if (typeof config.ephemeralCache === "undefined") {
- this.ctx.cache = new Cache(new Map());
- }
- }
- }
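
For orientation, a minimal sketch of how the `limit` and `blockUntilReady` methods defined in the deleted `esm/ratelimit.js` above were typically consumed in 0.2.0. The handler names and the use of `Redis.fromEnv()` (which reads `UPSTASH_REDIS_REST_URL` and `UPSTASH_REDIS_REST_TOKEN`) are illustrative assumptions, not part of this diff.

```ts
// Sketch only: exercises the limit() and blockUntilReady() surface shown above.
import { Ratelimit } from "@upstash/ratelimit";
import { Redis } from "@upstash/redis";

const ratelimit = new Ratelimit({
  redis: Redis.fromEnv(), // assumes UPSTASH_REDIS_REST_URL / _TOKEN are set
  limiter: Ratelimit.slidingWindow(10, "10 s"), // 10 requests per 10 seconds
});

// Hypothetical request handler: reject immediately when the limit is exceeded.
export async function handleRequest(identifier: string): Promise<string> {
  const { success, remaining, reset } = await ratelimit.limit(identifier);
  if (!success) {
    return `Rate limited, resets at ${new Date(reset).toISOString()} (${remaining} remaining)`;
  }
  return "OK";
}

// Alternative: wait up to 30 seconds for a slot instead of rejecting outright.
export async function handleRequestBlocking(identifier: string): Promise<string> {
  const { success } = await ratelimit.blockUntilReady(identifier, 30_000);
  return success ? "OK" : "Timed out waiting for a slot";
}
```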
package/esm/single.js DELETED
@@ -1,375 +0,0 @@
- import { ms } from "./duration.js";
- import { Ratelimit } from "./ratelimit.js";
- /**
- * Ratelimiter using serverless redis from https://upstash.com/
- *
- * @example
- * ```ts
- * const { limit } = new Ratelimit({
- * redis: Redis.fromEnv(),
- * limiter: Ratelimit.slidingWindow(
- * "30 m", // interval of 30 minutes
- * 10, // Allow 10 requests per window of 30 minutes
- * )
- * })
- *
- * ```
- */
- export class RegionRatelimit extends Ratelimit {
- /**
- * Create a new Ratelimit instance by providing a `@upstash/redis` instance and the algorithn of your choice.
- */
- constructor(config) {
- super({
- prefix: config.prefix,
- limiter: config.limiter,
- timeout: config.timeout,
- ctx: {
- redis: config.redis,
- },
- ephemeralCache: config.ephemeralCache,
- });
- }
- /**
- * Each requests inside a fixed time increases a counter.
- * Once the counter reaches a maxmimum allowed number, all further requests are
- * rejected.
- *
- * **Pro:**
- *
- * - Newer requests are not starved by old ones.
- * - Low storage cost.
- *
- * **Con:**
- *
- * A burst of requests near the boundary of a window can result in a very
- * high request rate because two windows will be filled with requests quickly.
- *
- * @param tokens - How many requests a user can make in each time window.
- * @param window - A fixed timeframe
- */
- static fixedWindow(
- /**
- * How many requests are allowed per window.
- */
- tokens,
- /**
- * The duration in which `tokens` requests are allowed.
- */
- window) {
- const windowDuration = ms(window);
- const script = `
- local key = KEYS[1]
- local window = ARGV[1]
-
- local r = redis.call("INCR", key)
- if r == 1 then
- -- The first time this key is set, the value will be 1.
- -- So we only need the expire command once
- redis.call("PEXPIRE", key, window)
- end
-
- return r
- `;
- return async function (ctx, identifier) {
- const bucket = Math.floor(Date.now() / windowDuration);
- const key = [identifier, bucket].join(":");
- if (ctx.cache) {
- const { blocked, reset } = ctx.cache.isBlocked(identifier);
- if (blocked) {
- return {
- success: false,
- limit: tokens,
- remaining: 0,
- reset: reset,
- pending: Promise.resolve(),
- };
- }
- }
- const usedTokensAfterUpdate = (await ctx.redis.eval(script, [key], [windowDuration]));
- const success = usedTokensAfterUpdate <= tokens;
- const reset = (bucket + 1) * windowDuration;
- if (ctx.cache && !success) {
- ctx.cache.blockUntil(identifier, reset);
- }
- return {
- success,
- limit: tokens,
- remaining: tokens - usedTokensAfterUpdate,
- reset,
- pending: Promise.resolve(),
- };
- };
- }
- /**
- * Combined approach of `slidingLogs` and `fixedWindow` with lower storage
- * costs than `slidingLogs` and improved boundary behavior by calcualting a
- * weighted score between two windows.
- *
- * **Pro:**
- *
- * Good performance allows this to scale to very high loads.
- *
- * **Con:**
- *
- * Nothing major.
- *
- * @param tokens - How many requests a user can make in each time window.
- * @param window - The duration in which the user can max X requests.
- */
- static slidingWindow(
- /**
- * How many requests are allowed per window.
- */
- tokens,
- /**
- * The duration in which `tokens` requests are allowed.
- */
- window) {
- const script = `
- local currentKey = KEYS[1] -- identifier including prefixes
- local previousKey = KEYS[2] -- key of the previous bucket
- local tokens = tonumber(ARGV[1]) -- tokens per window
- local now = ARGV[2] -- current timestamp in milliseconds
- local window = ARGV[3] -- interval in milliseconds
-
- local requestsInCurrentWindow = redis.call("GET", currentKey)
- if requestsInCurrentWindow == false then
- requestsInCurrentWindow = 0
- end
-
-
- local requestsInPreviousWindow = redis.call("GET", previousKey)
- if requestsInPreviousWindow == false then
- requestsInPreviousWindow = 0
- end
- local percentageInCurrent = ( now % window) / window
- if requestsInPreviousWindow * ( 1 - percentageInCurrent ) + requestsInCurrentWindow >= tokens then
- return 0
- end
-
- local newValue = redis.call("INCR", currentKey)
- if newValue == 1 then
- -- The first time this key is set, the value will be 1.
- -- So we only need the expire command once
- redis.call("PEXPIRE", currentKey, window * 2 + 1000) -- Enough time to overlap with a new window + 1 second
- end
- return tokens - newValue
- `;
- const windowSize = ms(window);
- return async function (ctx, identifier) {
- const now = Date.now();
- const currentWindow = Math.floor(now / windowSize);
- const currentKey = [identifier, currentWindow].join(":");
- const previousWindow = currentWindow - windowSize;
- const previousKey = [identifier, previousWindow].join(":");
- if (ctx.cache) {
- const { blocked, reset } = ctx.cache.isBlocked(identifier);
- if (blocked) {
- return {
- success: false,
- limit: tokens,
- remaining: 0,
- reset: reset,
- pending: Promise.resolve(),
- };
- }
- }
- const remaining = (await ctx.redis.eval(script, [currentKey, previousKey], [tokens, now, windowSize]));
- const success = remaining > 0;
- const reset = (currentWindow + 1) * windowSize;
- if (ctx.cache && !success) {
- ctx.cache.blockUntil(identifier, reset);
- }
- return {
- success,
- limit: tokens,
- remaining,
- reset,
- pending: Promise.resolve(),
- };
- };
- }
- /**
- * You have a bucket filled with `{maxTokens}` tokens that refills constantly
- * at `{refillRate}` per `{interval}`.
- * Every request will remove one token from the bucket and if there is no
- * token to take, the request is rejected.
- *
- * **Pro:**
- *
- * - Bursts of requests are smoothed out and you can process them at a constant
- * rate.
- * - Allows to set a higher initial burst limit by setting `maxTokens` higher
- * than `refillRate`
- */
- static tokenBucket(
- /**
- * How many tokens are refilled per `interval`
- *
- * An interval of `10s` and refillRate of 5 will cause a new token to be added every 2 seconds.
- */
- refillRate,
- /**
- * The interval for the `refillRate`
- */
- interval,
- /**
- * Maximum number of tokens.
- * A newly created bucket starts with this many tokens.
- * Useful to allow higher burst limits.
- */
- maxTokens) {
- const script = `
- local key = KEYS[1] -- identifier including prefixes
- local maxTokens = tonumber(ARGV[1]) -- maximum number of tokens
- local interval = tonumber(ARGV[2]) -- size of the window in milliseconds
- local refillRate = tonumber(ARGV[3]) -- how many tokens are refilled after each interval
- local now = tonumber(ARGV[4]) -- current timestamp in milliseconds
- local remaining = 0
-
- local bucket = redis.call("HMGET", key, "updatedAt", "tokens")
-
- if bucket[1] == false then
- -- The bucket does not exist yet, so we create it and add a ttl.
- remaining = maxTokens - 1
-
- redis.call("HMSET", key, "updatedAt", now, "tokens", remaining)
- redis.call("PEXPIRE", key, interval)
-
- return {remaining, now + interval}
- end
-
- -- The bucket does exist
-
- local updatedAt = tonumber(bucket[1])
- local tokens = tonumber(bucket[2])
-
- if now >= updatedAt + interval then
- remaining = math.min(maxTokens, tokens + refillRate) - 1
-
- redis.call("HMSET", key, "updatedAt", now, "tokens", remaining)
- return {remaining, now + interval}
- end
-
- if tokens > 0 then
- remaining = tokens - 1
- redis.call("HMSET", key, "updatedAt", now, "tokens", remaining)
- end
-
- return {remaining, updatedAt + interval}
- `;
- const intervalDuration = ms(interval);
- return async function (ctx, identifier) {
- if (ctx.cache) {
- const { blocked, reset } = ctx.cache.isBlocked(identifier);
- if (blocked) {
- return {
- success: false,
- limit: maxTokens,
- remaining: 0,
- reset: reset,
- pending: Promise.resolve(),
- };
- }
- }
- const now = Date.now();
- const key = [identifier, Math.floor(now / intervalDuration)].join(":");
- const [remaining, reset] = (await ctx.redis.eval(script, [key], [maxTokens, intervalDuration, refillRate, now]));
- const success = remaining > 0;
- if (ctx.cache && !success) {
- ctx.cache.blockUntil(identifier, reset);
- }
- return {
- success,
- limit: maxTokens,
- remaining,
- reset,
- pending: Promise.resolve(),
- };
- };
- }
- /**
- * cachedFixedWindow first uses the local cache to decide if a request may pass and then updates
- * it asynchronously.
- * This is experimental and not yet recommended for production use.
- *
- * @experimental
- *
- * Each requests inside a fixed time increases a counter.
- * Once the counter reaches a maxmimum allowed number, all further requests are
- * rejected.
- *
- * **Pro:**
- *
- * - Newer requests are not starved by old ones.
- * - Low storage cost.
- *
- * **Con:**
- *
- * A burst of requests near the boundary of a window can result in a very
- * high request rate because two windows will be filled with requests quickly.
- *
- * @param tokens - How many requests a user can make in each time window.
- * @param window - A fixed timeframe
- */
- static cachedFixedWindow(
- /**
- * How many requests are allowed per window.
- */
- tokens,
- /**
- * The duration in which `tokens` requests are allowed.
- */
- window) {
- const windowDuration = ms(window);
- const script = `
- local key = KEYS[1]
- local window = ARGV[1]
-
- local r = redis.call("INCR", key)
- if r == 1 then
- -- The first time this key is set, the value will be 1.
- -- So we only need the expire command once
- redis.call("PEXPIRE", key, window)
- end
-
- return r
- `;
- return async function (ctx, identifier) {
- if (!ctx.cache) {
- throw new Error("This algorithm requires a cache");
- }
- const bucket = Math.floor(Date.now() / windowDuration);
- const key = [identifier, bucket].join(":");
- const reset = (bucket + 1) * windowDuration;
- const hit = typeof ctx.cache.get(key) === "number";
- if (hit) {
- const cachedTokensAfterUpdate = ctx.cache.incr(key);
- const success = cachedTokensAfterUpdate < tokens;
- const pending = success
- ? ctx.redis.eval(script, [key], [windowDuration]).then((t) => {
- ctx.cache.set(key, t);
- })
- : Promise.resolve();
- return {
- success,
- limit: tokens,
- remaining: tokens - cachedTokensAfterUpdate,
- reset: reset,
- pending,
- };
- }
- const usedTokensAfterUpdate = (await ctx.redis.eval(script, [key], [windowDuration]));
- ctx.cache.set(key, usedTokensAfterUpdate);
- const remaining = tokens - usedTokensAfterUpdate;
- return {
- success: remaining >= 0,
- limit: tokens,
- remaining,
- reset: reset,
- pending: Promise.resolve(),
- };
- };
- }
- }
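
A hedged sketch of how the limiter factories removed in `esm/single.js` above were selected; each static method returns the algorithm function that the `Ratelimit` constructor consumes through its `limiter` option. The concrete numbers are illustrative, not taken from the diff.

```ts
// Sketch of choosing between the deleted factories (signatures as defined above).
import { Ratelimit } from "@upstash/ratelimit";
import { Redis } from "@upstash/redis";

const redis = Redis.fromEnv();

// fixedWindow(tokens, window): cheap counter per window, bursty at window boundaries.
const fixed = new Ratelimit({ redis, limiter: Ratelimit.fixedWindow(20, "60 s") });

// slidingWindow(tokens, window): weights the previous window to smooth the boundary effect.
const sliding = new Ratelimit({ redis, limiter: Ratelimit.slidingWindow(20, "60 s") });

// tokenBucket(refillRate, interval, maxTokens): refill 5 tokens every 10 s, burst up to 15.
const bucket = new Ratelimit({ redis, limiter: Ratelimit.tokenBucket(5, "10 s", 15) });

// cachedFixedWindow(tokens, window): experimental; answers from the in-memory cache first
// and reconciles with Redis asynchronously via the returned `pending` promise.
const cached = new Ratelimit({ redis, limiter: Ratelimit.cachedFixedWindow(20, "60 s") });
```

If `cachedFixedWindow` is used, the `pending` promise in the result presumably needs to settle before a serverless runtime is frozen (for example by awaiting it or handing it to the platform's `waitUntil`), since it carries the asynchronous Redis update.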
package/esm/types.js DELETED
@@ -1 +0,0 @@
- export {};
@@ -1,66 +0,0 @@
- "use strict";
- Object.defineProperty(exports, "__esModule", { value: true });
- exports.dntGlobalThis = exports.crypto = void 0;
- const shim_crypto_1 = require("@deno/shim-crypto");
- var shim_crypto_2 = require("@deno/shim-crypto");
- Object.defineProperty(exports, "crypto", { enumerable: true, get: function () { return shim_crypto_2.crypto; } });
- const dntGlobals = {
- crypto: shim_crypto_1.crypto,
- };
- exports.dntGlobalThis = createMergeProxy(globalThis, dntGlobals);
- // deno-lint-ignore ban-types
- function createMergeProxy(baseObj, extObj) {
- return new Proxy(baseObj, {
- get(_target, prop, _receiver) {
- if (prop in extObj) {
- return extObj[prop];
- }
- else {
- return baseObj[prop];
- }
- },
- set(_target, prop, value) {
- if (prop in extObj) {
- delete extObj[prop];
- }
- baseObj[prop] = value;
- return true;
- },
- deleteProperty(_target, prop) {
- let success = false;
- if (prop in extObj) {
- delete extObj[prop];
- success = true;
- }
- if (prop in baseObj) {
- delete baseObj[prop];
- success = true;
- }
- return success;
- },
- ownKeys(_target) {
- const baseKeys = Reflect.ownKeys(baseObj);
- const extKeys = Reflect.ownKeys(extObj);
- const extKeysSet = new Set(extKeys);
- return [...baseKeys.filter((k) => !extKeysSet.has(k)), ...extKeys];
- },
- defineProperty(_target, prop, desc) {
- if (prop in extObj) {
- delete extObj[prop];
- }
- Reflect.defineProperty(baseObj, prop, desc);
- return true;
- },
- getOwnPropertyDescriptor(_target, prop) {
- if (prop in extObj) {
- return Reflect.getOwnPropertyDescriptor(extObj, prop);
- }
- else {
- return Reflect.getOwnPropertyDescriptor(baseObj, prop);
- }
- },
- has(_target, prop) {
- return prop in extObj || prop in baseObj;
- },
- });
- }
package/script/cache.js DELETED
@@ -1,44 +0,0 @@
- "use strict";
- Object.defineProperty(exports, "__esModule", { value: true });
- exports.Cache = void 0;
- class Cache {
- constructor(cache) {
- /**
- * Stores identifier -> reset (in milliseconds)
- */
- Object.defineProperty(this, "cache", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: void 0
- });
- this.cache = cache;
- }
- isBlocked(identifier) {
- if (!this.cache.has(identifier)) {
- return { blocked: false, reset: 0 };
- }
- const reset = this.cache.get(identifier);
- if (reset < Date.now()) {
- this.cache.delete(identifier);
- return { blocked: false, reset: 0 };
- }
- return { blocked: true, reset: reset };
- }
- blockUntil(identifier, reset) {
- this.cache.set(identifier, reset);
- }
- set(key, value) {
- this.cache.set(key, value);
- }
- get(key) {
- return this.cache.get(key) || null;
- }
- incr(key) {
- let value = this.cache.get(key) ?? 0;
- value += 1;
- this.cache.set(key, value);
- return value;
- }
- }
- exports.Cache = Cache;
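
The `Cache` class above backs the `ephemeralCache` option seen in the deleted `ratelimit.js` constructor: within one warm instance, identifiers that were just blocked are rejected locally without another Redis round trip. A small sketch of wiring it up explicitly; the variable names are illustrative.

```ts
// Sketch: passing an explicit Map as the ephemeral cache. If the option is
// omitted entirely, the deleted constructor falls back to `new Cache(new Map())`.
import { Ratelimit } from "@upstash/ratelimit";
import { Redis } from "@upstash/redis";

const ephemeralCache = new Map<string, number>(); // identifier -> reset timestamp / counter

const ratelimit = new Ratelimit({
  redis: Redis.fromEnv(),
  limiter: Ratelimit.fixedWindow(10, "60 s"),
  ephemeralCache,
});
```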
@@ -1,25 +0,0 @@
- "use strict";
- Object.defineProperty(exports, "__esModule", { value: true });
- exports.ms = void 0;
- /**
- * Convert a human readable duration to milliseconds
- */
- function ms(d) {
- const [timeString, duration] = d.split(" ");
- const time = parseFloat(timeString);
- switch (duration) {
- case "ms":
- return time;
- case "s":
- return time * 1000;
- case "m":
- return time * 1000 * 60;
- case "h":
- return time * 1000 * 60 * 60;
- case "d":
- return time * 1000 * 60 * 60 * 24;
- default:
- throw new Error(`Unable to parse window size: ${d}`);
- }
- }
- exports.ms = ms;
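
The `ms` helper in the hunk above defines the duration strings accepted by the limiter factories: a number, a single space, then one of `ms`, `s`, `m`, `h`, or `d`. A brief restatement of the accepted format for reference; the type alias is an illustration, not part of the package.

```ts
// Illustrative only: the shape of duration strings parsed by the deleted ms() helper.
type Unit = "ms" | "s" | "m" | "h" | "d";
type Duration = `${number} ${Unit}`;

const conversions: Array<[Duration, number]> = [
  ["250 ms", 250],
  ["10 s", 10_000],
  ["30 m", 1_800_000],
  ["2 h", 7_200_000],
  ["1 d", 86_400_000],
];
// Strings without the space ("10s") or with an unknown unit ("1 w") fall through to the
// default branch above and throw `Unable to parse window size: ...`.
```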
package/script/mod.js DELETED
@@ -1,7 +0,0 @@
- "use strict";
- Object.defineProperty(exports, "__esModule", { value: true });
- exports.MultiRegionRatelimit = exports.Ratelimit = void 0;
- var single_js_1 = require("./single.js");
- Object.defineProperty(exports, "Ratelimit", { enumerable: true, get: function () { return single_js_1.RegionRatelimit; } });
- var multi_js_1 = require("./multi.js");
- Object.defineProperty(exports, "MultiRegionRatelimit", { enumerable: true, get: function () { return multi_js_1.MultiRegionRatelimit; } });
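
The deleted CJS entrypoint above maps the internal class names onto the public ones: `RegionRatelimit` from `single.js` is re-exported as `Ratelimit`, and `MultiRegionRatelimit` comes from `multi.js` (not included in this diff). A short illustration of the import consumers relied on.

```ts
// Consumers never see the RegionRatelimit name, only the public aliases below.
import { Ratelimit, MultiRegionRatelimit } from "@upstash/ratelimit";
```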