@upstash/ratelimit 0.1.0-alpha.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/LICENSE ADDED
@@ -0,0 +1,21 @@
+ The MIT License (MIT)
+
+ Copyright (c) 2021 Upstash, Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
package/README.md ADDED
@@ -0,0 +1,239 @@
+ # Upstash Ratelimit
+
+ An HTTP/REST based rate limiter built on top of the
+ [Upstash REST API](https://docs.upstash.com/features/restapi).
+
+ [![Tests](https://github.com/upstash/ratelimit/actions/workflows/tests.yaml/badge.svg)](https://github.com/upstash/ratelimit/actions/workflows/tests.yaml)
+ ![npm (scoped)](https://img.shields.io/npm/v/@upstash/ratelimit)
+ ![npm bundle size](https://img.shields.io/bundlephobia/minzip/@upstash/ratelimit)
+
+ It is the only connectionless (HTTP based) rate limiter, designed for:
+
+ - Serverless functions (AWS Lambda ...)
+ - Cloudflare Workers
+ - Fastly Compute@Edge
+ - Next.js, Jamstack ...
+ - Client side web/mobile applications
+ - WebAssembly
+ - and other environments where HTTP is preferred over TCP.
+
+ ## Quick Start
+
+ ### Install
+
+ #### npm
+
+ ```bash
+ npm install @upstash/ratelimit
+ ```
+
+ #### Deno
+
+ ```ts
+ import { Ratelimit } from "https://deno.land/x/upstash_ratelimit/mod.ts";
+ ```
+
+ ### Create database
+
+ Create a new Redis database on [Upstash](https://console.upstash.com/).
+
+ ### Use it
+
+ See [here](https://github.com/upstash/upstash-redis#quick-start) for
+ documentation on how to create a Redis instance.
+
+ ```ts
+ import { Ratelimit } from "@upstash/ratelimit"; // for deno: see above
+ import { Redis } from "@upstash/redis";
+
+ // Create a new ratelimiter that allows 10 requests per 10 seconds
+ const ratelimit = new Ratelimit({
+   redis: Redis.fromEnv(),
+   limiter: Ratelimit.slidingWindow(10, "10 s"),
+ });
+
+ // Use a constant string to limit all requests with a single ratelimit,
+ // or use a userID, API key or IP address for individual limits.
+ const identifier = "api";
+ const { success } = await ratelimit.limit(identifier);
+
+ if (!success) {
+   return "Unable to process at this time";
+ }
+ doExpensiveCalculation();
+ return "Here you go!";
+ ```
+
+ [Here's a complete Next.js example](https://github.com/upstash/ratelimit/tree/main/examples/nextjs)
+
+ The `limit` method returns some more metadata that might be useful to you:
+
+ ```ts
+ export type RatelimitResponse = {
+   /**
+    * Whether the request may pass (true) or exceeded the limit (false).
+    */
+   success: boolean;
+
+   /**
+    * Maximum number of requests allowed within a window.
+    */
+   limit: number;
+
+   /**
+    * How many requests the user has left within the current window.
+    */
+   remaining: number;
+
+   /**
+    * Unix timestamp in milliseconds when the limits are reset.
+    */
+   reset: number;
+ };
+ ```
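+
+ For example, you can forward this metadata as rate limit headers (a minimal
+ sketch assuming a Next.js-style API route; `handler`, `req` and `res` are
+ hypothetical and not part of this package):
+
+ ```ts
+ import { Ratelimit } from "@upstash/ratelimit";
+ import { Redis } from "@upstash/redis";
+
+ const ratelimit = new Ratelimit({
+   redis: Redis.fromEnv(),
+   limiter: Ratelimit.slidingWindow(10, "10 s"),
+ });
+
+ export default async function handler(req, res) {
+   const { success, limit, remaining, reset } = await ratelimit.limit("api");
+
+   // Expose the metadata as conventional rate limit headers.
+   res.setHeader("X-RateLimit-Limit", limit.toString());
+   res.setHeader("X-RateLimit-Remaining", remaining.toString());
+   res.setHeader("X-RateLimit-Reset", reset.toString());
+
+   if (!success) {
+     res.status(429).send("Unable to process at this time");
+     return;
+   }
+   res.status(200).send("Here you go!");
+ }
+ ```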
+
+ ### Block until ready
+
+ In case you don't want to reject a request immediately but wait until it can be
+ processed, we also provide
+ `ratelimit.blockUntilReady(identifier: string, timeout: number): Promise<RatelimitResponse>`
+
+ It is very similar to the `limit` method and takes an identifier and returns the
+ same response. However, if the current limit has already been exceeded, it will
+ automatically wait until the next window starts and will try again. Setting the
+ timeout parameter (in milliseconds) will cause the returned Promise to resolve
+ in a finite amount of time.
+
+ ```ts
+ // Create a new ratelimiter that allows 10 requests per 10 seconds
+ const ratelimit = new Ratelimit({
+   redis: Redis.fromEnv(),
+   limiter: Ratelimit.slidingWindow(10, "10 s"),
+ });
+
+ // `blockUntilReady` returns a promise that resolves as soon as the request is allowed to be processed, or after 30 seconds
+ const { success } = await ratelimit.blockUntilReady("id", 30_000);
+
+ if (!success) {
+   return "Unable to process, even after 30 seconds";
+ }
+ doExpensiveCalculation();
+ return "Here you go!";
+ ```
+
+ ## Ratelimiting algorithms
+
+ We provide different algorithms to use out of the box. Each has pros and cons.
+
+ ### Fixed Window
+
+ This algorithm divides time into fixed durations/windows. For example, each
+ window is 10 seconds long. When a new request comes in, the current time is used
+ to determine the window and a counter is increased. If the counter is larger
+ than the set limit, the request is rejected.
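+
+ The bookkeeping looks roughly like this (a minimal in-memory sketch for
+ illustration only; the actual implementation keeps the counter in Redis):
+
+ ```ts
+ const windowMs = 10_000; // 10 second windows
+ const limit = 10;
+ const counters = new Map<string, number>();
+
+ function isAllowed(identifier: string): boolean {
+   // All requests within the same 10 s slice share one counter.
+   const bucket = Math.floor(Date.now() / windowMs);
+   const key = `${identifier}:${bucket}`;
+   const used = (counters.get(key) ?? 0) + 1;
+   counters.set(key, used);
+   return used <= limit;
+ }
+ ```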
+
+ #### Pros:
+
+ - Very cheap in terms of data size and computation
+ - Newer requests are not starved due to a high burst in the past
+
+ #### Cons:
+
+ - Can cause high bursts at the window boundaries to leak through
+ - Can cause request stampedes if many users try to access your server whenever
+   a new window begins
+
+ #### Usage:
+
+ Create a new ratelimiter that allows 10 requests per 10 seconds.
+
+ ```ts
+ const ratelimit = new Ratelimit({
+   redis: Redis.fromEnv(),
+   limiter: Ratelimit.fixedWindow(10, "10 s"),
+ });
+ ```
+
+ ### Sliding Window
+
+ Builds on top of fixed window, but instead of a fixed window we use a rolling
+ window. Take this example: We have a rate limit of 10 requests per 1 minute. We
+ divide time into 1 minute slices, just like in the fixed window algorithm.
+ Window 1 will be from 00:00:00 to 00:01:00 (HH:MM:SS). Let's assume it is
+ currently 00:01:15 and we have received 4 requests in the first window and 5
+ requests so far in the current window. The approximation to determine if the
+ request should pass works like this:
+
+ ```ts
+ limit = 10
+
+ // 4 requests from the old window, weighted, plus requests in the current window
+ rate = 4 * ((60 - 15) / 60) + 5 = 8
+
+ return rate < limit // True means we should allow the request
+ ```
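+
+ The same calculation as runnable TypeScript (a sketch only; the library
+ performs this check inside a Redis Lua script, not in application code):
+
+ ```ts
+ const limit = 10;
+ const windowSeconds = 60;
+ const elapsedInCurrent = 15; // we are 15 s into the current window
+ const previousWindowRequests = 4;
+ const currentWindowRequests = 5;
+
+ // Weight the previous window by the share of it still covered by the rolling window.
+ const weight = (windowSeconds - elapsedInCurrent) / windowSeconds; // 0.75
+ const rate = previousWindowRequests * weight + currentWindowRequests; // 3 + 5 = 8
+
+ const allowed = rate < limit; // true -> the request may pass
+ console.log({ rate, allowed });
+ ```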
+
+ #### Pros:
+
+ - Solves the boundary issue of the fixed window algorithm.
+
+ #### Cons:
+
+ - More expensive in terms of storage and computation
+ - Is only an approximation, because it assumes a uniform request flow in the
+   previous window, but this is fine in most cases
+
+ #### Usage:
+
+ Create a new ratelimiter that allows 10 requests per 10 seconds.
+
+ ```ts
+ const ratelimit = new Ratelimit({
+   redis: Redis.fromEnv(),
+   limiter: Ratelimit.slidingWindow(10, "10 s"),
+ });
+ ```
+
+ ### Token Bucket
+
+ Consider a bucket filled with `{maxTokens}` tokens that refills constantly at
+ `{refillRate}` per `{interval}`. Every request removes one token from the
+ bucket, and if there is no token to take, the request is rejected.
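+
+ The refill-and-take logic looks roughly like this (an in-memory sketch for
+ illustration only; the actual implementation runs as a Redis Lua script):
+
+ ```ts
+ type Bucket = { tokens: number; updatedAt: number };
+ const buckets = new Map<string, Bucket>();
+
+ function take(identifier: string, maxTokens: number, refillRate: number, intervalMs: number): boolean {
+   const now = Date.now();
+   const bucket = buckets.get(identifier) ?? { tokens: maxTokens, updatedAt: now };
+
+   // Refill for every full interval that has passed since the last update.
+   const intervalsPassed = Math.floor((now - bucket.updatedAt) / intervalMs);
+   if (intervalsPassed > 0) {
+     bucket.tokens = Math.min(maxTokens, bucket.tokens + intervalsPassed * refillRate);
+     bucket.updatedAt = now;
+   }
+
+   if (bucket.tokens <= 0) {
+     buckets.set(identifier, bucket);
+     return false; // no token left, reject
+   }
+   bucket.tokens -= 1;
+   buckets.set(identifier, bucket);
+   return true;
+ }
+ ```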
+
+ #### Pros:
+
+ - Bursts of requests are smoothed out and you can process them at a constant
+   rate.
+ - Allows setting a higher initial burst limit by setting `maxTokens` higher than
+   `refillRate`
+
+ #### Cons:
+
+ - Expensive in terms of computation
+
+ #### Usage:
+
+ Create a new bucket that refills 5 tokens every 10 seconds and has a maximum
+ size of 10.
+
+ ```ts
+ const ratelimit = new Ratelimit({
+   redis: Redis.fromEnv(),
+   limiter: Ratelimit.tokenBucket(5, "10 s", 10),
+ });
+ ```
+
+ ## Contributing
+
+ ### [Install Deno](https://deno.land/#installation)
+
+ ### Database
+
+ Create a new Redis database on [Upstash](https://console.upstash.com/) and copy
+ the URL and token.
+
+ ### Running tests
+
+ ```sh
+ UPSTASH_REDIS_REST_URL=".." UPSTASH_REDIS_REST_TOKEN=".." deno test -A
+ ```
package/esm/duration.js ADDED
@@ -0,0 +1,21 @@
+ /**
+  * Convert a human readable duration (e.g. "10 s", "5 m") to milliseconds
+  */
+ export function ms(d) {
+   const [timeString, duration] = d.split(" ");
+   const time = parseFloat(timeString);
+   switch (duration) {
+     case "ms":
+       return time;
+     case "s":
+       return time * 1000;
+     case "m":
+       return time * 1000 * 60;
+     case "h":
+       return time * 1000 * 60 * 60;
+     case "d":
+       return time * 1000 * 60 * 60 * 24;
+     default:
+       throw new Error(`Unable to parse window size: ${d}`);
+   }
+ }
package/esm/mod.js ADDED
@@ -0,0 +1,2 @@
+ export * from "./ratelimiter.js";
+ export * from "./types.js";
package/esm/package.json ADDED
@@ -0,0 +1,3 @@
+ {
+   "type": "module"
+ }
package/esm/ratelimiter.js ADDED
@@ -0,0 +1,384 @@
+ import { ms } from "./duration.js";
+ /**
+  * Ratelimiter using serverless redis from https://upstash.com/
+  *
+  * @example
+  * ```ts
+  * const { limit } = new Ratelimit({
+  *   redis: Redis.fromEnv(),
+  *   limiter: Ratelimit.slidingWindow(
+  *     10,     // Allow 10 requests per window of 30 minutes
+  *     "30 m", // interval of 30 minutes
+  *   )
+  * })
+  *
+  * ```
+  */
+ export class Ratelimit {
+   /**
+    * Create a new Ratelimit instance by providing a `@upstash/redis` instance and the algorithm of your choice.
+    */
+   constructor(config) {
+     Object.defineProperty(this, "redis", {
+       enumerable: true,
+       configurable: true,
+       writable: true,
+       value: void 0
+     });
+     Object.defineProperty(this, "limiter", {
+       enumerable: true,
+       configurable: true,
+       writable: true,
+       value: void 0
+     });
+     Object.defineProperty(this, "prefix", {
+       enumerable: true,
+       configurable: true,
+       writable: true,
+       value: void 0
+     });
+     /**
+      * Determine if a request should pass or be rejected based on the identifier and previously chosen ratelimit.
+      *
+      * Use this if you want to reject all requests that you cannot handle right now.
+      *
+      * @example
+      * ```ts
+      * const ratelimit = new Ratelimit({
+      *   redis: Redis.fromEnv(),
+      *   limiter: Ratelimit.slidingWindow(10, "10 s")
+      * })
+      *
+      * const { success } = await ratelimit.limit(id)
+      * if (!success){
+      *   return "Nope"
+      * }
+      * return "Yes"
+      * ```
+      */
+     Object.defineProperty(this, "limit", {
+       enumerable: true,
+       configurable: true,
+       writable: true,
+       value: async (identifier) => {
+         const key = [this.prefix, identifier].join(":");
+         return await this.limiter({ redis: this.redis }, key);
+       }
+     });
+     /**
+      * Block until the request may pass or the timeout is reached.
+      *
+      * This method returns a promise that resolves as soon as the request may be processed
+      * or after the timeout has been reached.
+      *
+      * Use this if you want to delay the request until it is ready to get processed.
+      *
+      * @example
+      * ```ts
+      * const ratelimit = new Ratelimit({
+      *   redis: Redis.fromEnv(),
+      *   limiter: Ratelimit.slidingWindow(10, "10 s")
+      * })
+      *
+      * const { success } = await ratelimit.blockUntilReady(id, 60_000)
+      * if (!success){
+      *   return "Nope"
+      * }
+      * return "Yes"
+      * ```
+      */
+     Object.defineProperty(this, "blockUntilReady", {
+       enumerable: true,
+       configurable: true,
+       writable: true,
+       value: async (
+       /**
+        * An identifier per user or API.
+        * Choose a userID, an API token or an IP address.
+        *
+        * If you want to globally limit your API, you can set a constant string.
+        */
+       identifier,
+       /**
+        * Maximum duration to wait in milliseconds.
+        * After this time the request will be denied.
+        */
+       timeout) => {
+         if (timeout <= 0) {
+           throw new Error("timeout must be positive");
+         }
+         let res;
+         const deadline = Date.now() + timeout;
+         while (true) {
+           res = await this.limit(identifier);
+           if (res.success) {
+             break;
+           }
+           if (res.reset === 0) {
+             throw new Error("This should not happen");
+           }
+           const wait = Math.min(res.reset, deadline) - Date.now();
+           await new Promise((r) => setTimeout(r, wait));
+           if (Date.now() > deadline) {
+             break;
+           }
+         }
+         return res;
+       }
+     });
+     this.redis = config.redis;
+     this.limiter = config.limiter;
+     this.prefix = config.prefix ?? "@upstash/ratelimit";
+   }
+   /**
+    * Each request inside a fixed time window increases a counter.
+    * Once the counter reaches a maximum allowed number, all further requests are
+    * rejected.
+    *
+    * **Pro:**
+    *
+    * - Newer requests are not starved by old ones.
+    * - Low storage cost.
+    *
+    * **Con:**
+    *
+    * A burst of requests near the boundary of a window can result in a very
+    * high request rate because two windows will be filled with requests quickly.
+    *
+    * @param tokens - How many requests a user can make in each time window.
+    * @param window - A fixed timeframe
+    */
+   static fixedWindow(
+   /**
+    * How many requests are allowed per window.
+    */
+   tokens,
+   /**
+    * The duration in which `tokens` requests are allowed.
+    */
+   window) {
+     const windowDuration = ms(window);
+     const script = `
+     local key = KEYS[1]
+     local window = ARGV[1]
+
+     local r = redis.call("INCR", key)
+     if r == 1 then
+       -- The first time this key is set, the value will be 1.
+       -- So we only need the expire command once
+       redis.call("PEXPIRE", key, window)
+     end
+
+     return r
+     `;
+     return async function (ctx, identifier) {
+       const bucket = Math.floor(Date.now() / windowDuration);
+       const key = [identifier, bucket].join(":");
+       const usedTokensAfterUpdate = (await ctx.redis.eval(script, [key], [windowDuration]));
+       return {
+         success: usedTokensAfterUpdate <= tokens,
+         limit: tokens,
+         remaining: tokens - usedTokensAfterUpdate,
+         reset: (bucket + 1) * windowDuration,
+       };
+     };
+   }
+   // /**
+   //  * For each request all past requests in the last `{window}` are summed up and
+   //  * if they exceed `{tokens}`, the request will be rejected.
+   //  *
+   //  * **Pro:**
+   //  *
+   //  * Does not have the problem of `fixedWindow` at the window boundaries.
+   //  *
+   //  * **Con:**
+   //  *
+   //  * More expensive to store and compute, which makes this unsuitable for apis
+   //  * with very high traffic.
+   //  *
+   //  * @param window - The duration in which the user can max X requests.
+   //  * @param tokens - How many requests a user can make in each time window.
+   //  */
+   // static slidingLogs(window: Duration, tokens: number): Ratelimiter {
+   //   const script = `
+   //   local key = KEYS[1] -- identifier including prefixes
+   //   local windowStart = ARGV[1] -- timestamp of window start
+   //   local windowEnd = ARGV[2] -- timestamp of window end
+   //   local tokens = ARGV[3] -- tokens per window
+   //   local now = ARGV[4] -- current timestamp
+   //   local count = redis.call("ZCOUNT", key, windowStart, windowEnd)
+   //   if count < tonumber(tokens) then
+   //     -- Log the current request
+   //     redis.call("ZADD", key, now, now)
+   //     -- Remove all previous requests that are outside the window
+   //     redis.call("ZREMRANGEBYSCORE", key, "-inf", windowStart - 1)
+   //   end
+   //   return count
+   //   `;
+   //   return async function (ctx: Context, identifier: string) {
+   //     const windowEnd = Date.now();
+   //     const windowStart = windowEnd - ms(window);
+   //     const count = (await ctx.redis.eval(
+   //       script,
+   //       [identifier],
+   //       [windowStart, windowEnd, tokens, Date.now()]
+   //     )) as number;
+   //     return {
+   //       success: count < tokens,
+   //       limit: tokens,
+   //       remaining: Math.max(0, tokens - count - 1),
+   //       reset: windowEnd,
+   //     };
+   //   };
+   // }
+   /**
+    * Combined approach of `slidingLogs` and `fixedWindow` with lower storage
+    * costs than `slidingLogs` and improved boundary behavior by calculating a
+    * weighted score between two windows.
+    *
+    * **Pro:**
+    *
+    * Good performance allows this to scale to very high loads.
+    *
+    * **Con:**
+    *
+    * Nothing major.
+    *
+    * @param tokens - How many requests a user can make in each time window.
+    * @param window - The duration in which the user can max X requests.
+    */
+   static slidingWindow(
+   /**
+    * How many requests are allowed per window.
+    */
+   tokens,
+   /**
+    * The duration in which `tokens` requests are allowed.
+    */
+   window) {
+     const script = `
+     local currentKey = KEYS[1] -- identifier including prefixes
+     local previousKey = KEYS[2] -- key of the previous bucket
+     local tokens = tonumber(ARGV[1]) -- tokens per window
+     local now = ARGV[2] -- current timestamp in milliseconds
+     local window = ARGV[3] -- interval in milliseconds
+
+     local requestsInCurrentWindow = redis.call("GET", currentKey)
+     if requestsInCurrentWindow == false then
+       requestsInCurrentWindow = 0
+     end
+
+     local requestsInPreviousWindow = redis.call("GET", previousKey)
+     if requestsInPreviousWindow == false then
+       requestsInPreviousWindow = 0
+     end
+     local percentageInCurrent = ( now % window ) / window
+     if requestsInPreviousWindow * ( 1 - percentageInCurrent ) + requestsInCurrentWindow >= tokens then
+       return 0
+     end
+
+     local newValue = redis.call("INCR", currentKey)
+     if newValue == 1 then
+       -- The first time this key is set, the value will be 1.
+       -- So we only need the expire command once
+       redis.call("PEXPIRE", currentKey, window * 2 + 1000) -- Enough time to overlap with a new window + 1 second
+     end
+     return tokens - newValue
+     `;
+     const windowSize = ms(window);
+     return async function (ctx, identifier) {
+       const now = Date.now();
+       const currentWindow = Math.floor(now / windowSize);
+       const currentKey = [identifier, currentWindow].join(":");
+       // The previous bucket is the window immediately before the current one.
+       const previousWindow = currentWindow - 1;
+       const previousKey = [identifier, previousWindow].join(":");
+       const remaining = (await ctx.redis.eval(script, [currentKey, previousKey], [tokens, now, windowSize]));
+       return {
+         success: remaining > 0,
+         limit: tokens,
+         remaining,
+         reset: (currentWindow + 1) * windowSize,
+       };
+     };
+   }
+   /**
+    * You have a bucket filled with `{maxTokens}` tokens that refills constantly
+    * at `{refillRate}` per `{interval}`.
+    * Every request will remove one token from the bucket and if there is no
+    * token to take, the request is rejected.
+    *
+    * **Pro:**
+    *
+    * - Bursts of requests are smoothed out and you can process them at a constant
+    *   rate.
+    * - Allows setting a higher initial burst limit by setting `maxTokens` higher
+    *   than `refillRate`
+    *
+    * **Usage of Upstash Redis requests:**
+    */
+   static tokenBucket(
+   /**
+    * How many tokens are refilled per `interval`
+    *
+    * An interval of `10 s` and a refillRate of 5 will add 5 new tokens every 10 seconds.
+    */
+   refillRate,
+   /**
+    * The interval for the `refillRate`
+    */
+   interval,
+   /**
+    * Maximum number of tokens.
+    * A newly created bucket starts with this many tokens.
+    * Useful to allow higher burst limits.
+    */
+   maxTokens) {
+     const script = `
+     local key = KEYS[1] -- identifier including prefixes
+     local maxTokens = tonumber(ARGV[1]) -- maximum number of tokens
+     local interval = tonumber(ARGV[2]) -- size of the window in milliseconds
+     local refillRate = tonumber(ARGV[3]) -- how many tokens are refilled after each interval
+     local now = tonumber(ARGV[4]) -- current timestamp in milliseconds
+     local remaining = 0
+
+     local bucket = redis.call("HMGET", key, "updatedAt", "tokens")
+
+     if bucket[1] == false then
+       -- The bucket does not exist yet, so we create it and add a ttl.
+       remaining = maxTokens - 1
+
+       redis.call("HMSET", key, "updatedAt", now, "tokens", remaining)
+       redis.call("PEXPIRE", key, interval)
+
+       return {remaining, now + interval}
+     end
+
+     -- The bucket does exist
+
+     local updatedAt = tonumber(bucket[1])
+     local tokens = tonumber(bucket[2])
+
+     if now >= updatedAt + interval then
+       remaining = math.min(maxTokens, tokens + refillRate) - 1
+
+       redis.call("HMSET", key, "updatedAt", now, "tokens", remaining)
+       return {remaining, now + interval}
+     end
+
+     if tokens > 0 then
+       remaining = tokens - 1
+       redis.call("HMSET", key, "updatedAt", now, "tokens", remaining)
+     end
+
+     return {remaining, updatedAt + interval}
+     `;
+     const intervalDuration = ms(interval);
+     return async function (ctx, identifier) {
+       const now = Date.now();
+       const key = [identifier, Math.floor(now / intervalDuration)].join(":");
+       const [remaining, reset] = (await ctx.redis.eval(script, [key], [maxTokens, intervalDuration, refillRate, now]));
+       return { success: remaining > 0, limit: maxTokens, remaining, reset };
+     };
+   }
+ }
package/esm/types.js ADDED
@@ -0,0 +1 @@
+ export {};