@upstash/ratelimit 0.0.0-ci.f88fdef75b804d1f115e9d3e7bca57c88df0c108-20241006173348

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js ADDED
@@ -0,0 +1,1665 @@
1
"use strict";
// esbuild-generated CommonJS interop helpers. Do not edit by hand; they
// are re-emitted on every build.
var __defProp = Object.defineProperty;
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __hasOwnProp = Object.prototype.hasOwnProperty;
// Define every entry of `all` on `target` as a lazy, enumerable getter.
var __export = (target, all) => {
  for (var name in all)
    __defProp(target, name, { get: all[name], enumerable: true });
};
// Copy own properties of `from` onto `to` as getters, skipping `except`
// and anything `to` already owns; source enumerability is preserved.
var __copyProps = (to, from, except, desc) => {
  if (from && typeof from === "object" || typeof from === "function") {
    for (let key of __getOwnPropNames(from))
      if (!__hasOwnProp.call(to, key) && key !== except)
        __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
  }
  return to;
};
// Mark the namespace object as an ES module and expose its bindings to
// CommonJS consumers.
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
19
+
20
// src/index.ts
// Public package surface: analytics, the IpDenyList helper namespace and
// the two rate limiter flavors. `RegionRatelimit` is defined further down
// in this bundle and re-exported under the public name `Ratelimit`.
var src_exports = {};
__export(src_exports, {
  Analytics: () => Analytics,
  IpDenyList: () => ip_deny_list_exports,
  MultiRegionRatelimit: () => MultiRegionRatelimit,
  Ratelimit: () => RegionRatelimit
});
module.exports = __toCommonJS(src_exports);
29
+
30
// src/analytics.ts
var import_core_analytics = require("@upstash/core-analytics");

/**
 * Records and queries rate limit events through `@upstash/core-analytics`,
 * writing everything into a single "events" table with hourly buckets and
 * 90 day retention.
 */
var Analytics = class {
  analytics;
  table = "events";
  constructor(config) {
    this.analytics = new import_core_analytics.Analytics({
      // @ts-expect-error we need to fix the types in core-analytics, it should only require the methods it needs, not the whole sdk
      redis: config.redis,
      window: "1h",
      prefix: config.prefix ?? "@upstash/ratelimit",
      retention: "90d"
    });
  }
  /**
   * Try to extract the geo information from the request
   *
   * This handles Vercel's `req.geo` and Cloudflare's `request.cf` properties
   * @param req
   * @returns the geo object, or an empty object when neither property is set
   */
  extractGeo(req) {
    if (req.geo !== void 0) return req.geo;
    if (req.cf !== void 0) return req.cf;
    return {};
  }
  /** Ingest a single rate limit event into the events table. */
  async record(event) {
    await this.analytics.ingest(this.table, event);
  }
  /** Aggregate events matching `filter` from `cutoff` until now (capped at 256 hourly buckets). */
  async series(filter, cutoff) {
    const bucketCount = Math.min(
      (this.analytics.getBucket(Date.now()) - this.analytics.getBucket(cutoff)) / (60 * 60 * 1e3),
      256
    );
    return this.analytics.aggregateBucketsWithPipeline(this.table, filter, bucketCount);
  }
  /** Allowed/blocked totals since `cutoff` (capped at 256 hourly buckets). */
  async getUsage(cutoff = 0) {
    const bucketCount = Math.min(
      (this.analytics.getBucket(Date.now()) - this.analytics.getBucket(cutoff)) / (60 * 60 * 1e3),
      256
    );
    return await this.analytics.getAllowedBlocked(this.table, bucketCount);
  }
  /** Usage aggregated over the last `timestampCount` buckets, grouped by `groupby`. */
  async getUsageOverTime(timestampCount, groupby) {
    return await this.analytics.aggregateBucketsWithPipeline(this.table, groupby, timestampCount);
  }
  /** Identifiers with the most allowed/blocked requests; `getTop` defaults to 5. */
  async getMostAllowedBlocked(timestampCount, getTop, checkAtMost) {
    const top = getTop ?? 5;
    // timestamp is intentionally left undefined: query the latest buckets.
    return this.analytics.getMostAllowedBlocked(this.table, timestampCount, top, void 0, checkAtMost);
  }
};
88
+
89
// src/cache.ts
/**
 * Minimal in-process cache backed by a Map, used both for client side
 * blocking (identifier -> reset timestamp in milliseconds) and as a
 * simple counter store.
 */
var Cache = class {
  cache;
  constructor(cache) {
    this.cache = cache;
  }
  /**
   * Whether `identifier` is currently blocked. Expired entries are
   * evicted lazily on lookup.
   */
  isBlocked(identifier) {
    if (!this.cache.has(identifier)) {
      return { blocked: false, reset: 0 };
    }
    const reset = this.cache.get(identifier);
    if (reset < Date.now()) {
      this.cache.delete(identifier);
      return { blocked: false, reset: 0 };
    }
    return { blocked: true, reset };
  }
  /** Block `identifier` until the `reset` timestamp (milliseconds). */
  blockUntil(identifier, reset) {
    this.cache.set(identifier, reset);
  }
  set(key, value) {
    this.cache.set(key, value);
  }
  // NOTE: falsy stored values (e.g. 0) are reported as null by design.
  get(key) {
    return this.cache.get(key) || null;
  }
  /** Increment the counter stored under `key` (starting from 0) and return it. */
  incr(key) {
    const next = (this.cache.get(key) ?? 0) + 1;
    this.cache.set(key, next);
    return next;
  }
  /** Remove `key` from the cache. */
  pop(key) {
    this.cache.delete(key);
  }
  /** Drop every entry. */
  empty() {
    this.cache.clear();
  }
  /** Number of entries currently cached. */
  size() {
    return this.cache.size;
  }
};
134
+
135
// src/duration.ts
/**
 * Convert a human readable duration (e.g. "10 s", "5m", "1 d") into
 * milliseconds.
 *
 * Accepted units: ms, s, m, h, d — with at most one space between the
 * number and the unit.
 *
 * @param d duration string to parse
 * @returns the duration in milliseconds
 * @throws Error when the string does not match `<number><unit>`
 */
function ms(d) {
  const match = d.match(/^(\d+)\s?(ms|s|m|h|d)$/);
  if (!match) {
    throw new Error(`Unable to parse window size: ${d}`);
  }
  const time = Number.parseInt(match[1], 10); // explicit radix
  const unit = match[2];
  switch (unit) {
    case "ms": {
      return time;
    }
    case "s": {
      return time * 1e3;
    }
    case "m": {
      return time * 1e3 * 60;
    }
    case "h": {
      return time * 1e3 * 60 * 60;
    }
    case "d": {
      return time * 1e3 * 60 * 60 * 24;
    }
    default: {
      // Unreachable: the regex restricts `unit` to the cases above.
      throw new Error(`Unable to parse window size: ${d}`);
    }
  }
}
164
+
165
// src/hash.ts
/**
 * Run a cached lua script via EVALSHA, falling back to SCRIPT LOAD +
 * EVALSHA when Redis replies with NOSCRIPT (script not cached yet).
 * Any other error is rethrown untouched.
 */
var safeEval = async (ctx, script, keys, args) => {
  try {
    return await ctx.redis.evalsha(script.hash, keys, args);
  } catch (error) {
    const isNoScript = `${error}`.includes("NOSCRIPT");
    if (!isNoScript) {
      throw error;
    }
    const loadedHash = await ctx.redis.scriptLoad(script.script);
    if (loadedHash !== script.hash) {
      // A mismatch means the precomputed hash is stale: every call will
      // now pay the NOSCRIPT round trip.
      console.warn(
        "Upstash Ratelimit: Expected hash and the hash received from Redis are different. Ratelimit will work as usual but performance will be reduced."
      );
    }
    return await ctx.redis.evalsha(loadedHash, keys, args);
  }
};
182
+
183
// src/lua-scripts/single.ts
// Lua scripts executed atomically inside Redis for the single-region
// algorithms. The SHA-1 digests precomputed in SCRIPTS (src/lua-scripts/hash.ts)
// are derived from these exact strings — any byte change here must be
// mirrored there.

// Fixed window: INCRBY the window counter, arming the expiry only on the
// window's first increment. Returns the new counter value.
var fixedWindowLimitScript = `
local key = KEYS[1]
local window = ARGV[1]
local incrementBy = ARGV[2] -- increment rate per request at a given value, default is 1

local r = redis.call("INCRBY", key, incrementBy)
if r == tonumber(incrementBy) then
-- The first time this key is set, the value will be equal to incrementBy.
-- So we only need the expire command once
redis.call("PEXPIRE", key, window)
end

return r
`;
// Fixed window: read the current counter, 0 when the key is absent.
var fixedWindowRemainingTokensScript = `
local key = KEYS[1]
local tokens = 0

local value = redis.call('GET', key)
if value then
tokens = value
end
return tokens
`;
// Sliding window: weight the previous window's count by the remaining
// overlap, reject with -1 when the combined count reaches the limit,
// otherwise increment the current window and return the remaining tokens.
var slidingWindowLimitScript = `
local currentKey = KEYS[1] -- identifier including prefixes
local previousKey = KEYS[2] -- key of the previous bucket
local tokens = tonumber(ARGV[1]) -- tokens per window
local now = ARGV[2] -- current timestamp in milliseconds
local window = ARGV[3] -- interval in milliseconds
local incrementBy = ARGV[4] -- increment rate per request at a given value, default is 1

local requestsInCurrentWindow = redis.call("GET", currentKey)
if requestsInCurrentWindow == false then
requestsInCurrentWindow = 0
end

local requestsInPreviousWindow = redis.call("GET", previousKey)
if requestsInPreviousWindow == false then
requestsInPreviousWindow = 0
end
local percentageInCurrent = ( now % window ) / window
-- weighted requests to consider from the previous window
requestsInPreviousWindow = math.floor(( 1 - percentageInCurrent ) * requestsInPreviousWindow)
if requestsInPreviousWindow + requestsInCurrentWindow >= tokens then
return -1
end

local newValue = redis.call("INCRBY", currentKey, incrementBy)
if newValue == tonumber(incrementBy) then
-- The first time this key is set, the value will be equal to incrementBy.
-- So we only need the expire command once
redis.call("PEXPIRE", currentKey, window * 2 + 1000) -- Enough time to overlap with a new window + 1 second
end
return tokens - ( newValue + requestsInPreviousWindow )
`;
// Sliding window: read-only variant returning the weighted usage of the
// previous window plus the current window's count.
var slidingWindowRemainingTokensScript = `
local currentKey = KEYS[1] -- identifier including prefixes
local previousKey = KEYS[2] -- key of the previous bucket
local now = ARGV[1] -- current timestamp in milliseconds
local window = ARGV[2] -- interval in milliseconds

local requestsInCurrentWindow = redis.call("GET", currentKey)
if requestsInCurrentWindow == false then
requestsInCurrentWindow = 0
end

local requestsInPreviousWindow = redis.call("GET", previousKey)
if requestsInPreviousWindow == false then
requestsInPreviousWindow = 0
end

local percentageInCurrent = ( now % window ) / window
-- weighted requests to consider from the previous window
requestsInPreviousWindow = math.floor(( 1 - percentageInCurrent ) * requestsInPreviousWindow)

return requestsInPreviousWindow + requestsInCurrentWindow
`;
// Token bucket: lazily refill `refillRate` tokens per `interval`, reject
// with {-1, nextRefill} when empty, otherwise consume `incrementBy`
// tokens and return {remaining, nextRefill}. The key expires once a full
// refill back to maxTokens is possible.
var tokenBucketLimitScript = `
local key = KEYS[1] -- identifier including prefixes
local maxTokens = tonumber(ARGV[1]) -- maximum number of tokens
local interval = tonumber(ARGV[2]) -- size of the window in milliseconds
local refillRate = tonumber(ARGV[3]) -- how many tokens are refilled after each interval
local now = tonumber(ARGV[4]) -- current timestamp in milliseconds
local incrementBy = tonumber(ARGV[5]) -- how many tokens to consume, default is 1

local bucket = redis.call("HMGET", key, "refilledAt", "tokens")

local refilledAt
local tokens

if bucket[1] == false then
refilledAt = now
tokens = maxTokens
else
refilledAt = tonumber(bucket[1])
tokens = tonumber(bucket[2])
end

if now >= refilledAt + interval then
local numRefills = math.floor((now - refilledAt) / interval)
tokens = math.min(maxTokens, tokens + numRefills * refillRate)

refilledAt = refilledAt + numRefills * interval
end

if tokens == 0 then
return {-1, refilledAt + interval}
end

local remaining = tokens - incrementBy
local expireAt = math.ceil(((maxTokens - remaining) / refillRate)) * interval

redis.call("HSET", key, "refilledAt", refilledAt, "tokens", remaining)
redis.call("PEXPIRE", key, expireAt)
return {remaining, refilledAt + interval}
`;
// Sentinel returned by tokenBucketRemainingTokensScript when the bucket
// hash does not exist yet (interpolated into the script below).
var tokenBucketIdentifierNotFound = -1;
// Token bucket: read-only variant returning {tokens, refilledAt}, or
// {maxTokens, -1} when the bucket was never created.
var tokenBucketRemainingTokensScript = `
local key = KEYS[1]
local maxTokens = tonumber(ARGV[1])

local bucket = redis.call("HMGET", key, "refilledAt", "tokens")

if bucket[1] == false then
return {maxTokens, ${tokenBucketIdentifierNotFound}}
end

return {tonumber(bucket[2]), tonumber(bucket[1])}
`;
// Cached fixed window: same shape as fixedWindowLimitScript.
// NOTE(review): this variant compares `r == incrementBy` without
// tonumber, unlike fixedWindowLimitScript — presumably it relies on the
// caller passing a numeric incrementBy; confirm before unifying.
var cachedFixedWindowLimitScript = `
local key = KEYS[1]
local window = ARGV[1]
local incrementBy = ARGV[2] -- increment rate per request at a given value, default is 1

local r = redis.call("INCRBY", key, incrementBy)
if r == incrementBy then
-- The first time this key is set, the value will be equal to incrementBy.
-- So we only need the expire command once
redis.call("PEXPIRE", key, window)
end

return r
`;
// Cached fixed window: read the current counter, 0 when absent.
var cachedFixedWindowRemainingTokenScript = `
local key = KEYS[1]
local tokens = 0

local value = redis.call('GET', key)
if value then
tokens = value
end
return tokens
`;
338
+
339
// src/lua-scripts/multi.ts
// Lua scripts for the multi-region algorithms. Counters are stored as
// hashes of requestId -> increment so that regions can be merged; the
// scripts return the raw hash fields for client-side aggregation.
// The SHA-1 digests in SCRIPTS are precomputed from these exact strings.

// Multi-region fixed window: record this request's increment under its id
// and return all fields of the window hash. Expiry is armed only when the
// hash contains exactly this one freshly-written field.
var fixedWindowLimitScript2 = `
local key = KEYS[1]
local id = ARGV[1]
local window = ARGV[2]
local incrementBy = tonumber(ARGV[3])

redis.call("HSET", key, id, incrementBy)
local fields = redis.call("HGETALL", key)
if #fields == 2 and tonumber(fields[2])==incrementBy then
-- The first time this key is set, and the value will be equal to incrementBy.
-- So we only need the expire command once
redis.call("PEXPIRE", key, window)
end

return fields
`;
// Multi-region fixed window: read-only; returns the raw hash fields.
// NOTE(review): the `tokens` local is declared but unused here.
var fixedWindowRemainingTokensScript2 = `
local key = KEYS[1]
local tokens = 0

local fields = redis.call("HGETALL", key)

return fields
`;
// Multi-region sliding window: sum the per-request increments of both
// windows (hash fields alternate key/value, hence the step-2 loops),
// weight the previous window by the remaining overlap, and either reject
// ({current, previous, false}) or record this request and accept
// ({current, previous, true}).
var slidingWindowLimitScript2 = `
local currentKey = KEYS[1] -- identifier including prefixes
local previousKey = KEYS[2] -- key of the previous bucket
local tokens = tonumber(ARGV[1]) -- tokens per window
local now = ARGV[2] -- current timestamp in milliseconds
local window = ARGV[3] -- interval in milliseconds
local requestId = ARGV[4] -- uuid for this request
local incrementBy = tonumber(ARGV[5]) -- custom rate, default is 1

local currentFields = redis.call("HGETALL", currentKey)
local requestsInCurrentWindow = 0
for i = 2, #currentFields, 2 do
requestsInCurrentWindow = requestsInCurrentWindow + tonumber(currentFields[i])
end

local previousFields = redis.call("HGETALL", previousKey)
local requestsInPreviousWindow = 0
for i = 2, #previousFields, 2 do
requestsInPreviousWindow = requestsInPreviousWindow + tonumber(previousFields[i])
end

local percentageInCurrent = ( now % window) / window
if requestsInPreviousWindow * (1 - percentageInCurrent ) + requestsInCurrentWindow >= tokens then
return {currentFields, previousFields, false}
end

redis.call("HSET", currentKey, requestId, incrementBy)

if requestsInCurrentWindow == 0 then
-- The first time this key is set, the value will be equal to incrementBy.
-- So we only need the expire command once
redis.call("PEXPIRE", currentKey, window * 2 + 1000) -- Enough time to overlap with a new window + 1 second
end
return {currentFields, previousFields, true}
`;
// Multi-region sliding window: read-only weighted usage total.
var slidingWindowRemainingTokensScript2 = `
local currentKey = KEYS[1] -- identifier including prefixes
local previousKey = KEYS[2] -- key of the previous bucket
local now = ARGV[1] -- current timestamp in milliseconds
local window = ARGV[2] -- interval in milliseconds

local currentFields = redis.call("HGETALL", currentKey)
local requestsInCurrentWindow = 0
for i = 2, #currentFields, 2 do
requestsInCurrentWindow = requestsInCurrentWindow + tonumber(currentFields[i])
end

local previousFields = redis.call("HGETALL", previousKey)
local requestsInPreviousWindow = 0
for i = 2, #previousFields, 2 do
requestsInPreviousWindow = requestsInPreviousWindow + tonumber(previousFields[i])
end

local percentageInCurrent = ( now % window) / window
requestsInPreviousWindow = math.floor(( 1 - percentageInCurrent ) * requestsInPreviousWindow)

return requestsInCurrentWindow + requestsInPreviousWindow
`;
422
+
423
// src/lua-scripts/reset.ts
// Delete every key matching the given pattern using cursor-based SCAN so
// that resetting usage does not block Redis on large keyspaces.
// NOTE(review): SCAN uses the default COUNT, so each iteration inspects a
// small batch; the repeat/until loop covers the whole keyspace.
var resetScript = `
local pattern = KEYS[1]

-- Initialize cursor to start from 0
local cursor = "0"

repeat
-- Scan for keys matching the pattern
local scan_result = redis.call('SCAN', cursor, 'MATCH', pattern)

-- Extract cursor for the next iteration
cursor = scan_result[1]

-- Extract keys from the scan result
local keys = scan_result[2]

for i=1, #keys do
redis.call('DEL', keys[i])
end

-- Continue scanning until cursor is 0 (end of keyspace)
until cursor == "0"
`;
447
+
448
// src/lua-scripts/hash.ts
// Registry pairing each lua script with the SHA-1 digest Redis assigns it,
// so safeEval can call EVALSHA without a prior SCRIPT LOAD round trip.
// The hashes are precomputed from the exact script strings above; if a
// script body changes, its hash must be regenerated or every call pays a
// NOSCRIPT fallback (see the warning in safeEval).
var SCRIPTS = {
  singleRegion: {
    fixedWindow: {
      limit: {
        script: fixedWindowLimitScript,
        hash: "b13943e359636db027ad280f1def143f02158c13"
      },
      getRemaining: {
        script: fixedWindowRemainingTokensScript,
        hash: "8c4c341934502aee132643ffbe58ead3450e5208"
      }
    },
    slidingWindow: {
      limit: {
        script: slidingWindowLimitScript,
        hash: "e1391e429b699c780eb0480350cd5b7280fd9213"
      },
      getRemaining: {
        script: slidingWindowRemainingTokensScript,
        hash: "65a73ac5a05bf9712903bc304b77268980c1c417"
      }
    },
    tokenBucket: {
      limit: {
        script: tokenBucketLimitScript,
        hash: "5bece90aeef8189a8cfd28995b479529e270b3c6"
      },
      getRemaining: {
        script: tokenBucketRemainingTokensScript,
        hash: "a15be2bb1db2a15f7c82db06146f9d08983900d0"
      }
    },
    cachedFixedWindow: {
      limit: {
        script: cachedFixedWindowLimitScript,
        hash: "c26b12703dd137939b9a69a3a9b18e906a2d940f"
      },
      getRemaining: {
        script: cachedFixedWindowRemainingTokenScript,
        hash: "8e8f222ccae68b595ee6e3f3bf2199629a62b91a"
      }
    }
  },
  multiRegion: {
    fixedWindow: {
      limit: {
        script: fixedWindowLimitScript2,
        hash: "a8c14f3835aa87bd70e5e2116081b81664abcf5c"
      },
      getRemaining: {
        script: fixedWindowRemainingTokensScript2,
        hash: "8ab8322d0ed5fe5ac8eb08f0c2e4557f1b4816fd"
      }
    },
    slidingWindow: {
      limit: {
        script: slidingWindowLimitScript2,
        hash: "cb4fdc2575056df7c6d422764df0de3a08d6753b"
      },
      getRemaining: {
        script: slidingWindowRemainingTokensScript2,
        hash: "558c9306b7ec54abb50747fe0b17e5d44bd24868"
      }
    }
  }
};
// Script used by resetUsedTokens, with its precomputed digest.
var RESET_SCRIPT = {
  script: resetScript,
  hash: "54bd274ddc59fb3be0f42deee2f64322a10e2b50"
};
519
+
520
// src/types.ts
// Redis key fragments used by the deny list feature; joined with the
// ratelimit prefix to build the actual keys.
var DenyListExtension = "denyList";
var IpDenyListKey = "ipDenyList";
var IpDenyListStatusKey = "ipDenyListStatus";
524
+
525
// src/deny-list/scripts.ts
// Runs via EVAL (full script text, no precomputed hash). Returns the
// SMISMEMBER results for the checked members together with the TTL of the
// ip deny list status key (-1 disabled, -2 expired/needs refresh, >0 valid).
var checkDenyListScript = `
-- Checks if values provideed in ARGV are present in the deny lists.
-- This is done using the allDenyListsKey below.

-- Additionally, checks the status of the ip deny list using the
-- ipDenyListStatusKey below. Here are the possible states of the
-- ipDenyListStatusKey key:
-- * status == -1: set to "disabled" with no TTL
-- * status == -2: not set, meaning that is was set before but expired
-- * status > 0: set to "valid", with a TTL
--
-- In the case of status == -2, we set the status to "pending" with
-- 30 second ttl. During this time, the process which got status == -2
-- will update the ip deny list.

local allDenyListsKey = KEYS[1]
local ipDenyListStatusKey = KEYS[2]

local results = redis.call('SMISMEMBER', allDenyListsKey, unpack(ARGV))
local status = redis.call('TTL', ipDenyListStatusKey)
if status == -2 then
redis.call('SETEX', ipDenyListStatusKey, 30, "pending")
end

return { results, status }
`;
552
+
553
// src/deny-list/ip-deny-list.ts
// Namespace object exported publicly as `IpDenyList`: helpers for
// maintaining the automatic ip deny list.
var ip_deny_list_exports = {};
__export(ip_deny_list_exports, {
  ThresholdError: () => ThresholdError,
  disableIpDenyList: () => disableIpDenyList,
  updateIpDenyList: () => updateIpDenyList
});
560
+
561
// src/deny-list/time.ts
var MILLISECONDS_IN_HOUR = 60 * 60 * 1e3;
var MILLISECONDS_IN_DAY = 24 * MILLISECONDS_IN_HOUR;
var MILLISECONDS_TO_2AM = 2 * MILLISECONDS_IN_HOUR;
/**
 * Milliseconds remaining until the next 02:00 UTC, used as the TTL of the
 * refreshed ip deny list. A falsy `time` (including 0) falls back to
 * Date.now().
 */
var getIpListTTL = (time) => {
  const reference = time || Date.now();
  const elapsedSince2AM = (reference - MILLISECONDS_TO_2AM) % MILLISECONDS_IN_DAY;
  return MILLISECONDS_IN_DAY - elapsedSince2AM;
};
570
+
571
// src/deny-list/ip-deny-list.ts
// Base url of the ipsum threat feed; `<baseUrl>/<threshold>.txt` lists
// ips reported by at least `threshold` sources.
var baseUrl = "https://raw.githubusercontent.com/stamparm/ipsum/master/levels";
/** Error thrown when the ipsum threshold is outside the inclusive 1..8 range. */
var ThresholdError = class extends Error {
  constructor(threshold) {
    const message = `Allowed threshold values are from 1 to 8, 1 and 8 included. Received: ${threshold}`;
    super(message);
    this.name = "ThresholdError";
  }
};
579
/**
 * Fetch the ip deny list from the ipsum feed for the given threshold.
 *
 * @param threshold number between 1 and 8 (inclusive); ips reported by at
 *   least this many sources are included
 * @returns list of non-empty lines (one ip per line)
 * @throws ThresholdError when threshold is out of range
 * @throws Error when the fetch fails or returns a non-ok status; the
 *   original error is preserved as `cause`
 */
var getIpDenyList = async (threshold) => {
  if (typeof threshold !== "number" || threshold < 1 || threshold > 8) {
    throw new ThresholdError(threshold);
  }
  try {
    const response = await fetch(`${baseUrl}/${threshold}.txt`);
    if (!response.ok) {
      throw new Error(`Error fetching data: ${response.statusText}`);
    }
    const data = await response.text();
    const lines = data.split("\n");
    return lines.filter((value) => value.length > 0);
  } catch (error) {
    // Preserve the original error (and its stack) instead of flattening
    // it into the message only.
    throw new Error(`Failed to fetch ip deny list: ${error}`, { cause: error });
  }
};
595
// Refresh the automatic ip deny list from the ipsum feed inside a single
// MULTI transaction. Command order is significant:
// 1. remove the old ip list's members from the aggregate "all" set
// 2. rebuild the ip deny list from the freshly fetched ips
// 3. drop ips the user already denies manually (keeps "all" free of dupes)
// 4. merge the ip deny list back into the "all" set
// 5. mark the status key "valid" until the next 2 AM UTC (or custom ttl)
var updateIpDenyList = async (redis, prefix, threshold, ttl) => {
  const allIps = await getIpDenyList(threshold);
  const allDenyLists = [prefix, DenyListExtension, "all"].join(":");
  const ipDenyList = [prefix, DenyListExtension, IpDenyListKey].join(":");
  const statusKey = [prefix, IpDenyListStatusKey].join(":");
  const transaction = redis.multi();
  transaction.sdiffstore(allDenyLists, allDenyLists, ipDenyList);
  transaction.del(ipDenyList);
  transaction.sadd(ipDenyList, ...allIps);
  transaction.sdiffstore(ipDenyList, ipDenyList, allDenyLists);
  transaction.sunionstore(allDenyLists, allDenyLists, ipDenyList);
  transaction.set(statusKey, "valid", { px: ttl ?? getIpListTTL() });
  return await transaction.exec();
};
609
/**
 * Turn the automatic ip deny list off: removes its members from the
 * aggregate deny list, deletes the ip list itself and pins the status key
 * to "disabled".
 */
var disableIpDenyList = async (redis, prefix) => {
  const joinKey = (...parts) => parts.join(":");
  const allDenyListsKey = joinKey(prefix, DenyListExtension, "all");
  const ipDenyListKey = joinKey(prefix, DenyListExtension, IpDenyListKey);
  const statusKey = joinKey(prefix, IpDenyListStatusKey);
  const transaction = redis.multi();
  // Keep manually denied members: subtract the ip list from the aggregate
  // set before dropping it.
  transaction.sdiffstore(allDenyListsKey, allDenyListsKey, ipDenyListKey);
  transaction.del(ipDenyListKey);
  // Stored without a TTL so checkDenyListScript reads -1 (disabled)
  // instead of -2 (expired).
  transaction.set(statusKey, "disabled");
  return await transaction.exec();
};
619
+
620
// src/deny-list/deny-list.ts
// Process-local cache of members already known to be denied; saves a
// redis round trip for repeat offenders for up to one minute.
var denyListCache = new Cache(/* @__PURE__ */ new Map());
/** First member the local cache currently blocks, or undefined. */
var checkDenyListCache = (members) => members.find(
  (member) => denyListCache.isBlocked(member).blocked
);
/** Remember `member` as denied for one minute, capping cache growth. */
var blockMember = (member) => {
  // Crude size cap: flush everything once the cache exceeds 1000 entries.
  if (denyListCache.size() > 1e3)
    denyListCache.empty();
  denyListCache.blockUntil(member, Date.now() + 6e4);
};
632
/**
 * Check `members` against the merged deny list in redis.
 *
 * @param redis primary redis instance
 * @param prefix ratelimit key prefix
 * @param members identifier/ip/userAgent/country values to check
 * @returns the last denied member (if any) and whether the ip deny list
 *   has expired and needs a refresh
 */
var checkDenyList = async (redis, prefix, members) => {
  const [deniedValues, ipDenyListStatus] = await redis.eval(
    checkDenyListScript,
    [
      [prefix, DenyListExtension, "all"].join(":"),
      [prefix, IpDenyListStatusKey].join(":")
    ],
    members
  );
  let deniedValue;
  // `deniedValues[i]` is truthy when `members[i]` is denied. Cache every
  // hit locally; the last hit wins as the reported deniedValue.
  // (Was `.map` used purely for side effects.)
  deniedValues.forEach((memberDenied, index) => {
    if (memberDenied) {
      blockMember(members[index]);
      deniedValue = members[index];
    }
  });
  return {
    deniedValue,
    // TTL -2 means the status key expired: this caller should refresh the
    // ip deny list (see checkDenyListScript).
    invalidIpDenyList: ipDenyListStatus === -2
  };
};
653
/**
 * Merge the limiter result with the deny list verdict: a deny list hit
 * overrides the limiter outcome, and an expired ip deny list chains a
 * background refresh onto the response's `pending` promise.
 * Mutates and returns `ratelimitResponse`.
 */
var resolveLimitPayload = (redis, prefix, [ratelimitResponse, denyListResponse], threshold) => {
  const { deniedValue, invalidIpDenyList } = denyListResponse;
  if (deniedValue) {
    ratelimitResponse.success = false;
    ratelimitResponse.remaining = 0;
    ratelimitResponse.reason = "denyList";
    ratelimitResponse.deniedValue = deniedValue;
  }
  if (invalidIpDenyList) {
    ratelimitResponse.pending = Promise.all([
      ratelimitResponse.pending,
      updateIpDenyList(redis, prefix, threshold)
    ]);
  }
  return ratelimitResponse;
};
669
/**
 * Canonical "rejected by the deny list" response, used when the local
 * cache already knows a member is denied and redis is skipped entirely.
 */
var defaultDeniedResponse = (deniedValue) => ({
  success: false,
  limit: 0,
  remaining: 0,
  reset: 0,
  pending: Promise.resolve(),
  reason: "denyList",
  deniedValue
});
680
+
681
+ // src/ratelimit.ts
682
+ var Ratelimit = class {
683
+ limiter;
684
+ ctx;
685
+ prefix;
686
+ timeout;
687
+ primaryRedis;
688
+ analytics;
689
+ enableProtection;
690
+ denyListThreshold;
691
+ constructor(config) {
692
+ this.ctx = config.ctx;
693
+ this.limiter = config.limiter;
694
+ this.timeout = config.timeout ?? 5e3;
695
+ this.prefix = config.prefix ?? "@upstash/ratelimit";
696
+ this.enableProtection = config.enableProtection ?? false;
697
+ this.denyListThreshold = config.denyListThreshold ?? 6;
698
+ this.primaryRedis = "redis" in this.ctx ? this.ctx.redis : this.ctx.regionContexts[0].redis;
699
+ this.analytics = config.analytics ? new Analytics({
700
+ redis: this.primaryRedis,
701
+ prefix: this.prefix
702
+ }) : void 0;
703
+ if (config.ephemeralCache instanceof Map) {
704
+ this.ctx.cache = new Cache(config.ephemeralCache);
705
+ } else if (config.ephemeralCache === void 0) {
706
+ this.ctx.cache = new Cache(/* @__PURE__ */ new Map());
707
+ }
708
+ }
709
+ /**
710
+ * Determine if a request should pass or be rejected based on the identifier and previously chosen ratelimit.
711
+ *
712
+ * Use this if you want to reject all requests that you can not handle right now.
713
+ *
714
+ * @example
715
+ * ```ts
716
+ * const ratelimit = new Ratelimit({
717
+ * redis: Redis.fromEnv(),
718
+ * limiter: Ratelimit.slidingWindow(10, "10 s")
719
+ * })
720
+ *
721
+ * const { success } = await ratelimit.limit(id)
722
+ * if (!success){
723
+ * return "Nope"
724
+ * }
725
+ * return "Yes"
726
+ * ```
727
+ *
728
+ * @param req.rate - The rate at which tokens will be added or consumed from the token bucket. A higher rate allows for more requests to be processed. Defaults to 1 token per interval if not specified.
729
+ *
730
+ * Usage with `req.rate`
731
+ * @example
732
+ * ```ts
733
+ * const ratelimit = new Ratelimit({
734
+ * redis: Redis.fromEnv(),
735
+ * limiter: Ratelimit.slidingWindow(100, "10 s")
736
+ * })
737
+ *
738
+ * const { success } = await ratelimit.limit(id, {rate: 10})
739
+ * if (!success){
740
+ * return "Nope"
741
+ * }
742
+ * return "Yes"
743
+ * ```
744
+ */
745
+ limit = async (identifier, req) => {
746
+ let timeoutId = null;
747
+ try {
748
+ const response = this.getRatelimitResponse(identifier, req);
749
+ const { responseArray, newTimeoutId } = this.applyTimeout(response);
750
+ timeoutId = newTimeoutId;
751
+ const timedResponse = await Promise.race(responseArray);
752
+ const finalResponse = this.submitAnalytics(timedResponse, identifier, req);
753
+ return finalResponse;
754
+ } finally {
755
+ if (timeoutId) {
756
+ clearTimeout(timeoutId);
757
+ }
758
+ }
759
+ };
760
+ /**
761
+ * Block until the request may pass or timeout is reached.
762
+ *
763
+ * This method returns a promise that resolves as soon as the request may be processed
764
+ * or after the timeout has been reached.
765
+ *
766
+ * Use this if you want to delay the request until it is ready to get processed.
767
+ *
768
+ * @example
769
+ * ```ts
770
+ * const ratelimit = new Ratelimit({
771
+ * redis: Redis.fromEnv(),
772
+ * limiter: Ratelimit.slidingWindow(10, "10 s")
773
+ * })
774
+ *
775
+ * const { success } = await ratelimit.blockUntilReady(id, 60_000)
776
+ * if (!success){
777
+ * return "Nope"
778
+ * }
779
+ * return "Yes"
780
+ * ```
781
+ */
782
+ blockUntilReady = async (identifier, timeout) => {
783
+ if (timeout <= 0) {
784
+ throw new Error("timeout must be positive");
785
+ }
786
+ let res;
787
+ const deadline = Date.now() + timeout;
788
+ while (true) {
789
+ res = await this.limit(identifier);
790
+ if (res.success) {
791
+ break;
792
+ }
793
+ if (res.reset === 0) {
794
+ throw new Error("This should not happen");
795
+ }
796
+ const wait = Math.min(res.reset, deadline) - Date.now();
797
+ await new Promise((r) => setTimeout(r, wait));
798
+ if (Date.now() > deadline) {
799
+ break;
800
+ }
801
+ }
802
+ return res;
803
+ };
804
+ resetUsedTokens = async (identifier) => {
805
+ const pattern = [this.prefix, identifier].join(":");
806
+ await this.limiter().resetTokens(this.ctx, pattern);
807
+ };
808
+ /**
809
+ * Returns the remaining token count together with a reset timestamps
810
+ *
811
+ * @param identifier identifir to check
812
+ * @returns object with `remaining` and reset fields. `remaining` denotes
813
+ * the remaining tokens and reset denotes the timestamp when the
814
+ * tokens reset.
815
+ */
816
+ getRemaining = async (identifier) => {
817
+ const pattern = [this.prefix, identifier].join(":");
818
+ return await this.limiter().getRemaining(this.ctx, pattern);
819
+ };
820
+ /**
821
+ * Checks if the identifier or the values in req are in the deny list cache.
822
+ * If so, returns the default denied response.
823
+ *
824
+ * Otherwise, calls redis to check the rate limit and deny list. Returns after
825
+ * resolving the result. Resolving is overriding the rate limit result if
826
+ * the some value is in deny list.
827
+ *
828
+ * @param identifier identifier to block
829
+ * @param req options with ip, user agent, country, rate and geo info
830
+ * @returns rate limit response
831
+ */
832
+ getRatelimitResponse = async (identifier, req) => {
833
+ const key = this.getKey(identifier);
834
+ const definedMembers = this.getDefinedMembers(identifier, req);
835
+ const deniedValue = checkDenyListCache(definedMembers);
836
+ const result = deniedValue ? [defaultDeniedResponse(deniedValue), { deniedValue, invalidIpDenyList: false }] : await Promise.all([
837
+ this.limiter().limit(this.ctx, key, req?.rate),
838
+ this.enableProtection ? checkDenyList(this.primaryRedis, this.prefix, definedMembers) : { deniedValue: void 0, invalidIpDenyList: false }
839
+ ]);
840
+ return resolveLimitPayload(this.primaryRedis, this.prefix, result, this.denyListThreshold);
841
+ };
842
+ /**
843
+ * Creates an array with the original response promise and a timeout promise
844
+ * if this.timeout > 0.
845
+ *
846
+ * @param response Ratelimit response promise
847
+ * @returns array with the response and timeout promise. also includes the timeout id
848
+ */
849
+ applyTimeout = (response) => {
850
+ let newTimeoutId = null;
851
+ const responseArray = [response];
852
+ if (this.timeout > 0) {
853
+ const timeoutResponse = new Promise((resolve) => {
854
+ newTimeoutId = setTimeout(() => {
855
+ resolve({
856
+ success: true,
857
+ limit: 0,
858
+ remaining: 0,
859
+ reset: 0,
860
+ pending: Promise.resolve(),
861
+ reason: "timeout"
862
+ });
863
+ }, this.timeout);
864
+ });
865
+ responseArray.push(timeoutResponse);
866
+ }
867
+ return {
868
+ responseArray,
869
+ newTimeoutId
870
+ };
871
+ };
872
/**
 * submits analytics if this.analytics is set
 *
 * Best-effort: recording failures are logged with `console.warn` and never
 * surface to the caller; the analytics promise is folded into `.pending`
 * so platforms can `waitUntil` it.
 *
 * @param ratelimitResponse final rate limit response
 * @param identifier identifier to submit
 * @param req limit options
 * @returns rate limit response after updating the .pending field
 */
submitAnalytics = (ratelimitResponse, identifier, req) => {
  if (this.analytics) {
    try {
      const geo = req ? this.analytics.extractGeo(req) : void 0;
      const analyticsP = this.analytics.record({
        // deny-list hits are recorded against the denied value, not the raw identifier
        identifier: ratelimitResponse.reason === "denyList" ? ratelimitResponse.deniedValue : identifier,
        time: Date.now(),
        success: ratelimitResponse.reason === "denyList" ? "denied" : ratelimitResponse.success,
        ...geo
      }).catch((error) => {
        let errorMessage = "Failed to record analytics";
        // WRONGTYPE indicates analytics keys written by a pre-1.1.2 release.
        if (`${error}`.includes("WRONGTYPE")) {
          errorMessage = `
    Failed to record analytics. See the information below:

    This can occur when you upgrade to Ratelimit version 1.1.2
    or later from an earlier version.

    This occurs simply because the way we store analytics data
    has changed. To avoid getting this error, disable analytics
    for *an hour*, then simply enable it back.

    `;
        }
        console.warn(errorMessage, error);
      });
      // Let callers await analytics alongside any other pending work.
      ratelimitResponse.pending = Promise.all([ratelimitResponse.pending, analyticsP]);
    } catch (error) {
      console.warn("Failed to record analytics", error);
    }
  }
  return ratelimitResponse;
};
915
/**
 * Builds the Redis key for an identifier by joining it to the
 * configured key prefix with a ":" separator.
 */
getKey = (identifier) => `${this.prefix}:${identifier}`;
918
/**
 * returns a list of defined values from
 * [identifier, req.ip, req.userAgent, req.country]
 *
 * Truthiness filter: missing/empty fields are dropped.
 *
 * @param identifier identifier
 * @param req limit options
 * @returns list of defined values
 */
getDefinedMembers = (identifier, req) => {
  const candidates = [identifier, req?.ip, req?.userAgent, req?.country];
  return candidates.filter(Boolean);
};
930
+ };
931
+
932
+ // src/multi.ts
933
/**
 * Generates a 16-character alphanumeric id (A-Z, a-z, 0-9) using
 * `Math.random`. Not cryptographically secure; used only to tag
 * individual requests for cross-region reconciliation.
 */
function randomId() {
  const alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789";
  const picks = Array.from(
    { length: 16 },
    () => alphabet[Math.floor(Math.random() * alphabet.length)]
  );
  return picks.join("");
}
942
var MultiRegionRatelimit = class extends Ratelimit {
  /**
   * Create a new Ratelimit instance by providing a `@upstash/redis` instance and the algorithn of your choice.
   */
  constructor(config) {
    super({
      prefix: config.prefix,
      limiter: config.limiter,
      timeout: config.timeout,
      analytics: config.analytics,
      ctx: {
        // one region context per redis instance
        regionContexts: config.redis.map((redis) => ({
          redis
        })),
        cache: config.ephemeralCache ? new Cache(config.ephemeralCache) : void 0
      }
    });
  }
  /**
   * Each request inside a fixed time increases a counter.
   * Once the counter reaches the maximum allowed number, all further requests are
   * rejected.
   *
   * **Pro:**
   *
   * - Newer requests are not starved by old ones.
   * - Low storage cost.
   *
   * **Con:**
   *
   * A burst of requests near the boundary of a window can result in a very
   * high request rate because two windows will be filled with requests quickly.
   *
   * @param tokens - How many requests a user can make in each time window.
   * @param window - A fixed timeframe
   */
  static fixedWindow(tokens, window) {
    const windowDuration = ms(window);
    return () => ({
      async limit(ctx, identifier, rate) {
        // Fast path: identifier already blocked in the ephemeral cache.
        if (ctx.cache) {
          const { blocked, reset: reset2 } = ctx.cache.isBlocked(identifier);
          if (blocked) {
            return {
              success: false,
              limit: tokens,
              remaining: 0,
              reset: reset2,
              pending: Promise.resolve(),
              reason: "cacheBlock"
            };
          }
        }
        const requestId = randomId();
        const bucket = Math.floor(Date.now() / windowDuration);
        const key = [identifier, bucket].join(":");
        const incrementBy = rate ? Math.max(1, rate) : 1;
        // Fire the limit script at every region in parallel; each response is a
        // flat [id, count, id, count, ...] hash dump (counts at odd indices).
        const dbs = ctx.regionContexts.map((regionContext) => ({
          redis: regionContext.redis,
          request: safeEval(
            regionContext,
            SCRIPTS.multiRegion.fixedWindow.limit,
            [key],
            [requestId, windowDuration, incrementBy]
          )
        }));
        // Decide based on whichever region answers first.
        const firstResponse = await Promise.any(dbs.map((s) => s.request));
        const usedTokens = firstResponse.reduce((accTokens, usedToken, index) => {
          let parsedToken = 0;
          if (index % 2) {
            parsedToken = Number.parseInt(usedToken);
          }
          return accTokens + parsedToken;
        }, 0);
        const remaining = tokens - usedTokens;
        // Reconcile regions in the background: push request ids a region has
        // not seen yet so all regions converge on the same hash contents.
        async function sync() {
          const individualIDs = await Promise.all(dbs.map((s) => s.request));
          const allIDs = [...new Set(
            individualIDs.flat().reduce((acc, curr, index) => {
              if (index % 2 === 0) {
                acc.push(curr);
              }
              return acc;
            }, [])
          ).values()];
          for (const db of dbs) {
            const usedDbTokensRequest = await db.request;
            const usedDbTokens = usedDbTokensRequest.reduce(
              (accTokens, usedToken, index) => {
                let parsedToken = 0;
                if (index % 2) {
                  parsedToken = Number.parseInt(usedToken);
                }
                return accTokens + parsedToken;
              },
              0
            );
            const dbIdsRequest = await db.request;
            const dbIds = dbIdsRequest.reduce((ids, currentId, index) => {
              if (index % 2 === 0) {
                ids.push(currentId);
              }
              return ids;
            }, []);
            // Region already at/over the limit: no point back-filling ids.
            if (usedDbTokens >= tokens) {
              continue;
            }
            const diff = allIDs.filter((id) => !dbIds.includes(id));
            if (diff.length === 0) {
              continue;
            }
            for (const requestId2 of diff) {
              await db.redis.hset(key, { [requestId2]: incrementBy });
            }
          }
        }
        const success = remaining > 0;
        const reset = (bucket + 1) * windowDuration;
        if (ctx.cache && !success) {
          ctx.cache.blockUntil(identifier, reset);
        }
        return {
          success,
          limit: tokens,
          remaining,
          reset,
          pending: sync()
        };
      },
      async getRemaining(ctx, identifier) {
        const bucket = Math.floor(Date.now() / windowDuration);
        const key = [identifier, bucket].join(":");
        const dbs = ctx.regionContexts.map((regionContext) => ({
          redis: regionContext.redis,
          request: safeEval(
            regionContext,
            SCRIPTS.multiRegion.fixedWindow.getRemaining,
            [key],
            [null]
          )
        }));
        const firstResponse = await Promise.any(dbs.map((s) => s.request));
        // Counts live at odd indices of the flat [id, count, ...] response.
        const usedTokens = firstResponse.reduce((accTokens, usedToken, index) => {
          let parsedToken = 0;
          if (index % 2) {
            parsedToken = Number.parseInt(usedToken);
          }
          return accTokens + parsedToken;
        }, 0);
        return {
          remaining: Math.max(0, tokens - usedTokens),
          reset: (bucket + 1) * windowDuration
        };
      },
      async resetTokens(ctx, identifier) {
        const pattern = [identifier, "*"].join(":");
        if (ctx.cache) {
          ctx.cache.pop(identifier);
        }
        // BUGFIX: the map callback must return the safeEval promise; previously
        // it was wrapped in braces without `return`, so Promise.all resolved
        // over `undefined`s and resetTokens returned before regions were reset.
        await Promise.all(ctx.regionContexts.map((regionContext) =>
          safeEval(
            regionContext,
            RESET_SCRIPT,
            [pattern],
            [null]
          )
        ));
      }
    });
  }
  /**
   * Combined approach of `slidingLogs` and `fixedWindow` with lower storage
   * costs than `slidingLogs` and improved boundary behavior by calculating a
   * weighted score between two windows.
   *
   * **Pro:**
   *
   * Good performance allows this to scale to very high loads.
   *
   * **Con:**
   *
   * Nothing major.
   *
   * @param tokens - How many requests a user can make in each time window.
   * @param window - The duration in which the user can max X requests.
   */
  static slidingWindow(tokens, window) {
    const windowSize = ms(window);
    const windowDuration = ms(window);
    return () => ({
      async limit(ctx, identifier, rate) {
        if (ctx.cache) {
          const { blocked, reset: reset2 } = ctx.cache.isBlocked(identifier);
          if (blocked) {
            return {
              success: false,
              limit: tokens,
              remaining: 0,
              reset: reset2,
              pending: Promise.resolve(),
              reason: "cacheBlock"
            };
          }
        }
        const requestId = randomId();
        const now = Date.now();
        const currentWindow = Math.floor(now / windowSize);
        const currentKey = [identifier, currentWindow].join(":");
        const previousWindow = currentWindow - 1;
        const previousKey = [identifier, previousWindow].join(":");
        const incrementBy = rate ? Math.max(1, rate) : 1;
        const dbs = ctx.regionContexts.map((regionContext) => ({
          redis: regionContext.redis,
          request: safeEval(
            regionContext,
            SCRIPTS.multiRegion.slidingWindow.limit,
            [currentKey, previousKey],
            [tokens, now, windowDuration, requestId, incrementBy]
            // lua seems to return `1` for true and `null` for false
          )
        }));
        const percentageInCurrent = now % windowDuration / windowDuration;
        const [current, previous, success] = await Promise.any(dbs.map((s) => s.request));
        if (success) {
          // The script admitted this request; reflect it locally so the
          // token accounting below includes it.
          current.push(requestId, incrementBy.toString());
        }
        const previousUsedTokens = previous.reduce((accTokens, usedToken, index) => {
          let parsedToken = 0;
          if (index % 2) {
            parsedToken = Number.parseInt(usedToken);
          }
          return accTokens + parsedToken;
        }, 0);
        const currentUsedTokens = current.reduce((accTokens, usedToken, index) => {
          let parsedToken = 0;
          if (index % 2) {
            parsedToken = Number.parseInt(usedToken);
          }
          return accTokens + parsedToken;
        }, 0);
        // Weight the previous window by how much of it still overlaps.
        const previousPartialUsed = Math.ceil(previousUsedTokens * (1 - percentageInCurrent));
        const usedTokens = previousPartialUsed + currentUsedTokens;
        const remaining = tokens - usedTokens;
        // Background reconciliation: propagate request ids each region is missing.
        async function sync() {
          const res = await Promise.all(dbs.map((s) => s.request));
          const allCurrentIds = [...new Set(
            res.flatMap(([current2]) => current2).reduce((acc, curr, index) => {
              if (index % 2 === 0) {
                acc.push(curr);
              }
              return acc;
            }, [])
          ).values()];
          for (const db of dbs) {
            const [current2, _previous, _success] = await db.request;
            const dbIds = current2.reduce((ids, currentId, index) => {
              if (index % 2 === 0) {
                ids.push(currentId);
              }
              return ids;
            }, []);
            const usedDbTokens = current2.reduce((accTokens, usedToken, index) => {
              let parsedToken = 0;
              if (index % 2) {
                parsedToken = Number.parseInt(usedToken);
              }
              return accTokens + parsedToken;
            }, 0);
            if (usedDbTokens >= tokens) {
              continue;
            }
            const diff = allCurrentIds.filter((id) => !dbIds.includes(id));
            if (diff.length === 0) {
              continue;
            }
            for (const requestId2 of diff) {
              await db.redis.hset(currentKey, { [requestId2]: incrementBy });
            }
          }
        }
        const reset = (currentWindow + 1) * windowDuration;
        if (ctx.cache && !success) {
          ctx.cache.blockUntil(identifier, reset);
        }
        return {
          success: Boolean(success),
          limit: tokens,
          remaining: Math.max(0, remaining),
          reset,
          pending: sync()
        };
      },
      async getRemaining(ctx, identifier) {
        const now = Date.now();
        const currentWindow = Math.floor(now / windowSize);
        const currentKey = [identifier, currentWindow].join(":");
        const previousWindow = currentWindow - 1;
        const previousKey = [identifier, previousWindow].join(":");
        const dbs = ctx.regionContexts.map((regionContext) => ({
          redis: regionContext.redis,
          request: safeEval(
            regionContext,
            SCRIPTS.multiRegion.slidingWindow.getRemaining,
            [currentKey, previousKey],
            [now, windowSize]
            // lua seems to return `1` for true and `null` for false
          )
        }));
        const usedTokens = await Promise.any(dbs.map((s) => s.request));
        return {
          remaining: Math.max(0, tokens - usedTokens),
          reset: (currentWindow + 1) * windowSize
        };
      },
      async resetTokens(ctx, identifier) {
        const pattern = [identifier, "*"].join(":");
        if (ctx.cache) {
          ctx.cache.pop(identifier);
        }
        // BUGFIX: return the safeEval promise from the map callback so the
        // resets are actually awaited (previously fire-and-forget).
        await Promise.all(ctx.regionContexts.map((regionContext) =>
          safeEval(
            regionContext,
            RESET_SCRIPT,
            [pattern],
            [null]
          )
        ));
      }
    });
  }
};
1273
+
1274
+ // src/single.ts
1275
// Single-region ratelimiter: all algorithms run one Lua script against a
// single Redis instance, so no cross-region reconciliation is needed and
// `pending` is usually an already-resolved promise.
var RegionRatelimit = class extends Ratelimit {
  /**
   * Create a new Ratelimit instance by providing a `@upstash/redis` instance and the algorithm of your choice.
   */
  constructor(config) {
    super({
      prefix: config.prefix,
      limiter: config.limiter,
      timeout: config.timeout,
      analytics: config.analytics,
      ctx: {
        redis: config.redis
      },
      ephemeralCache: config.ephemeralCache,
      enableProtection: config.enableProtection,
      denyListThreshold: config.denyListThreshold
    });
  }
  /**
   * Each request inside a fixed time increases a counter.
   * Once the counter reaches the maximum allowed number, all further requests are
   * rejected.
   *
   * **Pro:**
   *
   * - Newer requests are not starved by old ones.
   * - Low storage cost.
   *
   * **Con:**
   *
   * A burst of requests near the boundary of a window can result in a very
   * high request rate because two windows will be filled with requests quickly.
   *
   * @param tokens - How many requests a user can make in each time window.
   * @param window - A fixed timeframe
   */
  static fixedWindow(tokens, window) {
    const windowDuration = ms(window);
    return () => ({
      async limit(ctx, identifier, rate) {
        // Key is scoped to the current fixed bucket: "<identifier>:<bucket>".
        const bucket = Math.floor(Date.now() / windowDuration);
        const key = [identifier, bucket].join(":");
        // Fast path: skip Redis entirely while the ephemeral cache says blocked.
        if (ctx.cache) {
          const { blocked, reset: reset2 } = ctx.cache.isBlocked(identifier);
          if (blocked) {
            return {
              success: false,
              limit: tokens,
              remaining: 0,
              reset: reset2,
              pending: Promise.resolve(),
              reason: "cacheBlock"
            };
          }
        }
        const incrementBy = rate ? Math.max(1, rate) : 1;
        // Script increments the bucket counter and returns the post-update total.
        const usedTokensAfterUpdate = await safeEval(
          ctx,
          SCRIPTS.singleRegion.fixedWindow.limit,
          [key],
          [windowDuration, incrementBy]
        );
        const success = usedTokensAfterUpdate <= tokens;
        const remainingTokens = Math.max(0, tokens - usedTokensAfterUpdate);
        const reset = (bucket + 1) * windowDuration;
        if (ctx.cache && !success) {
          ctx.cache.blockUntil(identifier, reset);
        }
        return {
          success,
          limit: tokens,
          remaining: remainingTokens,
          reset,
          pending: Promise.resolve()
        };
      },
      async getRemaining(ctx, identifier) {
        const bucket = Math.floor(Date.now() / windowDuration);
        const key = [identifier, bucket].join(":");
        // Read-only variant: fetches the current bucket count without incrementing.
        const usedTokens = await safeEval(
          ctx,
          SCRIPTS.singleRegion.fixedWindow.getRemaining,
          [key],
          [null]
        );
        return {
          remaining: Math.max(0, tokens - usedTokens),
          reset: (bucket + 1) * windowDuration
        };
      },
      async resetTokens(ctx, identifier) {
        // Matches every bucket for this identifier ("<identifier>:*").
        const pattern = [identifier, "*"].join(":");
        if (ctx.cache) {
          ctx.cache.pop(identifier);
        }
        await safeEval(
          ctx,
          RESET_SCRIPT,
          [pattern],
          [null]
        );
      }
    });
  }
  /**
   * Combined approach of `slidingLogs` and `fixedWindow` with lower storage
   * costs than `slidingLogs` and improved boundary behavior by calculating a
   * weighted score between two windows.
   *
   * **Pro:**
   *
   * Good performance allows this to scale to very high loads.
   *
   * **Con:**
   *
   * Nothing major.
   *
   * @param tokens - How many requests a user can make in each time window.
   * @param window - The duration in which the user can max X requests.
   */
  static slidingWindow(tokens, window) {
    const windowSize = ms(window);
    return () => ({
      async limit(ctx, identifier, rate) {
        const now = Date.now();
        const currentWindow = Math.floor(now / windowSize);
        const currentKey = [identifier, currentWindow].join(":");
        const previousWindow = currentWindow - 1;
        const previousKey = [identifier, previousWindow].join(":");
        if (ctx.cache) {
          const { blocked, reset: reset2 } = ctx.cache.isBlocked(identifier);
          if (blocked) {
            return {
              success: false,
              limit: tokens,
              remaining: 0,
              reset: reset2,
              pending: Promise.resolve(),
              reason: "cacheBlock"
            };
          }
        }
        const incrementBy = rate ? Math.max(1, rate) : 1;
        // Script weights the previous window against the current one and
        // returns the remaining token count (negative when over the limit).
        const remainingTokens = await safeEval(
          ctx,
          SCRIPTS.singleRegion.slidingWindow.limit,
          [currentKey, previousKey],
          [tokens, now, windowSize, incrementBy]
        );
        const success = remainingTokens >= 0;
        const reset = (currentWindow + 1) * windowSize;
        if (ctx.cache && !success) {
          ctx.cache.blockUntil(identifier, reset);
        }
        return {
          success,
          limit: tokens,
          remaining: Math.max(0, remainingTokens),
          reset,
          pending: Promise.resolve()
        };
      },
      async getRemaining(ctx, identifier) {
        const now = Date.now();
        const currentWindow = Math.floor(now / windowSize);
        const currentKey = [identifier, currentWindow].join(":");
        const previousWindow = currentWindow - 1;
        const previousKey = [identifier, previousWindow].join(":");
        // NOTE: unlike `limit`, this script returns USED tokens, hence the
        // `tokens - usedTokens` conversion below.
        const usedTokens = await safeEval(
          ctx,
          SCRIPTS.singleRegion.slidingWindow.getRemaining,
          [currentKey, previousKey],
          [now, windowSize]
        );
        return {
          remaining: Math.max(0, tokens - usedTokens),
          reset: (currentWindow + 1) * windowSize
        };
      },
      async resetTokens(ctx, identifier) {
        const pattern = [identifier, "*"].join(":");
        if (ctx.cache) {
          ctx.cache.pop(identifier);
        }
        await safeEval(
          ctx,
          RESET_SCRIPT,
          [pattern],
          [null]
        );
      }
    });
  }
  /**
   * You have a bucket filled with `{maxTokens}` tokens that refills constantly
   * at `{refillRate}` per `{interval}`.
   * Every request will remove one token from the bucket and if there is no
   * token to take, the request is rejected.
   *
   * **Pro:**
   *
   * - Bursts of requests are smoothed out and you can process them at a constant
   * rate.
   * - Allows to set a higher initial burst limit by setting `maxTokens` higher
   * than `refillRate`
   */
  static tokenBucket(refillRate, interval, maxTokens) {
    const intervalDuration = ms(interval);
    return () => ({
      async limit(ctx, identifier, rate) {
        if (ctx.cache) {
          const { blocked, reset: reset2 } = ctx.cache.isBlocked(identifier);
          if (blocked) {
            return {
              success: false,
              limit: maxTokens,
              remaining: 0,
              reset: reset2,
              pending: Promise.resolve(),
              reason: "cacheBlock"
            };
          }
        }
        const now = Date.now();
        const incrementBy = rate ? Math.max(1, rate) : 1;
        // Script refills the bucket based on elapsed time, then tries to take
        // `incrementBy` tokens; returns [remaining, resetTimestamp].
        const [remaining, reset] = await safeEval(
          ctx,
          SCRIPTS.singleRegion.tokenBucket.limit,
          [identifier],
          [maxTokens, intervalDuration, refillRate, now, incrementBy]
        );
        const success = remaining >= 0;
        if (ctx.cache && !success) {
          ctx.cache.blockUntil(identifier, reset);
        }
        return {
          success,
          limit: maxTokens,
          remaining,
          reset,
          pending: Promise.resolve()
        };
      },
      async getRemaining(ctx, identifier) {
        // Returns the sentinel `tokenBucketIdentifierNotFound` as `refilledAt`
        // when the bucket key does not exist yet.
        const [remainingTokens, refilledAt] = await safeEval(
          ctx,
          SCRIPTS.singleRegion.tokenBucket.getRemaining,
          [identifier],
          [maxTokens]
        );
        const freshRefillAt = Date.now() + intervalDuration;
        const identifierRefillsAt = refilledAt + intervalDuration;
        return {
          remaining: remainingTokens,
          reset: refilledAt === tokenBucketIdentifierNotFound ? freshRefillAt : identifierRefillsAt
        };
      },
      async resetTokens(ctx, identifier) {
        // Token bucket stores state under the bare identifier (no bucket suffix).
        const pattern = identifier;
        if (ctx.cache) {
          ctx.cache.pop(identifier);
        }
        await safeEval(
          ctx,
          RESET_SCRIPT,
          [pattern],
          [null]
        );
      }
    });
  }
  /**
   * cachedFixedWindow first uses the local cache to decide if a request may pass and then updates
   * it asynchronously.
   * This is experimental and not yet recommended for production use.
   *
   * @experimental
   *
   * Each request inside a fixed time increases a counter.
   * Once the counter reaches the maximum allowed number, all further requests are
   * rejected.
   *
   * **Pro:**
   *
   * - Newer requests are not starved by old ones.
   * - Low storage cost.
   *
   * **Con:**
   *
   * A burst of requests near the boundary of a window can result in a very
   * high request rate because two windows will be filled with requests quickly.
   *
   * @param tokens - How many requests a user can make in each time window.
   * @param window - A fixed timeframe
   */
  static cachedFixedWindow(tokens, window) {
    const windowDuration = ms(window);
    return () => ({
      async limit(ctx, identifier, rate) {
        if (!ctx.cache) {
          throw new Error("This algorithm requires a cache");
        }
        const bucket = Math.floor(Date.now() / windowDuration);
        const key = [identifier, bucket].join(":");
        const reset = (bucket + 1) * windowDuration;
        const incrementBy = rate ? Math.max(1, rate) : 1;
        const hit = typeof ctx.cache.get(key) === "number";
        if (hit) {
          // Cache hit: decide locally, update Redis asynchronously on success.
          // NOTE(review): this path uses a strict `<` while the cold path below
          // uses `remaining >= 0` (i.e. `<=`); boundary behavior differs by one
          // request — presumably intentional for the experimental algorithm,
          // but worth confirming upstream.
          const cachedTokensAfterUpdate = ctx.cache.incr(key);
          const success = cachedTokensAfterUpdate < tokens;
          const pending = success ? safeEval(
            ctx,
            SCRIPTS.singleRegion.cachedFixedWindow.limit,
            [key],
            [windowDuration, incrementBy]
          ) : Promise.resolve();
          return {
            success,
            limit: tokens,
            remaining: tokens - cachedTokensAfterUpdate,
            reset,
            pending
          };
        }
        // Cold path: consult Redis synchronously and seed the local cache.
        const usedTokensAfterUpdate = await safeEval(
          ctx,
          SCRIPTS.singleRegion.cachedFixedWindow.limit,
          [key],
          [windowDuration, incrementBy]
        );
        ctx.cache.set(key, usedTokensAfterUpdate);
        const remaining = tokens - usedTokensAfterUpdate;
        return {
          success: remaining >= 0,
          limit: tokens,
          remaining,
          reset,
          pending: Promise.resolve()
        };
      },
      async getRemaining(ctx, identifier) {
        if (!ctx.cache) {
          throw new Error("This algorithm requires a cache");
        }
        const bucket = Math.floor(Date.now() / windowDuration);
        const key = [identifier, bucket].join(":");
        const hit = typeof ctx.cache.get(key) === "number";
        if (hit) {
          // Serve from the local cache when this bucket has been seen.
          const cachedUsedTokens = ctx.cache.get(key) ?? 0;
          return {
            remaining: Math.max(0, tokens - cachedUsedTokens),
            reset: (bucket + 1) * windowDuration
          };
        }
        const usedTokens = await safeEval(
          ctx,
          SCRIPTS.singleRegion.cachedFixedWindow.getRemaining,
          [key],
          [null]
        );
        return {
          remaining: Math.max(0, tokens - usedTokens),
          reset: (bucket + 1) * windowDuration
        };
      },
      async resetTokens(ctx, identifier) {
        if (!ctx.cache) {
          throw new Error("This algorithm requires a cache");
        }
        // Drop the local cache entry for the current bucket, then clear Redis.
        const bucket = Math.floor(Date.now() / windowDuration);
        const key = [identifier, bucket].join(":");
        ctx.cache.pop(key);
        const pattern = [identifier, "*"].join(":");
        await safeEval(
          ctx,
          RESET_SCRIPT,
          [pattern],
          [null]
        );
      }
    });
  }
};
1658
// Annotate the CommonJS export names for ESM import in node:
// (dead code — the `0 &&` guard means it never runs; the literal exists only
// so Node's static analysis can detect the named exports of this CJS bundle)
0 && (module.exports = {
  Analytics,
  IpDenyList,
  MultiRegionRatelimit,
  Ratelimit
});
1665
+ //# sourceMappingURL=index.js.map