@upstash/ratelimit 0.0.0-ci.f88fdef75b804d1f115e9d3e7bca57c88df0c108-20241006173348

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs ADDED
@@ -0,0 +1,1641 @@
1
+ var __defProp = Object.defineProperty;
2
+ var __export = (target, all) => {
3
+ for (var name in all)
4
+ __defProp(target, name, { get: all[name], enumerable: true });
5
+ };
6
+
7
+ // src/analytics.ts
8
+ import { Analytics as CoreAnalytics } from "@upstash/core-analytics";
9
+ var Analytics = class {
10
+ analytics;
11
+ table = "events";
12
+ constructor(config) {
13
+ this.analytics = new CoreAnalytics({
14
+ // @ts-expect-error we need to fix the types in core-analytics, it should only require the methods it needs, not the whole sdk
15
+ redis: config.redis,
16
+ window: "1h",
17
+ prefix: config.prefix ?? "@upstash/ratelimit",
18
+ retention: "90d"
19
+ });
20
+ }
21
+ /**
22
+ * Try to extract the geo information from the request
23
+ *
24
+ * This handles Vercel's `req.geo` and Cloudflare's `request.cf` properties
25
+ * @param req
26
+ * @returns
27
+ */
28
+ extractGeo(req) {
29
+ if (req.geo !== void 0) {
30
+ return req.geo;
31
+ }
32
+ if (req.cf !== void 0) {
33
+ return req.cf;
34
+ }
35
+ return {};
36
+ }
37
+ async record(event) {
38
+ await this.analytics.ingest(this.table, event);
39
+ }
40
+ async series(filter, cutoff) {
41
+ const timestampCount = Math.min(
42
+ (this.analytics.getBucket(Date.now()) - this.analytics.getBucket(cutoff)) / (60 * 60 * 1e3),
43
+ 256
44
+ );
45
+ return this.analytics.aggregateBucketsWithPipeline(this.table, filter, timestampCount);
46
+ }
47
+ async getUsage(cutoff = 0) {
48
+ const timestampCount = Math.min(
49
+ (this.analytics.getBucket(Date.now()) - this.analytics.getBucket(cutoff)) / (60 * 60 * 1e3),
50
+ 256
51
+ );
52
+ const records = await this.analytics.getAllowedBlocked(this.table, timestampCount);
53
+ return records;
54
+ }
55
+ async getUsageOverTime(timestampCount, groupby) {
56
+ const result = await this.analytics.aggregateBucketsWithPipeline(this.table, groupby, timestampCount);
57
+ return result;
58
+ }
59
+ async getMostAllowedBlocked(timestampCount, getTop, checkAtMost) {
60
+ getTop = getTop ?? 5;
61
+ const timestamp = void 0;
62
+ return this.analytics.getMostAllowedBlocked(this.table, timestampCount, getTop, timestamp, checkAtMost);
63
+ }
64
+ };
65
+
66
+ // src/cache.ts
67
+ var Cache = class {
68
+ /**
69
+ * Stores identifier -> reset (in milliseconds)
70
+ */
71
+ cache;
72
+ constructor(cache) {
73
+ this.cache = cache;
74
+ }
75
+ isBlocked(identifier) {
76
+ if (!this.cache.has(identifier)) {
77
+ return { blocked: false, reset: 0 };
78
+ }
79
+ const reset = this.cache.get(identifier);
80
+ if (reset < Date.now()) {
81
+ this.cache.delete(identifier);
82
+ return { blocked: false, reset: 0 };
83
+ }
84
+ return { blocked: true, reset };
85
+ }
86
+ blockUntil(identifier, reset) {
87
+ this.cache.set(identifier, reset);
88
+ }
89
+ set(key, value) {
90
+ this.cache.set(key, value);
91
+ }
92
+ get(key) {
93
+ return this.cache.get(key) || null;
94
+ }
95
+ incr(key) {
96
+ let value = this.cache.get(key) ?? 0;
97
+ value += 1;
98
+ this.cache.set(key, value);
99
+ return value;
100
+ }
101
+ pop(key) {
102
+ this.cache.delete(key);
103
+ }
104
+ empty() {
105
+ this.cache.clear();
106
+ }
107
+ size() {
108
+ return this.cache.size;
109
+ }
110
+ };
111
+
112
+ // src/duration.ts
113
+ function ms(d) {
114
+ const match = d.match(/^(\d+)\s?(ms|s|m|h|d)$/);
115
+ if (!match) {
116
+ throw new Error(`Unable to parse window size: ${d}`);
117
+ }
118
+ const time = Number.parseInt(match[1]);
119
+ const unit = match[2];
120
+ switch (unit) {
121
+ case "ms": {
122
+ return time;
123
+ }
124
+ case "s": {
125
+ return time * 1e3;
126
+ }
127
+ case "m": {
128
+ return time * 1e3 * 60;
129
+ }
130
+ case "h": {
131
+ return time * 1e3 * 60 * 60;
132
+ }
133
+ case "d": {
134
+ return time * 1e3 * 60 * 60 * 24;
135
+ }
136
+ default: {
137
+ throw new Error(`Unable to parse window size: ${d}`);
138
+ }
139
+ }
140
+ }
141
+
142
+ // src/hash.ts
143
+ var safeEval = async (ctx, script, keys, args) => {
144
+ try {
145
+ return await ctx.redis.evalsha(script.hash, keys, args);
146
+ } catch (error) {
147
+ if (`${error}`.includes("NOSCRIPT")) {
148
+ const hash = await ctx.redis.scriptLoad(script.script);
149
+ if (hash !== script.hash) {
150
+ console.warn(
151
+ "Upstash Ratelimit: Expected hash and the hash received from Redis are different. Ratelimit will work as usual but performance will be reduced."
152
+ );
153
+ }
154
+ return await ctx.redis.evalsha(hash, keys, args);
155
+ }
156
+ throw error;
157
+ }
158
+ };
159
+
160
+ // src/lua-scripts/single.ts
161
+ var fixedWindowLimitScript = `
162
+ local key = KEYS[1]
163
+ local window = ARGV[1]
164
+ local incrementBy = ARGV[2] -- increment rate per request at a given value, default is 1
165
+
166
+ local r = redis.call("INCRBY", key, incrementBy)
167
+ if r == tonumber(incrementBy) then
168
+ -- The first time this key is set, the value will be equal to incrementBy.
169
+ -- So we only need the expire command once
170
+ redis.call("PEXPIRE", key, window)
171
+ end
172
+
173
+ return r
174
+ `;
175
+ var fixedWindowRemainingTokensScript = `
176
+ local key = KEYS[1]
177
+ local tokens = 0
178
+
179
+ local value = redis.call('GET', key)
180
+ if value then
181
+ tokens = value
182
+ end
183
+ return tokens
184
+ `;
185
+ var slidingWindowLimitScript = `
186
+ local currentKey = KEYS[1] -- identifier including prefixes
187
+ local previousKey = KEYS[2] -- key of the previous bucket
188
+ local tokens = tonumber(ARGV[1]) -- tokens per window
189
+ local now = ARGV[2] -- current timestamp in milliseconds
190
+ local window = ARGV[3] -- interval in milliseconds
191
+ local incrementBy = ARGV[4] -- increment rate per request at a given value, default is 1
192
+
193
+ local requestsInCurrentWindow = redis.call("GET", currentKey)
194
+ if requestsInCurrentWindow == false then
195
+ requestsInCurrentWindow = 0
196
+ end
197
+
198
+ local requestsInPreviousWindow = redis.call("GET", previousKey)
199
+ if requestsInPreviousWindow == false then
200
+ requestsInPreviousWindow = 0
201
+ end
202
+ local percentageInCurrent = ( now % window ) / window
203
+ -- weighted requests to consider from the previous window
204
+ requestsInPreviousWindow = math.floor(( 1 - percentageInCurrent ) * requestsInPreviousWindow)
205
+ if requestsInPreviousWindow + requestsInCurrentWindow >= tokens then
206
+ return -1
207
+ end
208
+
209
+ local newValue = redis.call("INCRBY", currentKey, incrementBy)
210
+ if newValue == tonumber(incrementBy) then
211
+ -- The first time this key is set, the value will be equal to incrementBy.
212
+ -- So we only need the expire command once
213
+ redis.call("PEXPIRE", currentKey, window * 2 + 1000) -- Enough time to overlap with a new window + 1 second
214
+ end
215
+ return tokens - ( newValue + requestsInPreviousWindow )
216
+ `;
217
+ var slidingWindowRemainingTokensScript = `
218
+ local currentKey = KEYS[1] -- identifier including prefixes
219
+ local previousKey = KEYS[2] -- key of the previous bucket
220
+ local now = ARGV[1] -- current timestamp in milliseconds
221
+ local window = ARGV[2] -- interval in milliseconds
222
+
223
+ local requestsInCurrentWindow = redis.call("GET", currentKey)
224
+ if requestsInCurrentWindow == false then
225
+ requestsInCurrentWindow = 0
226
+ end
227
+
228
+ local requestsInPreviousWindow = redis.call("GET", previousKey)
229
+ if requestsInPreviousWindow == false then
230
+ requestsInPreviousWindow = 0
231
+ end
232
+
233
+ local percentageInCurrent = ( now % window ) / window
234
+ -- weighted requests to consider from the previous window
235
+ requestsInPreviousWindow = math.floor(( 1 - percentageInCurrent ) * requestsInPreviousWindow)
236
+
237
+ return requestsInPreviousWindow + requestsInCurrentWindow
238
+ `;
239
+ var tokenBucketLimitScript = `
240
+ local key = KEYS[1] -- identifier including prefixes
241
+ local maxTokens = tonumber(ARGV[1]) -- maximum number of tokens
242
+ local interval = tonumber(ARGV[2]) -- size of the window in milliseconds
243
+ local refillRate = tonumber(ARGV[3]) -- how many tokens are refilled after each interval
244
+ local now = tonumber(ARGV[4]) -- current timestamp in milliseconds
245
+ local incrementBy = tonumber(ARGV[5]) -- how many tokens to consume, default is 1
246
+
247
+ local bucket = redis.call("HMGET", key, "refilledAt", "tokens")
248
+
249
+ local refilledAt
250
+ local tokens
251
+
252
+ if bucket[1] == false then
253
+ refilledAt = now
254
+ tokens = maxTokens
255
+ else
256
+ refilledAt = tonumber(bucket[1])
257
+ tokens = tonumber(bucket[2])
258
+ end
259
+
260
+ if now >= refilledAt + interval then
261
+ local numRefills = math.floor((now - refilledAt) / interval)
262
+ tokens = math.min(maxTokens, tokens + numRefills * refillRate)
263
+
264
+ refilledAt = refilledAt + numRefills * interval
265
+ end
266
+
267
+ if tokens == 0 then
268
+ return {-1, refilledAt + interval}
269
+ end
270
+
271
+ local remaining = tokens - incrementBy
272
+ local expireAt = math.ceil(((maxTokens - remaining) / refillRate)) * interval
273
+
274
+ redis.call("HSET", key, "refilledAt", refilledAt, "tokens", remaining)
275
+ redis.call("PEXPIRE", key, expireAt)
276
+ return {remaining, refilledAt + interval}
277
+ `;
278
+ var tokenBucketIdentifierNotFound = -1;
279
+ var tokenBucketRemainingTokensScript = `
280
+ local key = KEYS[1]
281
+ local maxTokens = tonumber(ARGV[1])
282
+
283
+ local bucket = redis.call("HMGET", key, "refilledAt", "tokens")
284
+
285
+ if bucket[1] == false then
286
+ return {maxTokens, ${tokenBucketIdentifierNotFound}}
287
+ end
288
+
289
+ return {tonumber(bucket[2]), tonumber(bucket[1])}
290
+ `;
291
+ var cachedFixedWindowLimitScript = `
292
+ local key = KEYS[1]
293
+ local window = ARGV[1]
294
+ local incrementBy = ARGV[2] -- increment rate per request at a given value, default is 1
295
+
296
+ local r = redis.call("INCRBY", key, incrementBy)
297
+ if r == incrementBy then
298
+ -- The first time this key is set, the value will be equal to incrementBy.
299
+ -- So we only need the expire command once
300
+ redis.call("PEXPIRE", key, window)
301
+ end
302
+
303
+ return r
304
+ `;
305
+ var cachedFixedWindowRemainingTokenScript = `
306
+ local key = KEYS[1]
307
+ local tokens = 0
308
+
309
+ local value = redis.call('GET', key)
310
+ if value then
311
+ tokens = value
312
+ end
313
+ return tokens
314
+ `;
315
+
316
+ // src/lua-scripts/multi.ts
317
+ var fixedWindowLimitScript2 = `
318
+ local key = KEYS[1]
319
+ local id = ARGV[1]
320
+ local window = ARGV[2]
321
+ local incrementBy = tonumber(ARGV[3])
322
+
323
+ redis.call("HSET", key, id, incrementBy)
324
+ local fields = redis.call("HGETALL", key)
325
+ if #fields == 2 and tonumber(fields[2])==incrementBy then
326
+ -- The first time this key is set, and the value will be equal to incrementBy.
327
+ -- So we only need the expire command once
328
+ redis.call("PEXPIRE", key, window)
329
+ end
330
+
331
+ return fields
332
+ `;
333
+ var fixedWindowRemainingTokensScript2 = `
334
+ local key = KEYS[1]
335
+ local tokens = 0
336
+
337
+ local fields = redis.call("HGETALL", key)
338
+
339
+ return fields
340
+ `;
341
+ var slidingWindowLimitScript2 = `
342
+ local currentKey = KEYS[1] -- identifier including prefixes
343
+ local previousKey = KEYS[2] -- key of the previous bucket
344
+ local tokens = tonumber(ARGV[1]) -- tokens per window
345
+ local now = ARGV[2] -- current timestamp in milliseconds
346
+ local window = ARGV[3] -- interval in milliseconds
347
+ local requestId = ARGV[4] -- uuid for this request
348
+ local incrementBy = tonumber(ARGV[5]) -- custom rate, default is 1
349
+
350
+ local currentFields = redis.call("HGETALL", currentKey)
351
+ local requestsInCurrentWindow = 0
352
+ for i = 2, #currentFields, 2 do
353
+ requestsInCurrentWindow = requestsInCurrentWindow + tonumber(currentFields[i])
354
+ end
355
+
356
+ local previousFields = redis.call("HGETALL", previousKey)
357
+ local requestsInPreviousWindow = 0
358
+ for i = 2, #previousFields, 2 do
359
+ requestsInPreviousWindow = requestsInPreviousWindow + tonumber(previousFields[i])
360
+ end
361
+
362
+ local percentageInCurrent = ( now % window) / window
363
+ if requestsInPreviousWindow * (1 - percentageInCurrent ) + requestsInCurrentWindow >= tokens then
364
+ return {currentFields, previousFields, false}
365
+ end
366
+
367
+ redis.call("HSET", currentKey, requestId, incrementBy)
368
+
369
+ if requestsInCurrentWindow == 0 then
370
+ -- The first time this key is set, the value will be equal to incrementBy.
371
+ -- So we only need the expire command once
372
+ redis.call("PEXPIRE", currentKey, window * 2 + 1000) -- Enough time to overlap with a new window + 1 second
373
+ end
374
+ return {currentFields, previousFields, true}
375
+ `;
376
+ var slidingWindowRemainingTokensScript2 = `
377
+ local currentKey = KEYS[1] -- identifier including prefixes
378
+ local previousKey = KEYS[2] -- key of the previous bucket
379
+ local now = ARGV[1] -- current timestamp in milliseconds
380
+ local window = ARGV[2] -- interval in milliseconds
381
+
382
+ local currentFields = redis.call("HGETALL", currentKey)
383
+ local requestsInCurrentWindow = 0
384
+ for i = 2, #currentFields, 2 do
385
+ requestsInCurrentWindow = requestsInCurrentWindow + tonumber(currentFields[i])
386
+ end
387
+
388
+ local previousFields = redis.call("HGETALL", previousKey)
389
+ local requestsInPreviousWindow = 0
390
+ for i = 2, #previousFields, 2 do
391
+ requestsInPreviousWindow = requestsInPreviousWindow + tonumber(previousFields[i])
392
+ end
393
+
394
+ local percentageInCurrent = ( now % window) / window
395
+ requestsInPreviousWindow = math.floor(( 1 - percentageInCurrent ) * requestsInPreviousWindow)
396
+
397
+ return requestsInCurrentWindow + requestsInPreviousWindow
398
+ `;
399
+
400
+ // src/lua-scripts/reset.ts
401
+ var resetScript = `
402
+ local pattern = KEYS[1]
403
+
404
+ -- Initialize cursor to start from 0
405
+ local cursor = "0"
406
+
407
+ repeat
408
+ -- Scan for keys matching the pattern
409
+ local scan_result = redis.call('SCAN', cursor, 'MATCH', pattern)
410
+
411
+ -- Extract cursor for the next iteration
412
+ cursor = scan_result[1]
413
+
414
+ -- Extract keys from the scan result
415
+ local keys = scan_result[2]
416
+
417
+ for i=1, #keys do
418
+ redis.call('DEL', keys[i])
419
+ end
420
+
421
+ -- Continue scanning until cursor is 0 (end of keyspace)
422
+ until cursor == "0"
423
+ `;
424
+
425
+ // src/lua-scripts/hash.ts
426
+ var SCRIPTS = {
427
+ singleRegion: {
428
+ fixedWindow: {
429
+ limit: {
430
+ script: fixedWindowLimitScript,
431
+ hash: "b13943e359636db027ad280f1def143f02158c13"
432
+ },
433
+ getRemaining: {
434
+ script: fixedWindowRemainingTokensScript,
435
+ hash: "8c4c341934502aee132643ffbe58ead3450e5208"
436
+ }
437
+ },
438
+ slidingWindow: {
439
+ limit: {
440
+ script: slidingWindowLimitScript,
441
+ hash: "e1391e429b699c780eb0480350cd5b7280fd9213"
442
+ },
443
+ getRemaining: {
444
+ script: slidingWindowRemainingTokensScript,
445
+ hash: "65a73ac5a05bf9712903bc304b77268980c1c417"
446
+ }
447
+ },
448
+ tokenBucket: {
449
+ limit: {
450
+ script: tokenBucketLimitScript,
451
+ hash: "5bece90aeef8189a8cfd28995b479529e270b3c6"
452
+ },
453
+ getRemaining: {
454
+ script: tokenBucketRemainingTokensScript,
455
+ hash: "a15be2bb1db2a15f7c82db06146f9d08983900d0"
456
+ }
457
+ },
458
+ cachedFixedWindow: {
459
+ limit: {
460
+ script: cachedFixedWindowLimitScript,
461
+ hash: "c26b12703dd137939b9a69a3a9b18e906a2d940f"
462
+ },
463
+ getRemaining: {
464
+ script: cachedFixedWindowRemainingTokenScript,
465
+ hash: "8e8f222ccae68b595ee6e3f3bf2199629a62b91a"
466
+ }
467
+ }
468
+ },
469
+ multiRegion: {
470
+ fixedWindow: {
471
+ limit: {
472
+ script: fixedWindowLimitScript2,
473
+ hash: "a8c14f3835aa87bd70e5e2116081b81664abcf5c"
474
+ },
475
+ getRemaining: {
476
+ script: fixedWindowRemainingTokensScript2,
477
+ hash: "8ab8322d0ed5fe5ac8eb08f0c2e4557f1b4816fd"
478
+ }
479
+ },
480
+ slidingWindow: {
481
+ limit: {
482
+ script: slidingWindowLimitScript2,
483
+ hash: "cb4fdc2575056df7c6d422764df0de3a08d6753b"
484
+ },
485
+ getRemaining: {
486
+ script: slidingWindowRemainingTokensScript2,
487
+ hash: "558c9306b7ec54abb50747fe0b17e5d44bd24868"
488
+ }
489
+ }
490
+ }
491
+ };
492
+ var RESET_SCRIPT = {
493
+ script: resetScript,
494
+ hash: "54bd274ddc59fb3be0f42deee2f64322a10e2b50"
495
+ };
496
+
497
+ // src/types.ts
498
+ var DenyListExtension = "denyList";
499
+ var IpDenyListKey = "ipDenyList";
500
+ var IpDenyListStatusKey = "ipDenyListStatus";
501
+
502
+ // src/deny-list/scripts.ts
503
+ var checkDenyListScript = `
504
+ -- Checks if values provideed in ARGV are present in the deny lists.
505
+ -- This is done using the allDenyListsKey below.
506
+
507
+ -- Additionally, checks the status of the ip deny list using the
508
+ -- ipDenyListStatusKey below. Here are the possible states of the
509
+ -- ipDenyListStatusKey key:
510
+ -- * status == -1: set to "disabled" with no TTL
511
+ -- * status == -2: not set, meaning that is was set before but expired
512
+ -- * status > 0: set to "valid", with a TTL
513
+ --
514
+ -- In the case of status == -2, we set the status to "pending" with
515
+ -- 30 second ttl. During this time, the process which got status == -2
516
+ -- will update the ip deny list.
517
+
518
+ local allDenyListsKey = KEYS[1]
519
+ local ipDenyListStatusKey = KEYS[2]
520
+
521
+ local results = redis.call('SMISMEMBER', allDenyListsKey, unpack(ARGV))
522
+ local status = redis.call('TTL', ipDenyListStatusKey)
523
+ if status == -2 then
524
+ redis.call('SETEX', ipDenyListStatusKey, 30, "pending")
525
+ end
526
+
527
+ return { results, status }
528
+ `;
529
+
530
+ // src/deny-list/ip-deny-list.ts
531
+ var ip_deny_list_exports = {};
532
+ __export(ip_deny_list_exports, {
533
+ ThresholdError: () => ThresholdError,
534
+ disableIpDenyList: () => disableIpDenyList,
535
+ updateIpDenyList: () => updateIpDenyList
536
+ });
537
+
538
+ // src/deny-list/time.ts
539
+ var MILLISECONDS_IN_HOUR = 60 * 60 * 1e3;
540
+ var MILLISECONDS_IN_DAY = 24 * MILLISECONDS_IN_HOUR;
541
+ var MILLISECONDS_TO_2AM = 2 * MILLISECONDS_IN_HOUR;
542
+ var getIpListTTL = (time) => {
543
+ const now = time || Date.now();
544
+ const timeSinceLast2AM = (now - MILLISECONDS_TO_2AM) % MILLISECONDS_IN_DAY;
545
+ return MILLISECONDS_IN_DAY - timeSinceLast2AM;
546
+ };
547
+
548
+ // src/deny-list/ip-deny-list.ts
549
+ var baseUrl = "https://raw.githubusercontent.com/stamparm/ipsum/master/levels";
550
+ var ThresholdError = class extends Error {
551
+ constructor(threshold) {
552
+ super(`Allowed threshold values are from 1 to 8, 1 and 8 included. Received: ${threshold}`);
553
+ this.name = "ThresholdError";
554
+ }
555
+ };
556
+ var getIpDenyList = async (threshold) => {
557
+ if (typeof threshold !== "number" || threshold < 1 || threshold > 8) {
558
+ throw new ThresholdError(threshold);
559
+ }
560
+ try {
561
+ const response = await fetch(`${baseUrl}/${threshold}.txt`);
562
+ if (!response.ok) {
563
+ throw new Error(`Error fetching data: ${response.statusText}`);
564
+ }
565
+ const data = await response.text();
566
+ const lines = data.split("\n");
567
+ return lines.filter((value) => value.length > 0);
568
+ } catch (error) {
569
+ throw new Error(`Failed to fetch ip deny list: ${error}`);
570
+ }
571
+ };
572
+ var updateIpDenyList = async (redis, prefix, threshold, ttl) => {
573
+ const allIps = await getIpDenyList(threshold);
574
+ const allDenyLists = [prefix, DenyListExtension, "all"].join(":");
575
+ const ipDenyList = [prefix, DenyListExtension, IpDenyListKey].join(":");
576
+ const statusKey = [prefix, IpDenyListStatusKey].join(":");
577
+ const transaction = redis.multi();
578
+ transaction.sdiffstore(allDenyLists, allDenyLists, ipDenyList);
579
+ transaction.del(ipDenyList);
580
+ transaction.sadd(ipDenyList, ...allIps);
581
+ transaction.sdiffstore(ipDenyList, ipDenyList, allDenyLists);
582
+ transaction.sunionstore(allDenyLists, allDenyLists, ipDenyList);
583
+ transaction.set(statusKey, "valid", { px: ttl ?? getIpListTTL() });
584
+ return await transaction.exec();
585
+ };
586
+ var disableIpDenyList = async (redis, prefix) => {
587
+ const allDenyListsKey = [prefix, DenyListExtension, "all"].join(":");
588
+ const ipDenyListKey = [prefix, DenyListExtension, IpDenyListKey].join(":");
589
+ const statusKey = [prefix, IpDenyListStatusKey].join(":");
590
+ const transaction = redis.multi();
591
+ transaction.sdiffstore(allDenyListsKey, allDenyListsKey, ipDenyListKey);
592
+ transaction.del(ipDenyListKey);
593
+ transaction.set(statusKey, "disabled");
594
+ return await transaction.exec();
595
+ };
596
+
597
+ // src/deny-list/deny-list.ts
598
+ var denyListCache = new Cache(/* @__PURE__ */ new Map());
599
+ var checkDenyListCache = (members) => {
600
+ return members.find(
601
+ (member) => denyListCache.isBlocked(member).blocked
602
+ );
603
+ };
604
+ var blockMember = (member) => {
605
+ if (denyListCache.size() > 1e3)
606
+ denyListCache.empty();
607
+ denyListCache.blockUntil(member, Date.now() + 6e4);
608
+ };
609
+ var checkDenyList = async (redis, prefix, members) => {
610
+ const [deniedValues, ipDenyListStatus] = await redis.eval(
611
+ checkDenyListScript,
612
+ [
613
+ [prefix, DenyListExtension, "all"].join(":"),
614
+ [prefix, IpDenyListStatusKey].join(":")
615
+ ],
616
+ members
617
+ );
618
+ let deniedValue = void 0;
619
+ deniedValues.map((memberDenied, index) => {
620
+ if (memberDenied) {
621
+ blockMember(members[index]);
622
+ deniedValue = members[index];
623
+ }
624
+ });
625
+ return {
626
+ deniedValue,
627
+ invalidIpDenyList: ipDenyListStatus === -2
628
+ };
629
+ };
630
+ var resolveLimitPayload = (redis, prefix, [ratelimitResponse, denyListResponse], threshold) => {
631
+ if (denyListResponse.deniedValue) {
632
+ ratelimitResponse.success = false;
633
+ ratelimitResponse.remaining = 0;
634
+ ratelimitResponse.reason = "denyList";
635
+ ratelimitResponse.deniedValue = denyListResponse.deniedValue;
636
+ }
637
+ if (denyListResponse.invalidIpDenyList) {
638
+ const updatePromise = updateIpDenyList(redis, prefix, threshold);
639
+ ratelimitResponse.pending = Promise.all([
640
+ ratelimitResponse.pending,
641
+ updatePromise
642
+ ]);
643
+ }
644
+ return ratelimitResponse;
645
+ };
646
+ var defaultDeniedResponse = (deniedValue) => {
647
+ return {
648
+ success: false,
649
+ limit: 0,
650
+ remaining: 0,
651
+ reset: 0,
652
+ pending: Promise.resolve(),
653
+ reason: "denyList",
654
+ deniedValue
655
+ };
656
+ };
657
+
658
+ // src/ratelimit.ts
659
+ var Ratelimit = class {
660
+ limiter;
661
+ ctx;
662
+ prefix;
663
+ timeout;
664
+ primaryRedis;
665
+ analytics;
666
+ enableProtection;
667
+ denyListThreshold;
668
+ constructor(config) {
669
+ this.ctx = config.ctx;
670
+ this.limiter = config.limiter;
671
+ this.timeout = config.timeout ?? 5e3;
672
+ this.prefix = config.prefix ?? "@upstash/ratelimit";
673
+ this.enableProtection = config.enableProtection ?? false;
674
+ this.denyListThreshold = config.denyListThreshold ?? 6;
675
+ this.primaryRedis = "redis" in this.ctx ? this.ctx.redis : this.ctx.regionContexts[0].redis;
676
+ this.analytics = config.analytics ? new Analytics({
677
+ redis: this.primaryRedis,
678
+ prefix: this.prefix
679
+ }) : void 0;
680
+ if (config.ephemeralCache instanceof Map) {
681
+ this.ctx.cache = new Cache(config.ephemeralCache);
682
+ } else if (config.ephemeralCache === void 0) {
683
+ this.ctx.cache = new Cache(/* @__PURE__ */ new Map());
684
+ }
685
+ }
686
+ /**
687
+ * Determine if a request should pass or be rejected based on the identifier and previously chosen ratelimit.
688
+ *
689
+ * Use this if you want to reject all requests that you can not handle right now.
690
+ *
691
+ * @example
692
+ * ```ts
693
+ * const ratelimit = new Ratelimit({
694
+ * redis: Redis.fromEnv(),
695
+ * limiter: Ratelimit.slidingWindow(10, "10 s")
696
+ * })
697
+ *
698
+ * const { success } = await ratelimit.limit(id)
699
+ * if (!success){
700
+ * return "Nope"
701
+ * }
702
+ * return "Yes"
703
+ * ```
704
+ *
705
+ * @param req.rate - The rate at which tokens will be added or consumed from the token bucket. A higher rate allows for more requests to be processed. Defaults to 1 token per interval if not specified.
706
+ *
707
+ * Usage with `req.rate`
708
+ * @example
709
+ * ```ts
710
+ * const ratelimit = new Ratelimit({
711
+ * redis: Redis.fromEnv(),
712
+ * limiter: Ratelimit.slidingWindow(100, "10 s")
713
+ * })
714
+ *
715
+ * const { success } = await ratelimit.limit(id, {rate: 10})
716
+ * if (!success){
717
+ * return "Nope"
718
+ * }
719
+ * return "Yes"
720
+ * ```
721
+ */
722
+ limit = async (identifier, req) => {
723
+ let timeoutId = null;
724
+ try {
725
+ const response = this.getRatelimitResponse(identifier, req);
726
+ const { responseArray, newTimeoutId } = this.applyTimeout(response);
727
+ timeoutId = newTimeoutId;
728
+ const timedResponse = await Promise.race(responseArray);
729
+ const finalResponse = this.submitAnalytics(timedResponse, identifier, req);
730
+ return finalResponse;
731
+ } finally {
732
+ if (timeoutId) {
733
+ clearTimeout(timeoutId);
734
+ }
735
+ }
736
+ };
737
+ /**
738
+ * Block until the request may pass or timeout is reached.
739
+ *
740
+ * This method returns a promise that resolves as soon as the request may be processed
741
+ * or after the timeout has been reached.
742
+ *
743
+ * Use this if you want to delay the request until it is ready to get processed.
744
+ *
745
+ * @example
746
+ * ```ts
747
+ * const ratelimit = new Ratelimit({
748
+ * redis: Redis.fromEnv(),
749
+ * limiter: Ratelimit.slidingWindow(10, "10 s")
750
+ * })
751
+ *
752
+ * const { success } = await ratelimit.blockUntilReady(id, 60_000)
753
+ * if (!success){
754
+ * return "Nope"
755
+ * }
756
+ * return "Yes"
757
+ * ```
758
+ */
759
+ blockUntilReady = async (identifier, timeout) => {
760
+ if (timeout <= 0) {
761
+ throw new Error("timeout must be positive");
762
+ }
763
+ let res;
764
+ const deadline = Date.now() + timeout;
765
+ while (true) {
766
+ res = await this.limit(identifier);
767
+ if (res.success) {
768
+ break;
769
+ }
770
+ if (res.reset === 0) {
771
+ throw new Error("This should not happen");
772
+ }
773
+ const wait = Math.min(res.reset, deadline) - Date.now();
774
+ await new Promise((r) => setTimeout(r, wait));
775
+ if (Date.now() > deadline) {
776
+ break;
777
+ }
778
+ }
779
+ return res;
780
+ };
781
+ resetUsedTokens = async (identifier) => {
782
+ const pattern = [this.prefix, identifier].join(":");
783
+ await this.limiter().resetTokens(this.ctx, pattern);
784
+ };
785
+ /**
786
+ * Returns the remaining token count together with a reset timestamps
787
+ *
788
+ * @param identifier identifier to check
789
+ * @returns object with `remaining` and reset fields. `remaining` denotes
790
+ * the remaining tokens and reset denotes the timestamp when the
791
+ * tokens reset.
792
+ */
793
+ getRemaining = async (identifier) => {
794
+ const pattern = [this.prefix, identifier].join(":");
795
+ return await this.limiter().getRemaining(this.ctx, pattern);
796
+ };
797
+ /**
798
+ * Checks if the identifier or the values in req are in the deny list cache.
799
+ * If so, returns the default denied response.
800
+ *
801
+ * Otherwise, calls redis to check the rate limit and deny list. Returns after
802
+ * resolving the result. Resolving is overriding the rate limit result if
803
+ * some value is in the deny list.
804
+ *
805
+ * @param identifier identifier to block
806
+ * @param req options with ip, user agent, country, rate and geo info
807
+ * @returns rate limit response
808
+ */
809
+ getRatelimitResponse = async (identifier, req) => {
810
+ const key = this.getKey(identifier);
811
+ const definedMembers = this.getDefinedMembers(identifier, req);
812
+ const deniedValue = checkDenyListCache(definedMembers);
813
+ const result = deniedValue ? [defaultDeniedResponse(deniedValue), { deniedValue, invalidIpDenyList: false }] : await Promise.all([
814
+ this.limiter().limit(this.ctx, key, req?.rate),
815
+ this.enableProtection ? checkDenyList(this.primaryRedis, this.prefix, definedMembers) : { deniedValue: void 0, invalidIpDenyList: false }
816
+ ]);
817
+ return resolveLimitPayload(this.primaryRedis, this.prefix, result, this.denyListThreshold);
818
+ };
819
+ /**
820
+ * Creates an array with the original response promise and a timeout promise
821
+ * if this.timeout > 0.
822
+ *
823
+ * @param response Ratelimit response promise
824
+ * @returns array with the response and timeout promise. also includes the timeout id
825
+ */
826
+ applyTimeout = (response) => {
827
+ let newTimeoutId = null;
828
+ const responseArray = [response];
829
+ if (this.timeout > 0) {
830
+ const timeoutResponse = new Promise((resolve) => {
831
+ newTimeoutId = setTimeout(() => {
832
+ resolve({
833
+ success: true,
834
+ limit: 0,
835
+ remaining: 0,
836
+ reset: 0,
837
+ pending: Promise.resolve(),
838
+ reason: "timeout"
839
+ });
840
+ }, this.timeout);
841
+ });
842
+ responseArray.push(timeoutResponse);
843
+ }
844
+ return {
845
+ responseArray,
846
+ newTimeoutId
847
+ };
848
+ };
849
+ /**
850
+ * submits analytics if this.analytics is set
851
+ *
852
+ * @param ratelimitResponse final rate limit response
853
+ * @param identifier identifier to submit
854
+ * @param req limit options
855
+ * @returns rate limit response after updating the .pending field
856
+ */
857
+ submitAnalytics = (ratelimitResponse, identifier, req) => {
858
+ if (this.analytics) {
859
+ try {
860
+ const geo = req ? this.analytics.extractGeo(req) : void 0;
861
+ const analyticsP = this.analytics.record({
862
+ identifier: ratelimitResponse.reason === "denyList" ? ratelimitResponse.deniedValue : identifier,
863
+ time: Date.now(),
864
+ success: ratelimitResponse.reason === "denyList" ? "denied" : ratelimitResponse.success,
865
+ ...geo
866
+ }).catch((error) => {
867
+ let errorMessage = "Failed to record analytics";
868
+ if (`${error}`.includes("WRONGTYPE")) {
869
+ errorMessage = `
870
+ Failed to record analytics. See the information below:
871
+
872
+ This can occur when you uprade to Ratelimit version 1.1.2
873
+ or later from an earlier version.
874
+
875
+ This occurs simply because the way we store analytics data
876
+ has changed. To avoid getting this error, disable analytics
877
+ for *an hour*, then simply enable it back.
878
+
879
+ `;
880
+ }
881
+ console.warn(errorMessage, error);
882
+ });
883
+ ratelimitResponse.pending = Promise.all([ratelimitResponse.pending, analyticsP]);
884
+ } catch (error) {
885
+ console.warn("Failed to record analytics", error);
886
+ }
887
+ ;
888
+ }
889
+ ;
890
+ return ratelimitResponse;
891
+ };
892
+ getKey = (identifier) => {
893
+ return [this.prefix, identifier].join(":");
894
+ };
895
+ /**
896
+ * returns a list of defined values from
897
+ * [identifier, req.ip, req.userAgent, req.country]
898
+ *
899
+ * @param identifier identifier
900
+ * @param req limit options
901
+ * @returns list of defined values
902
+ */
903
+ getDefinedMembers = (identifier, req) => {
904
+ const members = [identifier, req?.ip, req?.userAgent, req?.country];
905
+ return members.filter((item) => Boolean(item));
906
+ };
907
+ };
908
+
909
+ // src/multi.ts
910
/**
 * Generates a 16-character alphanumeric request id using `Math.random`.
 * Not cryptographically secure — only used to tag requests for
 * cross-region reconciliation, where uniqueness is best-effort.
 */
function randomId() {
  const alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789";
  return Array.from(
    { length: 16 },
    () => alphabet.charAt(Math.floor(Math.random() * alphabet.length))
  ).join("");
}
919
// Multi-region ratelimiter: the same lua script is evaluated in every
// configured redis region, the fastest region's answer is returned to the
// caller, and the regions are reconciled asynchronously afterwards.
var MultiRegionRatelimit = class extends Ratelimit {
  /**
   * Create a new Ratelimit instance by providing a `@upstash/redis` instance and the algorithm of your choice.
   */
  constructor(config) {
    super({
      prefix: config.prefix,
      limiter: config.limiter,
      timeout: config.timeout,
      analytics: config.analytics,
      ctx: {
        // One context per redis instance; requests race across all of them.
        regionContexts: config.redis.map((redis) => ({
          redis
        })),
        cache: config.ephemeralCache ? new Cache(config.ephemeralCache) : void 0
      }
    });
  }
  /**
   * Each request inside a fixed time increases a counter.
   * Once the counter reaches the maximum allowed number, all further requests are
   * rejected.
   *
   * **Pro:**
   *
   * - Newer requests are not starved by old ones.
   * - Low storage cost.
   *
   * **Con:**
   *
   * A burst of requests near the boundary of a window can result in a very
   * high request rate because two windows will be filled with requests quickly.
   *
   * @param tokens - How many requests a user can make in each time window.
   * @param window - A fixed timeframe
   */
  static fixedWindow(tokens, window) {
    const windowDuration = ms(window);
    // The lua scripts reply with an HGETALL-style flat array:
    // [requestId, count, requestId, count, ...].
    // sumUsedTokens adds up the counts (odd indices); collectIds gathers the
    // request ids (even indices).
    const sumUsedTokens = (fields) => fields.reduce(
      (acc, field, index) => index % 2 ? acc + Number.parseInt(field, 10) : acc,
      0
    );
    const collectIds = (fields) => fields.reduce((ids, field, index) => {
      if (index % 2 === 0) {
        ids.push(field);
      }
      return ids;
    }, []);
    return () => ({
      async limit(ctx, identifier, rate) {
        if (ctx.cache) {
          // Short-circuit when the ephemeral cache already knows this
          // identifier is blocked.
          const { blocked, reset: cachedReset } = ctx.cache.isBlocked(identifier);
          if (blocked) {
            return {
              success: false,
              limit: tokens,
              remaining: 0,
              reset: cachedReset,
              pending: Promise.resolve(),
              reason: "cacheBlock"
            };
          }
        }
        const requestId = randomId();
        // Index of the current fixed window since epoch.
        const bucket = Math.floor(Date.now() / windowDuration);
        const key = [identifier, bucket].join(":");
        // `rate` lets a caller consume several tokens at once (min 1).
        const incrementBy = rate ? Math.max(1, rate) : 1;
        const dbs = ctx.regionContexts.map((regionContext) => ({
          redis: regionContext.redis,
          request: safeEval(
            regionContext,
            SCRIPTS.multiRegion.fixedWindow.limit,
            [key],
            [requestId, windowDuration, incrementBy]
          )
        }));
        // Answer with the first region to respond; slower regions are
        // reconciled in sync() below.
        const firstResponse = await Promise.any(dbs.map((s) => s.request));
        const usedTokens = sumUsedTokens(firstResponse);
        const remaining = tokens - usedTokens;
        // Propagates request ids missing in a region so all regions
        // eventually agree on the set of requests seen in this window.
        async function sync() {
          const individualIDs = await Promise.all(dbs.map((s) => s.request));
          const allIDs = [...new Set(
            individualIDs.flatMap((fields) => collectIds(fields))
          ).values()];
          for (const db of dbs) {
            const fields = await db.request;
            const usedDbTokens = sumUsedTokens(fields);
            const dbIds = collectIds(fields);
            if (usedDbTokens >= tokens) {
              // Region is already at the limit; no need to backfill.
              continue;
            }
            const diff = allIDs.filter((id) => !dbIds.includes(id));
            if (diff.length === 0) {
              continue;
            }
            for (const missingId of diff) {
              await db.redis.hset(key, { [missingId]: incrementBy });
            }
          }
        }
        const success = remaining > 0;
        const reset = (bucket + 1) * windowDuration;
        if (ctx.cache && !success) {
          ctx.cache.blockUntil(identifier, reset);
        }
        return {
          success,
          limit: tokens,
          remaining,
          reset,
          pending: sync()
        };
      },
      async getRemaining(ctx, identifier) {
        const bucket = Math.floor(Date.now() / windowDuration);
        const key = [identifier, bucket].join(":");
        const dbs = ctx.regionContexts.map((regionContext) => ({
          redis: regionContext.redis,
          request: safeEval(
            regionContext,
            SCRIPTS.multiRegion.fixedWindow.getRemaining,
            [key],
            [null]
          )
        }));
        const firstResponse = await Promise.any(dbs.map((s) => s.request));
        const usedTokens = sumUsedTokens(firstResponse);
        return {
          remaining: Math.max(0, tokens - usedTokens),
          reset: (bucket + 1) * windowDuration
        };
      },
      async resetTokens(ctx, identifier) {
        const pattern = [identifier, "*"].join(":");
        if (ctx.cache) {
          ctx.cache.pop(identifier);
        }
        // Bug fix: return the eval promise from the map callback so that
        // Promise.all actually awaits every regional reset. The previous
        // block body returned undefined, leaving the evals floating
        // (unawaited and with unhandled rejections).
        await Promise.all(ctx.regionContexts.map((regionContext) => safeEval(
          regionContext,
          RESET_SCRIPT,
          [pattern],
          [null]
        )));
      }
    });
  }
  /**
   * Combined approach of `slidingLogs` and `fixedWindow` with lower storage
   * costs than `slidingLogs` and improved boundary behavior by calculating a
   * weighted score between two windows.
   *
   * **Pro:**
   *
   * Good performance allows this to scale to very high loads.
   *
   * **Con:**
   *
   * Nothing major.
   *
   * @param tokens - How many requests a user can make in each time window.
   * @param window - The duration in which the user can max X requests.
   */
  static slidingWindow(tokens, window) {
    // The original bundle declared two identical constants (windowSize and
    // windowDuration, both ms(window)); they are unified here.
    const windowSize = ms(window);
    // Same HGETALL-pair helpers as in fixedWindow above.
    const sumUsedTokens = (fields) => fields.reduce(
      (acc, field, index) => index % 2 ? acc + Number.parseInt(field, 10) : acc,
      0
    );
    const collectIds = (fields) => fields.reduce((ids, field, index) => {
      if (index % 2 === 0) {
        ids.push(field);
      }
      return ids;
    }, []);
    return () => ({
      async limit(ctx, identifier, rate) {
        if (ctx.cache) {
          const { blocked, reset: cachedReset } = ctx.cache.isBlocked(identifier);
          if (blocked) {
            return {
              success: false,
              limit: tokens,
              remaining: 0,
              reset: cachedReset,
              pending: Promise.resolve(),
              reason: "cacheBlock"
            };
          }
        }
        const requestId = randomId();
        const now = Date.now();
        const currentWindow = Math.floor(now / windowSize);
        const currentKey = [identifier, currentWindow].join(":");
        const previousWindow = currentWindow - 1;
        const previousKey = [identifier, previousWindow].join(":");
        const incrementBy = rate ? Math.max(1, rate) : 1;
        const dbs = ctx.regionContexts.map((regionContext) => ({
          redis: regionContext.redis,
          request: safeEval(
            regionContext,
            SCRIPTS.multiRegion.slidingWindow.limit,
            [currentKey, previousKey],
            [tokens, now, windowSize, requestId, incrementBy]
            // lua seems to return `1` for true and `null` for false
          )
        }));
        // Fraction of the current window that has already elapsed; used to
        // weight the previous window's usage.
        const percentageInCurrent = now % windowSize / windowSize;
        const [current, previous, success] = await Promise.any(dbs.map((s) => s.request));
        if (success) {
          // The script recorded this request server-side only; mirror it in
          // the local copy so the counts below include it.
          current.push(requestId, incrementBy.toString());
        }
        const previousUsedTokens = sumUsedTokens(previous);
        const currentUsedTokens = sumUsedTokens(current);
        const previousPartialUsed = Math.ceil(previousUsedTokens * (1 - percentageInCurrent));
        const usedTokens = previousPartialUsed + currentUsedTokens;
        const remaining = tokens - usedTokens;
        // Backfill request ids missing from slower regions' current window.
        async function sync() {
          const res = await Promise.all(dbs.map((s) => s.request));
          const allCurrentIds = [...new Set(
            res.flatMap(([regionCurrent]) => collectIds(regionCurrent))
          ).values()];
          for (const db of dbs) {
            const [regionCurrent, _previous, _success] = await db.request;
            const dbIds = collectIds(regionCurrent);
            const usedDbTokens = sumUsedTokens(regionCurrent);
            if (usedDbTokens >= tokens) {
              continue;
            }
            const diff = allCurrentIds.filter((id) => !dbIds.includes(id));
            if (diff.length === 0) {
              continue;
            }
            for (const missingId of diff) {
              await db.redis.hset(currentKey, { [missingId]: incrementBy });
            }
          }
        }
        const reset = (currentWindow + 1) * windowSize;
        if (ctx.cache && !success) {
          ctx.cache.blockUntil(identifier, reset);
        }
        return {
          success: Boolean(success),
          limit: tokens,
          remaining: Math.max(0, remaining),
          reset,
          pending: sync()
        };
      },
      async getRemaining(ctx, identifier) {
        const now = Date.now();
        const currentWindow = Math.floor(now / windowSize);
        const currentKey = [identifier, currentWindow].join(":");
        const previousWindow = currentWindow - 1;
        const previousKey = [identifier, previousWindow].join(":");
        const dbs = ctx.regionContexts.map((regionContext) => ({
          redis: regionContext.redis,
          request: safeEval(
            regionContext,
            SCRIPTS.multiRegion.slidingWindow.getRemaining,
            [currentKey, previousKey],
            [now, windowSize]
            // lua seems to return `1` for true and `null` for false
          )
        }));
        // This script returns the used-token total directly (no pair array).
        const usedTokens = await Promise.any(dbs.map((s) => s.request));
        return {
          remaining: Math.max(0, tokens - usedTokens),
          reset: (currentWindow + 1) * windowSize
        };
      },
      async resetTokens(ctx, identifier) {
        const pattern = [identifier, "*"].join(":");
        if (ctx.cache) {
          ctx.cache.pop(identifier);
        }
        // Bug fix: same as fixedWindow.resetTokens — return the eval promise
        // so Promise.all awaits each regional reset instead of undefined.
        await Promise.all(ctx.regionContexts.map((regionContext) => safeEval(
          regionContext,
          RESET_SCRIPT,
          [pattern],
          [null]
        )));
      }
    });
  }
};
1250
+
1251
+ // src/single.ts
1252
// Single-region ratelimiter: all state lives in one `@upstash/redis`
// instance and each decision is a single lua-script round-trip.
var RegionRatelimit = class extends Ratelimit {
  /**
   * Create a new Ratelimit instance by providing a `@upstash/redis` instance and the algorithm of your choice.
   */
  constructor(config) {
    super({
      prefix: config.prefix,
      limiter: config.limiter,
      timeout: config.timeout,
      analytics: config.analytics,
      ctx: {
        redis: config.redis
      },
      ephemeralCache: config.ephemeralCache,
      enableProtection: config.enableProtection,
      denyListThreshold: config.denyListThreshold
    });
  }
  /**
   * Each request inside a fixed time increases a counter.
   * Once the counter reaches the maximum allowed number, all further requests are
   * rejected.
   *
   * **Pro:**
   *
   * - Newer requests are not starved by old ones.
   * - Low storage cost.
   *
   * **Con:**
   *
   * A burst of requests near the boundary of a window can result in a very
   * high request rate because two windows will be filled with requests quickly.
   *
   * @param tokens - How many requests a user can make in each time window.
   * @param window - A fixed timeframe
   */
  static fixedWindow(tokens, window) {
    const windowDuration = ms(window);
    return () => ({
      async limit(ctx, identifier, rate) {
        // Index of the current fixed window since epoch; the redis key is
        // scoped to that window so counters expire naturally.
        const bucket = Math.floor(Date.now() / windowDuration);
        const key = [identifier, bucket].join(":");
        if (ctx.cache) {
          // Short-circuit via the in-memory ephemeral cache when this
          // identifier is already known to be blocked.
          const { blocked, reset: reset2 } = ctx.cache.isBlocked(identifier);
          if (blocked) {
            return {
              success: false,
              limit: tokens,
              remaining: 0,
              reset: reset2,
              pending: Promise.resolve(),
              reason: "cacheBlock"
            };
          }
        }
        // `rate` lets a caller consume several tokens at once (min 1).
        const incrementBy = rate ? Math.max(1, rate) : 1;
        const usedTokensAfterUpdate = await safeEval(
          ctx,
          SCRIPTS.singleRegion.fixedWindow.limit,
          [key],
          [windowDuration, incrementBy]
        );
        const success = usedTokensAfterUpdate <= tokens;
        const remainingTokens = Math.max(0, tokens - usedTokensAfterUpdate);
        const reset = (bucket + 1) * windowDuration;
        if (ctx.cache && !success) {
          // Remember the block locally until the window rolls over.
          ctx.cache.blockUntil(identifier, reset);
        }
        return {
          success,
          limit: tokens,
          remaining: remainingTokens,
          reset,
          pending: Promise.resolve()
        };
      },
      async getRemaining(ctx, identifier) {
        const bucket = Math.floor(Date.now() / windowDuration);
        const key = [identifier, bucket].join(":");
        const usedTokens = await safeEval(
          ctx,
          SCRIPTS.singleRegion.fixedWindow.getRemaining,
          [key],
          [null]
        );
        return {
          remaining: Math.max(0, tokens - usedTokens),
          reset: (bucket + 1) * windowDuration
        };
      },
      async resetTokens(ctx, identifier) {
        // Delete every window bucket for this identifier ("<id>:*"), plus
        // any local cache entry.
        const pattern = [identifier, "*"].join(":");
        if (ctx.cache) {
          ctx.cache.pop(identifier);
        }
        await safeEval(
          ctx,
          RESET_SCRIPT,
          [pattern],
          [null]
        );
      }
    });
  }
  /**
   * Combined approach of `slidingLogs` and `fixedWindow` with lower storage
   * costs than `slidingLogs` and improved boundary behavior by calculating a
   * weighted score between two windows.
   *
   * **Pro:**
   *
   * Good performance allows this to scale to very high loads.
   *
   * **Con:**
   *
   * Nothing major.
   *
   * @param tokens - How many requests a user can make in each time window.
   * @param window - The duration in which the user can max X requests.
   */
  static slidingWindow(tokens, window) {
    const windowSize = ms(window);
    return () => ({
      async limit(ctx, identifier, rate) {
        const now = Date.now();
        // The script weighs the previous window against the current one,
        // so both keys are passed.
        const currentWindow = Math.floor(now / windowSize);
        const currentKey = [identifier, currentWindow].join(":");
        const previousWindow = currentWindow - 1;
        const previousKey = [identifier, previousWindow].join(":");
        if (ctx.cache) {
          const { blocked, reset: reset2 } = ctx.cache.isBlocked(identifier);
          if (blocked) {
            return {
              success: false,
              limit: tokens,
              remaining: 0,
              reset: reset2,
              pending: Promise.resolve(),
              reason: "cacheBlock"
            };
          }
        }
        const incrementBy = rate ? Math.max(1, rate) : 1;
        // The lua script returns the remaining token count (may be negative
        // when the request was denied).
        const remainingTokens = await safeEval(
          ctx,
          SCRIPTS.singleRegion.slidingWindow.limit,
          [currentKey, previousKey],
          [tokens, now, windowSize, incrementBy]
        );
        const success = remainingTokens >= 0;
        const reset = (currentWindow + 1) * windowSize;
        if (ctx.cache && !success) {
          ctx.cache.blockUntil(identifier, reset);
        }
        return {
          success,
          limit: tokens,
          remaining: Math.max(0, remainingTokens),
          reset,
          pending: Promise.resolve()
        };
      },
      async getRemaining(ctx, identifier) {
        const now = Date.now();
        const currentWindow = Math.floor(now / windowSize);
        const currentKey = [identifier, currentWindow].join(":");
        const previousWindow = currentWindow - 1;
        const previousKey = [identifier, previousWindow].join(":");
        const usedTokens = await safeEval(
          ctx,
          SCRIPTS.singleRegion.slidingWindow.getRemaining,
          [currentKey, previousKey],
          [now, windowSize]
        );
        return {
          remaining: Math.max(0, tokens - usedTokens),
          reset: (currentWindow + 1) * windowSize
        };
      },
      async resetTokens(ctx, identifier) {
        const pattern = [identifier, "*"].join(":");
        if (ctx.cache) {
          ctx.cache.pop(identifier);
        }
        await safeEval(
          ctx,
          RESET_SCRIPT,
          [pattern],
          [null]
        );
      }
    });
  }
  /**
   * You have a bucket filled with `{maxTokens}` tokens that refills constantly
   * at `{refillRate}` per `{interval}`.
   * Every request will remove one token from the bucket and if there is no
   * token to take, the request is rejected.
   *
   * **Pro:**
   *
   * - Bursts of requests are smoothed out and you can process them at a constant
   * rate.
   * - Allows to set a higher initial burst limit by setting `maxTokens` higher
   * than `refillRate`
   */
  static tokenBucket(refillRate, interval, maxTokens) {
    const intervalDuration = ms(interval);
    return () => ({
      async limit(ctx, identifier, rate) {
        if (ctx.cache) {
          const { blocked, reset: reset2 } = ctx.cache.isBlocked(identifier);
          if (blocked) {
            return {
              success: false,
              limit: maxTokens,
              remaining: 0,
              reset: reset2,
              pending: Promise.resolve(),
              reason: "cacheBlock"
            };
          }
        }
        const now = Date.now();
        const incrementBy = rate ? Math.max(1, rate) : 1;
        // The script returns [remaining, reset]; `remaining` is negative when
        // the bucket did not hold enough tokens.
        const [remaining, reset] = await safeEval(
          ctx,
          SCRIPTS.singleRegion.tokenBucket.limit,
          [identifier],
          [maxTokens, intervalDuration, refillRate, now, incrementBy]
        );
        const success = remaining >= 0;
        if (ctx.cache && !success) {
          ctx.cache.blockUntil(identifier, reset);
        }
        return {
          success,
          limit: maxTokens,
          remaining,
          reset,
          pending: Promise.resolve()
        };
      },
      async getRemaining(ctx, identifier) {
        const [remainingTokens, refilledAt] = await safeEval(
          ctx,
          SCRIPTS.singleRegion.tokenBucket.getRemaining,
          [identifier],
          [maxTokens]
        );
        // When the bucket key does not exist yet, the script reports the
        // sentinel `tokenBucketIdentifierNotFound`; in that case the next
        // refill is one interval from now rather than from `refilledAt`.
        const freshRefillAt = Date.now() + intervalDuration;
        const identifierRefillsAt = refilledAt + intervalDuration;
        return {
          remaining: remainingTokens,
          reset: refilledAt === tokenBucketIdentifierNotFound ? freshRefillAt : identifierRefillsAt
        };
      },
      async resetTokens(ctx, identifier) {
        // Token buckets are keyed by the bare identifier (no window suffix).
        const pattern = identifier;
        if (ctx.cache) {
          ctx.cache.pop(identifier);
        }
        await safeEval(
          ctx,
          RESET_SCRIPT,
          [pattern],
          [null]
        );
      }
    });
  }
  /**
   * cachedFixedWindow first uses the local cache to decide if a request may pass and then updates
   * it asynchronously.
   * This is experimental and not yet recommended for production use.
   *
   * @experimental
   *
   * Each request inside a fixed time increases a counter.
   * Once the counter reaches the maximum allowed number, all further requests are
   * rejected.
   *
   * **Pro:**
   *
   * - Newer requests are not starved by old ones.
   * - Low storage cost.
   *
   * **Con:**
   *
   * A burst of requests near the boundary of a window can result in a very
   * high request rate because two windows will be filled with requests quickly.
   *
   * @param tokens - How many requests a user can make in each time window.
   * @param window - A fixed timeframe
   */
  static cachedFixedWindow(tokens, window) {
    const windowDuration = ms(window);
    return () => ({
      async limit(ctx, identifier, rate) {
        if (!ctx.cache) {
          throw new Error("This algorithm requires a cache");
        }
        const bucket = Math.floor(Date.now() / windowDuration);
        const key = [identifier, bucket].join(":");
        const reset = (bucket + 1) * windowDuration;
        const incrementBy = rate ? Math.max(1, rate) : 1;
        const hit = typeof ctx.cache.get(key) === "number";
        if (hit) {
          // Cache hit: decide from the local counter and update redis
          // asynchronously (only when the request is allowed).
          const cachedTokensAfterUpdate = ctx.cache.incr(key);
          // NOTE(review): this uses `<` while the uncached path below uses
          // `<=`-equivalent logic (`remaining >= 0`), making the cached
          // decision stricter by one request. Confirm whether this
          // off-by-one is intentional upstream.
          const success = cachedTokensAfterUpdate < tokens;
          const pending = success ? safeEval(
            ctx,
            SCRIPTS.singleRegion.cachedFixedWindow.limit,
            [key],
            [windowDuration, incrementBy]
          ) : Promise.resolve();
          return {
            success,
            limit: tokens,
            remaining: tokens - cachedTokensAfterUpdate,
            reset,
            pending
          };
        }
        // Cache miss: consult redis synchronously and seed the local cache.
        const usedTokensAfterUpdate = await safeEval(
          ctx,
          SCRIPTS.singleRegion.cachedFixedWindow.limit,
          [key],
          [windowDuration, incrementBy]
        );
        ctx.cache.set(key, usedTokensAfterUpdate);
        const remaining = tokens - usedTokensAfterUpdate;
        return {
          success: remaining >= 0,
          limit: tokens,
          remaining,
          reset,
          pending: Promise.resolve()
        };
      },
      async getRemaining(ctx, identifier) {
        if (!ctx.cache) {
          throw new Error("This algorithm requires a cache");
        }
        const bucket = Math.floor(Date.now() / windowDuration);
        const key = [identifier, bucket].join(":");
        const hit = typeof ctx.cache.get(key) === "number";
        if (hit) {
          // Serve from the local counter when available.
          const cachedUsedTokens = ctx.cache.get(key) ?? 0;
          return {
            remaining: Math.max(0, tokens - cachedUsedTokens),
            reset: (bucket + 1) * windowDuration
          };
        }
        const usedTokens = await safeEval(
          ctx,
          SCRIPTS.singleRegion.cachedFixedWindow.getRemaining,
          [key],
          [null]
        );
        return {
          remaining: Math.max(0, tokens - usedTokens),
          reset: (bucket + 1) * windowDuration
        };
      },
      async resetTokens(ctx, identifier) {
        if (!ctx.cache) {
          throw new Error("This algorithm requires a cache");
        }
        // Clear the local counter for the current window, then delete all
        // window buckets in redis.
        const bucket = Math.floor(Date.now() / windowDuration);
        const key = [identifier, bucket].join(":");
        ctx.cache.pop(key);
        const pattern = [identifier, "*"].join(":");
        await safeEval(
          ctx,
          RESET_SCRIPT,
          [pattern],
          [null]
        );
      }
    });
  }
};
1635
+ export {
1636
+ Analytics,
1637
+ ip_deny_list_exports as IpDenyList,
1638
+ MultiRegionRatelimit,
1639
+ RegionRatelimit as Ratelimit
1640
+ };
1641
+ //# sourceMappingURL=index.mjs.map