sidekiq-ts 1.1.0 → 1.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -12,6 +12,7 @@ A TypeScript implementation of [Sidekiq](https://sidekiq.org/) for Node.js. Proc
12
12
  - **Leader election** - Coordinate across distributed workers
13
13
  - **Cron jobs** - Periodic job scheduling with standard cron expressions
14
14
  - **Middleware** - Customize job enqueueing and execution
15
+ - **Rate limiting** - Control concurrent operations and API call rates
15
16
  - **CLI** - Run workers from the command line
16
17
  - **Testing utilities** - Fake and inline modes for testing
17
18
 
@@ -396,6 +397,157 @@ class TimingMiddleware {
396
397
  Sidekiq.useServerMiddleware(TimingMiddleware);
397
398
  ```
398
399
 
400
+ ## Rate Limiting
401
+
402
+ Limit concurrent operations or API call rates across all workers. Similar to [Sidekiq Enterprise rate limiting](https://github.com/sidekiq/sidekiq/wiki/Ent-Rate-Limiting).
403
+
404
+ ### Creating Limiters
405
+
406
+ ```typescript
407
+ import { Limiter } from "sidekiq-ts";
408
+
409
+ // Limit concurrent operations (distributed mutex)
410
+ const apiLimiter = Limiter.concurrent("external-api", 50);
411
+
412
+ // Limit operations per time boundary (resets at interval start)
413
+ const emailLimiter = Limiter.bucket("email-send", 100, "minute");
414
+
415
+ // Limit operations in a rolling window
416
+ const requestLimiter = Limiter.window("api-requests", 1000, "hour");
417
+ ```
418
+
419
+ ### Using in Jobs
420
+
421
+ ```typescript
422
+ import { Job, Limiter } from "sidekiq-ts";
423
+
424
+ const stripeLimiter = Limiter.concurrent("stripe-api", 25);
425
+
426
+ class ChargeCustomerJob extends Job<[string, number]> {
427
+ async perform(customerId: string, amount: number) {
428
+ await stripeLimiter.withinLimit(async () => {
429
+ await stripe.charges.create({
430
+ customer: customerId,
431
+ amount,
432
+ });
433
+ });
434
+ }
435
+ }
436
+ ```
437
+
438
+ When the limit is exceeded, `withinLimit()` throws an `OverLimitError`.
439
+
440
+ ### Auto-Reschedule with Middleware
441
+
442
+ To automatically reschedule jobs when rate limited (like Sidekiq Enterprise):
443
+
444
+ ```typescript
445
+ import { Sidekiq, ensureRateLimitMiddleware } from "sidekiq-ts";
446
+
447
+ // Add the middleware before starting the worker
448
+ ensureRateLimitMiddleware(Sidekiq.defaultConfiguration);
449
+
450
+ const runner = await Sidekiq.run();
451
+ ```
452
+
453
+ The middleware catches `OverLimitError` and reschedules the job with backoff (~5 minutes + jitter). After 20 reschedules, the job fails normally.
454
+
455
+ ### Limiter Types
456
+
457
+ #### Concurrent Limiter
458
+
459
+ Limits how many operations can run simultaneously across all workers:
460
+
461
+ ```typescript
462
+ // Max 10 concurrent Stripe API calls
463
+ const limiter = Limiter.concurrent("stripe", 10, {
464
+ lockTimeout: 180, // Auto-release lock after 180 seconds (default)
465
+ });
466
+ ```
467
+
468
+ #### Bucket Limiter
469
+
470
+ Allows N operations per time interval, resetting at boundaries:
471
+
472
+ ```typescript
473
+ // 100 emails per minute, resets at :00 of each minute
474
+ const limiter = Limiter.bucket("email", 100, "minute");
475
+
476
+ // With numeric interval (seconds)
477
+ const limiter = Limiter.bucket("sms", 50, 30); // 50 per 30 seconds
478
+ ```
479
+
480
+ Intervals: `"second"`, `"minute"`, `"hour"`, `"day"`, or number of seconds.
481
+
482
+ #### Window Limiter
483
+
484
+ Allows N operations in a rolling time window:
485
+
486
+ ```typescript
487
+ // 1000 requests per hour, rolling window
488
+ const limiter = Limiter.window("api", 1000, "hour");
489
+ ```
490
+
491
+ ### Limiter Methods
492
+
493
+ ```typescript
494
+ // Execute within the rate limit
495
+ await limiter.withinLimit(async () => {
496
+ await doWork();
497
+ });
498
+
499
+ // Check current status (non-blocking)
500
+ const status = await limiter.check();
501
+ console.log(status.allowed); // true if under limit
502
+ console.log(status.current); // current usage
503
+ console.log(status.limit); // max allowed
504
+
505
+ // Reset the limiter
506
+ await limiter.reset();
507
+ ```
508
+
509
+ ### Dynamic Limiters
510
+
511
+ Create per-user or per-resource limiters with dynamic names:
512
+
513
+ ```typescript
514
+ class UserApiCallJob extends Job<[string, string]> {
515
+ async perform(userId: string, endpoint: string) {
516
+ // Each user gets their own rate limit
517
+ const userLimiter = Limiter.window(`user-${userId}-api`, 100, "minute");
518
+
519
+ await userLimiter.withinLimit(async () => {
520
+ await fetch(endpoint);
521
+ });
522
+ }
523
+ }
524
+ ```
525
+
526
+ Redis keys are created per unique name: `limiter:user-123-api`, `limiter:user-456-api`, etc.
527
+
528
+ For high-throughput scenarios, cache limiter instances:
529
+
530
+ ```typescript
531
+ const userLimiters = new Map<string, ILimiter>();
532
+
533
+ function getUserLimiter(userId: string): ILimiter {
534
+ let limiter = userLimiters.get(userId);
535
+ if (!limiter) {
536
+ limiter = Limiter.window(`user-${userId}-api`, 100, "minute");
537
+ userLimiters.set(userId, limiter);
538
+ }
539
+ return limiter;
540
+ }
541
+ ```
542
+
543
+ ### Custom Key Prefix
544
+
545
+ ```typescript
546
+ const limiter = Limiter.concurrent("api", 10, {
547
+ keyPrefix: "myapp", // Redis key: myapp:api (default: limiter:api)
548
+ });
549
+ ```
550
+
399
551
  ## Leader Election
400
552
 
401
553
  For tasks that should only run on one worker (like cron jobs), use leader election:
@@ -633,66 +785,46 @@ The worker handles these signals:
633
785
  | `SIGTSTP` | Quiet mode (stop accepting new jobs) |
634
786
  | `SIGTTIN` | Dump current job state to logs |
635
787
 
636
- The shutdown timeout (default: 25 seconds) allows in-flight jobs to complete before forced termination.
788
+ ### Quiet Mode
637
789
 
638
- ## Production Deployment
790
+ When a worker enters quiet mode (`runner.quiet()` or `SIGTSTP`):
639
791
 
640
- ### Process Manager
792
+ 1. Stops polling queues for new jobs
793
+ 2. In-flight jobs continue running to completion
794
+ 3. Worker stays alive but idle
641
795
 
642
- Use a process manager like systemd, PM2, or Docker:
796
+ This is useful for graceful deploys—quiet the old workers, start new ones, then stop the old workers.
643
797
 
644
- **systemd example:**
798
+ ### Stop/Shutdown
645
799
 
646
- ```ini
647
- [Unit]
648
- Description=Sidekiq Worker
649
- After=network.target redis.service
800
+ When `runner.stop()` is called (or `SIGINT`/`SIGTERM` received):
650
801
 
651
- [Service]
652
- Type=simple
653
- User=app
654
- WorkingDirectory=/app
655
- ExecStart=/usr/bin/npx sidekiq -C /app/sidekiq.json
656
- Restart=always
657
- RestartSec=5
802
+ 1. **Quiet first** — Stops accepting new jobs
803
+ 2. **Signal jobs** — Aborts the `AbortSignal` so jobs can detect shutdown via `this.signal` or `this.interrupted()`
804
+ 3. **Wait for jobs** — Waits up to `timeout` seconds (default: 25) for in-flight jobs to complete
805
+ 4. **Requeue incomplete jobs** — Any jobs still running after the timeout are pushed back to their Redis queues (`RPUSH queue:<name>`) so another worker can pick them up
806
+ 5. **Cleanup Redis** — Removes this worker from the `processes` set and deletes heartbeat keys
658
807
 
659
- [Install]
660
- WantedBy=multi-user.target
661
- ```
808
+ ### Redis Cleanup on Shutdown
662
809
 
663
- **PM2 example:**
810
+ When a worker shuts down cleanly, it removes its presence from Redis:
664
811
 
665
- ```javascript
666
- // ecosystem.config.js
667
- module.exports = {
668
- apps: [{
669
- name: "sidekiq-worker",
670
- script: "npx",
671
- args: "sidekiq -C sidekiq.json",
672
- instances: 2,
673
- exec_mode: "cluster",
674
- }]
675
- };
812
+ ```
813
+ SREM processes <identity> # Remove from active processes set
814
+ UNLINK <identity>:work # Delete work-in-progress tracking
815
+ UNLINK <identity> # Delete heartbeat data
676
816
  ```
677
817
 
678
- **Docker example:**
818
+ Jobs that didn't complete within the timeout are requeued:
679
819
 
680
- ```dockerfile
681
- FROM node:24-alpine
682
- WORKDIR /app
683
- COPY package*.json ./
684
- RUN npm ci --omit=dev
685
- COPY dist ./dist
686
- COPY sidekiq.json ./
687
- CMD ["npx", "sidekiq", "-C", "sidekiq.json"]
688
820
  ```
689
-
690
- ```bash
691
- # Build and run
692
- docker build -t my-worker .
693
- docker run -e REDIS_URL=redis://host:6379 my-worker
821
+ RPUSH queue:<name> <job-payload> # Push back to queue for retry
694
822
  ```
695
823
 
824
+ This ensures no jobs are lost during deployments or restarts.
825
+
826
+ ## Production
827
+
696
828
  ### Redis Configuration
697
829
 
698
830
  For production Redis:
package/dist/index.d.ts CHANGED
@@ -8,9 +8,10 @@ export { IterableJob } from "./iterable.js";
8
8
  export { IterableAbort, IterableInterrupted, JobSkipError, } from "./iterable-errors.js";
9
9
  export { Job } from "./job.js";
10
10
  export { DefaultJobLogger } from "./job-logger.js";
11
+ export type { AcquireResult, BucketLimiterOptions, ConcurrentLimiterOptions, ILimiter, LimiterOptions, TimeInterval, WindowLimiterOptions, } from "./limiter/index.js";
12
+ export { BucketLimiter, ConcurrentLimiter, ensureRateLimitMiddleware, Limiter, OverLimitError, RateLimitMiddleware, WindowLimiter, } from "./limiter/index.js";
11
13
  export { createLogger, Formatters, SidekiqLogger } from "./logger.js";
12
14
  export { Runner } from "./runner.js";
13
15
  export { Sidekiq } from "./sidekiq.js";
14
16
  export { EmptyQueueError, Queues, Testing } from "./testing.js";
15
- export type * from "./types.js";
16
17
  //# sourceMappingURL=index.d.ts.map
@@ -1 +1 @@
1
- {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAAA,OAAO,EACL,OAAO,EACP,SAAS,EACT,UAAU,EACV,KAAK,EACL,QAAQ,EACR,YAAY,EACZ,WAAW,EACX,KAAK,EACL,YAAY,EACZ,OAAO,GACR,MAAM,UAAU,CAAC;AAClB,OAAO,EAAE,MAAM,EAAE,MAAM,aAAa,CAAC;AACrC,OAAO,EAAE,MAAM,EAAE,MAAM,aAAa,CAAC;AACrC,OAAO,EAAE,cAAc,EAAE,MAAM,oBAAoB,CAAC;AACpD,OAAO,EAAE,OAAO,EAAE,MAAM,cAAc,CAAC;AACvC,OAAO,EAAE,gBAAgB,EAAE,MAAM,wBAAwB,CAAC;AAC1D,OAAO,EAAE,WAAW,EAAE,MAAM,eAAe,CAAC;AAC5C,OAAO,EACL,aAAa,EACb,mBAAmB,EACnB,YAAY,GACb,MAAM,sBAAsB,CAAC;AAC9B,OAAO,EAAE,GAAG,EAAE,MAAM,UAAU,CAAC;AAC/B,OAAO,EAAE,gBAAgB,EAAE,MAAM,iBAAiB,CAAC;AACnD,OAAO,EAAE,YAAY,EAAE,UAAU,EAAE,aAAa,EAAE,MAAM,aAAa,CAAC;AACtE,OAAO,EAAE,MAAM,EAAE,MAAM,aAAa,CAAC;AACrC,OAAO,EAAE,OAAO,EAAE,MAAM,cAAc,CAAC;AACvC,OAAO,EAAE,eAAe,EAAE,MAAM,EAAE,OAAO,EAAE,MAAM,cAAc,CAAC;AAChE,mBAAmB,YAAY,CAAC"}
1
+ {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAAA,OAAO,EACL,OAAO,EACP,SAAS,EACT,UAAU,EACV,KAAK,EACL,QAAQ,EACR,YAAY,EACZ,WAAW,EACX,KAAK,EACL,YAAY,EACZ,OAAO,GACR,MAAM,UAAU,CAAC;AAClB,OAAO,EAAE,MAAM,EAAE,MAAM,aAAa,CAAC;AACrC,OAAO,EAAE,MAAM,EAAE,MAAM,aAAa,CAAC;AACrC,OAAO,EAAE,cAAc,EAAE,MAAM,oBAAoB,CAAC;AACpD,OAAO,EAAE,OAAO,EAAE,MAAM,cAAc,CAAC;AACvC,OAAO,EAAE,gBAAgB,EAAE,MAAM,wBAAwB,CAAC;AAC1D,OAAO,EAAE,WAAW,EAAE,MAAM,eAAe,CAAC;AAC5C,OAAO,EACL,aAAa,EACb,mBAAmB,EACnB,YAAY,GACb,MAAM,sBAAsB,CAAC;AAC9B,OAAO,EAAE,GAAG,EAAE,MAAM,UAAU,CAAC;AAC/B,OAAO,EAAE,gBAAgB,EAAE,MAAM,iBAAiB,CAAC;AACnD,YAAY,EACV,aAAa,EACb,oBAAoB,EACpB,wBAAwB,EACxB,QAAQ,EACR,cAAc,EACd,YAAY,EACZ,oBAAoB,GACrB,MAAM,oBAAoB,CAAC;AAE5B,OAAO,EACL,aAAa,EACb,iBAAiB,EACjB,yBAAyB,EACzB,OAAO,EACP,cAAc,EACd,mBAAmB,EACnB,aAAa,GACd,MAAM,oBAAoB,CAAC;AAC5B,OAAO,EAAE,YAAY,EAAE,UAAU,EAAE,aAAa,EAAE,MAAM,aAAa,CAAC;AACtE,OAAO,EAAE,MAAM,EAAE,MAAM,aAAa,CAAC;AACrC,OAAO,EAAE,OAAO,EAAE,MAAM,cAAc,CAAC;AACvC,OAAO,EAAE,eAAe,EAAE,MAAM,EAAE,OAAO,EAAE,MAAM,cAAc,CAAC"}
package/dist/index.js CHANGED
@@ -8,6 +8,8 @@ export { IterableJob } from "./iterable.js";
8
8
  export { IterableAbort, IterableInterrupted, JobSkipError, } from "./iterable-errors.js";
9
9
  export { Job } from "./job.js";
10
10
  export { DefaultJobLogger } from "./job-logger.js";
11
+ // Rate limiting
12
+ export { BucketLimiter, ConcurrentLimiter, ensureRateLimitMiddleware, Limiter, OverLimitError, RateLimitMiddleware, WindowLimiter, } from "./limiter/index.js";
11
13
  export { createLogger, Formatters, SidekiqLogger } from "./logger.js";
12
14
  export { Runner } from "./runner.js";
13
15
  export { Sidekiq } from "./sidekiq.js";
@@ -0,0 +1,16 @@
1
+ import type { AcquireResult, ILimiter, LimiterOptions, RedisProvider } from "./types.js";
2
+ export declare abstract class BaseLimiter implements ILimiter {
3
+ readonly name: string;
4
+ protected readonly keyPrefix: string;
5
+ protected readonly getRedis: RedisProvider;
6
+ constructor(name: string, options?: LimiterOptions, redisProvider?: RedisProvider);
7
+ protected get redisKey(): string;
8
+ /** Attempt to acquire the limit. Implemented by subclasses. */
9
+ protected abstract tryAcquire(): Promise<AcquireResult>;
10
+ /** Release any held resources. Override in ConcurrentLimiter. */
11
+ protected release(): Promise<void>;
12
+ check(): Promise<AcquireResult>;
13
+ reset(): Promise<void>;
14
+ withinLimit<T>(fn: () => Promise<T> | T): Promise<T>;
15
+ }
16
+ //# sourceMappingURL=base.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"base.d.ts","sourceRoot":"","sources":["../../src/limiter/base.ts"],"names":[],"mappings":"AAEA,OAAO,KAAK,EACV,aAAa,EACb,QAAQ,EACR,cAAc,EACd,aAAa,EACd,MAAM,YAAY,CAAC;AAEpB,8BAAsB,WAAY,YAAW,QAAQ;IACnD,QAAQ,CAAC,IAAI,EAAE,MAAM,CAAC;IACtB,SAAS,CAAC,QAAQ,CAAC,SAAS,EAAE,MAAM,CAAC;IACrC,SAAS,CAAC,QAAQ,CAAC,QAAQ,EAAE,aAAa,CAAC;gBAGzC,IAAI,EAAE,MAAM,EACZ,OAAO,GAAE,cAAmB,EAC5B,aAAa,CAAC,EAAE,aAAa;IAQ/B,SAAS,KAAK,QAAQ,IAAI,MAAM,CAE/B;IAED,+DAA+D;IAC/D,SAAS,CAAC,QAAQ,CAAC,UAAU,IAAI,OAAO,CAAC,aAAa,CAAC;IAEvD,iEAAiE;cACjD,OAAO,IAAI,OAAO,CAAC,IAAI,CAAC;IAIxC,KAAK,IAAI,OAAO,CAAC,aAAa,CAAC;IAIzB,KAAK,IAAI,OAAO,CAAC,IAAI,CAAC;IAKtB,WAAW,CAAC,CAAC,EAAE,EAAE,EAAE,MAAM,OAAO,CAAC,CAAC,CAAC,GAAG,CAAC,GAAG,OAAO,CAAC,CAAC,CAAC;CAiB3D"}
@@ -0,0 +1,43 @@
1
+ import { Sidekiq } from "../sidekiq.js";
2
+ import { OverLimitError } from "./errors.js";
3
+ export class BaseLimiter {
4
+ name;
5
+ keyPrefix;
6
+ getRedis;
7
+ constructor(name, options = {}, redisProvider) {
8
+ this.name = name;
9
+ this.keyPrefix = options.keyPrefix ?? "limiter";
10
+ this.getRedis =
11
+ redisProvider ?? (() => Sidekiq.defaultConfiguration.getRedisClient());
12
+ }
13
+ get redisKey() {
14
+ return `${this.keyPrefix}:${this.name}`;
15
+ }
16
+ /** Release any held resources. Override in ConcurrentLimiter. */
17
+ async release() {
18
+ // Default: no-op
19
+ }
20
+ check() {
21
+ return this.tryAcquire();
22
+ }
23
+ async reset() {
24
+ const redis = await this.getRedis();
25
+ await redis.del(this.redisKey);
26
+ }
27
+ async withinLimit(fn) {
28
+ const result = await this.tryAcquire();
29
+ if (!result.allowed) {
30
+ throw new OverLimitError(this.name, {
31
+ retryAfter: result.retryAfter,
32
+ current: result.current,
33
+ limit: result.limit,
34
+ });
35
+ }
36
+ try {
37
+ return await fn();
38
+ }
39
+ finally {
40
+ await this.release();
41
+ }
42
+ }
43
+ }
@@ -0,0 +1,15 @@
1
+ import { BaseLimiter } from "./base.js";
2
+ import type { AcquireResult, BucketLimiterOptions, RedisProvider, TimeInterval } from "./types.js";
3
+ /**
4
+ * Limits operations per time boundary (e.g., 100 per minute).
5
+ * Resets at fixed time boundaries (start of minute, hour, etc).
6
+ */
7
+ export declare class BucketLimiter extends BaseLimiter {
8
+ private readonly maxCount;
9
+ private readonly intervalSeconds;
10
+ constructor(name: string, maxCount: number, interval: TimeInterval | number, options?: BucketLimiterOptions, redisProvider?: RedisProvider);
11
+ private getBucketTimestamp;
12
+ protected tryAcquire(): Promise<AcquireResult>;
13
+ check(): Promise<AcquireResult>;
14
+ }
15
+ //# sourceMappingURL=bucket.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"bucket.d.ts","sourceRoot":"","sources":["../../src/limiter/bucket.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,WAAW,EAAE,MAAM,WAAW,CAAC;AAExC,OAAO,KAAK,EACV,aAAa,EACb,oBAAoB,EACpB,aAAa,EACb,YAAY,EACb,MAAM,YAAY,CAAC;AASpB;;;GAGG;AACH,qBAAa,aAAc,SAAQ,WAAW;IAC5C,OAAO,CAAC,QAAQ,CAAC,QAAQ,CAAS;IAClC,OAAO,CAAC,QAAQ,CAAC,eAAe,CAAS;gBAGvC,IAAI,EAAE,MAAM,EACZ,QAAQ,EAAE,MAAM,EAChB,QAAQ,EAAE,YAAY,GAAG,MAAM,EAC/B,OAAO,GAAE,oBAAyB,EAClC,aAAa,CAAC,EAAE,aAAa;IAQ/B,OAAO,CAAC,kBAAkB;cAOV,UAAU,IAAI,OAAO,CAAC,aAAa,CAAC;IA8B9C,KAAK,IAAI,OAAO,CAAC,aAAa,CAAC;CAYtC"}
@@ -0,0 +1,61 @@
1
+ import { BaseLimiter } from "./base.js";
2
+ import { LUA_BUCKET_ACQUIRE } from "./lua-scripts.js";
3
+ const INTERVAL_SECONDS = {
4
+ second: 1,
5
+ minute: 60,
6
+ hour: 3600,
7
+ day: 86_400,
8
+ };
9
+ /**
10
+ * Limits operations per time boundary (e.g., 100 per minute).
11
+ * Resets at fixed time boundaries (start of minute, hour, etc).
12
+ */
13
+ export class BucketLimiter extends BaseLimiter {
14
+ maxCount;
15
+ intervalSeconds;
16
+ constructor(name, maxCount, interval, options = {}, redisProvider) {
17
+ super(name, options, redisProvider);
18
+ this.maxCount = maxCount;
19
+ this.intervalSeconds =
20
+ typeof interval === "number" ? interval : INTERVAL_SECONDS[interval];
21
+ }
22
+ getBucketTimestamp() {
23
+ const now = Math.floor(Date.now() / 1000);
24
+ const bucketStart = Math.floor(now / this.intervalSeconds) * this.intervalSeconds;
25
+ return String(bucketStart);
26
+ }
27
+ async tryAcquire() {
28
+ const redis = await this.getRedis();
29
+ const bucketTs = this.getBucketTimestamp();
30
+ const result = (await redis.eval(LUA_BUCKET_ACQUIRE, {
31
+ keys: [this.redisKey],
32
+ arguments: [
33
+ String(this.maxCount),
34
+ bucketTs,
35
+ String(this.intervalSeconds + 1),
36
+ ],
37
+ }));
38
+ const [allowed, current] = result;
39
+ // Calculate actual retry time until next bucket
40
+ const now = Date.now() / 1000;
41
+ const bucketEnd = Math.floor(now / this.intervalSeconds) * this.intervalSeconds +
42
+ this.intervalSeconds;
43
+ const actualRetry = bucketEnd - now;
44
+ return {
45
+ allowed: allowed === 1,
46
+ current,
47
+ limit: this.maxCount,
48
+ retryAfter: allowed === 0 ? actualRetry : undefined,
49
+ };
50
+ }
51
+ async check() {
52
+ const redis = await this.getRedis();
53
+ const bucketTs = this.getBucketTimestamp();
54
+ const current = Number(await redis.hGet(this.redisKey, bucketTs)) || 0;
55
+ return {
56
+ allowed: current < this.maxCount,
57
+ current,
58
+ limit: this.maxCount,
59
+ };
60
+ }
61
+ }
@@ -0,0 +1,17 @@
1
+ import { BaseLimiter } from "./base.js";
2
+ import type { AcquireResult, ConcurrentLimiterOptions, RedisProvider } from "./types.js";
3
+ /**
4
+ * Limits concurrent executions across all processes.
5
+ * Uses Redis ZSET with expiring locks for distributed mutex behavior.
6
+ */
7
+ export declare class ConcurrentLimiter extends BaseLimiter {
8
+ private readonly maxConcurrent;
9
+ private readonly lockTimeout;
10
+ private currentLockId?;
11
+ constructor(name: string, maxConcurrent: number, options?: ConcurrentLimiterOptions, redisProvider?: RedisProvider);
12
+ private generateLockId;
13
+ protected tryAcquire(): Promise<AcquireResult>;
14
+ protected release(): Promise<void>;
15
+ check(): Promise<AcquireResult>;
16
+ }
17
+ //# sourceMappingURL=concurrent.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"concurrent.d.ts","sourceRoot":"","sources":["../../src/limiter/concurrent.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,WAAW,EAAE,MAAM,WAAW,CAAC;AAKxC,OAAO,KAAK,EACV,aAAa,EACb,wBAAwB,EACxB,aAAa,EACd,MAAM,YAAY,CAAC;AAEpB;;;GAGG;AACH,qBAAa,iBAAkB,SAAQ,WAAW;IAChD,OAAO,CAAC,QAAQ,CAAC,aAAa,CAAS;IACvC,OAAO,CAAC,QAAQ,CAAC,WAAW,CAAS;IACrC,OAAO,CAAC,aAAa,CAAC,CAAS;gBAG7B,IAAI,EAAE,MAAM,EACZ,aAAa,EAAE,MAAM,EACrB,OAAO,GAAE,wBAA6B,EACtC,aAAa,CAAC,EAAE,aAAa;IAO/B,OAAO,CAAC,cAAc;cAIN,UAAU,IAAI,OAAO,CAAC,aAAa,CAAC;cA6BpC,OAAO,IAAI,OAAO,CAAC,IAAI,CAAC;IAalC,KAAK,IAAI,OAAO,CAAC,aAAa,CAAC;CActC"}
@@ -0,0 +1,66 @@
1
+ import { BaseLimiter } from "./base.js";
2
+ import { LUA_CONCURRENT_ACQUIRE, LUA_CONCURRENT_RELEASE, } from "./lua-scripts.js";
3
+ /**
4
+ * Limits concurrent executions across all processes.
5
+ * Uses Redis ZSET with expiring locks for distributed mutex behavior.
6
+ */
7
+ export class ConcurrentLimiter extends BaseLimiter {
8
+ maxConcurrent;
9
+ lockTimeout;
10
+ currentLockId;
11
+ constructor(name, maxConcurrent, options = {}, redisProvider) {
12
+ super(name, options, redisProvider);
13
+ this.maxConcurrent = maxConcurrent;
14
+ this.lockTimeout = options.lockTimeout ?? 180;
15
+ }
16
+ generateLockId() {
17
+ return `${process.pid}-${Date.now()}-${Math.random().toString(36).slice(2)}`;
18
+ }
19
+ async tryAcquire() {
20
+ const redis = await this.getRedis();
21
+ const lockId = this.generateLockId();
22
+ const now = Date.now() / 1000;
23
+ const result = (await redis.eval(LUA_CONCURRENT_ACQUIRE, {
24
+ keys: [this.redisKey],
25
+ arguments: [
26
+ String(this.maxConcurrent),
27
+ lockId,
28
+ String(now),
29
+ String(this.lockTimeout),
30
+ ],
31
+ }));
32
+ const [allowed, current, retryAfter] = result;
33
+ if (allowed === 1) {
34
+ this.currentLockId = lockId;
35
+ }
36
+ return {
37
+ allowed: allowed === 1,
38
+ current,
39
+ limit: this.maxConcurrent,
40
+ retryAfter: retryAfter > 0 ? retryAfter : undefined,
41
+ };
42
+ }
43
+ async release() {
44
+ if (!this.currentLockId) {
45
+ return;
46
+ }
47
+ const redis = await this.getRedis();
48
+ await redis.eval(LUA_CONCURRENT_RELEASE, {
49
+ keys: [this.redisKey],
50
+ arguments: [this.currentLockId],
51
+ });
52
+ this.currentLockId = undefined;
53
+ }
54
+ async check() {
55
+ const redis = await this.getRedis();
56
+ const now = Date.now() / 1000;
57
+ // Remove expired and count without acquiring
58
+ await redis.zRemRangeByScore(this.redisKey, "-inf", String(now));
59
+ const current = await redis.zCard(this.redisKey);
60
+ return {
61
+ allowed: current < this.maxConcurrent,
62
+ current,
63
+ limit: this.maxConcurrent,
64
+ };
65
+ }
66
+ }
@@ -0,0 +1,16 @@
1
+ /**
2
+ * Thrown when a rate limit is exceeded.
3
+ * Can be caught by RateLimitMiddleware to reschedule jobs.
4
+ */
5
+ export declare class OverLimitError extends Error {
6
+ readonly limiterName: string;
7
+ readonly retryAfter?: number;
8
+ readonly current?: number;
9
+ readonly limit?: number;
10
+ constructor(limiterName: string, options?: {
11
+ retryAfter?: number;
12
+ current?: number;
13
+ limit?: number;
14
+ });
15
+ }
16
+ //# sourceMappingURL=errors.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"errors.d.ts","sourceRoot":"","sources":["../../src/limiter/errors.ts"],"names":[],"mappings":"AAAA;;;GAGG;AACH,qBAAa,cAAe,SAAQ,KAAK;IACvC,QAAQ,CAAC,WAAW,EAAE,MAAM,CAAC;IAC7B,QAAQ,CAAC,UAAU,CAAC,EAAE,MAAM,CAAC;IAC7B,QAAQ,CAAC,OAAO,CAAC,EAAE,MAAM,CAAC;IAC1B,QAAQ,CAAC,KAAK,CAAC,EAAE,MAAM,CAAC;gBAGtB,WAAW,EAAE,MAAM,EACnB,OAAO,CAAC,EAAE;QAAE,UAAU,CAAC,EAAE,MAAM,CAAC;QAAC,OAAO,CAAC,EAAE,MAAM,CAAC;QAAC,KAAK,CAAC,EAAE,MAAM,CAAA;KAAE;CAStE"}
@@ -0,0 +1,18 @@
1
+ /**
2
+ * Thrown when a rate limit is exceeded.
3
+ * Can be caught by RateLimitMiddleware to reschedule jobs.
4
+ */
5
+ export class OverLimitError extends Error {
6
+ limiterName;
7
+ retryAfter;
8
+ current;
9
+ limit;
10
+ constructor(limiterName, options) {
11
+ super(`Rate limit exceeded for limiter: ${limiterName}`);
12
+ this.name = "OverLimitError";
13
+ this.limiterName = limiterName;
14
+ this.retryAfter = options?.retryAfter;
15
+ this.current = options?.current;
16
+ this.limit = options?.limit;
17
+ }
18
+ }
@@ -0,0 +1,69 @@
1
+ import type { BucketLimiterOptions, ConcurrentLimiterOptions, ILimiter, RedisProvider, TimeInterval, WindowLimiterOptions } from "./types.js";
2
+ export { BucketLimiter } from "./bucket.js";
3
+ export { ConcurrentLimiter } from "./concurrent.js";
4
+ export * from "./errors.js";
5
+ export { ensureRateLimitMiddleware, RateLimitMiddleware, } from "./middleware.js";
6
+ export * from "./types.js";
7
+ export { WindowLimiter } from "./window.js";
8
+ /**
9
+ * Factory class for creating rate limiters.
10
+ * Provides a Sidekiq Enterprise-compatible API.
11
+ *
12
+ * @example
13
+ * ```typescript
14
+ * import { Limiter } from "sidekiq-ts";
15
+ *
16
+ * // Limit concurrent API calls to 50
17
+ * const apiLimiter = Limiter.concurrent("external-api", 50);
18
+ *
19
+ * // Limit to 100 emails per minute
20
+ * const emailLimiter = Limiter.bucket("email-send", 100, "minute");
21
+ *
22
+ * // Limit to 1000 requests per hour (rolling window)
23
+ * const rateLimiter = Limiter.window("api-requests", 1000, "hour");
24
+ *
25
+ * // Use in a job
26
+ * class ApiCallJob extends Job<[string]> {
27
+ * async perform(endpoint: string) {
28
+ * await apiLimiter.withinLimit(async () => {
29
+ * await fetch(endpoint);
30
+ * });
31
+ * }
32
+ * }
33
+ * ```
34
+ */
35
+ export declare class Limiter {
36
+ /**
37
+ * Create a concurrent limiter (distributed mutex with count).
38
+ * Limits concurrent executions across all processes.
39
+ *
40
+ * @param name - Unique identifier for the limiter
41
+ * @param maxCount - Maximum concurrent operations allowed
42
+ * @param options - Additional options (lockTimeout, keyPrefix)
43
+ * @param redisProvider - Optional custom Redis provider
44
+ */
45
+ static concurrent(name: string, maxCount: number, options?: ConcurrentLimiterOptions, redisProvider?: RedisProvider): ILimiter;
46
+ /**
47
+ * Create a bucket limiter (count per time boundary).
48
+ * Resets at fixed time boundaries (start of minute, hour, etc).
49
+ *
50
+ * @param name - Unique identifier for the limiter
51
+ * @param count - Operations allowed per interval
52
+ * @param interval - Time interval ("second", "minute", "hour", "day") or seconds
53
+ * @param options - Additional options (keyPrefix)
54
+ * @param redisProvider - Optional custom Redis provider
55
+ */
56
+ static bucket(name: string, count: number, interval: TimeInterval | number, options?: BucketLimiterOptions, redisProvider?: RedisProvider): ILimiter;
57
+ /**
58
+ * Create a window limiter (rolling window).
59
+ * Window starts from first operation and slides continuously.
60
+ *
61
+ * @param name - Unique identifier for the limiter
62
+ * @param count - Operations allowed within window
63
+ * @param interval - Window size ("second", "minute", "hour", "day") or seconds
64
+ * @param options - Additional options (keyPrefix)
65
+ * @param redisProvider - Optional custom Redis provider
66
+ */
67
+ static window(name: string, count: number, interval: TimeInterval | number, options?: WindowLimiterOptions, redisProvider?: RedisProvider): ILimiter;
68
+ }
69
+ //# sourceMappingURL=index.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../src/limiter/index.ts"],"names":[],"mappings":"AAEA,OAAO,KAAK,EACV,oBAAoB,EACpB,wBAAwB,EACxB,QAAQ,EACR,aAAa,EACb,YAAY,EACZ,oBAAoB,EACrB,MAAM,YAAY,CAAC;AAGpB,OAAO,EAAE,aAAa,EAAE,MAAM,aAAa,CAAC;AAC5C,OAAO,EAAE,iBAAiB,EAAE,MAAM,iBAAiB,CAAC;AACpD,cAAc,aAAa,CAAC;AAC5B,OAAO,EACL,yBAAyB,EACzB,mBAAmB,GACpB,MAAM,iBAAiB,CAAC;AACzB,cAAc,YAAY,CAAC;AAC3B,OAAO,EAAE,aAAa,EAAE,MAAM,aAAa,CAAC;AAE5C;;;;;;;;;;;;;;;;;;;;;;;;;;GA0BG;AAEH,qBAAa,OAAO;IAClB;;;;;;;;OAQG;IACH,MAAM,CAAC,UAAU,CACf,IAAI,EAAE,MAAM,EACZ,QAAQ,EAAE,MAAM,EAChB,OAAO,CAAC,EAAE,wBAAwB,EAClC,aAAa,CAAC,EAAE,aAAa,GAC5B,QAAQ;IAIX;;;;;;;;;OASG;IACH,MAAM,CAAC,MAAM,CACX,IAAI,EAAE,MAAM,EACZ,KAAK,EAAE,MAAM,EACb,QAAQ,EAAE,YAAY,GAAG,MAAM,EAC/B,OAAO,CAAC,EAAE,oBAAoB,EAC9B,aAAa,CAAC,EAAE,aAAa,GAC5B,QAAQ;IAIX;;;;;;;;;OASG;IACH,MAAM,CAAC,MAAM,CACX,IAAI,EAAE,MAAM,EACZ,KAAK,EAAE,MAAM,EACb,QAAQ,EAAE,YAAY,GAAG,MAAM,EAC/B,OAAO,CAAC,EAAE,oBAAoB,EAC9B,aAAa,CAAC,EAAE,aAAa,GAC5B,QAAQ;CAGZ"}
@@ -0,0 +1,77 @@
1
+ import { BucketLimiter } from "./bucket.js";
2
+ import { ConcurrentLimiter } from "./concurrent.js";
3
+ import { WindowLimiter } from "./window.js";
4
+ export { BucketLimiter } from "./bucket.js";
5
+ export { ConcurrentLimiter } from "./concurrent.js";
6
+ export * from "./errors.js";
7
+ export { ensureRateLimitMiddleware, RateLimitMiddleware, } from "./middleware.js";
8
+ export * from "./types.js";
9
+ export { WindowLimiter } from "./window.js";
10
+ /**
11
+ * Factory class for creating rate limiters.
12
+ * Provides a Sidekiq Enterprise-compatible API.
13
+ *
14
+ * @example
15
+ * ```typescript
16
+ * import { Limiter } from "sidekiq-ts";
17
+ *
18
+ * // Limit concurrent API calls to 50
19
+ * const apiLimiter = Limiter.concurrent("external-api", 50);
20
+ *
21
+ * // Limit to 100 emails per minute
22
+ * const emailLimiter = Limiter.bucket("email-send", 100, "minute");
23
+ *
24
+ * // Limit to 1000 requests per hour (rolling window)
25
+ * const rateLimiter = Limiter.window("api-requests", 1000, "hour");
26
+ *
27
+ * // Use in a job
28
+ * class ApiCallJob extends Job<[string]> {
29
+ * async perform(endpoint: string) {
30
+ * await apiLimiter.withinLimit(async () => {
31
+ * await fetch(endpoint);
32
+ * });
33
+ * }
34
+ * }
35
+ * ```
36
+ */
37
+ // biome-ignore lint/complexity/noStaticOnlyClass: Factory class provides namespace for limiter creation
38
+ export class Limiter {
39
+ /**
40
+ * Create a concurrent limiter (distributed mutex with count).
41
+ * Limits concurrent executions across all processes.
42
+ *
43
+ * @param name - Unique identifier for the limiter
44
+ * @param maxCount - Maximum concurrent operations allowed
45
+ * @param options - Additional options (lockTimeout, keyPrefix)
46
+ * @param redisProvider - Optional custom Redis provider
47
+ */
48
+ static concurrent(name, maxCount, options, redisProvider) {
49
+ return new ConcurrentLimiter(name, maxCount, options, redisProvider);
50
+ }
51
+ /**
52
+ * Create a bucket limiter (count per time boundary).
53
+ * Resets at fixed time boundaries (start of minute, hour, etc).
54
+ *
55
+ * @param name - Unique identifier for the limiter
56
+ * @param count - Operations allowed per interval
57
+ * @param interval - Time interval ("second", "minute", "hour", "day") or seconds
58
+ * @param options - Additional options (keyPrefix)
59
+ * @param redisProvider - Optional custom Redis provider
60
+ */
61
+ static bucket(name, count, interval, options, redisProvider) {
62
+ return new BucketLimiter(name, count, interval, options, redisProvider);
63
+ }
64
+ /**
65
+ * Create a window limiter (rolling window).
66
+ * Window starts from first operation and slides continuously.
67
+ *
68
+ * @param name - Unique identifier for the limiter
69
+ * @param count - Operations allowed within window
70
+ * @param interval - Window size ("second", "minute", "hour", "day") or seconds
71
+ * @param options - Additional options (keyPrefix)
72
+ * @param redisProvider - Optional custom Redis provider
73
+ */
74
+ static window(name, count, interval, options, redisProvider) {
75
+ return new WindowLimiter(name, count, interval, options, redisProvider);
76
+ }
77
+ }
@@ -0,0 +1,36 @@
1
/**
 * ConcurrentLimiter: Acquire a slot with expiring lock.
 * KEYS[1] = limiter key (ZSET of lockId -> expireTime)
 * ARGV[1] = max concurrent
 * ARGV[2] = lock ID (unique per request)
 * ARGV[3] = current time (seconds)
 * ARGV[4] = lock timeout (seconds)
 * Returns: [allowed (0/1), current count, retry_after (seconds)]
 */
export declare const LUA_CONCURRENT_ACQUIRE = "\nlocal key = KEYS[1]\nlocal max_concurrent = tonumber(ARGV[1])\nlocal lock_id = ARGV[2]\nlocal now = tonumber(ARGV[3])\nlocal lock_timeout = tonumber(ARGV[4])\n\n-- Remove expired locks\nredis.call(\"ZREMRANGEBYSCORE\", key, \"-inf\", now)\n\n-- Check current count\nlocal current = redis.call(\"ZCARD\", key)\nif current < max_concurrent then\n  -- Add lock with expiration time as score\n  local expire_at = now + lock_timeout\n  redis.call(\"ZADD\", key, expire_at, lock_id)\n  redis.call(\"EXPIRE\", key, lock_timeout + 10)\n  return {1, current + 1, 0}\nend\n\n-- Get time until next slot frees up\nlocal oldest = redis.call(\"ZRANGE\", key, 0, 0, \"WITHSCORES\")\nlocal retry_after = 0\nif oldest[2] then\n  retry_after = math.max(0, tonumber(oldest[2]) - now)\nend\nreturn {0, current, retry_after}\n";
/**
 * ConcurrentLimiter: Release a previously acquired lock.
 * KEYS[1] = limiter key
 * ARGV[1] = lock ID
 */
export declare const LUA_CONCURRENT_RELEASE = "\nredis.call(\"ZREM\", KEYS[1], ARGV[1])\nreturn 1\n";
/**
 * BucketLimiter: Increment counter at time boundary.
 * KEYS[1] = limiter key
 * ARGV[1] = max count
 * ARGV[2] = current bucket timestamp (boundary)
 * ARGV[3] = TTL seconds
 * Returns: [allowed (0/1), current count, seconds until reset]
 */
export declare const LUA_BUCKET_ACQUIRE = "\nlocal key = KEYS[1]\nlocal max_count = tonumber(ARGV[1])\nlocal bucket_ts = ARGV[2]\nlocal ttl = tonumber(ARGV[3])\n\n-- Use hash: field is bucket timestamp, value is count\nlocal current = tonumber(redis.call(\"HGET\", key, bucket_ts) or \"0\")\n\nif current < max_count then\n  redis.call(\"HINCRBY\", key, bucket_ts, 1)\n  redis.call(\"EXPIRE\", key, ttl)\n  -- Clean old buckets\n  local fields = redis.call(\"HKEYS\", key)\n  for _, f in ipairs(fields) do\n    if f ~= bucket_ts then\n      redis.call(\"HDEL\", key, f)\n    end\n  end\n  return {1, current + 1, 0}\nend\n\nreturn {0, current, ttl}\n";
/**
 * WindowLimiter: Sliding window using sorted set.
 * KEYS[1] = limiter key (ZSET of request timestamps)
 * ARGV[1] = max count
 * ARGV[2] = window size (seconds)
 * ARGV[3] = current time (seconds, with decimals)
 * ARGV[4] = request ID (unique)
 * Returns: [allowed (0/1), current count, retry_after (seconds)]
 */
export declare const LUA_WINDOW_ACQUIRE = "\nlocal key = KEYS[1]\nlocal max_count = tonumber(ARGV[1])\nlocal window_size = tonumber(ARGV[2])\nlocal now = tonumber(ARGV[3])\nlocal request_id = ARGV[4]\n\nlocal window_start = now - window_size\n\n-- Remove entries outside window\nredis.call(\"ZREMRANGEBYSCORE\", key, \"-inf\", window_start)\n\n-- Count entries in window\nlocal current = redis.call(\"ZCARD\", key)\n\nif current < max_count then\n  redis.call(\"ZADD\", key, now, request_id)\n  redis.call(\"EXPIRE\", key, math.ceil(window_size) + 1)\n  return {1, current + 1, 0}\nend\n\n-- Calculate retry_after based on oldest entry\nlocal oldest = redis.call(\"ZRANGE\", key, 0, 0, \"WITHSCORES\")\nlocal retry_after = 0\nif oldest[2] then\n  retry_after = math.max(0, (tonumber(oldest[2]) + window_size) - now)\nend\nreturn {0, current, retry_after}\n";
//# sourceMappingURL=lua-scripts.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"lua-scripts.d.ts","sourceRoot":"","sources":["../../src/limiter/lua-scripts.ts"],"names":[],"mappings":"AAAA;;;;;;;;GAQG;AACH,eAAO,MAAM,sBAAsB,6yBA2BlC,CAAC;AAEF;;;;GAIG;AACH,eAAO,MAAM,sBAAsB,yDAGlC,CAAC;AAEF;;;;;;;GAOG;AACH,eAAO,MAAM,kBAAkB,omBAuB9B,CAAC;AAEF;;;;;;;;GAQG;AACH,eAAO,MAAM,kBAAkB,kzBA4B9B,CAAC"}
@@ -0,0 +1,116 @@
1
/**
 * ConcurrentLimiter: Acquire a slot with expiring lock.
 * KEYS[1] = limiter key (ZSET of lockId -> expireTime)
 * ARGV[1] = max concurrent
 * ARGV[2] = lock ID (unique per request)
 * ARGV[3] = current time (seconds)
 * ARGV[4] = lock timeout (seconds)
 * Returns: [allowed (0/1), current count, retry_after (seconds)]
 */
export const LUA_CONCURRENT_ACQUIRE = `
local key = KEYS[1]
local max_concurrent = tonumber(ARGV[1])
local lock_id = ARGV[2]
local now = tonumber(ARGV[3])
local lock_timeout = tonumber(ARGV[4])

-- Remove expired locks
redis.call("ZREMRANGEBYSCORE", key, "-inf", now)

-- Check current count
local current = redis.call("ZCARD", key)
if current < max_concurrent then
  -- Add lock with expiration time as score
  local expire_at = now + lock_timeout
  redis.call("ZADD", key, expire_at, lock_id)
  redis.call("EXPIRE", key, lock_timeout + 10)
  return {1, current + 1, 0}
end

-- Get time until next slot frees up
local oldest = redis.call("ZRANGE", key, 0, 0, "WITHSCORES")
local retry_after = 0
if oldest[2] then
  retry_after = math.max(0, tonumber(oldest[2]) - now)
end
return {0, current, retry_after}
`;
/**
 * ConcurrentLimiter: Release a previously acquired lock.
 * KEYS[1] = limiter key
 * ARGV[1] = lock ID
 */
export const LUA_CONCURRENT_RELEASE = `
redis.call("ZREM", KEYS[1], ARGV[1])
return 1
`;
/**
 * BucketLimiter: Increment counter at time boundary.
 * KEYS[1] = limiter key
 * ARGV[1] = max count
 * ARGV[2] = current bucket timestamp (boundary)
 * ARGV[3] = TTL seconds
 * Returns: [allowed (0/1), current count, seconds until reset]
 */
export const LUA_BUCKET_ACQUIRE = `
local key = KEYS[1]
local max_count = tonumber(ARGV[1])
local bucket_ts = ARGV[2]
local ttl = tonumber(ARGV[3])

-- Use hash: field is bucket timestamp, value is count
local current = tonumber(redis.call("HGET", key, bucket_ts) or "0")

if current < max_count then
  redis.call("HINCRBY", key, bucket_ts, 1)
  redis.call("EXPIRE", key, ttl)
  -- Clean old buckets
  local fields = redis.call("HKEYS", key)
  for _, f in ipairs(fields) do
    if f ~= bucket_ts then
      redis.call("HDEL", key, f)
    end
  end
  return {1, current + 1, 0}
end

return {0, current, ttl}
`;
/**
 * WindowLimiter: Sliding window using sorted set.
 * KEYS[1] = limiter key (ZSET of request timestamps)
 * ARGV[1] = max count
 * ARGV[2] = window size (seconds)
 * ARGV[3] = current time (seconds, with decimals)
 * ARGV[4] = request ID (unique)
 * Returns: [allowed (0/1), current count, retry_after (seconds)]
 */
export const LUA_WINDOW_ACQUIRE = `
local key = KEYS[1]
local max_count = tonumber(ARGV[1])
local window_size = tonumber(ARGV[2])
local now = tonumber(ARGV[3])
local request_id = ARGV[4]

local window_start = now - window_size

-- Remove entries outside window
redis.call("ZREMRANGEBYSCORE", key, "-inf", window_start)

-- Count entries in window
local current = redis.call("ZCARD", key)

if current < max_count then
  redis.call("ZADD", key, now, request_id)
  redis.call("EXPIRE", key, math.ceil(window_size) + 1)
  return {1, current + 1, 0}
end

-- Calculate retry_after based on oldest entry
local oldest = redis.call("ZRANGE", key, 0, 0, "WITHSCORES")
local retry_after = 0
if oldest[2] then
  retry_after = math.max(0, (tonumber(oldest[2]) + window_size) - now)
end
return {0, current, retry_after}
`;
@@ -0,0 +1,21 @@
1
+ import type { Config } from "../config.js";
2
+ import type { JobPayload } from "../types.js";
3
+ /**
4
+ * Server middleware that catches OverLimitError and reschedules jobs.
5
+ * Follows Sidekiq Enterprise behavior:
6
+ * - Linear backoff starting at ~5 minutes
7
+ * - Maximum 20 reschedules (~1 day total)
8
+ * - After max reschedules, job fails normally
9
+ */
10
+ export declare class RateLimitMiddleware {
11
+ config?: Config;
12
+ call(_instance: unknown, payload: JobPayload, _queue: string, next: () => Promise<unknown> | unknown): Promise<unknown>;
13
+ private handleOverLimit;
14
+ private calculateBackoff;
15
+ }
16
+ /**
17
+ * Ensures the RateLimitMiddleware is registered.
18
+ * Call this in your configuration if using rate limiters in jobs.
19
+ */
20
+ export declare const ensureRateLimitMiddleware: (config: Config) => void;
21
+ //# sourceMappingURL=middleware.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"middleware.d.ts","sourceRoot":"","sources":["../../src/limiter/middleware.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,MAAM,EAAE,MAAM,cAAc,CAAC;AAE3C,OAAO,KAAK,EAAE,UAAU,EAAE,MAAM,aAAa,CAAC;AAS9C;;;;;;GAMG;AACH,qBAAa,mBAAmB;IAC9B,MAAM,CAAC,EAAE,MAAM,CAAC;IAEV,IAAI,CACR,SAAS,EAAE,OAAO,EAClB,OAAO,EAAE,UAAU,EACnB,MAAM,EAAE,MAAM,EACd,IAAI,EAAE,MAAM,OAAO,CAAC,OAAO,CAAC,GAAG,OAAO,GACrC,OAAO,CAAC,OAAO,CAAC;YAWL,eAAe;IA8B7B,OAAO,CAAC,gBAAgB;CASzB;AAED;;;GAGG;AACH,eAAO,MAAM,yBAAyB,GAAI,QAAQ,MAAM,KAAG,IAI1D,CAAC"}
@@ -0,0 +1,62 @@
1
+ import { dumpJson } from "../json.js";
2
+ import { OverLimitError } from "./errors.js";
3
/** Maximum number of times a job can be rescheduled due to rate limiting */
const MAX_OVERRATED = 20;
/** Base delay in seconds for each rate-limit reschedule step (5 minutes) */
const BASE_DELAY_SECONDS = 300;
7
/**
 * Server middleware that catches OverLimitError and reschedules jobs.
 * Follows Sidekiq Enterprise behavior:
 * - Linear backoff starting at ~5 minutes
 * - Maximum 20 reschedules (~1 day total)
 * - After max reschedules, job fails normally
 */
export class RateLimitMiddleware {
    // Set by the middleware chain; provides Redis access and the logger.
    config;
    /**
     * Run the job via `next()`. An OverLimitError thrown by a limiter inside
     * the job is intercepted and turned into a reschedule; every other error
     * propagates untouched.
     */
    async call(_instance, payload, _queue, next) {
        try {
            return await next();
        }
        catch (error) {
            if (!(error instanceof OverLimitError)) {
                throw error;
            }
            return this.handleOverLimit(payload, error);
        }
    }
    /**
     * Reschedule a rate-limited job into the retry set, tracking how many
     * times it has been deferred via `payload.overrated`. Rethrows the
     * original error once the reschedule budget is spent or Redis is
     * unavailable, letting the normal failure path take over.
     */
    async handleOverLimit(payload, cause) {
        const attempt = (payload.overrated ?? 0) + 1;
        payload.overrated = attempt;
        if (attempt > MAX_OVERRATED) {
            // Reschedule budget exhausted — let the job fail normally.
            throw cause;
        }
        // Backoff: limiter hint when available, otherwise linear ~5 min steps.
        const delaySeconds = this.calculateBackoff(attempt, cause.retryAfter);
        const runAt = Date.now() / 1000 + delaySeconds;
        const redis = await this.config?.getRedisClient();
        if (!redis) {
            // No Redis connection to reschedule with — surface the error.
            throw cause;
        }
        await redis.zAdd("retry", [{ score: runAt, value: dumpJson(payload) }]);
        this.config?.logger.debug(() => `Rate limited: ${cause.limiterName}, rescheduled in ${Math.round(delaySeconds)}s (attempt ${attempt}/${MAX_OVERRATED})`);
    }
    /**
     * Compute the reschedule delay in seconds: prefer the limiter's own
     * retryAfter (plus up to 30s jitter) when it is positive and under an
     * hour, otherwise fall back to linear backoff with jitter.
     */
    calculateBackoff(overrated, retryAfter) {
        const hintUsable = retryAfter !== undefined && retryAfter > 0 && retryAfter < 3600;
        if (hintUsable) {
            return retryAfter + Math.random() * 30;
        }
        return BASE_DELAY_SECONDS * overrated + Math.random() * BASE_DELAY_SECONDS;
    }
}
54
/**
 * Ensures the RateLimitMiddleware is registered; registering twice is
 * avoided by checking first. Call this in your configuration if using
 * rate limiters in jobs.
 */
export const ensureRateLimitMiddleware = (config) => {
    const alreadyRegistered = config.serverMiddleware.exists(RateLimitMiddleware);
    if (alreadyRegistered) {
        return;
    }
    config.serverMiddleware.add(RateLimitMiddleware);
};
@@ -0,0 +1,42 @@
1
+ import type { RedisClient } from "../redis.js";
2
/** Time intervals for bucket and window limiters */
export type TimeInterval = "second" | "minute" | "hour" | "day";
/** Result of an acquire attempt */
export interface AcquireResult {
    /** Whether the operation is within the limit */
    allowed: boolean;
    /** Seconds until the limit resets */
    retryAfter?: number;
    /** Current usage count */
    current?: number;
    /** Maximum allowed */
    limit?: number;
}
/** Common options for all limiters */
export interface LimiterOptions {
    /** Custom Redis key prefix. Default: 'limiter' */
    keyPrefix?: string;
}
/** Options for ConcurrentLimiter */
export interface ConcurrentLimiterOptions extends LimiterOptions {
    /** How long a lock is held before auto-release (seconds). Default: 180 */
    lockTimeout?: number;
}
/** Options for BucketLimiter (currently identical to the common options) */
export interface BucketLimiterOptions extends LimiterOptions {
}
/** Options for WindowLimiter (currently identical to the common options) */
export interface WindowLimiterOptions extends LimiterOptions {
}
/** The limiter interface - all limiters implement this */
export interface ILimiter {
    /** Unique identifier of this limiter */
    readonly name: string;
    /** Execute callback within rate limit. Throws OverLimitError if limit exceeded. */
    withinLimit<T>(fn: () => Promise<T> | T): Promise<T>;
    /** Check current limit status without consuming */
    check(): Promise<AcquireResult>;
    /** Reset the limiter state */
    reset(): Promise<void>;
}
/** Redis connection provider function */
export type RedisProvider = () => Promise<RedisClient>;
//# sourceMappingURL=types.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"types.d.ts","sourceRoot":"","sources":["../../src/limiter/types.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,WAAW,EAAE,MAAM,aAAa,CAAC;AAE/C,oDAAoD;AACpD,MAAM,MAAM,YAAY,GAAG,QAAQ,GAAG,QAAQ,GAAG,MAAM,GAAG,KAAK,CAAC;AAEhE,mCAAmC;AACnC,MAAM,WAAW,aAAa;IAC5B,OAAO,EAAE,OAAO,CAAC;IACjB,qCAAqC;IACrC,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB,0BAA0B;IAC1B,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB,sBAAsB;IACtB,KAAK,CAAC,EAAE,MAAM,CAAC;CAChB;AAED,sCAAsC;AACtC,MAAM,WAAW,cAAc;IAC7B,kDAAkD;IAClD,SAAS,CAAC,EAAE,MAAM,CAAC;CACpB;AAED,oCAAoC;AACpC,MAAM,WAAW,wBAAyB,SAAQ,cAAc;IAC9D,0EAA0E;IAC1E,WAAW,CAAC,EAAE,MAAM,CAAC;CACtB;AAED,gCAAgC;AAChC,MAAM,WAAW,oBAAqB,SAAQ,cAAc;CAAG;AAE/D,gCAAgC;AAChC,MAAM,WAAW,oBAAqB,SAAQ,cAAc;CAAG;AAE/D,0DAA0D;AAC1D,MAAM,WAAW,QAAQ;IACvB,QAAQ,CAAC,IAAI,EAAE,MAAM,CAAC;IAEtB,mFAAmF;IACnF,WAAW,CAAC,CAAC,EAAE,EAAE,EAAE,MAAM,OAAO,CAAC,CAAC,CAAC,GAAG,CAAC,GAAG,OAAO,CAAC,CAAC,CAAC,CAAC;IAErD,mDAAmD;IACnD,KAAK,IAAI,OAAO,CAAC,aAAa,CAAC,CAAC;IAEhC,8BAA8B;IAC9B,KAAK,IAAI,OAAO,CAAC,IAAI,CAAC,CAAC;CACxB;AAED,yCAAyC;AACzC,MAAM,MAAM,aAAa,GAAG,MAAM,OAAO,CAAC,WAAW,CAAC,CAAC"}
@@ -0,0 +1 @@
1
// Empty runtime module: keeps this file an ES module even though it exports no values.
export {};
@@ -0,0 +1,15 @@
1
+ import { BaseLimiter } from "./base.js";
2
+ import type { AcquireResult, RedisProvider, TimeInterval, WindowLimiterOptions } from "./types.js";
3
/**
 * Limits operations within a rolling time window.
 * Window starts from first operation and slides continuously.
 * `interval` may be a named TimeInterval or a number of seconds.
 */
export declare class WindowLimiter extends BaseLimiter {
    private readonly maxCount;
    private readonly windowSeconds;
    constructor(name: string, maxCount: number, interval: TimeInterval | number, options?: WindowLimiterOptions, redisProvider?: RedisProvider);
    private generateRequestId;
    protected tryAcquire(): Promise<AcquireResult>;
    check(): Promise<AcquireResult>;
}
//# sourceMappingURL=window.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"window.d.ts","sourceRoot":"","sources":["../../src/limiter/window.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,WAAW,EAAE,MAAM,WAAW,CAAC;AAExC,OAAO,KAAK,EACV,aAAa,EACb,aAAa,EACb,YAAY,EACZ,oBAAoB,EACrB,MAAM,YAAY,CAAC;AASpB;;;GAGG;AACH,qBAAa,aAAc,SAAQ,WAAW;IAC5C,OAAO,CAAC,QAAQ,CAAC,QAAQ,CAAS;IAClC,OAAO,CAAC,QAAQ,CAAC,aAAa,CAAS;gBAGrC,IAAI,EAAE,MAAM,EACZ,QAAQ,EAAE,MAAM,EAChB,QAAQ,EAAE,YAAY,GAAG,MAAM,EAC/B,OAAO,GAAE,oBAAyB,EAClC,aAAa,CAAC,EAAE,aAAa;IAQ/B,OAAO,CAAC,iBAAiB;cAIT,UAAU,IAAI,OAAO,CAAC,aAAa,CAAC;IAyB9C,KAAK,IAAI,OAAO,CAAC,aAAa,CAAC;CAetC"}
@@ -0,0 +1,59 @@
1
+ import { BaseLimiter } from "./base.js";
2
+ import { LUA_WINDOW_ACQUIRE } from "./lua-scripts.js";
3
/** Length in seconds of each named interval accepted by the constructor. */
const INTERVAL_SECONDS = {
    second: 1,
    minute: 60,
    hour: 3600,
    day: 86_400,
};
9
/**
 * Limits operations within a rolling time window.
 * The window starts from the first operation and slides continuously,
 * always covering the last `windowSeconds` seconds.
 */
export class WindowLimiter extends BaseLimiter {
    maxCount;
    windowSeconds;
    constructor(name, maxCount, interval, options = {}, redisProvider) {
        super(name, options, redisProvider);
        this.maxCount = maxCount;
        if (typeof interval === "number") {
            this.windowSeconds = interval;
        }
        else {
            this.windowSeconds = INTERVAL_SECONDS[interval];
        }
    }
    /** Unique ZSET member id for a single acquire attempt. */
    generateRequestId() {
        const entropy = Math.random().toString(36).slice(2);
        return `${process.pid}-${Date.now()}-${entropy}`;
    }
    /** Attempt to consume one slot in the current window via a Lua script. */
    async tryAcquire() {
        const client = await this.getRedis();
        const nowSeconds = Date.now() / 1000;
        const reply = (await client.eval(LUA_WINDOW_ACQUIRE, {
            keys: [this.redisKey],
            arguments: [
                String(this.maxCount),
                String(this.windowSeconds),
                String(nowSeconds),
                this.generateRequestId(),
            ],
        }));
        const allowedFlag = reply[0];
        const current = reply[1];
        const retryAfter = reply[2];
        return {
            allowed: allowedFlag === 1,
            current,
            limit: this.maxCount,
            // Only surface a retry hint when the script produced a positive one.
            retryAfter: retryAfter > 0 ? retryAfter : undefined,
        };
    }
    /**
     * Report current window usage without consuming a slot.
     * Entries older than the window are pruned first so the count is fresh.
     */
    async check() {
        const client = await this.getRedis();
        const cutoff = Date.now() / 1000 - this.windowSeconds;
        await client.zRemRangeByScore(this.redisKey, "-inf", String(cutoff));
        const current = await client.zCard(this.redisKey);
        return {
            allowed: current < this.maxCount,
            current,
            limit: this.maxCount,
        };
    }
}
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "sidekiq-ts",
3
- "version": "1.1.0",
3
+ "version": "1.2.0",
4
4
  "description": "TypeScript client for Sidekiq job processing",
5
5
  "license": "MIT",
6
6
  "keywords": [