@koala42/redis-highway 0.2.8 → 0.2.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md ADDED
@@ -0,0 +1,192 @@
1
+ # @koala42/redis-highway
2
+
3
+ High performance Redis stream-based queue for Node.js. Supports Redis single instances and Valkey single instances.
4
+ Designed for high throughput and massive concurrency with low overhead.
5
+
6
+ ## Features
7
+ - **Lightweight**: Uses optimized Lua scripts and pipelines for maximum performance and reduced I/O.
8
+ - **Micro-Batching**: Supports batch processing for high-volume message consumption.
9
+ - **Granular Retries**: Consumer group isolation - if one group fails, only that group retries.
10
+ - **Reliability**: Auto-claiming of stuck messages (crashed consumers) and Dead Letter Queue (DLQ) support.
11
+ - **Metrics**: Built-in tracking for throughput, queue depth, DLQ size, and retries. Prometheus export ready.
12
+
13
+ ## Installation
14
+
15
+ ```bash
16
+ npm install @koala42/redis-highway
17
+ ```
18
+
19
+ ## Usage
20
+
21
+ ### Producer
22
+
23
+ ```typescript
24
+ import { Redis } from 'ioredis';
25
+ import { Producer } from '@koala42/redis-highway';
26
+
27
+ const redis = new Redis();
28
+ const producer = new Producer<{hello: string}>(redis, 'my-stream');
29
+
30
+ // Send job
31
+ await producer.push(
32
+ { hello: 'world' }, // Type-safe payload
33
+ ['group-A', 'group-B'], // Target specific consumer groups
34
+ { ttl: 3600 } // Optional: expiration time in seconds
35
+ );
36
+ ```
37
+
38
+ ### Worker
39
+
40
+ ```typescript
41
+ import { Redis } from 'ioredis';
42
+ import { Worker } from '@koala42/redis-highway';
43
+
44
+ class MyWorker extends Worker<{hello: string}> {
45
+ async process(data: {hello: string}) {
46
+ console.log('Processing:', data.hello);
47
+ // throw new Error('fail'); // Triggers automatic retry logic
48
+ }
49
+ }
50
+
51
+ const redis = new Redis();
52
+ const worker = new MyWorker(
53
+ redis,
54
+ {
55
+ groupName: 'group-A',
56
+ streamName: 'my-stream',
57
+ concurrency: 10 // Number of concurrent jobs to process
58
+ }
59
+ );
60
+
61
+ await worker.start();
62
+
63
+ // To stop gracefully
64
+ // await worker.stop();
65
+ ```
66
+
67
+ ### Batch Worker
68
+ Process messages in batches for higher throughput.
69
+
70
+ ```typescript
71
+ import { Redis } from 'ioredis';
72
+ import { BatchWorker } from '@koala42/redis-highway';
73
+
74
+ class MyBatchWorker extends BatchWorker<{hello: string}> {
75
+ async process(batchedData: {hello: string}[]) {
76
+ console.log(`Processing batch of ${batchedData.length} items`);
77
+ // Example: Bulk insert into database
78
+ }
79
+ }
80
+
81
+ const batchWorker = new MyBatchWorker(
82
+ redis,
83
+ {
84
+ groupName: 'group-B',
85
+ streamName: 'my-stream',
86
+ concurrency: 50, // Total items processing limit
87
+ batchSize: 10, // Items per batch
88
+ maxFetchCount: 50
89
+ }
90
+ );
91
+
92
+ await batchWorker.start();
93
+ ```
94
+
95
+ ### Metrics
96
+
97
+ ```typescript
98
+ import { Metrics } from '@koala42/redis-highway';
99
+
100
+ const metrics = new Metrics(redis, 'my-stream');
101
+
102
+ // Get raw metrics object
103
+ const stats = await metrics.getMetrics(['group-A', 'group-B']);
104
+ console.log(stats.throughput);
105
+
106
+ // Get Prometheus formatted string
107
+ const promMetrics = await metrics.getPrometheusMetrics(['group-A'], 'my_app_queue');
108
+ ```
109
+
110
+ ## Configuration
111
+
112
+ ### Worker Options
113
+ The second argument to `Worker` and `BatchWorker` constructors is the primary configuration object.
114
+
115
+ | Option | Type | Description |
116
+ |--------|------|-------------|
117
+ | `groupName` | string | **Required**. The consumer group name (e.g., 'email-service'). |
118
+ | `streamName` | string | **Required**. The Redis stream key. |
119
+ | `concurrency` | number | **Required**. Maximum number of messages processed in parallel by this worker instance. |
120
+ | `batchSize` | number | **Required (BatchWorker only)**. Number of messages to process in a single call. |
121
+ | `maxFetchCount` | number | **Required (BatchWorker only)**. Limit for XREADGROUP count. |
122
+
123
+ ### Control Options
124
+ The third argument is for fine-tuning retry and recovery behavior.
125
+
126
+ ```typescript
127
+ const worker = new MyWorker(redis, { ... }, {
128
+ maxRetries: 3, // Default: 3
129
+ blockTimeMs: 2000, // Default: 2000. XREADGROUP block time.
130
+ minIdleTimeMs: 120000, // Default: 2 minutes. Time before a message is considered stuck.
131
+ claimIntervalMs: 120000,// Default: 2 minutes. How often to check for stuck messages.
132
+ collectMetrics: true // Default: true. Enable throughput tracking.
133
+ });
134
+ ```
135
+
136
+ | Option | Default | Description |
137
+ |--------|---------|-------------|
138
+ | `maxRetries` | 3 | Number of times to retry a failed message before moving it to DLQ. |
139
+ | `blockTimeMs` | 2000 | Redis blocking timeout for fetching new messages (in ms). |
140
+ | `minIdleTimeMs` | 120000 | Messages pending longer than this are candidates for auto-claim (recovery). |
141
+ | `claimIntervalMs` | 120000 | Interval for checking and claiming stuck messages. |
142
+ | `collectMetrics` | true | If true, increments throughput counters in Redis. |
143
+
144
+ ## Usage with NestJS
145
+
146
+ ```typescript
147
+ // Producer Service
148
+ @Injectable()
149
+ export class EntryService {
150
+ private readonly producer: Producer<MyPayload>;
151
+
152
+ constructor(@InjectRedis() private readonly redis: Redis) {
153
+ this.producer = new Producer(this.redis, 'my-stream');
154
+ }
155
+
156
+ async addToQueue(data: MyPayload) {
157
+ await this.producer.push(data, ['group-A']);
158
+ }
159
+ }
160
+
161
+ // Worker Service
162
+ @Injectable()
163
+ export class ProcessorService extends Worker<MyPayload> implements OnModuleInit, OnModuleDestroy {
164
+ constructor(@InjectRedis() redis: Redis) {
165
+ super(redis, {
166
+ groupName: 'group-A',
167
+ streamName: 'my-stream',
168
+ concurrency: 50
169
+ });
170
+ }
171
+
172
+ async onModuleInit() {
173
+ await this.start();
174
+ }
175
+
176
+ async onModuleDestroy() {
177
+ await this.stop();
178
+ }
179
+
180
+ async process(data: MyPayload) {
181
+ // Process your job here
182
+ }
183
+ }
184
+ ```
185
+
186
+ ## Roadmap & Missing Features
187
+ Tracked in [GitHub Issues](https://github.com/Koala42/redis-highway/issues)
188
+
189
+ ## AI Usage Disclosure
190
+ - AI will not be used for development, ever
191
+ - AI may be used to do code reviews
192
+ - AI may be used to write unit tests
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@koala42/redis-highway",
3
- "version": "0.2.8",
3
+ "version": "0.2.10",
4
4
  "description": "High performance redis queue",
5
5
  "license": "MIT",
6
6
  "author": {
@@ -10,19 +10,17 @@
10
10
  },
11
11
  "repository": {
12
12
  "url": "https://github.com/Koala42/redis-highway",
13
- "type": "github",
14
- "directory": "packages/redis-highway"
13
+ "type": "git"
15
14
  },
16
15
  "type": "commonjs",
17
- "main": "dist/src/index.js",
18
- "types": "dist/src/index.d.ts",
16
+ "main": "dist/index.js",
17
+ "types": "dist/index.d.ts",
19
18
  "files": [
20
19
  "dist"
21
20
  ],
22
21
  "scripts": {
23
22
  "clean": "rimraf dist",
24
23
  "test": "vitest run test",
25
- "test:all": "vitest run test",
26
24
  "build": "npm run clean && tsc",
27
25
  "prepublish": "npm run build"
28
26
  },
@@ -1 +0,0 @@
1
- export {};
@@ -1,256 +0,0 @@
1
- "use strict";
2
- var __importDefault = (this && this.__importDefault) || function (mod) {
3
- return (mod && mod.__esModule) ? mod : { "default": mod };
4
- };
5
- Object.defineProperty(exports, "__esModule", { value: true });
6
- const vitest_1 = require("vitest");
7
- const ioredis_1 = __importDefault(require("ioredis"));
8
- const producer_1 = require("../src/producer");
9
- const worker_1 = require("../src/worker");
10
- const metrics_1 = require("../src/metrics");
11
- const uuid_1 = require("uuid");
12
- const REDIS_URL = process.env.REDIS_URL || 'redis://localhost:6379';
13
- class TestWorker extends worker_1.Worker {
14
- constructor(redis, groupName, streamName, concurrency = 1, maxRetries = 3, blockTimeMs = 100, claimIntervalMs = 60000, minIdleTimeMs = 300000) {
15
- super(redis, {
16
- groupName,
17
- streamName,
18
- concurrency
19
- }, {
20
- maxRetries,
21
- blockTimeMs,
22
- claimIntervalMs,
23
- minIdleTimeMs,
24
- collectMetrics: true
25
- });
26
- this.processedCount = 0;
27
- this.lastProcessedId = null;
28
- this.shouldFail = false;
29
- this.failCount = 0;
30
- this.maxFails = 0;
31
- }
32
- async start() {
33
- return super.start();
34
- }
35
- async stop() {
36
- return super.stop();
37
- }
38
- async process(data) {
39
- if (this.shouldFail) {
40
- this.failCount++;
41
- if (this.maxFails > 0 && this.failCount > this.maxFails) {
42
- // Stop failing after maxFails
43
- }
44
- else {
45
- throw new Error("Simulated Failure");
46
- }
47
- }
48
- this.processedCount++;
49
- if (data && data.id) {
50
- this.lastProcessedId = data.id;
51
- }
52
- }
53
- }
54
- (0, vitest_1.describe)('Redis Queue Integration', () => {
55
- let redis;
56
- let producer;
57
- let streamName;
58
- let workers = [];
59
- (0, vitest_1.beforeEach)(() => {
60
- redis = new ioredis_1.default(REDIS_URL);
61
- streamName = `test-queue-${(0, uuid_1.v7)()}`;
62
- producer = new producer_1.Producer(redis, streamName);
63
- workers = [];
64
- });
65
- (0, vitest_1.afterEach)(async () => {
66
- for (const w of workers) {
67
- await w.stop();
68
- }
69
- await new Promise(r => setTimeout(r, 500));
70
- // Cleanup Redis keys using the existing connection before closing
71
- if (redis.status === 'ready') {
72
- const keys = await redis.keys(`${streamName}*`);
73
- if (keys.length)
74
- await redis.del(...keys);
75
- }
76
- redis.disconnect();
77
- });
78
- const waitFor = async (condition, timeout = 5000) => {
79
- const start = Date.now();
80
- while (Date.now() - start < timeout) {
81
- if (await condition())
82
- return true;
83
- await new Promise(r => setTimeout(r, 100));
84
- }
85
- return false;
86
- };
87
- (0, vitest_1.describe)('Core Functionality', () => {
88
- (0, vitest_1.it)('Should deliver message to all target groups', async () => {
89
- const w1 = new TestWorker(redis, 'group-A', streamName, 1, 3, 100);
90
- const w2 = new TestWorker(redis, 'group-B', streamName, 1, 3, 100);
91
- workers.push(w1, w2);
92
- await w1.start();
93
- await w2.start();
94
- const id = await producer.push({ id: 'msg-1' }, ['group-A', 'group-B']);
95
- await waitFor(() => w1.processedCount === 1 && w2.processedCount === 1);
96
- (0, vitest_1.expect)(w1.processedCount).toBe(1);
97
- (0, vitest_1.expect)(w2.processedCount).toBe(1);
98
- // Test cleanup keys (but not explicit XDEL here yet, checking keys gone)
99
- const statusKey = `${streamName}:status:${id}`;
100
- const dataKey = `${streamName}:data:${id}`;
101
- (0, vitest_1.expect)(await redis.exists(statusKey)).toBe(0);
102
- (0, vitest_1.expect)(await redis.exists(dataKey)).toBe(0);
103
- });
104
- (0, vitest_1.it)('Should only deliver to targeted groups', async () => {
105
- const wA = new TestWorker(redis, 'group-A', streamName, 1, 3, 100);
106
- const wB = new TestWorker(redis, 'group-B', streamName, 1, 3, 100);
107
- workers.push(wA, wB);
108
- await wA.start();
109
- await wB.start();
110
- await producer.push({ id: 'msg-only-a' }, ['group-A']);
111
- await waitFor(() => wA.processedCount === 1);
112
- (0, vitest_1.expect)(wA.processedCount).toBe(1);
113
- (0, vitest_1.expect)(wB.processedCount).toBe(0);
114
- });
115
- (0, vitest_1.it)('Should retry only the failed group', async () => {
116
- const wOk = new TestWorker(redis, 'group-Ok', streamName, 1, 3, 100);
117
- const wFail = new TestWorker(redis, 'group-Fail', streamName, 1, 3, 100);
118
- wFail.shouldFail = true;
119
- wFail.maxFails = 1; // Fail once, then succeed
120
- workers.push(wOk, wFail);
121
- await wOk.start();
122
- await wFail.start();
123
- await producer.push({ id: 'retry-test' }, ['group-Ok', 'group-Fail']);
124
- // Wait for wOk to finish and wFail to try at least twice (fail + success)
125
- await waitFor(() => wOk.processedCount === 1 && wFail.processedCount === 1, 8000);
126
- (0, vitest_1.expect)(wOk.processedCount).toBe(1); // Processed once
127
- (0, vitest_1.expect)(wFail.failCount).toBeGreaterThanOrEqual(1); // Failed at least once
128
- (0, vitest_1.expect)(wFail.processedCount).toBe(1); // Eventually succeeded
129
- (0, vitest_1.expect)(wOk.processedCount).toBe(1); // wOk should NOT process the retry
130
- });
131
- (0, vitest_1.it)('Should move to DLQ after max retries', async () => {
132
- const wDead = new TestWorker(redis, 'group-Dead', streamName, 1, 3, 100);
133
- wDead.shouldFail = true;
134
- wDead.maxFails = 10; // Fail forever (more than max retries which is 3)
135
- workers.push(wDead);
136
- await wDead.start();
137
- const id = await producer.push({ id: 'dlq-test' }, ['group-Dead']);
138
- await waitFor(async () => {
139
- const len = await redis.xlen(`${streamName}:dlq`);
140
- return len > 0;
141
- }, 10000);
142
- const dlqLen = await redis.xlen(`${streamName}:dlq`);
143
- (0, vitest_1.expect)(dlqLen).toBe(1);
144
- (0, vitest_1.expect)(await redis.exists(`${streamName}:status:${id}`)).toBe(0);
145
- });
146
- });
147
- (0, vitest_1.describe)('Metrics & Monitoring', () => {
148
- (0, vitest_1.it)('Should track throughput and queue size', async () => {
149
- const w = new TestWorker(redis, 'group-Metrics', streamName, 1, 3, 100);
150
- const metricsService = new metrics_1.Metrics(redis, streamName);
151
- workers.push(w);
152
- await w.start();
153
- let metrics = await metricsService.getMetrics(['group-Metrics']);
154
- (0, vitest_1.expect)(metrics.dlqLength).toBe(0);
155
- const id = await producer.push({ id: 'metrics-1' }, ['group-Metrics']);
156
- await waitFor(() => w.processedCount === 1);
157
- metrics = await metricsService.getMetrics(['group-Metrics']);
158
- (0, vitest_1.expect)(metrics.throughput['group-Metrics']).toBeGreaterThanOrEqual(1);
159
- w.shouldFail = true;
160
- w.maxFails = 10;
161
- await producer.push({ id: 'metrics-fail' }, ['group-Metrics']);
162
- await waitFor(() => redis.xlen(`${streamName}:dlq`).then(len => len > 0));
163
- metrics = await metricsService.getMetrics(['group-Metrics']);
164
- (0, vitest_1.expect)(metrics.dlqLength).toBe(1);
165
- });
166
- (0, vitest_1.it)('Should export Prometheus metrics', async () => {
167
- const w = new TestWorker(redis, 'group-Prom', streamName, 1, 3, 100);
168
- const metricsService = new metrics_1.Metrics(redis, streamName);
169
- workers.push(w);
170
- await w.start();
171
- await producer.push({ id: 'prom-1' }, ['group-Prom']);
172
- await producer.push({ id: 'prom-2' }, ['group-Prom']);
173
- await waitFor(() => w.processedCount === 2);
174
- const promOutput = await metricsService.getPrometheusMetrics(['group-Prom']);
175
- (0, vitest_1.expect)(promOutput).toContain(`# TYPE redis_highway_queue_throughput_1m gauge`);
176
- (0, vitest_1.expect)(promOutput).toContain(`redis_highway_queue_throughput_1m{stream="${streamName}", group="group-Prom"} 2`);
177
- (0, vitest_1.expect)(promOutput).toContain(`# TYPE redis_highway_queue_jobs_total counter`);
178
- (0, vitest_1.expect)(promOutput).toContain(`redis_highway_queue_jobs_total{stream="${streamName}", group="group-Prom"} 2`);
179
- (0, vitest_1.expect)(promOutput).toContain(`# TYPE redis_highway_queue_waiting_jobs gauge`);
180
- });
181
- });
182
- (0, vitest_1.describe)('Stream Cleanup', () => {
183
- (0, vitest_1.it)('Should delete message from stream after processing', async () => {
184
- const w1 = new TestWorker(redis, 'group-A', streamName, 1, 3, 100);
185
- workers.push(w1);
186
- await w1.start();
187
- const id = await producer.push({ id: 'msg-cleanup' }, ['group-A']);
188
- // Wait for processing
189
- await waitFor(() => w1.processedCount === 1);
190
- (0, vitest_1.expect)(w1.processedCount).toBe(1);
191
- // Wait for stream to be empty
192
- const success = await waitFor(async () => {
193
- const len = await redis.xlen(streamName);
194
- return len === 0;
195
- });
196
- (0, vitest_1.expect)(success).toBe(true);
197
- const messages = await redis.xrange(streamName, '-', '+');
198
- (0, vitest_1.expect)(messages.length).toBe(0);
199
- });
200
- (0, vitest_1.it)('Should delete message from stream only after ALL groups processed it', async () => {
201
- const w1 = new TestWorker(redis, 'group-A', streamName, 1, 3, 100);
202
- const w2 = new TestWorker(redis, 'group-B', streamName, 1, 3, 100);
203
- workers.push(w1, w2);
204
- await w1.start(); // Only start w1
205
- const id = await producer.push({ id: 'msg-multi' }, ['group-A', 'group-B']);
206
- // Wait for w1 to process
207
- await waitFor(() => w1.processedCount === 1);
208
- let len = await redis.xlen(streamName);
209
- (0, vitest_1.expect)(len).toBe(1); // Should still exist because group-B pending
210
- // Start w2
211
- await w2.start();
212
- // Wait for w2 to process
213
- await waitFor(() => w2.processedCount === 1);
214
- // Wait for stream to be empty
215
- const success = await waitFor(async () => {
216
- len = await redis.xlen(streamName);
217
- return len === 0;
218
- });
219
- (0, vitest_1.expect)(success).toBe(true);
220
- });
221
- });
222
- (0, vitest_1.it)('Should recover stuck messages via Auto-Claim', async () => {
223
- const groupName = 'group-Recover';
224
- // Start worker with short minIdleTime (e.g., 1000ms) to trigger claim quickly
225
- // minIdleTimeMs = 1000. claimIntervalMs = 500 (check frequently)
226
- const w = new TestWorker(redis, groupName, streamName, 1, 3, 100, 500, 1000);
227
- workers.push(w);
228
- // 1. Setup group manually
229
- await redis.xgroup('CREATE', streamName, groupName, '0', 'MKSTREAM');
230
- // 2. Push message
231
- const id = await producer.push({ id: 'stuck-msg' }, [groupName]);
232
- // 3. Simulate a consumer reading but crashing (no ACK)
233
- // consumer name 'bad-consumer'
234
- await redis.xreadgroup('GROUP', groupName, 'bad-consumer', 'COUNT', 1, 'STREAMS', streamName, '>');
235
- // 4. Wait for minIdleTime (1000ms) + buffer
236
- await new Promise(r => setTimeout(r, 1200));
237
- // 5. Start our worker
238
- await w.start();
239
- // 6. Verify worker picks it up
240
- await waitFor(() => w.processedCount === 1, 5000);
241
- (0, vitest_1.expect)(w.processedCount).toBe(1);
242
- (0, vitest_1.expect)(w.lastProcessedId).toBe('stuck-msg');
243
- // Verify it was claimed (delivered to new consumer)
244
- // We can check PEL or just trust processedCount
245
- const pending = await redis.xpending(streamName, groupName);
246
- // After processing, it should be ACKed, so pending count => 0 (if deleted)
247
- // or if finalize runs, it deletes the message entirely.
248
- // Wait for cleanup (finalize runs after process)
249
- await waitFor(async () => {
250
- const len = await redis.xlen(streamName);
251
- return len === 0;
252
- }, 2000);
253
- const len = await redis.xlen(streamName);
254
- (0, vitest_1.expect)(len).toBe(0);
255
- });
256
- });
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes