@koala42/redis-highway 0.1.2 → 0.1.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2026 KOALA42
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
package/README.md CHANGED
@@ -1,6 +1,16 @@
1
1
  # @koala42/redis-highway
2
2
 
3
- High performance Redis stream-based queue for Node.js.
3
+ High performance Redis stream-based queue for Node.js. Supports single-instance Redis and Valkey.
4
+
5
+ ## Missing features ATM
6
+ - Better graceful shutdown handling
7
+ - Expose more than just `data: T` to worker process functions (e.g. job id and current status)
8
+ - Option to customize/enable/disable metrics
9
+ - Enable custom logger for workers and producers instead of console.logs
10
+
11
+
12
+ ## Roadmap
13
+ - Support Redis Cluster; this is probably only feasible for job payloads and the DLQ, since there is only a single stream
4
14
 
5
15
  ## Installation
6
16
 
@@ -21,7 +31,7 @@ const producer = new Producer(redis, 'my-stream');
21
31
 
22
32
  // Send job
23
33
  await producer.push(
24
- JSON.stringify({ hello: 'world' }),
34
+ JSON.stringify({ hello: 'world' }), // Message serialization is not done automatically
25
35
  ['group-A', 'group-B'] // Target specific consumer groups
26
36
  );
27
37
  ```
@@ -32,8 +42,8 @@ await producer.push(
32
42
  import { Redis } from 'ioredis';
33
43
  import { Worker } from '@koala42/redis-highway';
34
44
 
35
- class MyWorker extends Worker {
36
- async process(data: any) {
45
+ class MyWorker extends Worker<T> {
46
+ async process(data: T) {
37
47
  console.log('Processing:', data);
38
48
  // throw new Error('fail'); // Automatic retry/DLQ logic
39
49
  }
@@ -56,8 +66,59 @@ const metrics = new Metrics(redis, 'my-stream');
56
66
  const payload = await metrics.getPrometheusMetrics(['group-A']);
57
67
  ```
58
68
 
59
- ## Features
69
+ ## Usage with NestJS
70
+
71
+ ```typescript
72
+
73
+ // Producer
74
+ @Injectable()
75
+ export class EntryService {
76
+ private readonly producer: Producer;
77
+
78
+ constructor(){
79
+ this.producer = new Producer(
80
+ new Redis(...), // Or reuse existing ioredis connection
81
+ 'my-stream'
82
+ )
83
+ }
84
+
85
+ public async sth(): Promise<void>{
86
+ await this.producer.push(
87
+ JSON.stringify({ hello: 'world' }), // Message serialization is not done automatically
88
+ ['group-A', 'group-B'] // Target specific consumer groups
89
+ );
90
+ }
91
+ }
92
+
60
93
 
94
+ // Processor
95
+ @Injectable()
96
+ export class ProcessorService extends Worker<T> implements OnModuleInit, OnModuleDestroy {
97
+ constructor(){
98
+ super(
99
+ new Redis(...), // or reuse existing redis conn
100
+ 'group-A',
101
+ 'my-stream',
102
+ 50 // concurrency
103
+ )
104
+ }
105
+
106
+ async onModuleInit(): Promise<void>{
107
+ await this.start()
108
+ }
109
+
110
+ onModuleDestroy(){
111
+ this.stop()
112
+ }
113
+
114
+ async process(data: T): Promise<void>{
115
+ console.log("Processing job", JSON.stringify(data))
116
+ }
117
+ }
118
+ ```
119
+
120
+ ## Features
121
+ - **Lightweight**: Uses light Lua scripts and pipelines wherever possible, making it highly concurrent for both inserts and processing thanks to the reduced I/O load compared to BullMQ
61
122
  - **Granular Retries**: If one group fails, only that group retries.
62
123
  - **DLQ**: Dead Letter Queue support after max retries.
63
124
  - **Metrics**: Throughput, Waiting, DLQ, Prometheus export.
package/dist/lua.d.ts CHANGED
@@ -1 +1 @@
1
- export declare const LUA_MARK_DONE = "\n-- KEYS[1] = status key status key for jog\n-- KEYS[2] = data key for job\n-- KEYS[3] = stream key\n-- KEYS[4] = group name\n-- KEYS[5] = metrics key\n-- KEYS[6] = total metrics key(persistent)\n\n-- ARGV[1] = route name\n-- ARGV[2] = timestamp\n-- ARGV[3] = msgId - redis stream item ID\n\n-- 1. Ack the stream message\nredis.call('XACK', KEYS[3], KEYS[4], ARGV[3])\n\n-- 2. in status key mark the current route as done by saving timestamp\nredis.call('HSET', KEYS[1], ARGV[1], ARGV[2])\n\n-- 3. Increment throughput metric\nif KEYS[5] then\n redis.call('INCR', KEYS[5])\n redis.call('EXPIRE', KEYS[5], 86400)\nend\n\n-- 3.1 Increment Total Metric\nif KEYS[6] then\n redis.call('INCR', KEYS[6])\nend\n\n-- 4. Check for completed routes\nlocal current_fields = redis.call('HLEN', KEYS[1])\n\n-- 5. Get the target completed routes\nlocal target_str = redis.call('HGET', KEYS[1], '__target')\nlocal target = tonumber(target_str)\n\nif not target then\n return 0\nend\n\n-- 6. If completed routes is status hash length - 1 -> all were done and we can cleanup\nif current_fields >= (target + 1) then\n redis.call('DEL', KEYS[1], KEYS[2])\n return 1 -- Cleanup, DONE\nend\n\nreturn 0 -- Some routes are not done yet\n";
1
+ export declare const LUA_MARK_DONE = "\n-- KEYS[1] = status key status key for jog\n-- KEYS[2] = data key for job\n-- KEYS[3] = stream key\n-- KEYS[4] = group name\n-- KEYS[5] = metrics key\n-- KEYS[6] = total metrics key(persistent)\n\n-- ARGV[1] = route name\n-- ARGV[2] = timestamp\n-- ARGV[3] = msgId - redis stream item ID\n\n-- 1. Ack the stream message\nredis.call('XACK', KEYS[3], KEYS[4], ARGV[3])\n\n-- 2. in status key mark the current route as done by saving timestamp\nredis.call('HSET', KEYS[1], ARGV[1], ARGV[2])\n\n-- 3. Increment throughput metric\nif KEYS[5] then\n redis.call('INCR', KEYS[5])\n redis.call('EXPIRE', KEYS[5], 86400)\nend\n\n-- 3.1 Increment Total Metric\nif KEYS[6] then\n redis.call('INCR', KEYS[6])\nend\n\n-- 4. Check for completed routes\nlocal current_fields = redis.call('HLEN', KEYS[1])\n\n-- 5. Get the target completed routes\nlocal target_str = redis.call('HGET', KEYS[1], '__target')\nlocal target = tonumber(target_str)\n\nif not target then\n return 0\nend\n\n-- 6. If completed routes is status hash length - 1 -> all were done and we can cleanup\nif current_fields >= (target + 1) then\n redis.call('DEL', KEYS[1], KEYS[2])\n redis.call('XDEL', KEYS[3], ARGV[3])\n return 1 -- Cleanup, DONE\nend\n\nreturn 0 -- Some routes are not done yet\n";
package/dist/lua.js CHANGED
@@ -44,6 +44,7 @@ end
44
44
  -- 6. If completed routes is status hash length - 1 -> all were done and we can cleanup
45
45
  if current_fields >= (target + 1) then
46
46
  redis.call('DEL', KEYS[1], KEYS[2])
47
+ redis.call('XDEL', KEYS[3], ARGV[3])
47
48
  return 1 -- Cleanup, DONE
48
49
  end
49
50
 
@@ -0,0 +1 @@
1
+ export {};
@@ -0,0 +1,206 @@
1
+ "use strict";
2
+ var __importDefault = (this && this.__importDefault) || function (mod) {
3
+ return (mod && mod.__esModule) ? mod : { "default": mod };
4
+ };
5
+ Object.defineProperty(exports, "__esModule", { value: true });
6
+ const vitest_1 = require("vitest");
7
+ const ioredis_1 = __importDefault(require("ioredis"));
8
+ const producer_1 = require("./producer");
9
+ const worker_1 = require("./worker");
10
+ const metrics_1 = require("./metrics");
11
+ const uuid_1 = require("uuid");
12
+ const REDIS_URL = process.env.REDIS_URL || 'redis://localhost:6379';
13
+ class TestWorker extends worker_1.Worker {
14
+ constructor(redis, groupName, streamName, concurrency = 1, blockTimeMs = 100) {
15
+ super(redis, groupName, streamName, concurrency, blockTimeMs);
16
+ this.processedCount = 0;
17
+ this.lastProcessedId = null;
18
+ this.shouldFail = false;
19
+ this.failCount = 0;
20
+ this.maxFails = 0;
21
+ }
22
+ async process(data) {
23
+ if (this.shouldFail) {
24
+ this.failCount++;
25
+ if (this.maxFails > 0 && this.failCount > this.maxFails) {
26
+ // Stop failing after maxFails
27
+ }
28
+ else {
29
+ throw new Error("Simulated Failure");
30
+ }
31
+ }
32
+ this.processedCount++;
33
+ if (data && data.id) {
34
+ this.lastProcessedId = data.id;
35
+ }
36
+ }
37
+ }
38
+ (0, vitest_1.describe)('Redis Queue Integration', () => {
39
+ let redis;
40
+ let producer;
41
+ let streamName;
42
+ let workers = [];
43
+ (0, vitest_1.beforeEach)(() => {
44
+ redis = new ioredis_1.default(REDIS_URL);
45
+ streamName = `test-queue-${(0, uuid_1.v7)()}`;
46
+ producer = new producer_1.Producer(redis, streamName);
47
+ workers = [];
48
+ });
49
+ (0, vitest_1.afterEach)(async () => {
50
+ for (const w of workers) {
51
+ w.stop();
52
+ }
53
+ await new Promise(r => setTimeout(r, 500));
54
+ // Cleanup Redis keys using the existing connection before closing
55
+ if (redis.status === 'ready') {
56
+ const keys = await redis.keys(`${streamName}*`);
57
+ if (keys.length)
58
+ await redis.del(...keys);
59
+ }
60
+ redis.disconnect();
61
+ });
62
+ const waitFor = async (condition, timeout = 5000) => {
63
+ const start = Date.now();
64
+ while (Date.now() - start < timeout) {
65
+ if (await condition())
66
+ return true;
67
+ await new Promise(r => setTimeout(r, 100));
68
+ }
69
+ return false;
70
+ };
71
+ (0, vitest_1.describe)('Core Functionality', () => {
72
+ (0, vitest_1.it)('Should deliver message to all target groups', async () => {
73
+ const w1 = new TestWorker(redis, 'group-A', streamName, 1, 100);
74
+ const w2 = new TestWorker(redis, 'group-B', streamName, 1, 100);
75
+ workers.push(w1, w2);
76
+ await w1.start();
77
+ await w2.start();
78
+ const id = await producer.push(JSON.stringify({ id: 'msg-1' }), ['group-A', 'group-B']);
79
+ await waitFor(() => w1.processedCount === 1 && w2.processedCount === 1);
80
+ (0, vitest_1.expect)(w1.processedCount).toBe(1);
81
+ (0, vitest_1.expect)(w2.processedCount).toBe(1);
82
+ // Test cleanup keys (but not explicit XDEL here yet, checking keys gone)
83
+ const statusKey = `${streamName}:status:${id}`;
84
+ const dataKey = `${streamName}:data:${id}`;
85
+ (0, vitest_1.expect)(await redis.exists(statusKey)).toBe(0);
86
+ (0, vitest_1.expect)(await redis.exists(dataKey)).toBe(0);
87
+ });
88
+ (0, vitest_1.it)('Should only deliver to targeted groups', async () => {
89
+ const wA = new TestWorker(redis, 'group-A', streamName, 1, 100);
90
+ const wB = new TestWorker(redis, 'group-B', streamName, 1, 100);
91
+ workers.push(wA, wB);
92
+ await wA.start();
93
+ await wB.start();
94
+ await producer.push(JSON.stringify({ id: 'msg-only-a' }), ['group-A']);
95
+ await waitFor(() => wA.processedCount === 1);
96
+ (0, vitest_1.expect)(wA.processedCount).toBe(1);
97
+ (0, vitest_1.expect)(wB.processedCount).toBe(0);
98
+ });
99
+ (0, vitest_1.it)('Should retry only the failed group', async () => {
100
+ const wOk = new TestWorker(redis, 'group-Ok', streamName, 1, 100);
101
+ const wFail = new TestWorker(redis, 'group-Fail', streamName, 1, 100);
102
+ wFail.shouldFail = true;
103
+ wFail.maxFails = 1; // Fail once, then succeed
104
+ workers.push(wOk, wFail);
105
+ await wOk.start();
106
+ await wFail.start();
107
+ await producer.push(JSON.stringify({ id: 'retry-test' }), ['group-Ok', 'group-Fail']);
108
+ // Wait for wOk to finish and wFail to try at least twice (fail + success)
109
+ await waitFor(() => wOk.processedCount === 1 && wFail.processedCount === 1, 8000);
110
+ (0, vitest_1.expect)(wOk.processedCount).toBe(1); // Processed once
111
+ (0, vitest_1.expect)(wFail.failCount).toBeGreaterThanOrEqual(1); // Failed at least once
112
+ (0, vitest_1.expect)(wFail.processedCount).toBe(1); // Eventually succeeded
113
+ (0, vitest_1.expect)(wOk.processedCount).toBe(1); // wOk should NOT process the retry
114
+ });
115
+ (0, vitest_1.it)('Should move to DLQ after max retries', async () => {
116
+ const wDead = new TestWorker(redis, 'group-Dead', streamName, 1, 100);
117
+ wDead.shouldFail = true;
118
+ wDead.maxFails = 10; // Fail forever (more than max retries which is 3)
119
+ workers.push(wDead);
120
+ await wDead.start();
121
+ const id = await producer.push(JSON.stringify({ id: 'dlq-test' }), ['group-Dead']);
122
+ await waitFor(async () => {
123
+ const len = await redis.xlen(`${streamName}:dlq`);
124
+ return len > 0;
125
+ }, 10000);
126
+ const dlqLen = await redis.xlen(`${streamName}:dlq`);
127
+ (0, vitest_1.expect)(dlqLen).toBe(1);
128
+ (0, vitest_1.expect)(await redis.exists(`${streamName}:status:${id}`)).toBe(0);
129
+ });
130
+ });
131
+ (0, vitest_1.describe)('Metrics & Monitoring', () => {
132
+ (0, vitest_1.it)('Should track throughput and queue size', async () => {
133
+ const w = new TestWorker(redis, 'group-Metrics', streamName, 1, 100);
134
+ const metricsService = new metrics_1.Metrics(redis, streamName);
135
+ workers.push(w);
136
+ await w.start();
137
+ let metrics = await metricsService.getMetrics(['group-Metrics']);
138
+ (0, vitest_1.expect)(metrics.dlqLength).toBe(0);
139
+ const id = await producer.push(JSON.stringify({ id: 'metrics-1' }), ['group-Metrics']);
140
+ await waitFor(() => w.processedCount === 1);
141
+ metrics = await metricsService.getMetrics(['group-Metrics']);
142
+ (0, vitest_1.expect)(metrics.throughput['group-Metrics']).toBeGreaterThanOrEqual(1);
143
+ w.shouldFail = true;
144
+ w.maxFails = 10;
145
+ await producer.push(JSON.stringify({ id: 'metrics-fail' }), ['group-Metrics']);
146
+ await waitFor(() => redis.xlen(`${streamName}:dlq`).then(len => len > 0));
147
+ metrics = await metricsService.getMetrics(['group-Metrics']);
148
+ (0, vitest_1.expect)(metrics.dlqLength).toBe(1);
149
+ });
150
+ (0, vitest_1.it)('Should export Prometheus metrics', async () => {
151
+ const w = new TestWorker(redis, 'group-Prom', streamName, 1, 100);
152
+ const metricsService = new metrics_1.Metrics(redis, streamName);
153
+ workers.push(w);
154
+ await w.start();
155
+ await producer.push(JSON.stringify({ id: 'prom-1' }), ['group-Prom']);
156
+ await producer.push(JSON.stringify({ id: 'prom-2' }), ['group-Prom']);
157
+ await waitFor(() => w.processedCount === 2);
158
+ const promOutput = await metricsService.getPrometheusMetrics(['group-Prom']);
159
+ (0, vitest_1.expect)(promOutput).toContain(`# TYPE redis_highway_queue_throughput_1m gauge`);
160
+ (0, vitest_1.expect)(promOutput).toContain(`redis_highway_queue_throughput_1m{stream="${streamName}", group="group-Prom"} 2`);
161
+ (0, vitest_1.expect)(promOutput).toContain(`# TYPE redis_highway_queue_jobs_total counter`);
162
+ (0, vitest_1.expect)(promOutput).toContain(`redis_highway_queue_jobs_total{stream="${streamName}", group="group-Prom"} 2`);
163
+ (0, vitest_1.expect)(promOutput).toContain(`# TYPE redis_highway_queue_waiting_jobs gauge`);
164
+ });
165
+ });
166
+ (0, vitest_1.describe)('Stream Cleanup', () => {
167
+ (0, vitest_1.it)('Should delete message from stream after processing', async () => {
168
+ const w1 = new TestWorker(redis, 'group-A', streamName, 1, 100);
169
+ workers.push(w1);
170
+ await w1.start();
171
+ const id = await producer.push(JSON.stringify({ id: 'msg-cleanup' }), ['group-A']);
172
+ // Wait for processing
173
+ await waitFor(() => w1.processedCount === 1);
174
+ (0, vitest_1.expect)(w1.processedCount).toBe(1);
175
+ // Wait for stream to be empty
176
+ const success = await waitFor(async () => {
177
+ const len = await redis.xlen(streamName);
178
+ return len === 0;
179
+ });
180
+ (0, vitest_1.expect)(success).toBe(true);
181
+ const messages = await redis.xrange(streamName, '-', '+');
182
+ (0, vitest_1.expect)(messages.length).toBe(0);
183
+ });
184
+ (0, vitest_1.it)('Should delete message from stream only after ALL groups processed it', async () => {
185
+ const w1 = new TestWorker(redis, 'group-A', streamName, 1, 100);
186
+ const w2 = new TestWorker(redis, 'group-B', streamName, 1, 100);
187
+ workers.push(w1, w2);
188
+ await w1.start(); // Only start w1
189
+ const id = await producer.push(JSON.stringify({ id: 'msg-multi' }), ['group-A', 'group-B']);
190
+ // Wait for w1 to process
191
+ await waitFor(() => w1.processedCount === 1);
192
+ let len = await redis.xlen(streamName);
193
+ (0, vitest_1.expect)(len).toBe(1); // Should still exist because group-B pending
194
+ // Start w2
195
+ await w2.start();
196
+ // Wait for w2 to process
197
+ await waitFor(() => w2.processedCount === 1);
198
+ // Wait for stream to be empty
199
+ const success = await waitFor(async () => {
200
+ len = await redis.xlen(streamName);
201
+ return len === 0;
202
+ });
203
+ (0, vitest_1.expect)(success).toBe(true);
204
+ });
205
+ });
206
+ });
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@koala42/redis-highway",
3
- "version": "0.1.2",
3
+ "version": "0.1.4",
4
4
  "description": "High performance redis queue",
5
5
  "license": "MIT",
6
6
  "author": {
@@ -16,8 +16,10 @@
16
16
  ],
17
17
  "scripts": {
18
18
  "clean": "rimraf dist",
19
+ "test": "vitest run src/queue.spec.ts",
19
20
  "build": "npm run clean && tsc",
20
- "prepublish": "npm run build"
21
+ "prepublish": "npm run build",
22
+ "publish": "npm run test && npm run clean && npm run build && npm publish --access public"
21
23
  },
22
24
  "keywords": [
23
25
  "redis",