@koala42/redis-highway 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +63 -0
- package/dist/index.d.ts +5 -0
- package/dist/index.js +21 -0
- package/dist/interfaces.d.ts +2 -0
- package/dist/interfaces.js +2 -0
- package/dist/keys.d.ts +10 -0
- package/dist/keys.js +28 -0
- package/dist/lua.d.ts +1 -0
- package/dist/lua.js +51 -0
- package/dist/metrics.d.ts +24 -0
- package/dist/metrics.js +88 -0
- package/dist/producer.d.ts +19 -0
- package/dist/producer.js +43 -0
- package/dist/worker.d.ts +27 -0
- package/dist/worker.js +145 -0
- package/package.json +38 -0
package/README.md
ADDED
@@ -0,0 +1,63 @@
# @koala42/redis-highway

High-performance Redis stream-based queue for Node.js.

## Installation

```bash
npm install @koala42/redis-highway
```

## Usage

### Producer

```typescript
import { Redis } from 'ioredis';
import { Producer } from '@koala42/redis-highway';

const redis = new Redis();
const producer = new Producer(redis, 'my-stream');

// Send a job
await producer.push(
  JSON.stringify({ hello: 'world' }),
  ['group-A', 'group-B'] // Target specific consumer groups
);
```

### Worker

```typescript
import { Redis } from 'ioredis';
import { Worker } from '@koala42/redis-highway';

class MyWorker extends Worker {
  async process(data: any) {
    console.log('Processing:', data);
    // throw new Error('fail'); // Automatic retry/DLQ logic
  }
}

const redis = new Redis();
const worker = new MyWorker(redis, 'group-A', 'my-stream');

await worker.start();
```

### Metrics

```typescript
import { Metrics } from '@koala42/redis-highway';

const metrics = new Metrics(redis, 'my-stream');

// Prometheus format
const payload = await metrics.getPrometheusMetrics(['group-A']);
```

## Features

- **Granular Retries**: If one group fails, only that group retries.
- **DLQ**: Dead Letter Queue support after max retries.
- **Metrics**: waiting jobs, DLQ depth, throughput, and Prometheus export.
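A point the README examples only imply: a single `push` fans out to every listed consumer group, each group processes the job exactly once, and the job's keys are cleaned up only after all targeted groups finish (see the Lua script below). A minimal sketch of that fan-out, with made-up stream/group names; each worker gets its own connection because a blocking XREADGROUP occupies the connection it runs on:

```typescript
import { Redis } from 'ioredis';
import { Producer, Worker } from '@koala42/redis-highway';

class LogWorker extends Worker {
  async process(data: any) { console.log('[group-A]', data); }
}
class AuditWorker extends Worker {
  async process(data: any) { console.log('[group-B]', data); }
}

// Separate connections per worker: XREADGROUP ... BLOCK ties up its connection.
await new LogWorker(new Redis(), 'group-A', 'orders').start();
await new AuditWorker(new Redis(), 'group-B', 'orders').start();

// One push, two independent deliveries; cleanup happens only after
// both groups have marked the job done.
await new Producer(new Redis(), 'orders').push(
  JSON.stringify({ orderId: 1 }),
  ['group-A', 'group-B']
);
```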
package/dist/index.d.ts
ADDED
package/dist/index.js
ADDED
@@ -0,0 +1,21 @@
"use strict";
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
    if (k2 === undefined) k2 = k;
    var desc = Object.getOwnPropertyDescriptor(m, k);
    if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
      desc = { enumerable: true, get: function() { return m[k]; } };
    }
    Object.defineProperty(o, k2, desc);
}) : (function(o, m, k, k2) {
    if (k2 === undefined) k2 = k;
    o[k2] = m[k];
}));
var __exportStar = (this && this.__exportStar) || function(m, exports) {
    for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
};
Object.defineProperty(exports, "__esModule", { value: true });
__exportStar(require("./producer"), exports);
__exportStar(require("./worker"), exports);
__exportStar(require("./metrics"), exports);
__exportStar(require("./keys"), exports);
__exportStar(require("./interfaces"), exports);
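Since `index.js` re-exports all five modules, everything public is importable from the package root (assuming the unshown `index.d.ts` mirrors these re-exports for types):

```typescript
import { Producer, Worker, Metrics, KeyManager } from '@koala42/redis-highway';
import type { JobOptions, QueueMetrics } from '@koala42/redis-highway';
```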
package/dist/keys.d.ts
ADDED
@@ -0,0 +1,10 @@
export declare class KeyManager {
    private readonly streamName;
    constructor(streamName: string);
    getStreamKey(): string;
    getJobStatusKey(id: string): string;
    getJobDataKey(id: string): string;
    getDlqStreamKey(): string;
    getThroughputKey(groupName: string, timestamp: number): string;
    getTotalKey(groupName: string): string;
}
package/dist/keys.js
ADDED
@@ -0,0 +1,28 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.KeyManager = void 0;
class KeyManager {
    constructor(streamName) {
        this.streamName = streamName;
    }
    getStreamKey() {
        return this.streamName;
    }
    getJobStatusKey(id) {
        return `${this.streamName}:status:${id}`;
    }
    getJobDataKey(id) {
        return `${this.streamName}:data:${id}`;
    }
    getDlqStreamKey() {
        return `${this.streamName}:dlq`;
    }
    getThroughputKey(groupName, timestamp) {
        const minute = Math.floor(timestamp / 60000) * 60000;
        return `metrics:throughput:${this.streamName}:${groupName}:${minute}`;
    }
    getTotalKey(groupName) {
        return `metrics:total:${this.streamName}:${groupName}`;
    }
}
exports.KeyManager = KeyManager;
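Concretely, for a stream named `my-stream` the key layout works out as follows (the job ID is a made-up placeholder; in practice it is a uuidv7 minted by the producer):

```typescript
import { KeyManager } from '@koala42/redis-highway';

const keys = new KeyManager('my-stream');
keys.getStreamKey();                     // 'my-stream'
keys.getJobStatusKey('job-1');           // 'my-stream:status:job-1'
keys.getJobDataKey('job-1');             // 'my-stream:data:job-1'
keys.getDlqStreamKey();                  // 'my-stream:dlq'
keys.getThroughputKey('group-A', 90000); // 'metrics:throughput:my-stream:group-A:60000' (bucketed to the minute)
keys.getTotalKey('group-A');             // 'metrics:total:my-stream:group-A'
```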
package/dist/lua.d.ts
ADDED
@@ -0,0 +1 @@
export declare const LUA_MARK_DONE = "\n-- KEYS[1] = status key for job\n-- KEYS[2] = data key for job\n-- KEYS[3] = stream key\n-- KEYS[4] = group name\n-- KEYS[5] = metrics key\n-- KEYS[6] = total metrics key (persistent)\n\n-- ARGV[1] = route name\n-- ARGV[2] = timestamp\n-- ARGV[3] = msgId - redis stream item ID\n\n-- 1. Ack the stream message\nredis.call('XACK', KEYS[3], KEYS[4], ARGV[3])\n\n-- 2. In the status key, mark the current route as done by saving the timestamp\nredis.call('HSET', KEYS[1], ARGV[1], ARGV[2])\n\n-- 3. Increment throughput metric\nif KEYS[5] then\n    redis.call('INCR', KEYS[5])\n    redis.call('EXPIRE', KEYS[5], 86400)\nend\n\n-- 3.1 Increment total metric\nif KEYS[6] then\n    redis.call('INCR', KEYS[6])\nend\n\n-- 4. Count the fields in the status hash\nlocal current_fields = redis.call('HLEN', KEYS[1])\n\n-- 5. Get the target number of routes\nlocal target_str = redis.call('HGET', KEYS[1], '__target')\nlocal target = tonumber(target_str)\n\nif not target then\n    return 0\nend\n\n-- 6. The hash holds '__target' plus one field per completed route, so HLEN >= target + 1 means every route is done and we can clean up\nif current_fields >= (target + 1) then\n    redis.call('DEL', KEYS[1], KEYS[2])\n    return 1 -- Cleanup, DONE\nend\n\nreturn 0 -- Some routes are not done yet\n";
package/dist/lua.js
ADDED
@@ -0,0 +1,51 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.LUA_MARK_DONE = void 0;
exports.LUA_MARK_DONE = `
-- KEYS[1] = status key for job
-- KEYS[2] = data key for job
-- KEYS[3] = stream key
-- KEYS[4] = group name
-- KEYS[5] = metrics key
-- KEYS[6] = total metrics key (persistent)

-- ARGV[1] = route name
-- ARGV[2] = timestamp
-- ARGV[3] = msgId - redis stream item ID

-- 1. Ack the stream message
redis.call('XACK', KEYS[3], KEYS[4], ARGV[3])

-- 2. In the status key, mark the current route as done by saving the timestamp
redis.call('HSET', KEYS[1], ARGV[1], ARGV[2])

-- 3. Increment throughput metric
if KEYS[5] then
    redis.call('INCR', KEYS[5])
    redis.call('EXPIRE', KEYS[5], 86400)
end

-- 3.1 Increment total metric
if KEYS[6] then
    redis.call('INCR', KEYS[6])
end

-- 4. Count the fields in the status hash
local current_fields = redis.call('HLEN', KEYS[1])

-- 5. Get the target number of routes
local target_str = redis.call('HGET', KEYS[1], '__target')
local target = tonumber(target_str)

if not target then
    return 0
end

-- 6. The hash holds '__target' plus one field per completed route, so HLEN >= target + 1 means every route is done and we can clean up
if current_fields >= (target + 1) then
    redis.call('DEL', KEYS[1], KEYS[2])
    return 1 -- Cleanup, DONE
end

return 0 -- Some routes are not done yet
`;
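For orientation, this is the call shape `worker.js` uses for the script (six keys, three args), plus the completion arithmetic: the status hash starts with the single `__target` field, each group's finalize adds one field, so with two target groups the hash reaches `2 + 1 = 3` fields when the second group finishes and the script deletes both keys. A sketch with placeholder IDs; note `lua.js` is not re-exported from the package root, so the deep import is an assumption:

```typescript
import { Redis } from 'ioredis';
import { KeyManager } from '@koala42/redis-highway';
import { LUA_MARK_DONE } from '@koala42/redis-highway/dist/lua'; // deep import, not re-exported

const redis = new Redis();
const keys = new KeyManager('my-stream');
const jobId = 'example-job-id';  // placeholder; real IDs are uuidv7
const msgId = '1700000000000-0'; // placeholder stream entry ID
const now = Date.now();

const done = await redis.eval(
    LUA_MARK_DONE, 6,
    keys.getJobStatusKey(jobId),           // KEYS[1] status hash
    keys.getJobDataKey(jobId),             // KEYS[2] payload key
    keys.getStreamKey(),                   // KEYS[3] stream
    'group-A',                             // KEYS[4] consumer group
    keys.getThroughputKey('group-A', now), // KEYS[5] per-minute counter
    keys.getTotalKey('group-A'),           // KEYS[6] persistent counter
    'group-A',                             // ARGV[1] route name -> hash field
    now,                                   // ARGV[2] completion timestamp
    msgId                                  // ARGV[3] stream entry ID to XACK
);
// done === 1 only once every targeted group has finished (HLEN >= __target + 1).
```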
package/dist/metrics.d.ts
ADDED
@@ -0,0 +1,24 @@
import { Redis } from 'ioredis';
export interface QueueMetrics {
    streamLength: number;
    dlqLength: number;
    throughput: Record<string, number>;
}
export declare class Metrics {
    private readonly redis;
    private readonly streamName;
    private keys;
    constructor(redis: Redis, streamName: string);
    /**
     * Get current metrics for the queue
     * @param groupNames - List of consumer groups to fetch throughput for
     */
    getMetrics(groupNames: string[]): Promise<QueueMetrics>;
    /**
     * Get Prometheus-compatible metrics
     * @param groupNames - target group names for throughput metrics
     * @param prefix - export prefix
     * @returns metrics as string
     */
    getPrometheusMetrics(groupNames: string[], prefix?: string): Promise<string>;
}
package/dist/metrics.js
ADDED
@@ -0,0 +1,88 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.Metrics = void 0;
const keys_1 = require("./keys");
class Metrics {
    constructor(redis, streamName) {
        this.redis = redis;
        this.streamName = streamName;
        this.keys = new keys_1.KeyManager(streamName);
    }
    /**
     * Get current metrics for the queue
     * @param groupNames - List of consumer groups to fetch throughput for
     */
    async getMetrics(groupNames) {
        const pipeline = this.redis.pipeline();
        // 1. Stream length
        pipeline.xlen(this.streamName);
        // 2. DLQ length
        pipeline.xlen(this.keys.getDlqStreamKey());
        // 3. Throughput keys for the current minute
        const timestamp = Date.now();
        groupNames.forEach(group => {
            pipeline.get(this.keys.getThroughputKey(group, timestamp));
        });
        const results = await pipeline.exec();
        if (!results) {
            throw new Error("Pipeline execution failed");
        }
        // Helper to safely extract a result
        const getResult = (index) => {
            const [err, res] = results[index];
            if (err)
                throw err;
            return res;
        };
        const streamLength = getResult(0);
        const dlqLength = getResult(1);
        const throughput = {};
        groupNames.forEach((group, index) => {
            // Offset by 2 because the first two results are the xlen calls
            const val = getResult(index + 2);
            throughput[group] = parseInt(val || '0', 10);
        });
        return {
            streamLength,
            dlqLength,
            throughput
        };
    }
    /**
     * Get Prometheus-compatible metrics
     * @param groupNames - target group names for throughput metrics
     * @param prefix - export prefix
     * @returns metrics as string
     */
    async getPrometheusMetrics(groupNames, prefix = 'redis_highway_queue') {
        const pipeline = this.redis.pipeline();
        pipeline.xlen(this.streamName);
        pipeline.xlen(this.keys.getDlqStreamKey());
        const timestamp = Date.now();
        groupNames.forEach(group => {
            pipeline.get(this.keys.getThroughputKey(group, timestamp));
            pipeline.get(this.keys.getTotalKey(group));
        });
        const results = await pipeline.exec();
        if (!results)
            throw new Error("Pipeline execution failed");
        const getResult = (index) => {
            const [err, res] = results[index];
            if (err)
                throw err;
            return res;
        };
        const streamLength = getResult(0);
        const dlqLength = getResult(1);
        const response = [];
        response.push(
            `# HELP ${prefix}_waiting_jobs Total jobs waiting in stream`,
            `# TYPE ${prefix}_waiting_jobs gauge`,
            `${prefix}_waiting_jobs{stream="${this.streamName}"} ${streamLength}`,
            `# HELP ${prefix}_dlq_jobs Total jobs in DLQ`,
            `# TYPE ${prefix}_dlq_jobs gauge`,
            `${prefix}_dlq_jobs{stream="${this.streamName}"} ${dlqLength}`
        );
        groupNames.forEach((group, index) => {
            // Two GETs per group: throughput at baseIndex, total at baseIndex + 1
            const baseIndex = 2 + (index * 2);
            const throughputVal = parseInt(getResult(baseIndex) || '0', 10);
            const totalVal = parseInt(getResult(baseIndex + 1) || '0', 10);
            response.push(
                `# HELP ${prefix}_throughput_1m Jobs processed in the last minute`,
                `# TYPE ${prefix}_throughput_1m gauge`,
                `${prefix}_throughput_1m{stream="${this.streamName}", group="${group}"} ${throughputVal}`,
                `# HELP ${prefix}_jobs_total Total jobs processed`,
                `# TYPE ${prefix}_jobs_total counter`,
                `${prefix}_jobs_total{stream="${this.streamName}", group="${group}"} ${totalVal}`
            );
        });
        return response.join('\n');
    }
}
exports.Metrics = Metrics;
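For a sense of the exposition format, this is roughly what `getPrometheusMetrics` returns for one group with the default prefix (the numeric values are illustrative):

```typescript
import { Redis } from 'ioredis';
import { Metrics } from '@koala42/redis-highway';

const metrics = new Metrics(new Redis(), 'my-stream');
const text = await metrics.getPrometheusMetrics(['group-A']);
// # HELP redis_highway_queue_waiting_jobs Total jobs waiting in stream
// # TYPE redis_highway_queue_waiting_jobs gauge
// redis_highway_queue_waiting_jobs{stream="my-stream"} 12
// # HELP redis_highway_queue_dlq_jobs Total jobs in DLQ
// # TYPE redis_highway_queue_dlq_jobs gauge
// redis_highway_queue_dlq_jobs{stream="my-stream"} 0
// # HELP redis_highway_queue_throughput_1m Jobs processed in the last minute
// # TYPE redis_highway_queue_throughput_1m gauge
// redis_highway_queue_throughput_1m{stream="my-stream", group="group-A"} 340
// # HELP redis_highway_queue_jobs_total Total jobs processed
// # TYPE redis_highway_queue_jobs_total counter
// redis_highway_queue_jobs_total{stream="my-stream", group="group-A"} 10512
```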
package/dist/producer.d.ts
ADDED
@@ -0,0 +1,19 @@
import { Redis } from 'ioredis';
export interface JobOptions {
    ttl?: number | null;
    streamName?: string;
}
export declare class Producer {
    private readonly redis;
    private readonly streamName;
    private keys;
    constructor(redis: Redis, streamName: string);
    /**
     * Push a message to the queue
     * @param payload - serialized string payload
     * @param targetGroups - target consumer groups
     * @param opts - job options
     * @returns created job ID (uuidv7)
     */
    push(payload: string, targetGroups: string[], opts?: JobOptions): Promise<string>;
}
package/dist/producer.js
ADDED
@@ -0,0 +1,43 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.Producer = void 0;
const uuid_1 = require("uuid");
const keys_1 = require("./keys");
class Producer {
    constructor(redis, streamName) {
        this.redis = redis;
        this.streamName = streamName;
        this.keys = new keys_1.KeyManager(streamName);
    }
    /**
     * Push a message to the queue
     * @param payload - serialized string payload
     * @param targetGroups - target consumer groups
     * @param opts - job options
     * @returns created job ID (uuidv7)
     */
    async push(payload, targetGroups, opts) {
        const id = (0, uuid_1.v7)();
        const ttl = opts?.ttl || null; // TTL in seconds; null means no expiry
        const pipeline = this.redis.pipeline();
        const dataKey = this.keys.getJobDataKey(id);
        const statusKey = this.keys.getJobStatusKey(id);
        // Create job data
        if (ttl) {
            pipeline.set(dataKey, payload, 'EX', ttl);
        }
        else {
            pipeline.set(dataKey, payload);
        }
        // Initialize job metadata - status
        pipeline.hset(statusKey, '__target', targetGroups.length);
        if (ttl) {
            pipeline.expire(statusKey, ttl);
        }
        // Push message to stream
        pipeline.xadd(this.streamName, '*', 'id', id, 'target', targetGroups.join(','));
        await pipeline.exec();
        return id;
    }
}
exports.Producer = Producer;
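Note how `push` splits a job across keys: the payload lives at the data key (optionally with an expiry), the status hash records the number of target groups, and the stream entry carries only the id and the comma-joined targets. A usage sketch with the optional TTL (names are made up):

```typescript
import { Redis } from 'ioredis';
import { Producer } from '@koala42/redis-highway';

const producer = new Producer(new Redis(), 'my-stream');

const id = await producer.push(
  JSON.stringify({ hello: 'world' }),
  ['group-A', 'group-B'],
  { ttl: 3600 } // seconds: 'my-stream:data:<id>' and 'my-stream:status:<id>' expire in an hour
);
// The stream entry itself holds just: id=<uuidv7>, target='group-A,group-B'
```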
package/dist/worker.d.ts
ADDED
@@ -0,0 +1,27 @@
import Redis from "ioredis";
export declare abstract class Worker<T = any> {
    protected redis: Redis;
    protected groupName: string;
    protected streamName: string;
    protected concurrency: number;
    protected blockTimeMs: number;
    private isRunning;
    private activeCount;
    private readonly events;
    private keys;
    private readonly MAX_RETRIES;
    constructor(redis: Redis, groupName: string, streamName: string, concurrency?: number, blockTimeMs?: number);
    /**
     * Start the worker
     */
    start(): Promise<void>;
    stop(): void;
    private fetchLoop;
    private spawnWorker;
    private processInternal;
    private handleFailure;
    private finalize;
    private consumerName;
    abstract process(data: T): Promise<void>;
}
package/dist/worker.js
ADDED
@@ -0,0 +1,145 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.Worker = void 0;
const events_1 = require("events");
const lua_1 = require("./lua");
const keys_1 = require("./keys");
class Worker {
    constructor(redis, groupName, streamName, concurrency = 1, blockTimeMs = 2000) {
        this.redis = redis;
        this.groupName = groupName;
        this.streamName = streamName;
        this.concurrency = concurrency;
        this.blockTimeMs = blockTimeMs;
        this.isRunning = false;
        this.activeCount = 0;
        this.events = new events_1.EventEmitter();
        this.MAX_RETRIES = 3;
        this.events.setMaxListeners(100);
        this.keys = new keys_1.KeyManager(streamName);
    }
    /**
     * Start the worker
     */
    async start() {
        if (this.isRunning) {
            return;
        }
        this.isRunning = true;
        try {
            await this.redis.xgroup('CREATE', this.streamName, this.groupName, '0', 'MKSTREAM');
        }
        catch (e) {
            if (!e.message.includes('BUSYGROUP')) {
                throw e;
            }
        }
        this.fetchLoop();
    }
    stop() {
        this.isRunning = false;
    }
    async fetchLoop() {
        while (this.isRunning) {
            const freeSlots = this.concurrency - this.activeCount;
            if (freeSlots <= 0) {
                await new Promise((resolve) => this.events.once('job_finished', resolve));
                continue;
            }
            try {
                const results = await this.redis.xreadgroup('GROUP', this.groupName, this.consumerName(), 'COUNT', freeSlots, 'BLOCK', this.blockTimeMs, 'STREAMS', this.streamName, '>');
                if (results) {
                    const messages = results[0][1];
                    for (const msg of messages) {
                        this.spawnWorker(msg);
                    }
                }
            }
            catch (err) {
                console.error(`[${this.groupName}] Fetch Error:`, err);
                await new Promise((resolve) => setTimeout(resolve, 1000));
            }
        }
    }
    spawnWorker(msg) {
        this.activeCount++;
        this.processInternal(msg).finally(() => {
            this.activeCount--;
            this.events.emit('job_finished');
        });
    }
    async processInternal(msg) {
        const msgId = msg[0];
        const messageFields = msg[1];
        const fields = {};
        for (let i = 0; i < messageFields.length; i += 2) {
            fields[messageFields[i]] = messageFields[i + 1];
        }
        const messageUuid = fields['id'];
        const routes = fields['target'];
        const retryCount = parseInt(fields['retryCount'] || '0', 10);
        if (!routes.includes(this.groupName)) {
            await this.redis.xack(this.streamName, this.groupName, msgId);
            return;
        }
        try {
            const dataKey = this.keys.getJobDataKey(messageUuid);
            const payload = await this.redis.get(dataKey);
            if (!payload) {
                // Data missing or expired
                await this.finalize(messageUuid, msgId);
                return;
            }
            await this.process(JSON.parse(payload));
            await this.finalize(messageUuid, msgId);
        }
        catch (err) {
            console.error(`[${this.groupName}] Job failed ${messageUuid}`, err);
            await this.handleFailure(messageUuid, msgId, retryCount, err.message);
        }
    }
    async handleFailure(uuid, msgId, currentRetries, errorMsg) {
        // 1. ACK the failed message (we are done with THIS stream entry)
        await this.redis.xack(this.streamName, this.groupName, msgId);
        if (currentRetries < this.MAX_RETRIES) {
            // 2a. RETRY: re-queue to the main stream, targeting ONLY this group
            console.log(`[${this.groupName}] Retrying job ${uuid} (Attempt ${currentRetries + 1}/${this.MAX_RETRIES})`);
            const pipeline = this.redis.pipeline();
            // Refresh TTLs to ensure the data persists through retries (+1 hour)
            pipeline.expire(this.keys.getJobDataKey(uuid), 3600);
            pipeline.expire(this.keys.getJobStatusKey(uuid), 3600);
            pipeline.xadd(this.streamName, '*',
                'id', uuid,
                'target', this.groupName, // Only target THIS group
                'retryCount', currentRetries + 1);
            await pipeline.exec();
        }
        else {
            // 2b. DEAD LETTER QUEUE (DLQ)
            console.error(`[${this.groupName}] Job ${uuid} exhausted retries. Moving to DLQ.`);
            const dlqStream = `${this.streamName}:dlq`;
            // Copy the payload into the DLQ entry so it is self-contained;
            // a reference to the data key would break once that key expires.
            const payload = await this.redis.get(this.keys.getJobDataKey(uuid));
            await this.redis.xadd(dlqStream, '*', 'id', uuid, 'group', this.groupName, 'error', errorMsg, 'payload', payload || 'MISSING', 'failedAt', Date.now());
            // Still run finalize to clean up the counters/status: treat the job as
            // "done" for the main flow even though it failed. The DLQ entry carries
            // its own payload copy, so it is fine if finalize deletes the hot keys.
            await this.finalize(uuid, msgId, true);
        }
    }
    async finalize(messageUuid, msgId, fromError = false) {
        // When called from the error handler the message was already ACKed in
        // handleFailure; the Lua script ACKs again, but XACK is idempotent.
        const timestamp = Date.now();
        const statusKey = this.keys.getJobStatusKey(messageUuid);
        const dataKey = this.keys.getJobDataKey(messageUuid);
        const throughputKey = this.keys.getThroughputKey(this.groupName, timestamp);
        const totalKey = this.keys.getTotalKey(this.groupName);
        await this.redis.eval(lua_1.LUA_MARK_DONE, 6, statusKey, dataKey, this.streamName, this.groupName, throughputKey, totalKey, this.groupName, timestamp, msgId);
    }
    consumerName() {
        return `${this.groupName}-${process.pid}`;
    }
}
exports.Worker = Worker;
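The constructor's optional `concurrency` and `blockTimeMs` parameters drive the fetch loop: each blocking XREADGROUP call reads at most `concurrency - activeCount` entries, and `stop()` only flips the running flag, so in-flight jobs finish and the loop exits after the current block window. A sketch (the payload type and names are invented):

```typescript
import { Redis } from 'ioredis';
import { Worker } from '@koala42/redis-highway';

interface EmailJob { to: string; subject: string } // hypothetical payload shape

class EmailWorker extends Worker<EmailJob> {
    async process(data: EmailJob) {
        console.log(`sending "${data.subject}" to ${data.to}`);
    }
}

// Up to 8 jobs in flight; XREADGROUP blocks for up to 5s per iteration.
const worker = new EmailWorker(new Redis(), 'group-A', 'my-stream', 8, 5000);
await worker.start();

// Graceful shutdown: no new reads after the current block window;
// already-spawned jobs run to completion.
process.on('SIGTERM', () => worker.stop());
```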
package/package.json
ADDED
@@ -0,0 +1,38 @@
{
  "name": "@koala42/redis-highway",
  "version": "0.1.0",
  "description": "High performance redis queue",
  "license": "MIT",
  "author": {
    "email": "stranavadavid@protonmail.com",
    "name": "David Stranava",
    "url": "https://github.com/stranavad"
  },
  "type": "commonjs",
  "main": "dist/index.js",
  "types": "dist/index.d.ts",
  "files": [
    "dist"
  ],
  "scripts": {
    "clean": "rimraf dist",
    "build": "npm run clean && tsc",
    "prepublish": "npm run build"
  },
  "keywords": [
    "redis",
    "queue",
    "valkey",
    "high-throughput"
  ],
  "dependencies": {
    "ioredis": "^5.9.1",
    "uuid": "^13.0.0"
  },
  "devDependencies": {
    "@types/node": "^25.0.6",
    "rimraf": "^6.1.2",
    "typescript": "^5.9.3",
    "vitest": "^4.0.16"
  }
}