@koala42/redis-highway 0.1.11 → 0.2.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/src/base-worker.d.ts +64 -0
- package/dist/src/base-worker.js +180 -0
- package/dist/src/batch-worker.d.ts +16 -0
- package/dist/src/batch-worker.js +89 -0
- package/dist/{index.d.ts → src/index.d.ts} +1 -0
- package/dist/{index.js → src/index.js} +1 -0
- package/dist/src/interfaces.d.ts +19 -0
- package/dist/src/interfaces.js +10 -0
- package/dist/src/lua.d.ts +1 -0
- package/dist/src/lua.js +31 -0
- package/dist/{stream-message-entity.d.ts → src/stream-message-entity.d.ts} +2 -0
- package/dist/{stream-message-entity.js → src/stream-message-entity.js} +4 -0
- package/dist/src/worker.d.ts +24 -0
- package/dist/src/worker.js +72 -0
- package/dist/{queue.spec.js → test/queue.spec.js} +20 -4
- package/package.json +4 -5
- package/dist/batch-worker.d.ts +0 -40
- package/dist/batch-worker.js +0 -246
- package/dist/batch-worker.spec.d.ts +0 -1
- package/dist/batch-worker.spec.js +0 -124
- package/dist/interfaces.d.ts +0 -2
- package/dist/interfaces.js +0 -2
- package/dist/lua.d.ts +0 -2
- package/dist/lua.js +0 -77
- package/dist/worker.d.ts +0 -32
- package/dist/worker.js +0 -183
- /package/dist/{keys.d.ts → src/keys.d.ts} +0 -0
- /package/dist/{keys.js → src/keys.js} +0 -0
- /package/dist/{metrics.d.ts → src/metrics.d.ts} +0 -0
- /package/dist/{metrics.js → src/metrics.js} +0 -0
- /package/dist/{producer.d.ts → src/producer.d.ts} +0 -0
- /package/dist/{producer.js → src/producer.js} +0 -0
- /package/dist/{queue.spec.d.ts → test/queue.spec.d.ts} +0 -0

package/dist/src/base-worker.d.ts
ADDED
@@ -0,0 +1,64 @@
+import { EventEmitter } from "events";
+import { KeyManager } from "./keys";
+import Redis from "ioredis";
+import { BaseWorkerControlOptions, BaseWorkerOptions, StreamMessage, XReadGroupResponse } from "./interfaces";
+import { StreamMessageEntity } from "./stream-message-entity";
+export declare abstract class BaseWorker<T extends Record<string, unknown>> {
+    protected redis: Redis;
+    protected _isRunning: boolean;
+    protected _activeCount: number;
+    protected readonly _consumerName: string;
+    protected readonly _events: EventEmitter<any>;
+    protected readonly _keys: KeyManager;
+    protected readonly _consumerId: string;
+    protected _blockingRedis: Redis;
+    protected readonly _groupName: string;
+    protected readonly _streamName: string;
+    protected readonly _concurrency: number;
+    protected readonly _maxRetries: number;
+    protected readonly _blockTimeMs: number;
+    protected readonly _claimIntervalMs: number;
+    protected readonly _minIdleTimeMs: number;
+    protected readonly _collectMetrics: boolean;
+    constructor(redis: Redis, options: BaseWorkerOptions, controlOptions: BaseWorkerControlOptions);
+    /**
+     * Start the worker process
+     * Starts fetch loop and auto claim loop
+     */
+    protected start(): Promise<void>;
+    /**
+     * Gracefully stops the worker
+     * Waits for any running jobs
+     */
+    protected stop(): Promise<void>;
+    /**
+     * Auto claim loop
+     * Checks which messages are read but not acked for longer than minIdleTimeMs (PEL)
+     * Acks them and based on retry policy either enqueues them again or moves to DLQ
+     */
+    protected _autoClaimLoop(): Promise<void>;
+    protected _handleFailure(messages: StreamMessageEntity<T>[], errorMessage: string): Promise<void>;
+    /**
+     * Helper methods
+     */
+    /**
+     * Read messages from stream
+     * @param count
+     * @returns XReadGroupResponse
+     */
+    protected _readGroup(count: number): Promise<XReadGroupResponse | null>;
+    /**
+     * Auto claim messages
+     * @param count
+     * @param cursor
+     * @returns
+     */
+    protected _autoClaimMessages(count: number, cursor: string): Promise<[string, StreamMessage[]] | null>;
+    /**
+     * Finalize messages
+     * @param messages
+     * @returns
+     */
+    protected _finalize(messages: StreamMessageEntity<T>[]): Promise<void>;
+    protected abstract _fetchLoop(): Promise<void>;
+}

package/dist/src/base-worker.js
ADDED
@@ -0,0 +1,180 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.BaseWorker = void 0;
+const events_1 = require("events");
+const keys_1 = require("./keys");
+const uuid_1 = require("uuid");
+const stream_message_entity_1 = require("./stream-message-entity");
+const lua_1 = require("./lua");
+class BaseWorker {
+    constructor(redis, options, controlOptions) {
+        this.redis = redis;
+        this._isRunning = false;
+        this._activeCount = 0;
+        this._events = new events_1.EventEmitter();
+        this._consumerId = (0, uuid_1.v7)();
+        this._events.setMaxListeners(100);
+        this._groupName = options.groupName;
+        this._streamName = options.streamName;
+        this._concurrency = options.concurrency;
+        this._maxRetries = controlOptions.maxRetries;
+        this._blockTimeMs = controlOptions.blockTimeMs;
+        this._claimIntervalMs = controlOptions.claimIntervalMs;
+        this._minIdleTimeMs = controlOptions.minIdleTimeMs;
+        this._collectMetrics = controlOptions.collectMetrics;
+        this._consumerName = `${this._groupName}-${this._consumerId}`;
+        this._keys = new keys_1.KeyManager(options.streamName);
+        this._blockingRedis = redis.duplicate();
+    }
+    /**
+     * Start the worker process
+     * Starts fetch loop and auto claim loop
+     */
+    async start() {
+        if (this._isRunning) {
+            return;
+        }
+        this._isRunning = true;
+        try {
+            await this.redis.xgroup('CREATE', this._streamName, this._groupName, '0', 'MKSTREAM');
+        }
+        catch (e) {
+            if (!e.message.includes('BUSYGROUP')) {
+                throw e;
+            }
+        }
+        this._fetchLoop();
+        this._autoClaimLoop();
+    }
+    /**
+     * Gracefully stops the worker
+     * Waits for any running jobs
+     */
+    async stop() {
+        this._isRunning = false;
+        this._events.emit('job_finished');
+        if (this._blockingRedis) {
+            await this._blockingRedis.quit().catch();
+        }
+        while (this._activeCount > 0) {
+            await new Promise((resolve) => setTimeout(resolve, 50));
+        }
+    }
+    /**
+     * Auto claim loop
+     * Checks which messages are read but not acked for longer than minIdleTimeMs (PEL)
+     * Acks them and based on retry policy either enqueues them again or moves to DLQ
+     */
+    async _autoClaimLoop() {
+        while (this._isRunning) {
+            try {
+                await new Promise(resolve => setTimeout(resolve, this._claimIntervalMs));
+                if (!this._isRunning) {
+                    break;
+                }
+                let cursor = '0-0';
+                let continueClaiming = true;
+                while (continueClaiming && this._isRunning) {
+                    const result = await this._autoClaimMessages(this._concurrency, cursor);
+                    if (!result) {
+                        continueClaiming = false;
+                        break;
+                    }
+                    const [nextCursor, msgs] = result;
+                    cursor = nextCursor;
+                    if (msgs && msgs.length > 0) {
+                        const messages = msgs.map((msg) => new stream_message_entity_1.StreamMessageEntity(msg));
+                        await this._handleFailure(messages, 'Stuck messages');
+                    }
+                    else {
+                        continueClaiming = false;
+                    }
+                    if (nextCursor === '0-0') {
+                        continueClaiming = false;
+                    }
+                }
+            }
+            catch (e) {
+                if (this._isRunning) {
+                    console.error(`[${this._groupName}] auto claim err:`, e.message);
+                }
+            }
+        }
+    }
+    async _handleFailure(messages, errorMessage) {
+        if (!messages.length) {
+            return;
+        }
+        const timestamp = Date.now();
+        const pipeline = this.redis.pipeline();
+        const messagesStreamIds = messages.map((message) => message.streamMessageId);
+        pipeline.xack(this._streamName, this._groupName, ...messagesStreamIds);
+        const messagesToDLQ = [];
+        for (const message of messages) {
+            if (message.retryCount < this._maxRetries) {
+                const newJobId = (0, uuid_1.v7)();
+                pipeline.xadd(this._streamName, '*', 'id', newJobId, 'target', this._groupName, 'retryCount', message.retryCount + 1, 'data', message.serializedData);
+                const newStatusKey = this._keys.getJobStatusKey(newJobId);
+                pipeline.hset(newStatusKey, '__target', 1);
+                const statusKey = this._keys.getJobStatusKey(message.messageUuid);
+                pipeline.eval(lua_1.LUA_FINALIZE, 2, statusKey, this._streamName, this._groupName, timestamp, message.streamMessageId);
+            }
+            else {
+                console.error(`[${this._groupName}] Job ${message.messageUuid} run out of retries. Moving to DLQ`);
+                messagesToDLQ.push(message);
+                // Add message to DLQ stream
+                pipeline.xadd(this._keys.getDlqStreamKey(), '*', 'id', message.messageUuid, 'group', this._groupName, 'error', errorMessage, 'payload', message.serializedData, 'failedAt', Date.now());
+                const statusKey = this._keys.getJobStatusKey(message.messageUuid);
+                pipeline.eval(lua_1.LUA_FINALIZE, 2, statusKey, this._streamName, this._groupName, timestamp, message.streamMessageId);
+            }
+        }
+        await pipeline.exec();
+    }
+    /**
+     * Helper methods
+     */
+    /**
+     * Read messages from stream
+     * @param count
+     * @returns XReadGroupResponse
+     */
+    async _readGroup(count) {
+        return this._blockingRedis.xreadgroup('GROUP', this._groupName, this._consumerName, 'COUNT', count, 'BLOCK', this._blockTimeMs, 'STREAMS', this._streamName, '>');
+    }
+    /**
+     * Auto claim messages
+     * @param count
+     * @param cursor
+     * @returns
+     */
+    async _autoClaimMessages(count, cursor) {
+        return this.redis.xautoclaim(this._streamName, this._groupName, this._consumerName, this._minIdleTimeMs, cursor, 'COUNT', this._concurrency);
+    }
+    /**
+     * Finalize messages
+     * @param messages
+     * @returns
+     */
+    async _finalize(messages) {
+        if (messages.length === 0) {
+            return;
+        }
+        const pipeline = this.redis.pipeline();
+        const timestamp = Date.now();
+        const throughputKey = this._keys.getThroughputKey(this._groupName, timestamp);
+        const totalKey = this._keys.getTotalKey(this._groupName);
+        const ids = messages.map(m => m.streamMessageId);
+        pipeline.xack(this._streamName, this._groupName, ...ids);
+        if (this._collectMetrics) {
+            pipeline.incrby(throughputKey, ids.length);
+            pipeline.expire(throughputKey, 86400);
+            pipeline.incrby(totalKey, ids.length);
+        }
+        for (const msg of messages) {
+            const statusKey = this._keys.getJobStatusKey(msg.messageUuid);
+            pipeline.eval(lua_1.LUA_FINALIZE, 2, statusKey, this._streamName, this._groupName, timestamp, msg.streamMessageId);
+        }
+        await pipeline.exec();
+    }
+}
+exports.BaseWorker = BaseWorker;
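
The _fetchLoop implementations further down unpack the _readGroup reply as results[0][1]; the shape comes from the types declared in interfaces.d.ts. A minimal TypeScript sketch with illustrative values (the stream name and field values are assumptions, not taken from the package):

import { StreamMessage, XReadGroupResponse } from "./interfaces";

// Hypothetical XREADGROUP reply: one stream with one pending entry.
const results: XReadGroupResponse = [
    ["main", [
        // [streamMessageId, flat field/value array]
        ["1718000000000-0", ["id", "job-1", "target", "email", "retryCount", "0", "data", "{}"]],
    ]],
];
const messages: StreamMessage[] = results[0][1]; // entries of the first (and only) stream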

package/dist/src/batch-worker.d.ts
ADDED
@@ -0,0 +1,16 @@
+import Redis from "ioredis";
+import { BaseWorkerControlOptions, BatchWorkerOptions } from "./interfaces";
+import { BaseWorker } from "./base-worker";
+export declare abstract class BatchWorker<T extends Record<string, unknown>> extends BaseWorker<T> {
+    private readonly _batchSize;
+    private readonly _maxFetchCount;
+    constructor(redis: Redis, options: BatchWorkerOptions, controlOptions?: BaseWorkerControlOptions);
+    protected _fetchLoop(): Promise<void>;
+    /**
+     * Spawn worker for current processing
+     * @param messages
+     */
+    private spawnWorker;
+    private processInternal;
+    abstract process(data: T[]): Promise<void>;
+}

package/dist/src/batch-worker.js
ADDED
@@ -0,0 +1,89 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.BatchWorker = void 0;
+const interfaces_1 = require("./interfaces");
+const stream_message_entity_1 = require("./stream-message-entity");
+const base_worker_1 = require("./base-worker");
+class BatchWorker extends base_worker_1.BaseWorker {
+    constructor(redis, options, controlOptions = interfaces_1.defaultBaseWorkerControlOptions) {
+        super(redis, options, controlOptions);
+        this._batchSize = options.batchSize;
+        this._maxFetchCount = options.maxFetchCount;
+        if (this._batchSize === 1) {
+            console.warn('Why would you create batch worker with batch size 1');
+        }
+        if (this._batchSize < 1) {
+            throw new Error('Batch size cannot be less then 0');
+        }
+    }
+    async _fetchLoop() {
+        while (this._isRunning) {
+            const freeSlots = this._concurrency - this._activeCount;
+            if (freeSlots <= 0) {
+                await new Promise((resolve) => this._events.once('job_finished', resolve));
+                continue;
+            }
+            const calculatedCount = freeSlots * this._batchSize;
+            const itemsCount = Math.min(calculatedCount, this._maxFetchCount);
+            try {
+                const results = await this._readGroup(itemsCount);
+                if (!results) {
+                    continue;
+                }
+                const messages = results[0][1];
+                for (let i = 0; i < messages.length; i += this._batchSize) {
+                    const chunk = messages.slice(i, i + this._batchSize);
+                    this.spawnWorker(chunk);
+                }
+            }
+            catch (err) {
+                if (this._isRunning) { // Quicker grace shutdown
+                    console.error(`[${this._groupName}] Fetch Error: `, err);
+                    await new Promise((resolve) => setTimeout(resolve, 1000));
+                }
+            }
+        }
+    }
+    /**
+     * Spawn worker for current processing
+     * @param messages
+     */
+    spawnWorker(messages) {
+        this._activeCount++;
+        this.processInternal(messages).finally(() => {
+            this._activeCount--;
+            this._events.emit('job_finished');
+        });
+    }
+    async processInternal(rawMessages) {
+        const allMessages = rawMessages.map((msg) => new stream_message_entity_1.StreamMessageEntity(msg));
+        const messages = []; // Messages to process
+        const ignoredMessages = []; // Messages to ignore
+        for (const message of allMessages) {
+            if (message.routes.includes(this._groupName)) {
+                messages.push(message);
+            }
+            else {
+                ignoredMessages.push(message);
+            }
+        }
+        // ACK ignored messages
+        if (ignoredMessages.length) {
+            const ignoredMessagesStreamIds = ignoredMessages.map((msg) => msg.streamMessageId);
+            await this.redis.xack(this._streamName, this._groupName, ...ignoredMessagesStreamIds);
+        }
+        if (!messages.length) {
+            return;
+        }
+        const messagesData = messages.map((msg) => msg.data);
+        try {
+            await this.process(messagesData);
+            await this._finalize(messages);
+        }
+        catch (err) {
+            console.error(`[${this._groupName}] Processing failed`, err);
+            await this._handleFailure(messages, err.message);
+        }
+    }
+}
+exports.BatchWorker = BatchWorker;
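
A minimal usage sketch of the new options-object API, assuming BatchWorker is re-exported from the package index (the one-line index.d.ts/index.js changes suggest new exports); the class and payload names are illustrative. Note that start/stop are protected on BaseWorker, so a subclass re-exposes them, exactly as the package's own TestWorker does in the test diff further down:

import Redis from "ioredis";
import { BatchWorker } from "@koala42/redis-highway";

type AuditEvent = { userId: string };

class AuditWorker extends BatchWorker<AuditEvent> {
    // start/stop are protected on BaseWorker; widen them to public here
    async start() { return super.start(); }
    async stop() { return super.stop(); }
    async process(batch: AuditEvent[]): Promise<void> {
        // receives up to batchSize messages per invocation
        console.log(`persisting ${batch.length} audit events`);
    }
}

const worker = new AuditWorker(new Redis(), {
    groupName: "audit",
    streamName: "main",
    concurrency: 2,     // parallel batches in flight
    batchSize: 50,      // max messages handed to process() at once
    maxFetchCount: 200, // cap on a single XREADGROUP COUNT
}); // controlOptions falls back to defaultBaseWorkerControlOptions
worker.start().catch(console.error);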

package/dist/src/interfaces.d.ts
ADDED
@@ -0,0 +1,19 @@
+export type StreamMessage = [string, string[]];
+export type XReadGroupResponse = [string, StreamMessage[]][];
+export interface BaseWorkerOptions {
+    groupName: string;
+    streamName: string;
+    concurrency: number;
+}
+export interface BaseWorkerControlOptions {
+    maxRetries: number;
+    blockTimeMs: number;
+    claimIntervalMs: number;
+    minIdleTimeMs: number;
+    collectMetrics: boolean;
+}
+export declare const defaultBaseWorkerControlOptions: BaseWorkerControlOptions;
+export interface BatchWorkerOptions extends BaseWorkerOptions {
+    batchSize: number;
+    maxFetchCount: number;
+}

package/dist/src/interfaces.js
ADDED
@@ -0,0 +1,10 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.defaultBaseWorkerControlOptions = void 0;
+exports.defaultBaseWorkerControlOptions = {
+    maxRetries: 3,
+    minIdleTimeMs: 120000,
+    blockTimeMs: 2000,
+    claimIntervalMs: 120000,
+    collectMetrics: true
+};
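
Worker and BatchWorker fall back to this object when no controlOptions argument is given, so a caller tuning a single knob can spread the defaults. A sketch, assuming defaultBaseWorkerControlOptions is re-exported from the package index (otherwise it lives in dist/src/interfaces):

import { BaseWorkerControlOptions, defaultBaseWorkerControlOptions } from "@koala42/redis-highway";

// Stock defaults, but retry up to 5 times before a job lands in the DLQ.
const controlOptions: BaseWorkerControlOptions = {
    ...defaultBaseWorkerControlOptions,
    maxRetries: 5,
};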

package/dist/src/lua.d.ts
ADDED
@@ -0,0 +1 @@
+export declare const LUA_FINALIZE = "\n-- KEYS[1] = status key\n-- KEYS[2] = stream key\n-- ARGV[1] = group name\n-- ARGV[2] = timestamp\n-- ARGV[3] = msgId\n\n-- 1. Update status\nredis.call('HSET', KEYS[1], ARGV[1], ARGV[2])\n\n-- 2. Check completions\nlocal current_fields = redis.call('HLEN', KEYS[1])\nlocal target_str = redis.call('HGET', KEYS[1], '__target')\nlocal target = tonumber(target_str)\n\nif not target then\n return 0\nend\n\n-- 3. Cleanup if done\nif current_fields >= (target + 1) then\n redis.call('DEL', KEYS[1])\n redis.call('XDEL', KEYS[2], ARGV[3])\n return 1\nend\n\nreturn 0\n";

package/dist/src/lua.js
ADDED
@@ -0,0 +1,31 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.LUA_FINALIZE = void 0;
+exports.LUA_FINALIZE = `
+-- KEYS[1] = status key
+-- KEYS[2] = stream key
+-- ARGV[1] = group name
+-- ARGV[2] = timestamp
+-- ARGV[3] = msgId
+
+-- 1. Update status
+redis.call('HSET', KEYS[1], ARGV[1], ARGV[2])
+
+-- 2. Check completions
+local current_fields = redis.call('HLEN', KEYS[1])
+local target_str = redis.call('HGET', KEYS[1], '__target')
+local target = tonumber(target_str)
+
+if not target then
+ return 0
+end
+
+-- 3. Cleanup if done
+if current_fields >= (target + 1) then
+ redis.call('DEL', KEYS[1])
+ redis.call('XDEL', KEYS[2], ARGV[3])
+ return 1
+end
+
+return 0
+`;
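
Reading the script together with its call sites in base-worker.js: each eval passes two keys (the job status key and the stream key) plus the group name, a timestamp, and the stream message id. The HSET records the group's completion timestamp as a hash field, and the job is considered fully finalized once HLEN reaches __target + 1 (the extra 1 accounts for the __target field itself), at which point both the status hash and the stream entry are deleted. A sketch of the call shape, with an illustrative status key (the real one comes from KeyManager.getJobStatusKey):

import Redis from "ioredis";
import { LUA_FINALIZE } from "./lua";

async function finalizeOne(redis: Redis, jobUuid: string, streamMessageId: string): Promise<void> {
    const statusKey = `status:${jobUuid}`; // illustrative key name only
    // Marks group "email" done on stream "main"; the script cleans up
    // the hash and the stream entry once every targeted group has reported.
    await redis.eval(LUA_FINALIZE, 2, statusKey, "main", "email", Date.now(), streamMessageId);
}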

package/dist/{stream-message-entity.d.ts → src/stream-message-entity.d.ts}
CHANGED
@@ -7,8 +7,10 @@ export declare class StreamMessageEntity<T extends Record<string, unknown>> {
     private readonly _messageUuid;
     private readonly _retryCount;
     private readonly _data;
+    private readonly _rawData;
     constructor(message: StreamMessage);
     get data(): T;
+    get serializedData(): string;
     get streamMessageId(): string;
     get messageUuid(): string;
     get routes(): string[];

package/dist/{stream-message-entity.js → src/stream-message-entity.js}
CHANGED
@@ -15,10 +15,14 @@ class StreamMessageEntity {
         this._routes = this._fields['target'].split(',');
         this._retryCount = parseInt(this._fields['retryCount'] || '0', 10);
         this._data = JSON.parse(this._fields['data']);
+        this._rawData = this._fields['data'];
     }
     get data() {
         return this._data;
     }
+    get serializedData() {
+        return this._rawData;
+    }
     get streamMessageId() {
         return this._streamMessageId;
     }
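
The new _rawData/serializedData pair exists so that a retry or DLQ move re-enqueues the exact string from the original XADD instead of re-serializing the parsed object (see the xadd calls in _handleFailure above). A sketch of the entity's view of one raw entry, with illustrative values and under the assumption that the unchanged constructor code maps the flat field array into _fields:

import { StreamMessage } from "./interfaces";
import { StreamMessageEntity } from "./stream-message-entity";

const raw: StreamMessage = [
    "1718000000000-0",
    ["id", "job-1", "target", "email,audit", "retryCount", "0", "data", '{"to":"a@b.cz"}'],
];
const entity = new StreamMessageEntity<{ to: string }>(raw);
entity.routes;          // ["email", "audit"]
entity.data.to;         // "a@b.cz", parsed once in the constructor
entity.serializedData;  // the original '{"to":"a@b.cz"}' string, reused on retry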

package/dist/src/worker.d.ts
ADDED
@@ -0,0 +1,24 @@
+import Redis from "ioredis";
+import { BaseWorkerOptions, BaseWorkerControlOptions } from "./interfaces";
+import { BaseWorker } from "./base-worker";
+export declare abstract class Worker<T extends Record<string, unknown>> extends BaseWorker<T> {
+    constructor(redis: Redis, options: BaseWorkerOptions, controlOptions?: BaseWorkerControlOptions);
+    /**
+     * Fetch loop (the main loop)
+     * Based on free slots (concurrency - active count) gets new messages
+     * Spawns worker process for them
+     */
+    protected _fetchLoop(): Promise<void>;
+    /**
+     * Spawns async background worker
+     * @param msg
+     */
+    private spawnWorker;
+    /**
+     * Process message
+     * @param msg
+     * @returns
+     */
+    private processInternal;
+    abstract process(data: T): Promise<void>;
+}
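
For completeness, the single-message counterpart to the BatchWorker sketch above; as there, the names are illustrative and start/stop must be widened to public in the subclass:

import Redis from "ioredis";
import { Worker } from "@koala42/redis-highway";

type Email = { to: string };

class EmailWorker extends Worker<Email> {
    async start() { return super.start(); }
    async stop() { return super.stop(); }
    async process(data: Email): Promise<void> {
        // one message per invocation; throwing routes it through _handleFailure
        console.log(`sending to ${data.to}`);
    }
}

const worker = new EmailWorker(new Redis(), { groupName: "email", streamName: "main", concurrency: 5 });
worker.start().catch(console.error);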

package/dist/src/worker.js
ADDED
@@ -0,0 +1,72 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.Worker = void 0;
+const interfaces_1 = require("./interfaces");
+const stream_message_entity_1 = require("./stream-message-entity");
+const base_worker_1 = require("./base-worker");
+class Worker extends base_worker_1.BaseWorker {
+    constructor(redis, options, controlOptions = interfaces_1.defaultBaseWorkerControlOptions) {
+        super(redis, options, controlOptions);
+    }
+    /**
+     * Fetch loop (the main loop)
+     * Based on free slots (concurrency - active count) gets new messages
+     * Spawns worker process for them
+     */
+    async _fetchLoop() {
+        while (this._isRunning) {
+            const freeSlots = this._concurrency - this._activeCount;
+            if (freeSlots <= 0) {
+                await new Promise((resolve) => this._events.once('job_finished', resolve));
+                continue;
+            }
+            try {
+                const results = await this._readGroup(freeSlots);
+                if (results) {
+                    const messages = results[0][1];
+                    for (const msg of messages) {
+                        this.spawnWorker(msg);
+                    }
+                }
+            }
+            catch (err) {
+                console.error(`[${this._groupName}] Fetch Error:`, err);
+                await new Promise((resolve) => setTimeout(resolve, 1000));
+            }
+        }
+    }
+    /**
+     * Spawns async background worker
+     * @param msg
+     */
+    spawnWorker(msg) {
+        this._activeCount++;
+        this.processInternal(msg).finally(() => {
+            this._activeCount--;
+            this._events.emit('job_finished');
+        });
+    }
+    /**
+     * Process message
+     * @param msg
+     * @returns
+     */
+    async processInternal(msg) {
+        const streamMessage = new stream_message_entity_1.StreamMessageEntity(msg);
+        // Message was not targeted to this group
+        // We ACK it for this consumer group but we don't need to update any statuses, since its not for this group
+        if (!streamMessage.routes.includes(this._groupName)) {
+            await this.redis.xack(this._streamName, this._groupName, streamMessage.streamMessageId);
+            return;
+        }
+        try {
+            await this.process(streamMessage.data);
+            await this._finalize([streamMessage]);
+        }
+        catch (err) {
+            console.error(`[${this._groupName}] Job failed ${streamMessage.messageUuid}`, err);
+            await this._handleFailure([streamMessage], err.message);
+        }
+    }
+}
+exports.Worker = Worker;

package/dist/{queue.spec.js → test/queue.spec.js}
CHANGED
@@ -5,20 +5,36 @@ var __importDefault = (this && this.__importDefault) || function (mod) {
 Object.defineProperty(exports, "__esModule", { value: true });
 const vitest_1 = require("vitest");
 const ioredis_1 = __importDefault(require("ioredis"));
-const producer_1 = require("
-const worker_1 = require("
-const metrics_1 = require("
+const producer_1 = require("../src/producer");
+const worker_1 = require("../src/worker");
+const metrics_1 = require("../src/metrics");
 const uuid_1 = require("uuid");
 const REDIS_URL = process.env.REDIS_URL || 'redis://localhost:6379';
 class TestWorker extends worker_1.Worker {
     constructor(redis, groupName, streamName, concurrency = 1, maxRetries = 3, blockTimeMs = 100, claimIntervalMs = 60000, minIdleTimeMs = 300000) {
-        super(redis,
+        super(redis, {
+            groupName,
+            streamName,
+            concurrency
+        }, {
+            maxRetries,
+            blockTimeMs,
+            claimIntervalMs,
+            minIdleTimeMs,
+            collectMetrics: true
+        });
         this.processedCount = 0;
         this.lastProcessedId = null;
         this.shouldFail = false;
         this.failCount = 0;
         this.maxFails = 0;
     }
+    async start() {
+        return super.start();
+    }
+    async stop() {
+        return super.stop();
+    }
     async process(data) {
         if (this.shouldFail) {
             this.failCount++;

package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@koala42/redis-highway",
-  "version": "0.1
+  "version": "0.2.1",
   "description": "High performance redis queue",
   "license": "MIT",
   "author": {
@@ -9,8 +9,8 @@
     "url": "https://github.com/stranavad"
   },
   "type": "commonjs",
-  "main": "dist/index.js",
-  "types": "dist/index.d.ts",
+  "main": "dist/src/index.js",
+  "types": "dist/src/index.d.ts",
   "files": [
     "dist"
   ],
@@ -19,8 +19,7 @@
     "test": "vitest run src/queue.spec.ts",
     "test:all": "vitest run src",
     "build": "npm run clean && tsc",
-    "prepublish": "npm run build"
-    "publish": "npm run test && npm run clean && npm run build && npm publish --public"
+    "prepublish": "npm run build"
   },
   "keywords": [
     "redis",

package/dist/batch-worker.d.ts
DELETED
@@ -1,40 +0,0 @@
-import Redis from "ioredis";
-export declare abstract class BatchWorker<T extends Record<string, unknown>> {
-    protected redis: Redis;
-    protected groupName: string;
-    protected streamName: string;
-    protected batchSize: number;
-    protected concurrency: number;
-    protected maxFetchSize: number;
-    protected maxRetries: number;
-    protected blockTimeMs: number;
-    protected maxFetchCount: number;
-    protected claimIntervalMs: number;
-    protected minIdleTimeMs: number;
-    private isRunning;
-    private activeCount;
-    private keys;
-    private blockingRedis;
-    private readonly events;
-    private readonly consumerId;
-    constructor(redis: Redis, groupName: string, streamName: string, batchSize?: number, // How many jobs are passed to the process function (max)
-    concurrency?: number, // How many concurrent loops should run
-    maxFetchSize?: number, // How many jobs are fetched at once from redis stream
-    maxRetries?: number, blockTimeMs?: number, // How long should the blocking redis wait for logs from stream
-    maxFetchCount?: number, claimIntervalMs?: number, // Check for stuck jobs every minute
-    minIdleTimeMs?: number);
-    start(): Promise<void>;
-    stop(): Promise<void>;
-    private autoClaimLoop;
-    private fetchLoop;
-    /**
-     * Spawn worker for current processing
-     * @param messages
-     */
-    private spawnWorker;
-    private processInternal;
-    private handleFailure;
-    private finalize;
-    private getConsumerName;
-    abstract process(data: T[]): Promise<void>;
-}