@koala42/redis-highway 0.2.9 → 0.2.11
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/{src/keys.d.ts → keys.d.ts} +2 -1
- package/dist/{src/keys.js → keys.js} +3 -2
- package/dist/{src/metrics.d.ts → metrics.d.ts} +2 -2
- package/dist/{src/metrics.js → metrics.js} +4 -4
- package/package.json +5 -7
- package/dist/test/queue.spec.d.ts +0 -1
- package/dist/test/queue.spec.js +0 -256
- /package/dist/{src/base-worker.d.ts → base-worker.d.ts} +0 -0
- /package/dist/{src/base-worker.js → base-worker.js} +0 -0
- /package/dist/{src/batch-worker.d.ts → batch-worker.d.ts} +0 -0
- /package/dist/{src/batch-worker.js → batch-worker.js} +0 -0
- /package/dist/{src/index.d.ts → index.d.ts} +0 -0
- /package/dist/{src/index.js → index.js} +0 -0
- /package/dist/{src/interfaces.d.ts → interfaces.d.ts} +0 -0
- /package/dist/{src/interfaces.js → interfaces.js} +0 -0
- /package/dist/{src/lua.d.ts → lua.d.ts} +0 -0
- /package/dist/{src/lua.js → lua.js} +0 -0
- /package/dist/{src/producer.d.ts → producer.d.ts} +0 -0
- /package/dist/{src/producer.js → producer.js} +0 -0
- /package/dist/{src/stream-message-entity.d.ts → stream-message-entity.d.ts} +0 -0
- /package/dist/{src/stream-message-entity.js → stream-message-entity.js} +0 -0
- /package/dist/{src/worker.d.ts → worker.d.ts} +0 -0
- /package/dist/{src/worker.js → worker.js} +0 -0
|
@@ -14,8 +14,9 @@ export declare class KeyManager {
|
|
|
14
14
|
getDlqStreamKey(): string;
|
|
15
15
|
/**
|
|
16
16
|
* Metrics for storing throughput
|
|
17
|
+
* @param current - if true, takes throughput in the running minute, if false, takes -1 minute for closed throughput bucket
|
|
17
18
|
*/
|
|
18
|
-
getThroughputKey(groupName: string, timestamp: number): string;
|
|
19
|
+
getThroughputKey(groupName: string, timestamp: number, current?: boolean): string;
|
|
19
20
|
/**
|
|
20
21
|
* Metrics - retries key
|
|
21
22
|
*/
|
|
@@ -24,10 +24,11 @@ class KeyManager {
|
|
|
24
24
|
}
|
|
25
25
|
/**
|
|
26
26
|
* Metrics for storing throughput
|
|
27
|
+
* @param current - if true, takes throughput in the running minute, if false, takes -1 minute for closed throughput bucket
|
|
27
28
|
*/
|
|
28
|
-
getThroughputKey(groupName, timestamp) {
|
|
29
|
+
getThroughputKey(groupName, timestamp, current = true) {
|
|
29
30
|
const minute = Math.floor(timestamp / 60000) * 60000;
|
|
30
|
-
return `metrics:throughput:${this.streamName}:${groupName}:${minute}`;
|
|
31
|
+
return `metrics:throughput:${this.streamName}:${groupName}:${current ? minute : minute - 60000}`;
|
|
31
32
|
}
|
|
32
33
|
/**
|
|
33
34
|
* Metrics - retries key
|
|
@@ -13,12 +13,12 @@ export declare class Metrics {
|
|
|
13
13
|
* Get current metrics for the queue
|
|
14
14
|
* @param groupNames - List of consumer groups to fetch throughput for
|
|
15
15
|
*/
|
|
16
|
-
getMetrics(groupNames: string[]): Promise<QueueMetrics>;
|
|
16
|
+
getMetrics(groupNames: string[], current?: boolean): Promise<QueueMetrics>;
|
|
17
17
|
/**
|
|
18
18
|
* Get prometheus compatible metrics
|
|
19
19
|
* @param groupNames target group names for throughput metrics
|
|
20
20
|
* @param prefix - export prefix
|
|
21
21
|
* @returns metrics as string
|
|
22
22
|
*/
|
|
23
|
-
getPrometheusMetrics(groupNames: string[], prefix?: string): Promise<string>;
|
|
23
|
+
getPrometheusMetrics(groupNames: string[], prefix?: string, current?: boolean): Promise<string>;
|
|
24
24
|
}
|
|
@@ -12,13 +12,13 @@ class Metrics {
|
|
|
12
12
|
* Get current metrics for the queue
|
|
13
13
|
* @param groupNames - List of consumer groups to fetch throughput for
|
|
14
14
|
*/
|
|
15
|
-
async getMetrics(groupNames) {
|
|
15
|
+
async getMetrics(groupNames, current = false) {
|
|
16
16
|
const pipeline = this.redis.pipeline();
|
|
17
17
|
pipeline.xlen(this.streamName);
|
|
18
18
|
pipeline.xlen(this.keys.getDlqStreamKey());
|
|
19
19
|
const timestamp = Date.now();
|
|
20
20
|
groupNames.forEach(group => {
|
|
21
|
-
pipeline.get(this.keys.getThroughputKey(group, timestamp));
|
|
21
|
+
pipeline.get(this.keys.getThroughputKey(group, timestamp, current));
|
|
22
22
|
});
|
|
23
23
|
const results = await pipeline.exec();
|
|
24
24
|
if (!results) {
|
|
@@ -49,13 +49,13 @@ class Metrics {
|
|
|
49
49
|
* @param prefix - export prefix
|
|
50
50
|
* @returns metrics as string
|
|
51
51
|
*/
|
|
52
|
-
async getPrometheusMetrics(groupNames, prefix = 'redis_highway_queue') {
|
|
52
|
+
async getPrometheusMetrics(groupNames, prefix = 'redis_highway_queue', current = false) {
|
|
53
53
|
const pipeline = this.redis.pipeline();
|
|
54
54
|
pipeline.xlen(this.streamName);
|
|
55
55
|
pipeline.xlen(this.keys.getDlqStreamKey());
|
|
56
56
|
const timestamp = Date.now();
|
|
57
57
|
groupNames.forEach(group => {
|
|
58
|
-
pipeline.get(this.keys.getThroughputKey(group, timestamp));
|
|
58
|
+
pipeline.get(this.keys.getThroughputKey(group, timestamp, current));
|
|
59
59
|
pipeline.get(this.keys.getTotalKey(group));
|
|
60
60
|
pipeline.get(this.keys.getRetriesKey(group, timestamp));
|
|
61
61
|
});
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@koala42/redis-highway",
|
|
3
|
-
"version": "0.2.9",
|
|
3
|
+
"version": "0.2.11",
|
|
4
4
|
"description": "High performance redis queue",
|
|
5
5
|
"license": "MIT",
|
|
6
6
|
"author": {
|
|
@@ -10,19 +10,17 @@
|
|
|
10
10
|
},
|
|
11
11
|
"repository": {
|
|
12
12
|
"url": "https://github.com/Koala42/redis-highway",
|
|
13
|
-
"type": "git",
|
|
14
|
-
"directory": "packages/redis-highway"
|
|
13
|
+
"type": "git"
|
|
15
14
|
},
|
|
16
15
|
"type": "commonjs",
|
|
17
|
-
"main": "dist/src/index.js",
|
|
18
|
-
"types": "dist/src/index.d.ts",
|
|
16
|
+
"main": "dist/index.js",
|
|
17
|
+
"types": "dist/index.d.ts",
|
|
19
18
|
"files": [
|
|
20
19
|
"dist"
|
|
21
20
|
],
|
|
22
21
|
"scripts": {
|
|
23
22
|
"clean": "rimraf dist",
|
|
24
23
|
"test": "vitest run test",
|
|
25
|
-
"test:all": "vitest run test",
|
|
26
24
|
"build": "npm run clean && tsc",
|
|
27
25
|
"prepublish": "npm run build"
|
|
28
26
|
},
|
|
@@ -42,4 +40,4 @@
|
|
|
42
40
|
"typescript": "^5.9.3",
|
|
43
41
|
"vitest": "^4.0.16"
|
|
44
42
|
}
|
|
45
|
-
}
|
|
43
|
+
}
|
|
@@ -1 +0,0 @@
|
|
|
1
|
-
export {};
|
package/dist/test/queue.spec.js
DELETED
|
@@ -1,256 +0,0 @@
|
|
|
1
|
-
"use strict";
|
|
2
|
-
var __importDefault = (this && this.__importDefault) || function (mod) {
|
|
3
|
-
return (mod && mod.__esModule) ? mod : { "default": mod };
|
|
4
|
-
};
|
|
5
|
-
Object.defineProperty(exports, "__esModule", { value: true });
|
|
6
|
-
const vitest_1 = require("vitest");
|
|
7
|
-
const ioredis_1 = __importDefault(require("ioredis"));
|
|
8
|
-
const producer_1 = require("../src/producer");
|
|
9
|
-
const worker_1 = require("../src/worker");
|
|
10
|
-
const metrics_1 = require("../src/metrics");
|
|
11
|
-
const uuid_1 = require("uuid");
|
|
12
|
-
const REDIS_URL = process.env.REDIS_URL || 'redis://localhost:6379';
|
|
13
|
-
class TestWorker extends worker_1.Worker {
|
|
14
|
-
constructor(redis, groupName, streamName, concurrency = 1, maxRetries = 3, blockTimeMs = 100, claimIntervalMs = 60000, minIdleTimeMs = 300000) {
|
|
15
|
-
super(redis, {
|
|
16
|
-
groupName,
|
|
17
|
-
streamName,
|
|
18
|
-
concurrency
|
|
19
|
-
}, {
|
|
20
|
-
maxRetries,
|
|
21
|
-
blockTimeMs,
|
|
22
|
-
claimIntervalMs,
|
|
23
|
-
minIdleTimeMs,
|
|
24
|
-
collectMetrics: true
|
|
25
|
-
});
|
|
26
|
-
this.processedCount = 0;
|
|
27
|
-
this.lastProcessedId = null;
|
|
28
|
-
this.shouldFail = false;
|
|
29
|
-
this.failCount = 0;
|
|
30
|
-
this.maxFails = 0;
|
|
31
|
-
}
|
|
32
|
-
async start() {
|
|
33
|
-
return super.start();
|
|
34
|
-
}
|
|
35
|
-
async stop() {
|
|
36
|
-
return super.stop();
|
|
37
|
-
}
|
|
38
|
-
async process(data) {
|
|
39
|
-
if (this.shouldFail) {
|
|
40
|
-
this.failCount++;
|
|
41
|
-
if (this.maxFails > 0 && this.failCount > this.maxFails) {
|
|
42
|
-
// Stop failing after maxFails
|
|
43
|
-
}
|
|
44
|
-
else {
|
|
45
|
-
throw new Error("Simulated Failure");
|
|
46
|
-
}
|
|
47
|
-
}
|
|
48
|
-
this.processedCount++;
|
|
49
|
-
if (data && data.id) {
|
|
50
|
-
this.lastProcessedId = data.id;
|
|
51
|
-
}
|
|
52
|
-
}
|
|
53
|
-
}
|
|
54
|
-
(0, vitest_1.describe)('Redis Queue Integration', () => {
|
|
55
|
-
let redis;
|
|
56
|
-
let producer;
|
|
57
|
-
let streamName;
|
|
58
|
-
let workers = [];
|
|
59
|
-
(0, vitest_1.beforeEach)(() => {
|
|
60
|
-
redis = new ioredis_1.default(REDIS_URL);
|
|
61
|
-
streamName = `test-queue-${(0, uuid_1.v7)()}`;
|
|
62
|
-
producer = new producer_1.Producer(redis, streamName);
|
|
63
|
-
workers = [];
|
|
64
|
-
});
|
|
65
|
-
(0, vitest_1.afterEach)(async () => {
|
|
66
|
-
for (const w of workers) {
|
|
67
|
-
await w.stop();
|
|
68
|
-
}
|
|
69
|
-
await new Promise(r => setTimeout(r, 500));
|
|
70
|
-
// Cleanup Redis keys using the existing connection before closing
|
|
71
|
-
if (redis.status === 'ready') {
|
|
72
|
-
const keys = await redis.keys(`${streamName}*`);
|
|
73
|
-
if (keys.length)
|
|
74
|
-
await redis.del(...keys);
|
|
75
|
-
}
|
|
76
|
-
redis.disconnect();
|
|
77
|
-
});
|
|
78
|
-
const waitFor = async (condition, timeout = 5000) => {
|
|
79
|
-
const start = Date.now();
|
|
80
|
-
while (Date.now() - start < timeout) {
|
|
81
|
-
if (await condition())
|
|
82
|
-
return true;
|
|
83
|
-
await new Promise(r => setTimeout(r, 100));
|
|
84
|
-
}
|
|
85
|
-
return false;
|
|
86
|
-
};
|
|
87
|
-
(0, vitest_1.describe)('Core Functionality', () => {
|
|
88
|
-
(0, vitest_1.it)('Should deliver message to all target groups', async () => {
|
|
89
|
-
const w1 = new TestWorker(redis, 'group-A', streamName, 1, 3, 100);
|
|
90
|
-
const w2 = new TestWorker(redis, 'group-B', streamName, 1, 3, 100);
|
|
91
|
-
workers.push(w1, w2);
|
|
92
|
-
await w1.start();
|
|
93
|
-
await w2.start();
|
|
94
|
-
const id = await producer.push({ id: 'msg-1' }, ['group-A', 'group-B']);
|
|
95
|
-
await waitFor(() => w1.processedCount === 1 && w2.processedCount === 1);
|
|
96
|
-
(0, vitest_1.expect)(w1.processedCount).toBe(1);
|
|
97
|
-
(0, vitest_1.expect)(w2.processedCount).toBe(1);
|
|
98
|
-
// Test cleanup keys (but not explicit XDEL here yet, checking keys gone)
|
|
99
|
-
const statusKey = `${streamName}:status:${id}`;
|
|
100
|
-
const dataKey = `${streamName}:data:${id}`;
|
|
101
|
-
(0, vitest_1.expect)(await redis.exists(statusKey)).toBe(0);
|
|
102
|
-
(0, vitest_1.expect)(await redis.exists(dataKey)).toBe(0);
|
|
103
|
-
});
|
|
104
|
-
(0, vitest_1.it)('Should only deliver to targeted groups', async () => {
|
|
105
|
-
const wA = new TestWorker(redis, 'group-A', streamName, 1, 3, 100);
|
|
106
|
-
const wB = new TestWorker(redis, 'group-B', streamName, 1, 3, 100);
|
|
107
|
-
workers.push(wA, wB);
|
|
108
|
-
await wA.start();
|
|
109
|
-
await wB.start();
|
|
110
|
-
await producer.push({ id: 'msg-only-a' }, ['group-A']);
|
|
111
|
-
await waitFor(() => wA.processedCount === 1);
|
|
112
|
-
(0, vitest_1.expect)(wA.processedCount).toBe(1);
|
|
113
|
-
(0, vitest_1.expect)(wB.processedCount).toBe(0);
|
|
114
|
-
});
|
|
115
|
-
(0, vitest_1.it)('Should retry only the failed group', async () => {
|
|
116
|
-
const wOk = new TestWorker(redis, 'group-Ok', streamName, 1, 3, 100);
|
|
117
|
-
const wFail = new TestWorker(redis, 'group-Fail', streamName, 1, 3, 100);
|
|
118
|
-
wFail.shouldFail = true;
|
|
119
|
-
wFail.maxFails = 1; // Fail once, then succeed
|
|
120
|
-
workers.push(wOk, wFail);
|
|
121
|
-
await wOk.start();
|
|
122
|
-
await wFail.start();
|
|
123
|
-
await producer.push({ id: 'retry-test' }, ['group-Ok', 'group-Fail']);
|
|
124
|
-
// Wait for wOk to finish and wFail to try at least twice (fail + success)
|
|
125
|
-
await waitFor(() => wOk.processedCount === 1 && wFail.processedCount === 1, 8000);
|
|
126
|
-
(0, vitest_1.expect)(wOk.processedCount).toBe(1); // Processed once
|
|
127
|
-
(0, vitest_1.expect)(wFail.failCount).toBeGreaterThanOrEqual(1); // Failed at least once
|
|
128
|
-
(0, vitest_1.expect)(wFail.processedCount).toBe(1); // Eventually succeeded
|
|
129
|
-
(0, vitest_1.expect)(wOk.processedCount).toBe(1); // wOk should NOT process the retry
|
|
130
|
-
});
|
|
131
|
-
(0, vitest_1.it)('Should move to DLQ after max retries', async () => {
|
|
132
|
-
const wDead = new TestWorker(redis, 'group-Dead', streamName, 1, 3, 100);
|
|
133
|
-
wDead.shouldFail = true;
|
|
134
|
-
wDead.maxFails = 10; // Fail forever (more than max retries which is 3)
|
|
135
|
-
workers.push(wDead);
|
|
136
|
-
await wDead.start();
|
|
137
|
-
const id = await producer.push({ id: 'dlq-test' }, ['group-Dead']);
|
|
138
|
-
await waitFor(async () => {
|
|
139
|
-
const len = await redis.xlen(`${streamName}:dlq`);
|
|
140
|
-
return len > 0;
|
|
141
|
-
}, 10000);
|
|
142
|
-
const dlqLen = await redis.xlen(`${streamName}:dlq`);
|
|
143
|
-
(0, vitest_1.expect)(dlqLen).toBe(1);
|
|
144
|
-
(0, vitest_1.expect)(await redis.exists(`${streamName}:status:${id}`)).toBe(0);
|
|
145
|
-
});
|
|
146
|
-
});
|
|
147
|
-
(0, vitest_1.describe)('Metrics & Monitoring', () => {
|
|
148
|
-
(0, vitest_1.it)('Should track throughput and queue size', async () => {
|
|
149
|
-
const w = new TestWorker(redis, 'group-Metrics', streamName, 1, 3, 100);
|
|
150
|
-
const metricsService = new metrics_1.Metrics(redis, streamName);
|
|
151
|
-
workers.push(w);
|
|
152
|
-
await w.start();
|
|
153
|
-
let metrics = await metricsService.getMetrics(['group-Metrics']);
|
|
154
|
-
(0, vitest_1.expect)(metrics.dlqLength).toBe(0);
|
|
155
|
-
const id = await producer.push({ id: 'metrics-1' }, ['group-Metrics']);
|
|
156
|
-
await waitFor(() => w.processedCount === 1);
|
|
157
|
-
metrics = await metricsService.getMetrics(['group-Metrics']);
|
|
158
|
-
(0, vitest_1.expect)(metrics.throughput['group-Metrics']).toBeGreaterThanOrEqual(1);
|
|
159
|
-
w.shouldFail = true;
|
|
160
|
-
w.maxFails = 10;
|
|
161
|
-
await producer.push({ id: 'metrics-fail' }, ['group-Metrics']);
|
|
162
|
-
await waitFor(() => redis.xlen(`${streamName}:dlq`).then(len => len > 0));
|
|
163
|
-
metrics = await metricsService.getMetrics(['group-Metrics']);
|
|
164
|
-
(0, vitest_1.expect)(metrics.dlqLength).toBe(1);
|
|
165
|
-
});
|
|
166
|
-
(0, vitest_1.it)('Should export Prometheus metrics', async () => {
|
|
167
|
-
const w = new TestWorker(redis, 'group-Prom', streamName, 1, 3, 100);
|
|
168
|
-
const metricsService = new metrics_1.Metrics(redis, streamName);
|
|
169
|
-
workers.push(w);
|
|
170
|
-
await w.start();
|
|
171
|
-
await producer.push({ id: 'prom-1' }, ['group-Prom']);
|
|
172
|
-
await producer.push({ id: 'prom-2' }, ['group-Prom']);
|
|
173
|
-
await waitFor(() => w.processedCount === 2);
|
|
174
|
-
const promOutput = await metricsService.getPrometheusMetrics(['group-Prom']);
|
|
175
|
-
(0, vitest_1.expect)(promOutput).toContain(`# TYPE redis_highway_queue_throughput_1m gauge`);
|
|
176
|
-
(0, vitest_1.expect)(promOutput).toContain(`redis_highway_queue_throughput_1m{stream="${streamName}", group="group-Prom"} 2`);
|
|
177
|
-
(0, vitest_1.expect)(promOutput).toContain(`# TYPE redis_highway_queue_jobs_total counter`);
|
|
178
|
-
(0, vitest_1.expect)(promOutput).toContain(`redis_highway_queue_jobs_total{stream="${streamName}", group="group-Prom"} 2`);
|
|
179
|
-
(0, vitest_1.expect)(promOutput).toContain(`# TYPE redis_highway_queue_waiting_jobs gauge`);
|
|
180
|
-
});
|
|
181
|
-
});
|
|
182
|
-
(0, vitest_1.describe)('Stream Cleanup', () => {
|
|
183
|
-
(0, vitest_1.it)('Should delete message from stream after processing', async () => {
|
|
184
|
-
const w1 = new TestWorker(redis, 'group-A', streamName, 1, 3, 100);
|
|
185
|
-
workers.push(w1);
|
|
186
|
-
await w1.start();
|
|
187
|
-
const id = await producer.push({ id: 'msg-cleanup' }, ['group-A']);
|
|
188
|
-
// Wait for processing
|
|
189
|
-
await waitFor(() => w1.processedCount === 1);
|
|
190
|
-
(0, vitest_1.expect)(w1.processedCount).toBe(1);
|
|
191
|
-
// Wait for stream to be empty
|
|
192
|
-
const success = await waitFor(async () => {
|
|
193
|
-
const len = await redis.xlen(streamName);
|
|
194
|
-
return len === 0;
|
|
195
|
-
});
|
|
196
|
-
(0, vitest_1.expect)(success).toBe(true);
|
|
197
|
-
const messages = await redis.xrange(streamName, '-', '+');
|
|
198
|
-
(0, vitest_1.expect)(messages.length).toBe(0);
|
|
199
|
-
});
|
|
200
|
-
(0, vitest_1.it)('Should delete message from stream only after ALL groups processed it', async () => {
|
|
201
|
-
const w1 = new TestWorker(redis, 'group-A', streamName, 1, 3, 100);
|
|
202
|
-
const w2 = new TestWorker(redis, 'group-B', streamName, 1, 3, 100);
|
|
203
|
-
workers.push(w1, w2);
|
|
204
|
-
await w1.start(); // Only start w1
|
|
205
|
-
const id = await producer.push({ id: 'msg-multi' }, ['group-A', 'group-B']);
|
|
206
|
-
// Wait for w1 to process
|
|
207
|
-
await waitFor(() => w1.processedCount === 1);
|
|
208
|
-
let len = await redis.xlen(streamName);
|
|
209
|
-
(0, vitest_1.expect)(len).toBe(1); // Should still exist because group-B pending
|
|
210
|
-
// Start w2
|
|
211
|
-
await w2.start();
|
|
212
|
-
// Wait for w2 to process
|
|
213
|
-
await waitFor(() => w2.processedCount === 1);
|
|
214
|
-
// Wait for stream to be empty
|
|
215
|
-
const success = await waitFor(async () => {
|
|
216
|
-
len = await redis.xlen(streamName);
|
|
217
|
-
return len === 0;
|
|
218
|
-
});
|
|
219
|
-
(0, vitest_1.expect)(success).toBe(true);
|
|
220
|
-
});
|
|
221
|
-
});
|
|
222
|
-
(0, vitest_1.it)('Should recover stuck messages via Auto-Claim', async () => {
|
|
223
|
-
const groupName = 'group-Recover';
|
|
224
|
-
// Start worker with short minIdleTime (e.g., 1000ms) to trigger claim quickly
|
|
225
|
-
// minIdleTimeMs = 1000. claimIntervalMs = 500 (check frequently)
|
|
226
|
-
const w = new TestWorker(redis, groupName, streamName, 1, 3, 100, 500, 1000);
|
|
227
|
-
workers.push(w);
|
|
228
|
-
// 1. Setup group manually
|
|
229
|
-
await redis.xgroup('CREATE', streamName, groupName, '0', 'MKSTREAM');
|
|
230
|
-
// 2. Push message
|
|
231
|
-
const id = await producer.push({ id: 'stuck-msg' }, [groupName]);
|
|
232
|
-
// 3. Simulate a consumer reading but crashing (no ACK)
|
|
233
|
-
// consumer name 'bad-consumer'
|
|
234
|
-
await redis.xreadgroup('GROUP', groupName, 'bad-consumer', 'COUNT', 1, 'STREAMS', streamName, '>');
|
|
235
|
-
// 4. Wait for minIdleTime (1000ms) + buffer
|
|
236
|
-
await new Promise(r => setTimeout(r, 1200));
|
|
237
|
-
// 5. Start our worker
|
|
238
|
-
await w.start();
|
|
239
|
-
// 6. Verify worker picks it up
|
|
240
|
-
await waitFor(() => w.processedCount === 1, 5000);
|
|
241
|
-
(0, vitest_1.expect)(w.processedCount).toBe(1);
|
|
242
|
-
(0, vitest_1.expect)(w.lastProcessedId).toBe('stuck-msg');
|
|
243
|
-
// Verify it was claimed (delivered to new consumer)
|
|
244
|
-
// We can check PEL or just trust processedCount
|
|
245
|
-
const pending = await redis.xpending(streamName, groupName);
|
|
246
|
-
// After processing, it should be ACKed, so pending count => 0 (if deleted)
|
|
247
|
-
// or if finalize runs, it deletes the message entirely.
|
|
248
|
-
// Wait for cleanup (finalize runs after process)
|
|
249
|
-
await waitFor(async () => {
|
|
250
|
-
const len = await redis.xlen(streamName);
|
|
251
|
-
return len === 0;
|
|
252
|
-
}, 2000);
|
|
253
|
-
const len = await redis.xlen(streamName);
|
|
254
|
-
(0, vitest_1.expect)(len).toBe(0);
|
|
255
|
-
});
|
|
256
|
-
});
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|