@pingpolls/redisq 1.0.2 → 1.0.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/app.d.ts +144 -0
- package/dist/app.js +55 -0
- package/package.json +1 -1
- package/dist/app.test.d.ts +0 -1
- package/dist/app.test.js +0 -427
- package/dist/benchmark/stress-worker.d.ts +0 -1
- package/dist/benchmark/stress-worker.js +0 -39
- package/dist/benchmark/stress.d.ts +0 -2
- package/dist/benchmark/stress.js +0 -219
package/dist/app.d.ts
CHANGED
@@ -1,57 +1,149 @@
 import { RedisClient as BunRedisClient } from "bun";
+/**
+ * Options for initializing the RedisQ client.
+ * You can either provide a Redis client instance or connection details.
+ */
 export type QueueOptions = {
+    /** A Bun Redis client instance. */
     redis: BunRedisClient;
 } | {
+    /** Redis server host. */
     host: string;
+    /** Redis server port. */
     port: string;
+    /** Redis server user. */
     user?: string;
+    /** Redis server password. */
     password?: string;
+    /** Namespace for all Redis keys. */
     namespace?: string;
+    /** Whether to use TLS for the connection. */
     tls?: boolean;
 };
+/**
+ * Options for creating a new queue.
+ * The options are different for regular and batch queues.
+ */
 export type CreateQueueOptions<QueueName extends string = string> = QueueName extends `${string}:batch` ? {
+    /**
+     * The name of the queue. Must end with `:batch` for batch queues.
+     * @example "my-queue:batch"
+     */
     qname: QueueName;
+    /** Maximum message size in bytes.
+     * @default 65536
+     */
     maxsize?: number;
+    /**
+     * Maximum number of retries for a message.
+     * `0` = no retry, `-1` = unlimited, `n` = max retries.
+     * @default 0
+     */
     maxRetries?: number;
+    /**
+     * Maximum backoff time in seconds for exponential backoff.
+     * @default 30
+     */
     maxBackoffSeconds?: number;
+    /**
+     * Batch processing interval in seconds.
+     * @default 60
+     */
     every?: number;
 } : {
+    /**
+     * The name of the queue.
+     * @example "my-queue"
+     */
     qname: QueueName;
+    /** Maximum message size in bytes.
+     * @default 65536
+     */
     maxsize?: number;
+    /**
+     * Maximum number of retries for a message.
+     * `0` = no retry, `-1` = unlimited, `n` = max retries.
+     * @default 0
+     */
     maxRetries?: number;
+    /**
+     * Maximum backoff time in seconds for exponential backoff.
+     * @default 30
+     */
     maxBackoffSeconds?: number;
 };
+/**
+ * Attributes of a queue.
+ */
 export interface QueueAttributes {
+    /** Maximum message size in bytes. */
     maxsize: number;
+    /** Timestamp when the queue was created. */
     created: number;
+    /** Number of pending messages in the queue. */
     msgs: number;
+    /** Whether the queue is a batch queue. */
     isBatch: boolean;
+    /** Maximum number of retries for a message. */
     maxRetries: number;
+    /** Maximum backoff time in seconds for exponential backoff. */
     maxBackoffSeconds: number;
+    /** Batch processing interval in seconds (for batch queues only). */
     every?: number;
 }
+/**
+ * Options for sending a message to a regular queue.
+ */
 export interface SendMessageOptions {
+    /** The name of the queue. */
     qname: string;
+    /** The message content. */
     message: string;
+    /** Optional delay in milliseconds before the message is processed. */
     delay?: number;
 }
+/**
+ * Options for sending a message to a batch queue.
+ */
 export interface SendBatchMessageOptions {
+    /** The name of the batch queue. */
     qname: string;
+    /**
+     * The ID of the batch. Messages with the same `batchId` are processed together.
+     */
     batchId: string;
+    /** The message content. */
     message: string;
 }
+/**
+ * A message received from a regular queue.
+ */
 export interface Message {
+    /** The unique ID of the message. */
     id: string;
+    /** The message content. */
     message: string;
+    /** Timestamp when the message was sent. */
     sent: number;
+    /** The current attempt number (1-based). */
     attempt: number;
 }
+/**
+ * A batch of messages received from a batch queue.
+ */
 export interface BatchMessage {
+    /** The ID of the batch. */
     batchId: string;
+    /** Array of messages in the batch. */
     messages: Omit<Message, "attempt">[];
+    /** Timestamp when the batch was created. */
     sent: number;
+    /** The current attempt number for the batch (1-based). */
     attempt: number;
 }
+/**
+ * A lightweight, type-safe Redis-based message queue for Bun.
+ */
 export declare class RedisQ {
     private redis;
     private redisUrl;
@@ -59,14 +151,47 @@ export declare class RedisQ {
     private workers;
     private batchJobs;
     private isClosing;
+    /**
+     * Initializes the RedisQ client.
+     * @param options - Options for initializing the client.
+     */
     constructor(options: QueueOptions);
     private getKey;
     private isBatchQueue;
+    /**
+     * Creates a new queue with the specified options.
+     * @param options - Options for creating the queue.
+     * @returns `true` if the queue was created, `false` if it already exists.
+     */
     createQueue<QueueName extends string>(options: CreateQueueOptions<QueueName>): Promise<boolean>;
+    /**
+     * Lists all queue names.
+     * @returns An array of queue names.
+     */
     listQueues(): Promise<string[]>;
+    /**
+     * Gets the attributes of a queue.
+     * @param qname - The name of the queue.
+     * @returns The queue attributes, or `null` if the queue does not exist.
+     */
     getQueue(qname: string): Promise<QueueAttributes | null>;
+    /**
+     * Deletes a queue and all its data.
+     * @param qname - The name of the queue to delete.
+     * @returns `true` if the queue was deleted, `false` if it did not exist.
+     */
     deleteQueue(qname: string): Promise<boolean>;
+    /**
+     * Sends a message to a regular queue.
+     * @param options - Options for sending the message.
+     * @returns The unique ID of the message.
+     */
     sendMessage(options: SendMessageOptions): Promise<string>;
+    /**
+     * Sends a message to a batch queue.
+     * @param options - Options for sending the batch message.
+     * @returns The unique ID of the message.
+     */
     sendBatchMessage(options: SendBatchMessageOptions): Promise<string>;
     private encodeMessage;
     private decodeMessage;
@@ -74,12 +199,24 @@ export declare class RedisQ {
     private decodeBatchMeta;
     private fetchMessages;
     private fetchBatchMessage;
+    /**
+     * Manually deletes a message from the queue.
+     * @param qname - The name of the queue.
+     * @param id - The ID of the message to delete.
+     * @returns `true` if the message was deleted, `false` otherwise.
+     */
     deleteMessage(qname: string, id: string): Promise<boolean>;
     private deleteBatch;
     private retryMessage;
     private retryBatch;
     private processDelayedMessages;
     private processBatches;
+    /**
+     * Starts a worker to process messages from a queue.
+     * @param qname - The name of the queue.
+     * @param handler - The function to process messages.
+     * @param options - Options for the worker.
+     */
     startWorker<QueueName extends `${string}:batch` | (string & {})>(qname: QueueName, handler: (received: QueueName extends `${string}:batch` ? BatchMessage : Message) => Promise<{
         success: boolean;
     }>, options?: {
@@ -87,6 +224,13 @@ export declare class RedisQ {
         silent?: boolean;
     }): Promise<void>;
     private runWorker;
+    /**
+     * Stops a specific worker.
+     * @param qname - The name of the queue for which to stop the worker.
+     */
     stopWorker(qname: string): void;
+    /**
+     * Closes all workers and the Redis connection gracefully.
+     */
     close(): Promise<void>;
 }
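For orientation, the declarations above describe the public surface the new JSDoc documents. A minimal usage sketch based only on those signatures follows; the import path, connection details, queue name, and payload are illustrative assumptions, not taken from the package itself:

    import { RedisQ } from "@pingpolls/redisq"; // assumes the package entry re-exports RedisQ from dist/app

    // Connect with explicit details; per QueueOptions, passing a Bun RedisClient instance would also work.
    const queue = new RedisQ({ host: "127.0.0.1", port: "6379", namespace: "example" });

    // Create a regular queue; resolves to false if it already exists.
    await queue.createQueue({ qname: "emails", maxRetries: 3, maxBackoffSeconds: 30 });

    // Enqueue a message, optionally delayed by `delay` milliseconds; returns the message id.
    const id = await queue.sendMessage({
      qname: "emails",
      message: JSON.stringify({ to: "user@example.com" }),
      delay: 1000,
    });

    // Process messages; the handler receives { id, message, sent, attempt } and resolves to { success: boolean }.
    queue.startWorker("emails", async (received) => {
      console.log(received.id, received.attempt, received.message);
      return { success: true };
    });

    // Stop all workers and close the connection gracefully.
    await queue.close();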
package/dist/app.js
CHANGED
@@ -1,5 +1,8 @@
 import { RedisClient as BunRedisClient } from "bun";
 import { Cron } from "croner";
+/**
+ * A lightweight, type-safe Redis-based message queue for Bun.
+ */
 export class RedisQ {
     redis;
     redisUrl;
@@ -7,6 +10,10 @@ export class RedisQ {
     workers = new Map();
     batchJobs = new Map();
     isClosing = false;
+    /**
+     * Initializes the RedisQ client.
+     * @param options - Options for initializing the client.
+     */
     constructor(options) {
         if ("redis" in options) {
             this.redis = options.redis;
@@ -34,6 +41,11 @@ export class RedisQ {
     isBatchQueue(qname) {
         return qname.endsWith(":batch");
     }
+    /**
+     * Creates a new queue with the specified options.
+     * @param options - Options for creating the queue.
+     * @returns `true` if the queue was created, `false` if it already exists.
+     */
     async createQueue(options) {
         const { qname, maxsize = 65536, maxRetries = 0, maxBackoffSeconds = 30, } = options;
         if (!/^[a-zA-Z0-9_:-]{1,160}$/.test(qname)) {
@@ -58,6 +70,10 @@ export class RedisQ {
         await this.redis.hset(key, attrs);
         return true;
     }
+    /**
+     * Lists all queue names.
+     * @returns An array of queue names.
+     */
     async listQueues() {
         const pattern = `${this.ns}:*`;
         const keys = await this.redis.keys(pattern);
@@ -65,6 +81,11 @@ export class RedisQ {
             .map((k) => k.replace(`${this.ns}:`, ""))
             .filter((k) => !k.includes(":") || k.endsWith(":batch"));
     }
+    /**
+     * Gets the attributes of a queue.
+     * @param qname - The name of the queue.
+     * @returns The queue attributes, or `null` if the queue does not exist.
+     */
     async getQueue(qname) {
         const key = this.getKey(qname);
         const attrs = await this.redis.hgetall(key);
@@ -91,6 +112,11 @@ export class RedisQ {
         }
         return result;
     }
+    /**
+     * Deletes a queue and all its data.
+     * @param qname - The name of the queue to delete.
+     * @returns `true` if the queue was deleted, `false` if it did not exist.
+     */
     async deleteQueue(qname) {
         const pattern = `${this.ns}:${qname}*`;
         let cursor = "0";
@@ -105,6 +131,11 @@ export class RedisQ {
         } while (cursor !== "0");
         return keysFound;
     }
+    /**
+     * Sends a message to a regular queue.
+     * @param options - Options for sending the message.
+     * @returns The unique ID of the message.
+     */
     async sendMessage(options) {
         const { qname, message, delay } = options;
         if (this.isBatchQueue(qname)) {
@@ -147,6 +178,11 @@ export class RedisQ {
         }
         return id;
     }
+    /**
+     * Sends a message to a batch queue.
+     * @param options - Options for sending the batch message.
+     * @returns The unique ID of the message.
+     */
     async sendBatchMessage(options) {
         const { qname, batchId, message } = options;
         if (!this.isBatchQueue(qname)) {
@@ -256,6 +292,12 @@ export class RedisQ {
             sent: meta.sent,
         };
     }
+    /**
+     * Manually deletes a message from the queue.
+     * @param qname - The name of the queue.
+     * @param id - The ID of the message to delete.
+     * @returns `true` if the message was deleted, `false` otherwise.
+     */
     async deleteMessage(qname, id) {
         const messagesKey = this.getKey(qname, "messages");
         const deleted = await this.redis.hdel(messagesKey, id);
@@ -388,6 +430,12 @@ export class RedisQ {
         });
         await Promise.all(batchPromises);
     }
+    /**
+     * Starts a worker to process messages from a queue.
+     * @param qname - The name of the queue.
+     * @param handler - The function to process messages.
+     * @param options - Options for the worker.
+     */
     async startWorker(qname, handler, options = {}) {
         const { concurrency = 1, silent = false } = options;
         if (this.workers.has(qname)) {
@@ -462,6 +510,10 @@ export class RedisQ {
             }
         }
     }
+    /**
+     * Stops a specific worker.
+     * @param qname - The name of the queue for which to stop the worker.
+     */
     stopWorker(qname) {
         const controller = this.workers.get(qname);
         if (controller) {
@@ -474,6 +526,9 @@ export class RedisQ {
             this.batchJobs.delete(qname);
         }
     }
+    /**
+     * Closes all workers and the Redis connection gracefully.
+     */
     async close() {
         this.isClosing = true;
         for (const qname of this.workers.keys()) {
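The batch-queue path surfaced above mirrors the declarations: queue names must end in `:batch`, messages sharing a `batchId` are delivered together, and a croner `Cron` job (see the `batchJobs` map and `processBatches`) appears to flush pending batches on the `every` interval. A rough sketch under the same assumptions as the previous example, with a hypothetical queue name and payloads:

    // Batch queue: the name must end with ":batch"; `every` is the processing interval in seconds (default 60).
    await queue.createQueue({ qname: "reports:batch", every: 60, maxRetries: 1 });

    // Messages that share a batchId are grouped into a single BatchMessage.
    await queue.sendBatchMessage({ qname: "reports:batch", batchId: "report-001", message: "row 1" });
    await queue.sendBatchMessage({ qname: "reports:batch", batchId: "report-001", message: "row 2" });

    // The handler receives { batchId, messages, sent, attempt }; each entry in messages omits the per-message attempt field.
    queue.startWorker("reports:batch", async (batch) => {
      console.log(batch.batchId, batch.messages.length, batch.attempt);
      return { success: true };
    });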
package/package.json
CHANGED
package/dist/app.test.d.ts
DELETED
@@ -1 +0,0 @@
-export {};
package/dist/app.test.js
DELETED
@@ -1,427 +0,0 @@
-import { afterAll, describe, expect, test } from "bun:test";
-import { RedisQ } from "./app";
-const redisConfig = {
-    host: process.env.REDIS_HOST || "127.0.0.1",
-    namespace: "redisq-testing",
-    port: process.env.REDIS_PORT || "6379",
-};
-const workerConfig = {
-    silent: true,
-};
-/**
- * Helper to wait for a specific duration
- */
-const sleep = (ms) => new Promise((resolve) => setTimeout(resolve, ms));
-describe("RedisQ Tests", () => {
-    test("1. Can send and receive single message", async () => {
-        const queue = new RedisQ(redisConfig);
-        await queue.createQueue({ qname: "test-basic" });
-        const message = JSON.stringify({ id: 1, name: "John Doe" });
-        let receivedCount = 0;
-        const id = await queue.sendMessage({
-            message,
-            qname: "test-basic",
-        });
-        await new Promise((resolve) => {
-            queue.startWorker("test-basic", async (received) => {
-                expect(received.message).toEqual(message);
-                expect(received.attempt).toBe(1);
-                expect(received.id).toEqual(id);
-                receivedCount++;
-                resolve();
-                return { success: true };
-            }, workerConfig);
-        });
-        expect(receivedCount).toBe(1);
-        await queue.close();
-    });
-    test("2. Can send and receive delayed message", async () => {
-        const queue = new RedisQ(redisConfig);
-        await queue.createQueue({ qname: "test-delayed" });
-        const message = "Delayed message";
-        const delayMs = 2000;
-        let receivedCount = 0;
-        const sentAt = Date.now();
-        const id = await queue.sendMessage({
-            delay: delayMs,
-            message,
-            qname: "test-delayed",
-        });
-        await new Promise((resolve) => {
-            queue.startWorker("test-delayed", async (received) => {
-                const receivedAt = Date.now();
-                const actualDelay = receivedAt - sentAt;
-                expect(received.message).toEqual(message);
-                expect(received.id).toEqual(id);
-                expect(received.attempt).toBe(1);
-                /**
-                 * Check that message was delayed by at least the specified delay
-                 * Allow 100ms margin for processing time
-                 */
-                expect(actualDelay).toBeGreaterThanOrEqual(delayMs - 100);
-                receivedCount++;
-                resolve();
-                return { success: true };
-            }, workerConfig);
-        });
-        expect(receivedCount).toBe(1);
-        await queue.close();
-    });
-    test("3. Can retry send and receive single message", async () => {
-        const queue = new RedisQ(redisConfig);
-        await queue.createQueue({
-            maxBackoffSeconds: 1,
-            maxRetries: 3,
-            qname: "test-retry",
-        });
-        const message = "Retry me!";
-        let attemptCount = 0;
-        await queue.sendMessage({
-            message,
-            qname: "test-retry",
-        });
-        await new Promise((resolve) => {
-            queue.startWorker("test-retry", async (received) => {
-                attemptCount++;
-                expect(received.attempt).toBe(attemptCount);
-                expect(received.message).toBe(message);
-                /**
-                 * Fail first 2 attempts, succeed on 3rd
-                 */
-                if (received.attempt < 3) {
-                    return { success: false };
-                }
-                resolve();
-                return { success: true };
-            }, workerConfig);
-        });
-        expect(attemptCount).toBe(3);
-        await queue.close();
-    });
-    test("4. Can retry send and receive delayed message", async () => {
-        const queue = new RedisQ(redisConfig);
-        await queue.createQueue({
-            maxBackoffSeconds: 1,
-            maxRetries: 2,
-            qname: "test-delayed-retry",
-        });
-        const message = "Delayed retry message";
-        const delayMs = 1000;
-        let attemptCount = 0;
-        const sentAt = Date.now();
-        await queue.sendMessage({
-            delay: delayMs,
-            message,
-            qname: "test-delayed-retry",
-        });
-        await new Promise((resolve) => {
-            queue.startWorker("test-delayed-retry", async (received) => {
-                attemptCount++;
-                expect(received.attempt).toBe(attemptCount);
-                expect(received.message).toBe(message);
-                /**
-                 * First attempt should be delayed by initial delay
-                 */
-                if (attemptCount === 1) {
-                    const actualDelay = Date.now() - sentAt;
-                    expect(actualDelay).toBeGreaterThanOrEqual(delayMs - 100);
-                }
-                /**
-                 * Fail first attempt, succeed on 2nd
-                 */
-                if (received.attempt < 2) {
-                    return { success: false };
-                }
-                resolve();
-                return { success: true };
-            }, workerConfig);
-        });
-        expect(attemptCount).toBe(2);
-        await queue.close();
-    });
-    test("5. Can send and receive batched messages (multiple batches in same period)", async () => {
-        const queue = new RedisQ(redisConfig);
-        /**
-         * Create batch queue with 3-second interval
-         * This simulates a spreadsheet-queue:batch with shorter period for testing
-         */
-        await queue.createQueue({
-            every: 3,
-            maxRetries: 0,
-            qname: "spreadsheet-queue:batch",
-        });
-        /**
-         * Simulate the scenario:
-         * - 10 messages sent at different timestamps
-         * - 6 messages with batchId "batch-001"
-         * - 4 messages with batchId "batch-002"
-         * - All should be processed in the SAME 3-second period
-         */
-        const batch001Messages = [];
-        const batch002Messages = [];
-        const processedBatches = new Set();
-        let batch001Processed = false;
-        let batch002Processed = false;
-        /**
-         * Send messages FIRST, then start worker
-         * This allows all messages to be ready before the first cron cycle
-         */
-        /**
-         * Send 6 messages to batch-001
-         */
-        for (let i = 1; i <= 6; i++) {
-            const message = `Spreadsheet row ${i} for batch-001`;
-            await queue.sendBatchMessage({
-                batchId: "batch-001",
-                message,
-                qname: "spreadsheet-queue:batch",
-            });
-            batch001Messages.push(message);
-        }
-        /**
-         * Send 4 messages to batch-002
-         */
-        for (let i = 1; i <= 4; i++) {
-            const message = `Spreadsheet row ${i} for batch-002`;
-            await queue.sendBatchMessage({
-                batchId: "batch-002",
-                message,
-                qname: "spreadsheet-queue:batch",
-            });
-            batch002Messages.push(message);
-        }
-        /**
-         * Start worker AFTER sending messages
-         * This ensures all messages are already pending when the cron starts
-         */
-        const workerPromise = new Promise((resolve) => {
-            queue.startWorker("spreadsheet-queue:batch", async (received) => {
-                processedBatches.add(received.batchId);
-                if (received.batchId === "batch-001") {
-                    expect(received.messages.length).toBe(6);
-                    expect(received.attempt).toBe(1);
-                    /**
-                     * Verify all messages are present
-                     */
-                    received.messages.forEach((msg, idx) => {
-                        expect(msg.message).toBe(batch001Messages.at(idx) ?? "");
-                    });
-                    batch001Processed = true;
-                }
-                else if (received.batchId === "batch-002") {
-                    expect(received.messages.length).toBe(4);
-                    expect(received.attempt).toBe(1);
-                    received.messages.forEach((msg, idx) => {
-                        expect(msg.message).toBe(batch002Messages.at(idx) ?? "");
-                    });
-                    batch002Processed = true;
-                }
-                /**
-                 * Resolve when both batches are processed
-                 */
-                if (batch001Processed && batch002Processed) {
-                    resolve();
-                }
-                return { success: true };
-            }, workerConfig);
-        });
-        /**
-         * Wait for batches to be processed
-         * The cron job will run every 3 seconds and process all pending batches
-         * Allow up to 6 seconds for processing (2 cron cycles to be safe)
-         */
-        await Promise.race([
-            workerPromise,
-            sleep(6000).then(() => {
-                throw new Error("Test timeout: batches not processed in time");
-            }),
-        ]);
-        /**
-         * Assert both batches were processed
-         */
-        expect(processedBatches.size).toBe(2);
-        expect(batch001Processed).toBe(true);
-        expect(batch002Processed).toBe(true);
-        await queue.close();
-    });
-    test("6. Can send and receive batched messages with selective retry", async () => {
-        const queue = new RedisQ(redisConfig);
-        /**
-         * Create batch queue with 2-second interval and retry enabled
-         */
-        await queue.createQueue({
-            every: 2,
-            maxBackoffSeconds: 1,
-            maxRetries: 3,
-            qname: "retry-test:batch",
-        });
-        const processedBatches = [];
-        /**
-         * Create 3 different batches
-         */
-        for (let i = 1; i <= 3; i++) {
-            await queue.sendBatchMessage({
-                batchId: `batch-00${i}`,
-                message: `Message ${i}`,
-                qname: "retry-test:batch",
-            });
-        }
-        /**
-         * Start worker AFTER sending messages
-         */
-        const workerPromise = new Promise((resolve) => {
-            queue.startWorker("retry-test:batch", async (received) => {
-                processedBatches.push({
-                    attempt: received.attempt,
-                    batchId: received.batchId,
-                    success: false,
-                });
-                /**
-                 * Simulate batch-001 failing on first 2 attempts
-                 * batch-002 and batch-003 always succeed
-                 */
-                if (received.batchId === "batch-001") {
-                    if (received.attempt < 3) {
-                        /**
-                         * Mark as failed, will retry
-                         */
-                        const lastBatch = processedBatches.at(-1);
-                        if (lastBatch) {
-                            lastBatch.success = false;
-                        }
-                        return { success: false };
-                    }
-                    /**
-                     * 3rd attempt succeeds
-                     */
-                    expect(received.attempt).toBe(3);
-                    const lastBatch = processedBatches.at(-1);
-                    if (lastBatch) {
-                        lastBatch.success = true;
-                    }
-                }
-                else {
-                    /**
-                     * batch-002 and batch-003 succeed immediately
-                     */
-                    expect(received.attempt).toBe(1);
-                    const lastBatch = processedBatches.at(-1);
-                    if (lastBatch) {
-                        lastBatch.success = true;
-                    }
-                }
-                /**
-                 * Check if all batches are done:
-                 * - batch-001: should appear 3 times (attempt 1, 2, 3)
-                 * - batch-002: should appear 1 time (attempt 1)
-                 * - batch-003: should appear 1 time (attempt 1)
-                 * Total: 5 invocations
-                 */
-                const batch001Count = processedBatches.filter((b) => b.batchId === "batch-001").length;
-                const batch002Count = processedBatches.filter((b) => b.batchId === "batch-002").length;
-                const batch003Count = processedBatches.filter((b) => b.batchId === "batch-003").length;
-                if (batch001Count === 3 &&
-                    batch002Count === 1 &&
-                    batch003Count === 1) {
-                    resolve();
-                }
-                return { success: true };
-            }, workerConfig);
-        });
-        /**
-         * Wait for all batches to be processed (including retries)
-         * Allow up to 10 seconds (batch-001 will take multiple cycles)
-         */
-        await Promise.race([
-            workerPromise,
-            sleep(10000).then(() => {
-                throw new Error("Test timeout: batches not processed in time");
-            }),
-        ]);
-        /**
-         * Verify total handler invocations
-         */
-        expect(processedBatches.length).toBe(5);
-        /**
-         * Verify batch-001 retry behavior
-         */
-        const batch001Attempts = processedBatches.filter((b) => b.batchId === "batch-001");
-        expect(batch001Attempts.length).toBe(3);
-        const batch001Attempt1 = batch001Attempts.at(0);
-        expect(batch001Attempt1?.attempt).toBe(1);
-        expect(batch001Attempt1?.success).toBe(false);
-        const batch001Attempt2 = batch001Attempts.at(1);
-        expect(batch001Attempt2?.attempt).toBe(2);
-        expect(batch001Attempt2?.success).toBe(false);
-        const batch001Attempt3 = batch001Attempts.at(2);
-        expect(batch001Attempt3?.attempt).toBe(3);
-        expect(batch001Attempt3?.success).toBe(true);
-        /**
-         * Verify batch-002 succeeded on first attempt
-         */
-        const batch002Attempts = processedBatches.filter((b) => b.batchId === "batch-002");
-        expect(batch002Attempts.length).toBe(1);
-        const batch002Attempt1 = batch002Attempts.at(0);
-        expect(batch002Attempt1?.attempt).toBe(1);
-        expect(batch002Attempt1?.success).toBe(true);
-        /**
-         * Verify batch-003 succeeded on first attempt
-         */
-        const batch003Attempts = processedBatches.filter((b) => b.batchId === "batch-003");
-        expect(batch003Attempts.length).toBe(1);
-        const batch003Attempt1 = batch003Attempts.at(0);
-        expect(batch003Attempt1?.attempt).toBe(1);
-        expect(batch003Attempt1?.success).toBe(true);
-        /**
-         * Verify independence: batch-002 and batch-003 were not affected by batch-001 failures
-         */
-        const successfulBatches = processedBatches.filter((b) => b.success && b.attempt === 1);
-        expect(successfulBatches.length).toBe(2); // batch-002 and batch-003
-        await queue.close();
-    });
-    test("7. Can handle high volume and concurrency", async () => {
-        const totalMessages = 10_000;
-        const queue = new RedisQ(redisConfig);
-        await queue.createQueue({ qname: "test-concurrency" });
-        const processedIds = [];
-        Array.from({ length: totalMessages }, async (_, i) => {
-            await queue.sendMessage({
-                message: JSON.stringify({
-                    id: `${i}`,
-                    name: `John Doe ${i}`,
-                }),
-                qname: "test-concurrency",
-            });
-        });
-        let receivedCount = 0;
-        await new Promise((resolve) => {
-            queue.startWorker("test-concurrency", async (received) => {
-                receivedCount++;
-                processedIds.push(JSON.parse(received.message).id);
-                if (receivedCount === totalMessages) {
-                    resolve();
-                }
-                return { success: true };
-            }, workerConfig);
-        });
-        for (let i = 0; i < totalMessages; i++) {
-            expect(processedIds[i]).toBe(`${i}`);
-        }
-        expect(receivedCount).toBe(totalMessages);
-        await queue.close();
-    });
-});
-afterAll(async () => {
-    const redis = new Bun.RedisClient(`redis://${redisConfig.host}:${redisConfig.port}`);
-    let cursor = "0";
-    const pattern = `${redisConfig.namespace}:*`;
-    do {
-        const result = await redis.scan(cursor, "MATCH", pattern);
-        const [nextCursor, keys] = result;
-        cursor = nextCursor;
-        if (keys.length > 0) {
-            await redis.del(...keys);
-        }
-    } while (cursor !== "0");
-    redis.close();
-});
package/dist/benchmark/stress-worker.d.ts
DELETED
@@ -1 +0,0 @@
-export {};
package/dist/benchmark/stress-worker.js
DELETED
@@ -1,39 +0,0 @@
-import { RedisQ } from "../app";
-self.addEventListener("message", async (event) => {
-    if (event.data.type === "start") {
-        const { messagesPerWorker, testMessage, qname, redisConfig } = event.data.data;
-        try {
-            const queue = new RedisQ(redisConfig);
-            const latencies = [];
-            const CHUNK_SIZE = 1000;
-            for (let i = 0; i < messagesPerWorker; i += CHUNK_SIZE) {
-                const promises = [];
-                const chunkEnd = Math.min(i + CHUNK_SIZE, messagesPerWorker);
-                for (let j = i; j < chunkEnd; j++) {
-                    const start = performance.now();
-                    promises.push(queue
-                        .sendMessage({
-                        message: testMessage,
-                        qname,
-                    })
-                        .then(() => {
-                        const latency = performance.now() - start;
-                        latencies.push(latency);
-                    }));
-                }
-                await Promise.all(promises);
-            }
-            await queue.close();
-            self.postMessage({
-                data: { latencies },
-                type: "result",
-            });
-        }
-        catch (error) {
-            self.postMessage({
-                data: { error: error.message },
-                type: "error",
-            });
-        }
-    }
-});
package/dist/benchmark/stress.js
DELETED
@@ -1,219 +0,0 @@
-#!/usr/bin/env bun
-import { RedisQ } from "../app";
-import mediumMsg from "./medium.txt";
-import smallMsg from "./small.txt";
-import tinyMsg from "./tiny.txt";
-const COLORS = {
-    blue: "\x1b[34m",
-    bright: "\x1b[1m",
-    cyan: "\x1b[36m",
-    green: "\x1b[32m",
-    magenta: "\x1b[35m",
-    red: "\x1b[31m",
-    reset: "\x1b[0m",
-    yellow: "\x1b[33m",
-};
-const WORKER_COUNT = 8;
-const CONCURRENCY = 2;
-const MSG_COUNT = 100_000;
-function log(color, message) {
-    console.log(`${COLORS[color]}${message}${COLORS.reset}`);
-}
-function calculatePercentile(sorted, percentile) {
-    const index = Math.ceil((percentile / 100) * sorted.length) - 1;
-    return sorted[Math.max(0, index)] ?? 0;
-}
-function calculateStats(latencies) {
-    const sorted = [...latencies].sort((a, b) => a - b);
-    const sum = latencies.reduce((acc, val) => acc + val, 0);
-    return {
-        avg: sum / latencies.length,
-        latencies,
-        max: sorted[sorted.length - 1] ?? 0,
-        min: sorted[0] ?? 0,
-        p50: calculatePercentile(sorted, 50),
-        p95: calculatePercentile(sorted, 95),
-        p99: calculatePercentile(sorted, 99),
-    };
-}
-function printResults(testName, results) {
-    log("bright", `\n${"=".repeat(60)}`);
-    log("cyan", ` ${testName}`);
-    log("bright", "=".repeat(60));
-    log("green", "\nOverall Performance:");
-    console.log(` Total Messages: ${results.totalMessages.toLocaleString()}`);
-    console.log(` Duration: ${results.duration.toFixed(2)}s`);
-    console.log(` Throughput: ${results.throughput.toFixed(2)} msg/s`);
-    log("yellow", "\n⚡ Latency Statistics (ms):");
-    console.log(` Min: ${results.min.toFixed(2)} ms`);
-    console.log(` Average: ${results.avg.toFixed(2)} ms`);
-    console.log(` Median (p50): ${results.p50.toFixed(2)} ms`);
-    console.log(` p95: ${results.p95.toFixed(2)} ms`);
-    console.log(` p99: ${results.p99.toFixed(2)} ms`);
-    console.log(` Max: ${results.max.toFixed(2)} ms`);
-}
-async function testRegularQueueParallel(config) {
-    const queue = new RedisQ({
-        host: process.env.REDIS_HOST || "127.0.0.1",
-        namespace: "stress-test",
-        port: process.env.REDIS_PORT || "6379",
-    });
-    const qname = "stress-regular";
-    await queue.createQueue({ maxRetries: 0, maxsize: 150_000, qname });
-    const allLatencies = [];
-    let receivedCount = 0;
-    const messagesPerWorker = Math.floor(config.messageCount / config.workerCount);
-    const actualMessageCount = messagesPerWorker * config.workerCount;
-    const workerPromise = new Promise((resolve) => {
-        queue.startWorker(qname, async () => {
-            receivedCount++;
-            if (receivedCount === actualMessageCount) {
-                resolve();
-            }
-            return { success: true };
-        }, { concurrency: config.concurrency, silent: true });
-    });
-    log("blue", `\nSpawning ${config.workerCount} workers to send ${actualMessageCount.toLocaleString()} messages...`);
-    const startTime = performance.now();
-    const workers = [];
-    const workerPromises = [];
-    for (let i = 0; i < config.workerCount; i++) {
-        const worker = new Worker(new URL("./stress-worker.ts", import.meta.url).href);
-        workers.push(worker);
-        const workerPromise = new Promise((resolve, reject) => {
-            worker.addEventListener("message", (event) => {
-                if (event.data.type === "result") {
-                    resolve(event.data.data?.latencies || []);
-                    worker.terminate();
-                }
-                else if (event.data.type === "error") {
-                    reject(new Error(event.data.data?.error || "Worker error"));
-                    worker.terminate();
-                }
-            });
-            worker.addEventListener("error", (error) => {
-                reject(error);
-                worker.terminate();
-            });
-            worker.postMessage({
-                data: {
-                    messagesPerWorker,
-                    qname,
-                    redisConfig: {
-                        host: process.env.REDIS_HOST || "127.0.0.1",
-                        namespace: "stress-test",
-                        port: process.env.REDIS_PORT || "6379",
-                    },
-                    testMessage: config.testMessage,
-                    workerIndex: i,
-                },
-                type: "start",
-            });
-        });
-        workerPromises.push(workerPromise);
-    }
-    const workerResults = await Promise.all(workerPromises);
-    for (const latencies of workerResults) {
-        allLatencies.push(...latencies);
-    }
-    const sendDuration = performance.now() - startTime;
-    log("green", `✅ All messages sent in ${sendDuration.toFixed(2)}ms`);
-    log("blue", "\n⏳ Waiting for all messages to be processed...");
-    await workerPromise;
-    const duration = (performance.now() - startTime) / 1000;
-    const throughput = actualMessageCount / duration;
-    await queue.close();
-    const stats = calculateStats(allLatencies);
-    return {
-        duration,
-        throughput,
-        totalMessages: actualMessageCount,
-        ...stats,
-    };
-}
-async function cleanup() {
-    log("blue", "\n🧹 Cleaning up test data...");
-    const redis = new Bun.RedisClient(`redis://${process.env.REDIS_HOST || "127.0.0.1"}:${process.env.REDIS_PORT || "6379"}`);
-    let cursor = "0";
-    const pattern = "stress-test:*";
-    let deletedCount = 0;
-    do {
-        const result = await redis.scan(cursor, "MATCH", pattern);
-        const [nextCursor, keys] = result;
-        cursor = nextCursor;
-        if (keys.length > 0) {
-            await redis.del(...keys);
-            deletedCount += keys.length;
-        }
-    } while (cursor !== "0");
-    redis.close();
-    log("green", `✅ Cleaned up ${deletedCount} keys`);
-}
-async function main() {
-    log("bright", "\n════════════════════════════════════════════════════════════");
-    log("bright", "║ RedisQ Stress Test & Benchmark Tool ║");
-    log("bright", "════════════════════════════════════════════════════════════");
-    await cleanup();
-    const availableCPUs = navigator.hardwareConcurrency;
-    log("yellow", `\n💻 Detected ${availableCPUs} CPU cores`);
-    log("blue", `\n🔧 Using ${WORKER_COUNT} workers for tests`);
-    try {
-        log("cyan", "\nTest 1: Tiny messages (100 bytes)");
-        const test1Results = await testRegularQueueParallel({
-            concurrency: CONCURRENCY,
-            messageCount: MSG_COUNT,
-            testMessage: tinyMsg,
-            workerCount: WORKER_COUNT,
-        });
-        printResults("Test 1: Tiny Messages (100K msgs, 100 bytes)", test1Results);
-        await cleanup();
-        log("cyan", "\nTest 2: Small messages (1KB)");
-        const test2Results = await testRegularQueueParallel({
-            concurrency: CONCURRENCY,
-            messageCount: MSG_COUNT,
-            testMessage: smallMsg,
-            workerCount: WORKER_COUNT,
-        });
-        printResults("Test 2: Small Messages (100K msgs, 1KB)", test2Results);
-        await cleanup();
-        log("cyan", "\nTest 3: Medium messages (10KB)");
-        const test3Results = await testRegularQueueParallel({
-            concurrency: CONCURRENCY,
-            messageCount: MSG_COUNT,
-            testMessage: mediumMsg,
-            workerCount: WORKER_COUNT,
-        });
-        printResults("Test 3: Medium Messages (100K msgs, 10KB)", test3Results);
-        await cleanup();
-        // Calculate combined averages
-        const avgThroughput = (test1Results.throughput +
-            test2Results.throughput +
-            test3Results.throughput) /
-            3;
-        const avgP50 = (test1Results.p50 + test2Results.p50 + test3Results.p50) / 3;
-        const avgP95 = (test1Results.p95 + test2Results.p95 + test3Results.p95) / 3;
-        const avgP99 = (test1Results.p99 + test2Results.p99 + test3Results.p99) / 3;
-        // Summary
-        log("bright", `\n${"=".repeat(60)}`);
-        log("magenta", " BENCHMARK SUMMARY");
-        log("bright", "=".repeat(60));
-        console.log("\n Individual Queue Performance:");
-        console.log(` - Tiny messages (100B): ${test1Results.throughput.toFixed(0)} msg/s (p50: ${test1Results.p50.toFixed(2)}ms)`);
-        console.log(` - Small messages (1KB): ${test2Results.throughput.toFixed(0)} msg/s (p50: ${test2Results.p50.toFixed(2)}ms)`);
-        console.log(` - Medium messages (10KB): ${test3Results.throughput.toFixed(0)} msg/s (p50: ${test3Results.p50.toFixed(2)}ms)`);
-        log("bright", `\n${"=".repeat(60)}`);
-        log("green", "\n✅ All stress tests completed successfully!");
-        log("yellow", "\n💡 To update README.md Performance section, use these values (averaged across all tests):");
-        console.log("\n Overall:");
-        log("cyan", ` - **Throughput**: ~${Math.round(avgThroughput).toLocaleString()} messages/second`);
-        log("cyan", ` - **Latency (p50)**: ${avgP50.toFixed(2)} ms`);
-        log("cyan", ` - **Latency (p95)**: ${avgP95.toFixed(2)} ms`);
-        log("cyan", ` - **Latency (p99)**: ${avgP99.toFixed(2)} ms`);
-    }
-    catch (error) {
-        log("red", `\n❌ Error during stress test: ${error.message}`);
-        log("red", error.stack || "");
-        process.exit(1);
-    }
-}
-main();