@taicode/common-server 1.0.12 → 1.0.14
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/output/index.d.ts +1 -0
- package/output/index.d.ts.map +1 -1
- package/output/index.js +1 -0
- package/output/redis-queue/index.d.ts +2 -1
- package/output/redis-queue/index.d.ts.map +1 -1
- package/output/redis-queue/index.js +1 -1
- package/output/redis-queue/redis-batch-consumer.d.ts +13 -9
- package/output/redis-queue/redis-batch-consumer.d.ts.map +1 -1
- package/output/redis-queue/redis-batch-consumer.js +66 -55
- package/output/redis-queue/redis-batch-consumer.test.d.ts +5 -0
- package/output/redis-queue/redis-batch-consumer.test.d.ts.map +1 -1
- package/output/redis-queue/{batch-consumer.d.ts → redis-queue-batch-consumer.d.ts} +11 -41
- package/output/redis-queue/redis-queue-batch-consumer.d.ts.map +1 -0
- package/output/redis-queue/redis-queue-batch-consumer.js +320 -0
- package/output/redis-queue/redis-queue-batch-consumer.test.d.ts +26 -0
- package/output/redis-queue/redis-queue-batch-consumer.test.d.ts.map +1 -0
- package/output/redis-queue/{batch-consumer.test.js → redis-queue-batch-consumer.test.js} +144 -19
- package/output/redis-queue/redis-queue-common.d.ts +16 -4
- package/output/redis-queue/redis-queue-common.d.ts.map +1 -1
- package/output/redis-queue/redis-queue-common.js +88 -8
- package/output/redis-queue/redis-queue-common.test.d.ts +17 -0
- package/output/redis-queue/redis-queue-common.test.d.ts.map +1 -1
- package/output/redis-queue/redis-queue-common.test.js +27 -65
- package/output/redis-queue/redis-queue-consumer.d.ts +10 -35
- package/output/redis-queue/redis-queue-consumer.d.ts.map +1 -1
- package/output/redis-queue/redis-queue-consumer.js +98 -251
- package/output/redis-queue/redis-queue-consumer.test.d.ts +24 -0
- package/output/redis-queue/redis-queue-consumer.test.d.ts.map +1 -1
- package/output/redis-queue/redis-queue-consumer.test.js +89 -15
- package/output/redis-queue/redis-queue-provider.d.ts +14 -15
- package/output/redis-queue/redis-queue-provider.d.ts.map +1 -1
- package/output/redis-queue/redis-queue-provider.js +16 -13
- package/output/redis-queue/redis-queue-provider.test.d.ts +26 -0
- package/output/redis-queue/redis-queue-provider.test.d.ts.map +1 -1
- package/output/redis-queue/redis-queue-provider.test.js +73 -38
- package/output/redis-queue/test-helpers.d.ts +112 -0
- package/output/redis-queue/test-helpers.d.ts.map +1 -0
- package/output/redis-queue/test-helpers.js +242 -0
- package/output/redis-queue/test-helpers.test.d.ts +28 -0
- package/output/redis-queue/test-helpers.test.d.ts.map +1 -0
- package/output/redis-queue/test-helpers.test.js +572 -0
- package/output/redis-queue/types.d.ts +43 -12
- package/output/redis-queue/types.d.ts.map +1 -1
- package/package.json +5 -3
- package/output/logger/logger.d.ts +0 -33
- package/output/logger/logger.d.ts.map +0 -1
- package/output/logger/logger.js +0 -65
- package/output/logger/logger.test.d.ts +0 -2
- package/output/logger/logger.test.d.ts.map +0 -1
- package/output/logger/logger.test.js +0 -87
- package/output/redis-queue/batch-consumer.d.ts.map +0 -1
- package/output/redis-queue/batch-consumer.js +0 -492
- package/output/redis-queue/batch-consumer.test.d.ts +0 -2
- package/output/redis-queue/batch-consumer.test.d.ts.map +0 -1
- package/output/redis-queue/batch-redis-queue.d.ts +0 -136
- package/output/redis-queue/batch-redis-queue.d.ts.map +0 -1
- package/output/redis-queue/batch-redis-queue.js +0 -583
- package/output/redis-queue/batch-redis-queue.test.d.ts +0 -2
- package/output/redis-queue/batch-redis-queue.test.d.ts.map +0 -1
- package/output/redis-queue/batch-redis-queue.test.js +0 -243
- package/output/redis-queue/redis-queue.d.ts +0 -129
- package/output/redis-queue/redis-queue.d.ts.map +0 -1
- package/output/redis-queue/redis-queue.js +0 -557
- package/output/redis-queue/redis-queue.test.d.ts +0 -2
- package/output/redis-queue/redis-queue.test.d.ts.map +0 -1
- package/output/redis-queue/redis-queue.test.js +0 -234
- package/output/redis-queue/registry.d.ts +0 -57
- package/output/redis-queue/registry.d.ts.map +0 -1
- package/output/redis-queue/registry.js +0 -30
|
@@ -0,0 +1,320 @@
|
|
|
1
|
+
import { catchIt } from '@taicode/common-base';
import { RedisQueueCommon } from './redis-queue-common';
/**
 * Redis batch queue consumer.
 *
 * Consumes queued tasks in batches, handing multiple task payloads to a
 * single handler invocation per batch.
 *
 * @template K queue key name type
 *
 * @example
 * ```ts
 * const consumer = new RedisQueueBatchConsumer('email-queue', {
 *   redisUrl: 'redis://localhost:6379',
 *   batchSize: 50,
 *   handler: async (dataList) => {
 *     await sendEmailsBatch(dataList.map(d => d.to))
 *     return catchIt(() => {})
 *   }
 * })
 *
 * await consumer.connect() // consumption starts automatically
 *
 * // fetch queue statistics
 * const stats = await consumer.statistics()
 *
 * // stop consuming
 * consumer.disconnect()
 * ```
 */
export class RedisQueueBatchConsumer extends RedisQueueCommon {
    consumerRunning = false; // true while the polling consumer loop is active
    consumerInterval = null; // timer handle for the polling loop
    recoveryInterval = null; // timer handle for the stalled-task recovery loop
    processingBatches = 0; // number of batches currently being processed
    processingPromises = new Set(); // every in-flight batch promise, so disconnect() can await them
    config; // resolved consumer configuration (handler, batch size, retries, timeouts)
    constructor(queueKey, config) {
        // Validate required options before doing any work.
        if (!config.handler) {
            throw new Error('[RedisQueueBatchConsumer] handler is required');
        }
        // Delegate connection-related options to the base class.
        super(queueKey, {
            redisUrl: config.redisUrl,
            redisClient: config.redisClient,
            cleanupDelay: config.cleanupDelay ?? 86400,
        });
        // Fill in defaults for every optional consumer setting.
        this.config = {
            handler: config.handler,
            batchSize: config.batchSize ?? 10,
            maxRetries: config.maxRetries ?? 3,
            concurrency: config.concurrency ?? 1,
            consumerInterval: config.consumerInterval ?? 1000,
            processingTimeout: config.processingTimeout ?? 60000, // 60 seconds
        };
    }
    getLogPrefix() {
        return 'RedisQueueBatchConsumer';
    }
    /**
     * Connect to Redis and automatically start the consumer.
     */
    async connect() {
        await super.connect();
        // Once connected, start both the consumer loop and the recovery mechanism.
        this.startConsumer();
        this.startRecovery();
    }
    /**
     * Disconnect from Redis and stop the consumer.
     * Waits for all in-flight batches to finish before closing the connection.
     */
    async disconnect() {
        // Stop the consumer and recovery loops first so no new work is accepted.
        this.stopConsumer();
        this.stopRecovery();
        // Wait for every batch that is still being processed.
        if (this.processingPromises.size > 0) {
            console.log(`[RedisQueueBatchConsumer] Waiting for ${this.processingPromises.size} batches to complete...`);
            await Promise.allSettled(this.processingPromises);
            console.log(`[RedisQueueBatchConsumer] All batches completed`);
        }
        // Finally close the Redis connection.
        super.disconnect();
    }
    /**
     * Process one batch of tasks.
     *
     * Loads the task payloads, marks them as processing, invokes the user
     * handler once with the whole batch, then marks the batch completed.
     * On handler failure, each task is individually retried (re-queued to
     * pending) or moved to the failed queue once retries are exhausted.
     *
     * @param taskIds ids of the tasks already moved into the processing queue
     * @returns a promise that resolves when the batch has been fully handled
     */
    async processBatch(taskIds) {
        this.processingBatches++;
        // Wrap the work in a promise so disconnect() can wait for in-flight batches.
        const batchPromise = (async () => {
            try {
                // Load every task payload for the batch.
                const tasks = await Promise.all(taskIds.map(id => this.getTask(id)));
                const validTasks = tasks.filter((task) => task !== null);
                if (validTasks.length === 0) {
                    console.warn(`[RedisQueueBatchConsumer] No valid tasks found in batch`);
                    return;
                }
                // Keep only tasks that are still pending; anything else is failed out.
                const tasksToProcess = [];
                for (const task of validTasks) {
                    // Check status
                    if (task.status !== 'pending') {
                        console.log(`[RedisQueueBatchConsumer] Task ${task.id} has invalid status (${task.status}), marking as failed`);
                        await this.applyStatus(task.id, task.status, 'failed');
                        continue;
                    }
                    tasksToProcess.push(task);
                }
                if (tasksToProcess.length === 0) {
                    return;
                }
                try {
                    // Tasks are already in the processing queue (moved by the Lua script);
                    // only the per-task status and start time need updating here.
                    const taskIdList = tasksToProcess.map(t => t.id);
                    const now = Date.now();
                    await Promise.all(tasksToProcess.map(task => {
                        task.status = 'processing';
                        task.processingStartTime = now;
                        const taskKey = `${this.queueKey}:task:${task.id}`;
                        return this.redis.setEx(taskKey, this.cleanupDelay, JSON.stringify(task));
                    }));
                    // Hand the whole batch to the user handler in one call.
                    const dataList = tasksToProcess.map(t => t.data);
                    await this.config.handler(dataList);
                    // Batch-update every task's status to completed.
                    const updatedCount = await this.applyStatusBatch(taskIdList, 'processing', 'completed');
                    console.log(`[RedisQueueBatchConsumer] Batch completed: ${updatedCount} tasks`);
                }
                catch (error) {
                    console.error(`[RedisQueueBatchConsumer] Batch failed:`, error);
                    // On handler failure, retry or fail each task in the batch individually.
                    for (const task of tasksToProcess) {
                        if (task.retryCount < task.maxRetries) {
                            task.retryCount++;
                            task.status = 'pending';
                            task.processingStartTime = undefined;
                            const taskKey = `${this.queueKey}:task:${task.id}`;
                            // Atomically persist the updated task, remove it from the
                            // processing queue and push it back onto the pending queue.
                            const script = `
        redis.call('SETEX', KEYS[1], ARGV[1], ARGV[2])
        redis.call('LREM', KEYS[2], 0, ARGV[3])
        redis.call('RPUSH', KEYS[3], ARGV[3])
        return 1
      `;
                            await this.redis.eval(script, {
                                keys: [taskKey, this.processingQueue, this.pendingQueue],
                                arguments: [this.cleanupDelay.toString(), JSON.stringify(task), task.id],
                            });
                            console.log(`[RedisQueueBatchConsumer] Task ${task.id} will retry (${task.retryCount}/${task.maxRetries})`);
                        }
                        else {
                            task.status = 'failed';
                            task.processingStartTime = undefined;
                            const taskKey = `${this.queueKey}:task:${task.id}`;
                            // Atomically persist the updated task, remove it from the
                            // processing queue and push it onto the failed queue.
                            const script = `
        redis.call('SETEX', KEYS[1], ARGV[1], ARGV[2])
        redis.call('LREM', KEYS[2], 0, ARGV[3])
        redis.call('RPUSH', KEYS[3], ARGV[3])
        return 1
      `;
                            await this.redis.eval(script, {
                                keys: [taskKey, this.processingQueue, this.failedQueue],
                                arguments: [this.cleanupDelay.toString(), JSON.stringify(task), task.id],
                            });
                            console.error(`[RedisQueueBatchConsumer] Task ${task.id} failed after ${task.maxRetries} retries`);
                        }
                    }
                }
            }
            finally {
                this.processingBatches--;
            }
        })();
        // Track the in-flight batch.
        this.processingPromises.add(batchPromise);
        // Untrack it once it settles, regardless of outcome.
        batchPromise.finally(() => {
            this.processingPromises.delete(batchPromise);
        });
        // Return the batch promise so callers may await completion.
        return batchPromise;
    }
    /**
     * Start the stalled-task recovery mechanism.
     * Runs one recovery pass immediately, then every 10 seconds.
     */
    startRecovery() {
        if (this.recoveryInterval || !this.redis) {
            return;
        }
        // Run one recovery pass immediately.
        this.recoverStalled(this.config.processingTimeout).catch(error => {
            console.error('[RedisQueueBatchConsumer] Initial recovery error:', error);
        });
        // Periodic check (every 10 seconds).
        this.recoveryInterval = setInterval(() => {
            this.recoverStalled(this.config.processingTimeout).catch(error => {
                console.error('[RedisQueueBatchConsumer] Recovery error:', error);
            });
        }, 10000);
        console.log('[RedisQueueBatchConsumer] Recovery mechanism started');
    }
    /**
     * Stop the stalled-task recovery mechanism.
     */
    stopRecovery() {
        if (this.recoveryInterval) {
            clearInterval(this.recoveryInterval);
            this.recoveryInterval = null;
            console.log('[RedisQueueBatchConsumer] Recovery mechanism stopped');
        }
    }
    /**
     * Start the consumer polling loop.
     * Every `consumerInterval` ms it atomically pops up to `batchSize` ready
     * task ids from pending into processing (via a Lua script) and dispatches
     * them to processBatch without awaiting completion.
     */
    startConsumer() {
        if (this.consumerRunning || !this.redis) {
            return;
        }
        this.consumerRunning = true;
        console.log(`[RedisQueueBatchConsumer] Consumer started with batchSize: ${this.config.batchSize}, concurrency: ${this.config.concurrency}`);
        // Lua script: atomically pop a batch of ready tasks from pending and move
        // them to processing. Delayed tasks (delayUntil in the future) are pushed
        // back; at most count * 3 entries are examined per call to bound the scan.
        const popAndMoveBatchScript = `
      local pendingQueue = KEYS[1]
      local processingQueue = KEYS[2]
      local queueKeyPrefix = KEYS[3]
      local count = tonumber(ARGV[1])
      local currentTime = tonumber(ARGV[2])

      local taskIds = {}
      local checkedCount = 0
      local maxCheck = count * 3

      while #taskIds < count and checkedCount < maxCheck do
        local taskId = redis.call('LPOP', pendingQueue)
        if not taskId then
          break
        end

        checkedCount = checkedCount + 1

        local taskKey = queueKeyPrefix .. ':task:' .. taskId
        local taskData = redis.call('GET', taskKey)

        if taskData then
          local task = cjson.decode(taskData)
          local delayUntil = task.delayUntil

          if not delayUntil or delayUntil <= currentTime then
            redis.call('RPUSH', processingQueue, taskId)
            table.insert(taskIds, taskId)
          else
            redis.call('RPUSH', pendingQueue, taskId)
          end
        end
      end

      return taskIds
    `;
        this.consumerInterval = setInterval(async () => {
            try {
                // Check whether a concurrency slot is available.
                const availableSlots = this.config.concurrency - this.processingBatches;
                if (availableSlots <= 0) {
                    return; // concurrency limit reached
                }
                // Atomically pop a batch of task ids.
                const taskIds = await this.redis.eval(popAndMoveBatchScript, {
                    keys: [this.pendingQueue, this.processingQueue, this.queueKey],
                    arguments: [this.config.batchSize.toString(), Date.now().toString()],
                });
                if (taskIds.length > 0) {
                    // Process the batch without awaiting (fire-and-forget with logging).
                    this.processBatch(taskIds).catch(error => {
                        console.error(`[RedisQueueBatchConsumer] Unhandled error in processBatch:`, error);
                    });
                }
            }
            catch (error) {
                console.error('[RedisQueueBatchConsumer] Consumer error:', error);
            }
        }, this.config.consumerInterval);
    }
    /**
     * Stop the consumer polling loop.
     */
    stopConsumer() {
        if (this.consumerInterval) {
            clearInterval(this.consumerInterval);
            this.consumerInterval = null;
        }
        this.consumerRunning = false;
        console.log('[RedisQueueBatchConsumer] Consumer stopped');
    }
    /**
     * Queue statistics (O(1): four LLEN calls in parallel).
     * @returns counts of pending, processing, completed and failed tasks
     */
    async statistics() {
        if (!this.redis) {
            return { pending: 0, processing: 0, completed: 0, failed: 0 };
        }
        const [pending, processing, completed, failed] = await Promise.all([
            this.redis.lLen(this.pendingQueue),
            this.redis.lLen(this.processingQueue),
            this.redis.lLen(this.completedQueue),
            this.redis.lLen(this.failedQueue),
        ]);
        return { pending, processing, completed, failed };
    }
    /**
     * Health check.
     * @returns true when connected and the Redis PING round-trip succeeds
     */
    async health() {
        if (!this.redis)
            return false;
        const result = await catchIt(() => this.redis.ping());
        return !result.isError() && result.value === 'PONG';
    }
}
|
|
@@ -0,0 +1,26 @@
|
|
|
1
|
+
/**
 * Shape of the ad-hoc task payloads used by the batch-consumer tests.
 * Every field is optional because individual tests only populate the
 * subset of fields they assert on.
 */
type TestTaskData = {
    id?: string;
    value?: number | string;
    test?: boolean | string;
    index?: number;
    payload?: string;
    order?: number;
    taskId?: number;
    message?: string;
    name?: string;
    shouldFail?: boolean;
    step?: number;
    real?: boolean;
    nested?: {
        value: number;
    };
    example?: string;
    data?: any;
};
// Register the test queue key pattern with the typed queue registry so
// keys like `test:batch:<anything>` resolve to TestTaskData.
declare module './types' {
    interface RedisQueueRegistry {
        [key: `test:batch:${string}`]: TestTaskData;
    }
}
export {};
//# sourceMappingURL=redis-queue-batch-consumer.test.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"redis-queue-batch-consumer.test.d.ts","sourceRoot":"","sources":["../../source/redis-queue/redis-queue-batch-consumer.test.ts"],"names":[],"mappings":"AAQA,KAAK,YAAY,GAAG;IAClB,EAAE,CAAC,EAAE,MAAM,CAAA;IACX,KAAK,CAAC,EAAE,MAAM,GAAG,MAAM,CAAA;IACvB,IAAI,CAAC,EAAE,OAAO,GAAG,MAAM,CAAA;IACvB,KAAK,CAAC,EAAE,MAAM,CAAA;IACd,OAAO,CAAC,EAAE,MAAM,CAAA;IAChB,KAAK,CAAC,EAAE,MAAM,CAAA;IACd,MAAM,CAAC,EAAE,MAAM,CAAA;IACf,OAAO,CAAC,EAAE,MAAM,CAAA;IAChB,IAAI,CAAC,EAAE,MAAM,CAAA;IACb,UAAU,CAAC,EAAE,OAAO,CAAA;IACpB,IAAI,CAAC,EAAE,MAAM,CAAA;IACb,IAAI,CAAC,EAAE,OAAO,CAAA;IACd,MAAM,CAAC,EAAE;QAAE,KAAK,EAAE,MAAM,CAAA;KAAE,CAAA;IAC1B,OAAO,CAAC,EAAE,MAAM,CAAA;IAChB,IAAI,CAAC,EAAE,GAAG,CAAA;CACX,CAAA;AAGD,OAAO,QAAQ,SAAS,CAAC;IACvB,UAAU,kBAAkB;QAC1B,CAAC,GAAG,EAAE,cAAc,MAAM,EAAE,GAAG,YAAY,CAAA;KAC5C;CACF"}
|
|
@@ -1,22 +1,18 @@
|
|
|
1
1
|
import { describe, it, expect, afterEach } from 'vitest';
|
|
2
2
|
import { catchIt } from '@taicode/common-base';
|
|
3
3
|
import { RedisQueueProvider } from './redis-queue-provider';
|
|
4
|
-
import {
|
|
4
|
+
import { RedisQueueBatchConsumer } from './redis-queue-batch-consumer';
|
|
5
|
+
import { dispatchQueueTask, waitQueueCompletion, clearQueue, getQueueTasks } from './test-helpers';
|
|
5
6
|
const REDIS_URL = process.env.REDIS_URL || 'redis://localhost:6379';
|
|
6
|
-
describe('
|
|
7
|
+
describe('RedisQueueBatchConsumer', () => {
|
|
7
8
|
const providers = [];
|
|
8
9
|
const consumers = [];
|
|
9
10
|
afterEach(async () => {
|
|
10
11
|
for (const consumer of consumers) {
|
|
11
|
-
consumer
|
|
12
|
+
await clearQueue(consumer);
|
|
13
|
+
await consumer.disconnect();
|
|
12
14
|
}
|
|
13
15
|
for (const provider of providers) {
|
|
14
|
-
try {
|
|
15
|
-
await provider.clear();
|
|
16
|
-
}
|
|
17
|
-
catch (error) {
|
|
18
|
-
// 忽略清理错误
|
|
19
|
-
}
|
|
20
16
|
provider.disconnect();
|
|
21
17
|
}
|
|
22
18
|
providers.length = 0;
|
|
@@ -24,13 +20,11 @@ describe('RedisBatchConsumer', () => {
|
|
|
24
20
|
});
|
|
25
21
|
const createQueue = (handler, options) => {
|
|
26
22
|
const uniqueKey = `test:batch:${Date.now()}:${Math.random()}`;
|
|
27
|
-
const provider = new RedisQueueProvider({
|
|
23
|
+
const provider = new RedisQueueProvider(uniqueKey, {
|
|
28
24
|
redisUrl: REDIS_URL,
|
|
29
|
-
queueKey: uniqueKey,
|
|
30
25
|
});
|
|
31
|
-
const consumer = new
|
|
26
|
+
const consumer = new RedisQueueBatchConsumer(uniqueKey, {
|
|
32
27
|
redisUrl: REDIS_URL,
|
|
33
|
-
queueKey: uniqueKey,
|
|
34
28
|
consumerInterval: 100,
|
|
35
29
|
maxRetries: 2,
|
|
36
30
|
batchSize: 10,
|
|
@@ -79,6 +73,27 @@ describe('RedisBatchConsumer', () => {
|
|
|
79
73
|
const stats = await consumer.statistics();
|
|
80
74
|
expect(stats.completed).toBe(5);
|
|
81
75
|
});
|
|
76
|
+
it('应该能够使用 dispatchQueueTask 立即处理批量任务', async () => {
|
|
77
|
+
const processedBatches = [];
|
|
78
|
+
const { provider, consumer } = createQueue(async (dataList) => {
|
|
79
|
+
processedBatches.push([...dataList]);
|
|
80
|
+
return catchIt(() => { });
|
|
81
|
+
}, { batchSize: 3 });
|
|
82
|
+
await provider.connect();
|
|
83
|
+
await consumer.connect();
|
|
84
|
+
const taskIds = await provider.enqueue([
|
|
85
|
+
{ value: 1 }, { value: 2 }, { value: 3 },
|
|
86
|
+
{ value: 4 }, { value: 5 },
|
|
87
|
+
]);
|
|
88
|
+
// 使用 dispatchQueueTask 立即处理任务
|
|
89
|
+
await dispatchQueueTask(consumer, taskIds);
|
|
90
|
+
// 应该有 2 个批次(3+2)
|
|
91
|
+
expect(processedBatches).toHaveLength(2);
|
|
92
|
+
expect(processedBatches[0]).toHaveLength(3);
|
|
93
|
+
expect(processedBatches[1]).toHaveLength(2);
|
|
94
|
+
const stats = await consumer.statistics();
|
|
95
|
+
expect(stats.completed).toBe(5);
|
|
96
|
+
});
|
|
82
97
|
it('应该能处理大批量任务', async () => {
|
|
83
98
|
let totalProcessed = 0;
|
|
84
99
|
const { provider, consumer } = createQueue(async (dataList) => {
|
|
@@ -92,6 +107,41 @@ describe('RedisBatchConsumer', () => {
|
|
|
92
107
|
await new Promise(resolve => setTimeout(resolve, 3000));
|
|
93
108
|
expect(totalProcessed).toBe(200);
|
|
94
109
|
});
|
|
110
|
+
it('应该能够使用 waitQueueCompletion 等待批量处理完成', async () => {
|
|
111
|
+
const processedBatches = [];
|
|
112
|
+
const { provider, consumer } = createQueue(async (dataList) => {
|
|
113
|
+
processedBatches.push(dataList.length);
|
|
114
|
+
await new Promise(resolve => setTimeout(resolve, 100));
|
|
115
|
+
return catchIt(() => { });
|
|
116
|
+
}, { batchSize: 5 });
|
|
117
|
+
await provider.connect();
|
|
118
|
+
await consumer.connect();
|
|
119
|
+
await provider.enqueue(Array.from({ length: 15 }, (_, i) => ({ index: i })));
|
|
120
|
+
// 等待所有任务完成
|
|
121
|
+
await waitQueueCompletion(consumer, stats => stats.pending === 0 && stats.processing === 0 && stats.completed === 15);
|
|
122
|
+
// 应该有 3 个批次
|
|
123
|
+
expect(processedBatches).toHaveLength(3);
|
|
124
|
+
expect(processedBatches).toEqual([5, 5, 5]);
|
|
125
|
+
});
|
|
126
|
+
it('应该能够使用 getQueueTasks 获取批量任务详情', async () => {
|
|
127
|
+
const { provider, consumer } = createQueue(async () => {
|
|
128
|
+
// 模拟失败
|
|
129
|
+
throw new Error('Batch failed');
|
|
130
|
+
}, { batchSize: 3, maxRetries: 0 });
|
|
131
|
+
await provider.connect();
|
|
132
|
+
await consumer.connect();
|
|
133
|
+
const taskIds = await provider.enqueue([
|
|
134
|
+
{ name: 'task1' }, { name: 'task2' }, { name: 'task3' }
|
|
135
|
+
]);
|
|
136
|
+
// 立即处理
|
|
137
|
+
await dispatchQueueTask(consumer, taskIds);
|
|
138
|
+
// 等待一下确保任务处理完成
|
|
139
|
+
await new Promise(resolve => setTimeout(resolve, 100));
|
|
140
|
+
// 获取失败任务
|
|
141
|
+
const failedTasks = await getQueueTasks(provider, 'failed');
|
|
142
|
+
expect(failedTasks.length).toBeGreaterThan(0);
|
|
143
|
+
expect(failedTasks.every(t => t.status === 'failed')).toBe(true);
|
|
144
|
+
});
|
|
95
145
|
});
|
|
96
146
|
describe('重试机制', () => {
|
|
97
147
|
it('失败的批次应该重试', async () => {
|
|
@@ -107,6 +157,35 @@ describe('RedisBatchConsumer', () => {
|
|
|
107
157
|
const stats = await consumer.statistics();
|
|
108
158
|
expect(stats.failed).toBe(2);
|
|
109
159
|
});
|
|
160
|
+
it('disconnect 应该等待正在处理的批次完成', async () => {
|
|
161
|
+
const processedBatches = [];
|
|
162
|
+
let processingBatch = null;
|
|
163
|
+
const { provider, consumer } = createQueue(async (dataList) => {
|
|
164
|
+
processingBatch = dataList.map(d => d.index || 0);
|
|
165
|
+
await new Promise(resolve => setTimeout(resolve, 500)); // 模拟长时间处理
|
|
166
|
+
processedBatches.push(dataList.map(d => d.index || 0));
|
|
167
|
+
processingBatch = null;
|
|
168
|
+
return catchIt(() => { });
|
|
169
|
+
}, { batchSize: 2 });
|
|
170
|
+
await provider.connect();
|
|
171
|
+
await consumer.connect();
|
|
172
|
+
// 添加一批任务
|
|
173
|
+
await provider.enqueue([{ index: 1 }, { index: 2 }]);
|
|
174
|
+
// 等待批次开始处理
|
|
175
|
+
await new Promise(resolve => setTimeout(resolve, 200));
|
|
176
|
+
expect(processingBatch).toEqual([1, 2]);
|
|
177
|
+
expect(processedBatches).toHaveLength(0);
|
|
178
|
+
// 调用 disconnect,应该等待批次完成
|
|
179
|
+
const disconnectPromise = consumer.disconnect();
|
|
180
|
+
// 此时批次应该还在处理中
|
|
181
|
+
expect(processingBatch).toEqual([1, 2]);
|
|
182
|
+
// 等待 disconnect 完成
|
|
183
|
+
await disconnectPromise;
|
|
184
|
+
// disconnect 完成后,批次应该已经处理完成
|
|
185
|
+
expect(processingBatch).toBe(null);
|
|
186
|
+
expect(processedBatches).toHaveLength(1);
|
|
187
|
+
expect(processedBatches[0]).toEqual([1, 2]);
|
|
188
|
+
});
|
|
110
189
|
});
|
|
111
190
|
describe('幂等性', () => {
|
|
112
191
|
it('应该支持在 data 中指定 id 来实现幂等性', async () => {
|
|
@@ -144,22 +223,19 @@ describe('RedisBatchConsumer', () => {
|
|
|
144
223
|
let consumer1Count = 0;
|
|
145
224
|
let consumer2Count = 0;
|
|
146
225
|
const uniqueKey = `test:batch:concurrent:${Date.now()}:${Math.random()}`;
|
|
147
|
-
const provider = new RedisQueueProvider({
|
|
226
|
+
const provider = new RedisQueueProvider(uniqueKey, {
|
|
148
227
|
redisUrl: REDIS_URL,
|
|
149
|
-
queueKey: uniqueKey,
|
|
150
228
|
});
|
|
151
|
-
const consumer1 = new
|
|
229
|
+
const consumer1 = new RedisQueueBatchConsumer(uniqueKey, {
|
|
152
230
|
redisUrl: REDIS_URL,
|
|
153
|
-
queueKey: uniqueKey,
|
|
154
231
|
batchSize: 5,
|
|
155
232
|
handler: async (dataList) => {
|
|
156
233
|
consumer1Count += dataList.length;
|
|
157
234
|
return catchIt(() => { });
|
|
158
235
|
},
|
|
159
236
|
});
|
|
160
|
-
const consumer2 = new
|
|
237
|
+
const consumer2 = new RedisQueueBatchConsumer(uniqueKey, {
|
|
161
238
|
redisUrl: REDIS_URL,
|
|
162
|
-
queueKey: uniqueKey,
|
|
163
239
|
batchSize: 5,
|
|
164
240
|
handler: async (dataList) => {
|
|
165
241
|
consumer2Count += dataList.length;
|
|
@@ -213,4 +289,53 @@ describe('RedisBatchConsumer', () => {
|
|
|
213
289
|
expect(receivedData[0].data).toBe(largeString);
|
|
214
290
|
});
|
|
215
291
|
});
|
|
292
|
+
describe('超时恢复', () => {
|
|
293
|
+
it('超时的任务应该直接标记为失败', { timeout: 15000 }, async () => {
|
|
294
|
+
let processCount = 0;
|
|
295
|
+
const { provider, consumer } = createQueue(async (dataList) => {
|
|
296
|
+
processCount++;
|
|
297
|
+
// 模拟处理卡住,永远不完成 (但超时机制会检测到)
|
|
298
|
+
await new Promise(resolve => setTimeout(resolve, 20000));
|
|
299
|
+
return catchIt(() => { });
|
|
300
|
+
}, {
|
|
301
|
+
batchSize: 1,
|
|
302
|
+
processingTimeout: 500, // 500ms 超时
|
|
303
|
+
consumerInterval: 100,
|
|
304
|
+
});
|
|
305
|
+
await provider.connect();
|
|
306
|
+
await consumer.connect();
|
|
307
|
+
await provider.enqueue([{ value: 1 }]);
|
|
308
|
+
// 等待任务被处理和超时恢复 (需要等待至少2个恢复周期: 10秒)
|
|
309
|
+
await new Promise(resolve => setTimeout(resolve, 12000));
|
|
310
|
+
const stats = await consumer.statistics();
|
|
311
|
+
// 超时后直接标记为失败,不应该重试
|
|
312
|
+
expect(stats.failed).toBe(1);
|
|
313
|
+
expect(stats.pending).toBe(0);
|
|
314
|
+
expect(stats.processing).toBe(0);
|
|
315
|
+
expect(stats.completed).toBe(0);
|
|
316
|
+
// 应该只处理一次,不会重试
|
|
317
|
+
expect(processCount).toBe(1);
|
|
318
|
+
});
|
|
319
|
+
it('未超时的失败任务应该正常重试', async () => {
|
|
320
|
+
let attemptCount = 0;
|
|
321
|
+
const { provider, consumer } = createQueue(async () => {
|
|
322
|
+
attemptCount++;
|
|
323
|
+
throw new Error('Task failed');
|
|
324
|
+
}, {
|
|
325
|
+
batchSize: 1,
|
|
326
|
+
maxRetries: 3,
|
|
327
|
+
processingTimeout: 5000, // 5秒超时,足够长不会触发
|
|
328
|
+
consumerInterval: 100,
|
|
329
|
+
});
|
|
330
|
+
await provider.connect();
|
|
331
|
+
await consumer.connect();
|
|
332
|
+
await provider.enqueue([{ value: 1 }]);
|
|
333
|
+
// 等待所有重试完成
|
|
334
|
+
await new Promise(resolve => setTimeout(resolve, 3000));
|
|
335
|
+
const stats = await consumer.statistics();
|
|
336
|
+
// 应该重试 3 次后失败
|
|
337
|
+
expect(stats.failed).toBe(1);
|
|
338
|
+
expect(attemptCount).toBe(4); // 初始 1 次 + 3 次重试
|
|
339
|
+
});
|
|
340
|
+
});
|
|
216
341
|
});
|
|
@@ -1,10 +1,10 @@
|
|
|
1
1
|
import { RedisClientType } from 'redis';
|
|
2
|
-
import type { Status,
|
|
2
|
+
import type { Status, Task, RedisQueueCommonConfig } from './types';
|
|
3
3
|
/**
|
|
4
4
|
* Redis 队列基础类
|
|
5
5
|
* 封装 Provider 和 Consumer 的公共逻辑
|
|
6
6
|
*/
|
|
7
|
-
export declare abstract class RedisQueueCommon
|
|
7
|
+
export declare abstract class RedisQueueCommon {
|
|
8
8
|
protected redis: RedisClientType | null;
|
|
9
9
|
protected isExternalRedis: boolean;
|
|
10
10
|
protected readonly queueKey: string;
|
|
@@ -14,7 +14,7 @@ export declare abstract class RedisQueueCommon<T extends TaskData = TaskData> {
|
|
|
14
14
|
protected readonly pendingQueue: string;
|
|
15
15
|
protected readonly processingQueue: string;
|
|
16
16
|
protected readonly completedQueue: string;
|
|
17
|
-
constructor(
|
|
17
|
+
constructor(queueKey: string, config?: RedisQueueCommonConfig);
|
|
18
18
|
/**
|
|
19
19
|
* 获取日志前缀(子类实现)
|
|
20
20
|
*/
|
|
@@ -30,7 +30,7 @@ export declare abstract class RedisQueueCommon<T extends TaskData = TaskData> {
|
|
|
30
30
|
/**
|
|
31
31
|
* 获取任务详情
|
|
32
32
|
*/
|
|
33
|
-
protected getTask(taskId: string): Promise<Task
|
|
33
|
+
protected getTask(taskId: string): Promise<Task | null>;
|
|
34
34
|
/**
|
|
35
35
|
* 根据状态获取对应的队列键
|
|
36
36
|
*/
|
|
@@ -57,5 +57,17 @@ export declare abstract class RedisQueueCommon<T extends TaskData = TaskData> {
|
|
|
57
57
|
* 健康检查
|
|
58
58
|
*/
|
|
59
59
|
health(): Promise<boolean>;
|
|
60
|
+
/**
|
|
61
|
+
* 恢复超时的任务
|
|
62
|
+
* 检查 processing 队列中的任务,将超时的任务直接标记为失败
|
|
63
|
+
* 使用 Lua 脚本批量处理以提高性能和原子性
|
|
64
|
+
*
|
|
65
|
+
* @param processingTimeout 处理超时时间(毫秒)
|
|
66
|
+
* @returns 恢复的任务统计 {timeoutFailed: number, cleaned: number}
|
|
67
|
+
*/
|
|
68
|
+
protected recoverStalled(processingTimeout: number): Promise<{
|
|
69
|
+
timeoutFailed: number;
|
|
70
|
+
cleaned: number;
|
|
71
|
+
}>;
|
|
60
72
|
}
|
|
61
73
|
//# sourceMappingURL=redis-queue-common.d.ts.map
|
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"redis-queue-common.d.ts","sourceRoot":"","sources":["../../source/redis-queue/redis-queue-common.ts"],"names":[],"mappings":"AAAA,OAAO,EAAgB,eAAe,EAAE,MAAM,OAAO,CAAA;AACrD,OAAO,KAAK,EAAE,MAAM,
|
|
1
|
+
{"version":3,"file":"redis-queue-common.d.ts","sourceRoot":"","sources":["../../source/redis-queue/redis-queue-common.ts"],"names":[],"mappings":"AAAA,OAAO,EAAgB,eAAe,EAAE,MAAM,OAAO,CAAA;AACrD,OAAO,KAAK,EAAE,MAAM,EAAY,IAAI,EAAE,sBAAsB,EAAE,MAAM,SAAS,CAAA;AAE7E;;;GAGG;AACH,8BAAsB,gBAAgB;IACpC,SAAS,CAAC,KAAK,EAAE,eAAe,GAAG,IAAI,CAAO;IAC9C,SAAS,CAAC,eAAe,EAAE,OAAO,CAAQ;IAE1C,SAAS,CAAC,QAAQ,CAAC,QAAQ,EAAE,MAAM,CAAA;IACnC,SAAS,CAAC,QAAQ,CAAC,QAAQ,CAAC,EAAE,MAAM,CAAA;IACpC,SAAS,CAAC,QAAQ,CAAC,YAAY,EAAE,MAAM,CAAA;IAGvC,SAAS,CAAC,QAAQ,CAAC,WAAW,EAAE,MAAM,CAAA;IACtC,SAAS,CAAC,QAAQ,CAAC,YAAY,EAAE,MAAM,CAAA;IACvC,SAAS,CAAC,QAAQ,CAAC,eAAe,EAAE,MAAM,CAAA;IAC1C,SAAS,CAAC,QAAQ,CAAC,cAAc,EAAE,MAAM,CAAA;gBAE7B,QAAQ,EAAE,MAAM,EAAE,MAAM,GAAE,sBAA2B;IAwCjE;;OAEG;IACH,SAAS,CAAC,QAAQ,CAAC,YAAY,IAAI,MAAM;IAEzC;;OAEG;IACG,OAAO,IAAI,OAAO,CAAC,IAAI,CAAC;IAS9B;;OAEG;IACH,UAAU,IAAI,IAAI;IAalB;;OAEG;cACa,OAAO,CAAC,MAAM,EAAE,MAAM,GAAG,OAAO,CAAC,IAAI,GAAG,IAAI,CAAC;IAW7D;;OAEG;IACH,SAAS,CAAC,gBAAgB,CAAC,MAAM,EAAE,MAAM,GAAG,MAAM;IAUlD;;OAEG;cACa,WAAW,CAAC,MAAM,EAAE,MAAM,EAAE,SAAS,EAAE,MAAM,EAAE,SAAS,EAAE,MAAM,GAAG,OAAO,CAAC,IAAI,CAAC;IAqChG;;;OAGG;cACa,gBAAgB,CAAC,OAAO,EAAE,MAAM,EAAE,EAAE,SAAS,EAAE,MAAM,EAAE,SAAS,EAAE,MAAM,GAAG,OAAO,CAAC,MAAM,CAAC;IAqD1G;;OAEG;IACG,UAAU,IAAI,OAAO,CAAC;QAAE,OAAO,EAAE,MAAM,CAAC;QAAC,UAAU,EAAE,MAAM,CAAC;QAAC,SAAS,EAAE,MAAM,CAAC;QAAC,MAAM,EAAE,MAAM,CAAA;KAAE,CAAC;IAevG;;OAEG;IACG,MAAM,IAAI,OAAO,CAAC,OAAO,CAAC;IAUhC;;;;;;;OAOG;cACa,cAAc,CAAC,iBAAiB,EAAE,MAAM,GAAG,OAAO,CAAC;QAAE,aAAa,EAAE,MAAM,CAAC;QAAC,OAAO,EAAE,MAAM,CAAA;KAAE,CAAC;CA4E/G"}
|